VirtualBox

source: vbox/trunk/src/VBox/Devices/Graphics/DevVGA_VDMA.cpp@49591

Last change on this file since 49591 was 49519, checked in by vboxsync, 11 years ago

Devices/Graphics/DevVGA_VDMA: warning.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 60.8 KB
1/** @file
2 * Video DMA (VDMA) support.
3 */
4
5/*
6 * Copyright (C) 2006-2012 Oracle Corporation
7 *
8 * This file is part of VirtualBox Open Source Edition (OSE), as
9 * available from http://www.virtualbox.org. This file is free software;
10 * you can redistribute it and/or modify it under the terms of the GNU
11 * General Public License (GPL) as published by the Free Software
12 * Foundation, in version 2 as it comes in the "COPYING" file of the
13 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
14 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
15 */
16//#include <VBox/VMMDev.h>
17#include <VBox/vmm/pdmdev.h>
18#include <VBox/VBoxVideo.h>
19#include <iprt/semaphore.h>
20#include <iprt/thread.h>
21#include <iprt/mem.h>
22#include <iprt/asm.h>
23
24#include "DevVGA.h"
25#include "HGSMI/SHGSMIHost.h"
26#include "HGSMI/HGSMIHostHlp.h"
27
28#include <VBox/VBoxVideo3D.h>
29
30#ifdef DEBUG_misha
31#define WARN_BP() do { AssertFailed(); } while (0)
32#else
33#define WARN_BP() do { } while (0)
34#endif
35#define WARN(_msg) do { \
36 LogRel(_msg); \
37 WARN_BP(); \
38 } while (0)
39
40#ifdef VBOX_VDMA_WITH_WORKERTHREAD
41typedef enum
42{
43 VBOXVDMAPIPE_STATE_CLOSED = 0,
44 VBOXVDMAPIPE_STATE_CREATED = 1,
45 VBOXVDMAPIPE_STATE_OPENNED = 2,
46 VBOXVDMAPIPE_STATE_CLOSING = 3
47} VBOXVDMAPIPE_STATE;
48
49typedef struct VBOXVDMAPIPE
50{
51 RTSEMEVENT hEvent;
52 /* critical section for accessing pipe properties */
53 RTCRITSECT hCritSect;
54 VBOXVDMAPIPE_STATE enmState;
55 /* true iff the other end needs Event notification */
56 bool bNeedNotify;
57} VBOXVDMAPIPE, *PVBOXVDMAPIPE;
58
59typedef enum
60{
61 VBOXVDMAPIPE_CMD_TYPE_UNDEFINED = 0,
62 VBOXVDMAPIPE_CMD_TYPE_DMACMD = 1,
63 VBOXVDMAPIPE_CMD_TYPE_DMACTL = 2
64} VBOXVDMAPIPE_CMD_TYPE;
65
66typedef struct VBOXVDMAPIPE_CMD_BODY
67{
68 VBOXVDMAPIPE_CMD_TYPE enmType;
69 union
70 {
71 PVBOXVDMACBUF_DR pDr;
72 PVBOXVDMA_CTL pCtl;
73 void *pvCmd;
74 } u;
75} VBOXVDMAPIPE_CMD_BODY, *PVBOXVDMAPIPE_CMD_BODY;
76
77typedef struct VBOXVDMAPIPE_CMD
78{
79 HGSMILISTENTRY Entry;
80 VBOXVDMAPIPE_CMD_BODY Cmd;
81} VBOXVDMAPIPE_CMD, *PVBOXVDMAPIPE_CMD;
82
83#define VBOXVDMAPIPE_CMD_FROM_ENTRY(_pE) ( (PVBOXVDMAPIPE_CMD)((uint8_t *)(_pE) - RT_OFFSETOF(VBOXVDMAPIPE_CMD, Entry)) )
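/* The macro above recovers the containing VBOXVDMAPIPE_CMD from its embedded HGSMILISTENTRY
 * (the usual container_of pattern). A minimal usage sketch, mirroring vboxVDMACommandSubmitCb
 * and vboxVDMACommandProcessCb further down (pVdma stands for the caller's VBOXVDMAHOST):
 *
 *     HGSMILISTENTRY *pEntry = hgsmiListRemoveHead(&pVdma->CmdPool.List);
 *     if (pEntry)
 *     {
 *         PVBOXVDMAPIPE_CMD pPipeCmd = VBOXVDMAPIPE_CMD_FROM_ENTRY(pEntry);
 *         pPipeCmd->Cmd.enmType = VBOXVDMAPIPE_CMD_TYPE_DMACMD;
 *         hgsmiListAppend(&pVdma->PendingList, pEntry);
 *     }
 */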
84
85typedef struct VBOXVDMAPIPE_CMD_POOL
86{
87 HGSMILIST List;
88 uint32_t cCmds;
89 VBOXVDMAPIPE_CMD aCmds[1];
90} VBOXVDMAPIPE_CMD_POOL, *PVBOXVDMAPIPE_CMD_POOL;
91#endif
92
93
94/* state transformations:
95 *
96 *  submitter   |   processor
97 *                   STOPPED
98 *                      |
99 *                      |
100 *                     >
101 *                  LISTENING  --->  PROCESSING
102 *                     ^               _/
103 *                     |             _/
104 *                     |           _/
105 *                     |         _/
106 *                     |       _/
107 *                     |     _/
108 *                     |    /
109 *                     <   >
110 *                       PAUSED
111 *
112 * */
113#define VBVAEXHOSTCONTEXT_STATE_STOPPED 0
114#define VBVAEXHOSTCONTEXT_STATE_LISTENING 1
115#define VBVAEXHOSTCONTEXT_STATE_PROCESSING 2
116#define VBVAEXHOSTCONTEXT_STATE_PAUSED 3
117
118typedef struct VBVAEXHOSTCONTEXT
119{
120 VBVABUFFER *pVBVA;
121 uint32_t cbCurData;
122 volatile uint32_t u32State;
123 volatile uint32_t u32Pause;
124 volatile uint32_t u32cOtherCommands;
125} VBVAEXHOSTCONTEXT;
126
127/* VBoxVBVAExHP**, i.e. processor functions, can NOT be called concurrently with each other,
128 * but can be called with other VBoxVBVAExS** (submitter) functions except Init/Start/Term apparently.
129 * Can only be called by the processor, i.e. the entity that acquired the processor state by a direct or indirect call to VBoxVBVAExHSCheckCommands;
130 * see more detailed comments in the headers of the function definitions */
131static bool VBoxVBVAExHPCmdCheckRelease(struct VBVAEXHOSTCONTEXT *pCmdVbva);
132static int VBoxVBVAExHPCmdGet(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t **ppCmd, uint32_t *pcbCmd);
133
134/* VBoxVBVAExHS**, i.e. submitter functions, can be called concurrently with each other,
135 * as well as with the other VBoxVBVAEx** functions except Init/Start/Term apparently */
136static int VBoxVBVAExHSCheckCommands(struct VBVAEXHOSTCONTEXT *pCmdVbva);
137
138static void VBoxVBVAExHSInit(struct VBVAEXHOSTCONTEXT *pCmdVbva);
139static int VBoxVBVAExHSEnable(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVABUFFER *pVBVA);
140static int VBoxVBVAExHSDisable(struct VBVAEXHOSTCONTEXT *pCmdVbva);
141static void VBoxVBVAExHSTerm(struct VBVAEXHOSTCONTEXT *pCmdVbva);
142static int VBoxVBVAExHSSaveState(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM);
143static int VBoxVBVAExHSLoadState(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM, uint32_t u32Version);
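/* A minimal sketch of the intended submitter/processor handshake for the functions declared
 * above (illustrative only; compare vboxVDMACrCmdCltCmdGet and vboxCmdVBVACmdSubmitPerform below):
 *
 *     if (VBoxVBVAExHSCheckCommands(&pVdma->CmdVbva) == VINF_SUCCESS)
 *     {
 *         // this thread has acquired the PROCESSING state
 *         uint8_t *pu8Cmd; uint32_t cbCmd;
 *         while (VBoxVBVAExHPCmdGet(&pVdma->CmdVbva, &pu8Cmd, &cbCmd) == VINF_SUCCESS)
 *         {
 *             // ... handle the cbCmd bytes at pu8Cmd ...
 *             VBoxVBVAExHPCmdCheckRelease(&pVdma->CmdVbva);
 *         }
 *         // on VINF_EOF / VINF_PERMISSION_DENIED the processing state has already been released
 *     }
 */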
144
145typedef struct VBOXVDMAHOST
146{
147 PHGSMIINSTANCE pHgsmi;
148 PVGASTATE pVGAState;
149 VBVAEXHOSTCONTEXT CmdVbva;
150#ifdef VBOX_VDMA_WITH_WATCHDOG
151 PTMTIMERR3 WatchDogTimer;
152#endif
153#ifdef VBOX_VDMA_WITH_WORKERTHREAD
154 VBOXVDMAPIPE Pipe;
155 HGSMILIST PendingList;
156 RTTHREAD hWorkerThread;
157 VBOXVDMAPIPE_CMD_POOL CmdPool;
158#endif
159} VBOXVDMAHOST, *PVBOXVDMAHOST;
160
161
162#ifdef VBOX_WITH_CRHGSMI
163
164typedef DECLCALLBACK(void) FNVBOXVDMACRCTL_CALLBACK(PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, void* pvContext);
165typedef FNVBOXVDMACRCTL_CALLBACK *PFNVBOXVDMACRCTL_CALLBACK;
166
167typedef struct VBOXVDMACMD_CHROMIUM_CTL_PRIVATE
168{
169 uint32_t cRefs;
170 int32_t rc;
171 PFNVBOXVDMACRCTL_CALLBACK pfnCompletion;
172 void *pvCompletion;
173 VBOXVDMACMD_CHROMIUM_CTL Cmd;
174} VBOXVDMACMD_CHROMIUM_CTL_PRIVATE, *PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE;
175
176#define VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(_p) ((PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE)(((uint8_t*)(_p)) - RT_OFFSETOF(VBOXVDMACMD_CHROMIUM_CTL_PRIVATE, Cmd)))
177
178static PVBOXVDMACMD_CHROMIUM_CTL vboxVDMACrCtlCreate(VBOXVDMACMD_CHROMIUM_CTL_TYPE enmCmd, uint32_t cbCmd)
179{
180 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = (PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE)RTMemAllocZ(cbCmd + RT_OFFSETOF(VBOXVDMACMD_CHROMIUM_CTL_PRIVATE, Cmd));
181 Assert(pHdr);
182 if (pHdr)
183 {
184 pHdr->cRefs = 1;
185 pHdr->rc = VERR_NOT_IMPLEMENTED;
186 pHdr->Cmd.enmType = enmCmd;
187 pHdr->Cmd.cbCmd = cbCmd;
188 return &pHdr->Cmd;
189 }
190
191 return NULL;
192}
193
194DECLINLINE(void) vboxVDMACrCtlRelease (PVBOXVDMACMD_CHROMIUM_CTL pCmd)
195{
196 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
197 uint32_t cRefs = ASMAtomicDecU32(&pHdr->cRefs);
198 if(!cRefs)
199 {
200 RTMemFree(pHdr);
201 }
202}
203
204DECLINLINE(void) vboxVDMACrCtlRetain (PVBOXVDMACMD_CHROMIUM_CTL pCmd)
205{
206 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
207 ASMAtomicIncU32(&pHdr->cRefs);
208}
209
210DECLINLINE(int) vboxVDMACrCtlGetRc (PVBOXVDMACMD_CHROMIUM_CTL pCmd)
211{
212 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
213 return pHdr->rc;
214}
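/* Typical lifecycle of a chromium control command built with the helpers above (a sketch only;
 * enmType and cbTotal stand for the caller's values, compare vboxVDMACrCtlHgsmiSetup below):
 *
 *     PVBOXVDMACMD_CHROMIUM_CTL pCmd = vboxVDMACrCtlCreate(enmType, cbTotal); // cRefs == 1
 *     if (pCmd)
 *     {
 *         int rc = vboxVDMACrCtlPost(pVGAState, pCmd, cbTotal);  // synchronous: waits for completion
 *         if (RT_SUCCESS(rc))
 *             rc = vboxVDMACrCtlGetRc(pCmd);                     // result stored by the backend
 *         vboxVDMACrCtlRelease(pCmd);                            // drop the creation reference
 *     }
 */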
215
216static DECLCALLBACK(void) vboxVDMACrCtlCbSetEvent(PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, void* pvContext)
217{
218 RTSemEventSignal((RTSEMEVENT)pvContext);
219}
220
221static DECLCALLBACK(void) vboxVDMACrCtlCbReleaseCmd(PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, void* pvContext)
222{
223 vboxVDMACrCtlRelease(pCmd);
224}
225
226
227static int vboxVDMACrCtlPostAsync (PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, uint32_t cbCmd, PFNVBOXVDMACRCTL_CALLBACK pfnCompletion, void *pvCompletion)
228{
229 if ( pVGAState->pDrv
230 && pVGAState->pDrv->pfnCrHgsmiControlProcess)
231 {
232 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
233 pHdr->pfnCompletion = pfnCompletion;
234 pHdr->pvCompletion = pvCompletion;
235 pVGAState->pDrv->pfnCrHgsmiControlProcess(pVGAState->pDrv, pCmd, cbCmd);
236 return VINF_SUCCESS;
237 }
238#ifdef DEBUG_misha
239 Assert(0);
240#endif
241 return VERR_NOT_SUPPORTED;
242}
243
244static int vboxVDMACrCtlPost(PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, uint32_t cbCmd)
245{
246 RTSEMEVENT hComplEvent;
247 int rc = RTSemEventCreate(&hComplEvent);
248 AssertRC(rc);
249 if(RT_SUCCESS(rc))
250 {
251 rc = vboxVDMACrCtlPostAsync (pVGAState, pCmd, cbCmd, vboxVDMACrCtlCbSetEvent, (void*)hComplEvent);
252#ifdef DEBUG_misha
253 AssertRC(rc);
254#endif
255 if (RT_SUCCESS(rc))
256 {
257 rc = RTSemEventWaitNoResume(hComplEvent, RT_INDEFINITE_WAIT);
258 AssertRC(rc);
259 if(RT_SUCCESS(rc))
260 {
261 RTSemEventDestroy(hComplEvent);
262 }
263 }
264 else
265 {
266 /* the command is completed */
267 RTSemEventDestroy(hComplEvent);
268 }
269 }
270 return rc;
271}
272
273static void vboxVDMACrCmdNotifyPerform(struct VBOXVDMAHOST *pVdma)
274{
275 PVGASTATE pVGAState = pVdma->pVGAState;
276 pVGAState->pDrv->pfnCrCmdNotifyCmds(pVGAState->pDrv);
277}
278
279/*
280 * @returns VINF_EOF if the command was fully handled here (a NOP or a cancelled command),
281 *          VINF_SUCCESS if the caller still needs to process it.
282 */
283static int vboxVDMACrCmdPreprocess(struct VBOXVDMAHOST *pVdma, uint8_t* pu8Cmd, uint32_t cbCmd)
284{
285 if (*pu8Cmd == VBOXCMDVBVA_OPTYPE_NOP)
286 return VINF_EOF;
287
288 PVBOXCMDVBVA_HDR pCmd = (PVBOXCMDVBVA_HDR)pu8Cmd;
289
290 /* check if the command is cancelled */
291 if (!ASMAtomicCmpXchgU8(&pCmd->u8State, VBOXCMDVBVA_STATE_IN_PROGRESS, VBOXCMDVBVA_STATE_SUBMITTED))
292 {
293 Assert(pCmd->u8State == VBOXCMDVBVA_STATE_CANCELLED);
294 return VINF_EOF;
295 }
296
297 /* some commands can be handled right away */
298 switch (pCmd->u8OpCode)
299 {
300 case VBOXCMDVBVA_OPTYPE_NOPCMD:
301 pCmd->i8Result = 0;
302 return VINF_EOF;
303 default:
304 return VINF_SUCCESS;
305 }
306}
307
308static DECLCALLBACK(int) vboxVDMACrCmdCltCmdGet(HVBOXCRCMDCLT hClt, PVBOXCMDVBVA_HDR *ppNextCmd, uint32_t *pcbNextCmd)
309{
310 struct VBOXVDMAHOST *pVdma = hClt;
311
312 VBoxVBVAExHPCmdCheckRelease(&pVdma->CmdVbva);
313
314 uint32_t cbCmd;
315 uint8_t *pu8Cmd;
316
317 for(;;)
318 {
319 int rc = VBoxVBVAExHPCmdGet(&pVdma->CmdVbva, &pu8Cmd, &cbCmd);
320 switch (rc)
321 {
322 case VINF_SUCCESS:
323 {
324 rc = vboxVDMACrCmdPreprocess(pVdma, pu8Cmd, cbCmd);
325 switch (rc)
326 {
327 case VINF_SUCCESS:
328 *ppNextCmd = (PVBOXCMDVBVA_HDR)pu8Cmd;
329 *pcbNextCmd = cbCmd;
330 return VINF_SUCCESS;
331 case VINF_EOF:
332 continue;
333 default:
334 Assert(!RT_FAILURE(rc));
335 return RT_FAILURE(rc) ? rc : VERR_INTERNAL_ERROR;
336 }
337 break;
338 }
339 case VINF_EOF:
340 return VINF_EOF;
341 case VINF_PERMISSION_DENIED:
342 /* processing was paused, processing state was released, only VBoxVBVAExHS*** calls are now allowed */
343 return VINF_EOF;
344 case VINF_INTERRUPTED:
345 /* command processing was interrupted, processor state remains set. client can process any commands */
346 vboxVDMACrCmdNotifyPerform(pVdma);
347 return VINF_EOF;
348 default:
349 Assert(!RT_FAILURE(rc));
350 return RT_FAILURE(rc) ? rc : VERR_INTERNAL_ERROR;
351 }
352 }
353
354 WARN(("Warning: vboxVDMACrCmdCltCmdGet unexpected state\n"));
355 return VERR_INTERNAL_ERROR;
356}
357
358static int vboxVDMACrCtlHgsmiSetup(struct VBOXVDMAHOST *pVdma)
359{
360 PVBOXVDMACMD_CHROMIUM_CTL_CRHGSMI_SETUP pCmd;
361 pCmd = (PVBOXVDMACMD_CHROMIUM_CTL_CRHGSMI_SETUP) vboxVDMACrCtlCreate (VBOXVDMACMD_CHROMIUM_CTL_TYPE_CRHGSMI_SETUP,
362 sizeof (*pCmd));
363 if (pCmd)
364 {
365 VBOXCRCMD_CLTINFO CltInfo;
366 CltInfo.hClient = pVdma;
367 CltInfo.pfnCmdGet = vboxVDMACrCmdCltCmdGet;
368 PVGASTATE pVGAState = pVdma->pVGAState;
369 pCmd->pvVRamBase = pVGAState->vram_ptrR3;
370 pCmd->cbVRam = pVGAState->vram_size;
371 pCmd->pCrCmdClientInfo = &CltInfo;
372 int rc = vboxVDMACrCtlPost(pVGAState, &pCmd->Hdr, sizeof (*pCmd));
373 Assert(RT_SUCCESS(rc) || rc == VERR_NOT_SUPPORTED);
374 if (RT_SUCCESS(rc))
375 {
376 rc = vboxVDMACrCtlGetRc(&pCmd->Hdr);
377 }
378 vboxVDMACrCtlRelease(&pCmd->Hdr);
379 return rc;
380 }
381 return VERR_NO_MEMORY;
382}
383
384static int vboxVDMACmdExecBpbTransfer(PVBOXVDMAHOST pVdma, const PVBOXVDMACMD_DMA_BPB_TRANSFER pTransfer, uint32_t cbBuffer);
385
386/* check if this is an external cmd to be passed to the chromium backend */
387static int vboxVDMACmdCheckCrCmd(struct VBOXVDMAHOST *pVdma, PVBOXVDMACBUF_DR pCmdDr, uint32_t cbCmdDr)
388{
389 PVBOXVDMACMD pDmaCmd = NULL;
390 uint32_t cbDmaCmd = 0;
391 uint8_t * pvRam = pVdma->pVGAState->vram_ptrR3;
392 int rc = VINF_NOT_SUPPORTED;
393
394 cbDmaCmd = pCmdDr->cbBuf;
395
396 if (pCmdDr->fFlags & VBOXVDMACBUF_FLAG_BUF_FOLLOWS_DR)
397 {
398 if (cbCmdDr < sizeof (*pCmdDr) + VBOXVDMACMD_HEADER_SIZE())
399 {
400 AssertMsgFailed(("invalid buffer data!"));
401 return VERR_INVALID_PARAMETER;
402 }
403
404 if (cbDmaCmd < cbCmdDr - sizeof (*pCmdDr) - VBOXVDMACMD_HEADER_SIZE())
405 {
406 AssertMsgFailed(("invalid command buffer data!"));
407 return VERR_INVALID_PARAMETER;
408 }
409
410 pDmaCmd = VBOXVDMACBUF_DR_TAIL(pCmdDr, VBOXVDMACMD);
411 }
412 else if (pCmdDr->fFlags & VBOXVDMACBUF_FLAG_BUF_VRAM_OFFSET)
413 {
414 VBOXVIDEOOFFSET offBuf = pCmdDr->Location.offVramBuf;
415 if (offBuf + cbDmaCmd > pVdma->pVGAState->vram_size)
416 {
417 AssertMsgFailed(("invalid command buffer data from offset!"));
418 return VERR_INVALID_PARAMETER;
419 }
420 pDmaCmd = (VBOXVDMACMD*)(pvRam + offBuf);
421 }
422
423 if (pDmaCmd)
424 {
425 Assert(cbDmaCmd >= VBOXVDMACMD_HEADER_SIZE());
426 uint32_t cbBody = VBOXVDMACMD_BODY_SIZE(cbDmaCmd);
427
428 switch (pDmaCmd->enmType)
429 {
430 case VBOXVDMACMD_TYPE_CHROMIUM_CMD:
431 {
432 PVBOXVDMACMD_CHROMIUM_CMD pCrCmd = VBOXVDMACMD_BODY(pDmaCmd, VBOXVDMACMD_CHROMIUM_CMD);
433 if (cbBody < sizeof (*pCrCmd))
434 {
435 AssertMsgFailed(("invalid chromium command buffer size!"));
436 return VERR_INVALID_PARAMETER;
437 }
438 PVGASTATE pVGAState = pVdma->pVGAState;
439 rc = VINF_SUCCESS;
440 if (pVGAState->pDrv->pfnCrHgsmiCommandProcess)
441 {
442 VBoxSHGSMICommandMarkAsynchCompletion(pCmdDr);
443 pVGAState->pDrv->pfnCrHgsmiCommandProcess(pVGAState->pDrv, pCrCmd, cbBody);
444 break;
445 }
446 else
447 {
448 Assert(0);
449 }
450
451 int tmpRc = VBoxSHGSMICommandComplete (pVdma->pHgsmi, pCmdDr);
452 AssertRC(tmpRc);
453 break;
454 }
455 case VBOXVDMACMD_TYPE_DMA_BPB_TRANSFER:
456 {
457 PVBOXVDMACMD_DMA_BPB_TRANSFER pTransfer = VBOXVDMACMD_BODY(pDmaCmd, VBOXVDMACMD_DMA_BPB_TRANSFER);
458 if (cbBody < sizeof (*pTransfer))
459 {
460 AssertMsgFailed(("invalid bpb transfer buffer size!"));
461 return VERR_INVALID_PARAMETER;
462 }
463
464 rc = vboxVDMACmdExecBpbTransfer(pVdma, pTransfer, sizeof (*pTransfer));
465 AssertRC(rc);
466 if (RT_SUCCESS(rc))
467 {
468 pCmdDr->rc = VINF_SUCCESS;
469 rc = VBoxSHGSMICommandComplete (pVdma->pHgsmi, pCmdDr);
470 AssertRC(rc);
471 rc = VINF_SUCCESS;
472 }
473 break;
474 }
475 default:
476 break;
477 }
478 }
479 return rc;
480}
481
482int vboxVDMACrHgsmiCommandCompleteAsync(PPDMIDISPLAYVBVACALLBACKS pInterface, PVBOXVDMACMD_CHROMIUM_CMD pCmd, int rc)
483{
484 PVGASTATE pVGAState = PPDMIDISPLAYVBVACALLBACKS_2_PVGASTATE(pInterface);
485 PHGSMIINSTANCE pIns = pVGAState->pHGSMI;
486 VBOXVDMACMD *pDmaHdr = VBOXVDMACMD_FROM_BODY(pCmd);
487 VBOXVDMACBUF_DR *pDr = VBOXVDMACBUF_DR_FROM_TAIL(pDmaHdr);
488 AssertRC(rc);
489 pDr->rc = rc;
490
491 Assert(pVGAState->fGuestCaps & VBVACAPS_COMPLETEGCMD_BY_IOREAD);
492 rc = VBoxSHGSMICommandComplete(pIns, pDr);
493 AssertRC(rc);
494 return rc;
495}
496
497int vboxVDMACrHgsmiControlCompleteAsync(PPDMIDISPLAYVBVACALLBACKS pInterface, PVBOXVDMACMD_CHROMIUM_CTL pCmd, int rc)
498{
499 PVGASTATE pVGAState = PPDMIDISPLAYVBVACALLBACKS_2_PVGASTATE(pInterface);
500 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pCmdPrivate = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
501 pCmdPrivate->rc = rc;
502 if (pCmdPrivate->pfnCompletion)
503 {
504 pCmdPrivate->pfnCompletion(pVGAState, pCmd, pCmdPrivate->pvCompletion);
505 }
506 return VINF_SUCCESS;
507}
508
509#endif
510
511#ifdef VBOX_VDMA_WITH_WORKERTHREAD
512/* to simplify things and to avoid extra backend interface modifications we assume the VBOXVDMA_RECTL layout is the same as VBVACMDHDR */
513AssertCompile(sizeof(VBOXVDMA_RECTL) == sizeof(VBVACMDHDR));
514AssertCompile(RT_SIZEOFMEMB(VBOXVDMA_RECTL, left) == RT_SIZEOFMEMB(VBVACMDHDR, x));
515AssertCompile(RT_SIZEOFMEMB(VBOXVDMA_RECTL, top) == RT_SIZEOFMEMB(VBVACMDHDR, y));
516AssertCompile(RT_SIZEOFMEMB(VBOXVDMA_RECTL, width) == RT_SIZEOFMEMB(VBVACMDHDR, w));
517AssertCompile(RT_SIZEOFMEMB(VBOXVDMA_RECTL, height) == RT_SIZEOFMEMB(VBVACMDHDR, h));
518AssertCompile(RT_OFFSETOF(VBOXVDMA_RECTL, left) == RT_OFFSETOF(VBVACMDHDR, x));
519AssertCompile(RT_OFFSETOF(VBOXVDMA_RECTL, top) == RT_OFFSETOF(VBVACMDHDR, y));
520AssertCompile(RT_OFFSETOF(VBOXVDMA_RECTL, width) == RT_OFFSETOF(VBVACMDHDR, w));
521AssertCompile(RT_OFFSETOF(VBOXVDMA_RECTL, height) == RT_OFFSETOF(VBVACMDHDR, h));
522
523static int vboxVDMANotifyPrimaryUpdate (PVGASTATE pVGAState, unsigned uScreenId, const VBOXVDMA_RECTL * pRectl)
524{
525 pVGAState->pDrv->pfnVBVAUpdateBegin (pVGAState->pDrv, uScreenId);
526
527 /* Updates the rectangle and sends the command to the VRDP server. */
528 pVGAState->pDrv->pfnVBVAUpdateProcess (pVGAState->pDrv, uScreenId,
529 (const PVBVACMDHDR)pRectl /* <- see above AssertCompile's and comments */,
530 sizeof (VBOXVDMA_RECTL));
531
532 pVGAState->pDrv->pfnVBVAUpdateEnd (pVGAState->pDrv, uScreenId, pRectl->left, pRectl->top,
533 pRectl->width, pRectl->height);
534
535 return VINF_SUCCESS;
536}
537#endif
538
539static int vboxVDMACmdExecBltPerform(PVBOXVDMAHOST pVdma,
540 uint8_t *pvDstSurf, const uint8_t *pvSrcSurf,
541 const PVBOXVDMA_SURF_DESC pDstDesc, const PVBOXVDMA_SURF_DESC pSrcDesc,
542 const VBOXVDMA_RECTL * pDstRectl, const VBOXVDMA_RECTL * pSrcRectl)
543{
544 /* we do not support color conversion */
545 Assert(pDstDesc->format == pSrcDesc->format);
546 /* we do not support stretching */
547 Assert(pDstRectl->height == pSrcRectl->height);
548 Assert(pDstRectl->width == pSrcRectl->width);
549 if (pDstDesc->format != pSrcDesc->format)
550 return VERR_INVALID_FUNCTION;
551 if (pDstDesc->width == pDstRectl->width
552 && pSrcDesc->width == pSrcRectl->width
553 && pSrcDesc->width == pDstDesc->width)
554 {
555 Assert(!pDstRectl->left);
556 Assert(!pSrcRectl->left);
557 uint32_t cbOff = pDstDesc->pitch * pDstRectl->top;
558 uint32_t cbSize = pDstDesc->pitch * pDstRectl->height;
559 memcpy(pvDstSurf + cbOff, pvSrcSurf + cbOff, cbSize);
560 }
561 else
562 {
563 uint32_t offDstLineStart = pDstRectl->left * pDstDesc->bpp >> 3;
564 uint32_t offDstLineEnd = ((pDstRectl->left * pDstDesc->bpp + 7) >> 3) + ((pDstDesc->bpp * pDstRectl->width + 7) >> 3);
565 uint32_t cbDstLine = offDstLineEnd - offDstLineStart;
566 uint32_t offDstStart = pDstDesc->pitch * pDstRectl->top + offDstLineStart;
567 Assert(cbDstLine <= pDstDesc->pitch);
568 uint32_t cbDstSkip = pDstDesc->pitch;
569 uint8_t * pvDstStart = pvDstSurf + offDstStart;
570
571 uint32_t offSrcLineStart = pSrcRectl->left * pSrcDesc->bpp >> 3;
572 uint32_t offSrcLineEnd = ((pSrcRectl->left * pSrcDesc->bpp + 7) >> 3) + ((pSrcDesc->bpp * pSrcRectl->width + 7) >> 3);
573 uint32_t cbSrcLine = offSrcLineEnd - offSrcLineStart;
574 uint32_t offSrcStart = pSrcDesc->pitch * pSrcRectl->top + offSrcLineStart;
575 Assert(cbSrcLine <= pSrcDesc->pitch);
576 uint32_t cbSrcSkip = pSrcDesc->pitch;
577 const uint8_t * pvSrcStart = pvSrcSurf + offSrcStart;
578
579 Assert(cbDstLine == cbSrcLine);
580
581 for (uint32_t i = 0; ; ++i)
582 {
583 memcpy (pvDstStart, pvSrcStart, cbDstLine);
584 if (i == pDstRectl->height)
585 break;
586 pvDstStart += cbDstSkip;
587 pvSrcStart += cbSrcSkip;
588 }
589 }
590 return VINF_SUCCESS;
591}
592
593static void vboxVDMARectlUnite(VBOXVDMA_RECTL * pRectl1, const VBOXVDMA_RECTL * pRectl2)
594{
595 if (!pRectl1->width)
596 *pRectl1 = *pRectl2;
597 else
598 {
599 int16_t x21 = pRectl1->left + pRectl1->width;
600 int16_t x22 = pRectl2->left + pRectl2->width;
601 if (pRectl1->left > pRectl2->left)
602 {
603 pRectl1->left = pRectl2->left;
604 pRectl1->width = x21 < x22 ? x22 - pRectl1->left : x21 - pRectl1->left;
605 }
606 else if (x21 < x22)
607 pRectl1->width = x22 - pRectl1->left;
608
609 x21 = pRectl1->top + pRectl1->height;
610 x22 = pRectl2->top + pRectl2->height;
611 if (pRectl1->top > pRectl2->top)
612 {
613 pRectl1->top = pRectl2->top;
614 pRectl1->height = x21 < x22 ? x22 - pRectl1->top : x21 - pRectl1->top;
615 }
616 else if (x21 < x22)
617 pRectl1->height = x22 - pRectl1->top;
618 }
619}
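/* Worked example for the union above: uniting rectl1 = {left 0, top 0, width 10, height 10}
 * with rectl2 = {left 5, top 5, width 10, height 10} yields rectl1 = {0, 0, 15, 15}, i.e. the
 * bounding rectangle of both; an empty rectl1 (width == 0) is simply replaced by rectl2. */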
620
621/*
622 * @return the number of bytes the command contained on success, otherwise a VERR_xxx error code
623 */
624static int vboxVDMACmdExecBlt(PVBOXVDMAHOST pVdma, const PVBOXVDMACMD_DMA_PRESENT_BLT pBlt, uint32_t cbBuffer)
625{
626 const uint32_t cbBlt = VBOXVDMACMD_BODY_FIELD_OFFSET(uint32_t, VBOXVDMACMD_DMA_PRESENT_BLT, aDstSubRects[pBlt->cDstSubRects]);
627 Assert(cbBlt <= cbBuffer);
628 if (cbBuffer < cbBlt)
629 return VERR_INVALID_FUNCTION;
630
631 /* we do not support stretching for now */
632 Assert(pBlt->srcRectl.width == pBlt->dstRectl.width);
633 Assert(pBlt->srcRectl.height == pBlt->dstRectl.height);
634 if (pBlt->srcRectl.width != pBlt->dstRectl.width)
635 return VERR_INVALID_FUNCTION;
636 if (pBlt->srcRectl.height != pBlt->dstRectl.height)
637 return VERR_INVALID_FUNCTION;
638 Assert(pBlt->cDstSubRects);
639
640 uint8_t * pvRam = pVdma->pVGAState->vram_ptrR3;
641 VBOXVDMA_RECTL updateRectl = {0, 0, 0, 0};
642
643 if (pBlt->cDstSubRects)
644 {
645 VBOXVDMA_RECTL dstRectl, srcRectl;
646 const VBOXVDMA_RECTL *pDstRectl, *pSrcRectl;
647 for (uint32_t i = 0; i < pBlt->cDstSubRects; ++i)
648 {
649 pDstRectl = &pBlt->aDstSubRects[i];
650 if (pBlt->dstRectl.left || pBlt->dstRectl.top)
651 {
652 dstRectl.left = pDstRectl->left + pBlt->dstRectl.left;
653 dstRectl.top = pDstRectl->top + pBlt->dstRectl.top;
654 dstRectl.width = pDstRectl->width;
655 dstRectl.height = pDstRectl->height;
656 pDstRectl = &dstRectl;
657 }
658
659 pSrcRectl = &pBlt->aDstSubRects[i];
660 if (pBlt->srcRectl.left || pBlt->srcRectl.top)
661 {
662 srcRectl.left = pSrcRectl->left + pBlt->srcRectl.left;
663 srcRectl.top = pSrcRectl->top + pBlt->srcRectl.top;
664 srcRectl.width = pSrcRectl->width;
665 srcRectl.height = pSrcRectl->height;
666 pSrcRectl = &srcRectl;
667 }
668
669 int rc = vboxVDMACmdExecBltPerform(pVdma, pvRam + pBlt->offDst, pvRam + pBlt->offSrc,
670 &pBlt->dstDesc, &pBlt->srcDesc,
671 pDstRectl,
672 pSrcRectl);
673 AssertRC(rc);
674 if (!RT_SUCCESS(rc))
675 return rc;
676
677 vboxVDMARectlUnite(&updateRectl, pDstRectl);
678 }
679 }
680 else
681 {
682 int rc = vboxVDMACmdExecBltPerform(pVdma, pvRam + pBlt->offDst, pvRam + pBlt->offSrc,
683 &pBlt->dstDesc, &pBlt->srcDesc,
684 &pBlt->dstRectl,
685 &pBlt->srcRectl);
686 AssertRC(rc);
687 if (!RT_SUCCESS(rc))
688 return rc;
689
690 vboxVDMARectlUnite(&updateRectl, &pBlt->dstRectl);
691 }
692
693#ifdef VBOX_VDMA_WITH_WORKERTHREAD
694 int iView = 0;
695 /* @todo: fixme: check if update is needed and get iView */
696 vboxVDMANotifyPrimaryUpdate (pVdma->pVGAState, iView, &updateRectl);
697#endif
698
699 return cbBlt;
700}
701
702static int vboxVDMACmdExecBpbTransfer(PVBOXVDMAHOST pVdma, const PVBOXVDMACMD_DMA_BPB_TRANSFER pTransfer, uint32_t cbBuffer)
703{
704 if (cbBuffer < sizeof (*pTransfer))
705 return VERR_INVALID_PARAMETER;
706
707 PVGASTATE pVGAState = pVdma->pVGAState;
708 uint8_t * pvRam = pVGAState->vram_ptrR3;
709 PGMPAGEMAPLOCK SrcLock;
710 PGMPAGEMAPLOCK DstLock;
711 PPDMDEVINS pDevIns = pVdma->pVGAState->pDevInsR3;
712 const void * pvSrc;
713 void * pvDst;
714 int rc = VINF_SUCCESS;
715 uint32_t cbTransfer = pTransfer->cbTransferSize;
716 uint32_t cbTransfered = 0;
717 bool bSrcLocked = false;
718 bool bDstLocked = false;
719 do
720 {
721 uint32_t cbSubTransfer = cbTransfer;
722 if (pTransfer->fFlags & VBOXVDMACMD_DMA_BPB_TRANSFER_F_SRC_VRAMOFFSET)
723 {
724 pvSrc = pvRam + pTransfer->Src.offVramBuf + cbTransfered;
725 }
726 else
727 {
728 RTGCPHYS phPage = pTransfer->Src.phBuf;
729 phPage += cbTransfered;
730 rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, phPage, 0, &pvSrc, &SrcLock);
731 AssertRC(rc);
732 if (RT_SUCCESS(rc))
733 {
734 bSrcLocked = true;
735 cbSubTransfer = RT_MIN(cbSubTransfer, 0x1000);
736 }
737 else
738 {
739 break;
740 }
741 }
742
743 if (pTransfer->fFlags & VBOXVDMACMD_DMA_BPB_TRANSFER_F_DST_VRAMOFFSET)
744 {
745 pvDst = pvRam + pTransfer->Dst.offVramBuf + cbTransfered;
746 }
747 else
748 {
749 RTGCPHYS phPage = pTransfer->Dst.phBuf;
750 phPage += cbTransfered;
751 rc = PDMDevHlpPhysGCPhys2CCPtr(pDevIns, phPage, 0, &pvDst, &DstLock);
752 AssertRC(rc);
753 if (RT_SUCCESS(rc))
754 {
755 bDstLocked = true;
756 cbSubTransfer = RT_MIN(cbSubTransfer, 0x1000);
757 }
758 else
759 {
760 break;
761 }
762 }
763
764 if (RT_SUCCESS(rc))
765 {
766 memcpy(pvDst, pvSrc, cbSubTransfer);
767 cbTransfer -= cbSubTransfer;
768 cbTransfered += cbSubTransfer;
769 }
770 else
771 {
772 cbTransfer = 0; /* to break */
773 }
774
775 if (bSrcLocked)
776 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &SrcLock);
777 if (bDstLocked)
778 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &DstLock);
779 } while (cbTransfer);
780
781 if (RT_SUCCESS(rc))
782 return sizeof (*pTransfer);
783 return rc;
784}
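/* A sketch of the transfer descriptor consumed above (illustrative values only; offSrcInVram and
 * GCPhysDst are hypothetical). It copies cbTransferSize bytes from a VRAM offset to a guest
 * physical buffer, page by page:
 *
 *     VBOXVDMACMD_DMA_BPB_TRANSFER Transfer;
 *     Transfer.cbTransferSize = 0x1000;
 *     Transfer.fFlags         = VBOXVDMACMD_DMA_BPB_TRANSFER_F_SRC_VRAMOFFSET;
 *     Transfer.Src.offVramBuf = offSrcInVram;   // source lives in VRAM
 *     Transfer.Dst.phBuf      = GCPhysDst;      // destination is guest physical memory
 *     int rc = vboxVDMACmdExecBpbTransfer(pVdma, &Transfer, sizeof(Transfer));
 */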
785
786static int vboxVDMACmdExec(PVBOXVDMAHOST pVdma, const uint8_t *pvBuffer, uint32_t cbBuffer)
787{
788 do
789 {
790 Assert(pvBuffer);
791 Assert(cbBuffer >= VBOXVDMACMD_HEADER_SIZE());
792
793 if (!pvBuffer)
794 return VERR_INVALID_PARAMETER;
795 if (cbBuffer < VBOXVDMACMD_HEADER_SIZE())
796 return VERR_INVALID_PARAMETER;
797
798 PVBOXVDMACMD pCmd = (PVBOXVDMACMD)pvBuffer;
799 uint32_t cbCmd = 0;
800 switch (pCmd->enmType)
801 {
802 case VBOXVDMACMD_TYPE_CHROMIUM_CMD:
803 {
804#ifdef VBOXWDDM_TEST_UHGSMI
805 static int count = 0;
806 static uint64_t start, end;
807 if (count==0)
808 {
809 start = RTTimeNanoTS();
810 }
811 ++count;
812 if (count==100000)
813 {
814 end = RTTimeNanoTS();
815 float ems = (end-start)/1000000.f;
816 LogRel(("100000 calls took %i ms, %i cps\n", (int)ems, (int)(100000.f*1000.f/ems) ));
817 }
818#endif
819 /* todo: post the buffer to chromium */
820 return VINF_SUCCESS;
821 }
822 case VBOXVDMACMD_TYPE_DMA_PRESENT_BLT:
823 {
824 const PVBOXVDMACMD_DMA_PRESENT_BLT pBlt = VBOXVDMACMD_BODY(pCmd, VBOXVDMACMD_DMA_PRESENT_BLT);
825 int cbBlt = vboxVDMACmdExecBlt(pVdma, pBlt, cbBuffer);
826 Assert(cbBlt >= 0);
827 Assert((uint32_t)cbBlt <= cbBuffer);
828 if (cbBlt >= 0)
829 {
830 if ((uint32_t)cbBlt == cbBuffer)
831 return VINF_SUCCESS;
832 else
833 {
834 cbBuffer -= (uint32_t)cbBlt;
835 pvBuffer += cbBlt;
836 }
837 }
838 else
839 return cbBlt; /* error */
840 break;
841 }
842 case VBOXVDMACMD_TYPE_DMA_BPB_TRANSFER:
843 {
844 const PVBOXVDMACMD_DMA_BPB_TRANSFER pTransfer = VBOXVDMACMD_BODY(pCmd, VBOXVDMACMD_DMA_BPB_TRANSFER);
845 int cbTransfer = vboxVDMACmdExecBpbTransfer(pVdma, pTransfer, cbBuffer);
846 Assert(cbTransfer >= 0);
847 Assert((uint32_t)cbTransfer <= cbBuffer);
848 if (cbTransfer >= 0)
849 {
850 if ((uint32_t)cbTransfer == cbBuffer)
851 return VINF_SUCCESS;
852 else
853 {
854 cbBuffer -= (uint32_t)cbTransfer;
855 pvBuffer += cbTransfer;
856 }
857 }
858 else
859 return cbTransfer; /* error */
860 break;
861 }
862 case VBOXVDMACMD_TYPE_DMA_NOP:
863 return VINF_SUCCESS;
864 case VBOXVDMACMD_TYPE_CHILD_STATUS_IRQ:
865 return VINF_SUCCESS;
866 default:
867 AssertBreakpoint();
868 return VERR_INVALID_FUNCTION;
869 }
870 } while (1);
871
872 /* we should not be here */
873 AssertBreakpoint();
874 return VERR_INVALID_STATE;
875}
876
877#ifdef VBOX_VDMA_WITH_WORKERTHREAD
878
879int vboxVDMAPipeConstruct(PVBOXVDMAPIPE pPipe)
880{
881 int rc = RTSemEventCreate(&pPipe->hEvent);
882 AssertRC(rc);
883 if (RT_SUCCESS(rc))
884 {
885 rc = RTCritSectInit(&pPipe->hCritSect);
886 AssertRC(rc);
887 if (RT_SUCCESS(rc))
888 {
889 pPipe->enmState = VBOXVDMAPIPE_STATE_CREATED;
890 pPipe->bNeedNotify = true;
891 return VINF_SUCCESS;
892// RTCritSectDelete(pPipe->hCritSect);
893 }
894 RTSemEventDestroy(pPipe->hEvent);
895 }
896 return rc;
897}
898
899int vboxVDMAPipeOpenServer(PVBOXVDMAPIPE pPipe)
900{
901 int rc = RTCritSectEnter(&pPipe->hCritSect);
902 AssertRC(rc);
903 if (RT_SUCCESS(rc))
904 {
905 Assert(pPipe->enmState == VBOXVDMAPIPE_STATE_CREATED);
906 switch (pPipe->enmState)
907 {
908 case VBOXVDMAPIPE_STATE_CREATED:
909 pPipe->enmState = VBOXVDMAPIPE_STATE_OPENNED;
910 pPipe->bNeedNotify = false;
911 rc = VINF_SUCCESS;
912 break;
913 case VBOXVDMAPIPE_STATE_OPENNED:
914 pPipe->bNeedNotify = false;
915 rc = VINF_ALREADY_INITIALIZED;
916 break;
917 default:
918 AssertBreakpoint();
919 rc = VERR_INVALID_STATE;
920 break;
921 }
922
923 RTCritSectLeave(&pPipe->hCritSect);
924 }
925 return rc;
926}
927
928int vboxVDMAPipeCloseServer(PVBOXVDMAPIPE pPipe)
929{
930 int rc = RTCritSectEnter(&pPipe->hCritSect);
931 AssertRC(rc);
932 if (RT_SUCCESS(rc))
933 {
934 Assert(pPipe->enmState == VBOXVDMAPIPE_STATE_CLOSED
935 || pPipe->enmState == VBOXVDMAPIPE_STATE_CLOSING);
936 switch (pPipe->enmState)
937 {
938 case VBOXVDMAPIPE_STATE_CLOSING:
939 pPipe->enmState = VBOXVDMAPIPE_STATE_CLOSED;
940 rc = VINF_SUCCESS;
941 break;
942 case VBOXVDMAPIPE_STATE_CLOSED:
943 rc = VINF_ALREADY_INITIALIZED;
944 break;
945 default:
946 AssertBreakpoint();
947 rc = VERR_INVALID_STATE;
948 break;
949 }
950
951 RTCritSectLeave(&pPipe->hCritSect);
952 }
953 return rc;
954}
955
956int vboxVDMAPipeCloseClient(PVBOXVDMAPIPE pPipe)
957{
958 int rc = RTCritSectEnter(&pPipe->hCritSect);
959 AssertRC(rc);
960 if (RT_SUCCESS(rc))
961 {
962 bool bNeedNotify = false;
963 Assert(pPipe->enmState == VBOXVDMAPIPE_STATE_OPENNED
964 || pPipe->enmState == VBOXVDMAPIPE_STATE_CREATED
965 || pPipe->enmState == VBOXVDMAPIPE_STATE_CLOSED);
966 switch (pPipe->enmState)
967 {
968 case VBOXVDMAPIPE_STATE_OPENNED:
969 pPipe->enmState = VBOXVDMAPIPE_STATE_CLOSING;
970 bNeedNotify = pPipe->bNeedNotify;
971 pPipe->bNeedNotify = false;
972 break;
973 case VBOXVDMAPIPE_STATE_CREATED:
974 pPipe->enmState = VBOXVDMAPIPE_STATE_CLOSED;
975 pPipe->bNeedNotify = false;
976 break;
977 case VBOXVDMAPIPE_STATE_CLOSED:
978 rc = VINF_ALREADY_INITIALIZED;
979 break;
980 default:
981 AssertBreakpoint();
982 rc = VERR_INVALID_STATE;
983 break;
984 }
985
986 RTCritSectLeave(&pPipe->hCritSect);
987
988 if (bNeedNotify)
989 {
990 rc = RTSemEventSignal(pPipe->hEvent);
991 AssertRC(rc);
992 }
993 }
994 return rc;
995}
996
997
998typedef DECLCALLBACK(bool) FNHVBOXVDMARWCB(PVBOXVDMAPIPE pPipe, void *pvCallback);
999typedef FNHVBOXVDMARWCB *PFNHVBOXVDMARWCB;
1000
1001int vboxVDMAPipeModifyServer(PVBOXVDMAPIPE pPipe, PFNHVBOXVDMARWCB pfnCallback, void * pvCallback)
1002{
1003 int rc = RTCritSectEnter(&pPipe->hCritSect);
1004 AssertRC(rc);
1005 if (RT_SUCCESS(rc))
1006 {
1007 do
1008 {
1009 Assert(pPipe->enmState == VBOXVDMAPIPE_STATE_OPENNED
1010 || pPipe->enmState == VBOXVDMAPIPE_STATE_CLOSING);
1011
1012 if (pPipe->enmState >= VBOXVDMAPIPE_STATE_OPENNED)
1013 {
1014 bool bProcessing = pfnCallback(pPipe, pvCallback);
1015 pPipe->bNeedNotify = !bProcessing;
1016 if (bProcessing)
1017 {
1018 RTCritSectLeave(&pPipe->hCritSect);
1019 rc = VINF_SUCCESS;
1020 break;
1021 }
1022 else if (pPipe->enmState == VBOXVDMAPIPE_STATE_CLOSING)
1023 {
1024 pPipe->enmState = VBOXVDMAPIPE_STATE_CLOSED;
1025 RTCritSectLeave(&pPipe->hCritSect);
1026 rc = VINF_EOF;
1027 break;
1028 }
1029 }
1030 else
1031 {
1032 AssertBreakpoint();
1033 rc = VERR_INVALID_STATE;
1034 RTCritSectLeave(&pPipe->hCritSect);
1035 break;
1036 }
1037
1038 RTCritSectLeave(&pPipe->hCritSect);
1039
1040 rc = RTSemEventWait(pPipe->hEvent, RT_INDEFINITE_WAIT);
1041 AssertRC(rc);
1042 if (!RT_SUCCESS(rc))
1043 break;
1044
1045 rc = RTCritSectEnter(&pPipe->hCritSect);
1046 AssertRC(rc);
1047 if (!RT_SUCCESS(rc))
1048 break;
1049 } while (1);
1050 }
1051
1052 return rc;
1053}
1054
1055int vboxVDMAPipeModifyClient(PVBOXVDMAPIPE pPipe, PFNHVBOXVDMARWCB pfnCallback, void * pvCallback)
1056{
1057 int rc = RTCritSectEnter(&pPipe->hCritSect);
1058 AssertRC(rc);
1059 if (RT_SUCCESS(rc))
1060 {
1061 bool bNeedNotify = false;
1062 Assert(pPipe->enmState == VBOXVDMAPIPE_STATE_OPENNED);
1063 if (pPipe->enmState == VBOXVDMAPIPE_STATE_OPENNED)
1064 {
1065 bool bModified = pfnCallback(pPipe, pvCallback);
1066 if (bModified)
1067 {
1068 bNeedNotify = pPipe->bNeedNotify;
1069 pPipe->bNeedNotify = false;
1070 }
1071 }
1072 else
1073 rc = VERR_INVALID_STATE;
1074
1075 RTCritSectLeave(&pPipe->hCritSect);
1076
1077 if (bNeedNotify)
1078 {
1079 rc = RTSemEventSignal(pPipe->hEvent);
1080 AssertRC(rc);
1081 }
1082 }
1083 return rc;
1084}
1085
1086int vboxVDMAPipeDestruct(PVBOXVDMAPIPE pPipe)
1087{
1088 Assert(pPipe->enmState == VBOXVDMAPIPE_STATE_CLOSED
1089 || pPipe->enmState == VBOXVDMAPIPE_STATE_CREATED);
1090 /* ensure the pipe is closed */
1091 vboxVDMAPipeCloseClient(pPipe);
1092
1093 Assert(pPipe->enmState == VBOXVDMAPIPE_STATE_CLOSED);
1094
1095 if (pPipe->enmState != VBOXVDMAPIPE_STATE_CLOSED)
1096 return VERR_INVALID_STATE;
1097
1098 int rc = RTCritSectDelete(&pPipe->hCritSect);
1099 AssertRC(rc);
1100
1101 rc = RTSemEventDestroy(pPipe->hEvent);
1102 AssertRC(rc);
1103
1104 return VINF_SUCCESS;
1105}
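/* Illustrative use of the pipe above (a sketch; compare vboxVDMAWorkerThread and vboxVDMACommand
 * below). The submitting side queues work from a callback under the pipe lock, the worker side
 * blocks until work arrives or the pipe gets closed:
 *
 *     // submitter (EMT)
 *     vboxVDMAPipeModifyClient(&pVdma->Pipe, vboxVDMACommandSubmitCb, &SubmitCtx);
 *     ...
 *     vboxVDMAPipeCloseClient(&pVdma->Pipe);            // lets the worker drain and exit
 *
 *     // worker thread
 *     vboxVDMAPipeOpenServer(&pVdma->Pipe);
 *     while (vboxVDMAPipeModifyServer(&pVdma->Pipe, vboxVDMACommandProcessCb, &ProcessCtx) == VINF_SUCCESS)
 *     {
 *         // ... handle ProcessCtx.Cmd ...
 *     }
 *     vboxVDMAPipeCloseServer(&pVdma->Pipe);            // VINF_EOF means the client closed the pipe
 */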
1106#endif
1107
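/* Executes the DMA command buffer described by pCmd: the buffer either directly follows the
 * descriptor (VBOXVDMACBUF_FLAG_BUF_FOLLOWS_DR), lives at a VRAM offset, or is addressed by a
 * guest physical address and is mapped read-only for the duration of the call. The descriptor
 * is completed via VBoxSHGSMICommandComplete with the execution status stored in pCmd->rc. */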
1108static void vboxVDMACommandProcess(PVBOXVDMAHOST pVdma, PVBOXVDMACBUF_DR pCmd, uint32_t cbCmd)
1109{
1110 PHGSMIINSTANCE pHgsmi = pVdma->pHgsmi;
1111 const uint8_t * pvBuf;
1112 PGMPAGEMAPLOCK Lock;
1113 int rc;
1114 bool bReleaseLocked = false;
1115
1116 do
1117 {
1118 PPDMDEVINS pDevIns = pVdma->pVGAState->pDevInsR3;
1119
1120 if (pCmd->fFlags & VBOXVDMACBUF_FLAG_BUF_FOLLOWS_DR)
1121 pvBuf = VBOXVDMACBUF_DR_TAIL(pCmd, const uint8_t);
1122 else if (pCmd->fFlags & VBOXVDMACBUF_FLAG_BUF_VRAM_OFFSET)
1123 {
1124 uint8_t * pvRam = pVdma->pVGAState->vram_ptrR3;
1125 pvBuf = pvRam + pCmd->Location.offVramBuf;
1126 }
1127 else
1128 {
1129 RTGCPHYS phPage = pCmd->Location.phBuf & ~0xfffULL;
1130 uint32_t offset = pCmd->Location.phBuf & 0xfff;
1131 Assert(offset + pCmd->cbBuf <= 0x1000);
1132 if (offset + pCmd->cbBuf > 0x1000)
1133 {
1134 /* @todo: more advanced mechanism of command buffer proc is actually needed */
1135 rc = VERR_INVALID_PARAMETER;
1136 break;
1137 }
1138
1139 const void * pvPageBuf;
1140 rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, phPage, 0, &pvPageBuf, &Lock);
1141 AssertRC(rc);
1142 if (!RT_SUCCESS(rc))
1143 {
1144 /* @todo: if (rc == VERR_PGM_PHYS_PAGE_RESERVED) -> fall back on using PGMPhysRead ?? */
1145 break;
1146 }
1147
1148 pvBuf = (const uint8_t *)pvPageBuf;
1149 pvBuf += offset;
1150
1151 bReleaseLocked = true;
1152 }
1153
1154 rc = vboxVDMACmdExec(pVdma, pvBuf, pCmd->cbBuf);
1155 AssertRC(rc);
1156
1157 if (bReleaseLocked)
1158 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
1159 } while (0);
1160
1161 pCmd->rc = rc;
1162
1163 rc = VBoxSHGSMICommandComplete (pHgsmi, pCmd);
1164 AssertRC(rc);
1165}
1166
1167static void vboxVDMAControlProcess(PVBOXVDMAHOST pVdma, PVBOXVDMA_CTL pCmd)
1168{
1169 PHGSMIINSTANCE pHgsmi = pVdma->pHgsmi;
1170 pCmd->i32Result = VINF_SUCCESS;
1171 int rc = VBoxSHGSMICommandComplete (pHgsmi, pCmd);
1172 AssertRC(rc);
1173}
1174
1175#ifdef VBOX_VDMA_WITH_WORKERTHREAD
1176typedef struct
1177{
1178 struct VBOXVDMAHOST *pVdma;
1179 VBOXVDMAPIPE_CMD_BODY Cmd;
1180 bool bHasCmd;
1181} VBOXVDMACMD_PROCESS_CONTEXT, *PVBOXVDMACMD_PROCESS_CONTEXT;
1182
1183static DECLCALLBACK(bool) vboxVDMACommandProcessCb(PVBOXVDMAPIPE pPipe, void *pvCallback)
1184{
1185 PVBOXVDMACMD_PROCESS_CONTEXT pContext = (PVBOXVDMACMD_PROCESS_CONTEXT)pvCallback;
1186 struct VBOXVDMAHOST *pVdma = pContext->pVdma;
1187 HGSMILISTENTRY *pEntry = hgsmiListRemoveHead(&pVdma->PendingList);
1188 if (pEntry)
1189 {
1190 PVBOXVDMAPIPE_CMD pPipeCmd = VBOXVDMAPIPE_CMD_FROM_ENTRY(pEntry);
1191 Assert(pPipeCmd);
1192 pContext->Cmd = pPipeCmd->Cmd;
1193 hgsmiListPrepend(&pVdma->CmdPool.List, pEntry);
1194 pContext->bHasCmd = true;
1195 return true;
1196 }
1197
1198 pContext->bHasCmd = false;
1199 return false;
1200}
1201
1202static DECLCALLBACK(int) vboxVDMAWorkerThread(RTTHREAD ThreadSelf, void *pvUser)
1203{
1204 PVBOXVDMAHOST pVdma = (PVBOXVDMAHOST)pvUser;
1205 PHGSMIINSTANCE pHgsmi = pVdma->pHgsmi;
1206 VBOXVDMACMD_PROCESS_CONTEXT Context;
1207 Context.pVdma = pVdma;
1208
1209 int rc = vboxVDMAPipeOpenServer(&pVdma->Pipe);
1210 AssertRC(rc);
1211 if (RT_SUCCESS(rc))
1212 {
1213 do
1214 {
1215 rc = vboxVDMAPipeModifyServer(&pVdma->Pipe, vboxVDMACommandProcessCb, &Context);
1216 AssertRC(rc);
1217 if (RT_SUCCESS(rc))
1218 {
1219 switch (Context.Cmd.enmType)
1220 {
1221 case VBOXVDMAPIPE_CMD_TYPE_DMACMD:
1222 {
1223 PVBOXVDMACBUF_DR pDr = Context.Cmd.u.pDr;
1224 vboxVDMACommandProcess(pVdma, pDr);
1225 break;
1226 }
1227 case VBOXVDMAPIPE_CMD_TYPE_DMACTL:
1228 {
1229 PVBOXVDMA_CTL pCtl = Context.Cmd.u.pCtl;
1230 vboxVDMAControlProcess(pVdma, pCtl);
1231 break;
1232 }
1233 default:
1234 AssertBreakpoint();
1235 break;
1236 }
1237
1238 if (rc == VINF_EOF)
1239 {
1240 rc = VINF_SUCCESS;
1241 break;
1242 }
1243 }
1244 else
1245 break;
1246 } while (1);
1247 }
1248
1249 /* always try to close the pipe to make sure the client side is notified */
1250 int tmpRc = vboxVDMAPipeCloseServer(&pVdma->Pipe);
1251 AssertRC(tmpRc);
1252 return rc;
1253}
1254#endif
1255
1256#ifdef VBOX_VDMA_WITH_WATCHDOG
1257static DECLCALLBACK(void) vboxVDMAWatchDogTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
1258{
1259 VBOXVDMAHOST *pVdma = (VBOXVDMAHOST *)pvUser;
1260 PVGASTATE pVGAState = pVdma->pVGAState;
1261 VBVARaiseIrq(pVGAState, HGSMIHOSTFLAGS_WATCHDOG);
1262}
1263
1264static int vboxVDMAWatchDogCtl(struct VBOXVDMAHOST *pVdma, uint32_t cMillis)
1265{
1266 PPDMDEVINS pDevIns = pVdma->pVGAState->pDevInsR3;
1267 if (cMillis)
1268 TMTimerSetMillies(pVdma->WatchDogTimer, cMillis);
1269 else
1270 TMTimerStop(pVdma->WatchDogTimer);
1271 return VINF_SUCCESS;
1272}
1273#endif
1274
1275int vboxVDMAConstruct(PVGASTATE pVGAState, uint32_t cPipeElements)
1276{
1277 int rc;
1278#ifdef VBOX_VDMA_WITH_WORKERTHREAD
1279 PVBOXVDMAHOST pVdma = (PVBOXVDMAHOST)RTMemAllocZ(RT_OFFSETOF(VBOXVDMAHOST, CmdPool.aCmds[cPipeElements]));
1280#else
1281 PVBOXVDMAHOST pVdma = (PVBOXVDMAHOST)RTMemAllocZ(sizeof(*pVdma));
1282#endif
1283 Assert(pVdma);
1284 if (pVdma)
1285 {
1286 pVdma->pHgsmi = pVGAState->pHGSMI;
1287 pVdma->pVGAState = pVGAState;
1288
1289#ifdef VBOX_VDMA_WITH_WATCHDOG
1290 rc = PDMDevHlpTMTimerCreate(pVGAState->pDevInsR3, TMCLOCK_REAL, vboxVDMAWatchDogTimer,
1291 pVdma, TMTIMER_FLAGS_NO_CRIT_SECT,
1292 "VDMA WatchDog Timer", &pVdma->WatchDogTimer);
1293 AssertRC(rc);
1294#endif
1295#ifdef VBOX_VDMA_WITH_WORKERTHREAD
1296 hgsmiListInit(&pVdma->PendingList);
1297 rc = vboxVDMAPipeConstruct(&pVdma->Pipe);
1298 AssertRC(rc);
1299 if (RT_SUCCESS(rc))
1300 {
1301 rc = RTThreadCreate(&pVdma->hWorkerThread, vboxVDMAWorkerThread, pVdma, 0, RTTHREADTYPE_IO, RTTHREADFLAGS_WAITABLE, "VDMA");
1302 AssertRC(rc);
1303 if (RT_SUCCESS(rc))
1304 {
1305 hgsmiListInit(&pVdma->CmdPool.List);
1306 pVdma->CmdPool.cCmds = cPipeElements;
1307 for (uint32_t i = 0; i < cPipeElements; ++i)
1308 {
1309 hgsmiListAppend(&pVdma->CmdPool.List, &pVdma->CmdPool.aCmds[i].Entry);
1310 }
1311# if 0 //def VBOX_WITH_CRHGSMI
1312 int tmpRc = vboxVDMACrCtlHgsmiSetup(pVdma);
1313# endif
1314#endif
1315 pVGAState->pVdma = pVdma;
1316 VBoxVBVAExHSInit(&pVdma->CmdVbva);
1317#ifdef VBOX_WITH_CRHGSMI
1318 int rcIgnored = vboxVDMACrCtlHgsmiSetup(pVdma); NOREF(rcIgnored); /** @todo is this ignoring intentional? */
1319#endif
1320 return VINF_SUCCESS;
1321#ifdef VBOX_VDMA_WITH_WORKERTHREAD
1322 }
1323
1324 int tmpRc = vboxVDMAPipeDestruct(&pVdma->Pipe);
1325 AssertRC(tmpRc);
1326 }
1327
1328 RTMemFree(pVdma);
1329#endif
1330 }
1331 else
1332 rc = VERR_OUT_OF_RESOURCES;
1333
1334 return rc;
1335}
1336
1337int vboxVDMADestruct(struct VBOXVDMAHOST *pVdma)
1338{
1339#ifdef VBOX_VDMA_WITH_WORKERTHREAD
1340 /* @todo: implement*/
1341 AssertBreakpoint();
1342#endif
1343 VBoxVBVAExHSTerm(&pVdma->CmdVbva);
1344 RTMemFree(pVdma);
1345 return VINF_SUCCESS;
1346}
1347
1348#ifdef VBOX_VDMA_WITH_WORKERTHREAD
1349typedef struct
1350{
1351 struct VBOXVDMAHOST *pVdma;
1352 VBOXVDMAPIPE_CMD_BODY Cmd;
1353 bool bQueued;
1354} VBOXVDMACMD_SUBMIT_CONTEXT, *PVBOXVDMACMD_SUBMIT_CONTEXT;
1355
1356DECLCALLBACK(bool) vboxVDMACommandSubmitCb(PVBOXVDMAPIPE pPipe, void *pvCallback)
1357{
1358 PVBOXVDMACMD_SUBMIT_CONTEXT pContext = (PVBOXVDMACMD_SUBMIT_CONTEXT)pvCallback;
1359 struct VBOXVDMAHOST *pVdma = pContext->pVdma;
1360 HGSMILISTENTRY *pEntry = hgsmiListRemoveHead(&pVdma->CmdPool.List);
1361 Assert(pEntry);
1362 if (pEntry)
1363 {
1364 PVBOXVDMAPIPE_CMD pPipeCmd = VBOXVDMAPIPE_CMD_FROM_ENTRY(pEntry);
1365 pPipeCmd->Cmd = pContext->Cmd;
1366 VBoxSHGSMICommandMarkAsynchCompletion(pContext->Cmd.u.pvCmd);
1367 pContext->bQueued = true;
1368 hgsmiListAppend(&pVdma->PendingList, pEntry);
1369 return true;
1370 }
1371
1372 /* @todo: should we try to flush some commands here? */
1373 pContext->bQueued = false;
1374 return false;
1375}
1376#endif
1377
1378int vboxVDMASaveStateExecPrep(struct VBOXVDMAHOST *pVdma, PSSMHANDLE pSSM)
1379{
1380#ifdef VBOX_WITH_CRHGSMI
1381 PVGASTATE pVGAState = pVdma->pVGAState;
1382 PVBOXVDMACMD_CHROMIUM_CTL pCmd = (PVBOXVDMACMD_CHROMIUM_CTL)vboxVDMACrCtlCreate(
1383 VBOXVDMACMD_CHROMIUM_CTL_TYPE_SAVESTATE_BEGIN, sizeof (*pCmd));
1384 Assert(pCmd);
1385 if (pCmd)
1386 {
1387 int rc = vboxVDMACrCtlPost(pVGAState, pCmd, sizeof (*pCmd));
1388 AssertRC(rc);
1389 if (RT_SUCCESS(rc))
1390 {
1391 rc = vboxVDMACrCtlGetRc(pCmd);
1392 }
1393 vboxVDMACrCtlRelease(pCmd);
1394 return rc;
1395 }
1396 return VERR_NO_MEMORY;
1397#else
1398 return VINF_SUCCESS;
1399#endif
1400}
1401
1402int vboxVDMASaveStateExecDone(struct VBOXVDMAHOST *pVdma, PSSMHANDLE pSSM)
1403{
1404#ifdef VBOX_WITH_CRHGSMI
1405 PVGASTATE pVGAState = pVdma->pVGAState;
1406 PVBOXVDMACMD_CHROMIUM_CTL pCmd = (PVBOXVDMACMD_CHROMIUM_CTL)vboxVDMACrCtlCreate(
1407 VBOXVDMACMD_CHROMIUM_CTL_TYPE_SAVESTATE_END, sizeof (*pCmd));
1408 Assert(pCmd);
1409 if (pCmd)
1410 {
1411 int rc = vboxVDMACrCtlPost(pVGAState, pCmd, sizeof (*pCmd));
1412 AssertRC(rc);
1413 if (RT_SUCCESS(rc))
1414 {
1415 rc = vboxVDMACrCtlGetRc(pCmd);
1416 }
1417 vboxVDMACrCtlRelease(pCmd);
1418 return rc;
1419 }
1420 return VERR_NO_MEMORY;
1421#else
1422 return VINF_SUCCESS;
1423#endif
1424}
1425
1426void vboxVDMAControl(struct VBOXVDMAHOST *pVdma, PVBOXVDMA_CTL pCmd, uint32_t cbCmd)
1427{
1428#if 1
1429 PHGSMIINSTANCE pIns = pVdma->pHgsmi;
1430
1431 switch (pCmd->enmCtl)
1432 {
1433 case VBOXVDMA_CTL_TYPE_ENABLE:
1434 pCmd->i32Result = VINF_SUCCESS;
1435 break;
1436 case VBOXVDMA_CTL_TYPE_DISABLE:
1437 pCmd->i32Result = VINF_SUCCESS;
1438 break;
1439 case VBOXVDMA_CTL_TYPE_FLUSH:
1440 pCmd->i32Result = VINF_SUCCESS;
1441 break;
1442#ifdef VBOX_VDMA_WITH_WATCHDOG
1443 case VBOXVDMA_CTL_TYPE_WATCHDOG:
1444 pCmd->i32Result = vboxVDMAWatchDogCtl(pVdma, pCmd->u32Offset);
1445 break;
1446#endif
1447 default:
1448 AssertBreakpoint();
1449 pCmd->i32Result = VERR_NOT_SUPPORTED;
1450 }
1451
1452 int rc = VBoxSHGSMICommandComplete (pIns, pCmd);
1453 AssertRC(rc);
1454#else
1455 /* test asynchronous completion */
1456 VBOXVDMACMD_SUBMIT_CONTEXT Context;
1457 Context.pVdma = pVdma;
1458 Context.Cmd.enmType = VBOXVDMAPIPE_CMD_TYPE_DMACTL;
1459 Context.Cmd.u.pCtl = pCmd;
1460
1461 int rc = vboxVDMAPipeModifyClient(&pVdma->Pipe, vboxVDMACommandSubmitCb, &Context);
1462 AssertRC(rc);
1463 if (RT_SUCCESS(rc))
1464 {
1465 Assert(Context.bQueued);
1466 if (Context.bQueued)
1467 {
1468 /* success */
1469 return;
1470 }
1471 rc = VERR_OUT_OF_RESOURCES;
1472 }
1473
1474 /* failure */
1475 Assert(RT_FAILURE(rc));
1476 PHGSMIINSTANCE pIns = pVdma->pHgsmi;
1477 pCmd->i32Result = rc;
1478 int tmpRc = VBoxSHGSMICommandComplete (pIns, pCmd);
1479 AssertRC(tmpRc);
1480
1481#endif
1482}
1483
1484void vboxVDMACommand(struct VBOXVDMAHOST *pVdma, PVBOXVDMACBUF_DR pCmd, uint32_t cbCmd)
1485{
1486 int rc = VERR_NOT_IMPLEMENTED;
1487
1488#ifdef VBOX_WITH_CRHGSMI
1489 /* chromium commands are processed by the chromium HGCM thread independently from our internal cmd processing pipeline,
1490 * which is why we process them specially */
1491 rc = vboxVDMACmdCheckCrCmd(pVdma, pCmd, cbCmd);
1492 if (rc == VINF_SUCCESS)
1493 return;
1494
1495 if (RT_FAILURE(rc))
1496 {
1497 pCmd->rc = rc;
1498 rc = VBoxSHGSMICommandComplete (pVdma->pHgsmi, pCmd);
1499 AssertRC(rc);
1500 return;
1501 }
1502#endif
1503
1504#ifndef VBOX_VDMA_WITH_WORKERTHREAD
1505 vboxVDMACommandProcess(pVdma, pCmd, cbCmd);
1506#else
1507
1508# ifdef DEBUG_misha
1509 Assert(0);
1510# endif
1511
1512 VBOXVDMACMD_SUBMIT_CONTEXT Context;
1513 Context.pVdma = pVdma;
1514 Context.Cmd.enmType = VBOXVDMAPIPE_CMD_TYPE_DMACMD;
1515 Context.Cmd.u.pDr = pCmd;
1516
1517 rc = vboxVDMAPipeModifyClient(&pVdma->Pipe, vboxVDMACommandSubmitCb, &Context);
1518 AssertRC(rc);
1519 if (RT_SUCCESS(rc))
1520 {
1521 Assert(Context.bQueued);
1522 if (Context.bQueued)
1523 {
1524 /* success */
1525 return;
1526 }
1527 rc = VERR_OUT_OF_RESOURCES;
1528 }
1529 /* failure */
1530 Assert(RT_FAILURE(rc));
1531 PHGSMIINSTANCE pIns = pVdma->pHgsmi;
1532 pCmd->rc = rc;
1533 int tmpRc = VBoxSHGSMICommandComplete (pIns, pCmd);
1534 AssertRC(tmpRc);
1535#endif
1536}
1537
1538/**/
1539static int vboxVBVAExHSProcessorAcquire(struct VBVAEXHOSTCONTEXT *pCmdVbva)
1540{
1541 Assert(pCmdVbva->u32State != VBVAEXHOSTCONTEXT_STATE_STOPPED);
1542
1543 uint32_t oldState;
1544 if (!ASMAtomicReadU32(&pCmdVbva->u32Pause))
1545 {
1546 if (ASMAtomicCmpXchgExU32(&pCmdVbva->u32State, VBVAEXHOSTCONTEXT_STATE_PROCESSING, VBVAEXHOSTCONTEXT_STATE_LISTENING, &oldState))
1547 return VINF_SUCCESS;
1548 return oldState == VBVAEXHOSTCONTEXT_STATE_PROCESSING ? VERR_SEM_BUSY : VERR_INVALID_STATE;
1549 }
1550 return VERR_INVALID_STATE;
1551}
1552
1553static bool vboxVBVAExHPCheckPause(struct VBVAEXHOSTCONTEXT *pCmdVbva)
1554{
1555 Assert(pCmdVbva->u32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
1556
1557 if (!ASMAtomicReadU32(&pCmdVbva->u32Pause))
1558 return false;
1559
1560 ASMAtomicWriteU32(&pCmdVbva->u32State, VBVAEXHOSTCONTEXT_STATE_PAUSED);
1561 return true;
1562}
1563
1564static bool vboxVBVAExHPCheckOtherCommands(struct VBVAEXHOSTCONTEXT *pCmdVbva)
1565{
1566 Assert(pCmdVbva->u32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
1567
1568 return !!ASMAtomicUoReadU32(&pCmdVbva->u32cOtherCommands);
1569}
1570
1571static void vboxVBVAExHPProcessorRelease(struct VBVAEXHOSTCONTEXT *pCmdVbva)
1572{
1573 Assert(pCmdVbva->u32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
1574
1575 if (!vboxVBVAExHPCheckPause(pCmdVbva))
1576 ASMAtomicWriteU32(&pCmdVbva->u32State, VBVAEXHOSTCONTEXT_STATE_LISTENING);
1577 else
1578 ASMAtomicWriteU32(&pCmdVbva->u32State, VBVAEXHOSTCONTEXT_STATE_PAUSED);
1579}
1580
1581static void vboxVBVAExHPHgEventSet(struct VBVAEXHOSTCONTEXT *pCmdVbva)
1582{
1583 Assert(pCmdVbva->u32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
1584
1585 ASMAtomicOrU32(&pCmdVbva->pVBVA->hostFlags.u32HostEvents, VBVA_F_STATE_PROCESSING);
1586}
1587
1588static void vboxVBVAExHPHgEventClear(struct VBVAEXHOSTCONTEXT *pCmdVbva)
1589{
1590 Assert(pCmdVbva->u32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
1591
1592 ASMAtomicAndU32(&pCmdVbva->pVBVA->hostFlags.u32HostEvents, ~VBVA_F_STATE_PROCESSING);
1593}
1594
1595static bool vboxVBVAExHPCmdCheckRelease(struct VBVAEXHOSTCONTEXT *pCmdVbva)
1596{
1597 if (!pCmdVbva->cbCurData)
1598 return false;
1599
1600 VBVABUFFER *pVBVA = pCmdVbva->pVBVA;
1601 pVBVA->off32Data = (pVBVA->off32Data + pCmdVbva->cbCurData) % pVBVA->cbData;
1602
1603 pVBVA->indexRecordFirst = (pVBVA->indexRecordFirst + 1) % RT_ELEMENTS(pVBVA->aRecords);
1604
1605 pCmdVbva->cbCurData = 0;
1606
1607 return true;
1608}
1609
1610static int vboxVBVAExHPCmdGet(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t **ppCmd, uint32_t *pcbCmd)
1611{
1612 Assert(pCmdVbva->u32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
1613
1614 VBVABUFFER *pVBVA = pCmdVbva->pVBVA;
1615
1616 uint32_t indexRecordFirst = pVBVA->indexRecordFirst;
1617 uint32_t indexRecordFree = pVBVA->indexRecordFree;
1618
1619 Log(("first = %d, free = %d\n",
1620 indexRecordFirst, indexRecordFree));
1621
1622 if (indexRecordFirst == indexRecordFree)
1623 {
1624 /* No records to process. Return without assigning output variables. */
1625 return VINF_EOF;
1626 }
1627
1628 uint32_t cbRecordCurrent = ASMAtomicReadU32(&pVBVA->aRecords[indexRecordFirst].cbRecord);
1629
1630 /* A new record needs to be processed. */
1631 if (cbRecordCurrent & VBVA_F_RECORD_PARTIAL)
1632 {
1633 /* the record is being recorded, try again */
1634 return VINF_TRY_AGAIN;
1635 }
1636
1637 uint32_t cbRecord = cbRecordCurrent & ~VBVA_F_RECORD_PARTIAL;
1638
1639 if (!cbRecord)
1640 {
1641 /* the record is being recorded, try again */
1642 return VINF_TRY_AGAIN;
1643 }
1644
1645 /* we should not get partial commands here actually */
1646 Assert(cbRecord);
1647
1648 /* The size of the largest contiguous chunk in the ring buffer. */
1649 uint32_t u32BytesTillBoundary = pVBVA->cbData - pVBVA->off32Data;
1650
1651 /* The pointer to data in the ring buffer. */
1652 uint8_t *pSrc = &pVBVA->au8Data[pVBVA->off32Data];
1653
1654 /* Fetch or point the data. */
1655 if (u32BytesTillBoundary >= cbRecord)
1656 {
1657 /* The command does not cross buffer boundary. Return address in the buffer. */
1658 *ppCmd = pSrc;
1659 *pcbCmd = cbRecord;
1660 pCmdVbva->cbCurData = cbRecord;
1661 return VINF_SUCCESS;
1662 }
1663
1664 LogRel(("CmdVbva: cross-bound writes unsupported\n"));
1665 return VERR_INVALID_STATE;
1666}
1667
1668/* Resumes command processing
1669 * @returns - same as VBoxVBVAExHSCheckCommands
1670 */
1671static int vboxVBVAExHSResume(struct VBVAEXHOSTCONTEXT *pCmdVbva)
1672{
1673 Assert(pCmdVbva->u32State != VBVAEXHOSTCONTEXT_STATE_STOPPED);
1674
1675 ASMAtomicWriteU32(&pCmdVbva->u32State, VBVAEXHOSTCONTEXT_STATE_LISTENING);
1676
1677 return VBoxVBVAExHSCheckCommands(pCmdVbva);
1678}
1679
1680/* Pauses command processing. This makes the processor stop the command processing and release the processing state;
1681 * to resume command processing, vboxVBVAExHSResume must be called */
1682static void vboxVBVAExHSPause(struct VBVAEXHOSTCONTEXT *pCmdVbva)
1683{
1684 Assert(pCmdVbva->u32State != VBVAEXHOSTCONTEXT_STATE_STOPPED);
1685
1686 Assert(!pCmdVbva->u32Pause);
1687
1688 ASMAtomicWriteU32(&pCmdVbva->u32Pause, 1);
1689
1690 for(;;)
1691 {
1692 if (ASMAtomicCmpXchgU32(&pCmdVbva->u32State, VBVAEXHOSTCONTEXT_STATE_PAUSED, VBVAEXHOSTCONTEXT_STATE_LISTENING))
1693 break;
1694
1695 if (ASMAtomicReadU32(&pCmdVbva->u32State) == VBVAEXHOSTCONTEXT_STATE_PAUSED)
1696 break;
1697
1698 RTThreadSleep(2);
1699 }
1700
1701 pCmdVbva->u32Pause = 0;
1702}
1703
1704/* Releases (completes) the command previously acquired by VBoxVBVAExHPCmdGet.
1705 * For convenience it can be called if no command is currently acquired;
1706 * in that case it will do nothing and return false.
1707 * Returns true if the completion notification is needed. */
1708static bool VBoxVBVAExHPCmdCheckRelease(struct VBVAEXHOSTCONTEXT *pCmdVbva)
1709{
1710 Assert(pCmdVbva->u32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
1711
1712 return vboxVBVAExHPCmdCheckRelease(pCmdVbva);
1713}
1714
1715/*
1716 * @returns
1717 * VINF_SUCCESS - new command is obtained
1718 * VINF_EOF - processor has completed all commands and released the processing state, only VBoxVBVAExHS*** calls are now allowed
1719 * VINF_PERMISSION_DENIED - processing was paused, processing state was released, only VBoxVBVAExHS*** calls are now allowed
1720 * VINF_INTERRUPTED - command processing was interrupted, processor state remains set. client can process any commands,
1721 * and call VBoxVBVAExHPCmdGet again for further processing
1722 * VERR_** - error happened, most likely guest corrupted VBVA data
1723 *
1724 */
1725static int VBoxVBVAExHPCmdGet(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t **ppCmd, uint32_t *pcbCmd)
1726{
1727 Assert(pCmdVbva->u32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
1728
1729 for(;;)
1730 {
1731 if (vboxVBVAExHPCheckPause(pCmdVbva))
1732 return VINF_PERMISSION_DENIED;
1733 if (vboxVBVAExHPCheckOtherCommands(pCmdVbva))
1734 return VINF_INTERRUPTED;
1735
1736 int rc = vboxVBVAExHPCmdGet(pCmdVbva, ppCmd, pcbCmd);
1737 switch (rc)
1738 {
1739 case VINF_SUCCESS:
1740 return VINF_SUCCESS;
1741 case VINF_EOF:
1742 vboxVBVAExHPHgEventClear(pCmdVbva);
1743 vboxVBVAExHPProcessorRelease(pCmdVbva);
1744 /* we need to prevent racing between us clearing the flag and command check/submission thread, i.e.
1745 * 1. we check the queue -> and it is empty
1746 * 2. submitter adds command to the queue
1747 * 3. submitter checks the "processing" -> and it is true , thus it does not submit a notification
1748 * 4. we clear the "processing" state
1749 * 5. ->here we need to re-check the queue state to ensure we do not leak the notification of the above command
1750 * 6. if the queue appears to be not-empty set the "processing" state back to "true"
1751 **/
1752 if (VBoxVBVAExHSCheckCommands(pCmdVbva) == VINF_SUCCESS)
1753 continue;
1754 return VINF_EOF;
1755 case VINF_TRY_AGAIN:
1756 RTThreadSleep(1);
1757 continue;
1758 default:
1759 /* this is something really unexpected, i.e. most likely guest has written something incorrect to the VBVA buffer */
1760 if (RT_FAILURE(rc))
1761 return rc;
1762
1763 WARN(("Warning: vboxVBVAExHCmdGet returned unexpected success status %d\n", rc));
1764 return VERR_INTERNAL_ERROR;
1765 }
1766 }
1767
1768 WARN(("Warning: VBoxVBVAExHCmdGet unexpected state\n"));
1769 return VERR_INTERNAL_ERROR;
1770}
1771
1772/* Checks whether new commands are ready for processing
1773 * @returns
1774 * VINF_SUCCESS - there are commands in the queue, and the given thread is now the processor (i.e. typically it would delegate processing to a worker thread)
1775 * VINF_EOF - no commands in the queue
1776 * VINF_ALREADY_INITIALIZED - another thread is already processing the commands
1777 * VERR_INVALID_STATE - the VBVA is paused or pausing */
1778static int VBoxVBVAExHSCheckCommands(struct VBVAEXHOSTCONTEXT *pCmdVbva)
1779{
1780 if (ASMAtomicUoReadU32(&pCmdVbva->u32State) == VBVAEXHOSTCONTEXT_STATE_STOPPED)
1781 return VINF_EOF;
1782
1783 int rc = vboxVBVAExHSProcessorAcquire(pCmdVbva);
1784 if (RT_SUCCESS(rc))
1785 {
1786 /* we are the processor now */
1787 VBVABUFFER *pVBVA = pCmdVbva->pVBVA;
1788
1789 uint32_t indexRecordFirst = pVBVA->indexRecordFirst;
1790 uint32_t indexRecordFree = pVBVA->indexRecordFree;
1791
1792 if (indexRecordFirst != indexRecordFree)
1793 {
1794 vboxVBVAExHPHgEventSet(pCmdVbva);
1795 return VINF_SUCCESS;
1796 }
1797
1798 vboxVBVAExHPProcessorRelease(pCmdVbva);
1799 return VINF_EOF;
1800 }
1801 if (rc == VERR_SEM_BUSY)
1802 return VINF_ALREADY_INITIALIZED;
1803 Assert(rc == VERR_INVALID_STATE);
1804 return VERR_INVALID_STATE;
1805}
1806
1807static void VBoxVBVAExHSInit(struct VBVAEXHOSTCONTEXT *pCmdVbva)
1808{
1809 memset(pCmdVbva, 0, sizeof (*pCmdVbva));
1810}
1811
1812static int VBoxVBVAExHSEnable(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVABUFFER *pVBVA)
1813{
1814 if (ASMAtomicUoReadU32(&pCmdVbva->u32State) != VBVAEXHOSTCONTEXT_STATE_STOPPED)
1815 return VINF_ALREADY_INITIALIZED;
1816
1817 pCmdVbva->pVBVA = pVBVA;
1818 pCmdVbva->pVBVA->hostFlags.u32HostEvents = 0;
1819 ASMAtomicWriteU32(&pCmdVbva->u32State, VBVAEXHOSTCONTEXT_STATE_LISTENING);
1820 return VINF_SUCCESS;
1821}
1822
1823static int VBoxVBVAExHSDisable(struct VBVAEXHOSTCONTEXT *pCmdVbva)
1824{
1825 if (ASMAtomicUoReadU32(&pCmdVbva->u32State) == VBVAEXHOSTCONTEXT_STATE_STOPPED)
1826 return VINF_SUCCESS;
1827
1828 /* ensure no commands are pending and no one tries to submit new ones */
1829 int rc = vboxVBVAExHSProcessorAcquire(pCmdVbva);
1830 if (RT_SUCCESS(rc))
1831 {
1832 pCmdVbva->pVBVA->hostFlags.u32HostEvents = 0;
1833 memset(pCmdVbva, 0, sizeof (*pCmdVbva));
1834 return VINF_SUCCESS;
1835 }
1836 return VERR_INVALID_STATE;
1837}
1838
1839static void VBoxVBVAExHSTerm(struct VBVAEXHOSTCONTEXT *pCmdVbva)
1840{
1841 /* ensure the processor is stopped */
1842 if (ASMAtomicUoReadU32(&pCmdVbva->u32State) == VBVAEXHOSTCONTEXT_STATE_STOPPED)
1843 return;
1844
1845 /* ensure no one tries to submit the command */
1846 vboxVBVAExHSPause(pCmdVbva);
1847 pCmdVbva->pVBVA->hostFlags.u32HostEvents = 0;
1848 memset(pCmdVbva, 0, sizeof (*pCmdVbva));
1849}
1850
1851/* Saves state
1852 * @returns - same as VBoxVBVAExHSCheckCommands, or failure if writing the saved state fails
1853 */
1854static int VBoxVBVAExHSSaveState(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM)
1855{
1856 int rc;
1857 if (ASMAtomicUoReadU32(&pCmdVbva->u32State) != VBVAEXHOSTCONTEXT_STATE_STOPPED)
1858 {
1859 vboxVBVAExHSPause(pCmdVbva);
1860 rc = SSMR3PutU32(pSSM, (uint32_t)(((uint8_t*)pCmdVbva->pVBVA) - pu8VramBase));
1861 AssertRCReturn(rc, rc);
1862 return vboxVBVAExHSResume(pCmdVbva);
1863 }
1864
1865 rc = SSMR3PutU32(pSSM, 0xffffffff);
1866 AssertRCReturn(rc, rc);
1867
1868 return VINF_EOF;
1869}
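/* The saved-state record produced above is a single uint32_t: 0xffffffff when the command VBVA is
 * not enabled, otherwise the byte offset of the VBVABUFFER within VRAM; VBoxVBVAExHSLoadState
 * below reads it back and re-enables the context accordingly. */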
1870
1871/* Loads state
1872 * @returns - same as VBoxVBVAExHSCheckCommands, or failure if reading the saved state fails
1873 */
1874static int VBoxVBVAExHSLoadState(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM, uint32_t u32Version)
1875{
1876 uint32_t u32;
1877 int rc = SSMR3GetU32(pSSM, &u32);
1878 AssertRCReturn(rc, rc);
1879 if (u32 != 0xffffffff)
1880 {
1881 VBVABUFFER *pVBVA = (VBVABUFFER*)(pu8VramBase + u32);
1882 rc = VBoxVBVAExHSEnable(pCmdVbva, pVBVA);
1883 AssertRCReturn(rc, rc);
1884 return VBoxVBVAExHSCheckCommands(pCmdVbva);
1885 }
1886
1887 return VINF_EOF;
1888}
1889
1890int vboxCmdVBVAEnable(PVGASTATE pVGAState, VBVABUFFER *pVBVA)
1891{
1892 struct VBOXVDMAHOST *pVdma = pVGAState->pVdma;
1893 return VBoxVBVAExHSEnable(&pVdma->CmdVbva, pVBVA);
1894}
1895
1896int vboxCmdVBVADisable(PVGASTATE pVGAState)
1897{
1898 struct VBOXVDMAHOST *pVdma = pVGAState->pVdma;
1899 return VBoxVBVAExHSDisable(&pVdma->CmdVbva);
1900}
1901
1902static int vboxCmdVBVACmdSubmitPerform(PVGASTATE pVGAState)
1903{
1904 struct VBOXVDMAHOST *pVdma = pVGAState->pVdma;
1905 int rc = VBoxVBVAExHSCheckCommands(&pVdma->CmdVbva);
1906 switch (rc)
1907 {
1908 case VINF_SUCCESS:
1909 return pVGAState->pDrv->pfnCrCmdNotifyCmds(pVGAState->pDrv);
1910 case VINF_ALREADY_INITIALIZED:
1911 case VINF_EOF:
1912 case VERR_INVALID_STATE:
1913 return VINF_SUCCESS;
1914 default:
1915 Assert(!RT_FAILURE(rc));
1916 return RT_FAILURE(rc) ? rc : VERR_INTERNAL_ERROR;
1917 }
1918}
1919
1920int vboxCmdVBVACmdSubmit(PVGASTATE pVGAState)
1921{
1922 return vboxCmdVBVACmdSubmitPerform(pVGAState);
1923}
1924
1925int vboxCmdVBVACmdFlush(PVGASTATE pVGAState)
1926{
1927 return vboxCmdVBVACmdSubmitPerform(pVGAState);
1928}
1929
1930void vboxCmdVBVACmdTimer(PVGASTATE pVGAState)
1931{
1932 vboxCmdVBVACmdSubmitPerform(pVGAState);
1933}