VirtualBox

source: vbox/trunk/src/VBox/Devices/Graphics/DevVGA_VDMA.cpp@ 50625

Last change on this file since 50625 was 50550, checked in by vboxsync, 11 years ago

DevVGA/HGSMI: use RTList

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 61.1 KB
Line 
1/** @file
2 * Video DMA (VDMA) support.
3 */
4
5/*
6 * Copyright (C) 2006-2012 Oracle Corporation
7 *
8 * This file is part of VirtualBox Open Source Edition (OSE), as
9 * available from http://www.virtualbox.org. This file is free software;
10 * you can redistribute it and/or modify it under the terms of the GNU
11 * General Public License (GPL) as published by the Free Software
12 * Foundation, in version 2 as it comes in the "COPYING" file of the
13 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
14 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
15 */
16//#include <VBox/VMMDev.h>
17#include <VBox/vmm/pdmdev.h>
18#include <VBox/VBoxVideo.h>
19#include <iprt/semaphore.h>
20#include <iprt/thread.h>
21#include <iprt/mem.h>
22#include <iprt/asm.h>
23
24#include "DevVGA.h"
25#include "HGSMI/SHGSMIHost.h"
26
27#include <VBox/VBoxVideo3D.h>
28
#ifdef DEBUG_misha
/* Break into the debugger/assert on warnings in misha's debug builds only. */
# define WARN_BP() do { AssertFailed(); } while (0)
#else
# define WARN_BP() do { } while (0)
#endif
/* Log a release-log warning; additionally asserts in DEBUG_misha builds.
 * _msg must be a parenthesized LogRel argument list, e.g. WARN(("oops %d\n", i)). */
#define WARN(_msg) do { \
        LogRel(_msg); \
        WARN_BP(); \
    } while (0)
38
#ifdef VBOX_VDMA_WITH_WORKERTHREAD
/** Lifecycle states of the submitter/worker communication pipe. */
typedef enum
{
    VBOXVDMAPIPE_STATE_CLOSED = 0,
    VBOXVDMAPIPE_STATE_CREATED = 1,
    VBOXVDMAPIPE_STATE_OPENNED = 2,
    VBOXVDMAPIPE_STATE_CLOSING = 3
} VBOXVDMAPIPE_STATE;

/** Command pipe between the submitter and the worker thread. */
typedef struct VBOXVDMAPIPE
{
    /* event the blocked end waits on when there is nothing to do */
    RTSEMEVENT hEvent;
    /* critical section for accessing pipe properties */
    RTCRITSECT hCritSect;
    VBOXVDMAPIPE_STATE enmState;
    /* true iff the other end needs Event notification */
    bool bNeedNotify;
} VBOXVDMAPIPE, *PVBOXVDMAPIPE;

/** Discriminator for the VBOXVDMAPIPE_CMD_BODY union. */
typedef enum
{
    VBOXVDMAPIPE_CMD_TYPE_UNDEFINED = 0,
    VBOXVDMAPIPE_CMD_TYPE_DMACMD = 1,
    VBOXVDMAPIPE_CMD_TYPE_DMACTL = 2
} VBOXVDMAPIPE_CMD_TYPE;

/** Payload of a queued pipe command: a DMA command buffer or a DMA control request. */
typedef struct VBOXVDMAPIPE_CMD_BODY
{
    VBOXVDMAPIPE_CMD_TYPE enmType;
    union
    {
        PVBOXVDMACBUF_DR pDr;   /* valid when enmType == DMACMD */
        PVBOXVDMA_CTL pCtl;     /* valid when enmType == DMACTL */
        void *pvCmd;            /* type-erased view of the above */
    } u;
}VBOXVDMAPIPE_CMD_BODY, *PVBOXVDMAPIPE_CMD_BODY;

/** A queued pipe command; Entry links it into an HGSMILIST. */
typedef struct VBOXVDMAPIPE_CMD
{
    HGSMILISTENTRY Entry;
    VBOXVDMAPIPE_CMD_BODY Cmd;
} VBOXVDMAPIPE_CMD, *PVBOXVDMAPIPE_CMD;

/* Recover the containing VBOXVDMAPIPE_CMD from its embedded list entry. */
#define VBOXVDMAPIPE_CMD_FROM_ENTRY(_pE) ( (PVBOXVDMAPIPE_CMD)((uint8_t *)(_pE) - RT_OFFSETOF(VBOXVDMAPIPE_CMD, Entry)) )

/** Pool of pipe commands. aCmds is a variable-size trailing array;
 *  presumably cCmds entries are allocated with extra space — confirm at the allocation site. */
typedef struct VBOXVDMAPIPE_CMD_POOL
{
    HGSMILIST List;
    uint32_t cCmds;
    VBOXVDMAPIPE_CMD aCmds[1];
} VBOXVDMAPIPE_CMD_POOL, *PVBOXVDMAPIPE_CMD_POOL;
#endif
91
92
/* state transformations:
 *
 *   submitter   |    processor
 *
 *     STOPPED
 *        |
 *        |
 *        >
 *     LISTENING   --->  PROCESSING
 *        ^         _/
 *        |       _/
 *        |     _/
 *        |   _/
 *        |  /
 *        < >
 *       PAUSED
 *
 * */
/** No VBVA buffer attached, commands are not processed. */
#define VBVAEXHOSTCONTEXT_STATE_STOPPED     0
/** Enabled and idle; a submitter may move it to PROCESSING. */
#define VBVAEXHOSTCONTEXT_STATE_LISTENING   1
/** A processor currently owns the context and fetches commands. */
#define VBVAEXHOSTCONTEXT_STATE_PROCESSING  2
/** Processing suspended (see u32Pause in VBVAEXHOSTCONTEXT). */
#define VBVAEXHOSTCONTEXT_STATE_PAUSED      3
116
/** Host-side context for the extended VBVA command buffer shared with the guest. */
typedef struct VBVAEXHOSTCONTEXT
{
    VBVABUFFER *pVBVA;                    /* guest-shared buffer; presumably NULL while disabled — see VBoxVBVAExHSEnable */
    uint32_t cbCurData;                   /* size of the command currently being processed */
    volatile uint32_t u32State;           /* one of VBVAEXHOSTCONTEXT_STATE_XXX */
    volatile uint32_t u32Pause;           /* non-zero when a pause has been requested */
    volatile uint32_t u32cOtherCommands;  /* pending non-VBVA command count; confirm semantics at usage sites */
} VBVAEXHOSTCONTEXT;
125
/* VBoxVBVAExHP**, i.e. processor functions, can NOT be called concurrently with each other,
 * but can be called with other VBoxVBVAExS** (submitter) functions except Init/Start/Term apparently.
 * They may only be called by the processor, i.e. the entity that acquired the processor state by a direct or indirect call to VBoxVBVAExHSCheckCommands.
 * See the more detailed comments in the headers for the function definitions. */
130static bool VBoxVBVAExHPCmdCheckRelease(struct VBVAEXHOSTCONTEXT *pCmdVbva);
131static int VBoxVBVAExHPCmdGet(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t **ppCmd, uint32_t *pcbCmd);
132
/* VBoxVBVAExHS**, i.e. submitter functions, can be called concurrently with themselves
 * as well as with other VBoxVBVAEx** functions except Init/Start/Term apparently. */
135static int VBoxVBVAExHSCheckCommands(struct VBVAEXHOSTCONTEXT *pCmdVbva);
136
137static void VBoxVBVAExHSInit(struct VBVAEXHOSTCONTEXT *pCmdVbva);
138static int VBoxVBVAExHSEnable(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVABUFFER *pVBVA);
139static int VBoxVBVAExHSDisable(struct VBVAEXHOSTCONTEXT *pCmdVbva);
140static void VBoxVBVAExHSTerm(struct VBVAEXHOSTCONTEXT *pCmdVbva);
141static int VBoxVBVAExHSSaveState(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM);
142static int VBoxVBVAExHSLoadState(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM, uint32_t u32Version);
143
/** Instance data of the VDMA host. */
typedef struct VBOXVDMAHOST
{
    PHGSMIINSTANCE pHgsmi;      /* HGSMI instance used to complete guest commands */
    PVGASTATE pVGAState;        /* owning VGA device state */
    VBVAEXHOSTCONTEXT CmdVbva;  /* extended VBVA command context */
#ifdef VBOX_VDMA_WITH_WATCHDOG
    PTMTIMERR3 WatchDogTimer;
#endif
#ifdef VBOX_VDMA_WITH_WORKERTHREAD
    VBOXVDMAPIPE Pipe;              /* submitter -> worker command pipe */
    HGSMILIST PendingList;          /* commands queued for the worker */
    RTTHREAD hWorkerThread;
    VBOXVDMAPIPE_CMD_POOL CmdPool;  /* pool backing the queued commands */
#endif
} VBOXVDMAHOST, *PVBOXVDMAHOST;
159
160
161#ifdef VBOX_WITH_CRHGSMI
162
/** Completion callback type for chromium control commands. */
typedef DECLCALLBACK(void) FNVBOXVDMACRCTL_CALLBACK(PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, void* pvContext);
typedef FNVBOXVDMACRCTL_CALLBACK *PFNVBOXVDMACRCTL_CALLBACK;

/** Private, reference-counted header placed immediately in front of every
 *  chromium control command allocated by vboxVDMACrCtlCreate.  Cmd is the
 *  public part handed out to callers and must stay the last member (the
 *  allocation size is RT_OFFSETOF(..., Cmd) + cbCmd). */
typedef struct VBOXVDMACMD_CHROMIUM_CTL_PRIVATE
{
    uint32_t cRefs;                           /* reference count, manipulated atomically */
    int32_t rc;                               /* completion status, set by the completion path */
    PFNVBOXVDMACRCTL_CALLBACK pfnCompletion;  /* optional completion callback */
    void *pvCompletion;                       /* user context for pfnCompletion */
    VBOXVDMACMD_CHROMIUM_CTL Cmd;             /* public command, must be last */
} VBOXVDMACMD_CHROMIUM_CTL_PRIVATE, *PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE;

/* Recover the private header from a public command pointer. */
#define VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(_p) ((PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE)(((uint8_t*)(_p)) - RT_OFFSETOF(VBOXVDMACMD_CHROMIUM_CTL_PRIVATE, Cmd)))
176
177static PVBOXVDMACMD_CHROMIUM_CTL vboxVDMACrCtlCreate(VBOXVDMACMD_CHROMIUM_CTL_TYPE enmCmd, uint32_t cbCmd)
178{
179 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = (PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE)RTMemAllocZ(cbCmd + RT_OFFSETOF(VBOXVDMACMD_CHROMIUM_CTL_PRIVATE, Cmd));
180 Assert(pHdr);
181 if (pHdr)
182 {
183 pHdr->cRefs = 1;
184 pHdr->rc = VERR_NOT_IMPLEMENTED;
185 pHdr->Cmd.enmType = enmCmd;
186 pHdr->Cmd.cbCmd = cbCmd;
187 return &pHdr->Cmd;
188 }
189
190 return NULL;
191}
192
193DECLINLINE(void) vboxVDMACrCtlRelease (PVBOXVDMACMD_CHROMIUM_CTL pCmd)
194{
195 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
196 uint32_t cRefs = ASMAtomicDecU32(&pHdr->cRefs);
197 if(!cRefs)
198 {
199 RTMemFree(pHdr);
200 }
201}
202
/** Takes an additional reference to the command. */
DECLINLINE(void) vboxVDMACrCtlRetain (PVBOXVDMACMD_CHROMIUM_CTL pCmd)
{
    PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
    ASMAtomicIncU32(&pHdr->cRefs);
}
208
/** Returns the completion status stored by vboxVDMACrHgsmiControlCompleteAsync
 *  (VERR_NOT_IMPLEMENTED until the command has completed). */
DECLINLINE(int) vboxVDMACrCtlGetRc (PVBOXVDMACMD_CHROMIUM_CTL pCmd)
{
    PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
    return pHdr->rc;
}
214
215static DECLCALLBACK(void) vboxVDMACrCtlCbSetEvent(PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, void* pvContext)
216{
217 RTSemEventSignal((RTSEMEVENT)pvContext);
218}
219
/** Completion callback: simply drops the command reference (fire-and-forget posts). */
static DECLCALLBACK(void) vboxVDMACrCtlCbReleaseCmd(PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, void* pvContext)
{
    vboxVDMACrCtlRelease(pCmd);
}
224
225
226static int vboxVDMACrCtlPostAsync (PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, uint32_t cbCmd, PFNVBOXVDMACRCTL_CALLBACK pfnCompletion, void *pvCompletion)
227{
228 if ( pVGAState->pDrv
229 && pVGAState->pDrv->pfnCrHgsmiControlProcess)
230 {
231 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
232 pHdr->pfnCompletion = pfnCompletion;
233 pHdr->pvCompletion = pvCompletion;
234 pVGAState->pDrv->pfnCrHgsmiControlProcess(pVGAState->pDrv, pCmd, cbCmd);
235 return VINF_SUCCESS;
236 }
237#ifdef DEBUG_misha
238 Assert(0);
239#endif
240 return VERR_NOT_SUPPORTED;
241}
242
/**
 * Posts a chromium control command and waits for its completion.
 *
 * @returns VBox status code of the post or of the wait.
 */
static int vboxVDMACrCtlPost(PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, uint32_t cbCmd)
{
    RTSEMEVENT hComplEvent;
    int rc = RTSemEventCreate(&hComplEvent);
    AssertRC(rc);
    if(RT_SUCCESS(rc))
    {
        /* the completion callback signals the event */
        rc = vboxVDMACrCtlPostAsync (pVGAState, pCmd, cbCmd, vboxVDMACrCtlCbSetEvent, (void*)hComplEvent);
#ifdef DEBUG_misha
        AssertRC(rc);
#endif
        if (RT_SUCCESS(rc))
        {
            rc = RTSemEventWaitNoResume(hComplEvent, RT_INDEFINITE_WAIT);
            AssertRC(rc);
            if(RT_SUCCESS(rc))
            {
                RTSemEventDestroy(hComplEvent);
            }
            /* NOTE(review): when the wait fails (e.g. interrupted) the event is NOT
             * destroyed — the pending completion callback may still signal it later,
             * so the handle is leaked instead of risking a use-after-destroy; confirm
             * this is intentional. */
        }
        else
        {
            /* the command is completed */
            RTSemEventDestroy(hComplEvent);
        }
    }
    return rc;
}
271
/** Notifies the chromium backend that (more) commands are available. */
static void vboxVDMACrCmdNotifyPerform(struct VBOXVDMAHOST *pVdma)
{
    PVGASTATE pVGAState = pVdma->pVGAState;
    pVGAState->pDrv->pfnCrCmdNotifyCmds(pVGAState->pDrv);
}
277
278/*
279 * @returns
280 *
281 */
282static int vboxVDMACrCmdPreprocess(struct VBOXVDMAHOST *pVdma, uint8_t* pu8Cmd, uint32_t cbCmd)
283{
284 if (*pu8Cmd == VBOXCMDVBVA_OPTYPE_NOP)
285 return VINF_EOF;
286
287 PVBOXCMDVBVA_HDR pCmd = (PVBOXCMDVBVA_HDR)pu8Cmd;
288
289 /* check if the command is cancelled */
290 if (!ASMAtomicCmpXchgU8(&pCmd->u8State, VBOXCMDVBVA_STATE_IN_PROGRESS, VBOXCMDVBVA_STATE_SUBMITTED))
291 {
292 Assert(pCmd->u8State == VBOXCMDVBVA_STATE_CANCELLED);
293 return VINF_EOF;
294 }
295
296 /* come commands can be handled right away? */
297 switch (pCmd->u8OpCode)
298 {
299 case VBOXCMDVBVA_OPTYPE_NOPCMD:
300 pCmd->i8Result = 0;
301 return VINF_EOF;
302 default:
303 return VINF_SUCCESS;
304 }
305}
306
/**
 * Backend callback: fetches the next VBVA command for the chromium client.
 *
 * Loops until it finds a command that survives pre-processing, or until the
 * command stream is exhausted/paused/interrupted.
 *
 * @returns VINF_SUCCESS with *ppNextCmd/*pcbNextCmd set, VINF_EOF when there
 *          is (currently) nothing to process, or a VERR_xxx status.
 */
static DECLCALLBACK(int) vboxVDMACrCmdCltCmdGet(HVBOXCRCMDCLT hClt, PVBOXCMDVBVA_HDR *ppNextCmd, uint32_t *pcbNextCmd)
{
    struct VBOXVDMAHOST *pVdma = hClt;

    /* release the previously fetched command, if any */
    VBoxVBVAExHPCmdCheckRelease(&pVdma->CmdVbva);

    uint32_t cbCmd;
    uint8_t *pu8Cmd;

    for(;;)
    {
        int rc = VBoxVBVAExHPCmdGet(&pVdma->CmdVbva, &pu8Cmd, &cbCmd);
        switch (rc)
        {
            case VINF_SUCCESS:
            {
                rc = vboxVDMACrCmdPreprocess(pVdma, pu8Cmd, cbCmd);
                switch (rc)
                {
                    case VINF_SUCCESS:
                        *ppNextCmd = (PVBOXCMDVBVA_HDR)pu8Cmd;
                        *pcbNextCmd = cbCmd;
                        return VINF_SUCCESS;
                    case VINF_EOF:
                        /* command handled/cancelled during preprocessing; fetch the next one */
                        continue;
                    default:
                        Assert(!RT_FAILURE(rc));
                        return RT_FAILURE(rc) ? rc : VERR_INTERNAL_ERROR;
                }
                break;
            }
            case VINF_EOF:
                return VINF_EOF;
            case VINF_PERMISSION_DENIED:
                /* processing was paused, processing state was released, only VBoxVBVAExHS*** calls are now allowed */
                return VINF_EOF;
            case VINF_INTERRUPTED:
                /* command processing was interrupted, processor state remains set. client can process any commands */
                vboxVDMACrCmdNotifyPerform(pVdma);
                return VINF_EOF;
            default:
                Assert(!RT_FAILURE(rc));
                return RT_FAILURE(rc) ? rc : VERR_INTERNAL_ERROR;
        }
    }

    /* NOTE(review): unreachable — every path in the loop above returns or continues. */
    WARN(("Warning: vboxVDMACrCmdCltCmdGet unexpected state\n"));
    return VERR_INTERNAL_ERROR;
}
356
357static DECLCALLBACK(int) vboxVDMACrCmdCltDmGet(HVBOXCRCMDCLT hClt, uint32_t idScreen, struct VBVAINFOSCREEN *pScreen, void **ppvVram)
358{
359 struct VBOXVDMAHOST *pVdma = hClt;
360 PVGASTATE pVGAState = pVdma->pVGAState;
361
362 return VBVAGetScreenInfo(pVGAState, idScreen, pScreen, ppvVram);
363}
364
/**
 * Sends the CRHGSMI_SETUP control command to the chromium backend, telling it
 * the VRAM location/size and the client callback table.
 *
 * @returns VBox status code.
 */
static int vboxVDMACrCtlHgsmiSetup(struct VBOXVDMAHOST *pVdma)
{
    PVBOXVDMACMD_CHROMIUM_CTL_CRHGSMI_SETUP pCmd;
    pCmd = (PVBOXVDMACMD_CHROMIUM_CTL_CRHGSMI_SETUP) vboxVDMACrCtlCreate (VBOXVDMACMD_CHROMIUM_CTL_TYPE_CRHGSMI_SETUP,
                                                                          sizeof (*pCmd));
    if (pCmd)
    {
        /* NOTE(review): CltInfo is a stack variable whose address goes into the
         * command; this is only safe because vboxVDMACrCtlPost below waits for
         * completion — presumably the backend copies the table during processing;
         * confirm it does not keep the pointer. */
        VBOXCRCMD_CLTINFO CltInfo;
        CltInfo.hClient = pVdma;
        CltInfo.pfnCmdGet = vboxVDMACrCmdCltCmdGet;
        CltInfo.pfnDmGet = vboxVDMACrCmdCltDmGet;
        PVGASTATE pVGAState = pVdma->pVGAState;
        pCmd->pvVRamBase = pVGAState->vram_ptrR3;
        pCmd->cbVRam = pVGAState->vram_size;
        pCmd->pCrCmdClientInfo = &CltInfo;
        int rc = vboxVDMACrCtlPost(pVGAState, &pCmd->Hdr, sizeof (*pCmd));
        Assert(RT_SUCCESS(rc) || rc == VERR_NOT_SUPPORTED);
        if (RT_SUCCESS(rc))
        {
            rc = vboxVDMACrCtlGetRc(&pCmd->Hdr);
        }
        vboxVDMACrCtlRelease(&pCmd->Hdr);
        return rc;
    }
    return VERR_NO_MEMORY;
}
391
392static int vboxVDMACmdExecBpbTransfer(PVBOXVDMAHOST pVdma, const PVBOXVDMACMD_DMA_BPB_TRANSFER pTransfer, uint32_t cbBuffer);
393
/* check if this is external cmd to be passed to chromium backend */
/**
 * Inspects a DMA command descriptor and, when it is a chromium command or a
 * BPB transfer, processes it here.
 *
 * @returns VINF_NOT_SUPPORTED when the command is not handled here (caller
 *          must process it), VINF_SUCCESS when handled, VERR_xxx on invalid input.
 */
static int vboxVDMACmdCheckCrCmd(struct VBOXVDMAHOST *pVdma, PVBOXVDMACBUF_DR pCmdDr, uint32_t cbCmdDr)
{
    PVBOXVDMACMD pDmaCmd = NULL;
    uint32_t cbDmaCmd = 0;
    uint8_t * pvRam = pVdma->pVGAState->vram_ptrR3;
    int rc = VINF_NOT_SUPPORTED;

    /* guest-controlled size of the command buffer */
    cbDmaCmd = pCmdDr->cbBuf;

    if (pCmdDr->fFlags & VBOXVDMACBUF_FLAG_BUF_FOLLOWS_DR)
    {
        /* the DMA command is embedded right after the descriptor */
        if (cbCmdDr < sizeof (*pCmdDr) + VBOXVDMACMD_HEADER_SIZE())
        {
            AssertMsgFailed(("invalid buffer data!"));
            return VERR_INVALID_PARAMETER;
        }

        /* NOTE(review): this only enforces a lower bound on cbBuf; a cbBuf larger
         * than the embedded buffer is not rejected here — confirm the HGSMI layer
         * validated cbCmdDr against shared memory before this point. */
        if (cbDmaCmd < cbCmdDr - sizeof (*pCmdDr) - VBOXVDMACMD_HEADER_SIZE())
        {
            AssertMsgFailed(("invalid command buffer data!"));
            return VERR_INVALID_PARAMETER;
        }

        pDmaCmd = VBOXVDMACBUF_DR_TAIL(pCmdDr, VBOXVDMACMD);
    }
    else if (pCmdDr->fFlags & VBOXVDMACBUF_FLAG_BUF_VRAM_OFFSET)
    {
        /* the DMA command lives in VRAM at the given offset */
        VBOXVIDEOOFFSET offBuf = pCmdDr->Location.offVramBuf;
        if (offBuf + cbDmaCmd > pVdma->pVGAState->vram_size)
        {
            AssertMsgFailed(("invalid command buffer data from offset!"));
            return VERR_INVALID_PARAMETER;
        }
        pDmaCmd = (VBOXVDMACMD*)(pvRam + offBuf);
    }

    if (pDmaCmd)
    {
        Assert(cbDmaCmd >= VBOXVDMACMD_HEADER_SIZE());
        uint32_t cbBody = VBOXVDMACMD_BODY_SIZE(cbDmaCmd);

        switch (pDmaCmd->enmType)
        {
            case VBOXVDMACMD_TYPE_CHROMIUM_CMD:
            {
                PVBOXVDMACMD_CHROMIUM_CMD pCrCmd = VBOXVDMACMD_BODY(pDmaCmd, VBOXVDMACMD_CHROMIUM_CMD);
                if (cbBody < sizeof (*pCrCmd))
                {
                    AssertMsgFailed(("invalid chromium command buffer size!"));
                    return VERR_INVALID_PARAMETER;
                }
                PVGASTATE pVGAState = pVdma->pVGAState;
                rc = VINF_SUCCESS;
                if (pVGAState->pDrv->pfnCrHgsmiCommandProcess)
                {
                    /* hand over to the backend; completion comes back later via
                     * vboxVDMACrHgsmiCommandCompleteAsync */
                    VBoxSHGSMICommandMarkAsynchCompletion(pCmdDr);
                    pVGAState->pDrv->pfnCrHgsmiCommandProcess(pVGAState->pDrv, pCrCmd, cbBody);
                    break;
                }
                else
                {
                    Assert(0);
                }

                /* no backend available: complete the command immediately */
                int tmpRc = VBoxSHGSMICommandComplete (pVdma->pHgsmi, pCmdDr);
                AssertRC(tmpRc);
                break;
            }
            case VBOXVDMACMD_TYPE_DMA_BPB_TRANSFER:
            {
                PVBOXVDMACMD_DMA_BPB_TRANSFER pTransfer = VBOXVDMACMD_BODY(pDmaCmd, VBOXVDMACMD_DMA_BPB_TRANSFER);
                if (cbBody < sizeof (*pTransfer))
                {
                    AssertMsgFailed(("invalid bpb transfer buffer size!"));
                    return VERR_INVALID_PARAMETER;
                }

                /* executed synchronously; complete right away */
                rc = vboxVDMACmdExecBpbTransfer(pVdma, pTransfer, sizeof (*pTransfer));
                AssertRC(rc);
                if (RT_SUCCESS(rc))
                {
                    pCmdDr->rc = VINF_SUCCESS;
                    rc = VBoxSHGSMICommandComplete (pVdma->pHgsmi, pCmdDr);
                    AssertRC(rc);
                    rc = VINF_SUCCESS;
                }
                break;
            }
            default:
                break;
        }
    }
    return rc;
}
489
/**
 * Backend entry point: completes a chromium DMA command.
 *
 * Recovers the original HGSMI buffer descriptor from the command body pointer
 * and completes it back to the guest with the given status.
 */
int vboxVDMACrHgsmiCommandCompleteAsync(PPDMIDISPLAYVBVACALLBACKS pInterface, PVBOXVDMACMD_CHROMIUM_CMD pCmd, int rc)
{
    PVGASTATE pVGAState = PPDMIDISPLAYVBVACALLBACKS_2_PVGASTATE(pInterface);
    PHGSMIINSTANCE pIns = pVGAState->pHGSMI;
    VBOXVDMACMD *pDmaHdr = VBOXVDMACMD_FROM_BODY(pCmd);
    VBOXVDMACBUF_DR *pDr = VBOXVDMACBUF_DR_FROM_TAIL(pDmaHdr);
    AssertRC(rc);
    pDr->rc = rc;

    /* the guest must complete commands by IO read, otherwise the completion IRQ is missing here */
    Assert(pVGAState->fGuestCaps & VBVACAPS_COMPLETEGCMD_BY_IOREAD);
    rc = VBoxSHGSMICommandComplete(pIns, pDr);
    AssertRC(rc);
    return rc;
}
504
505int vboxVDMACrHgsmiControlCompleteAsync(PPDMIDISPLAYVBVACALLBACKS pInterface, PVBOXVDMACMD_CHROMIUM_CTL pCmd, int rc)
506{
507 PVGASTATE pVGAState = PPDMIDISPLAYVBVACALLBACKS_2_PVGASTATE(pInterface);
508 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pCmdPrivate = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
509 pCmdPrivate->rc = rc;
510 if (pCmdPrivate->pfnCompletion)
511 {
512 pCmdPrivate->pfnCompletion(pVGAState, pCmd, pCmdPrivate->pvCompletion);
513 }
514 return VINF_SUCCESS;
515}
516
517#endif
518
519#ifdef VBOX_VDMA_WITH_WORKERTHREAD
520/* to simplify things and to avoid extra backend if modifications we assume the VBOXVDMA_RECTL is the same as VBVACMDHDR */
521AssertCompile(sizeof(VBOXVDMA_RECTL) == sizeof(VBVACMDHDR));
522AssertCompile(RT_SIZEOFMEMB(VBOXVDMA_RECTL, left) == RT_SIZEOFMEMB(VBVACMDHDR, x));
523AssertCompile(RT_SIZEOFMEMB(VBOXVDMA_RECTL, top) == RT_SIZEOFMEMB(VBVACMDHDR, y));
524AssertCompile(RT_SIZEOFMEMB(VBOXVDMA_RECTL, width) == RT_SIZEOFMEMB(VBVACMDHDR, w));
525AssertCompile(RT_SIZEOFMEMB(VBOXVDMA_RECTL, height) == RT_SIZEOFMEMB(VBVACMDHDR, h));
526AssertCompile(RT_OFFSETOF(VBOXVDMA_RECTL, left) == RT_OFFSETOF(VBVACMDHDR, x));
527AssertCompile(RT_OFFSETOF(VBOXVDMA_RECTL, top) == RT_OFFSETOF(VBVACMDHDR, y));
528AssertCompile(RT_OFFSETOF(VBOXVDMA_RECTL, width) == RT_OFFSETOF(VBVACMDHDR, w));
529AssertCompile(RT_OFFSETOF(VBOXVDMA_RECTL, height) == RT_OFFSETOF(VBVACMDHDR, h));
530
531static int vboxVDMANotifyPrimaryUpdate (PVGASTATE pVGAState, unsigned uScreenId, const VBOXVDMA_RECTL * pRectl)
532{
533 pVGAState->pDrv->pfnVBVAUpdateBegin (pVGAState->pDrv, uScreenId);
534
535 /* Updates the rectangle and sends the command to the VRDP server. */
536 pVGAState->pDrv->pfnVBVAUpdateProcess (pVGAState->pDrv, uScreenId,
537 (const PVBVACMDHDR)pRectl /* <- see above AssertCompile's and comments */,
538 sizeof (VBOXVDMA_RECTL));
539
540 pVGAState->pDrv->pfnVBVAUpdateEnd (pVGAState->pDrv, uScreenId, pRectl->left, pRectl->top,
541 pRectl->width, pRectl->height);
542
543 return VINF_SUCCESS;
544}
545#endif
546
/**
 * Copies the pixels of one rectangle between two surfaces in VRAM.
 *
 * Color conversion and stretching are not supported: the formats and the
 * rectangle sizes must match (asserted; mismatching formats are rejected).
 *
 * @returns VINF_SUCCESS or VERR_INVALID_FUNCTION on a format mismatch.
 */
static int vboxVDMACmdExecBltPerform(PVBOXVDMAHOST pVdma,
        uint8_t *pvDstSurf, const uint8_t *pvSrcSurf,
        const PVBOXVDMA_SURF_DESC pDstDesc, const PVBOXVDMA_SURF_DESC pSrcDesc,
        const VBOXVDMA_RECTL * pDstRectl, const VBOXVDMA_RECTL * pSrcRectl)
{
    /* we do not support color conversion */
    Assert(pDstDesc->format == pSrcDesc->format);
    /* we do not support stretching */
    Assert(pDstRectl->height == pSrcRectl->height);
    Assert(pDstRectl->width == pSrcRectl->width);
    if (pDstDesc->format != pSrcDesc->format)
        return VERR_INVALID_FUNCTION;
    if (pDstDesc->width == pDstRectl->width
            && pSrcDesc->width == pSrcRectl->width
            && pSrcDesc->width == pDstDesc->width)
    {
        /* Full-width blit: the rows are contiguous, copy them in one memcpy.
         * NOTE(review): the same cbOff (from the DESTINATION rectangle) is used
         * for both surfaces, i.e. src and dst rects are assumed to coincide —
         * only their sizes are asserted equal above; confirm callers guarantee this. */
        Assert(!pDstRectl->left);
        Assert(!pSrcRectl->left);
        uint32_t cbOff = pDstDesc->pitch * pDstRectl->top;
        uint32_t cbSize = pDstDesc->pitch * pDstRectl->height;
        memcpy(pvDstSurf + cbOff, pvSrcSurf + cbOff, cbSize);
    }
    else
    {
        /* Partial-width blit: copy row by row.  Byte offsets are derived from
         * bit positions (bpp may not be a multiple of 8), rounding the end up. */
        uint32_t offDstLineStart = pDstRectl->left * pDstDesc->bpp >> 3;
        uint32_t offDstLineEnd = ((pDstRectl->left * pDstDesc->bpp + 7) >> 3) + ((pDstDesc->bpp * pDstRectl->width + 7) >> 3);
        uint32_t cbDstLine = offDstLineEnd - offDstLineStart;
        uint32_t offDstStart = pDstDesc->pitch * pDstRectl->top + offDstLineStart;
        Assert(cbDstLine <= pDstDesc->pitch);
        uint32_t cbDstSkip = pDstDesc->pitch;
        uint8_t * pvDstStart = pvDstSurf + offDstStart;

        uint32_t offSrcLineStart = pSrcRectl->left * pSrcDesc->bpp >> 3;
        uint32_t offSrcLineEnd = ((pSrcRectl->left * pSrcDesc->bpp + 7) >> 3) + ((pSrcDesc->bpp * pSrcRectl->width + 7) >> 3);
        uint32_t cbSrcLine = offSrcLineEnd - offSrcLineStart;
        uint32_t offSrcStart = pSrcDesc->pitch * pSrcRectl->top + offSrcLineStart;
        Assert(cbSrcLine <= pSrcDesc->pitch);
        uint32_t cbSrcSkip = pSrcDesc->pitch;
        const uint8_t * pvSrcStart = pvSrcSurf + offSrcStart;

        Assert(cbDstLine == cbSrcLine);

        /* NOTE(review): the memcpy executes for i = 0..height INCLUSIVE, i.e.
         * height+1 rows are copied — off-by-one unless RECTL height excludes
         * the last row; confirm against the guest-side producer. */
        for (uint32_t i = 0; ; ++i)
        {
            memcpy (pvDstStart, pvSrcStart, cbDstLine);
            if (i == pDstRectl->height)
                break;
            pvDstStart += cbDstSkip;
            pvSrcStart += cbSrcSkip;
        }
    }
    return VINF_SUCCESS;
}
600
/**
 * Grows *pRectl1 in place to the bounding rectangle of *pRectl1 and *pRectl2.
 * An empty (zero-width) *pRectl1 is simply replaced by *pRectl2.
 */
static void vboxVDMARectlUnite(VBOXVDMA_RECTL * pRectl1, const VBOXVDMA_RECTL * pRectl2)
{
    if (!pRectl1->width)
        *pRectl1 = *pRectl2;
    else
    {
        /* horizontal extent: x21/x22 are the right edges of the two rects;
         * note x21 is computed BEFORE left may be moved below */
        int16_t x21 = pRectl1->left + pRectl1->width;
        int16_t x22 = pRectl2->left + pRectl2->width;
        if (pRectl1->left > pRectl2->left)
        {
            pRectl1->left = pRectl2->left;
            pRectl1->width = x21 < x22 ? x22 - pRectl1->left : x21 - pRectl1->left;
        }
        else if (x21 < x22)
            pRectl1->width = x22 - pRectl1->left;

        /* vertical extent: reuse x21/x22 as bottom edges */
        x21 = pRectl1->top + pRectl1->height;
        x22 = pRectl2->top + pRectl2->height;
        if (pRectl1->top > pRectl2->top)
        {
            pRectl1->top = pRectl2->top;
            pRectl1->height = x21 < x22 ? x22 - pRectl1->top : x21 - pRectl1->top;
        }
        else if (x21 < x22)
            pRectl1->height = x22 - pRectl1->top;
    }
}
628
/*
 * Executes a VBOXVDMACMD_TYPE_DMA_PRESENT_BLT command.
 *
 * @return on success the number of bytes the command contained, otherwise - VERR_xxx error code
 */
static int vboxVDMACmdExecBlt(PVBOXVDMAHOST pVdma, const PVBOXVDMACMD_DMA_PRESENT_BLT pBlt, uint32_t cbBuffer)
{
    /* NOTE(review): pBlt->cDstSubRects is guest-controlled and is read to compute
     * cbBlt before the buffer-size check below — the caller only guarantees the
     * command header fits, not the fixed BLT part; confirm the fixed part is
     * covered before dereferencing. */
    const uint32_t cbBlt = VBOXVDMACMD_BODY_FIELD_OFFSET(uint32_t, VBOXVDMACMD_DMA_PRESENT_BLT, aDstSubRects[pBlt->cDstSubRects]);
    Assert(cbBlt <= cbBuffer);
    if (cbBuffer < cbBlt)
        return VERR_INVALID_FUNCTION;

    /* we do not support stretching for now */
    Assert(pBlt->srcRectl.width == pBlt->dstRectl.width);
    Assert(pBlt->srcRectl.height == pBlt->dstRectl.height);
    if (pBlt->srcRectl.width != pBlt->dstRectl.width)
        return VERR_INVALID_FUNCTION;
    if (pBlt->srcRectl.height != pBlt->dstRectl.height)
        return VERR_INVALID_FUNCTION;
    Assert(pBlt->cDstSubRects);

    uint8_t * pvRam = pVdma->pVGAState->vram_ptrR3;
    /* accumulates the union of all blitted rectangles for the screen update */
    VBOXVDMA_RECTL updateRectl = {0, 0, 0, 0};

    if (pBlt->cDstSubRects)
    {
        VBOXVDMA_RECTL dstRectl, srcRectl;
        const VBOXVDMA_RECTL *pDstRectl, *pSrcRectl;
        for (uint32_t i = 0; i < pBlt->cDstSubRects; ++i)
        {
            /* sub-rects are relative to dstRectl/srcRectl; translate when needed */
            pDstRectl = &pBlt->aDstSubRects[i];
            if (pBlt->dstRectl.left || pBlt->dstRectl.top)
            {
                dstRectl.left = pDstRectl->left + pBlt->dstRectl.left;
                dstRectl.top = pDstRectl->top + pBlt->dstRectl.top;
                dstRectl.width = pDstRectl->width;
                dstRectl.height = pDstRectl->height;
                pDstRectl = &dstRectl;
            }

            /* NOTE(review): the SOURCE sub-rect is also taken from aDstSubRects —
             * there is no aSrcSubRects array, so the same sub-rect geometry is
             * used for both surfaces; presumably intentional, confirm. */
            pSrcRectl = &pBlt->aDstSubRects[i];
            if (pBlt->srcRectl.left || pBlt->srcRectl.top)
            {
                srcRectl.left = pSrcRectl->left + pBlt->srcRectl.left;
                srcRectl.top = pSrcRectl->top + pBlt->srcRectl.top;
                srcRectl.width = pSrcRectl->width;
                srcRectl.height = pSrcRectl->height;
                pSrcRectl = &srcRectl;
            }

            int rc = vboxVDMACmdExecBltPerform(pVdma, pvRam + pBlt->offDst, pvRam + pBlt->offSrc,
                    &pBlt->dstDesc, &pBlt->srcDesc,
                    pDstRectl,
                    pSrcRectl);
            AssertRC(rc);
            if (!RT_SUCCESS(rc))
                return rc;

            vboxVDMARectlUnite(&updateRectl, pDstRectl);
        }
    }
    else
    {
        /* no sub-rects: blit the whole destination rectangle */
        int rc = vboxVDMACmdExecBltPerform(pVdma, pvRam + pBlt->offDst, pvRam + pBlt->offSrc,
                &pBlt->dstDesc, &pBlt->srcDesc,
                &pBlt->dstRectl,
                &pBlt->srcRectl);
        AssertRC(rc);
        if (!RT_SUCCESS(rc))
            return rc;

        vboxVDMARectlUnite(&updateRectl, &pBlt->dstRectl);
    }

#ifdef VBOX_VDMA_WITH_WORKERTHREAD
    int iView = 0;
    /* @todo: fixme: check if update is needed and get iView */
    vboxVDMANotifyPrimaryUpdate (pVdma->pVGAState, iView, &updateRectl);
#endif

    return cbBlt;
}
709
710static int vboxVDMACmdExecBpbTransfer(PVBOXVDMAHOST pVdma, const PVBOXVDMACMD_DMA_BPB_TRANSFER pTransfer, uint32_t cbBuffer)
711{
712 if (cbBuffer < sizeof (*pTransfer))
713 return VERR_INVALID_PARAMETER;
714
715 PVGASTATE pVGAState = pVdma->pVGAState;
716 uint8_t * pvRam = pVGAState->vram_ptrR3;
717 PGMPAGEMAPLOCK SrcLock;
718 PGMPAGEMAPLOCK DstLock;
719 PPDMDEVINS pDevIns = pVdma->pVGAState->pDevInsR3;
720 const void * pvSrc;
721 void * pvDst;
722 int rc = VINF_SUCCESS;
723 uint32_t cbTransfer = pTransfer->cbTransferSize;
724 uint32_t cbTransfered = 0;
725 bool bSrcLocked = false;
726 bool bDstLocked = false;
727 do
728 {
729 uint32_t cbSubTransfer = cbTransfer;
730 if (pTransfer->fFlags & VBOXVDMACMD_DMA_BPB_TRANSFER_F_SRC_VRAMOFFSET)
731 {
732 pvSrc = pvRam + pTransfer->Src.offVramBuf + cbTransfered;
733 }
734 else
735 {
736 RTGCPHYS phPage = pTransfer->Src.phBuf;
737 phPage += cbTransfered;
738 rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, phPage, 0, &pvSrc, &SrcLock);
739 AssertRC(rc);
740 if (RT_SUCCESS(rc))
741 {
742 bSrcLocked = true;
743 cbSubTransfer = RT_MIN(cbSubTransfer, 0x1000);
744 }
745 else
746 {
747 break;
748 }
749 }
750
751 if (pTransfer->fFlags & VBOXVDMACMD_DMA_BPB_TRANSFER_F_DST_VRAMOFFSET)
752 {
753 pvDst = pvRam + pTransfer->Dst.offVramBuf + cbTransfered;
754 }
755 else
756 {
757 RTGCPHYS phPage = pTransfer->Dst.phBuf;
758 phPage += cbTransfered;
759 rc = PDMDevHlpPhysGCPhys2CCPtr(pDevIns, phPage, 0, &pvDst, &DstLock);
760 AssertRC(rc);
761 if (RT_SUCCESS(rc))
762 {
763 bDstLocked = true;
764 cbSubTransfer = RT_MIN(cbSubTransfer, 0x1000);
765 }
766 else
767 {
768 break;
769 }
770 }
771
772 if (RT_SUCCESS(rc))
773 {
774 memcpy(pvDst, pvSrc, cbSubTransfer);
775 cbTransfer -= cbSubTransfer;
776 cbTransfered += cbSubTransfer;
777 }
778 else
779 {
780 cbTransfer = 0; /* to break */
781 }
782
783 if (bSrcLocked)
784 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &SrcLock);
785 if (bDstLocked)
786 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &DstLock);
787 } while (cbTransfer);
788
789 if (RT_SUCCESS(rc))
790 return sizeof (*pTransfer);
791 return rc;
792}
793
794static int vboxVDMACmdExec(PVBOXVDMAHOST pVdma, const uint8_t *pvBuffer, uint32_t cbBuffer)
795{
796 do
797 {
798 Assert(pvBuffer);
799 Assert(cbBuffer >= VBOXVDMACMD_HEADER_SIZE());
800
801 if (!pvBuffer)
802 return VERR_INVALID_PARAMETER;
803 if (cbBuffer < VBOXVDMACMD_HEADER_SIZE())
804 return VERR_INVALID_PARAMETER;
805
806 PVBOXVDMACMD pCmd = (PVBOXVDMACMD)pvBuffer;
807 uint32_t cbCmd = 0;
808 switch (pCmd->enmType)
809 {
810 case VBOXVDMACMD_TYPE_CHROMIUM_CMD:
811 {
812#ifdef VBOXWDDM_TEST_UHGSMI
813 static int count = 0;
814 static uint64_t start, end;
815 if (count==0)
816 {
817 start = RTTimeNanoTS();
818 }
819 ++count;
820 if (count==100000)
821 {
822 end = RTTimeNanoTS();
823 float ems = (end-start)/1000000.f;
824 LogRel(("100000 calls took %i ms, %i cps\n", (int)ems, (int)(100000.f*1000.f/ems) ));
825 }
826#endif
827 /* todo: post the buffer to chromium */
828 return VINF_SUCCESS;
829 }
830 case VBOXVDMACMD_TYPE_DMA_PRESENT_BLT:
831 {
832 const PVBOXVDMACMD_DMA_PRESENT_BLT pBlt = VBOXVDMACMD_BODY(pCmd, VBOXVDMACMD_DMA_PRESENT_BLT);
833 int cbBlt = vboxVDMACmdExecBlt(pVdma, pBlt, cbBuffer);
834 Assert(cbBlt >= 0);
835 Assert((uint32_t)cbBlt <= cbBuffer);
836 if (cbBlt >= 0)
837 {
838 if ((uint32_t)cbBlt == cbBuffer)
839 return VINF_SUCCESS;
840 else
841 {
842 cbBuffer -= (uint32_t)cbBlt;
843 pvBuffer -= cbBlt;
844 }
845 }
846 else
847 return cbBlt; /* error */
848 break;
849 }
850 case VBOXVDMACMD_TYPE_DMA_BPB_TRANSFER:
851 {
852 const PVBOXVDMACMD_DMA_BPB_TRANSFER pTransfer = VBOXVDMACMD_BODY(pCmd, VBOXVDMACMD_DMA_BPB_TRANSFER);
853 int cbTransfer = vboxVDMACmdExecBpbTransfer(pVdma, pTransfer, cbBuffer);
854 Assert(cbTransfer >= 0);
855 Assert((uint32_t)cbTransfer <= cbBuffer);
856 if (cbTransfer >= 0)
857 {
858 if ((uint32_t)cbTransfer == cbBuffer)
859 return VINF_SUCCESS;
860 else
861 {
862 cbBuffer -= (uint32_t)cbTransfer;
863 pvBuffer -= cbTransfer;
864 }
865 }
866 else
867 return cbTransfer; /* error */
868 break;
869 }
870 case VBOXVDMACMD_TYPE_DMA_NOP:
871 return VINF_SUCCESS;
872 case VBOXVDMACMD_TYPE_CHILD_STATUS_IRQ:
873 return VINF_SUCCESS;
874 default:
875 AssertBreakpoint();
876 return VERR_INVALID_FUNCTION;
877 }
878 } while (1);
879
880 /* we should not be here */
881 AssertBreakpoint();
882 return VERR_INVALID_STATE;
883}
884
885#ifdef VBOX_VDMA_WITH_WORKERTHREAD
886
887int vboxVDMAPipeConstruct(PVBOXVDMAPIPE pPipe)
888{
889 int rc = RTSemEventCreate(&pPipe->hEvent);
890 AssertRC(rc);
891 if (RT_SUCCESS(rc))
892 {
893 rc = RTCritSectInit(&pPipe->hCritSect);
894 AssertRC(rc);
895 if (RT_SUCCESS(rc))
896 {
897 pPipe->enmState = VBOXVDMAPIPE_STATE_CREATED;
898 pPipe->bNeedNotify = true;
899 return VINF_SUCCESS;
900// RTCritSectDelete(pPipe->hCritSect);
901 }
902 RTSemEventDestroy(pPipe->hEvent);
903 }
904 return rc;
905}
906
907int vboxVDMAPipeOpenServer(PVBOXVDMAPIPE pPipe)
908{
909 int rc = RTCritSectEnter(&pPipe->hCritSect);
910 AssertRC(rc);
911 if (RT_SUCCESS(rc))
912 {
913 Assert(pPipe->enmState == VBOXVDMAPIPE_STATE_CREATED);
914 switch (pPipe->enmState)
915 {
916 case VBOXVDMAPIPE_STATE_CREATED:
917 pPipe->enmState = VBOXVDMAPIPE_STATE_OPENNED;
918 pPipe->bNeedNotify = false;
919 rc = VINF_SUCCESS;
920 break;
921 case VBOXVDMAPIPE_STATE_OPENNED:
922 pPipe->bNeedNotify = false;
923 rc = VINF_ALREADY_INITIALIZED;
924 break;
925 default:
926 AssertBreakpoint();
927 rc = VERR_INVALID_STATE;
928 break;
929 }
930
931 RTCritSectLeave(&pPipe->hCritSect);
932 }
933 return rc;
934}
935
936int vboxVDMAPipeCloseServer(PVBOXVDMAPIPE pPipe)
937{
938 int rc = RTCritSectEnter(&pPipe->hCritSect);
939 AssertRC(rc);
940 if (RT_SUCCESS(rc))
941 {
942 Assert(pPipe->enmState == VBOXVDMAPIPE_STATE_CLOSED
943 || pPipe->enmState == VBOXVDMAPIPE_STATE_CLOSING);
944 switch (pPipe->enmState)
945 {
946 case VBOXVDMAPIPE_STATE_CLOSING:
947 pPipe->enmState = VBOXVDMAPIPE_STATE_CLOSED;
948 rc = VINF_SUCCESS;
949 break;
950 case VBOXVDMAPIPE_STATE_CLOSED:
951 rc = VINF_ALREADY_INITIALIZED;
952 break;
953 default:
954 AssertBreakpoint();
955 rc = VERR_INVALID_STATE;
956 break;
957 }
958
959 RTCritSectLeave(&pPipe->hCritSect);
960 }
961 return rc;
962}
963
/**
 * Client (submitter) side: initiates closing of the pipe.
 *
 * An OPENNED pipe goes to CLOSING (the server completes the close in
 * vboxVDMAPipeCloseServer); a never-opened pipe goes straight to CLOSED.
 *
 * @returns VINF_SUCCESS, VINF_ALREADY_INITIALIZED if already closed, or
 *          VERR_INVALID_STATE.
 */
int vboxVDMAPipeCloseClient(PVBOXVDMAPIPE pPipe)
{
    int rc = RTCritSectEnter(&pPipe->hCritSect);
    AssertRC(rc);
    if (RT_SUCCESS(rc))
    {
        bool bNeedNotify = false;
        Assert(pPipe->enmState == VBOXVDMAPIPE_STATE_OPENNED
                || pPipe->enmState ==  VBOXVDMAPIPE_STATE_CREATED
                || pPipe->enmState == VBOXVDMAPIPE_STATE_CLOSED);
        switch (pPipe->enmState)
        {
            case VBOXVDMAPIPE_STATE_OPENNED:
                pPipe->enmState = VBOXVDMAPIPE_STATE_CLOSING;
                /* remember whether the server is blocked and must be woken */
                bNeedNotify = pPipe->bNeedNotify;
                pPipe->bNeedNotify = false;
                break;
            case VBOXVDMAPIPE_STATE_CREATED:
                pPipe->enmState = VBOXVDMAPIPE_STATE_CLOSED;
                pPipe->bNeedNotify = false;
                break;
            case VBOXVDMAPIPE_STATE_CLOSED:
                rc = VINF_ALREADY_INITIALIZED;
                break;
            default:
                AssertBreakpoint();
                rc = VERR_INVALID_STATE;
                break;
        }

        RTCritSectLeave(&pPipe->hCritSect);

        /* signal outside the critsect so the woken server does not immediately block on it */
        if (bNeedNotify)
        {
            rc = RTSemEventSignal(pPipe->hEvent);
            AssertRC(rc);
        }
    }
    return rc;
}
1004
1005
1006typedef DECLCALLBACK(bool) FNHVBOXVDMARWCB(PVBOXVDMAPIPE pPipe, void *pvCallback);
1007typedef FNHVBOXVDMARWCB *PFNHVBOXVDMARWCB;
1008
/**
 * Server (worker) side: repeatedly runs @a pfnCallback under the pipe lock,
 * sleeping on the pipe event between rounds while the callback reports no work.
 *
 * The callback returns true while it has work to do; when it returns false the
 * worker either completes a pending close (-> VINF_EOF) or blocks until the
 * client signals new work.
 *
 * @returns VINF_SUCCESS when there is work, VINF_EOF when the pipe got closed,
 *          VERR_xxx on lock/wait failure.
 */
int vboxVDMAPipeModifyServer(PVBOXVDMAPIPE pPipe, PFNHVBOXVDMARWCB pfnCallback, void * pvCallback)
{
    int rc = RTCritSectEnter(&pPipe->hCritSect);
    AssertRC(rc);
    if (RT_SUCCESS(rc))
    {
        do
        {
            Assert(pPipe->enmState == VBOXVDMAPIPE_STATE_OPENNED
                    || pPipe->enmState == VBOXVDMAPIPE_STATE_CLOSING);

            if (pPipe->enmState >= VBOXVDMAPIPE_STATE_OPENNED)
            {
                bool bProcessing = pfnCallback(pPipe, pvCallback);
                /* when there is nothing to process the client must signal us */
                pPipe->bNeedNotify = !bProcessing;
                if (bProcessing)
                {
                    RTCritSectLeave(&pPipe->hCritSect);
                    rc = VINF_SUCCESS;
                    break;
                }
                else if (pPipe->enmState == VBOXVDMAPIPE_STATE_CLOSING)
                {
                    /* idle and the client asked to close: complete the close */
                    pPipe->enmState = VBOXVDMAPIPE_STATE_CLOSED;
                    RTCritSectLeave(&pPipe->hCritSect);
                    rc = VINF_EOF;
                    break;
                }
            }
            else
            {
                AssertBreakpoint();
                rc = VERR_INVALID_STATE;
                RTCritSectLeave(&pPipe->hCritSect);
                break;
            }

            /* drop the lock while blocking; the client signals hEvent on new work */
            RTCritSectLeave(&pPipe->hCritSect);

            rc = RTSemEventWait(pPipe->hEvent, RT_INDEFINITE_WAIT);
            AssertRC(rc);
            if (!RT_SUCCESS(rc))
                break;

            rc = RTCritSectEnter(&pPipe->hCritSect);
            AssertRC(rc);
            if (!RT_SUCCESS(rc))
                break;
        } while (1);
    }

    return rc;
}
1062
/**
 * Client-side (submitter) pipe operation: invokes @a pfnCallback once under the
 * pipe critical section and, if the callback modified the pipe and the server
 * asked for notification (bNeedNotify), signals hEvent to wake the server.
 *
 * Note that the event is signalled after leaving the critical section, and the
 * signal result overwrites rc on that path.
 *
 * @returns VINF_SUCCESS on success, VERR_INVALID_STATE if the pipe is not
 *          open, or an IPRT failure code from the lock/event APIs.
 */
int vboxVDMAPipeModifyClient(PVBOXVDMAPIPE pPipe, PFNHVBOXVDMARWCB pfnCallback, void * pvCallback)
{
    int rc = RTCritSectEnter(&pPipe->hCritSect);
    AssertRC(rc);
    if (RT_SUCCESS(rc))
    {
        bool bNeedNotify = false;
        Assert(pPipe->enmState == VBOXVDMAPIPE_STATE_OPENNED);
        if (pPipe->enmState == VBOXVDMAPIPE_STATE_OPENNED)
        {
            bool bModified = pfnCallback(pPipe, pvCallback);
            if (bModified)
            {
                /* Consume the server's notification request; we signal below. */
                bNeedNotify = pPipe->bNeedNotify;
                pPipe->bNeedNotify = false;
            }
        }
        else
            rc = VERR_INVALID_STATE;

        RTCritSectLeave(&pPipe->hCritSect);

        if (bNeedNotify)
        {
            rc = RTSemEventSignal(pPipe->hEvent);
            AssertRC(rc);
        }
    }
    return rc;
}
1093
/**
 * Destroys the pipe: ensures it is closed first, then deletes the critical
 * section and the event semaphore.
 *
 * @returns VINF_SUCCESS, or VERR_INVALID_STATE if the pipe could not be
 *          brought to the CLOSED state.
 */
int vboxVDMAPipeDestruct(PVBOXVDMAPIPE pPipe)
{
    Assert(pPipe->enmState == VBOXVDMAPIPE_STATE_CLOSED
            || pPipe->enmState == VBOXVDMAPIPE_STATE_CREATED);
    /* ensure the pipe is closed */
    vboxVDMAPipeCloseClient(pPipe);

    Assert(pPipe->enmState == VBOXVDMAPIPE_STATE_CLOSED);

    if (pPipe->enmState != VBOXVDMAPIPE_STATE_CLOSED)
        return VERR_INVALID_STATE;

    int rc = RTCritSectDelete(&pPipe->hCritSect);
    AssertRC(rc);

    rc = RTSemEventDestroy(pPipe->hEvent);
    AssertRC(rc);

    return VINF_SUCCESS;
}
1114#endif
1115
/**
 * Locates the command buffer described by @a pCmd, executes it via
 * vboxVDMACmdExec, and completes the command over SHGSMI with the result
 * stored in pCmd->rc.
 *
 * The buffer may (a) follow the descriptor in place, (b) live at an offset in
 * VRAM, or (c) be addressed by guest-physical address, in which case the page
 * is mapped read-only via PGM and unmapped after execution.
 *
 * @param   pVdma   The VDMA host state.
 * @param   pCmd    The command descriptor to process and complete.
 * @param   cbCmd   Size of the descriptor as received; currently unused here.
 */
static void vboxVDMACommandProcess(PVBOXVDMAHOST pVdma, PVBOXVDMACBUF_DR pCmd, uint32_t cbCmd)
{
    PHGSMIINSTANCE pHgsmi = pVdma->pHgsmi;
    const uint8_t * pvBuf;
    PGMPAGEMAPLOCK Lock;   /* only valid when bReleaseLocked is set */
    int rc;
    bool bReleaseLocked = false;

    do
    {
        PPDMDEVINS pDevIns = pVdma->pVGAState->pDevInsR3;

        if (pCmd->fFlags & VBOXVDMACBUF_FLAG_BUF_FOLLOWS_DR)
            pvBuf = VBOXVDMACBUF_DR_TAIL(pCmd, const uint8_t);
        else if (pCmd->fFlags & VBOXVDMACBUF_FLAG_BUF_VRAM_OFFSET)
        {
            uint8_t * pvRam = pVdma->pVGAState->vram_ptrR3;
            pvBuf = pvRam + pCmd->Location.offVramBuf;
        }
        else
        {
            /* Guest-physical buffer: split into page and in-page offset. */
            RTGCPHYS phPage = pCmd->Location.phBuf & ~0xfffULL;
            uint32_t offset = pCmd->Location.phBuf & 0xfff;
            Assert(offset + pCmd->cbBuf <= 0x1000);
            if (offset + pCmd->cbBuf > 0x1000)
            {
                /* @todo: more advanced mechanism of command buffer proc is actually needed */
                rc = VERR_INVALID_PARAMETER;
                break;
            }

            const void * pvPageBuf;
            rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, phPage, 0, &pvPageBuf, &Lock);
            AssertRC(rc);
            if (!RT_SUCCESS(rc))
            {
                /* @todo: if (rc == VERR_PGM_PHYS_PAGE_RESERVED) -> fall back on using PGMPhysRead ?? */
                break;
            }

            pvBuf = (const uint8_t *)pvPageBuf;
            pvBuf += offset;

            bReleaseLocked = true;
        }

        rc = vboxVDMACmdExec(pVdma, pvBuf, pCmd->cbBuf);
        AssertRC(rc);

        /* Release the PGM page mapping before completing the command. */
        if (bReleaseLocked)
            PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
    } while (0);

    pCmd->rc = rc;

    rc = VBoxSHGSMICommandComplete (pHgsmi, pCmd);
    AssertRC(rc);
}
1174
1175static void vboxVDMAControlProcess(PVBOXVDMAHOST pVdma, PVBOXVDMA_CTL pCmd)
1176{
1177 PHGSMIINSTANCE pHgsmi = pVdma->pHgsmi;
1178 pCmd->i32Result = VINF_SUCCESS;
1179 int rc = VBoxSHGSMICommandComplete (pHgsmi, pCmd);
1180 AssertRC(rc);
1181}
1182
1183#ifdef VBOX_VDMA_WITH_WORKERTHREAD
/** Context used by the worker thread to pull one queued command out of the
 *  pipe's pending list. */
typedef struct
{
    struct VBOXVDMAHOST *pVdma;   /* owning VDMA host */
    VBOXVDMAPIPE_CMD_BODY Cmd;    /* copy of the dequeued command body */
    bool bHasCmd;                 /* true when Cmd was filled in */
} VBOXVDMACMD_PROCESS_CONTEXT, *PVBOXVDMACMD_PROCESS_CONTEXT;

/**
 * Pipe callback (server side): pops the head of the pending list, copies its
 * command body into the context, and returns the list entry to the pool.
 * @returns true when a command was dequeued, false when the list was empty.
 */
static DECLCALLBACK(bool) vboxVDMACommandProcessCb(PVBOXVDMAPIPE pPipe, void *pvCallback)
{
    PVBOXVDMACMD_PROCESS_CONTEXT pContext = (PVBOXVDMACMD_PROCESS_CONTEXT)pvCallback;
    struct VBOXVDMAHOST *pVdma = pContext->pVdma;
    HGSMILISTENTRY *pEntry = hgsmiListRemoveHead(&pVdma->PendingList);
    if (pEntry)
    {
        PVBOXVDMAPIPE_CMD pPipeCmd = VBOXVDMAPIPE_CMD_FROM_ENTRY(pEntry);
        Assert(pPipeCmd);
        /* Copy the body before recycling the entry into the free pool. */
        pContext->Cmd = pPipeCmd->Cmd;
        hgsmiListPrepend(&pVdma->CmdPool.List, pEntry);
        pContext->bHasCmd = true;
        return true;
    }

    pContext->bHasCmd = false;
    return false;
}
1209
1210static DECLCALLBACK(int) vboxVDMAWorkerThread(RTTHREAD ThreadSelf, void *pvUser)
1211{
1212 PVBOXVDMAHOST pVdma = (PVBOXVDMAHOST)pvUser;
1213 PHGSMIINSTANCE pHgsmi = pVdma->pHgsmi;
1214 VBOXVDMACMD_PROCESS_CONTEXT Context;
1215 Context.pVdma = pVdma;
1216
1217 int rc = vboxVDMAPipeOpenServer(&pVdma->Pipe);
1218 AssertRC(rc);
1219 if (RT_SUCCESS(rc))
1220 {
1221 do
1222 {
1223 rc = vboxVDMAPipeModifyServer(&pVdma->Pipe, vboxVDMACommandProcessCb, &Context);
1224 AssertRC(rc);
1225 if (RT_SUCCESS(rc))
1226 {
1227 switch (Context.Cmd.enmType)
1228 {
1229 case VBOXVDMAPIPE_CMD_TYPE_DMACMD:
1230 {
1231 PVBOXVDMACBUF_DR pDr = Context.Cmd.u.pDr;
1232 vboxVDMACommandProcess(pVdma, pDr);
1233 break;
1234 }
1235 case VBOXVDMAPIPE_CMD_TYPE_DMACTL:
1236 {
1237 PVBOXVDMA_CTL pCtl = Context.Cmd.u.pCtl;
1238 vboxVDMAControlProcess(pVdma, pCtl);
1239 break;
1240 }
1241 default:
1242 AssertBreakpoint();
1243 break;
1244 }
1245
1246 if (rc == VINF_EOF)
1247 {
1248 rc = VINF_SUCCESS;
1249 break;
1250 }
1251 }
1252 else
1253 break;
1254 } while (1);
1255 }
1256
1257 /* always try to close the pipe to make sure the client side is notified */
1258 int tmpRc = vboxVDMAPipeCloseServer(&pVdma->Pipe);
1259 AssertRC(tmpRc);
1260 return rc;
1261}
1262#endif
1263
1264#ifdef VBOX_VDMA_WITH_WATCHDOG
/** Watchdog timer callback: raises the HGSMI watchdog IRQ towards the guest. */
static DECLCALLBACK(void) vboxVDMAWatchDogTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
{
    VBOXVDMAHOST *pVdma = (VBOXVDMAHOST *)pvUser;
    PVGASTATE pVGAState = pVdma->pVGAState;
    VBVARaiseIrq(pVGAState, HGSMIHOSTFLAGS_WATCHDOG);
}
1271
1272static int vboxVDMAWatchDogCtl(struct VBOXVDMAHOST *pVdma, uint32_t cMillis)
1273{
1274 PPDMDEVINS pDevIns = pVdma->pVGAState->pDevInsR3;
1275 if (cMillis)
1276 TMTimerSetMillies(pVdma->WatchDogTimer, cMillis);
1277 else
1278 TMTimerStop(pVdma->WatchDogTimer);
1279 return VINF_SUCCESS;
1280}
1281#endif
1282
/**
 * Creates and initializes the VDMA host state and publishes it in
 * pVGAState->pVdma.
 *
 * Depending on build configuration this also creates the watchdog timer
 * and/or the worker thread pipe + command pool (cPipeElements entries).
 * NOTE(review): the success path and the worker-thread failure path are
 * interleaved via #ifdef blocks below; the un-indented statements before
 * `return VINF_SUCCESS;` execute in both configurations.
 *
 * @returns VINF_SUCCESS, VERR_OUT_OF_RESOURCES on allocation failure, or a
 *          worker-thread/pipe construction failure code.
 */
int vboxVDMAConstruct(PVGASTATE pVGAState, uint32_t cPipeElements)
{
    int rc;
#ifdef VBOX_VDMA_WITH_WORKERTHREAD
    PVBOXVDMAHOST pVdma = (PVBOXVDMAHOST)RTMemAllocZ(RT_OFFSETOF(VBOXVDMAHOST, CmdPool.aCmds[cPipeElements]));
#else
    PVBOXVDMAHOST pVdma = (PVBOXVDMAHOST)RTMemAllocZ(sizeof(*pVdma));
#endif
    Assert(pVdma);
    if (pVdma)
    {
        pVdma->pHgsmi = pVGAState->pHGSMI;
        pVdma->pVGAState = pVGAState;

#ifdef VBOX_VDMA_WITH_WATCHDOG
        /* NOTE(review): a timer creation failure is asserted but not
         * propagated; construction continues regardless. */
        rc = PDMDevHlpTMTimerCreate(pVGAState->pDevInsR3, TMCLOCK_REAL, vboxVDMAWatchDogTimer,
                                        pVdma, TMTIMER_FLAGS_NO_CRIT_SECT,
                                        "VDMA WatchDog Timer", &pVdma->WatchDogTimer);
        AssertRC(rc);
#endif
#ifdef VBOX_VDMA_WITH_WORKERTHREAD
        hgsmiListInit(&pVdma->PendingList);
        rc = vboxVDMAPipeConstruct(&pVdma->Pipe);
        AssertRC(rc);
        if (RT_SUCCESS(rc))
        {
            rc = RTThreadCreate(&pVdma->hWorkerThread, vboxVDMAWorkerThread, pVdma, 0, RTTHREADTYPE_IO, RTTHREADFLAGS_WAITABLE, "VDMA");
            AssertRC(rc);
            if (RT_SUCCESS(rc))
            {
                /* Populate the free command pool with the tail-allocated entries. */
                hgsmiListInit(&pVdma->CmdPool.List);
                pVdma->CmdPool.cCmds = cPipeElements;
                for (uint32_t i = 0; i < cPipeElements; ++i)
                {
                    hgsmiListAppend(&pVdma->CmdPool.List, &pVdma->CmdPool.aCmds[i].Entry);
                }
# if 0 //def VBOX_WITH_CRHGSMI
                int tmpRc = vboxVDMACrCtlHgsmiSetup(pVdma);
# endif
#endif
        pVGAState->pVdma = pVdma;
        VBoxVBVAExHSInit(&pVdma->CmdVbva);
#ifdef VBOX_WITH_CRHGSMI
        int rcIgnored = vboxVDMACrCtlHgsmiSetup(pVdma); NOREF(rcIgnored); /** @todo is this ignoring intentional? */
#endif
        return VINF_SUCCESS;
#ifdef VBOX_VDMA_WITH_WORKERTHREAD
            }

            /* Thread creation failed: tear the pipe down again. */
            int tmpRc = vboxVDMAPipeDestruct(&pVdma->Pipe);
            AssertRC(tmpRc);
        }

        RTMemFree(pVdma);
#endif
    }
    else
        rc = VERR_OUT_OF_RESOURCES;

    return rc;
}
1344
/**
 * Destroys the VDMA host state: terminates the VBVA-ex context and frees the
 * instance memory.  Worker-thread teardown is not implemented.
 *
 * @returns VINF_SUCCESS.
 */
int vboxVDMADestruct(struct VBOXVDMAHOST *pVdma)
{
#ifdef VBOX_VDMA_WITH_WORKERTHREAD
    /* @todo: implement*/
    AssertBreakpoint();
#endif
    VBoxVBVAExHSTerm(&pVdma->CmdVbva);
    RTMemFree(pVdma);
    return VINF_SUCCESS;
}
1355
1356#ifdef VBOX_VDMA_WITH_WORKERTHREAD
/** Context used by the client side to queue one command for the worker. */
typedef struct
{
    struct VBOXVDMAHOST *pVdma;   /* owning VDMA host */
    VBOXVDMAPIPE_CMD_BODY Cmd;    /* command body to queue */
    bool bQueued;                 /* set to true when successfully queued */
} VBOXVDMACMD_SUBMIT_CONTEXT, *PVBOXVDMACMD_SUBMIT_CONTEXT;

/**
 * Pipe callback (client side): takes a free entry from the command pool,
 * stores the command body in it, marks the SHGSMI command for asynchronous
 * completion, and appends the entry to the pending list.
 * @returns true when the command was queued, false when the pool is empty.
 */
DECLCALLBACK(bool) vboxVDMACommandSubmitCb(PVBOXVDMAPIPE pPipe, void *pvCallback)
{
    PVBOXVDMACMD_SUBMIT_CONTEXT pContext = (PVBOXVDMACMD_SUBMIT_CONTEXT)pvCallback;
    struct VBOXVDMAHOST *pVdma = pContext->pVdma;
    HGSMILISTENTRY *pEntry = hgsmiListRemoveHead(&pVdma->CmdPool.List);
    Assert(pEntry);
    if (pEntry)
    {
        PVBOXVDMAPIPE_CMD pPipeCmd = VBOXVDMAPIPE_CMD_FROM_ENTRY(pEntry);
        pPipeCmd->Cmd = pContext->Cmd;
        /* Completion will happen later on the worker thread. */
        VBoxSHGSMICommandMarkAsynchCompletion(pContext->Cmd.u.pvCmd);
        pContext->bQueued = true;
        hgsmiListAppend(&pVdma->PendingList, pEntry);
        return true;
    }

    /* @todo: should we try to flush some commands here? */
    pContext->bQueued = false;
    return false;
}
1384#endif
1385
1386int vboxVDMASaveStateExecPrep(struct VBOXVDMAHOST *pVdma, PSSMHANDLE pSSM)
1387{
1388#ifdef VBOX_WITH_CRHGSMI
1389 PVGASTATE pVGAState = pVdma->pVGAState;
1390 PVBOXVDMACMD_CHROMIUM_CTL pCmd = (PVBOXVDMACMD_CHROMIUM_CTL)vboxVDMACrCtlCreate(
1391 VBOXVDMACMD_CHROMIUM_CTL_TYPE_SAVESTATE_BEGIN, sizeof (*pCmd));
1392 Assert(pCmd);
1393 if (pCmd)
1394 {
1395 int rc = vboxVDMACrCtlPost(pVGAState, pCmd, sizeof (*pCmd));
1396 AssertRC(rc);
1397 if (RT_SUCCESS(rc))
1398 {
1399 rc = vboxVDMACrCtlGetRc(pCmd);
1400 }
1401 vboxVDMACrCtlRelease(pCmd);
1402 return rc;
1403 }
1404 return VERR_NO_MEMORY;
1405#else
1406 return VINF_SUCCESS;
1407#endif
1408}
1409
1410int vboxVDMASaveStateExecDone(struct VBOXVDMAHOST *pVdma, PSSMHANDLE pSSM)
1411{
1412#ifdef VBOX_WITH_CRHGSMI
1413 PVGASTATE pVGAState = pVdma->pVGAState;
1414 PVBOXVDMACMD_CHROMIUM_CTL pCmd = (PVBOXVDMACMD_CHROMIUM_CTL)vboxVDMACrCtlCreate(
1415 VBOXVDMACMD_CHROMIUM_CTL_TYPE_SAVESTATE_END, sizeof (*pCmd));
1416 Assert(pCmd);
1417 if (pCmd)
1418 {
1419 int rc = vboxVDMACrCtlPost(pVGAState, pCmd, sizeof (*pCmd));
1420 AssertRC(rc);
1421 if (RT_SUCCESS(rc))
1422 {
1423 rc = vboxVDMACrCtlGetRc(pCmd);
1424 }
1425 vboxVDMACrCtlRelease(pCmd);
1426 return rc;
1427 }
1428 return VERR_NO_MEMORY;
1429#else
1430 return VINF_SUCCESS;
1431#endif
1432}
1433
/**
 * Handles a VDMA control command from the guest and completes it synchronously
 * over SHGSMI.  The #else branch below is compiled-out test code that would
 * route the command through the worker-thread pipe instead.
 *
 * @param   pVdma   The VDMA host state.
 * @param   pCmd    The control command; i32Result receives the status.
 * @param   cbCmd   Size of the command (unused here).
 */
void vboxVDMAControl(struct VBOXVDMAHOST *pVdma, PVBOXVDMA_CTL pCmd, uint32_t cbCmd)
{
#if 1
    PHGSMIINSTANCE pIns = pVdma->pHgsmi;

    switch (pCmd->enmCtl)
    {
        case VBOXVDMA_CTL_TYPE_ENABLE:
            pCmd->i32Result = VINF_SUCCESS;
            break;
        case VBOXVDMA_CTL_TYPE_DISABLE:
            pCmd->i32Result = VINF_SUCCESS;
            break;
        case VBOXVDMA_CTL_TYPE_FLUSH:
            pCmd->i32Result = VINF_SUCCESS;
            break;
#ifdef VBOX_VDMA_WITH_WATCHDOG
        case VBOXVDMA_CTL_TYPE_WATCHDOG:
            pCmd->i32Result = vboxVDMAWatchDogCtl(pVdma, pCmd->u32Offset);
            break;
#endif
        default:
            AssertBreakpoint();
            pCmd->i32Result = VERR_NOT_SUPPORTED;
    }

    int rc = VBoxSHGSMICommandComplete (pIns, pCmd);
    AssertRC(rc);
#else
    /* test asinch completion */
    VBOXVDMACMD_SUBMIT_CONTEXT Context;
    Context.pVdma = pVdma;
    Context.Cmd.enmType = VBOXVDMAPIPE_CMD_TYPE_DMACTL;
    Context.Cmd.u.pCtl = pCmd;

    int rc = vboxVDMAPipeModifyClient(&pVdma->Pipe, vboxVDMACommandSubmitCb, &Context);
    AssertRC(rc);
    if (RT_SUCCESS(rc))
    {
        Assert(Context.bQueued);
        if (Context.bQueued)
        {
            /* success */
            return;
        }
        rc = VERR_OUT_OF_RESOURCES;
    }

    /* failure */
    Assert(RT_FAILURE(rc));
    PHGSMIINSTANCE pIns = pVdma->pHgsmi;
    pCmd->i32Result = rc;
    int tmpRc = VBoxSHGSMICommandComplete (pIns, pCmd);
    AssertRC(tmpRc);

#endif
}
1491
/**
 * Entry point for guest VDMA command buffers.  Chromium commands are filtered
 * out first (they are handled by the Chromium HGCM thread); remaining commands
 * are either processed inline or, in the worker-thread configuration, queued
 * to the worker via the pipe.  On any failure the command is completed
 * immediately with the error in pCmd->rc.
 *
 * @param   pVdma   The VDMA host state.
 * @param   pCmd    The command buffer descriptor.
 * @param   cbCmd   Size of the descriptor as received from HGSMI.
 */
void vboxVDMACommand(struct VBOXVDMAHOST *pVdma, PVBOXVDMACBUF_DR pCmd, uint32_t cbCmd)
{
    int rc = VERR_NOT_IMPLEMENTED;

#ifdef VBOX_WITH_CRHGSMI
    /* chromium commands are processed by crhomium hgcm thread independently from our internal cmd processing pipeline
     * this is why we process them specially */
    rc = vboxVDMACmdCheckCrCmd(pVdma, pCmd, cbCmd);
    if (rc == VINF_SUCCESS)
        return;

    if (RT_FAILURE(rc))
    {
        pCmd->rc = rc;
        rc = VBoxSHGSMICommandComplete (pVdma->pHgsmi, pCmd);
        AssertRC(rc);
        return;
    }
#endif

#ifndef VBOX_VDMA_WITH_WORKERTHREAD
    vboxVDMACommandProcess(pVdma, pCmd, cbCmd);
#else

# ifdef DEBUG_misha
    Assert(0);
# endif

    VBOXVDMACMD_SUBMIT_CONTEXT Context;
    Context.pVdma = pVdma;
    Context.Cmd.enmType = VBOXVDMAPIPE_CMD_TYPE_DMACMD;
    Context.Cmd.u.pDr = pCmd;

    rc = vboxVDMAPipeModifyClient(&pVdma->Pipe, vboxVDMACommandSubmitCb, &Context);
    AssertRC(rc);
    if (RT_SUCCESS(rc))
    {
        Assert(Context.bQueued);
        if (Context.bQueued)
        {
            /* success */
            return;
        }
        rc = VERR_OUT_OF_RESOURCES;
    }
    /* failure */
    Assert(RT_FAILURE(rc));
    PHGSMIINSTANCE pIns = pVdma->pHgsmi;
    pCmd->rc = rc;
    int tmpRc = VBoxSHGSMICommandComplete (pIns, pCmd);
    AssertRC(tmpRc);
#endif
}
1545
1546/**/
/**
 * Tries to make the calling thread the exclusive VBVA command processor by
 * atomically moving the state LISTENING -> PROCESSING.
 *
 * @returns VINF_SUCCESS when this thread now owns the PROCESSING state,
 *          VERR_SEM_BUSY when another thread is already processing,
 *          VERR_INVALID_STATE when paused or in an unexpected state.
 */
static int vboxVBVAExHSProcessorAcquire(struct VBVAEXHOSTCONTEXT *pCmdVbva)
{
    Assert(pCmdVbva->u32State != VBVAEXHOSTCONTEXT_STATE_STOPPED);

    uint32_t oldState;
    if (!ASMAtomicReadU32(&pCmdVbva->u32Pause))
    {
        if (ASMAtomicCmpXchgExU32(&pCmdVbva->u32State, VBVAEXHOSTCONTEXT_STATE_PROCESSING, VBVAEXHOSTCONTEXT_STATE_LISTENING, &oldState))
            return VINF_SUCCESS;
        return oldState == VBVAEXHOSTCONTEXT_STATE_PROCESSING ? VERR_SEM_BUSY : VERR_INVALID_STATE;
    }
    return VERR_INVALID_STATE;
}
1560
/**
 * Processor-context helper: if a pause has been requested (u32Pause set),
 * moves the state to PAUSED and returns true; otherwise returns false.
 * Must only be called while holding the PROCESSING state.
 */
static bool vboxVBVAExHPCheckPause(struct VBVAEXHOSTCONTEXT *pCmdVbva)
{
    Assert(pCmdVbva->u32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);

    if (!ASMAtomicReadU32(&pCmdVbva->u32Pause))
        return false;

    ASMAtomicWriteU32(&pCmdVbva->u32State, VBVAEXHOSTCONTEXT_STATE_PAUSED);
    return true;
}
1571
1572static bool vboxVBVAExHPCheckOtherCommands(struct VBVAEXHOSTCONTEXT *pCmdVbva)
1573{
1574 Assert(pCmdVbva->u32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
1575
1576 return !!ASMAtomicUoReadU32(&pCmdVbva->u32cOtherCommands);
1577}
1578
1579static void vboxVBVAExHPProcessorRelease(struct VBVAEXHOSTCONTEXT *pCmdVbva)
1580{
1581 Assert(pCmdVbva->u32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
1582
1583 if (!vboxVBVAExHPCheckPause(pCmdVbva))
1584 ASMAtomicWriteU32(&pCmdVbva->u32State, VBVAEXHOSTCONTEXT_STATE_LISTENING);
1585 else
1586 ASMAtomicWriteU32(&pCmdVbva->u32State, VBVAEXHOSTCONTEXT_STATE_PAUSED);
1587}
1588
/** Sets the VBVA_F_STATE_PROCESSING flag in the guest-visible host flags.
 *  Must only be called while holding the PROCESSING state. */
static void vboxVBVAExHPHgEventSet(struct VBVAEXHOSTCONTEXT *pCmdVbva)
{
    Assert(pCmdVbva->u32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);

    ASMAtomicOrU32(&pCmdVbva->pVBVA->hostFlags.u32HostEvents, VBVA_F_STATE_PROCESSING);
}
1595
/** Clears the VBVA_F_STATE_PROCESSING flag in the guest-visible host flags.
 *  Must only be called while holding the PROCESSING state. */
static void vboxVBVAExHPHgEventClear(struct VBVAEXHOSTCONTEXT *pCmdVbva)
{
    Assert(pCmdVbva->u32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);

    ASMAtomicAndU32(&pCmdVbva->pVBVA->hostFlags.u32HostEvents, ~VBVA_F_STATE_PROCESSING);
}
1602
1603static bool vboxVBVAExHPCmdCheckRelease(struct VBVAEXHOSTCONTEXT *pCmdVbva)
1604{
1605 if (!pCmdVbva->cbCurData)
1606 return false;
1607
1608 VBVABUFFER *pVBVA = pCmdVbva->pVBVA;
1609 pVBVA->off32Data = (pVBVA->off32Data + pCmdVbva->cbCurData) % pVBVA->cbData;
1610
1611 pVBVA->indexRecordFirst = (pVBVA->indexRecordFirst + 1) % RT_ELEMENTS(pVBVA->aRecords);
1612
1613 pCmdVbva->cbCurData = 0;
1614
1615 return true;
1616}
1617
/**
 * Fetches the next complete command from the VBVA ring buffer.
 *
 * @returns VINF_SUCCESS with *ppCmd/*pcbCmd set (and cbCurData recorded for the
 *          later release), VINF_EOF when there are no records,
 *          VINF_TRY_AGAIN when the head record is still being written, or
 *          VERR_INVALID_STATE when the command wraps the ring boundary
 *          (unsupported).
 */
static int vboxVBVAExHPCmdGet(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t **ppCmd, uint32_t *pcbCmd)
{
    Assert(pCmdVbva->u32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);

    VBVABUFFER *pVBVA = pCmdVbva->pVBVA;

    uint32_t indexRecordFirst = pVBVA->indexRecordFirst;
    uint32_t indexRecordFree = pVBVA->indexRecordFree;

    Log(("first = %d, free = %d\n",
           indexRecordFirst, indexRecordFree));

    if (indexRecordFirst == indexRecordFree)
    {
        /* No records to process. Return without assigning output variables. */
        return VINF_EOF;
    }

    uint32_t cbRecordCurrent = ASMAtomicReadU32(&pVBVA->aRecords[indexRecordFirst].cbRecord);

    /* A new record need to be processed. */
    if (cbRecordCurrent & VBVA_F_RECORD_PARTIAL)
    {
        /* the record is being recorded, try again */
        return VINF_TRY_AGAIN;
    }

    uint32_t cbRecord = cbRecordCurrent & ~VBVA_F_RECORD_PARTIAL;

    if (!cbRecord)
    {
        /* the record is being recorded, try again */
        return VINF_TRY_AGAIN;
    }

    /* we should not get partial commands here actually */
    Assert(cbRecord);

    /* The size of largest contiguous chunk in the ring buffer. */
    uint32_t u32BytesTillBoundary = pVBVA->cbData - pVBVA->off32Data;

    /* The pointer to data in the ring buffer. */
    uint8_t *pSrc = &pVBVA->au8Data[pVBVA->off32Data];

    /* Fetch or point the data. */
    if (u32BytesTillBoundary >= cbRecord)
    {
        /* The command does not cross buffer boundary. Return address in the buffer. */
        *ppCmd = pSrc;
        *pcbCmd = cbRecord;
        pCmdVbva->cbCurData = cbRecord;
        return VINF_SUCCESS;
    }

    LogRel(("CmdVbva: cross-bound writes unsupported\n"));
    return VERR_INVALID_STATE;
}
1675
/* Resumes command processing
 * @returns - same as VBoxVBVAExHSCheckCommands
 */
static int vboxVBVAExHSResume(struct VBVAEXHOSTCONTEXT *pCmdVbva)
{
    Assert(pCmdVbva->u32State != VBVAEXHOSTCONTEXT_STATE_STOPPED);

    /* Re-enter LISTENING, then immediately probe for commands that may have
     * been submitted while paused. */
    ASMAtomicWriteU32(&pCmdVbva->u32State, VBVAEXHOSTCONTEXT_STATE_LISTENING);

    return VBoxVBVAExHSCheckCommands(pCmdVbva);
}
1687
/* pause the command processing. this will make the processor stop the command processing and release the processing state
 * to resume the command processing the vboxVBVAExHSResume must be called */
static void vboxVBVAExHSPause(struct VBVAEXHOSTCONTEXT *pCmdVbva)
{
    Assert(pCmdVbva->u32State != VBVAEXHOSTCONTEXT_STATE_STOPPED);

    Assert(!pCmdVbva->u32Pause);

    /* Request the pause; an active processor observes this flag via
     * vboxVBVAExHPCheckPause and parks itself in PAUSED. */
    ASMAtomicWriteU32(&pCmdVbva->u32Pause, 1);

    /* Spin until the state reaches PAUSED, either by our CAS (when the context
     * was idle in LISTENING) or by the processor thread noticing the flag. */
    for(;;)
    {
        if (ASMAtomicCmpXchgU32(&pCmdVbva->u32State, VBVAEXHOSTCONTEXT_STATE_PAUSED, VBVAEXHOSTCONTEXT_STATE_LISTENING))
            break;

        if (ASMAtomicReadU32(&pCmdVbva->u32State) == VBVAEXHOSTCONTEXT_STATE_PAUSED)
            break;

        RTThreadSleep(2);
    }

    /* Clear the request; the state itself stays PAUSED until resume. */
    pCmdVbva->u32Pause = 0;
}
1711
/* releases (completed) the command previously acquired by VBoxVBVAExHCmdGet
 * for convenience can be called if no command is currently acquired
 * in that case it will do nothing and return false.
 * if the completion notification is needed returns true. */
static bool VBoxVBVAExHPCmdCheckRelease(struct VBVAEXHOSTCONTEXT *pCmdVbva)
{
    Assert(pCmdVbva->u32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);

    /* Thin asserting wrapper around the internal release helper. */
    return vboxVBVAExHPCmdCheckRelease(pCmdVbva);
}
1722
1723/*
1724 * @returns
1725 * VINF_SUCCESS - new command is obtained
1726 * VINF_EOF - processor has completed all commands and release the processing state, only VBoxVBVAExHS*** calls are now allowed
1727 * VINF_PERMISSION_DENIED - processing was paused, processing state was released, only VBoxVBVAExHS*** calls are now allowed
1728 * VINF_INTERRUPTED - command processing was interrupted, processor state remains set. client can process any commands,
1729 * and call VBoxVBVAExHPCmdGet again for further processing
1730 * VERR_** - error happened, most likely guest corrupted VBVA data
1731 *
1732 */
1733static int VBoxVBVAExHPCmdGet(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t **ppCmd, uint32_t *pcbCmd)
1734{
1735 Assert(pCmdVbva->u32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
1736
1737 for(;;)
1738 {
1739 if (vboxVBVAExHPCheckPause(pCmdVbva))
1740 return VINF_PERMISSION_DENIED;
1741 if (vboxVBVAExHPCheckOtherCommands(pCmdVbva))
1742 return VINF_INTERRUPTED;
1743
1744 int rc = vboxVBVAExHPCmdGet(pCmdVbva, ppCmd, pcbCmd);
1745 switch (rc)
1746 {
1747 case VINF_SUCCESS:
1748 return VINF_SUCCESS;
1749 case VINF_EOF:
1750 vboxVBVAExHPHgEventClear(pCmdVbva);
1751 vboxVBVAExHPProcessorRelease(pCmdVbva);
1752 /* we need to prevent racing between us clearing the flag and command check/submission thread, i.e.
1753 * 1. we check the queue -> and it is empty
1754 * 2. submitter adds command to the queue
1755 * 3. submitter checks the "processing" -> and it is true , thus it does not submit a notification
1756 * 4. we clear the "processing" state
1757 * 5. ->here we need to re-check the queue state to ensure we do not leak the notification of the above command
1758 * 6. if the queue appears to be not-empty set the "processing" state back to "true"
1759 **/
1760 if (VBoxVBVAExHSCheckCommands(pCmdVbva) == VINF_SUCCESS)
1761 continue;
1762 return VINF_EOF;
1763 case VINF_TRY_AGAIN:
1764 RTThreadSleep(1);
1765 continue;
1766 default:
1767 /* this is something really unexpected, i.e. most likely guest has written something incorrect to the VBVA buffer */
1768 if (RT_FAILURE(rc))
1769 return rc;
1770
1771 WARN(("Warning: vboxVBVAExHCmdGet returned unexpected success status %d\n", rc));
1772 return VERR_INTERNAL_ERROR;
1773 }
1774 }
1775
1776 WARN(("Warning: VBoxVBVAExHCmdGet unexpected state\n"));
1777 return VERR_INTERNAL_ERROR;
1778}
1779
/* Checks whether the new commands are ready for processing
 * @returns
 * VINF_SUCCESS - there are commands are in a queue, and the given thread is now the processor (i.e. typically it would delegate processing to a worker thread)
 * VINF_EOF - no commands in a queue
 * VINF_ALREADY_INITIALIZED - another thread already processing the commands
 * VERR_INVALID_STATE - the VBVA is paused or pausing */
static int VBoxVBVAExHSCheckCommands(struct VBVAEXHOSTCONTEXT *pCmdVbva)
{
    if (ASMAtomicUoReadU32(&pCmdVbva->u32State) == VBVAEXHOSTCONTEXT_STATE_STOPPED)
        return VINF_EOF;

    int rc = vboxVBVAExHSProcessorAcquire(pCmdVbva);
    if (RT_SUCCESS(rc))
    {
        /* we are the processor now */
        VBVABUFFER *pVBVA = pCmdVbva->pVBVA;

        uint32_t indexRecordFirst = pVBVA->indexRecordFirst;
        uint32_t indexRecordFree = pVBVA->indexRecordFree;

        if (indexRecordFirst != indexRecordFree)
        {
            /* Records pending: publish the PROCESSING flag to the guest and
             * keep the processor state for the caller. */
            vboxVBVAExHPHgEventSet(pCmdVbva);
            return VINF_SUCCESS;
        }

        /* Nothing pending: give the processor state back. */
        vboxVBVAExHPProcessorRelease(pCmdVbva);
        return VINF_EOF;
    }
    if (rc == VERR_SEM_BUSY)
        return VINF_ALREADY_INITIALIZED;
    Assert(rc == VERR_INVALID_STATE);
    return VERR_INVALID_STATE;
}
1814
/** Zero-initializes the VBVA-ex host context.
 *  NOTE(review): relies on the zeroed u32State reading as
 *  VBVAEXHOSTCONTEXT_STATE_STOPPED - confirm the enum value is 0. */
static void VBoxVBVAExHSInit(struct VBVAEXHOSTCONTEXT *pCmdVbva)
{
    memset(pCmdVbva, 0, sizeof (*pCmdVbva));
}
1819
/** Enables the VBVA-ex host context on the given guest VBVA buffer, clearing
 *  the host event flags and entering the LISTENING state.
 *  @returns VINF_SUCCESS, or VINF_ALREADY_INITIALIZED if not STOPPED. */
static int VBoxVBVAExHSEnable(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVABUFFER *pVBVA)
{
    if (ASMAtomicUoReadU32(&pCmdVbva->u32State) != VBVAEXHOSTCONTEXT_STATE_STOPPED)
        return VINF_ALREADY_INITIALIZED;

    pCmdVbva->pVBVA = pVBVA;
    pCmdVbva->pVBVA->hostFlags.u32HostEvents = 0;
    ASMAtomicWriteU32(&pCmdVbva->u32State, VBVAEXHOSTCONTEXT_STATE_LISTENING);
    return VINF_SUCCESS;
}
1830
/** Disables the VBVA-ex host context: acquires the processor state to block
 *  concurrent submission, clears the guest-visible flags, and zeroes the
 *  context (returning it to STOPPED).
 *  @returns VINF_SUCCESS, or VERR_INVALID_STATE if the processor state could
 *           not be acquired (busy or paused). */
static int VBoxVBVAExHSDisable(struct VBVAEXHOSTCONTEXT *pCmdVbva)
{
    if (ASMAtomicUoReadU32(&pCmdVbva->u32State) == VBVAEXHOSTCONTEXT_STATE_STOPPED)
        return VINF_SUCCESS;

    /* ensure no commands pending and one tries to submit them */
    int rc = vboxVBVAExHSProcessorAcquire(pCmdVbva);
    if (RT_SUCCESS(rc))
    {
        pCmdVbva->pVBVA->hostFlags.u32HostEvents = 0;
        memset(pCmdVbva, 0, sizeof (*pCmdVbva));
        return VINF_SUCCESS;
    }
    return VERR_INVALID_STATE;
}
1846
/** Terminates the VBVA-ex host context unconditionally: pauses processing,
 *  clears the guest-visible flags, and zeroes the context (STOPPED). */
static void VBoxVBVAExHSTerm(struct VBVAEXHOSTCONTEXT *pCmdVbva)
{
    /* ensure the processor is stopped */
    if (ASMAtomicUoReadU32(&pCmdVbva->u32State) == VBVAEXHOSTCONTEXT_STATE_STOPPED)
        return;

    /* ensure no one tries to submit the command */
    vboxVBVAExHSPause(pCmdVbva);
    pCmdVbva->pVBVA->hostFlags.u32HostEvents = 0;
    memset(pCmdVbva, 0, sizeof (*pCmdVbva));
}
1858
/* Saves state
 * @returns - same as VBoxVBVAExHSCheckCommands, or failure on save state fail
 */
static int VBoxVBVAExHSSaveState(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM)
{
    int rc;
    if (ASMAtomicUoReadU32(&pCmdVbva->u32State) != VBVAEXHOSTCONTEXT_STATE_STOPPED)
    {
        /* Pause processing, store the VBVA buffer's BYTE offset from the VRAM
         * base, then resume. */
        vboxVBVAExHSPause(pCmdVbva);
        rc = SSMR3PutU32(pSSM, (uint32_t)(((uint8_t*)pCmdVbva->pVBVA) - pu8VramBase));
        AssertRCReturn(rc, rc);
        return vboxVBVAExHSResume(pCmdVbva);
    }

    /* Stopped: store the "no buffer" marker. */
    rc = SSMR3PutU32(pSSM, 0xffffffff);
    AssertRCReturn(rc, rc);

    return VINF_EOF;
}
1878
1879/* Loads state
1880 * @returns - same as VBoxVBVAExHSCheckCommands, or failure on load state fail
1881 */
1882static int VBoxVBVAExHSLoadState(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM, uint32_t u32Version)
1883{
1884 uint32_t u32;
1885 int rc = SSMR3GetU32(pSSM, &u32);
1886 AssertRCReturn(rc, rc);
1887 if (u32 != 0xffffffff)
1888 {
1889 VBVABUFFER *pVBVA = (VBVABUFFER*)pu8VramBase + u32;
1890 rc = VBoxVBVAExHSEnable(pCmdVbva, pVBVA);
1891 AssertRCReturn(rc, rc);
1892 return VBoxVBVAExHSCheckCommands(pCmdVbva);
1893 }
1894
1895 return VINF_EOF;
1896}
1897
1898int vboxCmdVBVAEnable(PVGASTATE pVGAState, VBVABUFFER *pVBVA)
1899{
1900 struct VBOXVDMAHOST *pVdma = pVGAState->pVdma;
1901 return VBoxVBVAExHSEnable(&pVdma->CmdVbva, pVBVA);
1902}
1903
1904int vboxCmdVBVADisable(PVGASTATE pVGAState)
1905{
1906 struct VBOXVDMAHOST *pVdma = pVGAState->pVdma;
1907 return VBoxVBVAExHSDisable(&pVdma->CmdVbva);
1908}
1909
1910static int vboxCmdVBVACmdSubmitPerform(PVGASTATE pVGAState)
1911{
1912 struct VBOXVDMAHOST *pVdma = pVGAState->pVdma;
1913 int rc = VBoxVBVAExHSCheckCommands(&pVdma->CmdVbva);
1914 switch (rc)
1915 {
1916 case VINF_SUCCESS:
1917 return pVGAState->pDrv->pfnCrCmdNotifyCmds(pVGAState->pDrv);
1918 case VINF_ALREADY_INITIALIZED:
1919 case VINF_EOF:
1920 case VERR_INVALID_STATE:
1921 return VINF_SUCCESS;
1922 default:
1923 Assert(!RT_FAILURE(rc));
1924 return RT_FAILURE(rc) ? rc : VERR_INTERNAL_ERROR;
1925 }
1926}
1927
/** Guest submitted new commands: probe and dispatch them. */
int vboxCmdVBVACmdSubmit(PVGASTATE pVGAState)
{
    return vboxCmdVBVACmdSubmitPerform(pVGAState);
}
1932
/** Guest requested a flush: probe and dispatch pending commands. */
int vboxCmdVBVACmdFlush(PVGASTATE pVGAState)
{
    return vboxCmdVBVACmdSubmitPerform(pVGAState);
}
1937
/** Periodic timer tick: probe for commands; the result is intentionally ignored. */
void vboxCmdVBVACmdTimer(PVGASTATE pVGAState)
{
    vboxCmdVBVACmdSubmitPerform(pVGAState);
}
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette