VirtualBox

Changeset 80872 in vbox for trunk/src/VBox/Devices/Graphics


Timestamp:
Sep 17, 2019 8:54:03 PM
Author:
vboxsync
Message:

Devices/Graphics,Main,include: remove obsolete Chromium code. bugref:9529

Location:
trunk/src/VBox/Devices/Graphics
Files:
5 edited

Legend:

Unmodified (both old and new line numbers shown)
Added (new line number only)
Removed (old line number only)
  • trunk/src/VBox/Devices/Graphics/DevVGA-SVGA.cpp

    r78347 r80872  
    16011601            /* bird: Whatever this is was added to make screenshot work; ask sunlover, he should explain... */
    16021602            for (uint32_t idScreen = 0; idScreen < pThis->cMonitors; ++idScreen)
    1603                 pThis->pDrv->pfnVBVAEnable(pThis->pDrv, idScreen, NULL /*pHostFlags*/, false /*fRenderThreadMode*/);
     1603                pThis->pDrv->pfnVBVAEnable(pThis->pDrv, idScreen, NULL /*pHostFlags*/);
    16041604        }
    16051605        else
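
The only interface change in this changeset, as opposed to pure removal, is that the pfnVBVAEnable connector method loses its trailing fRenderThreadMode parameter; both call sites touched here (above, and in DevVGA_VBVA.cpp below) simply drop the final false argument. A minimal sketch of the before/after shape, using simplified stand-in declarations rather than the real PDM interface:

    #include <stdbool.h>

    /* Simplified stand-ins for the real PDM display connector types. */
    typedef struct PDMIDISPLAYCONNECTOR PDMIDISPLAYCONNECTOR;
    typedef struct VBVAHOSTFLAGS VBVAHOSTFLAGS;

    /* Before r80872 (sketch): the callback carried a render-thread flag. */
    typedef int FNVBVAENABLEOLD(PDMIDISPLAYCONNECTOR *pInterface, unsigned uScreenId,
                                VBVAHOSTFLAGS *pHostFlags, bool fRenderThreadMode);

    /* After r80872 (sketch): the flag is gone, so callers drop the 'false'. */
    typedef int FNVBVAENABLENEW(PDMIDISPLAYCONNECTOR *pInterface, unsigned uScreenId,
                                VBVAHOSTFLAGS *pHostFlags);
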
  • trunk/src/VBox/Devices/Graphics/DevVGA.cpp

    r80731 r80872  
    54455445    vbvaTimerCb(pThis);
    54465446#endif
    5447 
    5448     vboxCmdVBVATimerRefresh(pThis);
    54495447
    54505448#ifdef VBOX_WITH_VMSVGA
  • trunk/src/VBox/Devices/Graphics/DevVGA.h

    r80430 r80872  
    562562#define PPDMIDISPLAYVBVACALLBACKS_2_PVGASTATE(_pcb) ( (PVGASTATE)((uint8_t *)(_pcb) - RT_OFFSETOF(VGASTATE, IVBVACallbacks)) )
    563563
    564 DECLCALLBACK(int) vboxVDMACrHgsmiCommandCompleteAsync(PPDMIDISPLAYVBVACALLBACKS pInterface,
    565                                                       PVBOXVDMACMD_CHROMIUM_CMD pCmd, int rc);
    566 DECLCALLBACK(int) vboxVDMACrHgsmiControlCompleteAsync(PPDMIDISPLAYVBVACALLBACKS pInterface,
    567                                                       PVBOXVDMACMD_CHROMIUM_CTL pCmd, int rc);
    568 DECLCALLBACK(int) vboxCmdVBVACmdHostCtl(PPDMIDISPLAYVBVACALLBACKS pInterface,
    569                                         struct VBOXCRCMDCTL* pCmd, uint32_t cbCmd,
    570                                         PFNCRCTLCOMPLETION pfnCompletion,
    571                                         void *pvCompletion);
    572 DECLCALLBACK(int) vboxCmdVBVACmdHostCtlSync(PPDMIDISPLAYVBVACALLBACKS pInterface,
    573                                             struct VBOXCRCMDCTL* pCmd, uint32_t cbCmd);
    574 
    575564int vboxVBVASaveStateExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM);
    576565int vboxVBVALoadStateExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM, uint32_t u32Version);
     
    598587# endif /* VBOX_WITH_VDMA */
    599588
    600 int vboxCmdVBVACmdSubmit(PVGASTATE pVGAState);
    601 int vboxCmdVBVACmdFlush(PVGASTATE pVGAState);
    602 int vboxCmdVBVACmdCtl(PVGASTATE pVGAState, VBOXCMDVBVA_CTL RT_UNTRUSTED_VOLATILE_GUEST *pCtl, uint32_t cbCtl);
    603 void vboxCmdVBVATimerRefresh(PVGASTATE pVGAState);
    604 bool vboxCmdVBVAIsEnabled(PVGASTATE pVGAState);
    605589#endif /* VBOX_WITH_HGSMI */
    606590
  • trunk/src/VBox/Devices/Graphics/DevVGA_VBVA.cpp

    r80428 r80872  
    586586        pVBVA->hostFlags.u32HostEvents      = 0;
    587587        pVBVA->hostFlags.u32SupportedOrders = 0;
    588         rc = pVGAState->pDrv->pfnVBVAEnable(pVGAState->pDrv, uScreenId, &pVBVA->hostFlags, false);
     588        rc = pVGAState->pDrv->pfnVBVAEnable(pVGAState->pDrv, uScreenId, &pVBVA->hostFlags);
    589589        if (RT_SUCCESS(rc))
    590590        {
     
    20612061            if (pView->vbva.guest.pVBVA)
    20622062            {
    2063                 Assert(!vboxCmdVBVAIsEnabled(pVGAState));
    2064 
    20652063                int rc = vbvaEnable(iView, pVGAState, pCtx, pView->vbva.guest.pVBVA, pView->vbva.u32VBVAOffset, true /* fRestored */);
    20662064                if (RT_SUCCESS(rc))
     
    24252423    switch (u16ChannelInfo)
    24262424    {
    2427         case VBVA_CMDVBVA_SUBMIT:
    2428             rc = vboxCmdVBVACmdSubmit(pVGAState);
    2429             break;
    2430 
    2431         case VBVA_CMDVBVA_FLUSH:
    2432             rc = vboxCmdVBVACmdFlush(pVGAState);
    2433             break;
    2434 
    2435         case VBVA_CMDVBVA_CTL:
    2436             if (cbBuffer >= VBoxSHGSMIBufferHeaderSize() + sizeof(VBOXCMDVBVA_CTL))
    2437             {
    2438                 VBOXCMDVBVA_CTL RT_UNTRUSTED_VOLATILE_GUEST *pCtl
    2439                     = (VBOXCMDVBVA_CTL RT_UNTRUSTED_VOLATILE_GUEST *)VBoxSHGSMIBufferData((VBOXSHGSMIHEADER RT_UNTRUSTED_VOLATILE_GUEST *)pvBuffer);
    2440                 rc = vboxCmdVBVACmdCtl(pVGAState, pCtl, cbBuffer - VBoxSHGSMIBufferHeaderSize());
    2441             }
    2442             else
    2443                 rc = VERR_INVALID_PARAMETER;
    2444             break;
    2445 
    24462425#ifdef VBOX_WITH_VDMA
    24472426        case VBVA_VDMA_CMD:
     
    24882467            if (cbBuffer >= sizeof(VBVAINFOVIEW))
    24892468            {
    2490                 AssertMsgBreak(!vboxCmdVBVAIsEnabled(pVGAState), ("VBVA_INFO_VIEW is not acceptable for CmdVbva\n"));
    2491 
    24922469                /* Guest submits an array of VBVAINFOVIEW structures. */
    24932470                const VBVAINFOVIEW RT_UNTRUSTED_VOLATILE_GUEST *pView = (VBVAINFOVIEW RT_UNTRUSTED_VOLATILE_GUEST *)pvBuffer;
     
    25192496        case VBVA_INFO_SCREEN:
    25202497            rc = VERR_INVALID_PARAMETER;
    2521             AssertMsgBreak(!vboxCmdVBVAIsEnabled(pVGAState), ("VBVA_INFO_SCREEN is not acceptable for CmdVbva\n"));
    2522 
    25232498            if (cbBuffer >= sizeof(VBVAINFOSCREEN))
    25242499                rc = VBVAInfoScreen(pVGAState, (VBVAINFOSCREEN RT_UNTRUSTED_VOLATILE_GUEST *)pvBuffer);
     
    25272502        case VBVA_ENABLE:
    25282503            rc = VERR_INVALID_PARAMETER;
    2529             AssertMsgBreak(!vboxCmdVBVAIsEnabled(pVGAState), ("VBVA_ENABLE is not acceptable for CmdVbva\n"));
    2530 
    25312504            if (cbBuffer >= sizeof(VBVAENABLE))
    25322505            {
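
The guest-channel cases deleted above (VBVA_CMDVBVA_SUBMIT/FLUSH/CTL) followed the same validate-then-cast discipline as the cases that survive: never trust the guest-supplied buffer size, check it against the expected structure size before casting the untrusted pointer. A self-contained reduction of that pattern, with hypothetical names rather than the VBox types:

    #include <stdint.h>
    #include <stddef.h>

    /* Illustrative stand-ins; not the VBox API. */
    typedef struct GUESTCTL { uint32_t u32Type; uint32_t cbData; } GUESTCTL;
    #define ERR_INVALID_PARAMETER (-1)

    static int handleGuestCtl(const void *pvBuffer, size_t cbBuffer)
    {
        if (cbBuffer < sizeof(GUESTCTL))   /* size check before the cast */
            return ERR_INVALID_PARAMETER;
        const GUESTCTL *pCtl = (const GUESTCTL *)pvBuffer;
        (void)pCtl;  /* ... validate pCtl->u32Type / pCtl->cbData before use ... */
        return 0;
    }
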
  • trunk/src/VBox/Devices/Graphics/DevVGA_VDMA.cpp

    r80731 r80872  
    3636#include "HGSMI/SHGSMIHost.h"
    3737
    38 #include <VBoxVideo3D.h>
    39 #include <VBoxVideoHost3D.h>
    40 
    4138#ifdef DEBUG_misha
    4239# define VBOXVDBG_MEMCACHE_DISABLE
     
    7370
    7471typedef DECLCALLBACKPTR(void, PFNVBOXVDMATHREAD_CHANGED)(struct VBOXVDMATHREAD *pThread, int rc, void *pvThreadContext, void *pvChangeContext);
    75 
    76 static DECLCALLBACK(int) vboxCmdVBVACmdCallout(struct VBOXVDMAHOST *pVdma, struct VBOXCRCMDCTL* pCmd, VBOXCRCMDCTL_CALLOUT_LISTENTRY *pEntry, PFNVBOXCRCMDCTL_CALLOUT_CB pfnCb);
    77 
    7872
    7973typedef struct VBOXVDMATHREAD
     
    175169
    176170
    177 typedef struct VBOXVDMA_SOURCE
    178 {
    179     VBVAINFOSCREEN Screen;
    180     VBOXCMDVBVA_SCREENMAP_DECL(uint32_t, aTargetMap);
    181 } VBOXVDMA_SOURCE;
    182 
    183 
    184171typedef struct VBOXVDMAHOST
    185172{
    186173    PHGSMIINSTANCE pHgsmi; /**< Same as VGASTATE::pHgsmi. */
    187174    PVGASTATE pVGAState;
    188     VBVAEXHOSTCONTEXT CmdVbva;
    189     VBOXVDMATHREAD Thread;
    190     VBOXCRCMD_SVRINFO CrSrvInfo;
    191     VBVAEXHOSTCTL* pCurRemainingHostCtl;
    192     RTSEMEVENTMULTI HostCrCtlCompleteEvent;
    193     int32_t volatile i32cHostCrCtlCompleted;
    194     RTCRITSECT CalloutCritSect;
    195 //    VBOXVDMA_SOURCE aSources[VBOX_VIDEO_MAX_SCREENS];
    196175} VBOXVDMAHOST, *PVBOXVDMAHOST;
    197176
     
    210189*   Internal Functions                                                                                                           *
    211190*********************************************************************************************************************************/
    212 static int  vdmaVBVANotifyDisable(PVGASTATE pVGAState);
    213 static void VBoxVBVAExHPDataCompleteCmd(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint32_t cbCmd);
    214 static void VBoxVBVAExHPDataCompleteCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL *pCtl, int rc);
    215 static int  VBoxVDMAThreadEventNotify(PVBOXVDMATHREAD pThread);
    216 static int  vboxVDMACmdExecBpbTransfer(PVBOXVDMAHOST pVdma, VBOXVDMACMD_DMA_BPB_TRANSFER RT_UNTRUSTED_VOLATILE_GUEST *pTransfer,
    217                                        uint32_t cbBuffer);
    218 static int  vdmaVBVACtlSubmitSync(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL *pCtl, VBVAEXHOSTCTL_SOURCE enmSource);
    219 static DECLCALLBACK(void) vdmaVBVACtlSubmitSyncCompletion(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl,
    220                                                           int rc, void *pvContext);
    221 
    222 /* VBoxVBVAExHP**, i.e. processor functions, can NOT be called concurrently with each other,
    223  * can be called concurrently with itself as well as with other VBoxVBVAEx** functions except Init/Start/Term, apparently */
    224 
    225 
    226 
    227 /**
    228  * Creates a host control command.
    229  */
    230 static VBVAEXHOSTCTL *VBoxVBVAExHCtlCreate(VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL_TYPE enmType)
    231 {
    232 # ifndef VBOXVDBG_MEMCACHE_DISABLE
    233     VBVAEXHOSTCTL *pCtl = (VBVAEXHOSTCTL *)RTMemCacheAlloc(pCmdVbva->CtlCache);
    234 # else
    235     VBVAEXHOSTCTL *pCtl = (VBVAEXHOSTCTL *)RTMemAlloc(sizeof(VBVAEXHOSTCTL));
    236 # endif
    237     if (pCtl)
    238     {
    239         RT_ZERO(*pCtl);
    240         pCtl->enmType = enmType;
    241     }
    242     else
    243         WARN(("VBoxVBVAExHCtlCreate failed\n"));
    244     return pCtl;
    245 }
    246 
    247 /**
    248  * Destroys a host control command.
    249  */
    250 static void VBoxVBVAExHCtlFree(VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL *pCtl)
    251 {
    252 # ifndef VBOXVDBG_MEMCACHE_DISABLE
    253     RTMemCacheFree(pCmdVbva->CtlCache, pCtl);
    254 # else
    255     RTMemFree(pCtl);
    256 # endif
    257 }
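
VBoxVBVAExHCtlCreate() and VBoxVBVAExHCtlFree() are a matched pair around RTMemCache, with a plain-heap fallback when VBOXVDBG_MEMCACHE_DISABLE is defined. A self-contained sketch of the same compile-time switch, with malloc standing in for the cache purely for illustration:

    #include <stdlib.h>
    #include <string.h>

    typedef struct CTL { int enmType; } CTL;

    /* Stand-ins so the sketch compiles; the real code uses RTMemCacheAlloc/Free. */
    static CTL *cacheAlloc(void)     { return (CTL *)malloc(sizeof(CTL)); }
    static void cacheFree(CTL *pCtl) { free(pCtl); }

    static CTL *ctlCreate(int enmType)
    {
    #ifndef MEMCACHE_DISABLE
        CTL *pCtl = cacheAlloc();               /* fixed-size cache path */
    #else
        CTL *pCtl = (CTL *)malloc(sizeof(CTL)); /* debug fallback: plain heap */
    #endif
        if (pCtl)
        {
            memset(pCtl, 0, sizeof(*pCtl));     /* RT_ZERO equivalent */
            pCtl->enmType = enmType;
        }
        return pCtl;
    }

    static void ctlDestroy(CTL *pCtl)
    {
    #ifndef MEMCACHE_DISABLE
        cacheFree(pCtl);
    #else
        free(pCtl);
    #endif
    }
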
    258 
    259 
    260 
    261 /**
    262  * Works the VBVA state.
    263  */
    264 static int vboxVBVAExHSProcessorAcquire(struct VBVAEXHOSTCONTEXT *pCmdVbva)
    265 {
    266     Assert(pCmdVbva->i32State >= VBVAEXHOSTCONTEXT_STATE_LISTENING);
    267 
    268     if (ASMAtomicCmpXchgS32(&pCmdVbva->i32State, VBVAEXHOSTCONTEXT_STATE_PROCESSING, VBVAEXHOSTCONTEXT_STATE_LISTENING))
    269         return VINF_SUCCESS;
    270     return VERR_SEM_BUSY;
    271 }
    272 
    273 /**
    274  * Worker for vboxVBVAExHPDataGetInner() and VBoxVBVAExHPCheckHostCtlOnDisable()
    275  * that gets the next control command.
    276  *
    277  * @returns Pointer to command if found, NULL if not.
    278  * @param   pCmdVbva        The VBVA command context.
    279  * @param   pfHostCtl       Where to indicate whether it's a host or guest
    280  *                          control command.
    281  * @param   fHostOnlyMode   Whether to only fetch host commands, or both.
    282  */
    283 static VBVAEXHOSTCTL *vboxVBVAExHPCheckCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, bool *pfHostCtl, bool fHostOnlyMode)
    284 {
    285     Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
    286 
    287     if (!fHostOnlyMode && !ASMAtomicUoReadU32(&pCmdVbva->u32cCtls))
    288         return NULL;
    289 
    290     int rc = RTCritSectEnter(&pCmdVbva->CltCritSect);
    291     if (RT_SUCCESS(rc))
    292     {
    293         VBVAEXHOSTCTL *pCtl = RTListGetFirst(&pCmdVbva->HostCtlList, VBVAEXHOSTCTL, Node);
    294         if (pCtl)
    295             *pfHostCtl = true;
    296         else if (!fHostOnlyMode)
    297         {
    298             if (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) != VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
    299             {
    300                 pCtl = RTListGetFirst(&pCmdVbva->GuestCtlList, VBVAEXHOSTCTL, Node);
    301                 /* pCtl cannot be NULL here since pCmdVbva->u32cCtls is non-zero
    302                  * and there are no HostCtl commands. */
    303                 Assert(pCtl);
    304                 *pfHostCtl = false;
    305             }
    306         }
    307 
    308         if (pCtl)
    309         {
    310             RTListNodeRemove(&pCtl->Node);
    311             ASMAtomicDecU32(&pCmdVbva->u32cCtls);
    312         }
    313 
    314         RTCritSectLeave(&pCmdVbva->CltCritSect);
    315 
    316         return pCtl;
    317     }
    318     else
    319         WARN(("RTCritSectEnter failed %Rrc\n", rc));
    320 
    321     return NULL;
    322 }
    323 
    324 /**
    325  * Worker for vboxVDMACrHgcmHandleEnableRemainingHostCommand().
    326  */
    327 static VBVAEXHOSTCTL *VBoxVBVAExHPCheckHostCtlOnDisable(struct VBVAEXHOSTCONTEXT *pCmdVbva)
    328 {
    329     bool fHostCtl = false;
    330     VBVAEXHOSTCTL *pCtl = vboxVBVAExHPCheckCtl(pCmdVbva, &fHostCtl, true);
    331     Assert(!pCtl || fHostCtl);
    332     return pCtl;
    333 }
    334 
    335 /**
    336  * Worker for vboxVBVAExHPCheckProcessCtlInternal() and
    337  * vboxVDMACrGuestCtlProcess() / VBVAEXHOSTCTL_TYPE_GHH_ENABLE_PAUSED.
    338  */
    339 static int VBoxVBVAExHPPause(struct VBVAEXHOSTCONTEXT *pCmdVbva)
    340 {
    341     if (pCmdVbva->i32EnableState < VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
    342     {
    343         WARN(("Invalid state\n"));
    344         return VERR_INVALID_STATE;
    345     }
    346 
    347     ASMAtomicWriteS32(&pCmdVbva->i32EnableState, VBVAEXHOSTCONTEXT_ESTATE_PAUSED);
    348     return VINF_SUCCESS;
    349 }
    350 
    351 /**
    352  * Works the VBVA state in response to VBVAEXHOSTCTL_TYPE_HH_INTERNAL_RESUME.
    353  */
    354 static int VBoxVBVAExHPResume(struct VBVAEXHOSTCONTEXT *pCmdVbva)
    355 {
    356     if (pCmdVbva->i32EnableState != VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
    357     {
    358         WARN(("Invalid state\n"));
    359         return VERR_INVALID_STATE;
    360     }
    361 
    362     ASMAtomicWriteS32(&pCmdVbva->i32EnableState, VBVAEXHOSTCONTEXT_ESTATE_ENABLED);
    363     return VINF_SUCCESS;
    364 }
    365 
    366 /**
    367  * Worker for vboxVBVAExHPDataGetInner that processes PAUSE and RESUME requests.
    368  *
    369  * Unclear why these cannot be handled the normal way.
    370  *
    371  * @returns true if handled, false if not.
    372  * @param   pCmdVbva            The VBVA context.
    373  * @param   pCtl                The host control command.
    374  */
    375 static bool vboxVBVAExHPCheckProcessCtlInternal(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL* pCtl)
    376 {
    377     switch (pCtl->enmType)
    378     {
    379         case VBVAEXHOSTCTL_TYPE_HH_INTERNAL_PAUSE:
    380             VBoxVBVAExHPPause(pCmdVbva);
    381             VBoxVBVAExHPDataCompleteCtl(pCmdVbva, pCtl, VINF_SUCCESS);
    382             return true;
    383 
    384         case VBVAEXHOSTCTL_TYPE_HH_INTERNAL_RESUME:
    385             VBoxVBVAExHPResume(pCmdVbva);
    386             VBoxVBVAExHPDataCompleteCtl(pCmdVbva, pCtl, VINF_SUCCESS);
    387             return true;
    388 
    389         default:
    390             return false;
    391     }
    392 }
    393 
    394 /**
    395  * Works the VBVA state.
    396  */
    397 static void vboxVBVAExHPProcessorRelease(struct VBVAEXHOSTCONTEXT *pCmdVbva)
    398 {
    399     Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
    400 
    401     ASMAtomicWriteS32(&pCmdVbva->i32State, VBVAEXHOSTCONTEXT_STATE_LISTENING);
    402 }
    403 
    404 /**
    405  * Works the VBVA state.
    406  */
    407 static void vboxVBVAExHPHgEventSet(struct VBVAEXHOSTCONTEXT *pCmdVbva)
    408 {
    409     Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
    410     if (pCmdVbva->pVBVA)
    411         ASMAtomicOrU32(&pCmdVbva->pVBVA->hostFlags.u32HostEvents, VBVA_F_STATE_PROCESSING);
    412 }
    413 
    414 /**
    415  * Works the VBVA state.
    416  */
    417 static void vboxVBVAExHPHgEventClear(struct VBVAEXHOSTCONTEXT *pCmdVbva)
    418 {
    419     Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
    420     if (pCmdVbva->pVBVA)
    421         ASMAtomicAndU32(&pCmdVbva->pVBVA->hostFlags.u32HostEvents, ~VBVA_F_STATE_PROCESSING);
    422 }
    423 
    424 /**
    425  * Worker for vboxVBVAExHPDataGetInner.
    426  *
    427  * @retval VINF_SUCCESS
    428  * @retval VINF_EOF
    429  * @retval VINF_TRY_AGAIN
    430  * @retval VERR_INVALID_STATE
    431  *
    432  * @thread VDMA
    433  */
    434 static int vboxVBVAExHPCmdGet(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t RT_UNTRUSTED_VOLATILE_GUEST **ppbCmd, uint32_t *pcbCmd)
    435 {
    436     Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
    437     Assert(pCmdVbva->i32EnableState > VBVAEXHOSTCONTEXT_ESTATE_PAUSED);
    438 
    439     VBVABUFFER RT_UNTRUSTED_VOLATILE_GUEST *pVBVA = pCmdVbva->pVBVA; /* This is shared with the guest, so careful! */
    440 
    441     /*
    442      * Inspect records.
    443      */
    444     uint32_t idxRecordFirst = ASMAtomicUoReadU32(&pVBVA->indexRecordFirst);
    445     uint32_t idxRecordFree  = ASMAtomicReadU32(&pVBVA->indexRecordFree);
    446     RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
    447     Log(("first = %d, free = %d\n", idxRecordFirst, idxRecordFree));
    448     if (idxRecordFirst == idxRecordFree)
    449         return VINF_EOF; /* No records to process. Return without assigning output variables. */
    450     AssertReturn(idxRecordFirst < VBVA_MAX_RECORDS, VERR_INVALID_STATE);
    451     RT_UNTRUSTED_VALIDATED_FENCE();
    452 
    453     /*
    454      * Read the record size and check that it has been completely recorded.
    455      */
    456     uint32_t const cbRecordCurrent = ASMAtomicReadU32(&pVBVA->aRecords[idxRecordFirst].cbRecord);
    457     uint32_t const cbRecord        = cbRecordCurrent & ~VBVA_F_RECORD_PARTIAL;
    458     RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
    459     if (   (cbRecordCurrent & VBVA_F_RECORD_PARTIAL)
    460         || !cbRecord)
    461         return VINF_TRY_AGAIN; /* The record is being recorded, try again. */
    462     Assert(cbRecord);
    463 
    464     /*
    465      * Get and validate the data area.
    466      */
    467     uint32_t const offData   = ASMAtomicReadU32(&pVBVA->off32Data);
    468     uint32_t       cbMaxData = ASMAtomicReadU32(&pVBVA->cbData);
    469     RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
    470     AssertLogRelMsgStmt(cbMaxData <= pCmdVbva->cbMaxData, ("%#x vs %#x\n", cbMaxData, pCmdVbva->cbMaxData),
    471                         cbMaxData = pCmdVbva->cbMaxData);
    472     AssertLogRelMsgReturn(   cbRecord <= cbMaxData
    473                           && offData  <= cbMaxData - cbRecord,
    474                           ("offData=%#x cbRecord=%#x cbMaxData=%#x\n", offData, cbRecord, cbMaxData),
    475                           VERR_INVALID_STATE);
    476     RT_UNTRUSTED_VALIDATED_FENCE();
    477 
    478     /*
    479      * Just set the return values and we're done.
    480      */
    481     *ppbCmd = (uint8_t RT_UNTRUSTED_VOLATILE_GUEST *)&pVBVA->au8Data[offData];
    482     *pcbCmd = cbRecord;
    483     return VINF_SUCCESS;
    484 }
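
Note the shape of the bounds check in the "Get and validate the data area" step: testing cbRecord <= cbMaxData first and then offData <= cbMaxData - cbRecord cannot wrap, whereas the naive offData + cbRecord <= cbMaxData could overflow with untrusted 32-bit inputs. The check, isolated for illustration:

    #include <stdint.h>
    #include <stdbool.h>

    /* Overflow-safe range check from vboxVBVAExHPCmdGet, isolated. */
    static bool recordInRange(uint32_t offData, uint32_t cbRecord, uint32_t cbMaxData)
    {
        /* 'offData + cbRecord <= cbMaxData' could wrap around; this cannot. */
        return cbRecord <= cbMaxData
            && offData  <= cbMaxData - cbRecord;
    }
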
    485 
    486 /**
    487  * Completion routine advancing our end of the ring and data buffers forward.
    488  *
    489  * @param   pCmdVbva            The VBVA context.
    490  * @param   cbCmd               The size of the data.
    491  */
    492 static void VBoxVBVAExHPDataCompleteCmd(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint32_t cbCmd)
    493 {
    494     VBVABUFFER RT_UNTRUSTED_VOLATILE_GUEST *pVBVA = pCmdVbva->pVBVA;
    495     if (pVBVA)
    496     {
    497         /* Move data head. */
    498         uint32_t const  cbData      = pVBVA->cbData;
    499         uint32_t const  offData     = pVBVA->off32Data;
    500         RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
    501         if (cbData > 0)
    502             ASMAtomicWriteU32(&pVBVA->off32Data, (offData + cbCmd) % cbData);
    503         else
    504             ASMAtomicWriteU32(&pVBVA->off32Data, 0);
    505 
    506         /* Increment record pointer. */
    507         uint32_t const  idxRecFirst = pVBVA->indexRecordFirst;
    508         RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
    509         ASMAtomicWriteU32(&pVBVA->indexRecordFirst, (idxRecFirst + 1) % RT_ELEMENTS(pVBVA->aRecords));
    510     }
    511 }
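
The completion routine wraps both cursors: the data offset advances modulo cbData and the record index modulo the size of the record array. A toy run with made-up sizes to show the wrap-around:

    #include <stdint.h>
    #include <stdio.h>

    /* Toy walk-through of the advance logic above; all sizes are made up. */
    int main(void)
    {
        uint32_t off32Data = 12, cbData = 16;  /* data cursor near the end   */
        uint32_t idxFirst  = 3,  cRecords = 4; /* last slot of a 4-slot ring */
        uint32_t cbCmd     = 8;                /* completed command size     */

        off32Data = cbData ? (off32Data + cbCmd) % cbData : 0; /* -> 4 (wraps) */
        idxFirst  = (idxFirst + 1) % cRecords;                 /* -> 0 (wraps) */

        printf("off32Data=%u idxFirst=%u\n", off32Data, idxFirst);
        return 0;
    }
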
    512 
    513 /**
    514  * Control command completion routine used by many.
    515  */
    516 static void VBoxVBVAExHPDataCompleteCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL *pCtl, int rc)
    517 {
    518     if (pCtl->pfnComplete)
    519         pCtl->pfnComplete(pCmdVbva, pCtl, rc, pCtl->pvComplete);
    520     else
    521         VBoxVBVAExHCtlFree(pCmdVbva, pCtl);
    522 }
    523 
    524 
    525 /**
    526  * Worker for VBoxVBVAExHPDataGet.
    527  * @thread VDMA
    528  */
    529 static VBVAEXHOST_DATA_TYPE vboxVBVAExHPDataGetInner(struct VBVAEXHOSTCONTEXT *pCmdVbva,
    530                                                      uint8_t RT_UNTRUSTED_VOLATILE_GUEST **ppbCmd, uint32_t *pcbCmd)
    531 {
    532     Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
    533     VBVAEXHOSTCTL *pCtl;
    534     bool fHostClt;
    535 
    536     for (;;)
    537     {
    538         pCtl = vboxVBVAExHPCheckCtl(pCmdVbva, &fHostClt, false);
    539         if (pCtl)
    540         {
    541             if (fHostClt)
    542             {
    543                 if (!vboxVBVAExHPCheckProcessCtlInternal(pCmdVbva, pCtl))
    544                 {
    545                     *ppbCmd = (uint8_t RT_UNTRUSTED_VOLATILE_GUEST *)pCtl; /* Note! pCtl is host data, so trusted */
    546                     *pcbCmd = sizeof (*pCtl);
    547                     return VBVAEXHOST_DATA_TYPE_HOSTCTL;
    548                 }
    549                 continue; /* Processed by vboxVBVAExHPCheckProcessCtlInternal, get next. */
    550             }
    551             *ppbCmd = (uint8_t RT_UNTRUSTED_VOLATILE_GUEST *)pCtl; /* Note! pCtl is host data, so trusted */
    552             *pcbCmd = sizeof (*pCtl);
    553             return VBVAEXHOST_DATA_TYPE_GUESTCTL;
    554         }
    555 
    556         if (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) <= VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
    557             return VBVAEXHOST_DATA_TYPE_NO_DATA;
    558 
    559         int rc = vboxVBVAExHPCmdGet(pCmdVbva, ppbCmd, pcbCmd);
    560         switch (rc)
    561         {
    562             case VINF_SUCCESS:
    563                 return VBVAEXHOST_DATA_TYPE_CMD;
    564             case VINF_EOF:
    565                 return VBVAEXHOST_DATA_TYPE_NO_DATA;
    566             case VINF_TRY_AGAIN:
    567                 RTThreadSleep(1);
    568                 continue;
    569             default:
    570                 /* This is something really unexpected, i.e. most likely the guest has written something incorrect to the VBVA buffer. */
    571                 WARN(("Warning: vboxVBVAExHPCmdGet returned unexpected status %Rrc\n", rc));
    572                 return VBVAEXHOST_DATA_TYPE_NO_DATA;
    573         }
    574     }
    575     /* not reached */
    576 }
    577 
    578 /**
    579  * Called by vboxVDMAWorkerThread to get the next command to process.
    580  * @thread VDMA
    581  */
    582 static VBVAEXHOST_DATA_TYPE VBoxVBVAExHPDataGet(struct VBVAEXHOSTCONTEXT *pCmdVbva,
    583                                                 uint8_t RT_UNTRUSTED_VOLATILE_GUEST **ppbCmd, uint32_t *pcbCmd)
    584 {
    585     VBVAEXHOST_DATA_TYPE enmType = vboxVBVAExHPDataGetInner(pCmdVbva, ppbCmd, pcbCmd);
    586     if (enmType == VBVAEXHOST_DATA_TYPE_NO_DATA)
    587     {
    588         vboxVBVAExHPHgEventClear(pCmdVbva);
    589         vboxVBVAExHPProcessorRelease(pCmdVbva);
    590 
    591         /*
    592          * We need to prevent a race between us clearing the flag and the command check/submission thread, i.e.
    593          * 1. we check the queue -> and it is empty
    594          * 2. submitter adds a command to the queue
    595          * 3. submitter checks "processing" -> and it is true, thus it does not submit a notification
    596          * 4. we clear the "processing" state
    597          * 5. -> here we need to re-check the queue state to ensure we do not lose the notification for the above command
    598          * 6. if the queue turns out to be non-empty, set the "processing" state back to "true"
    599          */
    600         int rc = vboxVBVAExHSProcessorAcquire(pCmdVbva);
    601         if (RT_SUCCESS(rc))
    602         {
    603             /* we are the processor now */
    604             enmType = vboxVBVAExHPDataGetInner(pCmdVbva, ppbCmd, pcbCmd);
    605             if (enmType == VBVAEXHOST_DATA_TYPE_NO_DATA)
    606             {
    607                 vboxVBVAExHPProcessorRelease(pCmdVbva);
    608                 return VBVAEXHOST_DATA_TYPE_NO_DATA;
    609             }
    610 
    611             vboxVBVAExHPHgEventSet(pCmdVbva);
    612         }
    613     }
    614 
    615     return enmType;
    616 }
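
The clear-then-recheck sequence spelled out in the comment above is a classic lost-wakeup guard. A reduced model of the same dance using C11 atomics; the helpers and globals here are hypothetical, not the VBox API:

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_int g_iState;  /* 0 = listening, 1 = processing */
    static atomic_int g_cQueued; /* pending commands              */

    static bool tryBecomeProcessor(void)
    {
        int iExpected = 0;
        return atomic_compare_exchange_strong(&g_iState, &iExpected, 1);
    }

    /* Returns true if the queue is really drained, false if work slipped
     * in while we were releasing the processor role (steps 4-6 above). */
    static bool drainDone(void)
    {
        atomic_store(&g_iState, 0);      /* step 4: clear "processing"  */
        if (tryBecomeProcessor())        /* step 5: re-check the queue  */
        {
            if (atomic_load(&g_cQueued) > 0)
                return false;            /* step 6: keep "processing"   */
            atomic_store(&g_iState, 0);
        }
        return true;
    }
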
    617 
    618 /**
    619  * Checks for pending VBVA command or (internal) control command.
    620  */
    621 DECLINLINE(bool) vboxVBVAExHSHasCommands(struct VBVAEXHOSTCONTEXT *pCmdVbva)
    622 {
    623     VBVABUFFER RT_UNTRUSTED_VOLATILE_GUEST *pVBVA = pCmdVbva->pVBVA;
    624     if (pVBVA)
    625     {
    626         uint32_t indexRecordFirst = pVBVA->indexRecordFirst;
    627         uint32_t indexRecordFree  = pVBVA->indexRecordFree;
    628         RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
    629 
    630         if (indexRecordFirst != indexRecordFree)
    631             return true;
    632     }
    633 
    634     return ASMAtomicReadU32(&pCmdVbva->u32cCtls) > 0;
    635 }
    636 
    637 /** Checks whether new commands are ready for processing.
    638  * @returns
    639  *   VINF_SUCCESS - there are commands in the queue, and the calling thread is now the processor (i.e. typically it would delegate processing to a worker thread)
    640  *   VINF_EOF - no commands in the queue
    641  *   VINF_ALREADY_INITIALIZED - another thread is already processing the commands
    642  *   VERR_INVALID_STATE - the VBVA is paused or pausing */
    643 static int VBoxVBVAExHSCheckCommands(struct VBVAEXHOSTCONTEXT *pCmdVbva)
    644 {
    645     int rc = vboxVBVAExHSProcessorAcquire(pCmdVbva);
    646     if (RT_SUCCESS(rc))
    647     {
    648         /* we are the processor now */
    649         if (vboxVBVAExHSHasCommands(pCmdVbva))
    650         {
    651             vboxVBVAExHPHgEventSet(pCmdVbva);
    652             return VINF_SUCCESS;
    653         }
    654 
    655         vboxVBVAExHPProcessorRelease(pCmdVbva);
    656         return VINF_EOF;
    657     }
    658     if (rc == VERR_SEM_BUSY)
    659         return VINF_ALREADY_INITIALIZED;
    660     return VERR_INVALID_STATE;
    661 }
    662 
    663 /**
    664  * Worker for vboxVDMAConstruct() that initializes the given VBVA host context.
    665  */
    666 static int VBoxVBVAExHSInit(struct VBVAEXHOSTCONTEXT *pCmdVbva)
    667 {
    668     RT_ZERO(*pCmdVbva);
    669     int rc = RTCritSectInit(&pCmdVbva->CltCritSect);
    670     if (RT_SUCCESS(rc))
    671     {
    672 # ifndef VBOXVDBG_MEMCACHE_DISABLE
    673         rc = RTMemCacheCreate(&pCmdVbva->CtlCache, sizeof (VBVAEXHOSTCTL),
    674                               0, /* size_t cbAlignment */
    675                               UINT32_MAX, /* uint32_t cMaxObjects */
    676                               NULL, /* PFNMEMCACHECTOR pfnCtor*/
    677                               NULL, /* PFNMEMCACHEDTOR pfnDtor*/
    678                               NULL, /* void *pvUser*/
    679                               0 /* uint32_t fFlags*/
    680                               );
    681         if (RT_SUCCESS(rc))
    682 # endif
    683         {
    684             RTListInit(&pCmdVbva->GuestCtlList);
    685             RTListInit(&pCmdVbva->HostCtlList);
    686             pCmdVbva->i32State       = VBVAEXHOSTCONTEXT_STATE_PROCESSING;
    687             pCmdVbva->i32EnableState = VBVAEXHOSTCONTEXT_ESTATE_DISABLED;
    688             return VINF_SUCCESS;
    689         }
    690 # ifndef VBOXVDBG_MEMCACHE_DISABLE
    691         WARN(("RTMemCacheCreate failed %Rrc\n", rc));
    692 # endif
    693     }
    694     else
    695         WARN(("RTCritSectInit failed %Rrc\n", rc));
    696 
    697     return rc;
    698 }
    699 
    700 /**
    701  * Checks if VBVA state is some form of enabled.
    702  */
    703 DECLINLINE(bool) VBoxVBVAExHSIsEnabled(struct VBVAEXHOSTCONTEXT *pCmdVbva)
    704 {
    705     return ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) >= VBVAEXHOSTCONTEXT_ESTATE_PAUSED;
    706 }
    707 
    708 /**
    709  * Checks if VBVA state is disabled.
    710  */
    711 DECLINLINE(bool) VBoxVBVAExHSIsDisabled(struct VBVAEXHOSTCONTEXT *pCmdVbva)
    712 {
    713     return ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) == VBVAEXHOSTCONTEXT_ESTATE_DISABLED;
    714 }
    715 
    716 /**
    717  * Worker for vdmaVBVAEnableProcess().
    718  *
    719  * @thread VDMA
    720  */
    721 static int VBoxVBVAExHSEnable(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVABUFFER RT_UNTRUSTED_VOLATILE_GUEST *pVBVA,
    722                               uint8_t *pbVRam, uint32_t cbVRam)
    723 {
    724     if (VBoxVBVAExHSIsEnabled(pCmdVbva))
    725     {
    726         WARN(("VBVAEx is enabled already\n"));
    727         return VERR_INVALID_STATE;
    728     }
    729 
    730     uintptr_t offVRam = (uintptr_t)pVBVA - (uintptr_t)pbVRam;
    731     AssertLogRelMsgReturn(offVRam < cbVRam - sizeof(*pVBVA), ("%#p cbVRam=%#x\n", offVRam, cbVRam), VERR_OUT_OF_RANGE);
    732     RT_UNTRUSTED_VALIDATED_FENCE();
    733 
    734     pCmdVbva->pVBVA     = pVBVA;
    735     pCmdVbva->cbMaxData = cbVRam - offVRam - RT_UOFFSETOF(VBVABUFFER, au8Data);
    736     pVBVA->hostFlags.u32HostEvents = 0;
    737     ASMAtomicWriteS32(&pCmdVbva->i32EnableState, VBVAEXHOSTCONTEXT_ESTATE_ENABLED);
    738     return VINF_SUCCESS;
    739 }
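
Before trusting the guest-mapped VBVABUFFER, VBoxVBVAExHSEnable() verifies that the guest-derived pointer actually lies inside the VRAM range. The containment test, reduced to plain C for illustration:

    #include <stdint.h>
    #include <stdbool.h>

    /* Containment check from VBoxVBVAExHSEnable, isolated: the structure
     * must lie wholly inside [pbVRam, pbVRam + cbVRam). If pv is below
     * pbVRam the unsigned subtraction wraps and the test fails, too. */
    static bool isInsideVRam(const void *pv, const uint8_t *pbVRam,
                             uint32_t cbVRam, uint32_t cbStruct)
    {
        uintptr_t offVRam = (uintptr_t)pv - (uintptr_t)pbVRam;
        return offVRam < cbVRam - cbStruct;
    }
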
    740 
    741 /**
    742  * Works the enable state.
    743  * @thread VDMA, CR, EMT, ...
    744  */
    745 static int VBoxVBVAExHSDisable(struct VBVAEXHOSTCONTEXT *pCmdVbva)
    746 {
    747     if (VBoxVBVAExHSIsDisabled(pCmdVbva))
    748         return VINF_SUCCESS;
    749 
    750     ASMAtomicWriteS32(&pCmdVbva->i32EnableState, VBVAEXHOSTCONTEXT_ESTATE_DISABLED);
    751     return VINF_SUCCESS;
    752 }
    753 
    754 /**
    755  * Worker for vboxVDMADestruct() and vboxVDMAConstruct().
    756  */
    757 static void VBoxVBVAExHSTerm(struct VBVAEXHOSTCONTEXT *pCmdVbva)
    758 {
    759     /* ensure the processor is stopped */
    760     Assert(pCmdVbva->i32State >= VBVAEXHOSTCONTEXT_STATE_LISTENING);
    761 
    762     /* ensure no one tries to submit the command */
    763     if (pCmdVbva->pVBVA)
    764         pCmdVbva->pVBVA->hostFlags.u32HostEvents = 0;
    765 
    766     Assert(RTListIsEmpty(&pCmdVbva->GuestCtlList));
    767     Assert(RTListIsEmpty(&pCmdVbva->HostCtlList));
    768 
    769     RTCritSectDelete(&pCmdVbva->CltCritSect);
    770 
    771 # ifndef VBOXVDBG_MEMCACHE_DISABLE
    772     RTMemCacheDestroy(pCmdVbva->CtlCache);
    773 # endif
    774 
    775     RT_ZERO(*pCmdVbva);
    776 }
    777 
    778 
    779 /**
    780  * Worker for vboxVBVAExHSSaveStateLocked().
    781  * @thread VDMA
    782  */
    783 static int vboxVBVAExHSSaveGuestCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL* pCtl, uint8_t* pu8VramBase, PSSMHANDLE pSSM)
    784 {
    785     RT_NOREF(pCmdVbva);
    786     int rc = SSMR3PutU32(pSSM, pCtl->enmType);
    787     AssertRCReturn(rc, rc);
    788     rc = SSMR3PutU32(pSSM, pCtl->u.cmd.cbCmd);
    789     AssertRCReturn(rc, rc);
    790     rc = SSMR3PutU32(pSSM, (uint32_t)((uintptr_t)pCtl->u.cmd.pvCmd - (uintptr_t)pu8VramBase));
    791     AssertRCReturn(rc, rc);
    792 
    793     return VINF_SUCCESS;
    794 }
    795 
    796 /**
    797  * Worker for VBoxVBVAExHSSaveState().
    798  * @thread VDMA
    799  */
    800 static int vboxVBVAExHSSaveStateLocked(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM)
    801 {
    802     if (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) != VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
    803     {
    804         WARN(("vbva not paused\n"));
    805         return VERR_INVALID_STATE;
    806     }
    807 
    808     int rc;
    809     VBVAEXHOSTCTL* pCtl;
    810     RTListForEach(&pCmdVbva->GuestCtlList, pCtl, VBVAEXHOSTCTL, Node)
    811     {
    812         rc = vboxVBVAExHSSaveGuestCtl(pCmdVbva, pCtl, pu8VramBase, pSSM);
    813         AssertRCReturn(rc, rc);
    814     }
    815 
    816     rc = SSMR3PutU32(pSSM, 0);
    817     AssertRCReturn(rc, rc);
    818 
    819     return VINF_SUCCESS;
    820 }
    821 
    822 /**
    823  * Handles VBVAEXHOSTCTL_TYPE_HH_SAVESTATE for vboxVDMACrHostCtlProcess, saving
    824  * state on the VDMA thread.
    825  *
    826  * @returns - same as VBoxVBVAExHSCheckCommands, or failure if saving the state fails
    827  * @thread VDMA
    828  */
    829 static int VBoxVBVAExHSSaveState(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM)
    830 {
    831     int rc = RTCritSectEnter(&pCmdVbva->CltCritSect);
    832     AssertRCReturn(rc, rc);
    833 
    834     rc = vboxVBVAExHSSaveStateLocked(pCmdVbva, pu8VramBase, pSSM);
    835     if (RT_FAILURE(rc))
    836         WARN(("vboxVBVAExHSSaveStateLocked failed %Rrc\n", rc));
    837 
    838     RTCritSectLeave(&pCmdVbva->CltCritSect);
    839     return rc;
    840 }
    841 
    842 
    843 /**
    844  * Worker for vboxVBVAExHSLoadStateLocked.
    845  * @retval VINF_EOF when there is nothing more to load.
    846  * @thread VDMA
    847  */
    848 static int vboxVBVAExHSLoadGuestCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM, uint32_t u32Version)
    849 {
    850     RT_NOREF(u32Version);
    851     uint32_t u32;
    852     int rc = SSMR3GetU32(pSSM, &u32);
    853     AssertLogRelRCReturn(rc, rc);
    854 
    855     if (!u32)
    856         return VINF_EOF;
    857 
    858     VBVAEXHOSTCTL *pHCtl = VBoxVBVAExHCtlCreate(pCmdVbva, (VBVAEXHOSTCTL_TYPE)u32);
    859     if (!pHCtl)
    860     {
    861         WARN(("VBoxVBVAExHCtlCreate failed\n"));
    862         return VERR_NO_MEMORY;
    863     }
    864 
    865     rc = SSMR3GetU32(pSSM, &u32);
    866     AssertLogRelRCReturn(rc, rc);
    867     pHCtl->u.cmd.cbCmd = u32;
    868 
    869     rc = SSMR3GetU32(pSSM, &u32);
    870     AssertLogRelRCReturn(rc, rc);
    871     pHCtl->u.cmd.pvCmd = pu8VramBase + u32;
    872 
    873     RTListAppend(&pCmdVbva->GuestCtlList, &pHCtl->Node);
    874     ++pCmdVbva->u32cCtls;
    875 
    876     return VINF_SUCCESS;
    877 }
    878 
    879 /**
    880  * Worker for VBoxVBVAExHSLoadState.
    881  * @thread VDMA
    882  */
    883 static int vboxVBVAExHSLoadStateLocked(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM, uint32_t u32Version)
    884 {
    885     if (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) != VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
    886     {
    887         WARN(("vbva not paused\n"));
    888         return VERR_INVALID_STATE;
    889     }
    890 
    891     int rc;
    892     do
    893     {
    894         rc = vboxVBVAExHSLoadGuestCtl(pCmdVbva, pu8VramBase, pSSM, u32Version);
    895         AssertLogRelRCReturn(rc, rc);
    896     } while (rc != VINF_EOF);
    897 
    898     return VINF_SUCCESS;
    899 }
    900 
    901 /**
    902  * Handles VBVAEXHOSTCTL_TYPE_HH_LOADSTATE for vboxVDMACrHostCtlProcess(),
    903  * loading state on the VDMA thread.
    904  *
    905  * @returns - same as VBoxVBVAExHSCheckCommands, or failure if loading the state fails
    906  * @thread VDMA
    907  */
    908 static int VBoxVBVAExHSLoadState(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM, uint32_t u32Version)
    909 {
    910     Assert(VGA_SAVEDSTATE_VERSION_3D <= u32Version);
    911     int rc = RTCritSectEnter(&pCmdVbva->CltCritSect);
    912     AssertRCReturn(rc, rc);
    913 
    914     rc = vboxVBVAExHSLoadStateLocked(pCmdVbva, pu8VramBase, pSSM, u32Version);
    915     if (RT_FAILURE(rc))
    916         WARN(("vboxVBVAExHSLoadStateLocked failed %Rrc\n", rc));
    917 
    918     RTCritSectLeave(&pCmdVbva->CltCritSect);
    919     return rc;
    920 }
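
The save/load pair above serializes each queued guest control as three u32 fields (type, command size, command offset relative to the VRAM base) and ends the list with a zero type; storing pvCmd as a VRAM offset is what lets the load path rebase the pointer. A hypothetical reader for that layout, with plain stdio standing in for the SSM stream:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical reader: per entry a u32 triplet (type, cbCmd,
     * offset-in-VRAM); a zero type terminates the list. */
    static int loadCtls(FILE *pFile, uint8_t *pbVRamBase)
    {
        for (;;)
        {
            uint32_t au32[3];
            if (fread(&au32[0], sizeof(uint32_t), 1, pFile) != 1)
                return -1;
            if (au32[0] == 0)            /* terminator record */
                return 0;
            if (fread(&au32[1], sizeof(uint32_t), 2, pFile) != 2)
                return -1;
            /* queue a control of type au32[0], size au32[1], with its
             * payload rebased to pbVRamBase + au32[2]. */
            (void)pbVRamBase;
        }
    }
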
    921 
    922 
    923 
    924 /**
    925  * Queues a control command to the VDMA worker thread.
    926  *
    927  * The @a enmSource argument decides which list (guest/host) it's queued on.
    928  *
    929  */
    930 static int VBoxVBVAExHCtlSubmit(VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL *pCtl, VBVAEXHOSTCTL_SOURCE enmSource,
    931                                 PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
    932 {
    933     int rc;
    934     if (VBoxVBVAExHSIsEnabled(pCmdVbva))
    935     {
    936         pCtl->pfnComplete = pfnComplete;
    937         pCtl->pvComplete  = pvComplete;
    938 
    939         rc = RTCritSectEnter(&pCmdVbva->CltCritSect);
    940         if (RT_SUCCESS(rc))
    941         {
    942             /* Recheck that we're enabled after we've got the lock. */
    943             if (VBoxVBVAExHSIsEnabled(pCmdVbva))
    944             {
    945                 /* Queue it. */
    946                 if (enmSource > VBVAEXHOSTCTL_SOURCE_GUEST)
    947                     RTListAppend(&pCmdVbva->HostCtlList, &pCtl->Node);
    948                 else
    949                     RTListAppend(&pCmdVbva->GuestCtlList, &pCtl->Node);
    950                 ASMAtomicIncU32(&pCmdVbva->u32cCtls);
    951 
    952                 RTCritSectLeave(&pCmdVbva->CltCritSect);
    953 
    954                 /* Work the state or something. */
    955                 rc = VBoxVBVAExHSCheckCommands(pCmdVbva);
    956             }
    957             else
    958             {
    959                 RTCritSectLeave(&pCmdVbva->CltCritSect);
    960                 Log(("cmd vbva not enabled (race)\n"));
    961                 rc = VERR_INVALID_STATE;
    962             }
    963         }
    964         else
    965             AssertRC(rc);
    966     }
    967     else
    968     {
    969         Log(("cmd vbva not enabled\n"));
    970         rc = VERR_INVALID_STATE;
    971     }
    972     return rc;
    973 }
    974 
    975 /**
    976  * Submits the control command and notifies the VDMA thread.
    977  */
    978 static int vdmaVBVACtlSubmit(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL *pCtl, VBVAEXHOSTCTL_SOURCE enmSource,
    979                              PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
    980 {
    981     int rc = VBoxVBVAExHCtlSubmit(&pVdma->CmdVbva, pCtl, enmSource, pfnComplete, pvComplete);
    982     if (RT_SUCCESS(rc))
    983     {
    984         if (rc == VINF_SUCCESS)
    985             return VBoxVDMAThreadEventNotify(&pVdma->Thread);
    986         Assert(rc == VINF_ALREADY_INITIALIZED);
    987     }
    988     else
    989         Log(("VBoxVBVAExHCtlSubmit failed %Rrc\n", rc));
    990 
    991     return rc;
    992 }
    993 
    994 
    995 /**
    996  * Call VDMA thread creation notification callback.
    997  */
    998 void VBoxVDMAThreadNotifyConstructSucceeded(PVBOXVDMATHREAD pThread, void *pvThreadContext)
    999 {
    1000     Assert(pThread->u32State == VBOXVDMATHREAD_STATE_CREATING);
    1001     PFNVBOXVDMATHREAD_CHANGED pfnChanged = pThread->pfnChanged;
    1002     void                     *pvChanged  = pThread->pvChanged;
    1003 
    1004     pThread->pfnChanged = NULL;
    1005     pThread->pvChanged  = NULL;
    1006 
    1007     ASMAtomicWriteU32(&pThread->u32State, VBOXVDMATHREAD_STATE_CREATED);
    1008 
    1009     if (pfnChanged)
    1010         pfnChanged(pThread, VINF_SUCCESS, pvThreadContext, pvChanged);
    1011 }
    1012 
    1013 /**
    1014  * Call VDMA thread termination notification callback.
    1015  */
    1016 void VBoxVDMAThreadNotifyTerminatingSucceeded(PVBOXVDMATHREAD pThread, void *pvThreadContext)
    1017 {
    1018     Assert(pThread->u32State == VBOXVDMATHREAD_STATE_TERMINATING);
    1019     PFNVBOXVDMATHREAD_CHANGED pfnChanged = pThread->pfnChanged;
    1020     void                     *pvChanged  = pThread->pvChanged;
    1021 
    1022     pThread->pfnChanged = NULL;
    1023     pThread->pvChanged  = NULL;
    1024 
    1025     if (pfnChanged)
    1026         pfnChanged(pThread, VINF_SUCCESS, pvThreadContext, pvChanged);
    1027 }
    1028 
    1029 /**
    1030  * Check if VDMA thread is terminating.
    1031  */
    1032 DECLINLINE(bool) VBoxVDMAThreadIsTerminating(PVBOXVDMATHREAD pThread)
    1033 {
    1034     return ASMAtomicUoReadU32(&pThread->u32State) == VBOXVDMATHREAD_STATE_TERMINATING;
    1035 }
    1036 
    1037 /**
    1038  * Init VDMA thread.
    1039  */
    1040 void VBoxVDMAThreadInit(PVBOXVDMATHREAD pThread)
    1041 {
    1042     RT_ZERO(*pThread);
    1043     pThread->u32State = VBOXVDMATHREAD_STATE_TERMINATED;
    1044 }
    1045 
    1046 /**
    1047  * Clean up VDMA thread.
    1048  */
    1049 int VBoxVDMAThreadCleanup(PVBOXVDMATHREAD pThread)
    1050 {
    1051     uint32_t u32State = ASMAtomicUoReadU32(&pThread->u32State);
    1052     switch (u32State)
    1053     {
    1054         case VBOXVDMATHREAD_STATE_TERMINATED:
    1055             return VINF_SUCCESS;
    1056 
    1057         case VBOXVDMATHREAD_STATE_TERMINATING:
    1058         {
    1059             int rc = RTThreadWait(pThread->hWorkerThread, RT_INDEFINITE_WAIT, NULL);
    1060             if (RT_SUCCESS(rc))
    1061             {
    1062                 RTSemEventDestroy(pThread->hEvent);
    1063                 pThread->hEvent        = NIL_RTSEMEVENT;
    1064                 pThread->hWorkerThread = NIL_RTTHREAD;
    1065                 ASMAtomicWriteU32(&pThread->u32State, VBOXVDMATHREAD_STATE_TERMINATED);
    1066             }
    1067             else
    1068                 WARN(("RTThreadWait failed %Rrc\n", rc));
    1069             return rc;
    1070         }
    1071 
    1072         default:
    1073             WARN(("invalid state"));
    1074             return VERR_INVALID_STATE;
    1075     }
    1076 }
    1077 
    1078 /**
    1079  * Start VDMA thread.
    1080  */
    1081 int VBoxVDMAThreadCreate(PVBOXVDMATHREAD pThread, PFNRTTHREAD pfnThread, void *pvThread,
    1082                          PFNVBOXVDMATHREAD_CHANGED pfnCreated, void *pvCreated)
    1083 {
    1084     int rc = VBoxVDMAThreadCleanup(pThread);
    1085     if (RT_SUCCESS(rc))
    1086     {
    1087         rc = RTSemEventCreate(&pThread->hEvent);
    1088         pThread->u32State   = VBOXVDMATHREAD_STATE_CREATING;
    1089         pThread->pfnChanged = pfnCreated;
    1090         pThread->pvChanged  = pvCreated;
    1091         rc = RTThreadCreate(&pThread->hWorkerThread, pfnThread, pvThread, 0, RTTHREADTYPE_IO, RTTHREADFLAGS_WAITABLE, "VDMA");
    1092         if (RT_SUCCESS(rc))
    1093             return VINF_SUCCESS;
    1094 
    1095         WARN(("RTThreadCreate failed %Rrc\n", rc));
    1096         RTSemEventDestroy(pThread->hEvent);
    1097         pThread->hEvent        = NIL_RTSEMEVENT;
    1098         pThread->hWorkerThread = NIL_RTTHREAD;
    1099         pThread->u32State      = VBOXVDMATHREAD_STATE_TERMINATED;
    1100     }
    1101     else
    1102         WARN(("VBoxVDMAThreadCleanup failed %Rrc\n", rc));
    1103     return rc;
    1104 }
    1105 
    1106 /**
    1107  * Notifies the VDMA thread.
    1108  * @thread !VDMA
    1109  */
    1110 static int VBoxVDMAThreadEventNotify(PVBOXVDMATHREAD pThread)
    1111 {
    1112     int rc = RTSemEventSignal(pThread->hEvent);
    1113     AssertRC(rc);
    1114     return rc;
    1115 }
    1116 
    1117 /**
    1118  * State worker for VBVAEXHOSTCTL_TYPE_HH_ON_HGCM_UNLOAD &
    1119  * VBVAEXHOSTCTL_TYPE_GHH_DISABLE in vboxVDMACrHostCtlProcess(), and
    1120  * VBVAEXHOSTCTL_TYPE_GHH_DISABLE in vboxVDMACrGuestCtlProcess().
    1121  *
    1122  * @thread VDMA
    1123  */
    1124 static int VBoxVDMAThreadTerm(PVBOXVDMATHREAD pThread, PFNVBOXVDMATHREAD_CHANGED pfnTerminated, void *pvTerminated, bool fNotify)
    1125 {
    1126     for (;;)
    1127     {
    1128         uint32_t u32State = ASMAtomicUoReadU32(&pThread->u32State);
    1129         switch (u32State)
    1130         {
    1131             case VBOXVDMATHREAD_STATE_CREATED:
    1132                 pThread->pfnChanged = pfnTerminated;
    1133                 pThread->pvChanged  = pvTerminated;
    1134                 ASMAtomicWriteU32(&pThread->u32State, VBOXVDMATHREAD_STATE_TERMINATING);
    1135                 if (fNotify)
    1136                 {
    1137                     int rc = VBoxVDMAThreadEventNotify(pThread);
    1138                     AssertRC(rc);
    1139                 }
    1140                 return VINF_SUCCESS;
    1141 
    1142             case VBOXVDMATHREAD_STATE_TERMINATING:
    1143             case VBOXVDMATHREAD_STATE_TERMINATED:
    1144                 WARN(("thread is marked for termination or already terminated\n"));
    1145                 return VERR_INVALID_STATE;
    1146 
    1147             case VBOXVDMATHREAD_STATE_CREATING:
    1148                 /* wait till the thread creation is completed */
    1149                 WARN(("concurrent thread create/destroy\n"));
    1150                 RTThreadYield();
    1151                 continue;
    1152 
    1153             default:
    1154                 WARN(("invalid state"));
    1155                 return VERR_INVALID_STATE;
    1156         }
    1157     }
    1158 }
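
Taken together, VBoxVDMAThreadCreate/Term/Cleanup implement a small lifecycle state machine: TERMINATED -> CREATING -> CREATED -> TERMINATING -> TERMINATED, where termination may only be requested from CREATED, a caller racing an in-flight creation yields and retries, and Cleanup does the actual RTThreadWait join. Sketched with a hypothetical enum:

    /* Hypothetical reduction of the thread lifecycle above. */
    typedef enum
    {
        ST_TERMINATED,  /* initial and final state             */
        ST_CREATING,    /* RTThreadCreate issued, not yet up   */
        ST_CREATED,     /* worker running                      */
        ST_TERMINATING  /* shutdown requested, not yet joined  */
    } THREADSTATE;

    static int mayRequestTerm(THREADSTATE enm)    { return enm == ST_CREATED; }
    static int mustYieldAndRetry(THREADSTATE enm) { return enm == ST_CREATING; }
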
    1159 
    1160 
    1161 
    1162 /*
    1163  *
    1164  *
    1165  * vboxVDMACrCtlPost / vboxVDMACrCtlPostAsync
    1166  * vboxVDMACrCtlPost / vboxVDMACrCtlPostAsync
    1167  * vboxVDMACrCtlPost / vboxVDMACrCtlPostAsync
    1168  *
    1169  *
    1170  */
    1171 
    1172 /** Completion callback for vboxVDMACrCtlPostAsync(). */
    1173 typedef DECLCALLBACK(void) FNVBOXVDMACRCTL_CALLBACK(PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, void* pvContext);
    1174 /** Pointer to a vboxVDMACrCtlPostAsync completion callback. */
    1175 typedef FNVBOXVDMACRCTL_CALLBACK *PFNVBOXVDMACRCTL_CALLBACK;
    1176 
    1177 /**
    1178  * Private wrapper around VBOXVDMACMD_CHROMIUM_CTL.
    1179  */
    1180 typedef struct VBOXVDMACMD_CHROMIUM_CTL_PRIVATE
    1181 {
    1182     uint32_t                    uMagic; /**< VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_MAGIC */
    1183     uint32_t                    cRefs;
    1184     int32_t volatile            rc;
    1185     PFNVBOXVDMACRCTL_CALLBACK   pfnCompletion;
    1186     void                       *pvCompletion;
    1187     RTSEMEVENT                  hEvtDone;
    1188     VBOXVDMACMD_CHROMIUM_CTL    Cmd;
    1189 } VBOXVDMACMD_CHROMIUM_CTL_PRIVATE, *PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE;
    1190 /** Magic number for VBOXVDMACMD_CHROMIUM_CTL_PRIVATE (Michael Wolff). */
    1191 # define VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_MAGIC         UINT32_C(0x19530827)
    1192 
    1193 /** Converts from a VBOXVDMACMD_CHROMIUM_CTL::Cmd pointer to a pointer to the
    1194  * containing structure. */
    1195 # define VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(_p)  RT_FROM_MEMBER((_p), VBOXVDMACMD_CHROMIUM_CTL_PRIVATE, Cmd)
    1196 
    1197 /**
    1198  * Creates a VBOXVDMACMD_CHROMIUM_CTL_PRIVATE instance.
    1199  */
    1200 static PVBOXVDMACMD_CHROMIUM_CTL vboxVDMACrCtlCreate(VBOXVDMACMD_CHROMIUM_CTL_TYPE enmCmd, uint32_t cbCmd)
    1201 {
    1202     PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr;
    1203     pHdr = (PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE)RTMemAllocZ(cbCmd + RT_OFFSETOF(VBOXVDMACMD_CHROMIUM_CTL_PRIVATE, Cmd));
    1204     if (pHdr)
    1205     {
    1206         pHdr->uMagic      = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_MAGIC;
    1207         pHdr->cRefs       = 1;
    1208         pHdr->rc          = VERR_NOT_IMPLEMENTED;
    1209         pHdr->hEvtDone    = NIL_RTSEMEVENT;
    1210         pHdr->Cmd.enmType = enmCmd;
    1211         pHdr->Cmd.cbCmd   = cbCmd;
    1212         return &pHdr->Cmd;
    1213     }
    1214     return NULL;
    1215 }
    1216 
    1217 /**
    1218  * Releases a reference to a VBOXVDMACMD_CHROMIUM_CTL_PRIVATE instance.
    1219  */
    1220 DECLINLINE(void) vboxVDMACrCtlRelease(PVBOXVDMACMD_CHROMIUM_CTL pCmd)
    1221 {
    1222     PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
    1223     Assert(pHdr->uMagic == VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_MAGIC);
    1224 
    1225     uint32_t cRefs = ASMAtomicDecU32(&pHdr->cRefs);
    1226     if (!cRefs)
    1227     {
    1228         pHdr->uMagic = ~VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_MAGIC;
    1229         if (pHdr->hEvtDone != NIL_RTSEMEVENT)
    1230         {
    1231             RTSemEventDestroy(pHdr->hEvtDone);
    1232             pHdr->hEvtDone = NIL_RTSEMEVENT;
    1233         }
    1234         RTMemFree(pHdr);
    1235     }
    1236 }
    1237 
    1238 /**
    1239  * Retains a reference to a VBOXVDMACMD_CHROMIUM_CTL_PRIVATE instance.
    1240  */
    1241 DECLINLINE(void) vboxVDMACrCtlRetain(PVBOXVDMACMD_CHROMIUM_CTL pCmd)
    1242 {
    1243     PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
    1244     Assert(pHdr->uMagic == VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_MAGIC);
    1245 
    1246     uint32_t cRefs = ASMAtomicIncU32(&pHdr->cRefs);
    1247     Assert(cRefs > 1);
    1248     Assert(cRefs < _1K);
    1249     RT_NOREF_PV(cRefs);
    1250 }
    1251 
    1252 /**
    1253  * Gets the result from our private chromium control command.
    1254  *
    1255  * @returns status code.
    1256  * @param   pCmd                The command.
    1257  */
    1258 DECLINLINE(int) vboxVDMACrCtlGetRc(PVBOXVDMACMD_CHROMIUM_CTL pCmd)
    1259 {
    1260     PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
    1261     Assert(pHdr->uMagic == VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_MAGIC);
    1262     return pHdr->rc;
    1263 }
    1264 
    1265 /**
    1266  * @interface_method_impl{PDMIDISPLAYVBVACALLBACKS,pfnCrHgsmiControlCompleteAsync}
    1267  *
    1268  * @note Some indirect completion magic, you gotta love this code!
    1269  */
    1270 DECLCALLBACK(int) vboxVDMACrHgsmiControlCompleteAsync(PPDMIDISPLAYVBVACALLBACKS pInterface, PVBOXVDMACMD_CHROMIUM_CTL pCmd, int rc)
    1271 {
    1272     PVGASTATE                           pVGAState = PPDMIDISPLAYVBVACALLBACKS_2_PVGASTATE(pInterface);
    1273     PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE   pHdr      = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
    1274     Assert(pHdr->uMagic == VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_MAGIC);
    1275 
    1276     pHdr->rc = rc;
    1277     if (pHdr->pfnCompletion)
    1278         pHdr->pfnCompletion(pVGAState, pCmd, pHdr->pvCompletion);
    1279     return VINF_SUCCESS;
    1280 }
    1281 
    1282 /**
    1283  * @callback_method_impl{FNCRCTLCOMPLETION,
    1284  *      Completion callback for vboxVDMACrCtlPost. }
    1285  */
    1286 static DECLCALLBACK(void) vboxVDMACrCtlCbSetEvent(PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, void *pvContext)
    1287 {
    1288     PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = (PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE)pvContext;
    1289     Assert(pHdr == VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd));
    1290     Assert(pHdr->uMagic == VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_MAGIC);
    1291     RT_NOREF(pVGAState, pCmd);
    1292 
    1293     int rc = RTSemEventSignal(pHdr->hEvtDone);
    1294     AssertRC(rc);
    1295 
    1296     vboxVDMACrCtlRelease(&pHdr->Cmd);
    1297 }
    1298 
    1299 /**
    1300  * Worker for vboxVDMACrCtlPost().
    1301  */
    1302 static int vboxVDMACrCtlPostAsync(PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, uint32_t cbCmd,
    1303                                   PFNVBOXVDMACRCTL_CALLBACK pfnCompletion, void *pvCompletion)
    1304 {
    1305     if (   pVGAState->pDrv
    1306         && pVGAState->pDrv->pfnCrHgsmiControlProcess)
    1307     {
    1308         PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
    1309         pHdr->pfnCompletion = pfnCompletion;
    1310         pHdr->pvCompletion  = pvCompletion;
    1311         pVGAState->pDrv->pfnCrHgsmiControlProcess(pVGAState->pDrv, pCmd, cbCmd);
    1312         return VINF_SUCCESS;
    1313     }
    1314     return VERR_NOT_SUPPORTED;
    1315 }
    1316 
    1317 /**
    1318  * Posts a control command and waits for its completion.
    1319  */
    1320 static int vboxVDMACrCtlPost(PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, uint32_t cbCmd)
    1321 {
    1322     PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
    1323 
    1324     /* Allocate the semaphore. */
    1325     Assert(pHdr->hEvtDone == NIL_RTSEMEVENT);
    1326     int rc = RTSemEventCreate(&pHdr->hEvtDone);
    1327     AssertRCReturn(rc, rc);
    1328 
    1329     /* Grab a reference for the completion routine. */
    1330     vboxVDMACrCtlRetain(&pHdr->Cmd);
    1331 
    1332     /* Submit and wait for it. */
    1333     rc = vboxVDMACrCtlPostAsync(pVGAState, pCmd, cbCmd, vboxVDMACrCtlCbSetEvent, pHdr);
    1334     if (RT_SUCCESS(rc))
    1335         rc = RTSemEventWaitNoResume(pHdr->hEvtDone, RT_INDEFINITE_WAIT);
    1336     else
    1337     {
    1338         if (rc != VERR_NOT_SUPPORTED)
    1339             AssertRC(rc);
    1340         vboxVDMACrCtlRelease(pCmd);
    1341     }
    1342     return rc;
    1343 }
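
vboxVDMACrCtlPost() is the usual sync-over-async shape: take an extra reference on behalf of the completion callback, submit, then block on the event, so the command object survives no matter whether the waiter or the callback finishes last. The reference dance, reduced to a hypothetical type:

    #include <stdatomic.h>
    #include <stdlib.h>

    /* Hypothetical reduction of the retain-for-completion pattern above. */
    typedef struct JOB { atomic_uint cRefs; } JOB;

    static JOB *jobCreate(void)
    {
        JOB *pJob = (JOB *)calloc(1, sizeof(*pJob));
        if (pJob)
            atomic_store(&pJob->cRefs, 1);           /* waiter's reference  */
        return pJob;
    }

    static void jobRelease(JOB *pJob)
    {
        if (atomic_fetch_sub(&pJob->cRefs, 1) == 1)  /* last one frees      */
            free(pJob);
    }

    /* Submit path (pseudocode): one reference for the waiter, one for the
     * completion callback, so whichever side finishes last frees the job:
     *   atomic_fetch_add(&pJob->cRefs, 1);
     *   submitAsync(pJob);  waitForEvent(pJob);  jobRelease(pJob);        */
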
    1344 
    1345 
    1346 /**
    1347  * Structure for passing data between vboxVDMACrHgcmSubmitSync() and the
    1348  * completion routine vboxVDMACrHgcmSubmitSyncCompletion().
    1349  */
    1350 typedef struct VDMA_VBVA_CTL_CYNC_COMPLETION
    1351 {
    1352     int volatile rc;
    1353     RTSEMEVENT hEvent;
    1354 } VDMA_VBVA_CTL_CYNC_COMPLETION;
    1355 
    1356 /**
    1357  * @callback_method_impl{FNCRCTLCOMPLETION,
    1358  *      Completion callback for vboxVDMACrHgcmSubmitSync() that signals the
    1359  *      waiting thread.}
    1360  */
    1361 static DECLCALLBACK(void) vboxVDMACrHgcmSubmitSyncCompletion(struct VBOXCRCMDCTL *pCmd, uint32_t cbCmd, int rc, void *pvCompletion)
    1362 {
    1363     VDMA_VBVA_CTL_CYNC_COMPLETION *pData = (VDMA_VBVA_CTL_CYNC_COMPLETION*)pvCompletion;
    1364     pData->rc = rc;
    1365     rc = RTSemEventSignal(pData->hEvent);
    1366     AssertLogRelRC(rc);
    1367 
    1368     RT_NOREF(pCmd, cbCmd);
    1369 }
    1370 
    1371 /**
    1372  * Worker for vboxVDMACrHgcmHandleEnable() and vdmaVBVAEnableProcess() that
    1373  * works pVGAState->pDrv->pfnCrHgcmCtlSubmit.
    1374  *
    1375  * @thread VDMA
    1376  */
    1377 static int vboxVDMACrHgcmSubmitSync(struct VBOXVDMAHOST *pVdma, VBOXCRCMDCTL* pCtl, uint32_t cbCtl)
    1378 {
    1379     VDMA_VBVA_CTL_CYNC_COMPLETION Data;
    1380     Data.rc = VERR_NOT_IMPLEMENTED;
    1381     int rc = RTSemEventCreate(&Data.hEvent);
    1382     if (!RT_SUCCESS(rc))
    1383     {
    1384         WARN(("RTSemEventCreate failed %Rrc\n", rc));
    1385         return rc;
    1386     }
    1387 
    1388     pCtl->CalloutList.List.pNext = NULL;
    1389 
    1390     PVGASTATE pVGAState = pVdma->pVGAState;
    1391     rc = pVGAState->pDrv->pfnCrHgcmCtlSubmit(pVGAState->pDrv, pCtl, cbCtl, vboxVDMACrHgcmSubmitSyncCompletion, &Data);
    1392     if (RT_SUCCESS(rc))
    1393     {
    1394         rc = RTSemEventWait(Data.hEvent, RT_INDEFINITE_WAIT);
    1395         if (RT_SUCCESS(rc))
    1396         {
    1397             rc = Data.rc;
    1398             if (!RT_SUCCESS(rc))
    1399             {
    1400                 WARN(("pfnCrHgcmCtlSubmit command failed %Rrc\n", rc));
    1401             }
    1402 
    1403         }
    1404         else
    1405             WARN(("RTSemEventWait failed %Rrc\n", rc));
    1406     }
    1407     else
    1408         WARN(("pfnCrHgcmCtlSubmit failed %Rrc\n", rc));
    1409 
    1410 
    1411     RTSemEventDestroy(Data.hEvent);
    1412 
    1413     return rc;
    1414 }
    1415 
    1416 
    1417 /**
    1418  * Worker for vboxVDMAReset().
    1419  */
    1420 static int vdmaVBVACtlDisableSync(PVBOXVDMAHOST pVdma)
    1421 {
    1422     VBVAEXHOSTCTL HCtl;
    1423     RT_ZERO(HCtl);
    1424     HCtl.enmType = VBVAEXHOSTCTL_TYPE_GHH_DISABLE;
    1425     int rc = vdmaVBVACtlSubmitSync(pVdma, &HCtl, VBVAEXHOSTCTL_SOURCE_HOST);
    1426     if (RT_SUCCESS(rc))
    1427         vgaUpdateDisplayAll(pVdma->pVGAState, /* fFailOnResize = */ false);
    1428     else
    1429         Log(("vdmaVBVACtlSubmitSync failed %Rrc\n", rc));
    1430     return rc;
    1431 }
    1432 
    1433 
    1434 /**
    1435  * Used by vboxVDMACrHgcmNotifyTerminatingCb() and called by
    1436  * crVBoxServerCrCmdDisablePostProcess() during crServerTearDown() to drain
    1437  * the remaining host control commands one by one.
    1438  */
    1439 static DECLCALLBACK(uint8_t *)
    1440 vboxVDMACrHgcmHandleEnableRemainingHostCommand(HVBOXCRCMDCTL_REMAINING_HOST_COMMAND hClient, uint32_t *pcbCtl, int prevCmdRc)
    1441 {
    1442     struct VBOXVDMAHOST *pVdma = hClient;
    1443 
    1444     if (!pVdma->pCurRemainingHostCtl)
    1445         VBoxVBVAExHSDisable(&pVdma->CmdVbva); /* disable VBVA; all subsequent host commands will go the HGCM way */
    1446     else
    1447         VBoxVBVAExHPDataCompleteCtl(&pVdma->CmdVbva, pVdma->pCurRemainingHostCtl, prevCmdRc);
    1448 
    1449     pVdma->pCurRemainingHostCtl = VBoxVBVAExHPCheckHostCtlOnDisable(&pVdma->CmdVbva);
    1450     if (pVdma->pCurRemainingHostCtl)
    1451     {
    1452         *pcbCtl = pVdma->pCurRemainingHostCtl->u.cmd.cbCmd;
    1453         return (uint8_t *)pVdma->pCurRemainingHostCtl->u.cmd.pvCmd;
    1454     }
    1455 
    1456     *pcbCtl = 0;
    1457     return NULL;
    1458 }
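
/*
 * For illustration only -- the callback above is a pull-style iterator: the
 * consumer keeps calling it until it returns NULL, passing in the status of
 * the previously returned command each time.  A hedged sketch of the consumer
 * side (hRHCmd/exampleProcessHostCtl are hypothetical names):
 *
 *     uint32_t cbCtl;
 *     int      rcPrev = VINF_SUCCESS;
 *     uint8_t *pbCtl;
 *     while ((pbCtl = pfnRHCmd(hRHCmd, &cbCtl, rcPrev)) != NULL)
 *         rcPrev = exampleProcessHostCtl(pbCtl, cbCtl);
 */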
    1459 
    1460 /**
    1461  * Called by crServerTearDown().
    1462  */
    1463 static DECLCALLBACK(void) vboxVDMACrHgcmNotifyTerminatingDoneCb(HVBOXCRCMDCTL_NOTIFY_TERMINATING hClient)
    1464 {
    1465 # ifdef VBOX_STRICT
    1466     struct VBOXVDMAHOST *pVdma = hClient;
    1467     Assert(pVdma->CmdVbva.i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
    1468     Assert(pVdma->Thread.u32State == VBOXVDMATHREAD_STATE_TERMINATING);
    1469 # else
    1470     RT_NOREF(hClient);
    1471 # endif
    1472 }
    1473 
    1474 /**
    1475  * Called by crServerTearDown().
    1476  */
    1477 static DECLCALLBACK(int) vboxVDMACrHgcmNotifyTerminatingCb(HVBOXCRCMDCTL_NOTIFY_TERMINATING hClient,
    1478                                                            VBOXCRCMDCTL_HGCMENABLE_DATA *pHgcmEnableData)
    1479 {
    1480     struct VBOXVDMAHOST *pVdma = hClient;
    1481 
    1482     VBVAEXHOSTCTL HCtl;
    1483     RT_ZERO(HCtl);
    1484     HCtl.enmType = VBVAEXHOSTCTL_TYPE_HH_ON_HGCM_UNLOAD;
    1485     int rc = vdmaVBVACtlSubmitSync(pVdma, &HCtl, VBVAEXHOSTCTL_SOURCE_HOST);
    1486 
    1487     pHgcmEnableData->hRHCmd   = pVdma;
    1488     pHgcmEnableData->pfnRHCmd = vboxVDMACrHgcmHandleEnableRemainingHostCommand;
    1489 
    1490     if (rc == VERR_INVALID_STATE)
    1491         rc = VINF_SUCCESS;
    1492     else if (RT_FAILURE(rc))
    1493         WARN(("vdmaVBVACtlSubmitSync failed %Rrc\n", rc));
    1494 
    1495     return rc;
    1496 }
    1497 
    1498 /**
    1499  * Worker for vdmaVBVAEnableProcess() and vdmaVBVADisableProcess().
    1500  *
    1501  * @thread VDMA
    1502  */
    1503 static int vboxVDMACrHgcmHandleEnable(struct VBOXVDMAHOST *pVdma)
    1504 {
    1505     VBOXCRCMDCTL_ENABLE Enable;
    1506     RT_ZERO(Enable);
    1507     Enable.Hdr.enmType   = VBOXCRCMDCTL_TYPE_ENABLE;
    1508     Enable.Data.hRHCmd   = pVdma;
    1509     Enable.Data.pfnRHCmd = vboxVDMACrHgcmHandleEnableRemainingHostCommand;
    1510 
    1511     int rc = vboxVDMACrHgcmSubmitSync(pVdma, &Enable.Hdr, sizeof (Enable));
    1512     Assert(!pVdma->pCurRemainingHostCtl);
    1513     if (RT_SUCCESS(rc))
    1514     {
    1515         Assert(!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva));
    1516         return VINF_SUCCESS;
    1517     }
    1518 
    1519     Assert(VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva));
    1520     WARN(("vboxVDMACrHgcmSubmitSync failed %Rrc\n", rc));
    1521     return rc;
    1522 }
    1523 
    1524 /**
    1525  * Handles VBVAEXHOSTCTL_TYPE_GHH_ENABLE and VBVAEXHOSTCTL_TYPE_GHH_ENABLE_PAUSED
    1526  * for vboxVDMACrGuestCtlProcess().
    1527  *
    1528  * @thread VDMA
    1529  */
    1530 static int vdmaVBVAEnableProcess(struct VBOXVDMAHOST *pVdma, uint32_t u32Offset)
    1531 {
    1532     if (VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
    1533     {
    1534         WARN(("vdma VBVA is already enabled\n"));
    1535         return VERR_INVALID_STATE;
    1536     }
    1537 
    1538     VBVABUFFER RT_UNTRUSTED_VOLATILE_GUEST *pVBVA
    1539         = (VBVABUFFER RT_UNTRUSTED_VOLATILE_GUEST *)HGSMIOffsetToPointerHost(pVdma->pHgsmi, u32Offset);
    1540     if (!pVBVA)
    1541     {
    1542         WARN(("invalid offset %d (%#x)\n", u32Offset, u32Offset));
    1543         return VERR_INVALID_PARAMETER;
    1544     }
    1545 
    1546     int rc = VBoxVBVAExHSEnable(&pVdma->CmdVbva, pVBVA, pVdma->pVGAState->vram_ptrR3, pVdma->pVGAState->vram_size);
    1547     if (RT_SUCCESS(rc))
    1548     {
    1549         if (!pVdma->CrSrvInfo.pfnEnable)
    1550         {
    1551             /* "HGCM-less" mode. Everything is initialized. */
    1552             return VINF_SUCCESS;
    1553         }
    1554 
    1555         VBOXCRCMDCTL_DISABLE Disable;
    1556         Disable.Hdr.enmType            = VBOXCRCMDCTL_TYPE_DISABLE;
    1557         Disable.Data.hNotifyTerm       = pVdma;
    1558         Disable.Data.pfnNotifyTerm     = vboxVDMACrHgcmNotifyTerminatingCb;
    1559         Disable.Data.pfnNotifyTermDone = vboxVDMACrHgcmNotifyTerminatingDoneCb;
    1560         rc = vboxVDMACrHgcmSubmitSync(pVdma, &Disable.Hdr, sizeof (Disable));
    1561         if (RT_SUCCESS(rc))
    1562         {
    1563             PVGASTATE pVGAState = pVdma->pVGAState;
    1564             VBOXCRCMD_SVRENABLE_INFO Info;
    1565             Info.hCltScr                = pVGAState->pDrv;
    1566             Info.pfnCltScrUpdateBegin   = pVGAState->pDrv->pfnVBVAUpdateBegin;
    1567             Info.pfnCltScrUpdateProcess = pVGAState->pDrv->pfnVBVAUpdateProcess;
    1568             Info.pfnCltScrUpdateEnd     = pVGAState->pDrv->pfnVBVAUpdateEnd;
    1569             rc = pVdma->CrSrvInfo.pfnEnable(pVdma->CrSrvInfo.hSvr, &Info);
    1570             if (RT_SUCCESS(rc))
    1571                 return VINF_SUCCESS;
    1572 
    1573             WARN(("pfnEnable failed %Rrc\n", rc));
    1574             vboxVDMACrHgcmHandleEnable(pVdma);
    1575         }
    1576         else
    1577             WARN(("vboxVDMACrHgcmSubmitSync failed %Rrc\n", rc));
    1578 
    1579         VBoxVBVAExHSDisable(&pVdma->CmdVbva);
    1580     }
    1581     else
    1582         WARN(("VBoxVBVAExHSEnable failed %Rrc\n", rc));
    1583 
    1584     return rc;
    1585 }
    1586 
    1587 /**
    1588  * Worker for several vboxVDMACrHostCtlProcess() commands.
    1589  *
    1590  * @returns IPRT status code.
    1591  * @param   pVdma           The VDMA channel.
    1592  * @param   fDoHgcmEnable   Whether to hand host command traffic back to the HGCM channel (see vboxVDMACrHgcmHandleEnable()).
    1593  * @thread  VDMA
    1594  */
    1595 static int vdmaVBVADisableProcess(struct VBOXVDMAHOST *pVdma, bool fDoHgcmEnable)
    1596 {
    1597     if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
    1598     {
    1599         Log(("vdma VBVA is already disabled\n"));
    1600         return VINF_SUCCESS;
    1601     }
    1602 
    1603     if (!pVdma->CrSrvInfo.pfnDisable)
    1604     {
    1605         /* "HGCM-less" mode. Just undo what vdmaVBVAEnableProcess did. */
    1606         VBoxVBVAExHSDisable(&pVdma->CmdVbva);
    1607         return VINF_SUCCESS;
    1608     }
    1609 
    1610     int rc = pVdma->CrSrvInfo.pfnDisable(pVdma->CrSrvInfo.hSvr);
    1611     if (RT_SUCCESS(rc))
    1612     {
    1613         if (fDoHgcmEnable)
    1614         {
    1615             PVGASTATE pVGAState = pVdma->pVGAState;
    1616 
    1617             /* Disabling is a bit tricky: we must ensure the host ctl commands do not
    1618              * arrive out of order and do not travel over the HGCM channel until after
    1619              * it has been re-enabled. */
    1620             rc = vboxVDMACrHgcmHandleEnable(pVdma);
    1621             if (RT_SUCCESS(rc))
    1622             {
    1623                 vdmaVBVANotifyDisable(pVGAState);
    1624                 return VINF_SUCCESS;
    1625             }
    1626 
    1627             VBOXCRCMD_SVRENABLE_INFO Info;
    1628             Info.hCltScr                = pVGAState->pDrv;
    1629             Info.pfnCltScrUpdateBegin   = pVGAState->pDrv->pfnVBVAUpdateBegin;
    1630             Info.pfnCltScrUpdateProcess = pVGAState->pDrv->pfnVBVAUpdateProcess;
    1631             Info.pfnCltScrUpdateEnd     = pVGAState->pDrv->pfnVBVAUpdateEnd;
    1632             pVdma->CrSrvInfo.pfnEnable(pVdma->CrSrvInfo.hSvr, &Info); /** @todo ignoring return code */
    1633         }
    1634     }
    1635     else
    1636         WARN(("pfnDisable failed %Rrc\n", rc));
    1637 
    1638     return rc;
    1639 }
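
/*
 * Ordering summary for vdmaVBVAEnableProcess() / vdmaVBVADisableProcess(),
 * as inferred from the code above (not from separate documentation):
 *
 *   enable:   VBoxVBVAExHSEnable() -> VBOXCRCMDCTL_TYPE_DISABLE over HGCM
 *             -> CrSrvInfo.pfnEnable()
 *   disable:  CrSrvInfo.pfnDisable() -> VBOXCRCMDCTL_TYPE_ENABLE over HGCM
 *             -> vdmaVBVANotifyDisable()
 *
 * I.e. host control traffic is handed over between the HGCM channel and the
 * command VBVA ring so that the two are never active at the same time.
 */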
    1640 
    1641 /**
    1642  * Handles VBVAEXHOST_DATA_TYPE_HOSTCTL for vboxVDMAWorkerThread.
    1643  *
    1644  * @returns VBox status code.
    1645  * @param   pVdma                   The VDMA channel.
    1646  * @param   pCmd                    The control command to process.  Should be
    1647  *                                  safe, i.e. not shared with guest.
    1648  * @param   pfContinue              Where to return whether to continue or not.
    1649  * @thread  VDMA
    1650  */
    1651 static int vboxVDMACrHostCtlProcess(struct VBOXVDMAHOST *pVdma, VBVAEXHOSTCTL *pCmd, bool *pfContinue)
    1652 {
    1653     *pfContinue = true;
    1654 
    1655     int rc;
    1656     switch (pCmd->enmType)
    1657     {
    1658         /*
    1659          * See vdmaVBVACtlOpaqueHostSubmit() and its callers.
    1660          */
    1661         case VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE:
    1662             if (VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
    1663             {
    1664                 if (pVdma->CrSrvInfo.pfnHostCtl)
    1665                     return pVdma->CrSrvInfo.pfnHostCtl(pVdma->CrSrvInfo.hSvr, (uint8_t *)pCmd->u.cmd.pvCmd, pCmd->u.cmd.cbCmd);
    1666                 WARN(("VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE in HGCM-less mode\n"));
    1667             }
    1668             else
    1669                 WARN(("VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE for disabled vdma VBVA\n"));
    1670             return VERR_INVALID_STATE;
    1671 
    1672         /*
    1673          * See vdmaVBVACtlDisableSync().
    1674          */
    1675         case VBVAEXHOSTCTL_TYPE_GHH_DISABLE:
    1676             rc = vdmaVBVADisableProcess(pVdma, true /* fDoHgcmEnable */);
    1677             if (RT_SUCCESS(rc))
    1678                 rc = VBoxVDMAThreadTerm(&pVdma->Thread, NULL, NULL, false /* fNotify */ );
    1679             else
    1680                 WARN(("vdmaVBVADisableProcess failed %Rrc\n", rc));
    1681             return rc;
    1682 
    1683         /*
    1684          * See vboxVDMACrHgcmNotifyTerminatingCb().
    1685          */
    1686         case VBVAEXHOSTCTL_TYPE_HH_ON_HGCM_UNLOAD:
    1687             rc = vdmaVBVADisableProcess(pVdma, false /* fDoHgcmEnable */);
    1688             if (RT_SUCCESS(rc))
    1689             {
    1690                 rc = VBoxVDMAThreadTerm(&pVdma->Thread, NULL, NULL, true /* fNotify */);
    1691                 if (RT_SUCCESS(rc))
    1692                     *pfContinue = false;
    1693                 else
    1694                     WARN(("VBoxVDMAThreadTerm failed %Rrc\n", rc));
    1695             }
    1696             else
    1697                 WARN(("vdmaVBVADisableProcess failed %Rrc\n", rc));
    1698             return rc;
    1699 
    1700         /*
    1701          * See vboxVDMASaveStateExecPerform().
    1702          */
    1703         case VBVAEXHOSTCTL_TYPE_HH_SAVESTATE:
    1704             rc = VBoxVBVAExHSSaveState(&pVdma->CmdVbva, pVdma->pVGAState->vram_ptrR3, pCmd->u.state.pSSM);
    1705             if (RT_SUCCESS(rc))
    1706             {
    1707                 VGA_SAVED_STATE_PUT_MARKER(pCmd->u.state.pSSM, 4);
    1708                 if (pVdma->CrSrvInfo.pfnSaveState)
    1709                     rc = pVdma->CrSrvInfo.pfnSaveState(pVdma->CrSrvInfo.hSvr, pCmd->u.state.pSSM);
    1710             }
    1711             else
    1712                 WARN(("VBoxVBVAExHSSaveState failed %Rrc\n", rc));
    1713             return rc;
    1714 
    1715         /*
    1716          * See vboxVDMASaveLoadExecPerform().
    1717          */
    1718         case VBVAEXHOSTCTL_TYPE_HH_LOADSTATE:
    1719             rc = VBoxVBVAExHSLoadState(&pVdma->CmdVbva, pVdma->pVGAState->vram_ptrR3, pCmd->u.state.pSSM, pCmd->u.state.u32Version);
    1720             if (RT_SUCCESS(rc))
    1721             {
    1722                 VGA_SAVED_STATE_GET_MARKER_RETURN_ON_MISMATCH(pCmd->u.state.pSSM, pCmd->u.state.u32Version, 4);
    1723                 if (pVdma->CrSrvInfo.pfnLoadState)
    1724                 {
    1725                     rc = pVdma->CrSrvInfo.pfnLoadState(pVdma->CrSrvInfo.hSvr, pCmd->u.state.pSSM, pCmd->u.state.u32Version);
    1726                     if (RT_FAILURE(rc))
    1727                         WARN(("pfnLoadState failed %Rrc\n", rc));
    1728                 }
    1729             }
    1730             else
    1731                 WARN(("VBoxVBVAExHSLoadState failed %Rrc\n", rc));
    1732             return rc;
    1733 
    1734         /*
    1735          * See vboxVDMASaveLoadDone().
    1736          */
    1737         case VBVAEXHOSTCTL_TYPE_HH_LOADSTATE_DONE:
    1738         {
    1739             PVGASTATE pVGAState = pVdma->pVGAState;
    1740             for (uint32_t i = 0; i < pVGAState->cMonitors; ++i)
    1741             {
    1742                 VBVAINFOSCREEN CurScreen;
    1743                 VBVAINFOVIEW   CurView;
    1744                 rc = VBVAGetInfoViewAndScreen(pVGAState, i, &CurView, &CurScreen);
    1745                 AssertLogRelMsgRCReturn(rc, ("VBVAGetInfoViewAndScreen [screen #%u] -> %#x\n", i, rc), rc);
    1746 
    1747                 rc = VBVAInfoScreen(pVGAState, &CurScreen);
    1748                 AssertLogRelMsgRCReturn(rc, ("VBVAInfoScreen [screen #%u] -> %#x\n", i, rc), rc);
    1749             }
    1750 
    1751             return VINF_SUCCESS;
    1752         }
    1753 
    1754         default:
    1755             WARN(("unexpected host ctl type %d\n", pCmd->enmType));
    1756             return VERR_INVALID_PARAMETER;
    1757     }
    1758 }
    1759 
    1760 /**
    1761  * Worker for vboxVDMACrGuestCtlResizeEntryProcess().
    1762  *
    1763  * @returns VINF_SUCCESS or VERR_INVALID_PARAMETER.
    1764  * @param   pVGAState           The VGA device state.
    1765  * @param   pScreen             The screen info (safe copy).
    1766  */
    1767 static int vboxVDMASetupScreenInfo(PVGASTATE pVGAState, VBVAINFOSCREEN *pScreen)
    1768 {
    1769     const uint32_t idxView = pScreen->u32ViewIndex;
    1770     const uint16_t fFlags  = pScreen->u16Flags;
    1771 
    1772     if (fFlags & VBVA_SCREEN_F_DISABLED)
    1773     {
    1774         if (   idxView < pVGAState->cMonitors
    1775             || idxView == UINT32_C(0xFFFFFFFF))
    1776         {
    1777             RT_UNTRUSTED_VALIDATED_FENCE();
    1778 
    1779             RT_ZERO(*pScreen);
    1780             pScreen->u32ViewIndex = idxView;
    1781             pScreen->u16Flags     = VBVA_SCREEN_F_ACTIVE | VBVA_SCREEN_F_DISABLED;
    1782             return VINF_SUCCESS;
    1783         }
    1784     }
    1785     else
    1786     {
    1787         if (fFlags & VBVA_SCREEN_F_BLANK2)
    1788         {
    1789             if (   idxView >= pVGAState->cMonitors
    1790                 && idxView != UINT32_C(0xFFFFFFFF))
    1791                 return VERR_INVALID_PARAMETER;
    1792             RT_UNTRUSTED_VALIDATED_FENCE();
    1793 
    1794             /* Special case for blanking using current video mode.
    1795              * Only the 'u16Flags' and 'u32ViewIndex' fields are relevant.
    1796              */
    1797             RT_ZERO(*pScreen);
    1798             pScreen->u32ViewIndex = idxView;
    1799             pScreen->u16Flags     = fFlags;
    1800             return VINF_SUCCESS;
    1801         }
    1802 
    1803         if (   idxView < pVGAState->cMonitors
    1804             && pScreen->u16BitsPerPixel <= 32
    1805             && pScreen->u32Width <= UINT16_MAX
    1806             && pScreen->u32Height <= UINT16_MAX
    1807             && pScreen->u32LineSize <= UINT16_MAX * 4)
    1808         {
    1809             const uint32_t u32BytesPerPixel = (pScreen->u16BitsPerPixel + 7) / 8;
    1810             if (pScreen->u32Width <= pScreen->u32LineSize / (u32BytesPerPixel ? u32BytesPerPixel : 1))
    1811             {
    1812                 const uint64_t u64ScreenSize = (uint64_t)pScreen->u32LineSize * pScreen->u32Height;
    1813                 if (   pScreen->u32StartOffset <= pVGAState->vram_size
    1814                     && u64ScreenSize           <= pVGAState->vram_size
    1815                     && pScreen->u32StartOffset <= pVGAState->vram_size - (uint32_t)u64ScreenSize)
    1816                     return VINF_SUCCESS;
    1817             }
    1818         }
    1819     }
    1820 
    1821     LogFunc(("Failed\n"));
    1822     return VERR_INVALID_PARAMETER;
    1823 }
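
/*
 * For illustration only: the size checks above use the overflow-safe pattern
 *     off <= max && cb <= max && off <= max - cb
 * rather than "off + cb <= max", which could wrap around.  A worked example
 * with hypothetical values:
 *
 *     max = 16 MiB VRAM, off = 12 MiB, cb = 8 MiB:
 *         off <= max       -> true
 *         cb  <= max       -> true
 *         off <= max - cb  -> 12 MiB <= 8 MiB -> false => rejected,
 *
 * whereas a naive 32-bit "off + cb" could silently wrap for large values.
 */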
    1824 
    1825 /**
    1826  * Handles one entry in a VBVAEXHOSTCTL_TYPE_GHH_RESIZE command.
    1827  *
    1828  * @returns IPRT status code.
    1829  * @param   pVdma               The VDMA channel
    1830  * @param   pEntry              The entry to handle.  Considered volatile.
    1831  *
    1832  * @thread  VDMA
    1833  */
    1834 static int vboxVDMACrGuestCtlResizeEntryProcess(struct VBOXVDMAHOST *pVdma,
    1835                                                 VBOXCMDVBVA_RESIZE_ENTRY RT_UNTRUSTED_VOLATILE_GUEST *pEntry)
    1836 {
    1837     PVGASTATE pVGAState = pVdma->pVGAState;
    1838 
    1839     VBVAINFOSCREEN Screen;
    1840     RT_COPY_VOLATILE(Screen, pEntry->Screen);
    1841     RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
    1842 
    1843     /* Verify and cleanup local copy of the input data. */
    1844     int rc = vboxVDMASetupScreenInfo(pVGAState, &Screen);
    1845     if (RT_FAILURE(rc))
    1846     {
    1847         WARN(("invalid screen data\n"));
    1848         return rc;
    1849     }
    1850     RT_UNTRUSTED_VALIDATED_FENCE();
    1851 
    1852     VBOXCMDVBVA_SCREENMAP_DECL(uint32_t, aTargetMap);
    1853     RT_BCOPY_VOLATILE(aTargetMap, pEntry->aTargetMap, sizeof(aTargetMap));
    1854     RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
    1855 
    1856     ASMBitClearRange(aTargetMap, pVGAState->cMonitors, VBOX_VIDEO_MAX_SCREENS);
    1857 
    1858     if (pVdma->CrSrvInfo.pfnResize)
    1859     {
    1860         /* Also inform the HGCM service, if it is there. */
    1861         rc = pVdma->CrSrvInfo.pfnResize(pVdma->CrSrvInfo.hSvr, &Screen, aTargetMap);
    1862         if (RT_FAILURE(rc))
    1863         {
    1864             WARN(("pfnResize failed %Rrc\n", rc));
    1865             return rc;
    1866         }
    1867     }
    1868 
    1869     /* A fake view which contains the current screen for the 2D VBVAInfoView. */
    1870     VBVAINFOVIEW View;
    1871     View.u32ViewOffset    = 0;
    1872     View.u32ViewSize      = Screen.u32LineSize * Screen.u32Height + Screen.u32StartOffset;
    1873     View.u32MaxScreenSize = Screen.u32LineSize * Screen.u32Height;
    1874 
    1875     const bool fDisable = RT_BOOL(Screen.u16Flags & VBVA_SCREEN_F_DISABLED);
    1876 
    1877     for (int i = ASMBitFirstSet(aTargetMap, pVGAState->cMonitors);
    1878          i >= 0;
    1879          i = ASMBitNextSet(aTargetMap, pVGAState->cMonitors, i))
    1880     {
    1881         Screen.u32ViewIndex = i;
    1882 
    1883         VBVAINFOSCREEN CurScreen;
    1884         VBVAINFOVIEW CurView;
    1885 
    1886         rc = VBVAGetInfoViewAndScreen(pVGAState, i, &CurView, &CurScreen);
    1887         AssertRC(rc);
    1888 
    1889         if (!memcmp(&Screen, &CurScreen, sizeof (CurScreen)))
    1890             continue;
    1891 
    1892         /* The view does not change if _BLANK2 is set. */
    1893         if (   (!fDisable || !CurView.u32ViewSize)
    1894             && !RT_BOOL(Screen.u16Flags & VBVA_SCREEN_F_BLANK2))
    1895         {
    1896             View.u32ViewIndex = Screen.u32ViewIndex;
    1897 
    1898             rc = VBVAInfoView(pVGAState, &View);
    1899             if (RT_FAILURE(rc))
    1900             {
    1901                 WARN(("VBVAInfoView failed %Rrc\n", rc));
    1902                 break;
    1903             }
    1904         }
    1905 
    1906         rc = VBVAInfoScreen(pVGAState, &Screen);
    1907         if (RT_FAILURE(rc))
    1908         {
    1909             WARN(("VBVAInfoScreen failed %Rrc\n", rc));
    1910             break;
    1911         }
    1912     }
    1913 
    1914     return rc;
    1915 }
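
/*
 * For illustration only: aTargetMap is a screen bitmap walked with
 * ASMBitFirstSet()/ASMBitNextSet() as in the loop above.  A minimal usage
 * sketch (cMonitors and the logging are hypothetical):
 */
# if 0
{
    VBOXCMDVBVA_SCREENMAP_DECL(uint32_t, aMap);
    RT_ZERO(aMap);
    ASMBitSet(aMap, 0);     /* resize screen 0 */
    ASMBitSet(aMap, 2);     /* resize screen 2 */
    for (int i = ASMBitFirstSet(aMap, cMonitors); i >= 0; i = ASMBitNextSet(aMap, cMonitors, i))
        LogFlow(("would resize screen %d\n", i));
}
# endif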
    1916 
    1917 
    1918 /**
    1919  * Processes VBVAEXHOST_DATA_TYPE_GUESTCTL for vboxVDMAWorkerThread() and
    1920  * vdmaVBVACtlThreadCreatedEnable().
    1921  *
    1922  * @returns VBox status code.
    1923  * @param   pVdma               The VDMA channel.
    1924  * @param   pCmd                The command to process.  May be safe (i.e. not
    1925  *                              shared with the guest).
    1926  *
    1927  * @thread  VDMA
    1928  */
    1929 static int vboxVDMACrGuestCtlProcess(struct VBOXVDMAHOST *pVdma, VBVAEXHOSTCTL *pCmd)
    1930 {
    1931     VBVAEXHOSTCTL_TYPE enmType = pCmd->enmType;
    1932     switch (enmType)
    1933     {
    1934         /*
    1935          * See handling of VBOXCMDVBVACTL_TYPE_3DCTL in vboxCmdVBVACmdCtl().
    1936          */
    1937         case VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE:
    1938             ASSERT_GUEST_LOGREL_RETURN(VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva), VERR_INVALID_STATE);
    1939             ASSERT_GUEST_LOGREL_RETURN(pVdma->CrSrvInfo.pfnGuestCtl, VERR_INVALID_STATE);
    1940             return pVdma->CrSrvInfo.pfnGuestCtl(pVdma->CrSrvInfo.hSvr,
    1941                                                 (uint8_t RT_UNTRUSTED_VOLATILE_GUEST *)pCmd->u.cmd.pvCmd,
    1942                                                 pCmd->u.cmd.cbCmd);
    1943 
    1944         /*
    1945          * See handling of VBOXCMDVBVACTL_TYPE_RESIZE in vboxCmdVBVACmdCtl().
    1946          */
    1947         case VBVAEXHOSTCTL_TYPE_GHH_RESIZE:
    1948         {
    1949             ASSERT_GUEST_RETURN(VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva), VERR_INVALID_STATE);
    1950             uint32_t cbCmd = pCmd->u.cmd.cbCmd;
    1951             ASSERT_GUEST_LOGREL_MSG_RETURN(   !(cbCmd % sizeof(VBOXCMDVBVA_RESIZE_ENTRY))
    1952                                            && cbCmd > 0,
    1953                                            ("cbCmd=%#x\n", cbCmd), VERR_INVALID_PARAMETER);
    1954 
    1955             uint32_t const cElements = cbCmd / sizeof(VBOXCMDVBVA_RESIZE_ENTRY);
    1956             VBOXCMDVBVA_RESIZE RT_UNTRUSTED_VOLATILE_GUEST *pResize
    1957                 = (VBOXCMDVBVA_RESIZE RT_UNTRUSTED_VOLATILE_GUEST *)pCmd->u.cmd.pvCmd;
    1958             for (uint32_t i = 0; i < cElements; ++i)
    1959             {
    1960                 VBOXCMDVBVA_RESIZE_ENTRY RT_UNTRUSTED_VOLATILE_GUEST *pEntry = &pResize->aEntries[i];
    1961                 int rc = vboxVDMACrGuestCtlResizeEntryProcess(pVdma, pEntry);
    1962                 ASSERT_GUEST_LOGREL_MSG_RC_RETURN(rc, ("vboxVDMACrGuestCtlResizeEntryProcess failed for #%u: %Rrc\n", i, rc), rc);
    1963             }
    1964             return VINF_SUCCESS;
    1965         }
    1966 
    1967         /*
    1968          * See vdmaVBVACtlEnableSubmitInternal().
    1969          */
    1970         case VBVAEXHOSTCTL_TYPE_GHH_ENABLE:
    1971         case VBVAEXHOSTCTL_TYPE_GHH_ENABLE_PAUSED:
    1972         {
    1973             ASSERT_GUEST(pCmd->u.cmd.cbCmd == sizeof(VBVAENABLE));
    1974 
    1975             VBVAENABLE RT_UNTRUSTED_VOLATILE_GUEST *pEnable = (VBVAENABLE RT_UNTRUSTED_VOLATILE_GUEST *)pCmd->u.cmd.pvCmd;
    1976             uint32_t const u32Offset = pEnable->u32Offset;
    1977             RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
    1978 
    1979             int rc = vdmaVBVAEnableProcess(pVdma, u32Offset);
    1980             ASSERT_GUEST_MSG_RC_RETURN(rc, ("vdmaVBVAEnableProcess -> %Rrc\n", rc), rc);
    1981 
    1982             if (enmType == VBVAEXHOSTCTL_TYPE_GHH_ENABLE_PAUSED)
    1983             {
    1984                 rc = VBoxVBVAExHPPause(&pVdma->CmdVbva);
    1985                 ASSERT_GUEST_MSG_RC_RETURN(rc, ("VBoxVBVAExHPPause -> %Rrc\n", rc), rc);
    1986             }
    1987             return VINF_SUCCESS;
    1988         }
    1989 
    1990         /*
    1991          * See vdmaVBVACtlDisableSubmitInternal().
    1992          */
    1993         case VBVAEXHOSTCTL_TYPE_GHH_DISABLE:
    1994         {
    1995             int rc = vdmaVBVADisableProcess(pVdma, true /* fDoHgcmEnable */);
    1996             ASSERT_GUEST_MSG_RC_RETURN(rc, ("vdmaVBVADisableProcess -> %Rrc\n", rc), rc);
    1997 
    1998             /* do vgaUpdateDisplayAll right away */
    1999             VMR3ReqCallNoWait(PDMDevHlpGetVM(pVdma->pVGAState->pDevInsR3), VMCPUID_ANY,
    2000                               (PFNRT)vgaUpdateDisplayAll, 2, pVdma->pVGAState, /* fFailOnResize = */ false);
    2001 
    2002             return VBoxVDMAThreadTerm(&pVdma->Thread, NULL, NULL, false /* fNotify */);
    2003         }
    2004 
    2005         default:
    2006             ASSERT_GUEST_LOGREL_MSG_FAILED(("unexpected ctl type %d\n", enmType));
    2007             return VERR_INVALID_PARAMETER;
    2008     }
    2009 }
    2010 
    2011 
    2012 /**
    2013  * Copies one page in a VBOXCMDVBVA_OPTYPE_PAGING_TRANSFER command.
    2014  *
    2015  * @param pDevIns   Device instance data.
    2016  * @param uPageNo   Page frame number.
    2017  * @param pbVram    Pointer to the VRAM.
    2018  * @param fIn       Direction flag (VRAM related): true - transfer to VRAM
    2019  *                  (page in); false - transfer from VRAM (page out).
    2020  * @thread VDMA
    2022  */
    2023 static int vboxVDMACrCmdVbvaProcessPagingEl(PPDMDEVINS pDevIns, VBOXCMDVBVAPAGEIDX uPageNo, uint8_t *pbVram, bool fIn)
    2024 {
    2025     RTGCPHYS       GCPhysPage = (RTGCPHYS)uPageNo << X86_PAGE_SHIFT;
    2026     PGMPAGEMAPLOCK Lock;
    2027 
    2028     if (fIn)
    2029     {
    2030         const void *pvPage;
    2031         int rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, GCPhysPage, 0, &pvPage, &Lock);
    2032         ASSERT_GUEST_LOGREL_MSG_RC_RETURN(rc, ("PDMDevHlpPhysGCPhys2CCPtrReadOnly %RGp -> %Rrc\n", GCPhysPage, rc), rc);
    2033 
    2034         memcpy(pbVram, pvPage, PAGE_SIZE);
    2035         PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
    2036     }
    2037     else
    2038     {
    2039         void *pvPage;
    2040         int rc = PDMDevHlpPhysGCPhys2CCPtr(pDevIns, GCPhysPage, 0, &pvPage, &Lock);
    2041         ASSERT_GUEST_LOGREL_MSG_RC_RETURN(rc, ("PDMDevHlpPhysGCPhys2CCPtr %RGp -> %Rrc\n", GCPhysPage, rc), rc);
    2042 
    2043         memcpy(pvPage, pbVram, PAGE_SIZE);
    2044         PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
    2045     }
    2046 
    2047     return VINF_SUCCESS;
    2048 }
    2049 
    2050 /**
    2051  * Handles a VBOXCMDVBVA_OPTYPE_PAGING_TRANSFER command.
    2052  *
    2053  * @return 0 on success, -1 on failure.
    2054  *
    2055  * @thread VDMA
    2056  */
    2057 static int8_t vboxVDMACrCmdVbvaPageTransfer(PVGASTATE pVGAState, VBOXCMDVBVA_HDR const RT_UNTRUSTED_VOLATILE_GUEST *pHdr,
    2058                                             uint32_t cbCmd, const VBOXCMDVBVA_PAGING_TRANSFER_DATA RT_UNTRUSTED_VOLATILE_GUEST *pData)
    2059 {
    2060     /*
    2061      * Extract and validate information.
    2062      */
    2063     ASSERT_GUEST_MSG_RETURN(cbCmd >= sizeof(VBOXCMDVBVA_PAGING_TRANSFER), ("%#x\n", cbCmd), -1);
    2064 
    2065     bool const fIn = RT_BOOL(pHdr->u8Flags & VBOXCMDVBVA_OPF_PAGING_TRANSFER_IN);
    2066     RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
    2067 
    2068     uint32_t cbPageNumbers = cbCmd - RT_OFFSETOF(VBOXCMDVBVA_PAGING_TRANSFER, Data.aPageNumbers);
    2069     ASSERT_GUEST_MSG_RETURN(!(cbPageNumbers % sizeof(VBOXCMDVBVAPAGEIDX)), ("%#x\n", cbPageNumbers), -1);
    2070     VBOXCMDVBVAPAGEIDX const cPages = cbPageNumbers / sizeof(VBOXCMDVBVAPAGEIDX);
    2071 
    2072     VBOXCMDVBVAOFFSET offVRam = pData->Alloc.u.offVRAM;
    2073     RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
    2074     ASSERT_GUEST_MSG_RETURN(!(offVRam & X86_PAGE_OFFSET_MASK), ("%#x\n", offVRam), -1);
    2075     ASSERT_GUEST_MSG_RETURN(offVRam < pVGAState->vram_size, ("%#x vs %#x\n", offVRam, pVGAState->vram_size), -1);
    2076     uint32_t cVRamPages = (pVGAState->vram_size - offVRam) >> X86_PAGE_SHIFT;
    2077     ASSERT_GUEST_MSG_RETURN(cPages <= cVRamPages, ("cPages=%#x vs cVRamPages=%#x @ offVRam=%#x\n", cPages, cVRamPages, offVRam), -1);
    2078 
    2079     RT_UNTRUSTED_VALIDATED_FENCE();
    2080 
    2081     /*
    2082      * Execute the command.
    2083      */
    2084     uint8_t *pbVRam = (uint8_t *)pVGAState->vram_ptrR3 + offVRam;
    2085     for (uint32_t iPage = 0; iPage < cPages; iPage++, pbVRam += X86_PAGE_SIZE)
    2086     {
    2087         uint32_t uPageNo = pData->aPageNumbers[iPage];
    2088         RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
    2089         int rc = vboxVDMACrCmdVbvaProcessPagingEl(pVGAState->pDevInsR3, uPageNo, pbVRam, fIn);
    2090         ASSERT_GUEST_MSG_RETURN(RT_SUCCESS(rc), ("#%#x: uPageNo=%#x rc=%Rrc\n", iPage, uPageNo, rc), -1);
    2091     }
    2092     return 0;
    2093 }
    2094 
    2095 
    2096 /**
    2097  * Handles VBOXCMDVBVA_OPTYPE_PAGING_FILL.
    2098  *
    2099  * @returns 0 on success, -1 on failure.
    2100  * @param   pVGAState           The VGA state.
    2101  * @param   pFill               The fill command (volatile).
    2102  *
    2103  * @thread VDMA
    2104  */
    2105 static int8_t vboxVDMACrCmdVbvaPagingFill(PVGASTATE pVGAState, VBOXCMDVBVA_PAGING_FILL RT_UNTRUSTED_VOLATILE_GUEST *pFill)
    2106 {
    2107     /*
    2108      * Copy and validate input.
    2109      */
    2110     VBOXCMDVBVA_PAGING_FILL FillSafe;
    2111     RT_COPY_VOLATILE(FillSafe, *pFill);
    2112     RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
    2113 
    2114     VBOXCMDVBVAOFFSET offVRAM = FillSafe.offVRAM;
    2115     ASSERT_GUEST_MSG_RETURN(!(offVRAM & X86_PAGE_OFFSET_MASK), ("offVRAM=%#x\n", offVRAM), -1);
    2116     ASSERT_GUEST_MSG_RETURN(offVRAM <= pVGAState->vram_size, ("offVRAM=%#x\n", offVRAM), -1);
    2117 
    2118     uint32_t cbFill = FillSafe.u32CbFill;
    2119     ASSERT_GUEST_STMT(!(cbFill & 3), cbFill &= ~(uint32_t)3);
    2120     ASSERT_GUEST_MSG_RETURN(   cbFill < pVGAState->vram_size
    2121                             && offVRAM <= pVGAState->vram_size - cbFill,
    2122                             ("offVRAM=%#x cbFill=%#x\n", offVRAM, cbFill), -1);
    2123 
    2124     RT_UNTRUSTED_VALIDATED_FENCE();
    2125 
    2126     /*
    2127      * Execute.
    2128      */
    2129     uint32_t      *pu32Vram = (uint32_t *)((uint8_t *)pVGAState->vram_ptrR3 + offVRAM);
    2130     uint32_t const u32Color = FillSafe.u32Pattern;
    2131 
    2132     uint32_t cLoops = cbFill / 4;
    2133     while (cLoops-- > 0)
    2134         pu32Vram[cLoops] = u32Color;
    2135 
    2136     return 0;
    2137 }
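
/*
 * Note for the fill loop above: it writes the 32-bit pattern backwards
 * through the whole range and is, by inspection, equivalent to IPRT's
 *     ASMMemFill32(pu32Vram, cbFill, u32Color);
 * the explicit loop is kept as-is in the device code.
 */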
    2138 
    2139 /**
    2140  * Process command data.
    2141  *
    2142  * @returns Zero or positive on success, negative on failure.
    2143  * @param   pVdma               The VDMA channel.
    2144  * @param   pCmd                The command data to process. Assume volatile.
    2145  * @param   cbCmd               The amount of command data.
    2146  *
    2147  * @thread VDMA
    2148  */
    2149 static int8_t vboxVDMACrCmdVbvaProcessCmdData(struct VBOXVDMAHOST *pVdma,
    2150                                               const VBOXCMDVBVA_HDR RT_UNTRUSTED_VOLATILE_GUEST *pCmd, uint32_t cbCmd)
    2151 {
    2152     uint8_t bOpCode = pCmd->u8OpCode;
    2153     RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
    2154     switch (bOpCode)
    2155     {
    2156         case VBOXCMDVBVA_OPTYPE_NOPCMD:
    2157             return 0;
    2158 
    2159         case VBOXCMDVBVA_OPTYPE_PAGING_TRANSFER:
    2160             return vboxVDMACrCmdVbvaPageTransfer(pVdma->pVGAState, pCmd, cbCmd,
    2161                                                  &((VBOXCMDVBVA_PAGING_TRANSFER RT_UNTRUSTED_VOLATILE_GUEST *)pCmd)->Data);
    2162 
    2163         case VBOXCMDVBVA_OPTYPE_PAGING_FILL:
    2164             ASSERT_GUEST_RETURN(cbCmd == sizeof(VBOXCMDVBVA_PAGING_FILL), -1);
    2165             return vboxVDMACrCmdVbvaPagingFill(pVdma->pVGAState, (VBOXCMDVBVA_PAGING_FILL RT_UNTRUSTED_VOLATILE_GUEST *)pCmd);
    2166 
    2167         default:
    2168             ASSERT_GUEST_RETURN(pVdma->CrSrvInfo.pfnCmd != NULL, -1);
    2169             return pVdma->CrSrvInfo.pfnCmd(pVdma->CrSrvInfo.hSvr, pCmd, cbCmd);
    2170     }
    2171 }
    2172 
    2173 # if 0
    2174 typedef struct VBOXCMDVBVA_PAGING_TRANSFER
    2175 {
    2176     VBOXCMDVBVA_HDR Hdr;
    2177     /* For now this can only contain offVRAM.
    2178      * A paging transfer can NOT be initiated for allocations that have a host 3D object (hostID) associated. */
    2179     VBOXCMDVBVA_ALLOCINFO Alloc;
    2180     uint32_t u32Reserved;
    2181     VBOXCMDVBVA_SYSMEMEL aSysMem[1];
    2182 } VBOXCMDVBVA_PAGING_TRANSFER;
    2183 # endif
    2184 
    2185 AssertCompile(sizeof (VBOXCMDVBVA_HDR) == 8);
    2186 AssertCompile(sizeof (VBOXCMDVBVA_ALLOCINFO) == 4);
    2187 AssertCompile(sizeof (VBOXCMDVBVAPAGEIDX) == 4);
    2188 AssertCompile(!(X86_PAGE_SIZE % sizeof (VBOXCMDVBVAPAGEIDX)));
    2189 
    2190 # define VBOXCMDVBVA_NUM_SYSMEMEL_PER_PAGE (X86_PAGE_SIZE / sizeof (VBOXCMDVBVA_SYSMEMEL))
    2191 
    2192 /**
    2193  * Worker for vboxVDMACrCmdProcess.
    2194  *
    2195  * @returns 8-bit result.
    2196  * @param   pVdma       The VDMA channel.
    2197  * @param   pCmd        The command.  Consider volatile!
    2198  * @param   cbCmd       The size of what @a pCmd points to.  At least
    2199  *                      sizeof(VBOXCMDVBVA_HDR).
    2200  * @param   fRecursion  Set if recursive call, false if not.
    2201  *
    2202  * @thread VDMA
    2203  */
    2204 static int8_t vboxVDMACrCmdVbvaProcess(struct VBOXVDMAHOST *pVdma, const VBOXCMDVBVA_HDR RT_UNTRUSTED_VOLATILE_GUEST *pCmd,
    2205                                        uint32_t cbCmd, bool fRecursion)
    2206 {
    2207     int8_t        i8Result = 0;
    2208     uint8_t const bOpCode  = pCmd->u8OpCode;
    2209     RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
    2210     LogRelFlow(("VDMA: vboxVDMACrCmdVbvaProcess: ENTER, bOpCode=%u\n", bOpCode));
    2211     switch (bOpCode)
    2212     {
    2213         case VBOXCMDVBVA_OPTYPE_SYSMEMCMD:
    2214         {
    2215             /*
    2216              * Extract the command physical address and size.
    2217              */
    2218             ASSERT_GUEST_MSG_RETURN(cbCmd >= sizeof(VBOXCMDVBVA_SYSMEMCMD), ("%#x\n", cbCmd), -1);
    2219             RTGCPHYS GCPhysCmd  = ((VBOXCMDVBVA_SYSMEMCMD RT_UNTRUSTED_VOLATILE_GUEST *)pCmd)->phCmd;
    2220             RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
    2221             uint32_t cbCmdPart  = X86_PAGE_SIZE - (uint32_t)(GCPhysCmd & X86_PAGE_OFFSET_MASK);
    2222 
    2223             uint32_t cbRealCmd  = pCmd->u8Flags;
    2224             cbRealCmd |= (uint32_t)pCmd->u.u8PrimaryID << 8;
    2225             ASSERT_GUEST_MSG_RETURN(cbRealCmd >= sizeof(VBOXCMDVBVA_HDR), ("%#x\n", cbRealCmd), -1);
    2226             ASSERT_GUEST_MSG_RETURN(cbRealCmd <= _1M, ("%#x\n", cbRealCmd), -1);
    2227 
    2228             /*
    2229              * Lock down the first page of the memory specified by the command.
    2230              */
    2231             PGMPAGEMAPLOCK Lock;
    2232             PVGASTATE pVGAState = pVdma->pVGAState;
    2233             PPDMDEVINS pDevIns = pVGAState->pDevInsR3;
    2234             VBOXCMDVBVA_HDR const *pRealCmdHdr = NULL;
    2235             int rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, GCPhysCmd, 0, (const void **)&pRealCmdHdr, &Lock);
    2236             ASSERT_GUEST_LOGREL_MSG_RC_RETURN(rc, ("VDMA: %RGp -> %Rrc\n", GCPhysCmd, rc), -1);
    2237             Assert((GCPhysCmd & PAGE_OFFSET_MASK) == (((uintptr_t)pRealCmdHdr) & PAGE_OFFSET_MASK));
    2238 
    2239             /*
    2240              * All fits within one page?  We can handle that pretty efficiently.
    2241              */
    2242             if (cbRealCmd <= cbCmdPart)
    2243             {
    2244                 i8Result = vboxVDMACrCmdVbvaProcessCmdData(pVdma, pRealCmdHdr, cbRealCmd);
    2245                 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
    2246             }
    2247             else
    2248             {
    2249                 /*
    2250                  * To keep things damn simple, just double buffer cross page or
    2251                  * multipage requests.
    2252                  */
    2253                 uint8_t *pbCmdBuf = (uint8_t *)RTMemTmpAllocZ(RT_ALIGN_Z(cbRealCmd, 16));
    2254                 if (pbCmdBuf)
    2255                 {
    2256                     memcpy(pbCmdBuf, pRealCmdHdr, cbCmdPart);
    2257                     PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
    2258                     pRealCmdHdr = NULL;
    2259 
    2260                     rc = PDMDevHlpPhysRead(pDevIns, GCPhysCmd + cbCmdPart, &pbCmdBuf[cbCmdPart], cbRealCmd - cbCmdPart);
    2261                     if (RT_SUCCESS(rc))
    2262                         i8Result = vboxVDMACrCmdVbvaProcessCmdData(pVdma, (VBOXCMDVBVA_HDR const *)pbCmdBuf, cbRealCmd);
    2263                     else
    2264                         LogRelMax(200, ("VDMA: Error reading %#x bytes of guest memory %#RGp!\n", cbRealCmd, GCPhysCmd));
    2265                     RTMemTmpFree(pbCmdBuf);
    2266                 }
    2267                 else
    2268                 {
    2269                     PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
    2270                     LogRelMax(200, ("VDMA: Out of temporary memory! %#x\n", cbRealCmd));
    2271                     i8Result = -1;
    2272                 }
    2273             }
    2274             return i8Result;
    2275         }
    2276 
    2277         case VBOXCMDVBVA_OPTYPE_COMPLEXCMD:
    2278         {
    2279             Assert(cbCmd >= sizeof(VBOXCMDVBVA_HDR)); /* caller already checked this */
    2280             ASSERT_GUEST_RETURN(!fRecursion, -1);
    2281 
    2282             /* Skip current command. */
    2283             cbCmd -= sizeof(*pCmd);
    2284             pCmd++;
    2285 
    2286             /* Process subcommands. */
    2287             while (cbCmd > 0)
    2288             {
    2289                 ASSERT_GUEST_MSG_RETURN(cbCmd >= sizeof(VBOXCMDVBVA_HDR), ("%#x\n", cbCmd), -1);
    2290 
    2291                 uint16_t cbCurCmd = pCmd->u2.complexCmdEl.u16CbCmdHost;
    2292                 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
    2293                 ASSERT_GUEST_MSG_RETURN(cbCurCmd <= cbCmd, ("cbCurCmd=%#x, cbCmd=%#x\n", cbCurCmd, cbCmd), -1);
    2294 
    2295                 i8Result = vboxVDMACrCmdVbvaProcess(pVdma, pCmd, cbCurCmd, true /*fRecursion*/);
    2296                 ASSERT_GUEST_MSG_RETURN(i8Result >= 0, ("vboxVDMACrCmdVbvaProcess -> %d\n", i8Result), i8Result);
    2297 
    2298                 /* Advance to the next command. */
    2299                 pCmd  = (VBOXCMDVBVA_HDR RT_UNTRUSTED_VOLATILE_GUEST *)((uintptr_t)pCmd + cbCurCmd);
    2300                 cbCmd -= cbCurCmd;
    2301             }
    2302             return 0;
    2303         }
    2304 
    2305         default:
    2306             i8Result = vboxVDMACrCmdVbvaProcessCmdData(pVdma, pCmd, cbCmd);
    2307             LogRelFlow(("VDMA: vboxVDMACrCmdVbvaProcess: LEAVE, opCode(%i)\n", bOpCode)); /* bOpCode: avoid re-reading the untrusted volatile */
    2308             return i8Result;
    2309     }
    2310 }
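
/*
 * For illustration only: the VBOXCMDVBVA_OPTYPE_SYSMEMCMD path above double
 * buffers commands that cross a page boundary.  With hypothetical numbers, a
 * command of cbRealCmd = 0x1800 bytes at guest physical 0x7ffff800 has only
 * cbCmdPart = 0x800 bytes in the mapped first page; those are memcpy'd into a
 * temporary buffer and the remaining 0x1000 bytes are fetched with
 * PDMDevHlpPhysRead() before the safe copy is processed.
 */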
    2311 
    2312 /**
    2313  * Worker for vboxVDMAWorkerThread handling VBVAEXHOST_DATA_TYPE_CMD.
    2314  *
    2315  * @thread VDMA
    2316  */
    2317 static void vboxVDMACrCmdProcess(struct VBOXVDMAHOST *pVdma, uint8_t RT_UNTRUSTED_VOLATILE_GUEST *pbCmd, uint32_t cbCmd)
    2318 {
    2319     if (   cbCmd > 0
    2320         && *pbCmd == VBOXCMDVBVA_OPTYPE_NOP)
    2321     { /* nop */ }
    2322     else
    2323     {
    2324         ASSERT_GUEST_RETURN_VOID(cbCmd >= sizeof(VBOXCMDVBVA_HDR));
    2325         VBOXCMDVBVA_HDR RT_UNTRUSTED_VOLATILE_GUEST *pCmd = (VBOXCMDVBVA_HDR RT_UNTRUSTED_VOLATILE_GUEST *)pbCmd;
    2326 
    2327         /* check if the command is cancelled */
    2328         if (ASMAtomicCmpXchgU8(&pCmd->u8State, VBOXCMDVBVA_STATE_IN_PROGRESS, VBOXCMDVBVA_STATE_SUBMITTED))
    2329         {
    2330             /* Process it. */
    2331             pCmd->u.i8Result = vboxVDMACrCmdVbvaProcess(pVdma, pCmd, cbCmd, false /*fRecursion*/);
    2332         }
    2333         else
    2334             Assert(pCmd->u8State == VBOXCMDVBVA_STATE_CANCELLED);
    2335     }
    2336 
    2337 }
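
/*
 * For illustration only: the cancellation handshake above relies on a single
 * atomic state transition, so the worker and a concurrent cancel can never
 * both act on the same command:
 *
 *     if (ASMAtomicCmpXchgU8(&pCmd->u8State, VBOXCMDVBVA_STATE_IN_PROGRESS,
 *                            VBOXCMDVBVA_STATE_SUBMITTED))
 *         process the command;            // we won the SUBMITTED slot
 *     else
 *         assume SUBMITTED -> CANCELLED;  // a cancel got there first
 */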
    2338 
    2339 /**
    2340  * Worker for vboxVDMAConstruct().
    2341  */
    2342 static int vboxVDMACrCtlHgsmiSetup(struct VBOXVDMAHOST *pVdma)
    2343 {
    2344     PVBOXVDMACMD_CHROMIUM_CTL_CRHGSMI_SETUP pCmd;
    2345     pCmd = (PVBOXVDMACMD_CHROMIUM_CTL_CRHGSMI_SETUP)vboxVDMACrCtlCreate(VBOXVDMACMD_CHROMIUM_CTL_TYPE_CRHGSMI_SETUP, sizeof(*pCmd));
    2346     int rc;
    2347     if (pCmd)
    2348     {
    2349         PVGASTATE pVGAState = pVdma->pVGAState;
    2350         pCmd->pvVRamBase = pVGAState->vram_ptrR3;
    2351         pCmd->cbVRam = pVGAState->vram_size;
    2352         pCmd->pLed = &pVGAState->Led3D;
    2353         pCmd->CrClientInfo.hClient = pVdma;
    2354         pCmd->CrClientInfo.pfnCallout = vboxCmdVBVACmdCallout;
    2355         rc = vboxVDMACrCtlPost(pVGAState, &pCmd->Hdr, sizeof (*pCmd));
    2356         if (RT_SUCCESS(rc))
    2357         {
    2358             rc = vboxVDMACrCtlGetRc(&pCmd->Hdr);
    2359             if (RT_SUCCESS(rc))
    2360                 pVdma->CrSrvInfo = pCmd->CrCmdServerInfo;
    2361             else if (rc != VERR_NOT_SUPPORTED)
    2362                 WARN(("vboxVDMACrCtlGetRc returned %Rrc\n", rc));
    2363         }
    2364         else
    2365             WARN(("vboxVDMACrCtlPost failed %Rrc\n", rc));
    2366 
    2367         vboxVDMACrCtlRelease(&pCmd->Hdr);
    2368     }
    2369     else
    2370         rc = VERR_NO_MEMORY;
    2371 
    2372     if (RT_FAILURE(rc))
    2373         memset(&pVdma->CrSrvInfo, 0, sizeof (pVdma->CrSrvInfo));
    2374 
    2375     return rc;
    2376 }
    2377 
    2378 /**
    2379  * @interface_method_impl{PDMIDISPLAYVBVACALLBACKS,pfnCrHgsmiCommandCompleteAsync}
    2380  *
    2381  * @note Some indirect completion magic, you gotta love this code!
    2382  */
    2383 DECLCALLBACK(int) vboxVDMACrHgsmiCommandCompleteAsync(PPDMIDISPLAYVBVACALLBACKS pInterface, PVBOXVDMACMD_CHROMIUM_CMD pCmd, int rc)
    2384 {
    2385     PVGASTATE                                    pVGAState = PPDMIDISPLAYVBVACALLBACKS_2_PVGASTATE(pInterface);
    2386     PHGSMIINSTANCE                               pIns      = pVGAState->pHGSMI;
    2387     VBOXVDMACMD RT_UNTRUSTED_VOLATILE_GUEST     *pDmaHdr   = VBOXVDMACMD_FROM_BODY(pCmd);
    2388     VBOXVDMACBUF_DR RT_UNTRUSTED_VOLATILE_GUEST *pDr       = VBOXVDMACBUF_DR_FROM_TAIL(pDmaHdr);
    2389 
    2390     AssertRC(rc);
    2391     pDr->rc = rc;
    2392 
    2393     Assert(pVGAState->fGuestCaps & VBVACAPS_COMPLETEGCMD_BY_IOREAD);
    2394     rc = VBoxSHGSMICommandComplete(pIns, pDr);
    2395     AssertRC(rc);
    2396 
    2397     return rc;
    2398 }
    2399 
    2400 /**
    2401  * Worker for vboxVDMACmdExecBlt().
    2402  */
    2403 static int vboxVDMACmdExecBltPerform(PVBOXVDMAHOST pVdma, const VBOXVIDEOOFFSET offDst, const VBOXVIDEOOFFSET offSrc,
    2404                                      const PVBOXVDMA_SURF_DESC pDstDesc, const PVBOXVDMA_SURF_DESC pSrcDesc,
    2405                                      const VBOXVDMA_RECTL *pDstRectl, const VBOXVDMA_RECTL *pSrcRectl)
    2406 {
    2407     /*
    2408      * We do not support color conversion.
    2409      */
    2410     AssertReturn(pDstDesc->format == pSrcDesc->format, VERR_INVALID_FUNCTION);
    2411 
    2412     /* we do not support stretching (checked by caller) */
    2413     Assert(pDstRectl->height == pSrcRectl->height);
    2414     Assert(pDstRectl->width  == pSrcRectl->width);
    2415 
    2416     uint8_t *pbRam = pVdma->pVGAState->vram_ptrR3;
    2417     AssertCompileSize(pVdma->pVGAState->vram_size, sizeof(uint32_t));
    2418     uint32_t cbVRamSize = pVdma->pVGAState->vram_size;
    2419     uint8_t *pbDstSurf = pbRam + offDst;
    2420     uint8_t *pbSrcSurf = pbRam + offSrc;
    2421 
    2422     if (   pDstDesc->width == pDstRectl->width
    2423         && pSrcDesc->width == pSrcRectl->width
    2424         && pSrcDesc->width == pDstDesc->width
    2425         && pSrcDesc->pitch == pDstDesc->pitch)
    2426     {
    2427         Assert(!pDstRectl->left);
    2428         Assert(!pSrcRectl->left);
    2429         uint32_t offBoth  = pDstDesc->pitch * pDstRectl->top;
    2430         uint32_t cbToCopy = pDstDesc->pitch * pDstRectl->height;
    2431 
    2432         if (   cbToCopy <= cbVRamSize
    2433             && (uintptr_t)(pbDstSurf + offBoth) - (uintptr_t)pbRam <= cbVRamSize - cbToCopy
    2434             && (uintptr_t)(pbSrcSurf + offBoth) - (uintptr_t)pbRam <= cbVRamSize - cbToCopy)
    2435         {
    2436             RT_UNTRUSTED_VALIDATED_FENCE();
    2437             memcpy(pbDstSurf + offBoth, pbSrcSurf + offBoth, cbToCopy);
    2438         }
    2439         else
    2440             return VERR_INVALID_PARAMETER;
    2441     }
    2442     else
    2443     {
    2444         uint32_t offDstLineStart =   pDstRectl->left * pDstDesc->bpp >> 3;
    2445         uint32_t offDstLineEnd   = ((pDstRectl->left * pDstDesc->bpp + 7) >> 3) + ((pDstDesc->bpp * pDstRectl->width + 7) >> 3);
    2446         uint32_t cbDstLine       = offDstLineEnd - offDstLineStart;
    2447         uint32_t offDstStart     = pDstDesc->pitch * pDstRectl->top + offDstLineStart;
    2448         Assert(cbDstLine <= pDstDesc->pitch);
    2449         uint32_t cbDstSkip       = pDstDesc->pitch;
    2450         uint8_t *pbDstStart      = pbDstSurf + offDstStart;
    2451 
    2452         uint32_t offSrcLineStart =   pSrcRectl->left * pSrcDesc->bpp >> 3;
    2453 # ifdef VBOX_STRICT
    2454         uint32_t offSrcLineEnd   = ((pSrcRectl->left * pSrcDesc->bpp + 7) >> 3) + ((pSrcDesc->bpp * pSrcRectl->width + 7) >> 3);
    2455         uint32_t cbSrcLine       = offSrcLineEnd - offSrcLineStart;
    2456 # endif
    2457         uint32_t offSrcStart     = pSrcDesc->pitch * pSrcRectl->top + offSrcLineStart;
    2458         Assert(cbSrcLine <= pSrcDesc->pitch);
    2459         uint32_t cbSrcSkip       = pSrcDesc->pitch;
    2460         const uint8_t *pbSrcStart = pbSrcSurf + offSrcStart;
    2461 
    2462         Assert(cbDstLine == cbSrcLine);
    2463 
    2464         for (uint32_t i = 0; ; ++i)
    2465         {
    2466             if (   cbDstLine <= cbVRamSize
    2467                 && (uintptr_t)pbDstStart - (uintptr_t)pbRam <= cbVRamSize - cbDstLine
    2468                 && (uintptr_t)pbSrcStart - (uintptr_t)pbRam <= cbVRamSize - cbDstLine)
    2469             {
    2470                 RT_UNTRUSTED_VALIDATED_FENCE(); /** @todo this could potentially be a buzzkiller. */
    2471                 memcpy(pbDstStart, pbSrcStart, cbDstLine);
    2472             }
    2473             else
    2474                 return VERR_INVALID_PARAMETER;
    2475             if (i == pDstRectl->height)
    2476                 break;
    2477             pbDstStart += cbDstSkip;
    2478             pbSrcStart += cbSrcSkip;
    2479         }
    2480     }
    2481     return VINF_SUCCESS;
    2482 }
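
/*
 * Worked example of the line arithmetic above for a 32 bpp surface with
 * left=2, width=10 (values chosen purely for illustration):
 *
 *     offDstLineStart = 2*32 >> 3                              =  8 bytes
 *     offDstLineEnd   = ((2*32 + 7) >> 3) + ((32*10 + 7) >> 3) =  8 + 40 = 48
 *     cbDstLine       = 48 - 8                                 = 40 bytes (10 px)
 */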
    2483 
    2484 #if 0 /* unused */
    2485 static void vboxVDMARectlUnite(VBOXVDMA_RECTL * pRectl1, const VBOXVDMA_RECTL * pRectl2)
    2486 {
    2487     if (!pRectl1->width)
    2488         *pRectl1 = *pRectl2;
    2489     else
    2490     {
    2491         int16_t x21 = pRectl1->left + pRectl1->width;
    2492         int16_t x22 = pRectl2->left + pRectl2->width;
    2493         if (pRectl1->left > pRectl2->left)
    2494         {
    2495             pRectl1->left = pRectl2->left;
    2496             pRectl1->width = x21 < x22 ? x22 - pRectl1->left : x21 - pRectl1->left;
    2497         }
    2498         else if (x21 < x22)
    2499             pRectl1->width = x22 - pRectl1->left;
    2500 
    2501         x21 = pRectl1->top + pRectl1->height;
    2502         x22 = pRectl2->top + pRectl2->height;
    2503         if (pRectl1->top > pRectl2->top)
    2504         {
    2505             pRectl1->top = pRectl2->top;
    2506             pRectl1->height = x21 < x22 ? x22 - pRectl1->top : x21 - pRectl1->top;
    2507         }
    2508         else if (x21 < x22)
    2509             pRectl1->height = x22 - pRectl1->top;
    2510     }
    2511 }
    2512 #endif /* unused */
    2513 
    2514 /**
    2515  * Handles VBOXVDMACMD_TYPE_DMA_PRESENT_BLT for vboxVDMACmdExec().
    2516  *
    2517  * @returns number of bytes (positive) of the full command on success,
    2518  *          otherwise a negative error status (VERR_XXX).
    2519  *
    2520  * @param   pVdma           The VDMA channel.
    2521  * @param   pBlt            Blit command buffer.  This is to be considered
    2522  *                          volatile!
    2523  * @param   cbBuffer        Number of bytes accessible at @a pBlt.
    2524  */
    2525 static int vboxVDMACmdExecBlt(PVBOXVDMAHOST pVdma, const VBOXVDMACMD_DMA_PRESENT_BLT RT_UNTRUSTED_VOLATILE_GUEST *pBlt,
    2526                               uint32_t cbBuffer)
    2527 {
    2528     /*
    2529      * Validate and make a local copy of the blt command up to the rectangle array.
    2530      */
    2531     AssertReturn(cbBuffer >= RT_UOFFSETOF(VBOXVDMACMD_DMA_PRESENT_BLT, aDstSubRects), VERR_INVALID_PARAMETER);
    2532     VBOXVDMACMD_DMA_PRESENT_BLT BltSafe;
    2533     RT_BCOPY_VOLATILE(&BltSafe, (void const *)pBlt, RT_UOFFSETOF(VBOXVDMACMD_DMA_PRESENT_BLT, aDstSubRects));
    2534     RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
    2535 
    2536     AssertReturn(BltSafe.cDstSubRects < _8M, VERR_INVALID_PARAMETER);
    2537     uint32_t const cbBlt = RT_UOFFSETOF_DYN(VBOXVDMACMD_DMA_PRESENT_BLT, aDstSubRects[BltSafe.cDstSubRects]);
    2538     AssertReturn(cbBuffer >= cbBlt, VERR_INVALID_PARAMETER);
    2539 
    2540     /*
    2541      * We do not support stretching.
    2542      */
    2543     AssertReturn(BltSafe.srcRectl.width  == BltSafe.dstRectl.width,  VERR_INVALID_FUNCTION);
    2544     AssertReturn(BltSafe.srcRectl.height == BltSafe.dstRectl.height, VERR_INVALID_FUNCTION);
    2545 
    2546     Assert(BltSafe.cDstSubRects);
    2547 
    2548     RT_UNTRUSTED_VALIDATED_FENCE();
    2549 
    2550     /*
    2551      * Do the work.
    2552      */
    2553     //VBOXVDMA_RECTL updateRectl = {0, 0, 0, 0}; - pointless
    2554     if (BltSafe.cDstSubRects)
    2555     {
    2556         for (uint32_t i = 0; i < BltSafe.cDstSubRects; ++i)
    2557         {
    2558             VBOXVDMA_RECTL dstSubRectl;
    2559             dstSubRectl.left   = pBlt->aDstSubRects[i].left;
    2560             dstSubRectl.top    = pBlt->aDstSubRects[i].top;
    2561             dstSubRectl.width  = pBlt->aDstSubRects[i].width;
    2562             dstSubRectl.height = pBlt->aDstSubRects[i].height;
    2563             RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
    2564 
    2565             VBOXVDMA_RECTL srcSubRectl = dstSubRectl;
    2566 
    2567             dstSubRectl.left += BltSafe.dstRectl.left;
    2568             dstSubRectl.top  += BltSafe.dstRectl.top;
    2569 
    2570             srcSubRectl.left += BltSafe.srcRectl.left;
    2571             srcSubRectl.top  += BltSafe.srcRectl.top;
    2572 
    2573             int rc = vboxVDMACmdExecBltPerform(pVdma, BltSafe.offDst, BltSafe.offSrc, &BltSafe.dstDesc, &BltSafe.srcDesc,
    2574                                                &dstSubRectl, &srcSubRectl);
    2575             AssertRCReturn(rc, rc);
    2576 
    2577             //vboxVDMARectlUnite(&updateRectl, &dstSubRectl); - pointless
    2578         }
    2579     }
    2580     else
    2581     {
    2582         int rc = vboxVDMACmdExecBltPerform(pVdma, BltSafe.offDst, BltSafe.offSrc, &BltSafe.dstDesc, &BltSafe.srcDesc,
    2583                                            &BltSafe.dstRectl, &BltSafe.srcRectl);
    2584         AssertRCReturn(rc, rc);
    2585 
    2586         //vboxVDMARectlUnite(&updateRectl, &BltSafe.dstRectl); - pointless
    2587     }
    2588 
    2589     return cbBlt;
    2590 }
    2591 
    2592 
    2593 /**
    2594  * Handles VBOXVDMACMD_TYPE_DMA_BPB_TRANSFER for vboxVDMACmdCheckCrCmd() and
    2595  * vboxVDMACmdExec().
    2596  *
    2597  * @returns number of bytes (positive) of the full command on success,
    2598  *          otherwise a negative error status (VERR_XXX).
    2599  *
    2600  * @param   pVdma           The VDMA channel.
    2601  * @param   pTransfer       Transfer command buffer.  This is to be considered
    2602  *                          volatile!
    2603  * @param   cbBuffer        Number of bytes accessible at @a pTransfer.
    2604  */
    2605 static int vboxVDMACmdExecBpbTransfer(PVBOXVDMAHOST pVdma, VBOXVDMACMD_DMA_BPB_TRANSFER RT_UNTRUSTED_VOLATILE_GUEST *pTransfer,
    2606                                       uint32_t cbBuffer)
    2607 {
    2608     /*
    2609      * Make a copy of the command (it's volatile).
    2610      */
    2611     AssertReturn(cbBuffer >= sizeof(*pTransfer), VERR_INVALID_PARAMETER);
    2612     VBOXVDMACMD_DMA_BPB_TRANSFER TransferSafeCopy;
    2613     RT_COPY_VOLATILE(TransferSafeCopy, *pTransfer);
    2614     RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
    2615 
    2616     PVGASTATE   pVGAState    = pVdma->pVGAState;
    2617     PPDMDEVINS  pDevIns      = pVGAState->pDevInsR3;
    2618     uint8_t    *pbRam        = pVGAState->vram_ptrR3;
    2619     uint32_t    cbTransfer   = TransferSafeCopy.cbTransferSize;
    2620 
    2621     /*
    2622      * Validate VRAM offset.
    2623      */
    2624     if (TransferSafeCopy.fFlags & VBOXVDMACMD_DMA_BPB_TRANSFER_F_SRC_VRAMOFFSET)
    2625         AssertReturn(   cbTransfer <= pVGAState->vram_size
    2626                      && TransferSafeCopy.Src.offVramBuf <= pVGAState->vram_size - cbTransfer,
    2627                      VERR_INVALID_PARAMETER);
    2628 
    2629     if (TransferSafeCopy.fFlags & VBOXVDMACMD_DMA_BPB_TRANSFER_F_DST_VRAMOFFSET)
    2630         AssertReturn(   cbTransfer <= pVGAState->vram_size
    2631                      && TransferSafeCopy.Dst.offVramBuf <= pVGAState->vram_size - cbTransfer,
    2632                      VERR_INVALID_PARAMETER);
    2633     RT_UNTRUSTED_VALIDATED_FENCE();
    2634 
    2635     /*
    2636      * Transfer loop.
    2637      */
    2638     uint32_t    cbTransfered = 0;
    2639     int         rc           = VINF_SUCCESS;
    2640     do
    2641     {
    2642         uint32_t cbSubTransfer = cbTransfer;
    2643 
    2644         const void     *pvSrc;
    2645         bool            fSrcLocked = false;
    2646         PGMPAGEMAPLOCK  SrcLock;
    2647         if (TransferSafeCopy.fFlags & VBOXVDMACMD_DMA_BPB_TRANSFER_F_SRC_VRAMOFFSET)
    2648             pvSrc = pbRam + TransferSafeCopy.Src.offVramBuf + cbTransfered;
    2649         else
    2650         {
    2651             RTGCPHYS GCPhysSrcPage = TransferSafeCopy.Src.phBuf + cbTransfered;
    2652             rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, GCPhysSrcPage, 0, &pvSrc, &SrcLock);
    2653             AssertRC(rc);
    2654             if (RT_SUCCESS(rc))
    2655             {
    2656                 fSrcLocked = true;
    2657                 cbSubTransfer = RT_MIN(cbSubTransfer, X86_PAGE_SIZE - (uint32_t)(GCPhysSrcPage & X86_PAGE_OFFSET_MASK));
    2658             }
    2659             else
    2660                 break;
    2661         }
    2662 
    2663         void           *pvDst;
    2664         PGMPAGEMAPLOCK  DstLock;
    2665         bool            fDstLocked = false;
    2666         if (TransferSafeCopy.fFlags & VBOXVDMACMD_DMA_BPB_TRANSFER_F_DST_VRAMOFFSET)
    2667             pvDst = pbRam + TransferSafeCopy.Dst.offVramBuf + cbTransfered;
    2668         else
    2669         {
    2670             RTGCPHYS GCPhysDstPage = TransferSafeCopy.Dst.phBuf + cbTransfered;
    2671             rc = PDMDevHlpPhysGCPhys2CCPtr(pDevIns, GCPhysDstPage, 0, &pvDst, &DstLock);
    2672             AssertRC(rc);
    2673             if (RT_SUCCESS(rc))
    2674             {
    2675                 fDstLocked = true;
    2676                 cbSubTransfer = RT_MIN(cbSubTransfer, X86_PAGE_SIZE - (uint32_t)(GCPhysDstPage & X86_PAGE_OFFSET_MASK));
    2677             }
    2678         }
    2679 
    2680         if (RT_SUCCESS(rc))
    2681         {
    2682             memcpy(pvDst, pvSrc, cbSubTransfer);
    2683             cbTransfered += cbSubTransfer;
    2684             cbTransfer   -= cbSubTransfer;
    2685         }
    2686         else
    2687             cbTransfer = 0; /* force break below */
    2688 
    2689         if (fSrcLocked)
    2690             PDMDevHlpPhysReleasePageMappingLock(pDevIns, &SrcLock);
    2691         if (fDstLocked)
    2692             PDMDevHlpPhysReleasePageMappingLock(pDevIns, &DstLock);
    2693     } while (cbTransfer);
    2694 
    2695     if (RT_SUCCESS(rc))
    2696         return sizeof(TransferSafeCopy);
    2697     return rc;
    2698 }
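
The loop above is the interesting part: whichever side of the transfer lives in guest physical memory is locked one page at a time, and each sub-transfer is clamped with RT_MIN() so it never crosses the locked page's boundary. A standalone sketch of that chunking, assuming a fixed 4 KiB page size and plain buffers instead of the real page-lock API (all names here are illustrative):

    #include <stdint.h>
    #include <string.h>

    #define MODEL_PAGE_SIZE         4096u
    #define MODEL_PAGE_OFFSET_MASK  (MODEL_PAGE_SIZE - 1u)

    /* Copy cbTransfer bytes in chunks that never cross a (model) page
     * boundary on either side, mirroring the RT_MIN() clamping the device
     * code applies after locking each guest page. */
    static void modelBpbCopy(uint8_t *pbDst, uint64_t offDstPhys,
                             const uint8_t *pbSrc, uint64_t offSrcPhys,
                             uint32_t cbTransfer)
    {
        uint32_t cbTransfered = 0;
        while (cbTransfer)
        {
            uint32_t cbChunk      = cbTransfer;
            uint32_t offInSrcPage = (uint32_t)((offSrcPhys + cbTransfered) & MODEL_PAGE_OFFSET_MASK);
            uint32_t offInDstPage = (uint32_t)((offDstPhys + cbTransfered) & MODEL_PAGE_OFFSET_MASK);
            if (cbChunk > MODEL_PAGE_SIZE - offInSrcPage)
                cbChunk = MODEL_PAGE_SIZE - offInSrcPage;
            if (cbChunk > MODEL_PAGE_SIZE - offInDstPage)
                cbChunk = MODEL_PAGE_SIZE - offInDstPage;

            memcpy(pbDst + cbTransfered, pbSrc + cbTransfered, cbChunk);
            cbTransfered += cbChunk;
            cbTransfer   -= cbChunk;
        }
    }
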
    2699 
    2700 /**
    2701  * Worker for vboxVDMACommandProcess().
    2702  *
    2703  * @param   pVdma           The VDMA channel.
    2704  * @param   pbBuffer    Command buffer, considered volatile.
    2705  * @param   cbBuffer    The number of bytes at @a pbBuffer.
    2706  * @param   pCmdDr      The command.  For setting the async flag on chromium
    2707  *                      requests.
    2708  * @param   pfAsyncCmd  Flag to set if async command completion is used on
    2709  *                      chromium requests.  The input state is false, so it
    2710  *                      only ever needs to be set to true.
    2711  */
    2712 static int vboxVDMACmdExec(PVBOXVDMAHOST pVdma, uint8_t const RT_UNTRUSTED_VOLATILE_GUEST *pbBuffer, uint32_t cbBuffer,
    2713                            VBOXVDMACBUF_DR RT_UNTRUSTED_VOLATILE_GUEST *pCmdDr, bool *pfAsyncCmd)
    2714 {
    2715     AssertReturn(pbBuffer, VERR_INVALID_POINTER);
    2716 
    2717     for (;;)
    2718     {
    2719         AssertReturn(cbBuffer >= VBOXVDMACMD_HEADER_SIZE(), VERR_INVALID_PARAMETER);
    2720 
    2721         VBOXVDMACMD const RT_UNTRUSTED_VOLATILE_GUEST  *pCmd       = (VBOXVDMACMD const RT_UNTRUSTED_VOLATILE_GUEST *)pbBuffer;
    2722         VBOXVDMACMD_TYPE                                enmCmdType = pCmd->enmType;
    2723         RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
    2724 
    2725         ASSERT_GUEST_MSG_RETURN(   enmCmdType == VBOXVDMACMD_TYPE_CHROMIUM_CMD
    2726                                 || enmCmdType == VBOXVDMACMD_TYPE_DMA_PRESENT_BLT
    2727                                 || enmCmdType == VBOXVDMACMD_TYPE_DMA_BPB_TRANSFER
    2728                                 || enmCmdType == VBOXVDMACMD_TYPE_DMA_NOP
    2729                                 || enmCmdType == VBOXVDMACMD_TYPE_CHILD_STATUS_IRQ,
    2730                                 ("enmCmdType=%d\n", enmCmdType),
    2731                                 VERR_INVALID_FUNCTION);
    2732         RT_UNTRUSTED_VALIDATED_FENCE();
    2733 
    2734         int cbProcessed;
    2735         switch (enmCmdType)
    2736         {
    2737             case VBOXVDMACMD_TYPE_CHROMIUM_CMD:
    2738             {
    2739                 VBOXVDMACMD_CHROMIUM_CMD RT_UNTRUSTED_VOLATILE_GUEST *pCrCmd = VBOXVDMACMD_BODY(pCmd, VBOXVDMACMD_CHROMIUM_CMD);
    2740                 uint32_t const cbBody = VBOXVDMACMD_BODY_SIZE(cbBuffer);
    2741                 AssertReturn(cbBody >= sizeof(*pCrCmd), VERR_INVALID_PARAMETER);
    2742 
    2743                 PVGASTATE pVGAState = pVdma->pVGAState;
    2744                 AssertReturn(pVGAState->pDrv->pfnCrHgsmiCommandProcess, VERR_NOT_SUPPORTED);
    2745 
    2746                 VBoxSHGSMICommandMarkAsynchCompletion(pCmdDr);
    2747                 pVGAState->pDrv->pfnCrHgsmiCommandProcess(pVGAState->pDrv, pCrCmd, cbBody);
    2748                 *pfAsyncCmd = true;
    2749                 return VINF_SUCCESS;
    2750             }
    2751 
    2752             case VBOXVDMACMD_TYPE_DMA_PRESENT_BLT:
    2753             {
    2754                 VBOXVDMACMD_DMA_PRESENT_BLT RT_UNTRUSTED_VOLATILE_GUEST *pBlt = VBOXVDMACMD_BODY(pCmd, VBOXVDMACMD_DMA_PRESENT_BLT);
    2755                 cbProcessed = vboxVDMACmdExecBlt(pVdma, pBlt, cbBuffer - VBOXVDMACMD_HEADER_SIZE());
    2756                 Assert(cbProcessed >= 0);
    2757                 break;
    2758             }
    2759 
    2760             case VBOXVDMACMD_TYPE_DMA_BPB_TRANSFER:
    2761             {
    2762                 VBOXVDMACMD_DMA_BPB_TRANSFER RT_UNTRUSTED_VOLATILE_GUEST *pTransfer
    2763                     = VBOXVDMACMD_BODY(pCmd, VBOXVDMACMD_DMA_BPB_TRANSFER);
    2764                 cbProcessed = vboxVDMACmdExecBpbTransfer(pVdma, pTransfer, cbBuffer - VBOXVDMACMD_HEADER_SIZE());
    2765                 Assert(cbProcessed >= 0);
    2766                 break;
    2767             }
    2768 
    2769             case VBOXVDMACMD_TYPE_DMA_NOP:
    2770                 return VINF_SUCCESS;
    2771 
    2772             case VBOXVDMACMD_TYPE_CHILD_STATUS_IRQ:
    2773                 return VINF_SUCCESS;
    2774 
    2775             default:
    2776                 AssertFailedReturn(VERR_INVALID_FUNCTION);
    2777         }
    2778 
    2779         /* Advance buffer or return. */
    2780         if (cbProcessed >= 0)
    2781         {
    2782             Assert(cbProcessed > 0);
    2783             cbProcessed += VBOXVDMACMD_HEADER_SIZE();
    2784             if ((uint32_t)cbProcessed >= cbBuffer)
    2785             {
    2786                 Assert((uint32_t)cbProcessed == cbBuffer);
    2787                 return VINF_SUCCESS;
    2788             }
    2789 
    2790             cbBuffer -= cbProcessed;
    2791             pbBuffer += cbProcessed;
    2792         }
    2793         else
    2794         {
    2795             RT_UNTRUSTED_VALIDATED_FENCE();
    2796             return cbProcessed; /* error status */
    2797         }
    2798     }
    2799 }
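
So vboxVDMACmdExec() walks a packed stream of variable-length commands: read the header (the buffer is guest-writable, hence the copy/fence discipline), validate the type, dispatch, then advance by the header-plus-body size until the buffer is exhausted. A minimal sketch of that framing with a hypothetical header layout, not the real VBOXVDMACMD structure:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Hypothetical packed header: a type word followed by the body size. */
    typedef struct MODELCMDHDR
    {
        uint16_t uType;
        uint16_t cbBody;
    } MODELCMDHDR;

    /* Walk back-to-back commands; returns 0, or -1 on a malformed stream. */
    static int modelExecStream(const uint8_t *pb, uint32_t cb)
    {
        while (cb)
        {
            if (cb < sizeof(MODELCMDHDR))
                return -1;                      /* truncated header */
            MODELCMDHDR Hdr;
            memcpy(&Hdr, pb, sizeof(Hdr));      /* copy out first: source is untrusted */

            uint32_t const cbCmd = sizeof(Hdr) + Hdr.cbBody;
            if (cbCmd > cb)
                return -1;                      /* body overruns the buffer */
            printf("cmd type=%u cbBody=%u\n", Hdr.uType, Hdr.cbBody);

            pb += cbCmd;                        /* advance to the next command */
            cb -= cbCmd;
        }
        return 0;
    }
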
    2800 
    2801 /**
    2802  * VDMA worker thread procedure, see vdmaVBVACtlEnableSubmitInternal().
    2803  *
    2804  * @thread VDMA
    2805  */
    2806 static DECLCALLBACK(int) vboxVDMAWorkerThread(RTTHREAD hThreadSelf, void *pvUser)
    2807 {
    2808     RT_NOREF(hThreadSelf);
    2809     PVBOXVDMAHOST       pVdma     = (PVBOXVDMAHOST)pvUser;
    2810     PVGASTATE           pVGAState = pVdma->pVGAState;
    2811     VBVAEXHOSTCONTEXT  *pCmdVbva  = &pVdma->CmdVbva;
    2812     int rc;
    2813 
    2814     VBoxVDMAThreadNotifyConstructSucceeded(&pVdma->Thread, pvUser);
    2815 
    2816     while (!VBoxVDMAThreadIsTerminating(&pVdma->Thread))
    2817     {
    2818         uint8_t RT_UNTRUSTED_VOLATILE_GUEST *pbCmd = NULL;
    2819         uint32_t                             cbCmd = 0;
    2820         VBVAEXHOST_DATA_TYPE enmType = VBoxVBVAExHPDataGet(pCmdVbva, &pbCmd, &cbCmd);
    2821         switch (enmType)
    2822         {
    2823             case VBVAEXHOST_DATA_TYPE_CMD:
    2824                 vboxVDMACrCmdProcess(pVdma, pbCmd, cbCmd);
    2825                 VBoxVBVAExHPDataCompleteCmd(pCmdVbva, cbCmd);
    2826                 VBVARaiseIrq(pVGAState, 0);
    2827                 break;
    2828 
    2829             case VBVAEXHOST_DATA_TYPE_GUESTCTL:
    2830                 rc = vboxVDMACrGuestCtlProcess(pVdma, (VBVAEXHOSTCTL *)pbCmd);
    2831                 VBoxVBVAExHPDataCompleteCtl(pCmdVbva, (VBVAEXHOSTCTL *)pbCmd, rc);
    2832                 break;
    2833 
    2834             case VBVAEXHOST_DATA_TYPE_HOSTCTL:
    2835             {
    2836                 bool fContinue = true;
    2837                 rc = vboxVDMACrHostCtlProcess(pVdma, (VBVAEXHOSTCTL *)pbCmd, &fContinue);
    2838                 VBoxVBVAExHPDataCompleteCtl(pCmdVbva, (VBVAEXHOSTCTL *)pbCmd, rc);
    2839                 if (fContinue)
    2840                     break;
    2841             }
    2842             RT_FALL_THRU();
    2843 
    2844             case VBVAEXHOST_DATA_TYPE_NO_DATA:
    2845                 rc = RTSemEventWaitNoResume(pVdma->Thread.hEvent, RT_INDEFINITE_WAIT);
    2846                 AssertMsg(RT_SUCCESS(rc) || rc == VERR_INTERRUPTED, ("%Rrc\n", rc));
    2847                 break;
    2848 
    2849             default:
    2850                 WARN(("unexpected type %d\n", enmType));
    2851                 break;
    2852         }
    2853     }
    2854 
    2855     VBoxVDMAThreadNotifyTerminatingSucceeded(&pVdma->Thread, pvUser);
    2856 
    2857     return VINF_SUCCESS;
    2858 }
    2859 
    2860 /**
    2861  * Worker for vboxVDMACommand.
    2862  *
    2863  * @returns VBox status code of the operation.
    2864  * @param   pVdma       VDMA instance data.
    2865  * @param   pCmd        The command to process.  Consider content volatile.
    2866  * @param   cbCmd       Number of valid bytes at @a pCmd.  This is at least
    2867  *                      sizeof(VBOXVDMACBUF_DR).
    2868  * @param   pfAsyncCmd  Flag to set if async command completion is used on
    2869  *                      chromium requests.  The input state is false, so it
    2870  *                      only ever needs to be set to true.
    2871  * @thread  EMT
    2872  */
    2873 static int vboxVDMACommandProcess(PVBOXVDMAHOST pVdma, VBOXVDMACBUF_DR RT_UNTRUSTED_VOLATILE_GUEST *pCmd,
    2874                                   uint32_t cbCmd, bool *pfAsyncCmd)
    2875 {
    2876     /*
    2877      * Get the command buffer (volatile).
    2878      */
    2879     uint16_t const cbCmdBuf                = pCmd->cbBuf;
    2880     uint16_t const fCmdFlags               = pCmd->fFlags;
    2881     uint64_t const offVramBuf_or_GCPhysBuf = pCmd->Location.offVramBuf;
    2882     AssertCompile(sizeof(pCmd->Location.offVramBuf) == sizeof(pCmd->Location.phBuf));
    2883     RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
    2884 
    2885     const uint8_t RT_UNTRUSTED_VOLATILE_GUEST  *pbCmdBuf;
    2886     PGMPAGEMAPLOCK                              Lock;
    2887     bool                                        fReleaseLocked = false;
    2888     if (fCmdFlags & VBOXVDMACBUF_FLAG_BUF_FOLLOWS_DR)
    2889     {
    2890         pbCmdBuf = VBOXVDMACBUF_DR_TAIL(pCmd, const uint8_t);
    2891         AssertReturn((uintptr_t)&pbCmdBuf[cbCmdBuf] <= (uintptr_t)&((uint8_t *)pCmd)[cbCmd],
    2892                      VERR_INVALID_PARAMETER);
    2893         RT_UNTRUSTED_VALIDATED_FENCE();
    2894     }
    2895     else if (fCmdFlags & VBOXVDMACBUF_FLAG_BUF_VRAM_OFFSET)
    2896     {
    2897         AssertReturn(   offVramBuf_or_GCPhysBuf <= pVdma->pVGAState->vram_size
    2898                      && offVramBuf_or_GCPhysBuf + cbCmdBuf <= pVdma->pVGAState->vram_size,
    2899                      VERR_INVALID_PARAMETER);
    2900         RT_UNTRUSTED_VALIDATED_FENCE();
    2901 
    2902         pbCmdBuf = (uint8_t const RT_UNTRUSTED_VOLATILE_GUEST *)pVdma->pVGAState->vram_ptrR3 + offVramBuf_or_GCPhysBuf;
    2903     }
    2904     else
    2905     {
    2906         /* Make sure it doesn't cross a page. */
    2907         AssertReturn((uint32_t)(offVramBuf_or_GCPhysBuf & X86_PAGE_OFFSET_MASK) + cbCmdBuf <= (uint32_t)X86_PAGE_SIZE,
    2908                      VERR_INVALID_PARAMETER);
    2909         RT_UNTRUSTED_VALIDATED_FENCE();
    2910 
    2911         int rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pVdma->pVGAState->pDevInsR3, offVramBuf_or_GCPhysBuf, 0 /*fFlags*/,
    2912                                                    (const void **)&pbCmdBuf, &Lock);
    2913         AssertRCReturn(rc, rc); /* if (rc == VERR_PGM_PHYS_PAGE_RESERVED) -> fall back on using PGMPhysRead ?? */
    2914         fReleaseLocked = true;
    2915     }
    2916 
    2917     /*
    2918      * Process the command.
    2919      */
    2920     int rc = vboxVDMACmdExec(pVdma, pbCmdBuf, cbCmdBuf, pCmd, pfAsyncCmd);
    2921     AssertRC(rc);
    2922 
    2923     /* Clean up the command buffer. */
    2924     if (fReleaseLocked)
    2925         PDMDevHlpPhysReleasePageMappingLock(pVdma->pVGAState->pDevInsR3, &Lock);
    2926     return rc;
    2927 }
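
Note the validation idiom used for the guest-supplied buffer locations here (and for the VRAM offsets in the transfer code above): the range check is written as two comparisons so that a huge offset or size cannot wrap the addition and slip past the check. A standalone sketch, with illustrative names:

    #include <stdbool.h>
    #include <stdint.h>

    /* True if [off, off + cb) lies inside a buffer of cbBuf bytes.  Written
     * as two comparisons so the arithmetic cannot wrap, unlike the naive
     * form off + cb <= cbBuf. */
    static bool modelIsRangeInside(uint64_t off, uint64_t cb, uint64_t cbBuf)
    {
        return off <= cbBuf
            && cb  <= cbBuf - off;
    }
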
    2928 
    2929 # if 0 /** @todo vboxVDMAControlProcess is unused */
    2930 static void vboxVDMAControlProcess(PVBOXVDMAHOST pVdma, PVBOXVDMA_CTL pCmd)
    2931 {
    2932     PHGSMIINSTANCE pHgsmi = pVdma->pHgsmi;
    2933     pCmd->i32Result = VINF_SUCCESS;
    2934     int rc = VBoxSHGSMICommandComplete (pHgsmi, pCmd);
    2935     AssertRC(rc);
    2936 }
    2937 # endif
     191
    2938192
    2939193/**
     
    2956210        if (RT_SUCCESS(rc))
    2957211        {
    2958             VBoxVDMAThreadInit(&pVdma->Thread);
    2959 
    2960             rc = RTSemEventMultiCreate(&pVdma->HostCrCtlCompleteEvent);
    2961             if (RT_SUCCESS(rc))
    2962             {
    2963                 rc = VBoxVBVAExHSInit(&pVdma->CmdVbva);
    2964                 if (RT_SUCCESS(rc))
    2965                 {
    2966                     rc = RTCritSectInit(&pVdma->CalloutCritSect);
    2967                     if (RT_SUCCESS(rc))
    2968                     {
    2969212                        pVGAState->pVdma = pVdma;
    2970213
    2971 #ifdef VBOX_WITH_VMSVGA
    2972                         /* No HGCM service if VMSVGA is enabled. */
    2973                         if (!pVGAState->fVMSVGAEnabled)
    2974                         {
    2975                             int rcIgnored = vboxVDMACrCtlHgsmiSetup(pVdma); NOREF(rcIgnored); /** @todo is this ignoring intentional? */
    2976                         }
    2977 #endif
    2978214                        return VINF_SUCCESS;
    2979                     }
    2980 
    2981                     WARN(("RTCritSectInit failed %Rrc\n", rc));
    2982                     VBoxVBVAExHSTerm(&pVdma->CmdVbva);
    2983                 }
    2984                 else
    2985                     WARN(("VBoxVBVAExHSInit failed %Rrc\n", rc));
    2986                 RTSemEventMultiDestroy(pVdma->HostCrCtlCompleteEvent);
    2987             }
    2988             else
    2989                 WARN(("RTSemEventMultiCreate failed %Rrc\n", rc));
    2990 
    2991215            /* the timer is cleaned up automatically */
    2992216        }
     
    3003227void  vboxVDMAReset(struct VBOXVDMAHOST *pVdma)
    3004228{
    3005     vdmaVBVACtlDisableSync(pVdma);
     229    RT_NOREF(pVdma);
    3006230}
    3007231
     
    3013237    if (!pVdma)
    3014238        return;
    3015 
    3016 #ifdef VBOX_WITH_VMSVGA
    3017     if (pVdma->pVGAState->fVMSVGAEnabled)
    3018         VBoxVBVAExHSDisable(&pVdma->CmdVbva);
    3019     else
    3020 #endif
    3021     {
    3022         /** @todo Remove. It does nothing because pVdma->CmdVbva is already disabled at this point
    3023          *        as the result of the SharedOpenGL HGCM service unloading.
    3024          */
    3025         vdmaVBVACtlDisableSync(pVdma);
    3026     }
    3027     VBoxVDMAThreadCleanup(&pVdma->Thread);
    3028     VBoxVBVAExHSTerm(&pVdma->CmdVbva);
    3029     RTSemEventMultiDestroy(pVdma->HostCrCtlCompleteEvent);
    3030     RTCritSectDelete(&pVdma->CalloutCritSect);
    3031239    RTMemFree(pVdma);
    3032240}
     
    3096304     */
    3097305    bool fAsyncCmd = false;
    3098     int rc = vboxVDMACommandProcess(pVdma, pCmd, cbCmd, &fAsyncCmd);
     306    RT_NOREF(cbCmd);
     307    int rc = VERR_NOT_IMPLEMENTED;
    3099308
    3100309    /*
     
    3110319
    3111320
    3112 /**
    3113  * @callback_method_impl{FNVBVAEXHOSTCTL_COMPLETE,
    3114  *      Used by vdmaVBVACtlGenericGuestSubmit() and vdmaVBVACtlEnableDisableSubmit()}
    3115  */
    3116 static DECLCALLBACK(void) vboxCmdVBVACmdCtlGuestCompletion(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl,
    3117                                                            int rc, void *pvContext)
    3118 {
    3119     PVBOXVDMAHOST pVdma = (PVBOXVDMAHOST)pvContext;
    3120     VBOXCMDVBVA_CTL RT_UNTRUSTED_VOLATILE_GUEST *pGCtl
    3121         = (VBOXCMDVBVA_CTL RT_UNTRUSTED_VOLATILE_GUEST *)((uintptr_t)pCtl->u.cmd.pvCmd - sizeof(VBOXCMDVBVA_CTL));
    3122     AssertRC(rc);
    3123     pGCtl->i32Result = rc;
    3124 
    3125     Assert(pVdma->pVGAState->fGuestCaps & VBVACAPS_COMPLETEGCMD_BY_IOREAD);
    3126     rc = VBoxSHGSMICommandComplete(pVdma->pHgsmi, pGCtl);
    3127     AssertRC(rc);
    3128 
    3129     VBoxVBVAExHCtlFree(pVbva, pCtl);
    3130 }
    3131 
    3132 /**
    3133  * Worker for vdmaVBVACtlGenericGuestSubmit() and vdmaVBVACtlOpaqueHostSubmit().
    3134  */
    3135 static int vdmaVBVACtlGenericSubmit(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL_SOURCE enmSource, VBVAEXHOSTCTL_TYPE enmType,
    3136                                     uint8_t RT_UNTRUSTED_VOLATILE_GUEST *pbCmd, uint32_t cbCmd,
    3137                                     PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
    3138 {
    3139     int            rc;
    3140     VBVAEXHOSTCTL *pHCtl = VBoxVBVAExHCtlCreate(&pVdma->CmdVbva, enmType);
    3141     if (pHCtl)
    3142     {
    3143         pHCtl->u.cmd.pvCmd = pbCmd;
    3144         pHCtl->u.cmd.cbCmd = cbCmd;
    3145         rc = vdmaVBVACtlSubmit(pVdma, pHCtl, enmSource, pfnComplete, pvComplete);
    3146         if (RT_SUCCESS(rc))
    3147             return VINF_SUCCESS;
    3148 
    3149         VBoxVBVAExHCtlFree(&pVdma->CmdVbva, pHCtl);
    3150         Log(("vdmaVBVACtlSubmit failed %Rrc\n", rc));
    3151     }
    3152     else
    3153     {
    3154         WARN(("VBoxVBVAExHCtlCreate failed\n"));
    3155         rc = VERR_NO_MEMORY;
    3156     }
    3157     return rc;
    3158 }
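
The shape of vdmaVBVACtlGenericSubmit() is an ownership hand-off: if submission succeeds, the control structure belongs to the processing side and its completion callback frees it; only on failure does the submitter free it. Roughly, under hypothetical names:

    #include <stdlib.h>

    typedef struct MODELCTL { int iType; } MODELCTL;

    /* Stand-in for the submit path: on success the worker side consumes and
     * eventually frees the control structure, as the completion callbacks
     * above do with VBoxVBVAExHCtlFree(). */
    static int modelSubmit(MODELCTL *pCtl)
    {
        free(pCtl);     /* pretend the completion path ran and freed it */
        return 0;
    }

    /* Allocate, submit, and free only on failure; on success, ownership
     * has moved to the completion path. */
    static int modelCreateAndSubmit(int iType)
    {
        MODELCTL *pCtl = (MODELCTL *)malloc(sizeof(*pCtl));
        if (!pCtl)
            return -1;                  /* VERR_NO_MEMORY analogue */
        pCtl->iType = iType;

        int rc = modelSubmit(pCtl);
        if (rc != 0)
            free(pCtl);                 /* submission failed: we still own it */
        return rc;
    }
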
    3159 
    3160 /**
    3161  * Handler for vboxCmdVBVACmdCtl()/VBOXCMDVBVACTL_TYPE_3DCTL.
    3162  */
    3163 static int vdmaVBVACtlGenericGuestSubmit(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL_TYPE enmType,
    3164                                          VBOXCMDVBVA_CTL RT_UNTRUSTED_VOLATILE_GUEST *pCtl, uint32_t cbCtl)
    3165 {
    3166     Assert(cbCtl >= sizeof(VBOXCMDVBVA_CTL)); /* Checked by the caller's caller, vbvaChannelHandler(). */
    3167 
    3168     VBoxSHGSMICommandMarkAsynchCompletion(pCtl);
    3169     int rc = vdmaVBVACtlGenericSubmit(pVdma, VBVAEXHOSTCTL_SOURCE_GUEST, enmType,
    3170                                       (uint8_t RT_UNTRUSTED_VOLATILE_GUEST *)(pCtl + 1),
    3171                                       cbCtl - sizeof(VBOXCMDVBVA_CTL),
    3172                                       vboxCmdVBVACmdCtlGuestCompletion, pVdma);
    3173     if (RT_SUCCESS(rc))
    3174         return VINF_SUCCESS;
    3175 
    3176     WARN(("vdmaVBVACtlGenericSubmit failed %Rrc\n", rc));
    3177     pCtl->i32Result = rc;
    3178     rc = VBoxSHGSMICommandComplete(pVdma->pHgsmi, pCtl);
    3179     AssertRC(rc);
    3180     return VINF_SUCCESS;
    3181 }
    3182 
    3183 /**
    3184  * @callback_method_impl{FNVBVAEXHOSTCTL_COMPLETE, Used by vdmaVBVACtlOpaqueHostSubmit()}
    3185  */
    3186 static DECLCALLBACK(void) vboxCmdVBVACmdCtlHostCompletion(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl,
    3187                                                           int rc, void *pvCompletion)
    3188 {
    3189     VBOXCRCMDCTL *pVboxCtl = (VBOXCRCMDCTL *)pCtl->u.cmd.pvCmd;
    3190     if (pVboxCtl->u.pfnInternal)
    3191         ((PFNCRCTLCOMPLETION)pVboxCtl->u.pfnInternal)(pVboxCtl, pCtl->u.cmd.cbCmd, rc, pvCompletion);
    3192     VBoxVBVAExHCtlFree(pVbva, pCtl);
    3193 }
    3194 
    3195 /**
    3196  * Worker for vboxCmdVBVACmdHostCtl() and vboxCmdVBVACmdHostCtlSync().
    3197  */
    3198 static int vdmaVBVACtlOpaqueHostSubmit(PVBOXVDMAHOST pVdma, struct VBOXCRCMDCTL* pCmd, uint32_t cbCmd,
    3199                                        PFNCRCTLCOMPLETION pfnCompletion, void *pvCompletion)
    3200 {
    3201     pCmd->u.pfnInternal = (PFNRT)pfnCompletion;
    3202     int rc = vdmaVBVACtlGenericSubmit(pVdma, VBVAEXHOSTCTL_SOURCE_HOST, VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE,
    3203                                       (uint8_t *)pCmd, cbCmd, vboxCmdVBVACmdCtlHostCompletion, pvCompletion);
    3204     if (RT_FAILURE(rc))
    3205     {
    3206         if (rc == VERR_INVALID_STATE)
    3207         {
    3208             pCmd->u.pfnInternal = NULL;
    3209             PVGASTATE pVGAState = pVdma->pVGAState;
    3210             rc = pVGAState->pDrv->pfnCrHgcmCtlSubmit(pVGAState->pDrv, pCmd, cbCmd, pfnCompletion, pvCompletion);
    3211             if (!RT_SUCCESS(rc))
    3212                 WARN(("pfnCrHgcmCtlSubmit failed %Rrc\n", rc));
    3213 
    3214             return rc;
    3215         }
    3216         WARN(("vdmaVBVACtlGenericSubmit failed %Rrc\n", rc));
    3217         return rc;
    3218     }
    3219 
    3220     return VINF_SUCCESS;
    3221 }
    3222 
    3223 /**
    3224  * Called from vdmaVBVACtlThreadCreatedEnable().
    3225  */
    3226 static int vdmaVBVANotifyEnable(PVGASTATE pVGAState)
    3227 {
    3228     for (uint32_t i = 0; i < pVGAState->cMonitors; i++)
    3229     {
    3230         int rc = pVGAState->pDrv->pfnVBVAEnable (pVGAState->pDrv, i, NULL, true);
    3231         if (!RT_SUCCESS(rc))
    3232         {
    3233             WARN(("pfnVBVAEnable failed %Rrc\n", rc));
    3234             for (uint32_t j = 0; j < i; j++)
    3235             {
    3236                 pVGAState->pDrv->pfnVBVADisable (pVGAState->pDrv, j);
    3237             }
    3238 
    3239             return rc;
    3240         }
    3241     }
    3242     return VINF_SUCCESS;
    3243 }
    3244 
    3245 /**
    3246  * Called from vdmaVBVACtlThreadCreatedEnable() and vdmaVBVADisableProcess().
    3247  */
    3248 static int vdmaVBVANotifyDisable(PVGASTATE pVGAState)
    3249 {
    3250     for (uint32_t i = 0; i < pVGAState->cMonitors; i++)
    3251         pVGAState->pDrv->pfnVBVADisable(pVGAState->pDrv, i);
    3252     return VINF_SUCCESS;
    3253 }
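
vdmaVBVANotifyEnable() follows the usual all-or-nothing pattern: on the first per-monitor failure it disables the monitors already enabled and returns the error, so Main never sees a half-enabled set. A generic sketch with illustrative callback types (not the PDM connector interface):

    #include <stdint.h>

    typedef int  (*PFNMODELENABLE)(uint32_t idScreen);
    typedef void (*PFNMODELDISABLE)(uint32_t idScreen);

    /* Enable all screens or none: on the first failure, disable the screens
     * already enabled (indices [0, i)) and return the error. */
    static int modelEnableAll(uint32_t cScreens,
                              PFNMODELENABLE pfnEnable, PFNMODELDISABLE pfnDisable)
    {
        for (uint32_t i = 0; i < cScreens; i++)
        {
            int rc = pfnEnable(i);
            if (rc != 0)
            {
                for (uint32_t j = 0; j < i; j++)
                    pfnDisable(j);
                return rc;
            }
        }
        return 0;
    }
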
    3254 
    3255 /**
    3256  * Hook that is called by vboxVDMAWorkerThread when it starts.
    3257  *
    3258  * @thread VDMA
    3259  */
    3260 static DECLCALLBACK(void) vdmaVBVACtlThreadCreatedEnable(struct VBOXVDMATHREAD *pThread, int rc,
    3261                                                          void *pvThreadContext, void *pvContext)
    3262 {
    3263     RT_NOREF(pThread);
    3264     PVBOXVDMAHOST pVdma = (PVBOXVDMAHOST)pvThreadContext;
    3265     VBVAEXHOSTCTL* pHCtl = (VBVAEXHOSTCTL*)pvContext;
    3266 
    3267     if (RT_SUCCESS(rc))
    3268     {
    3269         rc = vboxVDMACrGuestCtlProcess(pVdma, pHCtl);
    3270         /* rc == VINF_SUCCESS would mean the actual state change has occurred */
    3271         if (rc == VINF_SUCCESS)
    3272         {
    3273             /* We need to inform Main about the VBVA enable/disable.
    3274              * Main expects notifications to be done from the main thread,
    3275              * so submit it there. */
    3276             PVGASTATE pVGAState = pVdma->pVGAState;
    3277 
    3278             if (VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
    3279                 vdmaVBVANotifyEnable(pVGAState);
    3280             else
    3281                 vdmaVBVANotifyDisable(pVGAState);
    3282         }
    3283         else if (RT_FAILURE(rc))
    3284             WARN(("vboxVDMACrGuestCtlProcess failed %Rrc\n", rc));
    3285     }
    3286     else
    3287         WARN(("vdmaVBVACtlThreadCreatedEnable is passed %Rrc\n", rc));
    3288 
    3289     VBoxVBVAExHPDataCompleteCtl(&pVdma->CmdVbva, pHCtl, rc);
    3290 }
    3291 
    3292 /**
    3293  * Worker for vdmaVBVACtlEnableDisableSubmitInternal() and vdmaVBVACtlEnableSubmitSync().
    3294  */
    3295 static int vdmaVBVACtlEnableSubmitInternal(PVBOXVDMAHOST pVdma, VBVAENABLE RT_UNTRUSTED_VOLATILE_GUEST *pEnable, bool fPaused,
    3296                                            PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
    3297 {
    3298     int rc;
    3299     VBVAEXHOSTCTL *pHCtl = VBoxVBVAExHCtlCreate(&pVdma->CmdVbva,
    3300                                                 fPaused ? VBVAEXHOSTCTL_TYPE_GHH_ENABLE_PAUSED : VBVAEXHOSTCTL_TYPE_GHH_ENABLE);
    3301     if (pHCtl)
    3302     {
    3303         pHCtl->u.cmd.pvCmd  = pEnable;
    3304         pHCtl->u.cmd.cbCmd  = sizeof(*pEnable);
    3305         pHCtl->pfnComplete  = pfnComplete;
    3306         pHCtl->pvComplete   = pvComplete;
    3307 
    3308         rc = VBoxVDMAThreadCreate(&pVdma->Thread, vboxVDMAWorkerThread, pVdma, vdmaVBVACtlThreadCreatedEnable, pHCtl);
    3309         if (RT_SUCCESS(rc))
    3310             return VINF_SUCCESS;
    3311 
    3312         WARN(("VBoxVDMAThreadCreate failed %Rrc\n", rc));
    3313         VBoxVBVAExHCtlFree(&pVdma->CmdVbva, pHCtl);
    3314     }
    3315     else
    3316     {
    3317         WARN(("VBoxVBVAExHCtlCreate failed\n"));
    3318         rc = VERR_NO_MEMORY;
    3319     }
    3320 
    3321     return rc;
    3322 }
    3323 
    3324 /**
    3325  * Worker for vboxVDMASaveLoadExecPerform().
    3326  */
    3327 static int vdmaVBVACtlEnableSubmitSync(PVBOXVDMAHOST pVdma, uint32_t offVram, bool fPaused)
    3328 {
    3329     VBVAENABLE Enable = {0};
    3330     Enable.u32Flags = VBVA_F_ENABLE;
    3331     Enable.u32Offset = offVram;
    3332 
    3333     VDMA_VBVA_CTL_CYNC_COMPLETION Data;
    3334     Data.rc = VERR_NOT_IMPLEMENTED;
    3335     int rc = RTSemEventCreate(&Data.hEvent);
    3336     if (!RT_SUCCESS(rc))
    3337     {
    3338         WARN(("RTSemEventCreate failed %Rrc\n", rc));
    3339         return rc;
    3340     }
    3341 
    3342     rc = vdmaVBVACtlEnableSubmitInternal(pVdma, &Enable, fPaused, vdmaVBVACtlSubmitSyncCompletion, &Data);
    3343     if (RT_SUCCESS(rc))
    3344     {
    3345         rc = RTSemEventWait(Data.hEvent, RT_INDEFINITE_WAIT);
    3346         if (RT_SUCCESS(rc))
    3347         {
    3348             rc = Data.rc;
    3349             if (!RT_SUCCESS(rc))
    3350                 WARN(("vdmaVBVACtlSubmitSyncCompletion returned %Rrc\n", rc));
    3351         }
    3352         else
    3353             WARN(("RTSemEventWait failed %Rrc\n", rc));
    3354     }
    3355     else
    3356         WARN(("vdmaVBVACtlSubmit failed %Rrc\n", rc));
    3357 
    3358     RTSemEventDestroy(Data.hEvent);
    3359 
    3360     return rc;
    3361 }
    3362 
    3363 /**
    3364  * Worker for vdmaVBVACtlEnableDisableSubmitInternal().
    3365  */
    3366 static int vdmaVBVACtlDisableSubmitInternal(PVBOXVDMAHOST pVdma, VBVAENABLE RT_UNTRUSTED_VOLATILE_GUEST *pEnable,
    3367                                             PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
    3368 {
    3369     int rc;
    3370     if (VBoxVBVAExHSIsDisabled(&pVdma->CmdVbva))
    3371     {
    3372         WARN(("VBoxVBVAExHSIsDisabled: disabled\n"));
    3373         return VINF_SUCCESS;
    3374     }
    3375 
    3376     VBVAEXHOSTCTL *pHCtl = VBoxVBVAExHCtlCreate(&pVdma->CmdVbva, VBVAEXHOSTCTL_TYPE_GHH_DISABLE);
    3377     if (!pHCtl)
    3378     {
    3379         WARN(("VBoxVBVAExHCtlCreate failed\n"));
    3380         return VERR_NO_MEMORY;
    3381     }
    3382 
    3383     pHCtl->u.cmd.pvCmd = pEnable;
    3384     pHCtl->u.cmd.cbCmd = sizeof(*pEnable);
    3385     rc = vdmaVBVACtlSubmit(pVdma, pHCtl, VBVAEXHOSTCTL_SOURCE_GUEST, pfnComplete, pvComplete);
    3386     if (RT_SUCCESS(rc))
    3387         return VINF_SUCCESS;
    3388 
    3389     WARN(("vdmaVBVACtlSubmit failed rc %Rrc\n", rc));
    3390     VBoxVBVAExHCtlFree(&pVdma->CmdVbva, pHCtl);
    3391     return rc;
    3392 }
    3393 
    3394 /**
    3395  * Worker for vdmaVBVACtlEnableDisableSubmit().
    3396  */
    3397 static int vdmaVBVACtlEnableDisableSubmitInternal(PVBOXVDMAHOST pVdma, VBVAENABLE RT_UNTRUSTED_VOLATILE_GUEST *pEnable,
    3398                                                   PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
    3399 {
    3400     bool fEnable = (pEnable->u32Flags & (VBVA_F_ENABLE | VBVA_F_DISABLE)) == VBVA_F_ENABLE;
    3401     if (fEnable)
    3402         return vdmaVBVACtlEnableSubmitInternal(pVdma, pEnable, false, pfnComplete, pvComplete);
    3403     return vdmaVBVACtlDisableSubmitInternal(pVdma, pEnable, pfnComplete, pvComplete);
    3404 }
    3405 
    3406 /**
    3407  * Handler for vboxCmdVBVACmdCtl/VBOXCMDVBVACTL_TYPE_ENABLE.
    3408  */
    3409 static int vdmaVBVACtlEnableDisableSubmit(PVBOXVDMAHOST pVdma, VBOXCMDVBVA_CTL_ENABLE RT_UNTRUSTED_VOLATILE_GUEST *pEnable)
    3410 {
    3411     VBoxSHGSMICommandMarkAsynchCompletion(&pEnable->Hdr);
    3412     int rc = vdmaVBVACtlEnableDisableSubmitInternal(pVdma, &pEnable->Enable, vboxCmdVBVACmdCtlGuestCompletion, pVdma);
    3413     if (RT_SUCCESS(rc))
    3414         return VINF_SUCCESS;
    3415 
    3416     WARN(("vdmaVBVACtlEnableDisableSubmitInternal failed %Rrc\n", rc));
    3417     pEnable->Hdr.i32Result = rc;
    3418     rc = VBoxSHGSMICommandComplete(pVdma->pHgsmi, &pEnable->Hdr);
    3419     AssertRC(rc);
    3420     return VINF_SUCCESS;
    3421 }
    3422 
    3423 /**
    3424  * @callback_method_impl{FNVBVAEXHOSTCTL_COMPLETE,
    3425  *      Used by vdmaVBVACtlSubmitSync() and vdmaVBVACtlEnableSubmitSync().}
    3426  */
    3427 static DECLCALLBACK(void) vdmaVBVACtlSubmitSyncCompletion(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl,
    3428                                                           int rc, void *pvContext)
    3429 {
    3430     RT_NOREF(pVbva, pCtl);
    3431     VDMA_VBVA_CTL_CYNC_COMPLETION *pData = (VDMA_VBVA_CTL_CYNC_COMPLETION *)pvContext;
    3432     pData->rc = rc;
    3433     rc = RTSemEventSignal(pData->hEvent);
    3434     if (!RT_SUCCESS(rc))
    3435         WARN(("RTSemEventSignal failed %Rrc\n", rc));
    3436 }
    3437 
    3438 
    3439 /**
    3440  * Submits the control command and waits synchronously for its completion.
    3441  */
    3442 static int vdmaVBVACtlSubmitSync(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL *pCtl, VBVAEXHOSTCTL_SOURCE enmSource)
    3443 {
    3444     VDMA_VBVA_CTL_CYNC_COMPLETION Data;
    3445     Data.rc     = VERR_NOT_IMPLEMENTED;
    3446     Data.hEvent = NIL_RTSEMEVENT;
    3447     int rc = RTSemEventCreate(&Data.hEvent);
    3448     if (RT_SUCCESS(rc))
    3449     {
    3450         rc = vdmaVBVACtlSubmit(pVdma, pCtl, enmSource, vdmaVBVACtlSubmitSyncCompletion, &Data);
    3451         if (RT_SUCCESS(rc))
    3452         {
    3453             rc = RTSemEventWait(Data.hEvent, RT_INDEFINITE_WAIT);
    3454             if (RT_SUCCESS(rc))
    3455             {
    3456                 rc = Data.rc;
    3457                 if (!RT_SUCCESS(rc))
    3458                     WARN(("vdmaVBVACtlSubmitSyncCompletion returned %Rrc\n", rc));
    3459             }
    3460             else
    3461                 WARN(("RTSemEventWait failed %Rrc\n", rc));
    3462         }
    3463         else
    3464             Log(("vdmaVBVACtlSubmit failed %Rrc\n", rc));
    3465 
    3466         RTSemEventDestroy(Data.hEvent);
    3467     }
    3468     else
    3469         WARN(("RTSemEventCreate failed %Rrc\n", rc));
    3470     return rc;
    3471 }
    3472 
    3473 /**
    3474  * Worker for vboxVDMASaveStateExecPrep().
    3475  */
    3476 static int vdmaVBVAPause(PVBOXVDMAHOST pVdma)
    3477 {
    3478     VBVAEXHOSTCTL Ctl;
    3479     Ctl.enmType = VBVAEXHOSTCTL_TYPE_HH_INTERNAL_PAUSE;
    3480     return vdmaVBVACtlSubmitSync(pVdma, &Ctl, VBVAEXHOSTCTL_SOURCE_HOST);
    3481 }
    3482 
    3483 /**
    3484  * Worker for vboxVDMASaveLoadExecPerform() and vboxVDMASaveStateExecDone().
    3485  */
    3486 static int vdmaVBVAResume(PVBOXVDMAHOST pVdma)
    3487 {
    3488     VBVAEXHOSTCTL Ctl;
    3489     Ctl.enmType = VBVAEXHOSTCTL_TYPE_HH_INTERNAL_RESUME;
    3490     return vdmaVBVACtlSubmitSync(pVdma, &Ctl, VBVAEXHOSTCTL_SOURCE_HOST);
    3491 }
    3492 
    3493 /**
    3494  * Worker for vboxCmdVBVACmdSubmit(), vboxCmdVBVACmdFlush() and vboxCmdVBVATimerRefresh().
    3495  */
    3496 static int vboxVDMACmdSubmitPerform(struct VBOXVDMAHOST *pVdma)
    3497 {
    3498     int rc = VBoxVBVAExHSCheckCommands(&pVdma->CmdVbva);
    3499     switch (rc)
    3500     {
    3501         case VINF_SUCCESS:
    3502             return VBoxVDMAThreadEventNotify(&pVdma->Thread);
    3503         case VINF_ALREADY_INITIALIZED:
    3504         case VINF_EOF:
    3505         case VERR_INVALID_STATE:
    3506             return VINF_SUCCESS;
    3507         default:
    3508             Assert(!RT_FAILURE(rc));
    3509             return RT_FAILURE(rc) ? rc : VERR_INTERNAL_ERROR;
    3510     }
    3511 }
    3512 
    3513 
    3514 /**
    3515  * @interface_method_impl{PDMIDISPLAYVBVACALLBACKS,pfnCrCtlSubmit}
    3516  */
    3517 int vboxCmdVBVACmdHostCtl(PPDMIDISPLAYVBVACALLBACKS pInterface,
    3518                           struct VBOXCRCMDCTL *pCmd,
    3519                           uint32_t cbCmd,
    3520                           PFNCRCTLCOMPLETION pfnCompletion,
    3521                           void *pvCompletion)
    3522 {
    3523     PVGASTATE pVGAState = PPDMIDISPLAYVBVACALLBACKS_2_PVGASTATE(pInterface);
    3524     struct VBOXVDMAHOST *pVdma = pVGAState->pVdma;
    3525     if (pVdma == NULL)
    3526         return VERR_INVALID_STATE;
    3527     pCmd->CalloutList.List.pNext = NULL;
    3528     return vdmaVBVACtlOpaqueHostSubmit(pVdma, pCmd, cbCmd, pfnCompletion, pvCompletion);
    3529 }
    3530 
    3531 /**
    3532  * Argument package from vboxCmdVBVACmdHostCtlSync to vboxCmdVBVACmdHostCtlSyncCb.
    3533  */
    3534 typedef struct VBOXCMDVBVA_CMDHOSTCTL_SYNC
    3535 {
    3536     struct VBOXVDMAHOST *pVdma;
    3537     uint32_t fProcessing;
    3538     int rc;
    3539 } VBOXCMDVBVA_CMDHOSTCTL_SYNC;
    3540 
    3541 /**
    3542  * @callback_method_impl{FNCRCTLCOMPLETION, Used by vboxCmdVBVACmdHostCtlSync.}
    3543  */
    3544 static DECLCALLBACK(void) vboxCmdVBVACmdHostCtlSyncCb(struct VBOXCRCMDCTL *pCmd, uint32_t cbCmd, int rc, void *pvCompletion)
    3545 {
    3546     RT_NOREF(pCmd, cbCmd);
    3547     VBOXCMDVBVA_CMDHOSTCTL_SYNC *pData = (VBOXCMDVBVA_CMDHOSTCTL_SYNC *)pvCompletion;
    3548 
    3549     pData->rc = rc;
    3550 
    3551     struct VBOXVDMAHOST *pVdma = pData->pVdma;
    3552 
    3553     ASMAtomicIncS32(&pVdma->i32cHostCrCtlCompleted);
    3554 
    3555     pData->fProcessing = 0;
    3556 
    3557     RTSemEventMultiSignal(pVdma->HostCrCtlCompleteEvent);
    3558 }
    3559 
    3560 /**
    3561  * Worker for vboxVDMACrCtlHgsmiSetup.
    3562  *
    3563  * @note r=bird: not to be confused with the callout function below. sigh.
    3564  */
    3565 static DECLCALLBACK(int) vboxCmdVBVACmdCallout(struct VBOXVDMAHOST *pVdma, struct VBOXCRCMDCTL* pCmd,
    3566                                                VBOXCRCMDCTL_CALLOUT_LISTENTRY *pEntry, PFNVBOXCRCMDCTL_CALLOUT_CB pfnCb)
    3567 {
    3568     pEntry->pfnCb = pfnCb;
    3569     int rc = RTCritSectEnter(&pVdma->CalloutCritSect);
    3570     if (RT_SUCCESS(rc))
    3571     {
    3572         RTListAppend(&pCmd->CalloutList.List, &pEntry->Node);
    3573         RTCritSectLeave(&pVdma->CalloutCritSect);
    3574 
    3575         RTSemEventMultiSignal(pVdma->HostCrCtlCompleteEvent);
    3576     }
    3577     else
    3578         WARN(("RTCritSectEnter failed %Rrc\n", rc));
    3579 
    3580     return rc;
    3581 }
    3582 
    3583 
    3584 /**
    3585  * Worker for vboxCmdVBVACmdHostCtlSync.
    3586  */
    3587 static int vboxCmdVBVACmdCalloutProcess(struct VBOXVDMAHOST *pVdma, struct VBOXCRCMDCTL* pCmd)
    3588 {
    3589     int rc = VINF_SUCCESS;
    3590     for (;;)
    3591     {
    3592         rc = RTCritSectEnter(&pVdma->CalloutCritSect);
    3593         if (RT_SUCCESS(rc))
    3594         {
    3595             VBOXCRCMDCTL_CALLOUT_LISTENTRY* pEntry = RTListGetFirst(&pCmd->CalloutList.List, VBOXCRCMDCTL_CALLOUT_LISTENTRY, Node);
    3596             if (pEntry)
    3597                 RTListNodeRemove(&pEntry->Node);
    3598             RTCritSectLeave(&pVdma->CalloutCritSect);
    3599 
    3600             if (!pEntry)
    3601                 break;
    3602 
    3603             pEntry->pfnCb(pEntry);
    3604         }
    3605         else
    3606         {
    3607             WARN(("RTCritSectEnter failed %Rrc\n", rc));
    3608             break;
    3609         }
    3610     }
    3611 
    3612     return rc;
    3613 }
    3614 
    3615 /**
    3616  * @interface_method_impl{PDMIDISPLAYVBVACALLBACKS,pfnCrCtlSubmitSync}
    3617  */
    3618 DECLCALLBACK(int) vboxCmdVBVACmdHostCtlSync(PPDMIDISPLAYVBVACALLBACKS pInterface, struct VBOXCRCMDCTL *pCmd, uint32_t cbCmd)
    3619 {
    3620     PVGASTATE               pVGAState = PPDMIDISPLAYVBVACALLBACKS_2_PVGASTATE(pInterface);
    3621     struct VBOXVDMAHOST    *pVdma     = pVGAState->pVdma;
    3622     if (pVdma == NULL)
    3623         return VERR_INVALID_STATE;
    3624 
    3625     VBOXCMDVBVA_CMDHOSTCTL_SYNC Data;
    3626     Data.pVdma = pVdma;
    3627     Data.fProcessing = 1;
    3628     Data.rc = VERR_INTERNAL_ERROR;
    3629     RTListInit(&pCmd->CalloutList.List);
    3630     int rc = vdmaVBVACtlOpaqueHostSubmit(pVdma, pCmd, cbCmd, vboxCmdVBVACmdHostCtlSyncCb, &Data);
    3631     if (!RT_SUCCESS(rc))
    3632     {
    3633         WARN(("vdmaVBVACtlOpaqueHostSubmit failed %Rrc", rc));
    3634         return rc;
    3635     }
    3636 
    3637     while (Data.fProcessing)
    3638     {
    3639         /* Poll infrequently to make sure no completed message has been missed. */
    3640         RTSemEventMultiWait(pVdma->HostCrCtlCompleteEvent, 500);
    3641 
    3642         vboxCmdVBVACmdCalloutProcess(pVdma, pCmd);
    3643 
    3644         if (Data.fProcessing)
    3645             RTThreadYield();
    3646     }
    3647 
    3648     /* Process any remaining callouts one last time. */
    3649     vboxCmdVBVACmdCalloutProcess(pVdma, pCmd);
    3650 
    3651     /* 'Our' message has been processed, so we should reset the semaphore.
    3652      * It is still possible that another message has been processed
    3653      * and the semaphore has been signalled again.
    3654      * Reset it only if no other messages have completed.
    3655      */
    3656     int32_t c = ASMAtomicDecS32(&pVdma->i32cHostCrCtlCompleted);
    3657     Assert(c >= 0);
    3658     if (!c)
    3659         RTSemEventMultiReset(pVdma->HostCrCtlCompleteEvent);
    3660 
    3661     rc = Data.rc;
    3662     if (!RT_SUCCESS(rc))
    3663         WARN(("host call failed %Rrc", rc));
    3664 
    3665     return rc;
    3666 }
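
vboxCmdVBVACmdHostCtlSync() is a synchronous wrapper around the asynchronous submit path: the completion callback records the result, clears fProcessing and signals the event, while the caller waits with a timeout so a missed wakeup only costs latency. The same shape in portable C using pthreads rather than the IPRT semaphores (a sketch, not the device code):

    #include <pthread.h>
    #include <stdio.h>

    typedef struct MODELSYNC
    {
        pthread_mutex_t Mtx;
        pthread_cond_t  Cond;
        int             fProcessing;
        int             rc;
    } MODELSYNC;

    /* Completion callback: record the result and wake the waiter. */
    static void modelComplete(MODELSYNC *pSync, int rc)
    {
        pthread_mutex_lock(&pSync->Mtx);
        pSync->rc          = rc;
        pSync->fProcessing = 0;
        pthread_cond_signal(&pSync->Cond);
        pthread_mutex_unlock(&pSync->Mtx);
    }

    /* Worker thread standing in for the asynchronous control processing. */
    static void *modelWorker(void *pvUser)
    {
        modelComplete((MODELSYNC *)pvUser, 0 /* pretend success */);
        return NULL;
    }

    int main(void)
    {
        MODELSYNC Sync = { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 1, -1 };

        pthread_t hThread;
        pthread_create(&hThread, NULL, modelWorker, &Sync);

        /* Synchronous wait for the asynchronous completion. */
        pthread_mutex_lock(&Sync.Mtx);
        while (Sync.fProcessing)
            pthread_cond_wait(&Sync.Cond, &Sync.Mtx);
        pthread_mutex_unlock(&Sync.Mtx);

        pthread_join(hThread, NULL);
        printf("completed with rc=%d\n", Sync.rc);
        return 0;
    }
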
    3667 
    3668 /**
    3669  * Handler for VBVA_CMDVBVA_CTL, see vbvaChannelHandler().
    3670  *
    3671  * @returns VBox status code
    3672  * @param   pVGAState           The VGA state.
    3673  * @param   pCtl                The control command.
    3674  * @param   cbCtl               The size of the control command in bytes.  This
    3675  *                              is at least sizeof(VBOXCMDVBVA_CTL).
    3676  * @thread  EMT
    3677  */
    3678 int vboxCmdVBVACmdCtl(PVGASTATE pVGAState, VBOXCMDVBVA_CTL RT_UNTRUSTED_VOLATILE_GUEST *pCtl, uint32_t cbCtl)
    3679 {
    3680     struct VBOXVDMAHOST *pVdma = pVGAState->pVdma;
    3681     uint32_t uType = pCtl->u32Type;
    3682     RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
    3683 
    3684     if (   uType == VBOXCMDVBVACTL_TYPE_3DCTL
    3685         || uType == VBOXCMDVBVACTL_TYPE_RESIZE
    3686         || uType == VBOXCMDVBVACTL_TYPE_ENABLE)
    3687     {
    3688         RT_UNTRUSTED_VALIDATED_FENCE();
    3689 
    3690         switch (uType)
    3691         {
    3692             case VBOXCMDVBVACTL_TYPE_3DCTL:
    3693                 return vdmaVBVACtlGenericGuestSubmit(pVdma, VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE, pCtl, cbCtl);
    3694 
    3695             case VBOXCMDVBVACTL_TYPE_RESIZE:
    3696                 return vdmaVBVACtlGenericGuestSubmit(pVdma, VBVAEXHOSTCTL_TYPE_GHH_RESIZE, pCtl, cbCtl);
    3697 
    3698             case VBOXCMDVBVACTL_TYPE_ENABLE:
    3699                 ASSERT_GUEST_BREAK(cbCtl == sizeof(VBOXCMDVBVA_CTL_ENABLE));
    3700                 return vdmaVBVACtlEnableDisableSubmit(pVdma, (VBOXCMDVBVA_CTL_ENABLE RT_UNTRUSTED_VOLATILE_GUEST *)pCtl);
    3701 
    3702             default:
    3703                 AssertFailed();
    3704         }
    3705     }
    3706 
    3707     pCtl->i32Result = VERR_INVALID_PARAMETER;
    3708     int rc = VBoxSHGSMICommandComplete(pVdma->pHgsmi, pCtl);
    3709     AssertRC(rc);
    3710     return VINF_SUCCESS;
    3711 }
    3712 
    3713 /**
    3714  * Handler for VBVA_CMDVBVA_SUBMIT, see vbvaChannelHandler().
    3715  *
    3716  * @thread  EMT
    3717  */
    3718 int vboxCmdVBVACmdSubmit(PVGASTATE pVGAState)
    3719 {
    3720     if (!VBoxVBVAExHSIsEnabled(&pVGAState->pVdma->CmdVbva))
    3721     {
    3722         WARN(("vdma VBVA is disabled\n"));
    3723         return VERR_INVALID_STATE;
    3724     }
    3725 
    3726     return vboxVDMACmdSubmitPerform(pVGAState->pVdma);
    3727 }
    3728 
    3729 /**
    3730  * Handler for VBVA_CMDVBVA_FLUSH, see vbvaChannelHandler().
    3731  *
    3732  * @thread  EMT
    3733  */
    3734 int vboxCmdVBVACmdFlush(PVGASTATE pVGAState)
    3735 {
    3736     WARN(("flush\n"));
    3737     if (!VBoxVBVAExHSIsEnabled(&pVGAState->pVdma->CmdVbva))
    3738     {
    3739         WARN(("vdma VBVA is disabled\n"));
    3740         return VERR_INVALID_STATE;
    3741     }
    3742     return vboxVDMACmdSubmitPerform(pVGAState->pVdma);
    3743 }
    3744 
    3745 /**
    3746  * Called from vgaTimerRefresh().
    3747  */
    3748 void vboxCmdVBVATimerRefresh(PVGASTATE pVGAState)
    3749 {
    3750     if (!VBoxVBVAExHSIsEnabled(&pVGAState->pVdma->CmdVbva))
    3751         return;
    3752     vboxVDMACmdSubmitPerform(pVGAState->pVdma);
    3753 }
    3754 
    3755 bool vboxCmdVBVAIsEnabled(PVGASTATE pVGAState)
    3756 {
    3757     return VBoxVBVAExHSIsEnabled(&pVGAState->pVdma->CmdVbva);
    3758 }
    3759 
    3760 
    3761321
    3762322/*
     
    3772332int vboxVDMASaveStateExecPrep(struct VBOXVDMAHOST *pVdma)
    3773333{
    3774     int rc = vdmaVBVAPause(pVdma);
    3775     if (RT_SUCCESS(rc))
    3776         return VINF_SUCCESS;
    3777 
    3778     if (rc != VERR_INVALID_STATE)
    3779     {
    3780         WARN(("vdmaVBVAPause failed %Rrc\n", rc));
    3781         return rc;
    3782     }
    3783 
    3784 # ifdef DEBUG_misha
    3785     WARN(("debug prep"));
    3786 # endif
    3787 
    3788     PVGASTATE pVGAState = pVdma->pVGAState;
    3789     PVBOXVDMACMD_CHROMIUM_CTL pCmd;
    3790     pCmd = (PVBOXVDMACMD_CHROMIUM_CTL)vboxVDMACrCtlCreate(VBOXVDMACMD_CHROMIUM_CTL_TYPE_SAVESTATE_BEGIN, sizeof(*pCmd));
    3791     if (pCmd)
    3792     {
    3793         rc = vboxVDMACrCtlPost(pVGAState, pCmd, sizeof (*pCmd));
    3794         AssertRC(rc);
    3795         if (RT_SUCCESS(rc))
    3796             rc = vboxVDMACrCtlGetRc(pCmd);
    3797         vboxVDMACrCtlRelease(pCmd);
    3798         return rc;
    3799     }
    3800     return VERR_NO_MEMORY;
     334    RT_NOREF(pVdma);
     335    return VINF_SUCCESS;
    3801336}
    3802337
    3803338int vboxVDMASaveStateExecDone(struct VBOXVDMAHOST *pVdma)
    3804339{
    3805     int rc = vdmaVBVAResume(pVdma);
    3806     if (RT_SUCCESS(rc))
    3807         return VINF_SUCCESS;
    3808 
    3809     if (rc != VERR_INVALID_STATE)
    3810     {
    3811         WARN(("vdmaVBVAResume failed %Rrc\n", rc));
    3812         return rc;
    3813     }
    3814 
    3815 # ifdef DEBUG_misha
    3816     WARN(("debug done"));
    3817 # endif
    3818 
    3819     PVGASTATE pVGAState = pVdma->pVGAState;
    3820     PVBOXVDMACMD_CHROMIUM_CTL pCmd;
    3821     pCmd = (PVBOXVDMACMD_CHROMIUM_CTL)vboxVDMACrCtlCreate(VBOXVDMACMD_CHROMIUM_CTL_TYPE_SAVESTATE_END, sizeof(*pCmd));
    3822     Assert(pCmd);
    3823     if (pCmd)
    3824     {
    3825         rc = vboxVDMACrCtlPost(pVGAState, pCmd, sizeof (*pCmd));
    3826         AssertRC(rc);
    3827         if (RT_SUCCESS(rc))
    3828             rc = vboxVDMACrCtlGetRc(pCmd);
    3829         vboxVDMACrCtlRelease(pCmd);
    3830         return rc;
    3831     }
    3832     return VERR_NO_MEMORY;
     340    RT_NOREF(pVdma);
     341    return VINF_SUCCESS;
    3833342}
    3834343
     
    3836345{
    3837346    int rc;
    3838 
    3839     if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
    3840     {
    3841         rc = SSMR3PutU32(pSSM, UINT32_MAX);
    3842         AssertRCReturn(rc, rc);
    3843         return VINF_SUCCESS;
    3844     }
    3845 
    3846     PVGASTATE pVGAState = pVdma->pVGAState;
    3847     uint8_t * pu8VramBase = pVGAState->vram_ptrR3;
    3848 
    3849     rc = SSMR3PutU32(pSSM, (uint32_t)((uintptr_t)pVdma->CmdVbva.pVBVA - (uintptr_t)pu8VramBase));
     347    RT_NOREF(pVdma);
     348
     349    rc = SSMR3PutU32(pSSM, UINT32_MAX);
    3850350    AssertRCReturn(rc, rc);
    3851 
    3852     VBVAEXHOSTCTL HCtl;
    3853     HCtl.enmType = VBVAEXHOSTCTL_TYPE_HH_SAVESTATE;
    3854     HCtl.u.state.pSSM = pSSM;
    3855     HCtl.u.state.u32Version = 0;
    3856     return vdmaVBVACtlSubmitSync(pVdma, &HCtl, VBVAEXHOSTCTL_SOURCE_HOST);
     351    return VINF_SUCCESS;
    3857352}
    3858353
     
    3865360    if (u32 != UINT32_MAX)
    3866361    {
    3867         rc = vdmaVBVACtlEnableSubmitSync(pVdma, u32, true);
    3868         AssertLogRelRCReturn(rc, rc);
    3869 
    3870         Assert(pVdma->CmdVbva.i32State == VBVAEXHOSTCONTEXT_ESTATE_PAUSED);
    3871 
    3872         VBVAEXHOSTCTL HCtl;
    3873         HCtl.enmType = VBVAEXHOSTCTL_TYPE_HH_LOADSTATE;
    3874         HCtl.u.state.pSSM = pSSM;
    3875         HCtl.u.state.u32Version = u32Version;
    3876         rc = vdmaVBVACtlSubmitSync(pVdma, &HCtl, VBVAEXHOSTCTL_SOURCE_HOST);
    3877         AssertLogRelRCReturn(rc, rc);
    3878 
    3879         rc = vdmaVBVAResume(pVdma);
    3880         AssertLogRelRCReturn(rc, rc);
    3881 
    3882         return VINF_SUCCESS;
     362        RT_NOREF(pVdma, u32Version);
     363        WARN(("Unsupported VBVACtl info!\n"));
     364        return VERR_VERSION_MISMATCH;
    3883365    }
    3884366
     
    3888370int vboxVDMASaveLoadDone(struct VBOXVDMAHOST *pVdma)
    3889371{
    3890     if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
    3891         return VINF_SUCCESS;
    3892 
    3893 /** @todo r=bird: BTW. would be great if you put in a couple of comments here and there explaining what
    3894        * the purpose of this code is. */
    3895     VBVAEXHOSTCTL *pHCtl = VBoxVBVAExHCtlCreate(&pVdma->CmdVbva, VBVAEXHOSTCTL_TYPE_HH_LOADSTATE_DONE);
    3896     if (!pHCtl)
    3897     {
    3898         WARN(("VBoxVBVAExHCtlCreate failed\n"));
    3899         return VERR_NO_MEMORY;
    3900     }
    3901 
    3902     /* sanity */
    3903     pHCtl->u.cmd.pvCmd = NULL;
    3904     pHCtl->u.cmd.cbCmd = 0;
    3905 
    3906     /* NULL completion will just free the ctl up */
    3907     int rc = vdmaVBVACtlSubmit(pVdma, pHCtl, VBVAEXHOSTCTL_SOURCE_HOST, NULL, NULL);
    3908     if (RT_FAILURE(rc))
    3909     {
    3910         Log(("vdmaVBVACtlSubmit failed %Rrc\n", rc));
    3911         VBoxVBVAExHCtlFree(&pVdma->CmdVbva, pHCtl);
    3912         return rc;
    3913     }
    3914 
    3915     return VINF_SUCCESS;
    3916 }
    3917 
     372    RT_NOREF(pVdma);
     373    return VINF_SUCCESS;
     374}
     375