VirtualBox

Changeset 63427 in vbox for trunk/src/VBox/Devices/Graphics


Timestamp: Aug 13, 2016 11:24:39 PM
Author: vboxsync
Message: warnings (weird config)

File:
1 edited

  • trunk/src/VBox/Devices/Graphics/DevVGA_VDMA.cpp
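
The bulk of this changeset addresses compiler warnings that only show up in unusual build configurations: parameters used solely inside #ifdef VBOX_WITH_CRHGSMI blocks gain RT_NOREF() statements in the disabled branch, nested preprocessor directives move to the indented "# define"/"# ifdef" style, and the magic 0xffffffff becomes UINT32_MAX. The sketch below is a minimal, hypothetical illustration of the unused-parameter idiom; MY_FEATURE, MY_NOREF and the function name are invented stand-ins, not the real VBOX_WITH_CRHGSMI/RT_NOREF definitions from the file.

    /* Hypothetical sketch of the unused-parameter idiom used in this changeset. */
    #include <cstdio>

    /* Stand-in for IPRT's RT_NOREF(): casts the argument to void so that
       -Wunused-parameter stays quiet when the feature branch is compiled out. */
    #define MY_NOREF(a) ((void)(a))

    /* Feature switch; in DevVGA_VDMA.cpp this role is played by VBOX_WITH_CRHGSMI. */
    /* #define MY_FEATURE */

    static int vdmaDemoDisable(void *pVdma)
    {
    #ifdef MY_FEATURE
        /* the parameter is actually used in this configuration */
        return pVdma ? 0 : -1;
    #else
        MY_NOREF(pVdma);   /* referenced once so the compiler does not warn */
        return 0;
    #endif
    }

    int main()
    {
        std::printf("rc=%d\n", vdmaDemoDisable(nullptr));
        return 0;
    }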

    r63211 r63427  
    1515 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
    1616 */
     17
     18
     19/*********************************************************************************************************************************
     20*   Header Files                                                                                                                 *
     21*********************************************************************************************************************************/
    1722#include <VBox/VMMDev.h>
    1823#include <VBox/vmm/pdmdev.h>
     
    4045#endif
    4146
     47/*********************************************************************************************************************************
     48*   Defined Constants And Macros                                                                                                 *
     49*********************************************************************************************************************************/
    4250#ifdef DEBUG_misha
    43 #define WARN_BP() do { AssertFailed(); } while (0)
     51# define WARN_BP() do { AssertFailed(); } while (0)
    4452#else
    45 #define WARN_BP() do { } while (0)
     53# define WARN_BP() do { } while (0)
    4654#endif
    4755#define WARN(_msg) do { \
     
    5563#define VBOXVDMATHREAD_STATE_TERMINATING            4
    5664
     65
     66/*********************************************************************************************************************************
     67*   Structures and Typedefs                                                                                                      *
     68*********************************************************************************************************************************/
    5769struct VBOXVDMATHREAD;
    5870
    5971typedef DECLCALLBACKPTR(void, PFNVBOXVDMATHREAD_CHANGED)(struct VBOXVDMATHREAD *pThread, int rc, void *pvThreadContext, void *pvChangeContext);
    6072
     73#ifdef VBOX_WITH_CRHGSMI
    6174static DECLCALLBACK(int) vboxCmdVBVACmdCallout(struct VBOXVDMAHOST *pVdma, struct VBOXCRCMDCTL* pCmd, VBOXCRCMDCTL_CALLOUT_LISTENTRY *pEntry, PFNVBOXCRCMDCTL_CALLOUT_CB pfnCb);
     75#endif
    6276
    6377
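
The hunk above wraps the forward declaration of vboxCmdVBVACmdCallout in #ifdef VBOX_WITH_CRHGSMI, so configurations built without that feature never see a declared-but-unused static function. Below is a small, self-contained sketch of the same guard pattern; MY_FEATURE and the helper are invented for illustration.

    #include <cstdio>

    #define MY_FEATURE 1   /* comment out to emulate the stripped-down config */

    #ifdef MY_FEATURE
    static int helperOnlyUsedByFeature(int x);   /* declaration under the guard */
    #endif

    int main()
    {
    #ifdef MY_FEATURE
        std::printf("%d\n", helperOnlyUsedByFeature(21));
    #endif
        return 0;
    }

    #ifdef MY_FEATURE
    /* the definition sits under the same guard, so no "defined but not used"
       warning can appear when the feature is compiled out */
    static int helperOnlyUsedByFeature(int x)
    {
        return x * 2;
    }
    #endif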
     
    156170} VBVAEXHOST_DATA_TYPE;
    157171
    158 static DECLCALLBACK(int) vdmaVBVANotifyDisable(PVGASTATE pVGAState);
    159 
    160 
    161 static VBVAEXHOST_DATA_TYPE VBoxVBVAExHPDataGet(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t **ppCmd, uint32_t *pcbCmd);
    162 
    163 static void VBoxVBVAExHPDataCompleteCmd(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint32_t cbCmd);
    164 static void VBoxVBVAExHPDataCompleteCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL *pCtl, int rc);
    165 
    166 /* VBoxVBVAExHP**, i.e. processor functions, can NOT be called concurrently with each other,
    167  * can be called concurrently with itself as well as with other VBoxVBVAEx** functions except Init/Start/Term apparently */
    168 static int VBoxVBVAExHSCheckCommands(struct VBVAEXHOSTCONTEXT *pCmdVbva);
    169 
    170 static int VBoxVBVAExHSInit(struct VBVAEXHOSTCONTEXT *pCmdVbva);
    171 static int VBoxVBVAExHSEnable(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVABUFFER *pVBVA);
    172 static int VBoxVBVAExHSDisable(struct VBVAEXHOSTCONTEXT *pCmdVbva);
    173 static void VBoxVBVAExHSTerm(struct VBVAEXHOSTCONTEXT *pCmdVbva);
    174 static int VBoxVBVAExHSSaveState(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM);
    175 static int VBoxVBVAExHSLoadState(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM, uint32_t u32Version);
    176 
    177 static VBVAEXHOSTCTL* VBoxVBVAExHCtlAlloc(VBVAEXHOSTCONTEXT *pCmdVbva)
    178 {
    179 #ifndef VBOXVDBG_MEMCACHE_DISABLE
    180     return (VBVAEXHOSTCTL*)RTMemCacheAlloc(pCmdVbva->CtlCache);
    181 #else
    182     return (VBVAEXHOSTCTL*)RTMemAlloc(sizeof (VBVAEXHOSTCTL));
    183 #endif
    184 }
    185 
    186 static void VBoxVBVAExHCtlFree(VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL *pCtl)
    187 {
    188 #ifndef VBOXVDBG_MEMCACHE_DISABLE
    189     RTMemCacheFree(pCmdVbva->CtlCache, pCtl);
    190 #else
    191     RTMemFree(pCtl);
    192 #endif
    193 }
    194 
    195 static VBVAEXHOSTCTL* VBoxVBVAExHCtlCreate(VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL_TYPE enmType)
    196 {
    197     VBVAEXHOSTCTL* pCtl = VBoxVBVAExHCtlAlloc(pCmdVbva);
    198     if (!pCtl)
    199     {
    200         WARN(("VBoxVBVAExHCtlAlloc failed\n"));
    201         return NULL;
    202     }
    203 
    204     pCtl->enmType = enmType;
    205     return pCtl;
    206 }
    207 
    208 static int vboxVBVAExHSProcessorAcquire(struct VBVAEXHOSTCONTEXT *pCmdVbva)
    209 {
    210     Assert(pCmdVbva->i32State >= VBVAEXHOSTCONTEXT_STATE_LISTENING);
    211 
    212     if (ASMAtomicCmpXchgS32(&pCmdVbva->i32State, VBVAEXHOSTCONTEXT_STATE_PROCESSING, VBVAEXHOSTCONTEXT_STATE_LISTENING))
    213             return VINF_SUCCESS;
    214     return VERR_SEM_BUSY;
    215 }
    216 
    217 static VBVAEXHOSTCTL* vboxVBVAExHPCheckCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, bool *pfHostCtl, bool fHostOnlyMode)
    218 {
    219     Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
    220 
    221     if (!fHostOnlyMode && !ASMAtomicUoReadU32(&pCmdVbva->u32cCtls))
    222         return NULL;
    223 
    224     int rc = RTCritSectEnter(&pCmdVbva->CltCritSect);
    225     if (RT_SUCCESS(rc))
    226     {
    227         VBVAEXHOSTCTL* pCtl = RTListGetFirst(&pCmdVbva->HostCtlList, VBVAEXHOSTCTL, Node);
    228         if (pCtl)
    229             *pfHostCtl = true;
    230         else if (!fHostOnlyMode)
    231         {
    232             if (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) != VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
    233             {
    234                 pCtl = RTListGetFirst(&pCmdVbva->GuestCtlList, VBVAEXHOSTCTL, Node);
    235                 /* pCtl can not be null here since pCmdVbva->u32cCtls is not null,
    236                  * and there are no HostCtl commands*/
    237                 Assert(pCtl);
    238                 *pfHostCtl = false;
    239             }
    240         }
    241 
    242         if (pCtl)
    243         {
    244             RTListNodeRemove(&pCtl->Node);
    245             ASMAtomicDecU32(&pCmdVbva->u32cCtls);
    246         }
    247 
    248         RTCritSectLeave(&pCmdVbva->CltCritSect);
    249 
    250         return pCtl;
    251     }
    252     else
    253         WARN(("RTCritSectEnter failed %d\n", rc));
    254 
    255     return NULL;
    256 }
    257 
    258 static VBVAEXHOSTCTL* VBoxVBVAExHPCheckHostCtlOnDisable(struct VBVAEXHOSTCONTEXT *pCmdVbva)
    259 {
    260     bool fHostCtl = false;
    261     VBVAEXHOSTCTL* pCtl = vboxVBVAExHPCheckCtl(pCmdVbva, &fHostCtl, true);
    262     Assert(!pCtl || fHostCtl);
    263     return pCtl;
    264 }
    265 
    266 static int VBoxVBVAExHPPause(struct VBVAEXHOSTCONTEXT *pCmdVbva)
    267 {
    268     if (pCmdVbva->i32EnableState < VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
    269     {
    270         WARN(("Invalid state\n"));
    271         return VERR_INVALID_STATE;
    272     }
    273 
    274     ASMAtomicWriteS32(&pCmdVbva->i32EnableState, VBVAEXHOSTCONTEXT_ESTATE_PAUSED);
    275     return VINF_SUCCESS;
    276 }
    277 
    278 static int VBoxVBVAExHPResume(struct VBVAEXHOSTCONTEXT *pCmdVbva)
    279 {
    280     if (pCmdVbva->i32EnableState != VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
    281     {
    282         WARN(("Invalid state\n"));
    283         return VERR_INVALID_STATE;
    284     }
    285 
    286     ASMAtomicWriteS32(&pCmdVbva->i32EnableState, VBVAEXHOSTCONTEXT_ESTATE_ENABLED);
    287     return VINF_SUCCESS;
    288 }
    289 
    290 
    291 static bool vboxVBVAExHPCheckProcessCtlInternal(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL* pCtl)
    292 {
    293     switch (pCtl->enmType)
    294     {
    295         case VBVAEXHOSTCTL_TYPE_HH_INTERNAL_PAUSE:
    296         {
    297             VBoxVBVAExHPPause(pCmdVbva);
    298             VBoxVBVAExHPDataCompleteCtl(pCmdVbva, pCtl, VINF_SUCCESS);
    299             return true;
    300         }
    301         case VBVAEXHOSTCTL_TYPE_HH_INTERNAL_RESUME:
    302         {
    303             VBoxVBVAExHPResume(pCmdVbva);
    304             VBoxVBVAExHPDataCompleteCtl(pCmdVbva, pCtl, VINF_SUCCESS);
    305             return true;
    306         }
    307         default:
    308             return false;
    309     }
    310 }
    311 
    312 static void vboxVBVAExHPProcessorRelease(struct VBVAEXHOSTCONTEXT *pCmdVbva)
    313 {
    314     Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
    315 
    316     ASMAtomicWriteS32(&pCmdVbva->i32State, VBVAEXHOSTCONTEXT_STATE_LISTENING);
    317 }
    318 
    319 static void vboxVBVAExHPHgEventSet(struct VBVAEXHOSTCONTEXT *pCmdVbva)
    320 {
    321     Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
    322     if (pCmdVbva->pVBVA)
    323         ASMAtomicOrU32(&pCmdVbva->pVBVA->hostFlags.u32HostEvents, VBVA_F_STATE_PROCESSING);
    324 }
    325 
    326 static void vboxVBVAExHPHgEventClear(struct VBVAEXHOSTCONTEXT *pCmdVbva)
    327 {
    328     Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
    329     if (pCmdVbva->pVBVA)
    330         ASMAtomicAndU32(&pCmdVbva->pVBVA->hostFlags.u32HostEvents, ~VBVA_F_STATE_PROCESSING);
    331 }
    332 
    333 static int vboxVBVAExHPCmdGet(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t **ppCmd, uint32_t *pcbCmd)
    334 {
    335     Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
    336     Assert(pCmdVbva->i32EnableState > VBVAEXHOSTCONTEXT_ESTATE_PAUSED);
    337 
    338     VBVABUFFER *pVBVA = pCmdVbva->pVBVA;
    339 
    340     uint32_t indexRecordFirst = pVBVA->indexRecordFirst;
    341     uint32_t indexRecordFree = pVBVA->indexRecordFree;
    342 
    343     Log(("first = %d, free = %d\n",
    344                    indexRecordFirst, indexRecordFree));
    345 
    346     if (indexRecordFirst == indexRecordFree)
    347     {
    348         /* No records to process. Return without assigning output variables. */
    349         return VINF_EOF;
    350     }
    351 
    352     uint32_t cbRecordCurrent = ASMAtomicReadU32(&pVBVA->aRecords[indexRecordFirst].cbRecord);
    353 
    354     /* A new record needs to be processed. */
    355     if (cbRecordCurrent & VBVA_F_RECORD_PARTIAL)
    356     {
    357         /* the record is being recorded, try again */
    358         return VINF_TRY_AGAIN;
    359     }
    360 
    361     uint32_t cbRecord = cbRecordCurrent & ~VBVA_F_RECORD_PARTIAL;
    362 
    363     if (!cbRecord)
    364     {
    365         /* the record is being recorded, try again */
    366         return VINF_TRY_AGAIN;
    367     }
    368 
    369     /* we should not get partial commands here actually */
    370     Assert(cbRecord);
    371 
    372     /* The size of the largest contiguous chunk in the ring buffer. */
    373     uint32_t u32BytesTillBoundary = pVBVA->cbData - pVBVA->off32Data;
    374 
    375     /* The pointer to data in the ring buffer. */
    376     uint8_t *pSrc = &pVBVA->au8Data[pVBVA->off32Data];
    377 
    378     /* Fetch or point the data. */
    379     if (u32BytesTillBoundary >= cbRecord)
    380     {
    381         /* The command does not cross buffer boundary. Return address in the buffer. */
    382         *ppCmd = pSrc;
    383         *pcbCmd = cbRecord;
    384         return VINF_SUCCESS;
    385     }
    386 
    387     LogRel(("CmdVbva: cross-bound writes unsupported\n"));
    388     return VERR_INVALID_STATE;
    389 }
    390 
    391 static void VBoxVBVAExHPDataCompleteCmd(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint32_t cbCmd)
    392 {
    393     VBVABUFFER *pVBVA = pCmdVbva->pVBVA;
    394     pVBVA->off32Data = (pVBVA->off32Data + cbCmd) % pVBVA->cbData;
    395 
    396     pVBVA->indexRecordFirst = (pVBVA->indexRecordFirst + 1) % RT_ELEMENTS(pVBVA->aRecords);
    397 }
    398 
    399 static void VBoxVBVAExHPDataCompleteCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL *pCtl, int rc)
    400 {
    401     if (pCtl->pfnComplete)
    402         pCtl->pfnComplete(pCmdVbva, pCtl, rc, pCtl->pvComplete);
    403     else
    404         VBoxVBVAExHCtlFree(pCmdVbva, pCtl);
    405 }
    406 
    407 static VBVAEXHOST_DATA_TYPE vboxVBVAExHPDataGet(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t **ppCmd, uint32_t *pcbCmd)
    408 {
    409     Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
    410     VBVAEXHOSTCTL*pCtl;
    411     bool fHostClt;
    412 
    413     for (;;)
    414     {
    415         pCtl = vboxVBVAExHPCheckCtl(pCmdVbva, &fHostClt, false);
    416         if (pCtl)
    417         {
    418             if (fHostClt)
    419             {
    420                 if (!vboxVBVAExHPCheckProcessCtlInternal(pCmdVbva, pCtl))
    421                 {
    422                     *ppCmd = (uint8_t*)pCtl;
    423                     *pcbCmd = sizeof (*pCtl);
    424                     return VBVAEXHOST_DATA_TYPE_HOSTCTL;
    425                 }
    426                 continue;
    427             }
    428             else
    429             {
    430                 *ppCmd = (uint8_t*)pCtl;
    431                 *pcbCmd = sizeof (*pCtl);
    432                 return VBVAEXHOST_DATA_TYPE_GUESTCTL;
    433             }
    434         }
    435 
    436         if (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) <= VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
    437             return VBVAEXHOST_DATA_TYPE_NO_DATA;
    438 
    439         int rc = vboxVBVAExHPCmdGet(pCmdVbva, ppCmd, pcbCmd);
    440         switch (rc)
    441         {
    442             case VINF_SUCCESS:
    443                 return VBVAEXHOST_DATA_TYPE_CMD;
    444             case VINF_EOF:
    445                 return VBVAEXHOST_DATA_TYPE_NO_DATA;
    446             case VINF_TRY_AGAIN:
    447                 RTThreadSleep(1);
    448                 continue;
    449             default:
    450                 /* this is something really unexpected, i.e. most likely the guest has written something incorrect to the VBVA buffer */
    451                 WARN(("Warning: vboxVBVAExHCmdGet returned unexpected status %d\n", rc));
    452                 return VBVAEXHOST_DATA_TYPE_NO_DATA;
    453         }
    454     }
    455     /* not reached */
    456 }
    457 
    458 static VBVAEXHOST_DATA_TYPE VBoxVBVAExHPDataGet(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t **ppCmd, uint32_t *pcbCmd)
    459 {
    460     VBVAEXHOST_DATA_TYPE enmType = vboxVBVAExHPDataGet(pCmdVbva, ppCmd, pcbCmd);
    461     if (enmType == VBVAEXHOST_DATA_TYPE_NO_DATA)
    462     {
    463         vboxVBVAExHPHgEventClear(pCmdVbva);
    464         vboxVBVAExHPProcessorRelease(pCmdVbva);
    465         /* we need to prevent racing between us clearing the flag and command check/submission thread, i.e.
    466          * 1. we check the queue -> and it is empty
    467          * 2. submitter adds command to the queue
    468          * 3. submitter checks the "processing" -> and it is true , thus it does not submit a notification
    469          * 4. we clear the "processing" state
    470          * 5. ->here we need to re-check the queue state to ensure we do not leak the notification of the above command
    471          * 6. if the queue appears to be not-empty set the "processing" state back to "true"
    472          **/
    473         int rc = vboxVBVAExHSProcessorAcquire(pCmdVbva);
    474         if (RT_SUCCESS(rc))
    475         {
    476             /* we are the processor now */
    477             enmType = vboxVBVAExHPDataGet(pCmdVbva, ppCmd, pcbCmd);
    478             if (enmType == VBVAEXHOST_DATA_TYPE_NO_DATA)
    479             {
    480                 vboxVBVAExHPProcessorRelease(pCmdVbva);
    481                 return VBVAEXHOST_DATA_TYPE_NO_DATA;
    482             }
    483 
    484             vboxVBVAExHPHgEventSet(pCmdVbva);
    485         }
    486     }
    487 
    488     return enmType;
    489 }
    490 
    491 DECLINLINE(bool) vboxVBVAExHSHasCommands(struct VBVAEXHOSTCONTEXT *pCmdVbva)
    492 {
    493     VBVABUFFER *pVBVA = pCmdVbva->pVBVA;
    494 
    495     if (pVBVA)
    496     {
    497         uint32_t indexRecordFirst = pVBVA->indexRecordFirst;
    498         uint32_t indexRecordFree = pVBVA->indexRecordFree;
    499 
    500         if (indexRecordFirst != indexRecordFree)
    501             return true;
    502     }
    503 
    504     return !!ASMAtomicReadU32(&pCmdVbva->u32cCtls);
    505 }
    506 
    507 /* Checks whether the new commands are ready for processing
    508  * @returns
    509  *   VINF_SUCCESS - there are commands in the queue, and the given thread is now the processor (i.e. typically it would delegate processing to a worker thread)
    510  *   VINF_EOF - no commands in the queue
    511  *   VINF_ALREADY_INITIALIZED - another thread is already processing the commands
    512  *   VERR_INVALID_STATE - the VBVA is paused or pausing */
    513 static int VBoxVBVAExHSCheckCommands(struct VBVAEXHOSTCONTEXT *pCmdVbva)
    514 {
    515     int rc = vboxVBVAExHSProcessorAcquire(pCmdVbva);
    516     if (RT_SUCCESS(rc))
    517     {
    518         /* we are the processor now */
    519         if (vboxVBVAExHSHasCommands(pCmdVbva))
    520         {
    521             vboxVBVAExHPHgEventSet(pCmdVbva);
    522             return VINF_SUCCESS;
    523         }
    524 
    525         vboxVBVAExHPProcessorRelease(pCmdVbva);
    526         return VINF_EOF;
    527     }
    528     if (rc == VERR_SEM_BUSY)
    529         return VINF_ALREADY_INITIALIZED;
    530     return VERR_INVALID_STATE;
    531 }
    532 
    533 static int VBoxVBVAExHSInit(struct VBVAEXHOSTCONTEXT *pCmdVbva)
    534 {
    535     memset(pCmdVbva, 0, sizeof (*pCmdVbva));
    536     int rc = RTCritSectInit(&pCmdVbva->CltCritSect);
    537     if (RT_SUCCESS(rc))
    538     {
    539 #ifndef VBOXVDBG_MEMCACHE_DISABLE
    540         rc = RTMemCacheCreate(&pCmdVbva->CtlCache, sizeof (VBVAEXHOSTCTL),
    541                                 0, /* size_t cbAlignment */
    542                                 UINT32_MAX, /* uint32_t cMaxObjects */
    543                                 NULL, /* PFNMEMCACHECTOR pfnCtor*/
    544                                 NULL, /* PFNMEMCACHEDTOR pfnDtor*/
    545                                 NULL, /* void *pvUser*/
    546                                 0 /* uint32_t fFlags*/
    547                                 );
    548         if (RT_SUCCESS(rc))
    549 #endif
    550         {
    551             RTListInit(&pCmdVbva->GuestCtlList);
    552             RTListInit(&pCmdVbva->HostCtlList);
    553             pCmdVbva->i32State = VBVAEXHOSTCONTEXT_STATE_PROCESSING;
    554             pCmdVbva->i32EnableState = VBVAEXHOSTCONTEXT_ESTATE_DISABLED;
    555             return VINF_SUCCESS;
    556         }
    557 #ifndef VBOXVDBG_MEMCACHE_DISABLE
    558         else
    559             WARN(("RTMemCacheCreate failed %d\n", rc));
    560 #endif
    561     }
    562     else
    563         WARN(("RTCritSectInit failed %d\n", rc));
    564 
    565     return rc;
    566 }
    567 
    568 DECLINLINE(bool) VBoxVBVAExHSIsEnabled(struct VBVAEXHOSTCONTEXT *pCmdVbva)
    569 {
    570     return (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) >= VBVAEXHOSTCONTEXT_ESTATE_PAUSED);
    571 }
    572 
    573 DECLINLINE(bool) VBoxVBVAExHSIsDisabled(struct VBVAEXHOSTCONTEXT *pCmdVbva)
    574 {
    575     return (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) == VBVAEXHOSTCONTEXT_ESTATE_DISABLED);
    576 }
    577 
    578 static int VBoxVBVAExHSEnable(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVABUFFER *pVBVA)
    579 {
    580     if (VBoxVBVAExHSIsEnabled(pCmdVbva))
    581     {
    582         WARN(("VBVAEx is enabled already\n"));
    583         return VERR_INVALID_STATE;
    584     }
    585 
    586     pCmdVbva->pVBVA = pVBVA;
    587     pCmdVbva->pVBVA->hostFlags.u32HostEvents = 0;
    588     ASMAtomicWriteS32(&pCmdVbva->i32EnableState, VBVAEXHOSTCONTEXT_ESTATE_ENABLED);
    589     return VINF_SUCCESS;
    590 }
    591 
    592 static int VBoxVBVAExHSDisable(struct VBVAEXHOSTCONTEXT *pCmdVbva)
    593 {
    594     if (VBoxVBVAExHSIsDisabled(pCmdVbva))
    595         return VINF_SUCCESS;
    596 
    597     ASMAtomicWriteS32(&pCmdVbva->i32EnableState, VBVAEXHOSTCONTEXT_ESTATE_DISABLED);
    598     return VINF_SUCCESS;
    599 }
    600 
    601 static void VBoxVBVAExHSTerm(struct VBVAEXHOSTCONTEXT *pCmdVbva)
    602 {
    603     /* ensure the processor is stopped */
    604     Assert(pCmdVbva->i32State >= VBVAEXHOSTCONTEXT_STATE_LISTENING);
    605 
    606     /* ensure no one tries to submit the command */
    607     if (pCmdVbva->pVBVA)
    608         pCmdVbva->pVBVA->hostFlags.u32HostEvents = 0;
    609 
    610     Assert(RTListIsEmpty(&pCmdVbva->GuestCtlList));
    611     Assert(RTListIsEmpty(&pCmdVbva->HostCtlList));
    612 
    613     RTCritSectDelete(&pCmdVbva->CltCritSect);
    614 
    615 #ifndef VBOXVDBG_MEMCACHE_DISABLE
    616     RTMemCacheDestroy(pCmdVbva->CtlCache);
    617 #endif
    618 
    619     memset(pCmdVbva, 0, sizeof (*pCmdVbva));
    620 }
    621 
    622 static int vboxVBVAExHSSaveGuestCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL* pCtl, uint8_t* pu8VramBase, PSSMHANDLE pSSM)
    623 {
    624     RT_NOREF(pCmdVbva);
    625     int rc = SSMR3PutU32(pSSM, pCtl->enmType);
    626     AssertRCReturn(rc, rc);
    627     rc = SSMR3PutU32(pSSM, pCtl->u.cmd.cbCmd);
    628     AssertRCReturn(rc, rc);
    629     rc = SSMR3PutU32(pSSM, (uint32_t)(pCtl->u.cmd.pu8Cmd - pu8VramBase));
    630     AssertRCReturn(rc, rc);
    631 
    632     return VINF_SUCCESS;
    633 }
    634 
    635 static int vboxVBVAExHSSaveStateLocked(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM)
    636 {
    637     if (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) != VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
    638     {
    639         WARN(("vbva not paused\n"));
    640         return VERR_INVALID_STATE;
    641     }
    642 
    643     VBVAEXHOSTCTL* pCtl;
    644     int rc;
    645     RTListForEach(&pCmdVbva->GuestCtlList, pCtl, VBVAEXHOSTCTL, Node)
    646     {
    647         rc = vboxVBVAExHSSaveGuestCtl(pCmdVbva, pCtl, pu8VramBase, pSSM);
    648         AssertRCReturn(rc, rc);
    649     }
    650 
    651     rc = SSMR3PutU32(pSSM, 0);
    652     AssertRCReturn(rc, rc);
    653 
    654     return VINF_SUCCESS;
    655 }
    656 /* Saves state
    657  * @returns - same as VBoxVBVAExHSCheckCommands, or a failure status if saving the state failed
    658  */
    659 static int VBoxVBVAExHSSaveState(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM)
    660 {
    661     int rc = RTCritSectEnter(&pCmdVbva->CltCritSect);
    662     if (RT_FAILURE(rc))
    663     {
    664         WARN(("RTCritSectEnter failed %d\n", rc));
    665         return rc;
    666     }
    667 
    668     rc = vboxVBVAExHSSaveStateLocked(pCmdVbva, pu8VramBase, pSSM);
    669     if (RT_FAILURE(rc))
    670         WARN(("vboxVBVAExHSSaveStateLocked failed %d\n", rc));
    671 
    672     RTCritSectLeave(&pCmdVbva->CltCritSect);
    673 
    674     return rc;
    675 }
    676 
    677 static int vboxVBVAExHSLoadGuestCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM, uint32_t u32Version)
    678 {
    679     RT_NOREF(u32Version);
    680     uint32_t u32;
    681     int rc = SSMR3GetU32(pSSM, &u32);
    682     AssertLogRelRCReturn(rc, rc);
    683 
    684     if (!u32)
    685         return VINF_EOF;
    686 
    687     VBVAEXHOSTCTL* pHCtl = VBoxVBVAExHCtlCreate(pCmdVbva, (VBVAEXHOSTCTL_TYPE)u32);
    688     if (!pHCtl)
    689     {
    690         WARN(("VBoxVBVAExHCtlCreate failed\n"));
    691         return VERR_NO_MEMORY;
    692     }
    693 
    694     rc = SSMR3GetU32(pSSM, &u32);
    695     AssertLogRelRCReturn(rc, rc);
    696     pHCtl->u.cmd.cbCmd = u32;
    697 
    698     rc = SSMR3GetU32(pSSM, &u32);
    699     AssertLogRelRCReturn(rc, rc);
    700     pHCtl->u.cmd.pu8Cmd = pu8VramBase + u32;
    701 
    702     RTListAppend(&pCmdVbva->GuestCtlList, &pHCtl->Node);
    703     ++pCmdVbva->u32cCtls;
    704 
    705     return VINF_SUCCESS;
    706 }
    707 
    708 
    709 static int vboxVBVAExHSLoadStateLocked(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM, uint32_t u32Version)
    710 {
    711     if (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) != VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
    712     {
    713         WARN(("vbva not stopped\n"));
    714         return VERR_INVALID_STATE;
    715     }
    716 
    717     int rc;
    718 
    719     do {
    720         rc = vboxVBVAExHSLoadGuestCtl(pCmdVbva, pu8VramBase, pSSM, u32Version);
    721         AssertLogRelRCReturn(rc, rc);
    722     } while (VINF_EOF != rc);
    723 
    724     return VINF_SUCCESS;
    725 }
    726 
    727 /* Loads state
    728  * @returns - same as VBoxVBVAExHSCheckCommands, or a failure status if loading the state failed
    729  */
    730 static int VBoxVBVAExHSLoadState(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM, uint32_t u32Version)
    731 {
    732     Assert(VGA_SAVEDSTATE_VERSION_3D <= u32Version);
    733     int rc = RTCritSectEnter(&pCmdVbva->CltCritSect);
    734     if (RT_FAILURE(rc))
    735     {
    736         WARN(("RTCritSectEnter failed %d\n", rc));
    737         return rc;
    738     }
    739 
    740     rc = vboxVBVAExHSLoadStateLocked(pCmdVbva, pu8VramBase, pSSM, u32Version);
    741     if (RT_FAILURE(rc))
    742         WARN(("vboxVBVAExHSSaveStateLocked failed %d\n", rc));
    743 
    744     RTCritSectLeave(&pCmdVbva->CltCritSect);
    745 
    746     return rc;
    747 }
    748 
    749 typedef enum
    750 {
    751     VBVAEXHOSTCTL_SOURCE_GUEST = 0,
    752     VBVAEXHOSTCTL_SOURCE_HOST
    753 } VBVAEXHOSTCTL_SOURCE;
    754 
    755 
    756 static int VBoxVBVAExHCtlSubmit(VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL* pCtl, VBVAEXHOSTCTL_SOURCE enmSource, PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
    757 {
    758     if (!VBoxVBVAExHSIsEnabled(pCmdVbva))
    759     {
    760         Log(("cmd vbva not enabled\n"));
    761         return VERR_INVALID_STATE;
    762     }
    763 
    764     pCtl->pfnComplete = pfnComplete;
    765     pCtl->pvComplete = pvComplete;
    766 
    767     int rc = RTCritSectEnter(&pCmdVbva->CltCritSect);
    768     if (RT_SUCCESS(rc))
    769     {
    770         if (!VBoxVBVAExHSIsEnabled(pCmdVbva))
    771         {
    772             Log(("cmd vbva not enabled\n"));
    773             RTCritSectLeave(&pCmdVbva->CltCritSect);
    774             return VERR_INVALID_STATE;
    775         }
    776 
    777         if (enmSource > VBVAEXHOSTCTL_SOURCE_GUEST)
    778         {
    779             RTListAppend(&pCmdVbva->HostCtlList, &pCtl->Node);
    780         }
    781         else
    782             RTListAppend(&pCmdVbva->GuestCtlList, &pCtl->Node);
    783 
    784         ASMAtomicIncU32(&pCmdVbva->u32cCtls);
    785 
    786         RTCritSectLeave(&pCmdVbva->CltCritSect);
    787 
    788         rc = VBoxVBVAExHSCheckCommands(pCmdVbva);
    789     }
    790     else
    791         WARN(("RTCritSectEnter failed %d\n", rc));
    792 
    793     return rc;
    794 }
    795172
    796173#ifdef VBOX_WITH_CRHGSMI
     
    821198} VBOXVDMAHOST, *PVBOXVDMAHOST;
    822199
     200
     201/*********************************************************************************************************************************
     202*   Internal Functions                                                                                                           *
     203*********************************************************************************************************************************/
    823204#ifdef VBOX_WITH_CRHGSMI
     205static DECLCALLBACK(int) vdmaVBVANotifyDisable(PVGASTATE pVGAState);
     206static VBVAEXHOST_DATA_TYPE VBoxVBVAExHPDataGet(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t **ppCmd, uint32_t *pcbCmd);
     207
     208static void VBoxVBVAExHPDataCompleteCmd(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint32_t cbCmd);
     209static void VBoxVBVAExHPDataCompleteCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL *pCtl, int rc);
     210
     211/* VBoxVBVAExHP**, i.e. processor functions, can NOT be called concurrently with each other,
      212 * can be called concurrently with itself as well as with other VBoxVBVAEx** functions except Init/Start/Term apparently */
     213static int VBoxVBVAExHSCheckCommands(struct VBVAEXHOSTCONTEXT *pCmdVbva);
     214
     215static int VBoxVBVAExHSInit(struct VBVAEXHOSTCONTEXT *pCmdVbva);
     216static int VBoxVBVAExHSEnable(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVABUFFER *pVBVA);
     217static int VBoxVBVAExHSDisable(struct VBVAEXHOSTCONTEXT *pCmdVbva);
     218static void VBoxVBVAExHSTerm(struct VBVAEXHOSTCONTEXT *pCmdVbva);
     219static int VBoxVBVAExHSSaveState(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM);
     220static int VBoxVBVAExHSLoadState(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM, uint32_t u32Version);
     221
     222#endif /* VBOX_WITH_CRHGSMI */
     223
     224
     225
     226#ifdef VBOX_WITH_CRHGSMI
     227
     228static VBVAEXHOSTCTL* VBoxVBVAExHCtlAlloc(VBVAEXHOSTCONTEXT *pCmdVbva)
     229{
     230# ifndef VBOXVDBG_MEMCACHE_DISABLE
     231    return (VBVAEXHOSTCTL*)RTMemCacheAlloc(pCmdVbva->CtlCache);
     232# else
     233    return (VBVAEXHOSTCTL*)RTMemAlloc(sizeof (VBVAEXHOSTCTL));
     234# endif
     235}
     236
     237static void VBoxVBVAExHCtlFree(VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL *pCtl)
     238{
     239# ifndef VBOXVDBG_MEMCACHE_DISABLE
     240    RTMemCacheFree(pCmdVbva->CtlCache, pCtl);
     241# else
     242    RTMemFree(pCtl);
     243# endif
     244}
     245
     246static VBVAEXHOSTCTL *VBoxVBVAExHCtlCreate(VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL_TYPE enmType)
     247{
     248    VBVAEXHOSTCTL* pCtl = VBoxVBVAExHCtlAlloc(pCmdVbva);
     249    if (!pCtl)
     250    {
     251        WARN(("VBoxVBVAExHCtlAlloc failed\n"));
     252        return NULL;
     253    }
     254
     255    pCtl->enmType = enmType;
     256    return pCtl;
     257}
     258
     259static int vboxVBVAExHSProcessorAcquire(struct VBVAEXHOSTCONTEXT *pCmdVbva)
     260{
     261    Assert(pCmdVbva->i32State >= VBVAEXHOSTCONTEXT_STATE_LISTENING);
     262
     263    if (ASMAtomicCmpXchgS32(&pCmdVbva->i32State, VBVAEXHOSTCONTEXT_STATE_PROCESSING, VBVAEXHOSTCONTEXT_STATE_LISTENING))
     264            return VINF_SUCCESS;
     265    return VERR_SEM_BUSY;
     266}
     267
     268static VBVAEXHOSTCTL* vboxVBVAExHPCheckCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, bool *pfHostCtl, bool fHostOnlyMode)
     269{
     270    Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
     271
     272    if (!fHostOnlyMode && !ASMAtomicUoReadU32(&pCmdVbva->u32cCtls))
     273        return NULL;
     274
     275    int rc = RTCritSectEnter(&pCmdVbva->CltCritSect);
     276    if (RT_SUCCESS(rc))
     277    {
     278        VBVAEXHOSTCTL* pCtl = RTListGetFirst(&pCmdVbva->HostCtlList, VBVAEXHOSTCTL, Node);
     279        if (pCtl)
     280            *pfHostCtl = true;
     281        else if (!fHostOnlyMode)
     282        {
     283            if (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) != VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
     284            {
     285                pCtl = RTListGetFirst(&pCmdVbva->GuestCtlList, VBVAEXHOSTCTL, Node);
     286                /* pCtl can not be null here since pCmdVbva->u32cCtls is not null,
     287                 * and there are no HostCtl commands*/
     288                Assert(pCtl);
     289                *pfHostCtl = false;
     290            }
     291        }
     292
     293        if (pCtl)
     294        {
     295            RTListNodeRemove(&pCtl->Node);
     296            ASMAtomicDecU32(&pCmdVbva->u32cCtls);
     297        }
     298
     299        RTCritSectLeave(&pCmdVbva->CltCritSect);
     300
     301        return pCtl;
     302    }
     303    else
     304        WARN(("RTCritSectEnter failed %d\n", rc));
     305
     306    return NULL;
     307}
     308
     309static VBVAEXHOSTCTL* VBoxVBVAExHPCheckHostCtlOnDisable(struct VBVAEXHOSTCONTEXT *pCmdVbva)
     310{
     311    bool fHostCtl = false;
     312    VBVAEXHOSTCTL* pCtl = vboxVBVAExHPCheckCtl(pCmdVbva, &fHostCtl, true);
     313    Assert(!pCtl || fHostCtl);
     314    return pCtl;
     315}
     316
     317static int VBoxVBVAExHPPause(struct VBVAEXHOSTCONTEXT *pCmdVbva)
     318{
     319    if (pCmdVbva->i32EnableState < VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
     320    {
     321        WARN(("Invalid state\n"));
     322        return VERR_INVALID_STATE;
     323    }
     324
     325    ASMAtomicWriteS32(&pCmdVbva->i32EnableState, VBVAEXHOSTCONTEXT_ESTATE_PAUSED);
     326    return VINF_SUCCESS;
     327}
     328
     329static int VBoxVBVAExHPResume(struct VBVAEXHOSTCONTEXT *pCmdVbva)
     330{
     331    if (pCmdVbva->i32EnableState != VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
     332    {
     333        WARN(("Invalid state\n"));
     334        return VERR_INVALID_STATE;
     335    }
     336
     337    ASMAtomicWriteS32(&pCmdVbva->i32EnableState, VBVAEXHOSTCONTEXT_ESTATE_ENABLED);
     338    return VINF_SUCCESS;
     339}
     340
     341static bool vboxVBVAExHPCheckProcessCtlInternal(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL* pCtl)
     342{
     343    switch (pCtl->enmType)
     344    {
     345        case VBVAEXHOSTCTL_TYPE_HH_INTERNAL_PAUSE:
     346        {
     347            VBoxVBVAExHPPause(pCmdVbva);
     348            VBoxVBVAExHPDataCompleteCtl(pCmdVbva, pCtl, VINF_SUCCESS);
     349            return true;
     350        }
     351        case VBVAEXHOSTCTL_TYPE_HH_INTERNAL_RESUME:
     352        {
     353            VBoxVBVAExHPResume(pCmdVbva);
     354            VBoxVBVAExHPDataCompleteCtl(pCmdVbva, pCtl, VINF_SUCCESS);
     355            return true;
     356        }
     357        default:
     358            return false;
     359    }
     360}
     361
     362static void vboxVBVAExHPProcessorRelease(struct VBVAEXHOSTCONTEXT *pCmdVbva)
     363{
     364    Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
     365
     366    ASMAtomicWriteS32(&pCmdVbva->i32State, VBVAEXHOSTCONTEXT_STATE_LISTENING);
     367}
     368
     369static void vboxVBVAExHPHgEventSet(struct VBVAEXHOSTCONTEXT *pCmdVbva)
     370{
     371    Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
     372    if (pCmdVbva->pVBVA)
     373        ASMAtomicOrU32(&pCmdVbva->pVBVA->hostFlags.u32HostEvents, VBVA_F_STATE_PROCESSING);
     374}
     375
     376static void vboxVBVAExHPHgEventClear(struct VBVAEXHOSTCONTEXT *pCmdVbva)
     377{
     378    Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
     379    if (pCmdVbva->pVBVA)
     380        ASMAtomicAndU32(&pCmdVbva->pVBVA->hostFlags.u32HostEvents, ~VBVA_F_STATE_PROCESSING);
     381}
     382
     383static int vboxVBVAExHPCmdGet(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t **ppCmd, uint32_t *pcbCmd)
     384{
     385    Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
     386    Assert(pCmdVbva->i32EnableState > VBVAEXHOSTCONTEXT_ESTATE_PAUSED);
     387
     388    VBVABUFFER *pVBVA = pCmdVbva->pVBVA;
     389
     390    uint32_t indexRecordFirst = pVBVA->indexRecordFirst;
     391    uint32_t indexRecordFree = pVBVA->indexRecordFree;
     392
     393    Log(("first = %d, free = %d\n",
     394                   indexRecordFirst, indexRecordFree));
     395
     396    if (indexRecordFirst == indexRecordFree)
     397    {
     398        /* No records to process. Return without assigning output variables. */
     399        return VINF_EOF;
     400    }
     401
     402    uint32_t cbRecordCurrent = ASMAtomicReadU32(&pVBVA->aRecords[indexRecordFirst].cbRecord);
     403
      404    /* A new record needs to be processed. */
     405    if (cbRecordCurrent & VBVA_F_RECORD_PARTIAL)
     406    {
     407        /* the record is being recorded, try again */
     408        return VINF_TRY_AGAIN;
     409    }
     410
     411    uint32_t cbRecord = cbRecordCurrent & ~VBVA_F_RECORD_PARTIAL;
     412
     413    if (!cbRecord)
     414    {
     415        /* the record is being recorded, try again */
     416        return VINF_TRY_AGAIN;
     417    }
     418
     419    /* we should not get partial commands here actually */
     420    Assert(cbRecord);
     421
      422    /* The size of the largest contiguous chunk in the ring buffer. */
     423    uint32_t u32BytesTillBoundary = pVBVA->cbData - pVBVA->off32Data;
     424
     425    /* The pointer to data in the ring buffer. */
     426    uint8_t *pSrc = &pVBVA->au8Data[pVBVA->off32Data];
     427
     428    /* Fetch or point the data. */
     429    if (u32BytesTillBoundary >= cbRecord)
     430    {
     431        /* The command does not cross buffer boundary. Return address in the buffer. */
     432        *ppCmd = pSrc;
     433        *pcbCmd = cbRecord;
     434        return VINF_SUCCESS;
     435    }
     436
     437    LogRel(("CmdVbva: cross-bound writes unsupported\n"));
     438    return VERR_INVALID_STATE;
     439}
     440
     441static void VBoxVBVAExHPDataCompleteCmd(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint32_t cbCmd)
     442{
     443    VBVABUFFER *pVBVA = pCmdVbva->pVBVA;
     444    pVBVA->off32Data = (pVBVA->off32Data + cbCmd) % pVBVA->cbData;
     445
     446    pVBVA->indexRecordFirst = (pVBVA->indexRecordFirst + 1) % RT_ELEMENTS(pVBVA->aRecords);
     447}
     448
     449static void VBoxVBVAExHPDataCompleteCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL *pCtl, int rc)
     450{
     451    if (pCtl->pfnComplete)
     452        pCtl->pfnComplete(pCmdVbva, pCtl, rc, pCtl->pvComplete);
     453    else
     454        VBoxVBVAExHCtlFree(pCmdVbva, pCtl);
     455}
     456
     457
     458static VBVAEXHOST_DATA_TYPE vboxVBVAExHPDataGet(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t **ppCmd, uint32_t *pcbCmd)
     459{
     460    Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
     461    VBVAEXHOSTCTL*pCtl;
     462    bool fHostClt;
     463
     464    for (;;)
     465    {
     466        pCtl = vboxVBVAExHPCheckCtl(pCmdVbva, &fHostClt, false);
     467        if (pCtl)
     468        {
     469            if (fHostClt)
     470            {
     471                if (!vboxVBVAExHPCheckProcessCtlInternal(pCmdVbva, pCtl))
     472                {
     473                    *ppCmd = (uint8_t*)pCtl;
     474                    *pcbCmd = sizeof (*pCtl);
     475                    return VBVAEXHOST_DATA_TYPE_HOSTCTL;
     476                }
     477                continue;
     478            }
     479            else
     480            {
     481                *ppCmd = (uint8_t*)pCtl;
     482                *pcbCmd = sizeof (*pCtl);
     483                return VBVAEXHOST_DATA_TYPE_GUESTCTL;
     484            }
     485        }
     486
     487        if (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) <= VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
     488            return VBVAEXHOST_DATA_TYPE_NO_DATA;
     489
     490        int rc = vboxVBVAExHPCmdGet(pCmdVbva, ppCmd, pcbCmd);
     491        switch (rc)
     492        {
     493            case VINF_SUCCESS:
     494                return VBVAEXHOST_DATA_TYPE_CMD;
     495            case VINF_EOF:
     496                return VBVAEXHOST_DATA_TYPE_NO_DATA;
     497            case VINF_TRY_AGAIN:
     498                RTThreadSleep(1);
     499                continue;
     500            default:
      501                /* this is something really unexpected, i.e. most likely the guest has written something incorrect to the VBVA buffer */
     502                WARN(("Warning: vboxVBVAExHCmdGet returned unexpected status %d\n", rc));
     503                return VBVAEXHOST_DATA_TYPE_NO_DATA;
     504        }
     505    }
     506    /* not reached */
     507}
     508
     509static VBVAEXHOST_DATA_TYPE VBoxVBVAExHPDataGet(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t **ppCmd, uint32_t *pcbCmd)
     510{
     511    VBVAEXHOST_DATA_TYPE enmType = vboxVBVAExHPDataGet(pCmdVbva, ppCmd, pcbCmd);
     512    if (enmType == VBVAEXHOST_DATA_TYPE_NO_DATA)
     513    {
     514        vboxVBVAExHPHgEventClear(pCmdVbva);
     515        vboxVBVAExHPProcessorRelease(pCmdVbva);
     516        /* we need to prevent racing between us clearing the flag and command check/submission thread, i.e.
     517         * 1. we check the queue -> and it is empty
     518         * 2. submitter adds command to the queue
     519         * 3. submitter checks the "processing" -> and it is true , thus it does not submit a notification
     520         * 4. we clear the "processing" state
     521         * 5. ->here we need to re-check the queue state to ensure we do not leak the notification of the above command
     522         * 6. if the queue appears to be not-empty set the "processing" state back to "true"
     523         **/
     524        int rc = vboxVBVAExHSProcessorAcquire(pCmdVbva);
     525        if (RT_SUCCESS(rc))
     526        {
     527            /* we are the processor now */
     528            enmType = vboxVBVAExHPDataGet(pCmdVbva, ppCmd, pcbCmd);
     529            if (enmType == VBVAEXHOST_DATA_TYPE_NO_DATA)
     530            {
     531                vboxVBVAExHPProcessorRelease(pCmdVbva);
     532                return VBVAEXHOST_DATA_TYPE_NO_DATA;
     533            }
     534
     535            vboxVBVAExHPHgEventSet(pCmdVbva);
     536        }
     537    }
     538
     539    return enmType;
     540}
     541
     542DECLINLINE(bool) vboxVBVAExHSHasCommands(struct VBVAEXHOSTCONTEXT *pCmdVbva)
     543{
     544    VBVABUFFER *pVBVA = pCmdVbva->pVBVA;
     545
     546    if (pVBVA)
     547    {
     548        uint32_t indexRecordFirst = pVBVA->indexRecordFirst;
     549        uint32_t indexRecordFree = pVBVA->indexRecordFree;
     550
     551        if (indexRecordFirst != indexRecordFree)
     552            return true;
     553    }
     554
     555    return !!ASMAtomicReadU32(&pCmdVbva->u32cCtls);
     556}
     557
     558/** Checks whether the new commands are ready for processing
     559 * @returns
      560 *   VINF_SUCCESS - there are commands in the queue, and the given thread is now the processor (i.e. typically it would delegate processing to a worker thread)
      561 *   VINF_EOF - no commands in the queue
      562 *   VINF_ALREADY_INITIALIZED - another thread is already processing the commands
     563 *   VERR_INVALID_STATE - the VBVA is paused or pausing */
     564static int VBoxVBVAExHSCheckCommands(struct VBVAEXHOSTCONTEXT *pCmdVbva)
     565{
     566    int rc = vboxVBVAExHSProcessorAcquire(pCmdVbva);
     567    if (RT_SUCCESS(rc))
     568    {
     569        /* we are the processor now */
     570        if (vboxVBVAExHSHasCommands(pCmdVbva))
     571        {
     572            vboxVBVAExHPHgEventSet(pCmdVbva);
     573            return VINF_SUCCESS;
     574        }
     575
     576        vboxVBVAExHPProcessorRelease(pCmdVbva);
     577        return VINF_EOF;
     578    }
     579    if (rc == VERR_SEM_BUSY)
     580        return VINF_ALREADY_INITIALIZED;
     581    return VERR_INVALID_STATE;
     582}
     583
     584static int VBoxVBVAExHSInit(struct VBVAEXHOSTCONTEXT *pCmdVbva)
     585{
     586    memset(pCmdVbva, 0, sizeof (*pCmdVbva));
     587    int rc = RTCritSectInit(&pCmdVbva->CltCritSect);
     588    if (RT_SUCCESS(rc))
     589    {
     590# ifndef VBOXVDBG_MEMCACHE_DISABLE
     591        rc = RTMemCacheCreate(&pCmdVbva->CtlCache, sizeof (VBVAEXHOSTCTL),
     592                                0, /* size_t cbAlignment */
     593                                UINT32_MAX, /* uint32_t cMaxObjects */
     594                                NULL, /* PFNMEMCACHECTOR pfnCtor*/
     595                                NULL, /* PFNMEMCACHEDTOR pfnDtor*/
     596                                NULL, /* void *pvUser*/
     597                                0 /* uint32_t fFlags*/
     598                                );
     599        if (RT_SUCCESS(rc))
     600# endif
     601        {
     602            RTListInit(&pCmdVbva->GuestCtlList);
     603            RTListInit(&pCmdVbva->HostCtlList);
     604            pCmdVbva->i32State = VBVAEXHOSTCONTEXT_STATE_PROCESSING;
     605            pCmdVbva->i32EnableState = VBVAEXHOSTCONTEXT_ESTATE_DISABLED;
     606            return VINF_SUCCESS;
     607        }
     608# ifndef VBOXVDBG_MEMCACHE_DISABLE
     609        else
     610            WARN(("RTMemCacheCreate failed %d\n", rc));
     611# endif
     612    }
     613    else
     614        WARN(("RTCritSectInit failed %d\n", rc));
     615
     616    return rc;
     617}
     618
     619DECLINLINE(bool) VBoxVBVAExHSIsEnabled(struct VBVAEXHOSTCONTEXT *pCmdVbva)
     620{
     621    return (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) >= VBVAEXHOSTCONTEXT_ESTATE_PAUSED);
     622}
     623
     624DECLINLINE(bool) VBoxVBVAExHSIsDisabled(struct VBVAEXHOSTCONTEXT *pCmdVbva)
     625{
     626    return (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) == VBVAEXHOSTCONTEXT_ESTATE_DISABLED);
     627}
     628
     629static int VBoxVBVAExHSEnable(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVABUFFER *pVBVA)
     630{
     631    if (VBoxVBVAExHSIsEnabled(pCmdVbva))
     632    {
     633        WARN(("VBVAEx is enabled already\n"));
     634        return VERR_INVALID_STATE;
     635    }
     636
     637    pCmdVbva->pVBVA = pVBVA;
     638    pCmdVbva->pVBVA->hostFlags.u32HostEvents = 0;
     639    ASMAtomicWriteS32(&pCmdVbva->i32EnableState, VBVAEXHOSTCONTEXT_ESTATE_ENABLED);
     640    return VINF_SUCCESS;
     641}
     642
     643static int VBoxVBVAExHSDisable(struct VBVAEXHOSTCONTEXT *pCmdVbva)
     644{
     645    if (VBoxVBVAExHSIsDisabled(pCmdVbva))
     646        return VINF_SUCCESS;
     647
     648    ASMAtomicWriteS32(&pCmdVbva->i32EnableState, VBVAEXHOSTCONTEXT_ESTATE_DISABLED);
     649    return VINF_SUCCESS;
     650}
     651
     652static void VBoxVBVAExHSTerm(struct VBVAEXHOSTCONTEXT *pCmdVbva)
     653{
     654    /* ensure the processor is stopped */
     655    Assert(pCmdVbva->i32State >= VBVAEXHOSTCONTEXT_STATE_LISTENING);
     656
     657    /* ensure no one tries to submit the command */
     658    if (pCmdVbva->pVBVA)
     659        pCmdVbva->pVBVA->hostFlags.u32HostEvents = 0;
     660
     661    Assert(RTListIsEmpty(&pCmdVbva->GuestCtlList));
     662    Assert(RTListIsEmpty(&pCmdVbva->HostCtlList));
     663
     664    RTCritSectDelete(&pCmdVbva->CltCritSect);
     665
     666# ifndef VBOXVDBG_MEMCACHE_DISABLE
     667    RTMemCacheDestroy(pCmdVbva->CtlCache);
     668# endif
     669
     670    memset(pCmdVbva, 0, sizeof (*pCmdVbva));
     671}
     672
     673static int vboxVBVAExHSSaveGuestCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL* pCtl, uint8_t* pu8VramBase, PSSMHANDLE pSSM)
     674{
     675    RT_NOREF(pCmdVbva);
     676    int rc = SSMR3PutU32(pSSM, pCtl->enmType);
     677    AssertRCReturn(rc, rc);
     678    rc = SSMR3PutU32(pSSM, pCtl->u.cmd.cbCmd);
     679    AssertRCReturn(rc, rc);
     680    rc = SSMR3PutU32(pSSM, (uint32_t)(pCtl->u.cmd.pu8Cmd - pu8VramBase));
     681    AssertRCReturn(rc, rc);
     682
     683    return VINF_SUCCESS;
     684}
     685
     686static int vboxVBVAExHSSaveStateLocked(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM)
     687{
     688    if (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) != VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
     689    {
     690        WARN(("vbva not paused\n"));
     691        return VERR_INVALID_STATE;
     692    }
     693
     694    VBVAEXHOSTCTL* pCtl;
     695    int rc;
     696    RTListForEach(&pCmdVbva->GuestCtlList, pCtl, VBVAEXHOSTCTL, Node)
     697    {
     698        rc = vboxVBVAExHSSaveGuestCtl(pCmdVbva, pCtl, pu8VramBase, pSSM);
     699        AssertRCReturn(rc, rc);
     700    }
     701
     702    rc = SSMR3PutU32(pSSM, 0);
     703    AssertRCReturn(rc, rc);
     704
     705    return VINF_SUCCESS;
     706}
     707
     708
     709/** Saves state
      710 * @returns - same as VBoxVBVAExHSCheckCommands, or a failure status if saving the state failed
     711 */
     712static int VBoxVBVAExHSSaveState(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM)
     713{
     714    int rc = RTCritSectEnter(&pCmdVbva->CltCritSect);
     715    if (RT_FAILURE(rc))
     716    {
     717        WARN(("RTCritSectEnter failed %d\n", rc));
     718        return rc;
     719    }
     720
     721    rc = vboxVBVAExHSSaveStateLocked(pCmdVbva, pu8VramBase, pSSM);
     722    if (RT_FAILURE(rc))
     723        WARN(("vboxVBVAExHSSaveStateLocked failed %d\n", rc));
     724
     725    RTCritSectLeave(&pCmdVbva->CltCritSect);
     726
     727    return rc;
     728}
     729
     730static int vboxVBVAExHSLoadGuestCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM, uint32_t u32Version)
     731{
     732    RT_NOREF(u32Version);
     733    uint32_t u32;
     734    int rc = SSMR3GetU32(pSSM, &u32);
     735    AssertLogRelRCReturn(rc, rc);
     736
     737    if (!u32)
     738        return VINF_EOF;
     739
     740    VBVAEXHOSTCTL* pHCtl = VBoxVBVAExHCtlCreate(pCmdVbva, (VBVAEXHOSTCTL_TYPE)u32);
     741    if (!pHCtl)
     742    {
     743        WARN(("VBoxVBVAExHCtlCreate failed\n"));
     744        return VERR_NO_MEMORY;
     745    }
     746
     747    rc = SSMR3GetU32(pSSM, &u32);
     748    AssertLogRelRCReturn(rc, rc);
     749    pHCtl->u.cmd.cbCmd = u32;
     750
     751    rc = SSMR3GetU32(pSSM, &u32);
     752    AssertLogRelRCReturn(rc, rc);
     753    pHCtl->u.cmd.pu8Cmd = pu8VramBase + u32;
     754
     755    RTListAppend(&pCmdVbva->GuestCtlList, &pHCtl->Node);
     756    ++pCmdVbva->u32cCtls;
     757
     758    return VINF_SUCCESS;
     759}
     760
     761
     762static int vboxVBVAExHSLoadStateLocked(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM, uint32_t u32Version)
     763{
     764    if (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) != VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
     765    {
     766        WARN(("vbva not stopped\n"));
     767        return VERR_INVALID_STATE;
     768    }
     769
     770    int rc;
     771
     772    do {
     773        rc = vboxVBVAExHSLoadGuestCtl(pCmdVbva, pu8VramBase, pSSM, u32Version);
     774        AssertLogRelRCReturn(rc, rc);
     775    } while (VINF_EOF != rc);
     776
     777    return VINF_SUCCESS;
     778}
     779
     780/** Loads state
      781 * @returns - same as VBoxVBVAExHSCheckCommands, or a failure status if loading the state failed
     782 */
     783static int VBoxVBVAExHSLoadState(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM, uint32_t u32Version)
     784{
     785    Assert(VGA_SAVEDSTATE_VERSION_3D <= u32Version);
     786    int rc = RTCritSectEnter(&pCmdVbva->CltCritSect);
     787    if (RT_FAILURE(rc))
     788    {
     789        WARN(("RTCritSectEnter failed %d\n", rc));
     790        return rc;
     791    }
     792
     793    rc = vboxVBVAExHSLoadStateLocked(pCmdVbva, pu8VramBase, pSSM, u32Version);
     794    if (RT_FAILURE(rc))
     795        WARN(("vboxVBVAExHSSaveStateLocked failed %d\n", rc));
     796
     797    RTCritSectLeave(&pCmdVbva->CltCritSect);
     798
     799    return rc;
     800}
     801
     802typedef enum
     803{
     804    VBVAEXHOSTCTL_SOURCE_GUEST = 0,
     805    VBVAEXHOSTCTL_SOURCE_HOST
     806} VBVAEXHOSTCTL_SOURCE;
     807
     808
     809static int VBoxVBVAExHCtlSubmit(VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL* pCtl, VBVAEXHOSTCTL_SOURCE enmSource, PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
     810{
     811    if (!VBoxVBVAExHSIsEnabled(pCmdVbva))
     812    {
     813        Log(("cmd vbva not enabled\n"));
     814        return VERR_INVALID_STATE;
     815    }
     816
     817    pCtl->pfnComplete = pfnComplete;
     818    pCtl->pvComplete = pvComplete;
     819
     820    int rc = RTCritSectEnter(&pCmdVbva->CltCritSect);
     821    if (RT_SUCCESS(rc))
     822    {
     823        if (!VBoxVBVAExHSIsEnabled(pCmdVbva))
     824        {
     825            Log(("cmd vbva not enabled\n"));
     826            RTCritSectLeave(&pCmdVbva->CltCritSect);
     827            return VERR_INVALID_STATE;
     828        }
     829
     830        if (enmSource > VBVAEXHOSTCTL_SOURCE_GUEST)
     831        {
     832            RTListAppend(&pCmdVbva->HostCtlList, &pCtl->Node);
     833        }
     834        else
     835            RTListAppend(&pCmdVbva->GuestCtlList, &pCtl->Node);
     836
     837        ASMAtomicIncU32(&pCmdVbva->u32cCtls);
     838
     839        RTCritSectLeave(&pCmdVbva->CltCritSect);
     840
     841        rc = VBoxVBVAExHSCheckCommands(pCmdVbva);
     842    }
     843    else
     844        WARN(("RTCritSectEnter failed %d\n", rc));
     845
     846    return rc;
     847}
    824848
    825849void VBoxVDMAThreadNotifyConstructSucceeded(PVBOXVDMATHREAD pThread, void *pvThreadContext)
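
The block added above contains the lock-free "processor" hand-off: vboxVBVAExHSProcessorAcquire does a single compare-and-swap from the LISTENING state to PROCESSING, and VBoxVBVAExHPDataGet re-acquires and re-checks the queue after releasing, exactly as the numbered comment in the hunk describes, so a command queued while the flag was still set is not lost. The following is a simplified sketch of that pattern rebuilt on std::atomic and std::mutex rather than the IPRT ASMAtomic*/RTCritSect primitives; the types and names are invented for illustration.

    #include <atomic>
    #include <cstdio>
    #include <deque>
    #include <mutex>

    /* Simplified stand-ins for the VBVAEXHOSTCONTEXT state machine. */
    enum { LISTENING = 0, PROCESSING = 1 };

    struct DemoCtx
    {
        std::atomic<int> state{LISTENING};
        std::mutex       lock;
        std::deque<int>  queue;          /* queued "commands" */
    };

    /* Only one thread becomes the processor: CAS LISTENING -> PROCESSING. */
    static bool acquireProcessor(DemoCtx &ctx)
    {
        int expected = LISTENING;
        return ctx.state.compare_exchange_strong(expected, PROCESSING);
    }

    static void releaseProcessor(DemoCtx &ctx)
    {
        ctx.state.store(LISTENING);
    }

    static bool tryPop(DemoCtx &ctx, int &cmd)
    {
        std::lock_guard<std::mutex> guard(ctx.lock);
        if (ctx.queue.empty())
            return false;
        cmd = ctx.queue.front();
        ctx.queue.pop_front();
        return true;
    }

    /* Called while holding the processor role.  When the queue looks empty the
       role is released and then re-acquired for one more check, closing the
       window in which a submitter saw "processing" and skipped its notification. */
    static bool getWork(DemoCtx &ctx, int &cmd)
    {
        if (tryPop(ctx, cmd))
            return true;
        releaseProcessor(ctx);
        if (acquireProcessor(ctx))
        {
            if (tryPop(ctx, cmd))
                return true;            /* a command slipped in during the gap */
            releaseProcessor(ctx);
        }
        return false;
    }

    int main()
    {
        DemoCtx ctx;
        ctx.queue.push_back(42);
        int cmd = 0;
        if (acquireProcessor(ctx) && getWork(ctx, cmd))
            std::printf("processed command %d\n", cmd);
        return 0;
    }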
     
    9891013} VBOXVDMACMD_CHROMIUM_CTL_PRIVATE, *PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE;
    9901014
    991 #define VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(_p) ((PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE)(((uint8_t*)(_p)) - RT_OFFSETOF(VBOXVDMACMD_CHROMIUM_CTL_PRIVATE, Cmd)))
     1015# define VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(_p) ((PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE)(((uint8_t*)(_p)) - RT_OFFSETOF(VBOXVDMACMD_CHROMIUM_CTL_PRIVATE, Cmd)))
    9921016
    9931017static PVBOXVDMACMD_CHROMIUM_CTL vboxVDMACrCtlCreate(VBOXVDMACMD_CHROMIUM_CTL_TYPE enmCmd, uint32_t cbCmd)
     
    10331057}
    10341058
    1035 #if 0 /** @todo vboxVDMACrCtlCbReleaseCmd is unused */
     1059# if 0 /** @todo vboxVDMACrCtlCbReleaseCmd is unused */
    10361060static DECLCALLBACK(void) vboxVDMACrCtlCbReleaseCmd(PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, void* pvContext)
    10371061{
     
    10391063    vboxVDMACrCtlRelease(pCmd);
    10401064}
    1041 #endif
     1065# endif
    10421066
    10431067static int vboxVDMACrCtlPostAsync (PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, uint32_t cbCmd, PFNVBOXVDMACRCTL_CALLBACK pfnCompletion, void *pvCompletion)
     
    10521076        return VINF_SUCCESS;
    10531077    }
    1054 #ifdef DEBUG_misha
     1078# ifdef DEBUG_misha
    10551079    Assert(0);
    1056 #endif
     1080# endif
    10571081    return VERR_NOT_SUPPORTED;
    10581082}
     
    10661090    {
    10671091        rc = vboxVDMACrCtlPostAsync(pVGAState, pCmd, cbCmd, vboxVDMACrCtlCbSetEvent, (void*)hComplEvent);
    1068 #ifdef DEBUG_misha
     1092# ifdef DEBUG_misha
    10691093        AssertRC(rc);
    1070 #endif
     1094# endif
    10711095        if (RT_SUCCESS(rc))
    10721096        {
     
    11841208static DECLCALLBACK(void) vboxVDMACrHgcmNotifyTerminatingDoneCb(HVBOXCRCMDCTL_NOTIFY_TERMINATING hClient)
    11851209{
    1186 #ifdef VBOX_STRICT
     1210# ifdef VBOX_STRICT
    11871211    struct VBOXVDMAHOST *pVdma = hClient;
    11881212    Assert(pVdma->CmdVbva.i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
    11891213    Assert(pVdma->Thread.u32State == VBOXVDMATHREAD_STATE_TERMINATING);
    1190 #else
     1214# else
    11911215    RT_NOREF(hClient);
    1192 #endif
     1216# endif
    11931217}
    11941218
     
    12521276    if (!pVdma->CrSrvInfo.pfnEnable)
    12531277    {
    1254 #ifdef DEBUG_misha
     1278# ifdef DEBUG_misha
    12551279        WARN(("pfnEnable is NULL\n"));
    12561280        return VERR_NOT_SUPPORTED;
    1257 #endif
     1281# endif
    12581282    }
    12591283
     
    18521876}
    18531877
    1854 #if 0
     1878# if 0
    18551879typedef struct VBOXCMDVBVA_PAGING_TRANSFER
    18561880{
     
    18621886    VBOXCMDVBVA_SYSMEMEL aSysMem[1];
    18631887} VBOXCMDVBVA_PAGING_TRANSFER;
    1864 #endif
     1888# endif
    18651889
    18661890AssertCompile(sizeof (VBOXCMDVBVA_HDR) == 8);
     
    18691893AssertCompile(!(PAGE_SIZE % sizeof (VBOXCMDVBVAPAGEIDX)));
    18701894
    1871 #define VBOXCMDVBVA_NUM_SYSMEMEL_PER_PAGE (PAGE_SIZE / sizeof (VBOXCMDVBVA_SYSMEMEL))
     1895# define VBOXCMDVBVA_NUM_SYSMEMEL_PER_PAGE (PAGE_SIZE / sizeof (VBOXCMDVBVA_SYSMEMEL))
    18721896
    18731897static int8_t vboxVDMACrCmdVbvaProcess(struct VBOXVDMAHOST *pVdma, const VBOXCMDVBVA_HDR *pCmd, uint32_t cbCmd)
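
The AssertCompile() lines kept in the hunk above pin the wire format at build time (an 8-byte VBOXCMDVBVA_HDR, a page that holds a whole number of page indices). Below is a hedged sketch of the same contract using standard static_assert; the DemoHdr layout is invented and is not the real VBOXCMDVBVA_HDR.

    #include <cstddef>
    #include <cstdint>

    /* Invented stand-in for a guest/host command header. */
    struct DemoHdr
    {
        uint8_t  u8OpCode;
        uint8_t  u8Flags;
        uint16_t u16Length;
        uint32_t u32FenceId;
    };

    /* Compile-time checks in the spirit of the AssertCompile() calls above. */
    static_assert(sizeof(DemoHdr) == 8, "command header must stay 8 bytes on the wire");

    constexpr std::size_t kPageSize = 4096;
    static_assert(kPageSize % sizeof(uint32_t) == 0,
                  "a page must hold a whole number of 32-bit page indices");

    int main() { return 0; }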
     
    22772301
    22782302        uint32_t offSrcLineStart = pSrcRectl->left * pSrcDesc->bpp >> 3;
    2279 #ifdef VBOX_STRICT
     2303# ifdef VBOX_STRICT
    22802304        uint32_t offSrcLineEnd = ((pSrcRectl->left * pSrcDesc->bpp + 7) >> 3) + ((pSrcDesc->bpp * pSrcRectl->width + 7) >> 3);
    22812305        uint32_t cbSrcLine = offSrcLineEnd - offSrcLineStart;
    2282 #endif
     2306# endif
    22832307        uint32_t offSrcStart = pSrcDesc->pitch * pSrcRectl->top + offSrcLineStart;
    22842308        Assert(cbSrcLine <= pSrcDesc->pitch);
     
    25042528            case VBOXVDMACMD_TYPE_CHROMIUM_CMD:
    25052529            {
    2506 #ifdef VBOXWDDM_TEST_UHGSMI
     2530# ifdef VBOXWDDM_TEST_UHGSMI
    25072531                static int count = 0;
    25082532                static uint64_t start, end;
     
    25182542                    LogRel(("100000 calls took %i ms, %i cps\n", (int)ems, (int)(100000.f*1000.f/ems) ));
    25192543                }
    2520 #endif
     2544# endif
    25212545                /* todo: post the buffer to chromium */
    25222546                return VINF_SUCCESS;
     
    26962720# endif
    26972721
    2698 #endif /* #ifdef VBOX_WITH_CRHGSMI */
    2699 
     2722#endif /* VBOX_WITH_CRHGSMI */
    27002723#ifdef VBOX_VDMA_WITH_WATCHDOG
     2724
    27012725static DECLCALLBACK(void) vboxVDMAWatchDogTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
    27022726{
     
    27152739    return VINF_SUCCESS;
    27162740}
    2717 #endif
     2741
     2742#endif /* VBOX_VDMA_WITH_WATCHDOG */
    27182743
    27192744int vboxVDMAConstruct(PVGASTATE pVGAState, uint32_t cPipeElements)
     
    27802805#ifdef VBOX_WITH_CRHGSMI
    27812806    vdmaVBVACtlDisableSync(pVdma);
     2807#else
     2808    RT_NOREF(pVdma);
    27822809#endif
    27832810    return VINF_SUCCESS;
     
    28502877    vboxVDMACommandProcess(pVdma, pCmd, cbCmd);
    28512878#else
     2879    RT_NOREF(cbCmd);
    28522880    pCmd->rc = rc;
    28532881    rc = VBoxSHGSMICommandComplete (pVdma->pHgsmi, pCmd);
     
    28562884}
    28572885
    2858 /**/
    28592886#ifdef VBOX_WITH_CRHGSMI
    28602887
     
    34043431    return VBoxVBVAExHSIsEnabled(&pVGAState->pVdma->CmdVbva);
    34053432}
    3406 #endif
     3433
     3434#endif /* VBOX_WITH_CRHGSMI */
    34073435
    34083436int vboxVDMASaveStateExecPrep(struct VBOXVDMAHOST *pVdma)
     
    34193447    }
    34203448
    3421 #ifdef DEBUG_misha
     3449# ifdef DEBUG_misha
    34223450    WARN(("debug prep"));
    3423 #endif
     3451# endif
    34243452
    34253453    PVGASTATE pVGAState = pVdma->pVGAState;
     
    34403468    return VERR_NO_MEMORY;
    34413469#else
     3470    RT_NOREF(pVdma);
    34423471    return VINF_SUCCESS;
    34433472#endif
     
    34573486    }
    34583487
    3459 #ifdef DEBUG_misha
     3488# ifdef DEBUG_misha
    34603489    WARN(("debug done"));
    3461 #endif
     3490# endif
    34623491
    34633492    PVGASTATE pVGAState = pVdma->pVGAState;
     
    34783507    return VERR_NO_MEMORY;
    34793508#else
     3509    RT_NOREF(pVdma);
    34803510    return VINF_SUCCESS;
    34813511#endif
     
    34853515{
    34863516    int rc;
    3487 
    3488 #ifdef VBOX_WITH_CRHGSMI
     3517#ifndef VBOX_WITH_CRHGSMI
     3518    RT_NOREF(pVdma, pSSM);
     3519
     3520#else
    34893521    if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
    34903522#endif
    34913523    {
    3492         rc = SSMR3PutU32(pSSM, 0xffffffff);
     3524        rc = SSMR3PutU32(pSSM, UINT32_MAX);
    34933525        AssertRCReturn(rc, rc);
    34943526        return VINF_SUCCESS;
     
    35163548    AssertLogRelRCReturn(rc, rc);
    35173549
    3518     if (u32 != 0xffffffff)
     3550    if (u32 != UINT32_MAX)
    35193551    {
    35203552#ifdef VBOX_WITH_CRHGSMI
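
The two hunks above replace the literal 0xffffffff with UINT32_MAX: the save path writes that value as a "no command-VBVA state follows" marker, and the load path checks for the same sentinel before reading further. Here is a toy sketch of the idea with an in-memory stream standing in for the SSM handle; putU32/getU32 are invented helpers, not the real SSMR3PutU32/SSMR3GetU32.

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    static void putU32(std::vector<uint32_t> &stream, uint32_t u32)
    {
        stream.push_back(u32);
    }

    static uint32_t getU32(const std::vector<uint32_t> &stream, size_t &off)
    {
        return stream[off++];
    }

    int main()
    {
        std::vector<uint32_t> stream;
        bool fVbvaEnabled = false;          /* pretend VBoxVBVAExHSIsEnabled() said no */

        /* save: write the sentinel when there is nothing to serialize */
        if (!fVbvaEnabled)
            putU32(stream, UINT32_MAX);

        /* load: the sentinel means "no VBVA state in this saved state" */
        size_t off = 0;
        uint32_t u32 = getU32(stream, off);
        if (u32 != UINT32_MAX)
            std::printf("VBVA state present, first word %u\n", u32);
        else
            std::printf("no VBVA state saved\n");
        return 0;
    }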
     
    35363568        return VINF_SUCCESS;
    35373569#else
     3570        RT_NOREF(pVdma, u32Version);
    35383571        WARN(("Unsupported VBVACtl info!\n"));
    35393572        return VERR_VERSION_MISMATCH;
     
    35713604        return rc;
    35723605    }
     3606#else
     3607    RT_NOREF(pVdma);
    35733608#endif
    35743609    return VINF_SUCCESS;
    35753610}
     3611