VirtualBox

Timestamp:
Aug 28, 2019 4:07:42 PM
Author:
vboxsync
Message:

WDDM: remove obsolete CmdVbva code. bugref:9529

Location:
trunk/src/VBox/Additions/WINNT/Graphics/Video/mp
Files:
5 edited

  • trunk/src/VBox/Additions/WINNT/Graphics/Video/mp/common/VBoxMPDevExt.h

    r80482 → r80483
    151 151   BOOLEAN fComplexTopologiesEnabled;
    152 152
    153    VBOXCMDVBVA CmdVbva;
    154 
    155 153   VBOXWDDM_GLOBAL_POINTER_INFO PointerInfo;
    156 154
  • trunk/src/VBox/Additions/WINNT/Graphics/Video/mp/wddm/VBoxMPVbva.cpp

    r80435 → r80483
    96 96     return VERR_GENERAL_FAILURE;
    97 97 }
    98 
    99 #ifdef VBOX_WITH_CROGL
    100 /* command vbva ring buffer */
    101 
    102 /* customized VBVA implementation */
    103 
    104 /* Forward declarations of internal functions. */
    105 static void vboxHwBufferPlaceDataAt(PVBVAEXBUFFERCONTEXT pCtx, const void *p,
    106                                     uint32_t cb, uint32_t offset);
    107 static bool vboxHwBufferWrite(PVBVAEXBUFFERCONTEXT pCtx,
    108                               PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx,
    109                               const void *p, uint32_t cb);
    110 
    111 DECLINLINE(void) vboxVBVAExFlush(struct VBVAEXBUFFERCONTEXT *pCtx, PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx)
    112 {
    113     pCtx->pfnFlush(pCtx, pHGSMICtx, pCtx->pvFlush);
    114 }
    115 
    116 static int vboxCmdVbvaSubmitHgsmi(PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx, HGSMIOFFSET offDr)
    117 {
    118     VBVO_PORT_WRITE_U32(pHGSMICtx->port, offDr);
    119     /* Make the compiler aware that the host has changed memory. */
    120     ASMCompilerBarrier();
    121     return VINF_SUCCESS;
    122 }
    123 #define vboxCmdVbvaSubmit vboxCmdVbvaSubmitHgsmi
    124 
    125 static VBOXCMDVBVA_CTL RT_UNTRUSTED_VOLATILE_HOST *vboxCmdVbvaCtlCreate(PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx, uint32_t cbCtl)
    126 {
    127     Assert(cbCtl >= sizeof (VBOXCMDVBVA_CTL));
    128     return (VBOXCMDVBVA_CTL RT_UNTRUSTED_VOLATILE_HOST *)VBoxSHGSMICommandAlloc(&pHGSMICtx->heapCtx, cbCtl,
    129                                                                                 HGSMI_CH_VBVA, VBVA_CMDVBVA_CTL);
    130 }
    131 
    132 static void vboxCmdVbvaCtlFree(PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx, VBOXCMDVBVA_CTL RT_UNTRUSTED_VOLATILE_HOST *pCtl)
    133 {
    134     VBoxSHGSMICommandFree(&pHGSMICtx->heapCtx, pCtl);
    135 }
    136 
    137 static int vboxCmdVbvaCtlSubmitSync(PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx, VBOXCMDVBVA_CTL RT_UNTRUSTED_VOLATILE_HOST *pCtl)
    138 {
    139     const VBOXSHGSMIHEADER RT_UNTRUSTED_VOLATILE_HOST *pHdr = VBoxSHGSMICommandPrepSynch(&pHGSMICtx->heapCtx, pCtl);
    140     if (!pHdr)
    141     {
    142         WARN(("VBoxSHGSMICommandPrepSynch returned NULL"));
    143         return VERR_INVALID_PARAMETER;
    144     }
    145 
    146     HGSMIOFFSET offCmd = VBoxSHGSMICommandOffset(&pHGSMICtx->heapCtx, pHdr);
    147     if (offCmd == HGSMIOFFSET_VOID)
    148     {
    149         WARN(("VBoxSHGSMICommandOffset returned HGSMIOFFSET_VOID"));
    150         VBoxSHGSMICommandCancelSynch(&pHGSMICtx->heapCtx, pHdr);
    151         return VERR_INVALID_PARAMETER;
    152     }
    153 
    154     int rc = vboxCmdVbvaSubmit(pHGSMICtx, offCmd);
    155     if (RT_SUCCESS(rc))
    156     {
    157         rc = VBoxSHGSMICommandDoneSynch(&pHGSMICtx->heapCtx, pHdr);
    158         if (RT_SUCCESS(rc))
    159         {
    160             rc = pCtl->i32Result;
    161             if (!RT_SUCCESS(rc))
    162                 WARN(("pCtl->i32Result %d", pCtl->i32Result));
    163 
    164             return rc;
    165         }
    166         else
    167             WARN(("VBoxSHGSMICommandDoneSynch returned %d", rc));
    168     }
    169     else
    170         WARN(("vboxCmdVbvaSubmit returned %d", rc));
    171 
    172     VBoxSHGSMICommandCancelSynch(&pHGSMICtx->heapCtx, pHdr);
    173 
    174     return rc;
    175 }
    176 
    177 static int vboxCmdVbvaCtlSubmitAsync(PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx, VBOXCMDVBVA_CTL RT_UNTRUSTED_VOLATILE_HOST *pCtl,
    178                                      FNVBOXSHGSMICMDCOMPLETION pfnCompletion, void RT_UNTRUSTED_VOLATILE_HOST *pvCompletion)
    179 {
    180     const VBOXSHGSMIHEADER RT_UNTRUSTED_VOLATILE_HOST *pHdr = VBoxSHGSMICommandPrepAsynch(&pHGSMICtx->heapCtx, pCtl, pfnCompletion,
    181                                                                                           pvCompletion, VBOXSHGSMI_FLAG_GH_ASYNCH_IRQ);
    182     HGSMIOFFSET offCmd = VBoxSHGSMICommandOffset(&pHGSMICtx->heapCtx, pHdr);
    183     if (offCmd == HGSMIOFFSET_VOID)
    184     {
    185         WARN(("VBoxSHGSMICommandOffset returned HGSMIOFFSET_VOID"));
    186         VBoxSHGSMICommandCancelAsynch(&pHGSMICtx->heapCtx, pHdr);
    187         return VERR_INVALID_PARAMETER;
    188     }
    189 
    190     int rc = vboxCmdVbvaSubmit(pHGSMICtx, offCmd);
    191     if (RT_SUCCESS(rc))
    192     {
    193         VBoxSHGSMICommandDoneAsynch(&pHGSMICtx->heapCtx, pHdr);
    194         return rc;
    195     }
    196 
    197     WARN(("vboxCmdVbvaSubmit returned %d", rc));
    198     VBoxSHGSMICommandCancelAsynch(&pHGSMICtx->heapCtx, pHdr);
    199     return rc;
    200 }
    201 
    202 static int vboxVBVAExCtlSubmitEnableDisable(PVBVAEXBUFFERCONTEXT pCtx, PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx, bool fEnable)
    203 {
    204     VBOXCMDVBVA_CTL_ENABLE RT_UNTRUSTED_VOLATILE_HOST *pCtl =
    205         (VBOXCMDVBVA_CTL_ENABLE RT_UNTRUSTED_VOLATILE_HOST *)vboxCmdVbvaCtlCreate(pHGSMICtx, sizeof (*pCtl));
    206     if (!pCtl)
    207     {
    208         WARN(("vboxCmdVbvaCtlCreate failed"));
    209         return VERR_NO_MEMORY;
    210     }
    211 
    212     pCtl->Hdr.u32Type = VBOXCMDVBVACTL_TYPE_ENABLE;
    213     pCtl->Hdr.i32Result = VERR_NOT_IMPLEMENTED;
    214     memset((void *)&pCtl->Enable, 0, sizeof(pCtl->Enable));
    215     pCtl->Enable.u32Flags  = fEnable? VBVA_F_ENABLE: VBVA_F_DISABLE;
    216     pCtl->Enable.u32Offset = pCtx->offVRAMBuffer;
    217     pCtl->Enable.i32Result = VERR_NOT_SUPPORTED;
    218     pCtl->Enable.u32Flags |= VBVA_F_ABSOFFSET;
    219 
    220     int rc = vboxCmdVbvaCtlSubmitSync(pHGSMICtx, &pCtl->Hdr);
    221     if (RT_SUCCESS(rc))
    222     {
    223         rc = pCtl->Hdr.i32Result;
    224         if (!RT_SUCCESS(rc))
    225             WARN(("vboxCmdVbvaCtlSubmitSync enable/disable failed %d", rc));
    226     }
    227     else
    228         WARN(("vboxCmdVbvaCtlSubmitSync returned %d", rc));
    229 
    230     vboxCmdVbvaCtlFree(pHGSMICtx, &pCtl->Hdr);
    231 
    232     return rc;
    233 }
    234 
    235 /*
    236  * Public hardware buffer methods.
    237  */
    238 VBVAEX_DECL(int) VBoxVBVAExEnable(PVBVAEXBUFFERCONTEXT pCtx, PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx, VBVABUFFER *pVBVA)
    239 {
    240     int rc = VERR_GENERAL_FAILURE;
    241 
    242     LogFlowFunc(("pVBVA %p\n", pVBVA));
    243 
    244 #if 0  /* All callers check this */
    245     if (ppdev->bHGSMISupported)
    246 #endif
    247     {
    248         LogFunc(("pVBVA %p vbva off 0x%x\n", pVBVA, pCtx->offVRAMBuffer));
    249 
    250         pVBVA->hostFlags.u32HostEvents      = 0;
    251         pVBVA->hostFlags.u32SupportedOrders = 0;
    252         pVBVA->off32Data          = 0;
    253         pVBVA->off32Free          = 0;
    254         memset(pVBVA->aRecords, 0, sizeof (pVBVA->aRecords));
    255         pVBVA->indexRecordFirst   = 0;
    256         pVBVA->indexRecordFree    = 0;
    257         pVBVA->cbPartialWriteThreshold = 256;
    258         pVBVA->cbData             = pCtx->cbBuffer - sizeof (VBVABUFFER) + sizeof (pVBVA->au8Data);
    259 
    260         pCtx->fHwBufferOverflow = false;
    261         pCtx->pRecord    = NULL;
    262         pCtx->pVBVA      = pVBVA;
    263 
    264         rc = vboxVBVAExCtlSubmitEnableDisable(pCtx, pHGSMICtx, true);
    265     }
    266 
    267     if (!RT_SUCCESS(rc))
    268     {
    269         WARN(("enable failed %d", rc));
    270         VBoxVBVAExDisable(pCtx, pHGSMICtx);
    271     }
    272 
    273     return rc;
    274 }
    275 
    276 VBVAEX_DECL(void) VBoxVBVAExDisable(PVBVAEXBUFFERCONTEXT pCtx, PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx)
    277 {
    278     LogFlowFunc(("\n"));
    279 
    280     vboxVBVAExCtlSubmitEnableDisable(pCtx, pHGSMICtx, false);
    281 
    282     pCtx->fHwBufferOverflow = false;
    283     pCtx->pRecord           = NULL;
    284     pCtx->pVBVA             = NULL;
    285 
    286     return;
    287 }
    288 
    289 VBVAEX_DECL(bool) VBoxVBVAExBufferBeginUpdate(PVBVAEXBUFFERCONTEXT pCtx, PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx)
    290 {
    291     bool bRc = false;
    292 
    293     // LogFunc(("flags = 0x%08X\n", pCtx->pVBVA? pCtx->pVBVA->u32HostEvents: -1));
    294 
    295     Assert(pCtx->pVBVA);
    296     /* we do not check u32HostEvents & VBVA_F_MODE_ENABLED here:
    297      * VBVA stays enabled once the ENABLE call succeeds, until it is disabled with a DISABLE call */
    298 //    if (   pCtx->pVBVA
    299 //        && (pCtx->pVBVA->hostFlags.u32HostEvents & VBVA_F_MODE_ENABLED))
    300     {
    301         uint32_t indexRecordNext;
    302 
    303         Assert(!pCtx->fHwBufferOverflow);
    304         Assert(pCtx->pRecord == NULL);
    305 
    306         indexRecordNext = (pCtx->pVBVA->indexRecordFree + 1) % VBVA_MAX_RECORDS;
    307 
    308         if (indexRecordNext == pCtx->indexRecordFirstUncompleted)
    309         {
    310             /* All slots in the records queue are used. */
    311             vboxVBVAExFlush (pCtx, pHGSMICtx);
    312         }
    313 
    314         if (indexRecordNext == pCtx->indexRecordFirstUncompleted)
    315         {
    316             /* Even after flush there is no place. Fail the request. */
    317             LogFunc(("no space in the queue of records!!! first %d, last %d\n",
    318                     indexRecordNext, pCtx->pVBVA->indexRecordFree));
    319         }
    320         else
    321         {
    322             /* Initialize the record. */
    323             VBVARECORD *pRecord = &pCtx->pVBVA->aRecords[pCtx->pVBVA->indexRecordFree];
    324 
    325             pRecord->cbRecord = VBVA_F_RECORD_PARTIAL;
    326 
    327             pCtx->pVBVA->indexRecordFree = indexRecordNext;
    328 
    329             // LogFunc(("indexRecordNext = %d\n", indexRecordNext));
    330 
    331             /* Remember which record we are using. */
    332             pCtx->pRecord = pRecord;
    333 
    334             bRc = true;
    335         }
    336     }
    337 
    338     return bRc;
    339 }
    340 
    341 VBVAEX_DECL(void) VBoxVBVAExBufferEndUpdate(PVBVAEXBUFFERCONTEXT pCtx)
    342 {
    343     VBVARECORD *pRecord;
    344 
    345     // LogFunc(("\n"));
    346 
    347     Assert(pCtx->pVBVA);
    348 
    349     pRecord = pCtx->pRecord;
    350     Assert(pRecord && (pRecord->cbRecord & VBVA_F_RECORD_PARTIAL));
    351 
    352     /* Mark the record completed. */
    353     pRecord->cbRecord &= ~VBVA_F_RECORD_PARTIAL;
    354 
    355     pCtx->fHwBufferOverflow = false;
    356     pCtx->pRecord = NULL;
    357 
    358     return;
    359 }
    360 
    361 DECLINLINE(bool) vboxVBVAExIsEntryInRange(uint32_t u32First, uint32_t u32Entry, uint32_t u32Free)
    362 {
    363     return (     u32First != u32Free
    364              && (
    365                      (u32First < u32Free && u32Entry >= u32First && u32Entry < u32Free)
    366                   || (u32First > u32Free && (u32Entry >= u32First || u32Entry < u32Free))
    367                  )
    368            );
    369 }
    370 
    371 DECLINLINE(bool) vboxVBVAExIsEntryInRangeOrEmpty(uint32_t u32First, uint32_t u32Entry, uint32_t u32Free)
    372 {
    373     return vboxVBVAExIsEntryInRange(u32First, u32Entry, u32Free)
    374             || (    u32First == u32Entry
    375                  && u32Entry == u32Free);
    376 }
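
The two helpers above implement a membership test on a ring: an entry is "in range" if it lies in the half-open window [u32First, u32Free) taken modulo the buffer size, with u32First == u32Free meaning the window is empty. For reference, a minimal standalone sketch (hypothetical values and names, not part of this changeset) exercising the unwrapped, wrapped, and empty cases:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* Same predicate as vboxVBVAExIsEntryInRange above. */
    static bool IsEntryInRange(uint32_t u32First, uint32_t u32Entry, uint32_t u32Free)
    {
        return u32First != u32Free
            && (   (u32First < u32Free && u32Entry >= u32First && u32Entry < u32Free)
                || (u32First > u32Free && (u32Entry >= u32First || u32Entry < u32Free)));
    }

    int main(void)
    {
        assert( IsEntryInRange(2, 4, 5));  /* unwrapped window [2,5): entries 2..4 are inside */
        assert(!IsEntryInRange(2, 5, 5));  /* the free slot itself is outside */
        assert( IsEntryInRange(6, 0, 1));  /* wrapped window [6,1): 6, 7, ..., 0 are inside */
        assert(!IsEntryInRange(6, 3, 1));
        assert(!IsEntryInRange(4, 4, 4));  /* u32First == u32Free: empty window */
        return 0;
    }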
    377 #ifdef DEBUG
    378 
    379 DECLINLINE(void) vboxHwBufferVerifyCompleted(PVBVAEXBUFFERCONTEXT pCtx)
    380 {
    381     VBVABUFFER *pVBVA = pCtx->pVBVA;
    382     if (!vboxVBVAExIsEntryInRangeOrEmpty(pCtx->indexRecordFirstUncompleted, pVBVA->indexRecordFirst, pVBVA->indexRecordFree))
    383     {
    384         WARN(("invalid record set"));
    385     }
    386 
    387     if (!vboxVBVAExIsEntryInRangeOrEmpty(pCtx->off32DataUncompleted, pVBVA->off32Data, pVBVA->off32Free))
    388     {
    389         WARN(("invalid data set"));
    390     }
    391 }
    392 #endif
    393 
    394 /*
    395  * Private operations.
    396  */
    397 static uint32_t vboxHwBufferAvail(PVBVAEXBUFFERCONTEXT pCtx, const VBVABUFFER *pVBVA)
    398 {
    399     int32_t i32Diff = pCtx->off32DataUncompleted - pVBVA->off32Free;
    400 
    401     return i32Diff > 0? i32Diff: pVBVA->cbData + i32Diff;
    402 }
    403 
    404 static uint32_t vboxHwBufferContiguousAvail(PVBVAEXBUFFERCONTEXT pCtx, const VBVABUFFER *pVBVA)
    405 {
    406     int32_t i32Diff = pCtx->off32DataUncompleted - pVBVA->off32Free;
    407 
    408     return i32Diff > 0 ? i32Diff: pVBVA->cbData - pVBVA->off32Free;
    409 }
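
Both availability functions above compute free space as the distance from the write cursor (off32Free) back to the start of data not yet completed by the guest (off32DataUncompleted); the contiguous variant additionally stops at the physical end of the buffer. A small standalone sketch with hypothetical offsets (cbData = 100), not part of this changeset:

    #include <stdint.h>
    #include <stdio.h>

    /* Same arithmetic as vboxHwBufferAvail / vboxHwBufferContiguousAvail above. */
    static uint32_t Avail(uint32_t offUncompleted, uint32_t offFree, uint32_t cbData)
    {
        int32_t i32Diff = (int32_t)(offUncompleted - offFree);
        return i32Diff > 0 ? (uint32_t)i32Diff : cbData + i32Diff;
    }

    static uint32_t ContigAvail(uint32_t offUncompleted, uint32_t offFree, uint32_t cbData)
    {
        int32_t i32Diff = (int32_t)(offUncompleted - offFree);
        return i32Diff > 0 ? (uint32_t)i32Diff : cbData - offFree;
    }

    int main(void)
    {
        /* Free space wraps: bytes 70..99 and 0..29 are free, but only 70..99 are contiguous. */
        printf("%u %u\n", (unsigned)Avail(30, 70, 100), (unsigned)ContigAvail(30, 70, 100)); /* 60 30 */
        /* Free space is the gap 20..49; here it is contiguous by construction. */
        printf("%u %u\n", (unsigned)Avail(50, 20, 100), (unsigned)ContigAvail(50, 20, 100)); /* 30 30 */
        return 0;
    }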
    410 
    411 static void vboxHwBufferPlaceDataAt(PVBVAEXBUFFERCONTEXT pCtx, const void *p,
    412                                     uint32_t cb, uint32_t offset)
    413 {
    414     VBVABUFFER *pVBVA = pCtx->pVBVA;
    415     uint32_t u32BytesTillBoundary = pVBVA->cbData - offset;
    416     uint8_t  *dst                 = &pVBVA->au8Data[offset];
    417     int32_t i32Diff               = cb - u32BytesTillBoundary;
    418 
    419     if (i32Diff <= 0)
    420     {
    421         /* Chunk will not cross buffer boundary. */
    422         memcpy (dst, p, cb);
    423     }
    424     else
    425     {
    426         /* Chunk crosses buffer boundary. */
    427         memcpy (dst, p, u32BytesTillBoundary);
    428         memcpy (&pVBVA->au8Data[0], (uint8_t *)p + u32BytesTillBoundary, i32Diff);
    429     }
    430 
    431     return;
    432 }
    433 
    434 static bool vboxHwBufferWrite(PVBVAEXBUFFERCONTEXT pCtx,
    435                               PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx,
    436                               const void *p, uint32_t cb)
    437 {
    438     VBVARECORD *pRecord;
    439     uint32_t cbHwBufferAvail;
    440 
    441     uint32_t cbWritten = 0;
    442 
    443     VBVABUFFER *pVBVA = pCtx->pVBVA;
    444     Assert(pVBVA);
    445 
    446     if (!pVBVA || pCtx->fHwBufferOverflow)
    447     {
    448         return false;
    449     }
    450 
    451     Assert(pVBVA->indexRecordFirst != pVBVA->indexRecordFree);
    452     Assert(pCtx->indexRecordFirstUncompleted != pVBVA->indexRecordFree);
    453 
    454     pRecord = pCtx->pRecord;
    455     Assert(pRecord && (pRecord->cbRecord & VBVA_F_RECORD_PARTIAL));
    456 
    457     // LogFunc(("%d\n", cb));
    458 
    459     cbHwBufferAvail = vboxHwBufferAvail(pCtx, pVBVA);
    460 
    461     while (cb > 0)
    462     {
    463         uint32_t cbChunk = cb;
    464 
    465         // LogFunc(("pVBVA->off32Free %d, pRecord->cbRecord 0x%08X, cbHwBufferAvail %d, cb %d, cbWritten %d\n",
    466         //             pVBVA->off32Free, pRecord->cbRecord, cbHwBufferAvail, cb, cbWritten));
    467 
    468         if (cbChunk >= cbHwBufferAvail)
    469         {
    470             LogFunc(("1) avail %d, chunk %d\n", cbHwBufferAvail, cbChunk));
    471 
    472             vboxVBVAExFlush(pCtx, pHGSMICtx);
    473 
    474             cbHwBufferAvail = vboxHwBufferAvail(pCtx, pVBVA);
    475 
    476             if (cbChunk >= cbHwBufferAvail)
    477             {
    478                 WARN(("no place for %d bytes. Only %d bytes available after flush. Going to partial writes.\n",
    479                             cb, cbHwBufferAvail));
    480 
    481                 if (cbHwBufferAvail <= pVBVA->cbPartialWriteThreshold)
    482                 {
    483                     WARN(("Buffer overflow!!!\n"));
    484                     pCtx->fHwBufferOverflow = true;
    485                     Assert(false);
    486                     return false;
    487                 }
    488 
    489                 cbChunk = cbHwBufferAvail - pVBVA->cbPartialWriteThreshold;
    490             }
    491         }
    492 
    493         Assert(cbChunk <= cb);
    494         Assert(cbChunk <= vboxHwBufferAvail(pCtx, pVBVA));
    495 
    496         vboxHwBufferPlaceDataAt (pCtx, (uint8_t *)p + cbWritten, cbChunk, pVBVA->off32Free);
    497 
    498         pVBVA->off32Free   = (pVBVA->off32Free + cbChunk) % pVBVA->cbData;
    499         pRecord->cbRecord += cbChunk;
    500         cbHwBufferAvail -= cbChunk;
    501 
    502         cb        -= cbChunk;
    503         cbWritten += cbChunk;
    504     }
    505 
    506     return true;
    507 }
    508 
    509 /*
    510  * Public writer to the hardware buffer.
    511  */
    512 VBVAEX_DECL(uint32_t) VBoxVBVAExGetFreeTail(PVBVAEXBUFFERCONTEXT pCtx)
    513 {
    514     VBVABUFFER *pVBVA = pCtx->pVBVA;
    515     if (pVBVA->off32Data <= pVBVA->off32Free)
    516         return pVBVA->cbData - pVBVA->off32Free;
    517     return 0;
    518 }
    519 
    520 VBVAEX_DECL(void *) VBoxVBVAExAllocContiguous(PVBVAEXBUFFERCONTEXT pCtx, PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx, uint32_t cb)
    521 {
    522     VBVARECORD *pRecord;
    523     uint32_t cbHwBufferContiguousAvail;
    524     uint32_t offset;
    525 
    526     VBVABUFFER *pVBVA = pCtx->pVBVA;
    527     Assert(pVBVA);
    528 
    529     if (!pVBVA || pCtx->fHwBufferOverflow)
    530     {
    531         return NULL;
    532     }
    533 
    534     Assert(pVBVA->indexRecordFirst != pVBVA->indexRecordFree);
    535     Assert(pCtx->indexRecordFirstUncompleted != pVBVA->indexRecordFree);
    536 
    537     pRecord = pCtx->pRecord;
    538     Assert(pRecord && (pRecord->cbRecord & VBVA_F_RECORD_PARTIAL));
    539 
    540     // LogFunc(("%d\n", cb));
    541 
    542     if (pVBVA->cbData < cb)
    543     {
    544         WARN(("requested to allocate buffer of size %d bigger than the VBVA ring buffer size %d", cb, pVBVA->cbData));
    545         return NULL;
    546     }
    547 
    548     cbHwBufferContiguousAvail = vboxHwBufferContiguousAvail(pCtx, pVBVA);
    549 
    550     if (cbHwBufferContiguousAvail < cb)
    551     {
    552         if (cb > pVBVA->cbData - pVBVA->off32Free)
    553         {
    554             /* the entire contiguous part is smaller than the requested buffer */
    555             return NULL;
    556         }
    557 
    558         vboxVBVAExFlush(pCtx, pHGSMICtx);
    559 
    560         cbHwBufferContiguousAvail = vboxHwBufferContiguousAvail(pCtx, pVBVA);
    561         if (cbHwBufferContiguousAvail < cb)
    562         {
    563             /* this is really bad - the host did not clean up the buffer even after we requested a flush */
    564             WARN(("Host did not clean up the buffer!"));
    565             return NULL;
    566         }
    567     }
    568 
    569     offset = pVBVA->off32Free;
    570 
    571     pVBVA->off32Free = (pVBVA->off32Free + cb) % pVBVA->cbData;
    572     pRecord->cbRecord += cb;
    573 
    574     return &pVBVA->au8Data[offset];
    575 }
    576 
    577 VBVAEX_DECL(bool) VBoxVBVAExIsProcessing(PVBVAEXBUFFERCONTEXT pCtx)
    578 {
    579     uint32_t u32HostEvents = pCtx->pVBVA->hostFlags.u32HostEvents;
    580     return !!(u32HostEvents & VBVA_F_STATE_PROCESSING);
    581 }
    582 
    583 VBVAEX_DECL(void) VBoxVBVAExCBufferCompleted(PVBVAEXBUFFERCONTEXT pCtx)
    584 {
    585     VBVABUFFER *pVBVA = pCtx->pVBVA;
    586     uint32_t cbBuffer = pVBVA->aRecords[pCtx->indexRecordFirstUncompleted].cbRecord;
    587     pCtx->indexRecordFirstUncompleted = (pCtx->indexRecordFirstUncompleted + 1) % VBVA_MAX_RECORDS;
    588     pCtx->off32DataUncompleted = (pCtx->off32DataUncompleted + cbBuffer) % pVBVA->cbData;
    589 }
    590 
    591 VBVAEX_DECL(bool) VBoxVBVAExWrite(PVBVAEXBUFFERCONTEXT pCtx, PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx, const void *pv, uint32_t cb)
    592 {
    593     return vboxHwBufferWrite(pCtx, pHGSMICtx, pv, cb);
    594 }
    595 
    596 VBVAEX_DECL(bool) VBoxVBVAExOrderSupported(PVBVAEXBUFFERCONTEXT pCtx, unsigned code)
    597 {
    598     VBVABUFFER *pVBVA = pCtx->pVBVA;
    599 
    600     if (!pVBVA)
    601     {
    602         return false;
    603     }
    604 
    605     if (pVBVA->hostFlags.u32SupportedOrders & (1 << code))
    606     {
    607         return true;
    608     }
    609 
    610     return false;
    611 }
    612 
    613 VBVAEX_DECL(void) VBoxVBVAExSetupBufferContext(PVBVAEXBUFFERCONTEXT pCtx, uint32_t offVRAMBuffer, uint32_t cbBuffer,
    614                                                PFNVBVAEXBUFFERFLUSH pfnFlush, void *pvFlush)
    615 {
    616     memset(pCtx, 0, RT_UOFFSETOF(VBVAEXBUFFERCONTEXT, pVBVA));
    617     pCtx->offVRAMBuffer = offVRAMBuffer;
    618     pCtx->cbBuffer      = cbBuffer;
    619     pCtx->pfnFlush = pfnFlush;
    620     pCtx->pvFlush = pvFlush;
    621 }
    622 
    623 static void* vboxVBVAExIterCur(PVBVAEXBUFFERITERBASE pIter, struct VBVABUFFER *pVBVA, uint32_t *pcbBuffer, bool *pfProcessed)
    624 {
    625     uint32_t cbRecord = pVBVA->aRecords[pIter->iCurRecord].cbRecord;
    626     if (cbRecord == VBVA_F_RECORD_PARTIAL)
    627         return NULL;
    628     if (pcbBuffer)
    629         *pcbBuffer = cbRecord;
    630     if (pfProcessed)
    631         *pfProcessed = !vboxVBVAExIsEntryInRange(pVBVA->indexRecordFirst, pIter->iCurRecord, pVBVA->indexRecordFree);
    632     return &pVBVA->au8Data[pIter->off32CurCmd];
    633 }
    634 
    635 DECLINLINE(uint32_t) vboxVBVAExSubst(uint32_t x, uint32_t val, uint32_t maxVal)
    636 {
    637     int32_t result = (int32_t)(x - val);
    638     return result >= 0 ? (uint32_t)result : maxVal - (((uint32_t)(-result)) % maxVal);
    639 }
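
vboxVBVAExSubst above is a wrap-safe decrement, i.e. (x - val) mod maxVal. A quick standalone check of both branches (hypothetical values, not part of this changeset):

    #include <assert.h>
    #include <stdint.h>

    /* Same expression as vboxVBVAExSubst above. */
    static uint32_t Subst(uint32_t x, uint32_t val, uint32_t maxVal)
    {
        int32_t result = (int32_t)(x - val);
        return result >= 0 ? (uint32_t)result : maxVal - (((uint32_t)(-result)) % maxVal);
    }

    int main(void)
    {
        assert(Subst(5, 2, 64) == 3);   /* no wrap: 5 - 2 */
        assert(Subst(2, 5, 64) == 61);  /* wraps: 2 - 5 == -3, i.e. 61 (mod 64) */
        return 0;
    }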
    640 
    641 VBVAEX_DECL(void) VBoxVBVAExBIterInit(PVBVAEXBUFFERCONTEXT pCtx, PVBVAEXBUFFERBACKWARDITER pIter)
    642 {
    643     struct VBVABUFFER *pVBVA = pCtx->pVBVA;
    644     pIter->Base.pCtx = pCtx;
    645     uint32_t iCurRecord = vboxVBVAExSubst(pVBVA->indexRecordFree, 1, VBVA_MAX_RECORDS);
    646     if (vboxVBVAExIsEntryInRange(pCtx->indexRecordFirstUncompleted, iCurRecord, pVBVA->indexRecordFree))
    647     {
    648         /* even if the command gets completed by the time we read pCtx->pVBVA->aRecords[iCurRecord].cbRecord below,
    649          * that cbRecord value will still be valid, as it can only be modified by a submitter,
    650          * and we are in submitter context now */
    651         pIter->Base.iCurRecord = iCurRecord;
    652         pIter->Base.off32CurCmd = vboxVBVAExSubst(pVBVA->off32Free, pCtx->pVBVA->aRecords[iCurRecord].cbRecord, pVBVA->cbData);
    653     }
    654     else
    655     {
    656         /* no data */
    657         pIter->Base.iCurRecord = pVBVA->indexRecordFree;
    658         pIter->Base.off32CurCmd = pVBVA->off32Free;
    659     }
    660 }
    661 
    662 VBVAEX_DECL(void *) VBoxVBVAExBIterNext(PVBVAEXBUFFERBACKWARDITER pIter, uint32_t *pcbBuffer, bool *pfProcessed)
    663 {
    664     PVBVAEXBUFFERCONTEXT pCtx = pIter->Base.pCtx;
    665     struct VBVABUFFER *pVBVA = pCtx->pVBVA;
    666     uint32_t indexRecordFirstUncompleted = pCtx->indexRecordFirstUncompleted;
    667     if (!vboxVBVAExIsEntryInRange(indexRecordFirstUncompleted, pIter->Base.iCurRecord, pVBVA->indexRecordFree))
    668         return NULL;
    669 
    670     void *pvBuffer = vboxVBVAExIterCur(&pIter->Base, pVBVA, pcbBuffer, pfProcessed);
    671     AssertRelease(pvBuffer);
    672 
    673     /* even if the command gets completed by the time we read pCtx->pVBVA->aRecords[pIter->Base.iCurRecord].cbRecord below,
    674      * that cbRecord value will still be valid, as it can only be modified by a submitter,
    675      * and we are in submitter context now */
    676     pIter->Base.iCurRecord = vboxVBVAExSubst(pIter->Base.iCurRecord, 1, VBVA_MAX_RECORDS);
    677     pIter->Base.off32CurCmd = vboxVBVAExSubst(pIter->Base.off32CurCmd, pCtx->pVBVA->aRecords[pIter->Base.iCurRecord].cbRecord, pVBVA->cbData);
    678 
    679     return pvBuffer;
    680 }
    681 
    682 VBVAEX_DECL(void) VBoxVBVAExCFIterInit(PVBVAEXBUFFERCONTEXT pCtx, PVBVAEXBUFFERFORWARDITER pIter)
    683 {
    684     pIter->Base.pCtx = pCtx;
    685     pIter->Base.iCurRecord = pCtx->indexRecordFirstUncompleted;
    686     pIter->Base.off32CurCmd = pCtx->off32DataUncompleted;
    687 }
    688 
    689 VBVAEX_DECL(void *) VBoxVBVAExCFIterNext(PVBVAEXBUFFERFORWARDITER pIter, uint32_t *pcbBuffer, bool *pfProcessed)
    690 {
    691     PVBVAEXBUFFERCONTEXT pCtx = pIter->Base.pCtx;
    692     struct VBVABUFFER *pVBVA = pCtx->pVBVA;
    693     uint32_t indexRecordFree = pVBVA->indexRecordFree;
    694     if (!vboxVBVAExIsEntryInRange(pCtx->indexRecordFirstUncompleted, pIter->Base.iCurRecord, indexRecordFree))
    695         return NULL;
    696 
    697     uint32_t cbBuffer;
    698     void *pvData = vboxVBVAExIterCur(&pIter->Base, pVBVA, &cbBuffer, pfProcessed);
    699     if (!pvData)
    700         return NULL;
    701 
    702     pIter->Base.iCurRecord = (pIter->Base.iCurRecord + 1) % VBVA_MAX_RECORDS;
    703     pIter->Base.off32CurCmd = (pIter->Base.off32CurCmd + cbBuffer) % pVBVA->cbData;
    704 
    705     if (pcbBuffer)
    706         *pcbBuffer = cbBuffer;
    707 
    708     return pvData;
    709 }
    710 
    711 /**/
    712 
    713 int VBoxCmdVbvaEnable(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva)
    714 {
    715     return VBoxVBVAExEnable(&pVbva->Vbva, &VBoxCommonFromDeviceExt(pDevExt)->guestCtx, pVbva->Vbva.pVBVA);
    716 }
    717 
    718 int VBoxCmdVbvaDisable(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva)
    719 {
    720     VBoxVBVAExDisable(&pVbva->Vbva, &VBoxCommonFromDeviceExt(pDevExt)->guestCtx);
    721     return VINF_SUCCESS;
    722 }
    723 
    724 int VBoxCmdVbvaDestroy(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva)
    725 {
    726     int rc = VINF_SUCCESS;
    727     VBoxMPCmnUnmapAdapterMemory(VBoxCommonFromDeviceExt(pDevExt), (void**)&pVbva->Vbva.pVBVA);
    728     memset(pVbva, 0, sizeof (*pVbva));
    729     return rc;
    730 }
    731 
    732 static void vboxCmdVbvaDdiNotifyCompleteIrq(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva, UINT u32FenceId, DXGK_INTERRUPT_TYPE enmComplType)
    733 {
    734     DXGKARGCB_NOTIFY_INTERRUPT_DATA notify;
    735     memset(&notify, 0, sizeof(DXGKARGCB_NOTIFY_INTERRUPT_DATA));
    736     switch (enmComplType)
    737     {
    738         case DXGK_INTERRUPT_DMA_COMPLETED:
    739             notify.InterruptType = DXGK_INTERRUPT_DMA_COMPLETED;
    740             notify.DmaCompleted.SubmissionFenceId = u32FenceId;
    741             notify.DmaCompleted.NodeOrdinal = pVbva->idNode;
    742             break;
    743 
    744         case DXGK_INTERRUPT_DMA_PREEMPTED:
    745             notify.InterruptType = DXGK_INTERRUPT_DMA_PREEMPTED;
    746             notify.DmaPreempted.PreemptionFenceId = u32FenceId;
    747             notify.DmaPreempted.NodeOrdinal = pVbva->idNode;
    748             notify.DmaPreempted.LastCompletedFenceId = pVbva->u32FenceCompleted;
    749             break;
    750 
    751         case DXGK_INTERRUPT_DMA_FAULTED:
    752             Assert(0);
    753             notify.InterruptType = DXGK_INTERRUPT_DMA_FAULTED;
    754             notify.DmaFaulted.FaultedFenceId = u32FenceId;
    755             notify.DmaFaulted.Status = STATUS_UNSUCCESSFUL; /** @todo better status ? */
    756             notify.DmaFaulted.NodeOrdinal = pVbva->idNode;
    757             break;
    758 
    759         default:
    760             WARN(("unrecognized completion type %d", enmComplType));
    761             break;
    762     }
    763 
    764     pDevExt->u.primary.DxgkInterface.DxgkCbNotifyInterrupt(pDevExt->u.primary.DxgkInterface.DeviceHandle, &notify);
    765 }
    766 
    767 static int vboxCmdVbvaFlush(PVBOXMP_DEVEXT pDevExt, HGSMIGUESTCOMMANDCONTEXT *pCtx, bool fBufferOverflow)
    768 {
    769     RT_NOREF(pDevExt);
    770 
    771     /* Issue the flush command. */
    772     VBVACMDVBVAFLUSH *pFlush = (VBVACMDVBVAFLUSH*)VBoxHGSMIBufferAlloc(pCtx,
    773                                                                        sizeof(VBVACMDVBVAFLUSH),
    774                                                                        HGSMI_CH_VBVA,
    775                                                                        VBVA_CMDVBVA_FLUSH);
    776     if (!pFlush)
    777     {
    778         WARN(("VBoxHGSMIBufferAlloc failed\n"));
    779         return VERR_OUT_OF_RESOURCES;
    780     }
    781 
    782     pFlush->u32Flags = fBufferOverflow ?  VBVACMDVBVAFLUSH_F_GUEST_BUFFER_OVERFLOW : 0;
    783 
    784     VBoxHGSMIBufferSubmit(pCtx, pFlush);
    785 
    786     VBoxHGSMIBufferFree(pCtx, pFlush);
    787 
    788     return VINF_SUCCESS;
    789 }
    790 
    791 typedef struct VBOXCMDVBVA_CHECK_COMPLETED_CB
    792 {
    793     PVBOXMP_DEVEXT pDevExt;
    794     VBOXCMDVBVA *pVbva;
    795     /* last completed fence id */
    796     uint32_t u32FenceCompleted;
    797     /* last submitted fence id */
    798     uint32_t u32FenceSubmitted;
    799     /* last processed fence id (i.e. either completed or cancelled) */
    800     uint32_t u32FenceProcessed;
    801 } VBOXCMDVBVA_CHECK_COMPLETED_CB;
    802 
    803 static BOOLEAN vboxCmdVbvaCheckCompletedIrqCb(PVOID pContext)
    804 {
    805     VBOXCMDVBVA_CHECK_COMPLETED_CB *pCompleted = (VBOXCMDVBVA_CHECK_COMPLETED_CB*)pContext;
    806     BOOLEAN bRc = DxgkDdiInterruptRoutineNew(pCompleted->pDevExt, 0);
    807     if (pCompleted->pVbva)
    808     {
    809         pCompleted->u32FenceCompleted = pCompleted->pVbva->u32FenceCompleted;
    810         pCompleted->u32FenceSubmitted = pCompleted->pVbva->u32FenceSubmitted;
    811         pCompleted->u32FenceProcessed = pCompleted->pVbva->u32FenceProcessed;
    812     }
    813     else
    814     {
    815         WARN(("no vbva"));
    816         pCompleted->u32FenceCompleted = 0;
    817         pCompleted->u32FenceSubmitted = 0;
    818         pCompleted->u32FenceProcessed = 0;
    819     }
    820     return bRc;
    821 }
    822 
    823 
    824 static uint32_t vboxCmdVbvaCheckCompleted(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva, bool fPingHost, HGSMIGUESTCOMMANDCONTEXT *pCtx, bool fBufferOverflow, uint32_t *pu32FenceSubmitted, uint32_t *pu32FenceProcessed)
    825 {
    826     if (fPingHost)
    827         vboxCmdVbvaFlush(pDevExt, pCtx, fBufferOverflow);
    828 
    829     VBOXCMDVBVA_CHECK_COMPLETED_CB context;
    830     context.pDevExt = pDevExt;
    831     context.pVbva = pVbva;
    832     context.u32FenceCompleted = 0;
    833     context.u32FenceSubmitted = 0;
    834     context.u32FenceProcessed = 0;
    835     BOOLEAN bRet;
    836     NTSTATUS Status = pDevExt->u.primary.DxgkInterface.DxgkCbSynchronizeExecution(
    837                             pDevExt->u.primary.DxgkInterface.DeviceHandle,
    838                             vboxCmdVbvaCheckCompletedIrqCb,
    839                             &context,
    840                             0, /* IN ULONG MessageNumber */
    841                             &bRet);
    842     AssertNtStatusSuccess(Status);
    843 
    844     if (pu32FenceSubmitted)
    845         *pu32FenceSubmitted = context.u32FenceSubmitted;
    846 
    847     if (pu32FenceProcessed)
    848         *pu32FenceProcessed = context.u32FenceProcessed;
    849 
    850     return context.u32FenceCompleted;
    851 }
    852 
    853 static DECLCALLBACK(void) voxCmdVbvaFlushCb(struct VBVAEXBUFFERCONTEXT *pCtx, PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx, void *pvFlush)
    854 {
    855     NOREF(pCtx);
    856     PVBOXMP_DEVEXT pDevExt = (PVBOXMP_DEVEXT)pvFlush;
    857 
    858     vboxCmdVbvaCheckCompleted(pDevExt, NULL,  true /*fPingHost*/, pHGSMICtx, true /*fBufferOverflow*/, NULL, NULL);
    859 }
    860 
    861 int VBoxCmdVbvaCreate(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva, ULONG offBuffer, ULONG cbBuffer)
    862 {
    863     memset(pVbva, 0, sizeof (*pVbva));
    864 
    865     int rc = VBoxMPCmnMapAdapterMemory(VBoxCommonFromDeviceExt(pDevExt),
    866                                        (void**)&pVbva->Vbva.pVBVA,
    867                                        offBuffer,
    868                                        cbBuffer);
    869     if (RT_SUCCESS(rc))
    870     {
    871         Assert(pVbva->Vbva.pVBVA);
    872         VBoxVBVAExSetupBufferContext(&pVbva->Vbva, offBuffer, cbBuffer, voxCmdVbvaFlushCb, pDevExt);
    873     }
    874     else
    875     {
    876         WARN(("VBoxMPCmnMapAdapterMemory failed rc %d", rc));
    877     }
    878 
    879     return rc;
    880 }
    881 
    882 void VBoxCmdVbvaSubmitUnlock(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva, VBOXCMDVBVA_HDR* pCmd, uint32_t u32FenceID)
    883 {
    884     if (u32FenceID)
    885         pVbva->u32FenceSubmitted = u32FenceID;
    886     else
    887         WARN(("no cmd fence specified"));
    888 
    889     pCmd->u8State = VBOXCMDVBVA_STATE_SUBMITTED;
    890 
    891     pCmd->u2.u32FenceID = u32FenceID;
    892 
    893     VBoxVBVAExBufferEndUpdate(&pVbva->Vbva);
    894 
    895     if (!VBoxVBVAExIsProcessing(&pVbva->Vbva))
    896     {
    897         /* Issue the submit command. */
    898         HGSMIGUESTCOMMANDCONTEXT *pCtx = &VBoxCommonFromDeviceExt(pDevExt)->guestCtx;
    899         VBVACMDVBVASUBMIT *pSubmit = (VBVACMDVBVASUBMIT*)VBoxHGSMIBufferAlloc(pCtx,
    900                                        sizeof (VBVACMDVBVASUBMIT),
    901                                        HGSMI_CH_VBVA,
    902                                        VBVA_CMDVBVA_SUBMIT);
    903         if (!pSubmit)
    904         {
    905             WARN(("VBoxHGSMIBufferAlloc failed\n"));
    906             return;
    907         }
    908 
    909         pSubmit->u32Reserved = 0;
    910 
    911         VBoxHGSMIBufferSubmit(pCtx, pSubmit);
    912 
    913         VBoxHGSMIBufferFree(pCtx, pSubmit);
    914     }
    915 }
    916 
    917 VBOXCMDVBVA_HDR* VBoxCmdVbvaSubmitLock(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva, uint32_t cbCmd)
    918 {
    919     if (VBoxVBVAExGetSize(&pVbva->Vbva) < cbCmd)
    920     {
    921         WARN(("command does not fit in the VBVA buffer; splitting buffers is not supported"));
    922         return NULL;
    923     }
    924 
    925     if (!VBoxVBVAExBufferBeginUpdate(&pVbva->Vbva, &VBoxCommonFromDeviceExt(pDevExt)->guestCtx))
    926     {
    927         WARN(("VBoxVBVAExBufferBeginUpdate failed!"));
    928         return NULL;
    929     }
    930 
    931     void* pvBuffer = VBoxVBVAExAllocContiguous(&pVbva->Vbva, &VBoxCommonFromDeviceExt(pDevExt)->guestCtx, cbCmd);
    932     if (!pvBuffer)
    933     {
    934         LOG(("failed to allocate contiguous buffer %d bytes, trying nopping the tail", cbCmd));
    935         uint32_t cbTail = VBoxVBVAExGetFreeTail(&pVbva->Vbva);
    936         if (!cbTail)
    937         {
    938             WARN(("this is not a free tail case, cbTail is 0"));
    939             return NULL;
    940         }
    941 
    942         Assert(cbTail < cbCmd);
    943 
    944         pvBuffer = VBoxVBVAExAllocContiguous(&pVbva->Vbva, &VBoxCommonFromDeviceExt(pDevExt)->guestCtx, cbTail);
    945 
    946         Assert(pvBuffer);
    947 
    948         *((uint8_t*)pvBuffer) = VBOXCMDVBVA_OPTYPE_NOP;
    949 
    950         VBoxVBVAExBufferEndUpdate(&pVbva->Vbva);
    951 
    952         if (!VBoxVBVAExBufferBeginUpdate(&pVbva->Vbva, &VBoxCommonFromDeviceExt(pDevExt)->guestCtx))
    953         {
    954             WARN(("VBoxVBVAExBufferBeginUpdate 2 failed!"));
    955             return NULL;
    956         }
    957 
    958         pvBuffer = VBoxVBVAExAllocContiguous(&pVbva->Vbva, &VBoxCommonFromDeviceExt(pDevExt)->guestCtx, cbCmd);
    959         if (!pvBuffer)
    960         {
    961             WARN(("failed to allocate contiguous buffer %d bytes", cbCmd));
    962             return NULL;
    963         }
    964     }
    965 
    966     Assert(pvBuffer);
    967 
    968     return (VBOXCMDVBVA_HDR*)pvBuffer;
    969 }
    970 
    971 int VBoxCmdVbvaSubmit(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva, struct VBOXCMDVBVA_HDR *pCmd, uint32_t u32FenceID, uint32_t cbCmd)
    972 {
    973     VBOXCMDVBVA_HDR* pHdr = VBoxCmdVbvaSubmitLock(pDevExt, pVbva, cbCmd);
    974 
    975     if (!pHdr)
    976     {
    977         WARN(("VBoxCmdVbvaSubmitLock failed"));
    978         return VERR_GENERAL_FAILURE;
    979     }
    980 
    981     memcpy(pHdr, pCmd, cbCmd);
    982 
    983     VBoxCmdVbvaSubmitUnlock(pDevExt, pVbva, pCmd, u32FenceID);
    984 
    985     return VINF_SUCCESS;
    986 }
    987 
    988 bool VBoxCmdVbvaCheckCompletedIrq(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva)
    989 {
    990     if (pVbva->Vbva.pVBVA == NULL)
    991         return false;
    992 
    993     VBVAEXBUFFERFORWARDITER Iter;
    994     VBoxVBVAExCFIterInit(&pVbva->Vbva, &Iter);
    995 
    996     bool fHasCommandsCompletedPreempted = false;
    997     bool fProcessed;
    998     uint8_t* pu8Cmd;
    999 
    1000 
    1001     while ((pu8Cmd = (uint8_t*)VBoxVBVAExCFIterNext(&Iter, NULL, &fProcessed)) != NULL)
    1002     {
    1003         if (!fProcessed)
    1004             break;
    1005 
    1006         if (*pu8Cmd == VBOXCMDVBVA_OPTYPE_NOP)
    1007             continue;
    1008 
    1009         VBOXCMDVBVA_HDR *pCmd = (VBOXCMDVBVA_HDR*)pu8Cmd;
    1010         uint8_t u8State = pCmd->u8State;
    1011         uint32_t u32FenceID = pCmd->u2.u32FenceID;
    1012 
    1013         Assert(u8State == VBOXCMDVBVA_STATE_IN_PROGRESS
    1014                 || u8State == VBOXCMDVBVA_STATE_CANCELLED);
    1015         Assert(u32FenceID);
    1016         VBoxVBVAExCBufferCompleted(&pVbva->Vbva);
    1017 
    1018         if (!u32FenceID)
    1019         {
    1020             WARN(("fence is NULL"));
    1021             continue;
    1022         }
    1023 
    1024         pVbva->u32FenceProcessed = u32FenceID;
    1025 
    1026         if (u8State == VBOXCMDVBVA_STATE_IN_PROGRESS)
    1027             pVbva->u32FenceCompleted = u32FenceID;
    1028         else
    1029         {
    1030             Assert(u8State == VBOXCMDVBVA_STATE_CANCELLED);
    1031             continue;
    1032         }
    1033 
    1034         Assert(u32FenceID);
    1035         vboxCmdVbvaDdiNotifyCompleteIrq(pDevExt, pVbva, u32FenceID, DXGK_INTERRUPT_DMA_COMPLETED);
    1036 
    1037         if (pVbva->cPreempt && pVbva->aPreempt[pVbva->iCurPreempt].u32SubmitFence == u32FenceID)
    1038         {
    1039             Assert(pVbva->aPreempt[pVbva->iCurPreempt].u32PreemptFence);
    1040             vboxCmdVbvaDdiNotifyCompleteIrq(pDevExt, pVbva, pVbva->aPreempt[pVbva->iCurPreempt].u32PreemptFence, DXGK_INTERRUPT_DMA_PREEMPTED);
    1041             --pVbva->cPreempt;
    1042             if (!pVbva->cPreempt)
    1043                 pVbva->iCurPreempt = 0;
    1044             else
    1045             {
    1046                 ++pVbva->iCurPreempt;
    1047                 pVbva->iCurPreempt %= VBOXCMDVBVA_PREEMPT_EL_SIZE;
    1048             }
    1049         }
    1050 
    1051         fHasCommandsCompletedPreempted = true;
    1052     }
    1053 
    1054 #ifdef DEBUG
    1055     vboxHwBufferVerifyCompleted(&pVbva->Vbva);
    1056 #endif
    1057 
    1058     return fHasCommandsCompletedPreempted;
    1059 }
    1060 
    1061 #if 0
    1062 static uint32_t vboxCVDdiSysMemElBuild(VBOXCMDVBVA_SYSMEMEL *pEl, PMDL pMdl, uint32_t iPfn, uint32_t cPages)
    1063 {
    1064     PFN_NUMBER cur = MmGetMdlPfnArray(pMdl)[iPfn];
    1065     uint32_t cbEl = sizeof (*pEl);
    1066     uint32_t cStoredPages = 1;
    1067     PFN_NUMBER next;
    1068     pEl->iPage1 = (uint32_t)(cur & 0xfffff);
    1069     pEl->iPage2 = (uint32_t)(cur >> 20);
    1070     --cPages;
    1071     for ( ; cPages && cStoredPages < VBOXCMDVBVA_SYSMEMEL_CPAGES_MAX; --cPages, ++cStoredPages, cur = next)
    1072     {
    1073         next = MmGetMdlPfnArray(pMdl)[iPfn+cStoredPages];
    1074         if (next != cur+1)
    1075             break;
    1076     }
    1077 
    1078     Assert(cStoredPages);
    1079     pEl->cPagesAfterFirst = cStoredPages - 1;
    1080 
    1081     return cPages;
    1082 }
    1083 
    1084 uint32_t VBoxCVDdiPTransferVRamSysBuildEls(VBOXCMDVBVA_PAGING_TRANSFER *pCmd, PMDL pMdl, uint32_t iPfn, uint32_t cPages, uint32_t cbBuffer, uint32_t *pcPagesWritten)
    1085 {
    1086     uint32_t cInitPages = cPages;
    1087     uint32_t cbInitBuffer = cbBuffer;
    1088     uint32_t cEls = 0;
    1089     VBOXCMDVBVA_SYSMEMEL *pEl = pCmd->aSysMem;
    1090 
    1091     Assert(cbBuffer >= sizeof (VBOXCMDVBVA_PAGING_TRANSFER));
    1092 
    1093     cbBuffer -= RT_OFFSETOF(VBOXCMDVBVA_PAGING_TRANSFER, aSysMem);
    1094 
    1095     for (; cPages && cbBuffer >= sizeof (VBOXCMDVBVA_SYSMEMEL); ++cEls, cbBuffer-=sizeof (VBOXCMDVBVA_SYSMEMEL), ++pEl)
    1096     {
    1097         cPages = vboxCVDdiSysMemElBuild(pEl, pMdl, iPfn + cInitPages - cPages, cPages);
    1098     }
    1099 
    1100     *pcPagesWritten = cInitPages - cPages;
    1101     return cbInitBuffer - cbBuffer;
    1102 }
    1103 #endif
    1104 
    1105 uint32_t VBoxCVDdiPTransferVRamSysBuildEls(VBOXCMDVBVA_PAGING_TRANSFER *pCmd, PMDL pMdl, uint32_t iPfn, uint32_t cPages, uint32_t cbBuffer, uint32_t *pcPagesWritten)
    1106 {
    1107     uint32_t cbInitBuffer = cbBuffer;
    1108     uint32_t i = 0;
    1109     VBOXCMDVBVAPAGEIDX *pPageNumbers = pCmd->Data.aPageNumbers;
    1110 
    1111     cbBuffer -= RT_UOFFSETOF(VBOXCMDVBVA_PAGING_TRANSFER, Data.aPageNumbers);
    1112 
    1113     for (; i < cPages && cbBuffer >= sizeof (*pPageNumbers); ++i, cbBuffer -= sizeof (*pPageNumbers))
    1114     {
    1115         pPageNumbers[i] = (VBOXCMDVBVAPAGEIDX)(MmGetMdlPfnArray(pMdl)[iPfn + i]);
    1116     }
    1117 
    1118     *pcPagesWritten = i;
    1119     Assert(cbInitBuffer - cbBuffer == RT_UOFFSETOF_DYN(VBOXCMDVBVA_PAGING_TRANSFER, Data.aPageNumbers[i]));
    1120     Assert(cbInitBuffer - cbBuffer >= sizeof (VBOXCMDVBVA_PAGING_TRANSFER));
    1121     return cbInitBuffer - cbBuffer;
    1122 }
    1123 
    1124 
    1125 #endif
  • trunk/src/VBox/Additions/WINNT/Graphics/Video/mp/wddm/VBoxMPVbva.h

    r80435 → r80483
    62 62         } while (0)
    63 63
    64 
    65 #ifdef VBOX_WITH_CROGL
    66 /* customized VBVA implementation */
    67 struct VBVAEXBUFFERCONTEXT;
    68 
    69 typedef DECLCALLBACKPTR(void, PFNVBVAEXBUFFERFLUSH) (struct VBVAEXBUFFERCONTEXT *pCtx, PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx, void *pvFlush);
    70 
    71 /**
    72  * Structure grouping the context needed for sending graphics acceleration
    73  * information to the host via VBVA.  Each screen has its own VBVA buffer.
    74  */
    75 typedef struct VBVAEXBUFFERCONTEXT
    76 {
    77     /** Offset of the buffer in the VRAM section for the screen */
    78     uint32_t    offVRAMBuffer;
    79     /** Length of the buffer in bytes */
    80     uint32_t    cbBuffer;
    81     /** This flag is set if we wrote to the buffer faster than the host could
    82      * read it. */
    83     bool        fHwBufferOverflow;
    84     /* the window between indexRecordFirstUncompleted and pVBVA->indexRecordFirst represents
    85      * command records processed by the host, but not completed by the guest yet */
    86     volatile uint32_t    indexRecordFirstUncompleted;
    87     /* the window between off32DataUncompleted and pVBVA->off32Data represents
    88      * command data processed by the host, but not completed by the guest yet */
    89     uint32_t    off32DataUncompleted;
    90     /* flush function */
    91     PFNVBVAEXBUFFERFLUSH pfnFlush;
    92     void *pvFlush;
    93     /** The VBVA record that we are currently preparing for the host, NULL if
    94      * none. */
    95     struct VBVARECORD *pRecord;
    96     /** Pointer to the VBVA buffer mapped into the current address space.  Will
    97      * be NULL if VBVA is not enabled. */
    98     struct VBVABUFFER *pVBVA;
    99 } VBVAEXBUFFERCONTEXT, *PVBVAEXBUFFERCONTEXT;
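
The indexRecordFirstUncompleted / indexRecordFirst / indexRecordFree cursors partition the record ring into three windows. A hypothetical snapshot for illustration (VBVA_MAX_RECORDS assumed to be 64; the concrete values are made up):

    /*
     * indexRecordFirstUncompleted = 3    oldest record not yet completed by the guest
     * pVBVA->indexRecordFirst     = 7    oldest record not yet processed by the host
     * pVBVA->indexRecordFree      = 10   next record the submitter will use
     *
     * records [3..6]   processed by the host, completion not yet handled by the guest
     * records [7..9]   submitted, still owned by the host
     * records [10..2]  free (this window wraps around the ring)
     */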
    100 
    101 typedef struct VBVAEXBUFFERITERBASE
    102 {
    103     struct VBVAEXBUFFERCONTEXT *pCtx;
    104     /* index of the current record */
    105     uint32_t iCurRecord;
    106     /* offset of the current command */
    107     uint32_t off32CurCmd;
    108 } VBVAEXBUFFERITERBASE, *PVBVAEXBUFFERITERBASE;
    109 
    110 typedef struct VBVAEXBUFFERFORWARDITER
    111 {
    112     VBVAEXBUFFERITERBASE Base;
    113 } VBVAEXBUFFERFORWARDITER, *PVBVAEXBUFFERFORWARDITER;
    114 
    115 typedef struct VBVAEXBUFFERBACKWARDITER
    116 {
    117     VBVAEXBUFFERITERBASE Base;
    118 } VBVAEXBUFFERBACKWARDITER, *PVBVAEXBUFFERBACKWARDITER;
    119 
    120 #define VBOXCMDVBVA_BUFFERSIZE(_cbCmdApprox) (RT_OFFSETOF(VBVABUFFER, au8Data) + ((RT_SIZEOFMEMB(VBVABUFFER, aRecords)/RT_SIZEOFMEMB(VBVABUFFER, aRecords[0])) * (_cbCmdApprox)))
    121 
    122 typedef struct VBOXCMDVBVA_PREEMPT_EL
    123 {
    124     uint32_t u32SubmitFence;
    125     uint32_t u32PreemptFence;
    126 } VBOXCMDVBVA_PREEMPT_EL;
    127 
    128 #define VBOXCMDVBVA_PREEMPT_EL_SIZE 16
    129 
    130 typedef struct VBOXCMDVBVA
    131 {
    132     VBVAEXBUFFERCONTEXT Vbva;
    133 
    134     /* last completed fence id */
    135     uint32_t u32FenceCompleted;
    136     /* last submitted fence id */
    137     uint32_t u32FenceSubmitted;
    138     /* last processed fence id (i.e. either completed or cancelled) */
    139     uint32_t u32FenceProcessed;
    140 
    141     /* node ordinal */
    142     uint32_t idNode;
    143 
    144     uint32_t cPreempt;
    145     uint32_t iCurPreempt;
    146     VBOXCMDVBVA_PREEMPT_EL aPreempt[VBOXCMDVBVA_PREEMPT_EL_SIZE];
    147 } VBOXCMDVBVA;
    148 
    149 /** @name VBVAEx APIs
    150  * @{ */
    151 #define VBVAEX_DECL(type) type VBOXCALL
    152 VBVAEX_DECL(int) VBoxVBVAExEnable(PVBVAEXBUFFERCONTEXT pCtx, PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx, struct VBVABUFFER *pVBVA);
    153 VBVAEX_DECL(void) VBoxVBVAExDisable(PVBVAEXBUFFERCONTEXT pCtx, PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx);
    154 VBVAEX_DECL(bool) VBoxVBVAExBufferBeginUpdate(PVBVAEXBUFFERCONTEXT pCtx, PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx);
    155 VBVAEX_DECL(void) VBoxVBVAExBufferEndUpdate(PVBVAEXBUFFERCONTEXT pCtx);
    156 VBVAEX_DECL(bool) VBoxVBVAExWrite(PVBVAEXBUFFERCONTEXT pCtx, PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx, const void *pv, uint32_t cb);
    157 
    158 VBVAEX_DECL(bool) VBoxVBVAExOrderSupported(PVBVAEXBUFFERCONTEXT pCtx, unsigned code);
    159 
    160 VBVAEX_DECL(void) VBoxVBVAExSetupBufferContext(PVBVAEXBUFFERCONTEXT pCtx, uint32_t offVRAMBuffer, uint32_t cbBuffer,
    161                                         PFNVBVAEXBUFFERFLUSH pfnFlush, void *pvFlush);
    162 
    163 DECLINLINE(uint32_t) VBoxVBVAExGetSize(PVBVAEXBUFFERCONTEXT pCtx)
    164 {
    165     return pCtx->pVBVA->cbData;
    166 }
    167 
    168 /** can be used to ensure the command will not cross the ring buffer boundary,
    169  * and thus will not be split */
    170 VBVAEX_DECL(uint32_t) VBoxVBVAExGetFreeTail(PVBVAEXBUFFERCONTEXT pCtx);
    171 /** allocates a contiguous buffer of a given size, i.e. one that is not split across ring buffer boundaries */
    172 VBVAEX_DECL(void *) VBoxVBVAExAllocContiguous(PVBVAEXBUFFERCONTEXT pCtx, PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx, uint32_t cb);
    173 /** answers whether the host is in the "processing" state now,
    174  * i.e. if "processing" is true after the command is submitted, no notification needs to be posted to the host for the command to be processed;
    175  * otherwise, the host should be notified about the command */
    176 VBVAEX_DECL(bool) VBoxVBVAExIsProcessing(PVBVAEXBUFFERCONTEXT pCtx);
    177 
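
The submitter-side API declared above is used in a begin/write-or-alloc/end pattern. Below is a minimal sketch of that flow (hypothetical helper, driver context assumed, error handling elided; pCtx is assumed to have been set up via VBoxVBVAExSetupBufferContext and enabled via VBoxVBVAExEnable):

    /* Hypothetical submitter flow built from the declarations above. */
    static void vboxExampleSubmitCmd(PVBVAEXBUFFERCONTEXT pCtx, PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx,
                                     const void *pvCmd, uint32_t cbCmd)
    {
        if (!VBoxVBVAExBufferBeginUpdate(pCtx, pHGSMICtx))
            return; /* no free record slot even after a flush */

        /* Stream the bytes (the write may wrap around the ring) ... */
        VBoxVBVAExWrite(pCtx, pHGSMICtx, pvCmd, cbCmd);
        /* ... or, for commands that must stay contiguous, use
         * VBoxVBVAExGetFreeTail / VBoxVBVAExAllocContiguous instead. */

        VBoxVBVAExBufferEndUpdate(pCtx); /* clears VBVA_F_RECORD_PARTIAL on the record */

        /* If the host is not already processing the ring, it must be notified;
         * see VBoxVBVAExIsProcessing and VBoxCmdVbvaSubmitUnlock in VBoxMPVbva.cpp. */
    }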
    178 /** initializes an iterator that starts at the free record,
    179  * i.e. VBoxVBVAExBIterNext would return the first uncompleted record.
    180  *
    181  * can be used by the submitter only */
    182 VBVAEX_DECL(void) VBoxVBVAExBIterInit(PVBVAEXBUFFERCONTEXT pCtx, PVBVAEXBUFFERBACKWARDITER pIter);
    183 /** can be used by submitter only */
    184 VBVAEX_DECL(void *) VBoxVBVAExBIterNext(PVBVAEXBUFFERBACKWARDITER pIter, uint32_t *pcbBuffer, bool *pfProcessed);
    185 
    186 /* completer functions.
    187  * The completer can only use the functions below, and the submitter is NOT allowed to use them.
    188  * Completer functions are prefixed with VBoxVBVAExC as opposed to submitter ones,
    189  * which do not have the trailing "C" in the prefix */
    190 /** initializes an iterator that starts at the first completed record,
    191  * i.e. VBoxVBVAExCFIterNext would return the first uncompleted record.
    192  * Note that we can not have an iterator that starts at a processed record
    193  * (i.e. one processed by the host, but not completed by the guest), since the host modifies
    194  * VBVABUFFER::off32Data and VBVABUFFER::indexRecordFirst concurrently,
    195  * and so we may end up with an inconsistent index-offData pair.
    196  *
    197  * can be used by the completer only */
    198 VBVAEX_DECL(void) VBoxVBVAExCFIterInit(PVBVAEXBUFFERCONTEXT pCtx, PVBVAEXBUFFERFORWARDITER pIter);
    199 /** can be used by completer only */
    200 VBVAEX_DECL(void *) VBoxVBVAExCFIterNext(PVBVAEXBUFFERFORWARDITER pIter, uint32_t *pcbBuffer, bool *pfProcessed);
    201 
    202 VBVAEX_DECL(void) VBoxVBVAExCBufferCompleted(PVBVAEXBUFFERCONTEXT pCtx);
    203 /** @}  */
    204 
    205 struct VBOXCMDVBVA_HDR;
    206 
    207 int VBoxCmdVbvaEnable(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva);
    208 int VBoxCmdVbvaDisable(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva);
    209 int VBoxCmdVbvaDestroy(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva);
    210 int VBoxCmdVbvaCreate(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva, ULONG offBuffer, ULONG cbBuffer);
    211 int VBoxCmdVbvaSubmit(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva, struct VBOXCMDVBVA_HDR *pCmd, uint32_t u32FenceID, uint32_t cbCmd);
    212 void VBoxCmdVbvaSubmitUnlock(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva, VBOXCMDVBVA_HDR* pCmd, uint32_t u32FenceID);
    213 VBOXCMDVBVA_HDR* VBoxCmdVbvaSubmitLock(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva, uint32_t cbCmd);
    214 bool VBoxCmdVbvaCheckCompletedIrq(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva);
    215 
    216 /* helper functions for filling VBVA commands */
    217 DECLINLINE(void) VBoxCVDdiPackRect(VBOXCMDVBVA_RECT *pVbvaRect, const RECT *pRect)
    218 {
    219     pVbvaRect->xLeft = (int16_t)pRect->left;
    220     pVbvaRect->yTop = (int16_t)pRect->top;
    221     pVbvaRect->xRight = (int16_t)pRect->right;
    222     pVbvaRect->yBottom = (int16_t)pRect->bottom;
    223 }
    224 
    225 DECLINLINE(void) VBoxCVDdiPackRects(VBOXCMDVBVA_RECT *paVbvaRects, const RECT *paRects, uint32_t cRects)
    226 {
    227     for (uint32_t i = 0; i < cRects; ++i)
    228     {
    229         VBoxCVDdiPackRect(&paVbvaRects[i], &paRects[i]);
    230     }
    231 
    232 }
    233 
    234 uint32_t VBoxCVDdiPTransferVRamSysBuildEls(VBOXCMDVBVA_PAGING_TRANSFER *pCmd, PMDL pMdl, uint32_t iPfn, uint32_t cPages, uint32_t cbBuffer, uint32_t *pcPagesWritten);
    235 
    236 #endif /* #ifdef VBOX_WITH_CROGL */
    237 
    238 64  #endif /* !GA_INCLUDED_SRC_WINNT_Graphics_Video_mp_wddm_VBoxMPVbva_h */
  • trunk/src/VBox/Additions/WINNT/Graphics/Video/mp/wddm/VBoxMPWddm.cpp

    r80482 → r80483
    1431 1431 }
    1432 1432
    1433 #ifdef VBOX_WITH_CROGL
    1434 BOOLEAN DxgkDdiInterruptRoutineNew(
    1435     IN CONST PVOID MiniportDeviceContext,
    1436     IN ULONG MessageNumber
    1437     )
    1438 {
    1439     RT_NOREF(MessageNumber);
    1440 //    LOGF(("ENTER, context(0x%p), msg(0x%x)", MiniportDeviceContext, MessageNumber));
    1441 
    1442     vboxVDbgBreakFv();
    1443 
    1444     PVBOXMP_DEVEXT pDevExt = (PVBOXMP_DEVEXT)MiniportDeviceContext;
    1445     BOOLEAN bOur = FALSE;
    1446     bool bNeedDpc = FALSE;
    1447     if (!VBoxCommonFromDeviceExt(pDevExt)->hostCtx.pfHostFlags) /* If HGSMI is enabled at all. */
    1448     {
    1449         WARN(("ISR called with hgsmi disabled!"));
    1450         return FALSE;
    1451     }
    1452 
    1453     VBOXVTLIST CtlList;
    1454     vboxVtListInit(&CtlList);
    1455 #ifdef VBOX_WITH_VIDEOHWACCEL
    1456     VBOXVTLIST VhwaCmdList;
    1457     vboxVtListInit(&VhwaCmdList);
    1458 #endif
    1459 
    1460     uint32_t flags = VBoxCommonFromDeviceExt(pDevExt)->hostCtx.pfHostFlags->u32HostFlags;
    1461     bOur = RT_BOOL(flags & HGSMIHOSTFLAGS_IRQ);
    1462 
    1463     if (bOur)
    1464         VBoxHGSMIClearIrq(&VBoxCommonFromDeviceExt(pDevExt)->hostCtx);
    1465 
    1466     bNeedDpc |= VBoxCmdVbvaCheckCompletedIrq(pDevExt, &pDevExt->CmdVbva);
    1467 
    1468     do {
    1469         /* re-read the flags right here to avoid a host-guest race,
    1470          * i.e. the situation:
    1471          * 1. guest reads the flags and sees HGSMIHOSTFLAGS_IRQ, i.e. HGSMIHOSTFLAGS_GCOMMAND_COMPLETED not set
    1472          * 2. host completes a guest command, sets HGSMIHOSTFLAGS_GCOMMAND_COMPLETED and raises the IRQ
    1473          * 3. guest clears the IRQ and exits  */
    1474         flags = VBoxCommonFromDeviceExt(pDevExt)->hostCtx.pfHostFlags->u32HostFlags;
    1475 
    1476         if (flags & HGSMIHOSTFLAGS_GCOMMAND_COMPLETED)
    1477         {
    1478             /* read the command offset */
    1479             HGSMIOFFSET offCmd = VBVO_PORT_READ_U32(VBoxCommonFromDeviceExt(pDevExt)->guestCtx.port);
    1480             if (offCmd == HGSMIOFFSET_VOID)
    1481             {
    1482                 WARN(("void command offset!"));
    1483                 continue;
    1484             }
    1485 
    1486             uint16_t chInfo;
    1487             uint8_t RT_UNTRUSTED_VOLATILE_HOST *pvCmd =
    1488                 HGSMIBufferDataAndChInfoFromOffset(&VBoxCommonFromDeviceExt(pDevExt)->guestCtx.heapCtx.Heap.area, offCmd, &chInfo);
    1489             if (!pvCmd)
    1490             {
    1491                 WARN(("zero cmd"));
    1492                 continue;
    1493             }
    1494 
    1495             switch (chInfo)
    1496             {
    1497                 case VBVA_CMDVBVA_CTL:
    1498                 {
    1499                     int rc = VBoxSHGSMICommandProcessCompletion(&VBoxCommonFromDeviceExt(pDevExt)->guestCtx.heapCtx,
    1500                                                                 (VBOXSHGSMIHEADER *)pvCmd, TRUE /*bool bIrq*/ , &CtlList);
    1501                     AssertRC(rc);
    1502                     break;
    1503                 }
    1504 #ifdef VBOX_WITH_VIDEOHWACCEL
    1505                 case VBVA_VHWA_CMD:
    1506                 {
    1507                     vboxVhwaPutList(&VhwaCmdList, (VBOXVHWACMD*)pvCmd);
    1508                     break;
    1509                 }
    1510 #endif /* # ifdef VBOX_WITH_VIDEOHWACCEL */
    1511                 default:
    1512                     AssertBreakpoint();
    1513             }
    1514         }
    1515         else if (flags & HGSMIHOSTFLAGS_COMMANDS_PENDING)
    1516         {
    1517             AssertBreakpoint();
    1518             /** @todo FIXME: implement !!! */
    1519         }
    1520         else
    1521             break;
    1522     } while (1);
    1523 
    1524     if (!vboxVtListIsEmpty(&CtlList))
    1525     {
    1526         vboxVtListCat(&pDevExt->CtlList, &CtlList);
    1527         bNeedDpc = TRUE;
    1528         ASMAtomicWriteU32(&pDevExt->fCompletingCommands, 1);
    1529     }
    1530 
    1531     if (!vboxVtListIsEmpty(&VhwaCmdList))
    1532     {
    1533         vboxVtListCat(&pDevExt->VhwaCmdList, &VhwaCmdList);
    1534         bNeedDpc = TRUE;
    1535         ASMAtomicWriteU32(&pDevExt->fCompletingCommands, 1);
    1536     }
    1537 
    1538     bNeedDpc |= !vboxVdmaDdiCmdIsCompletedListEmptyIsr(pDevExt);
    1539 
    1540     if (bOur)
    1541     {
    1542         if (flags & HGSMIHOSTFLAGS_VSYNC)
    1543         {
    1544             Assert(0);
    1545             DXGKARGCB_NOTIFY_INTERRUPT_DATA notify;
    1546             for (UINT i = 0; i < (UINT)VBoxCommonFromDeviceExt(pDevExt)->cDisplays; ++i)
    1547             {
    1548                 PVBOXWDDM_TARGET pTarget = &pDevExt->aTargets[i];
    1549                 if (pTarget->fConnected)
    1550                 {
    1551                     memset(&notify, 0, sizeof(DXGKARGCB_NOTIFY_INTERRUPT_DATA));
    1552                     notify.InterruptType = DXGK_INTERRUPT_CRTC_VSYNC;
    1553                     notify.CrtcVsync.VidPnTargetId = i;
    1554                     pDevExt->u.primary.DxgkInterface.DxgkCbNotifyInterrupt(pDevExt->u.primary.DxgkInterface.DeviceHandle, &notify);
    1555                     bNeedDpc = TRUE;
    1556                 }
    1557             }
    1558         }
    1559     }
    1560 
    1561     if (pDevExt->bNotifyDxDpc)
    1562         bNeedDpc = TRUE;
    1563 
    1564     if (bNeedDpc)
    1565         pDevExt->u.primary.DxgkInterface.DxgkCbQueueDpc(pDevExt->u.primary.DxgkInterface.DeviceHandle);
    1566 
    1567     return bOur;
    1568 }
    1569 #endif
    1570 
    1571 1433 static BOOLEAN DxgkDdiInterruptRoutineLegacy(
    1572 1434     IN CONST PVOID MiniportDeviceContext,
     
    2950 2812 #ifdef VBOX_WITH_CROGL
    2951 2813     if (pDevExt->fCmdVbvaEnabled)
    2952         return DxgkDdiInterruptRoutineNew(pDevExt, pdc->MessageNumber);
     2814    {
     2815        AssertFailed(); /* Should not be here; this path is not used with the 3D Gallium driver. */
     2816        return FALSE;
     2817    }
    2953 2818 #endif
    2954 2819     return DxgkDdiInterruptRoutineLegacy(pDevExt, pdc->MessageNumber);
  • trunk/src/VBox/Additions/WINNT/Graphics/Video/mp/wddm/VBoxMPWddm.h

    r80422 → r80483
    193 193 bool vboxWddmGhDisplayCheckSetInfoFromSource(PVBOXMP_DEVEXT pDevExt, PVBOXWDDM_SOURCE pSource);
    194 194
    195 #ifdef VBOX_WITH_CROGL
    196 BOOLEAN DxgkDdiInterruptRoutineNew(
    197     IN CONST PVOID MiniportDeviceContext,
    198     IN ULONG MessageNumber
    199     );
    200 #endif
    201 
    202 195 #define VBOXWDDM_IS_DISPLAYONLY() (g_VBoxDisplayOnly)
    203196