VirtualBox

Changeset 49365 in vbox for trunk/src/VBox/Additions/WINNT


Ignore:
Timestamp:
Nov 1, 2013 3:15:27 PM (11 years ago)
Author:
vboxsync
Message:

wddm: basics for ring buffer-based command submission

Location:
trunk/src/VBox/Additions/WINNT/Graphics/Video/mp/wddm
Files:
2 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/Additions/WINNT/Graphics/Video/mp/wddm/VBoxMPVbva.cpp

    r49332 r49365  
    4949                                       offBuffer,
    5050                                       cbBuffer);
    51     AssertRC(rc);
    5251    if (RT_SUCCESS(rc))
    5352    {
     
    5655        pVbva->srcId = srcId;
    5756    }
     57    else
     58    {
     59        WARN(("VBoxMPCmnMapAdapterMemory failed rc %d", rc));
     60    }
     61
    5862
    5963    return rc;
     
    9498}
    9599
    96 #ifdef VBOXVDMA_WITH_VBVA
    97 int vboxVbvaReportCmdOffset (PVBOXMP_DEVEXT pDevExt, VBOXVBVAINFO *pVbva, uint32_t offCmd)
    98 {
    99     VBOXVDMAVBVACMD cmd;
    100     cmd.offCmd = offCmd;
    101     return vboxWrite (pDevExt, pVbva, &cmd, sizeof(cmd));
    102 }
     100/* command vbva ring buffer */
     101
     102/* customized VBVA implementation */
     103
     104/* Forward declarations of internal functions. */
     105static void vboxHwBufferPlaceDataAt(PVBVAEXBUFFERCONTEXT pCtx, const void *p,
     106                                    uint32_t cb, uint32_t offset);
     107static bool vboxHwBufferWrite(PVBVAEXBUFFERCONTEXT pCtx,
     108                              PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx,
     109                              const void *p, uint32_t cb);
     110
/* Invokes the context's flush callback so the host is asked to process
 * the data pending in the ring. */
DECLINLINE(void) vboxVBVAExFlush(struct VBVAEXBUFFERCONTEXT *pCtx, PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx)
{
    pCtx->pfnFlush(pCtx, pHGSMICtx, pCtx->pvFlush);
}
     115
/* Sends a VBVAENABLE_EX request over HGSMI telling the host to enable or
 * disable this VBVA instance.
 *
 * The request is allocated on the HGSMI heap with the opcode the context
 * was configured with (pCtx->u16EnableOp) and carries the absolute VRAM
 * offset of the ring buffer.
 *
 * Returns true on success.  For an enable request the host-written
 * i32Result is checked; a disable request is considered successful once
 * submitted. */
static bool vboxVBVAExInformHost(PVBVAEXBUFFERCONTEXT pCtx,
                               PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx, bool bEnable)
{
    bool bRc = false;

#if 0  /* All callers check this */
    if (ppdev->bHGSMISupported)
#endif
    {
        void *p = VBoxHGSMIBufferAlloc(pHGSMICtx,
                                       sizeof (VBVAENABLE_EX),
                                       HGSMI_CH_VBVA,
                                       pCtx->u16EnableOp);
        if (!p)
        {
            LogFunc(("HGSMIHeapAlloc failed\n"));
        }
        else
        {
            VBVAENABLE_EX *pEnable = (VBVAENABLE_EX *)p;

            pEnable->Base.u32Flags  = bEnable? VBVA_F_ENABLE: VBVA_F_DISABLE;
            pEnable->Base.u32Offset = pCtx->offVRAMBuffer;
            /* Pre-set failure; the host overwrites this on success. */
            pEnable->Base.i32Result = VERR_NOT_SUPPORTED;
            /* u32Offset above is an absolute VRAM offset, not heap-relative. */
            pEnable->Base.u32Flags |= VBVA_F_ABSOFFSET;

            VBoxHGSMIBufferSubmit(pHGSMICtx, p);

            if (bEnable)
            {
                bRc = RT_SUCCESS(pEnable->Base.i32Result);
            }
            else
            {
                bRc = true;
            }

            VBoxHGSMIBufferFree(pHGSMICtx, p);
        }
    }

    return bRc;
}
     159
     160/*
     161 * Public hardware buffer methods.
     162 */
/* Initializes the shared VBVABUFFER structure in VRAM and asks the host
 * to enable the ring via vboxVBVAExInformHost.
 *
 * On failure the context is rolled back with VBoxVBVAExDisable.
 * Returns true when the host accepted the enable request. */
RTDECL(bool) VBoxVBVAExEnable(PVBVAEXBUFFERCONTEXT pCtx,
                            PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx,
                            VBVABUFFER *pVBVA)
{
    bool bRc = false;

    LogFlowFunc(("pVBVA %p\n", pVBVA));

#if 0  /* All callers check this */
    if (ppdev->bHGSMISupported)
#endif
    {
        LogFunc(("pVBVA %p vbva off 0x%x\n", pVBVA, pCtx->offVRAMBuffer));

        /* Reset the shared header: no pending data, no records. */
        pVBVA->hostFlags.u32HostEvents      = 0;
        pVBVA->hostFlags.u32SupportedOrders = 0;
        pVBVA->off32Data          = 0;
        pVBVA->off32Free          = 0;
        memset(pVBVA->aRecords, 0, sizeof (pVBVA->aRecords));
        pVBVA->indexRecordFirst   = 0;
        pVBVA->indexRecordFree    = 0;
        pVBVA->cbPartialWriteThreshold = 256;
        /* Usable data bytes: total buffer minus the header, with the
         * au8Data member (counted inside sizeof (VBVABUFFER)) added back. */
        pVBVA->cbData             = pCtx->cbBuffer - sizeof (VBVABUFFER) + sizeof (pVBVA->au8Data);

        pCtx->fHwBufferOverflow = false;
        pCtx->pRecord    = NULL;
        pCtx->pVBVA      = pVBVA;

        bRc = vboxVBVAExInformHost(pCtx, pHGSMICtx, true);
    }

    if (!bRc)
    {
        VBoxVBVAExDisable(pCtx, pHGSMICtx);
    }

    return bRc;
}
     201
     202RTDECL(void) VBoxVBVAExDisable(PVBVAEXBUFFERCONTEXT pCtx,
     203                             PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx)
     204{
     205    LogFlowFunc(("\n"));
     206
     207    pCtx->fHwBufferOverflow = false;
     208    pCtx->pRecord           = NULL;
     209    pCtx->pVBVA             = NULL;
     210
     211    vboxVBVAExInformHost(pCtx, pHGSMICtx, false);
     212
     213    return;
     214}
     215
/* Opens a new record in the ring for a command submission.
 *
 * Fails (returns false) when VBVA is not enabled, or when the records
 * queue is still full even after asking the host to flush.  On success
 * the record is marked VBVA_F_RECORD_PARTIAL and remembered in
 * pCtx->pRecord until VBoxVBVAExBufferEndUpdate closes it. */
RTDECL(bool) VBoxVBVAExBufferBeginUpdate(PVBVAEXBUFFERCONTEXT pCtx,
                                       PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx)
{
    bool bRc = false;

    // LogFunc(("flags = 0x%08X\n", pCtx->pVBVA? pCtx->pVBVA->u32HostEvents: -1));

    if (   pCtx->pVBVA
        && (pCtx->pVBVA->hostFlags.u32HostEvents & VBVA_F_MODE_ENABLED))
    {
        uint32_t indexRecordNext;

        /* Begin/End must be strictly paired. */
        Assert(!pCtx->fHwBufferOverflow);
        Assert(pCtx->pRecord == NULL);

        indexRecordNext = (pCtx->pVBVA->indexRecordFree + 1) % VBVA_MAX_RECORDS;

        if (indexRecordNext == pCtx->indexRecordFirstUncompleted)
        {
            /* All slots in the records queue are used. */
            vboxVBVAExFlush (pCtx, pHGSMICtx);
        }

        if (indexRecordNext == pCtx->indexRecordFirstUncompleted)
        {
            /* Even after flush there is no place. Fail the request. */
            LogFunc(("no space in the queue of records!!! first %d, last %d\n",
                    indexRecordNext, pCtx->pVBVA->indexRecordFree));
        }
        else
        {
            /* Initialize the record. */
            VBVARECORD *pRecord = &pCtx->pVBVA->aRecords[pCtx->pVBVA->indexRecordFree];

            /* Partial flag keeps the host from consuming it while we write. */
            pRecord->cbRecord = VBVA_F_RECORD_PARTIAL;

            pCtx->pVBVA->indexRecordFree = indexRecordNext;

            // LogFunc(("indexRecordNext = %d\n", indexRecordNext));

            /* Remember which record we are using. */
            pCtx->pRecord = pRecord;

            bRc = true;
        }
    }

    return bRc;
}
     265
/* Closes the record opened by VBoxVBVAExBufferBeginUpdate: clears the
 * VBVA_F_RECORD_PARTIAL flag so the host may process it, and resets the
 * per-context record bookkeeping. */
RTDECL(void) VBoxVBVAExBufferEndUpdate(PVBVAEXBUFFERCONTEXT pCtx)
{
    VBVARECORD *pRecord;

    // LogFunc(("\n"));

    Assert(pCtx->pVBVA);

    pRecord = pCtx->pRecord;
    Assert(pRecord && (pRecord->cbRecord & VBVA_F_RECORD_PARTIAL));

    /* Mark the record completed. */
    pRecord->cbRecord &= ~VBVA_F_RECORD_PARTIAL;

    pCtx->fHwBufferOverflow = false;
    pCtx->pRecord = NULL;

    return;
}
     285
     286DECLINLINE(bool) vboxVBVAExIsEntryInRange(uint32_t u32First, uint32_t u32Entry, uint32_t u32Free)
     287{
     288    return (     u32First != u32Free
     289             && (
     290                     (u32First < u32Free && u32Entry >= u32First && u32Entry < u32Free)
     291                  || (u32First > u32Free && (u32Entry >= u32First || u32Entry < u32Free))
     292                 )
     293           );
     294}
     295
#ifdef DEBUG

/* Debug-only sanity check run after a completion has been accounted:
 * the host's first-record index and data offset must still lie inside
 * the range of entries the guest considers uncompleted. */
DECLINLINE(void) vboxHwBufferVerifyCompleted(PVBVAEXBUFFERCONTEXT pCtx)
{
    VBVABUFFER *pVBVA = pCtx->pVBVA;
    if (!vboxVBVAExIsEntryInRange(pCtx->indexRecordFirstUncompleted, pVBVA->indexRecordFirst, pVBVA->indexRecordFree))
    {
        WARN(("invalid record set"));
    }

    if (!vboxVBVAExIsEntryInRange(pCtx->off32DataUncompleted, pVBVA->off32Data, pVBVA->off32Free))
    {
        WARN(("invalid data set"));
    }
}
#endif
     312
     313/*
     314 * Private operations.
     315 */
     316static uint32_t vboxHwBufferAvail(PVBVAEXBUFFERCONTEXT pCtx, const VBVABUFFER *pVBVA)
     317{
     318    int32_t i32Diff = pCtx->off32DataUncompleted - pVBVA->off32Free;
     319
     320    return i32Diff > 0? i32Diff: pVBVA->cbData + i32Diff;
     321}
     322
     323static uint32_t vboxHwBufferContiguousAvail(PVBVAEXBUFFERCONTEXT pCtx, const VBVABUFFER *pVBVA)
     324{
     325    int32_t i32Diff = pCtx->off32DataUncompleted - pVBVA->off32Free;
     326
     327    return i32Diff > 0 ? i32Diff: pVBVA->cbData - pVBVA->off32Free;
     328}
     329
/* Copies cb bytes from p into the ring data area at the given offset,
 * splitting the copy in two when it wraps past the end of the buffer.
 * The caller must have verified that cb bytes are available. */
static void vboxHwBufferPlaceDataAt(PVBVAEXBUFFERCONTEXT pCtx, const void *p,
                                    uint32_t cb, uint32_t offset)
{
    VBVABUFFER *pVBVA = pCtx->pVBVA;
    uint32_t u32BytesTillBoundary = pVBVA->cbData - offset;
    uint8_t  *dst                 = &pVBVA->au8Data[offset];
    int32_t i32Diff               = cb - u32BytesTillBoundary;

    if (i32Diff <= 0)
    {
        /* Chunk will not cross buffer boundary. */
        memcpy (dst, p, cb);
    }
    else
    {
        /* Chunk crosses buffer boundary. */
        memcpy (dst, p, u32BytesTillBoundary);
        memcpy (&pVBVA->au8Data[0], (uint8_t *)p + u32BytesTillBoundary, i32Diff);
    }

    return;
}
     352
/* Writes cb bytes from p into the ring inside the currently open record.
 *
 * When the ring is too full the host is asked to flush; if space is
 * still short, the data is trickled in as partial writes while keeping
 * cbPartialWriteThreshold bytes in reserve.  Returns false when VBVA is
 * not set up, a previous overflow was latched, or the ring cannot drain
 * at all (pCtx->fHwBufferOverflow is then set). */
static bool vboxHwBufferWrite(PVBVAEXBUFFERCONTEXT pCtx,
                              PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx,
                              const void *p, uint32_t cb)
{
    VBVARECORD *pRecord;
    uint32_t cbHwBufferAvail;

    uint32_t cbWritten = 0;

    VBVABUFFER *pVBVA = pCtx->pVBVA;
    Assert(pVBVA);

    if (!pVBVA || pCtx->fHwBufferOverflow)
    {
        return false;
    }

    /* A record must have been opened with VBoxVBVAExBufferBeginUpdate. */
    Assert(pVBVA->indexRecordFirst != pVBVA->indexRecordFree);
    Assert(pCtx->indexRecordFirstUncompleted != pVBVA->indexRecordFree);

    pRecord = pCtx->pRecord;
    Assert(pRecord && (pRecord->cbRecord & VBVA_F_RECORD_PARTIAL));

    // LogFunc(("%d\n", cb));

    cbHwBufferAvail = vboxHwBufferAvail(pCtx, pVBVA);

    while (cb > 0)
    {
        uint32_t cbChunk = cb;

        // LogFunc(("pVBVA->off32Free %d, pRecord->cbRecord 0x%08X, cbHwBufferAvail %d, cb %d, cbWritten %d\n",
        //             pVBVA->off32Free, pRecord->cbRecord, cbHwBufferAvail, cb, cbWritten));

        if (cbChunk >= cbHwBufferAvail)
        {
            LogFunc(("1) avail %d, chunk %d\n", cbHwBufferAvail, cbChunk));

            /* Ask the host to consume pending data, then re-check. */
            vboxVBVAExFlush(pCtx, pHGSMICtx);

            cbHwBufferAvail = vboxHwBufferAvail(pCtx, pVBVA);

            if (cbChunk >= cbHwBufferAvail)
            {
                WARN(("no place for %d bytes. Only %d bytes available after flush. Going to partial writes.\n",
                            cb, cbHwBufferAvail));

                if (cbHwBufferAvail <= pVBVA->cbPartialWriteThreshold)
                {
                    /* Latch the overflow; subsequent writes fail fast. */
                    WARN(("Buffer overflow!!!\n"));
                    pCtx->fHwBufferOverflow = true;
                    Assert(false);
                    return false;
                }

                /* Write what fits while keeping the threshold in reserve. */
                cbChunk = cbHwBufferAvail - pVBVA->cbPartialWriteThreshold;
            }
        }

        Assert(cbChunk <= cb);
        Assert(cbChunk <= vboxHwBufferAvail(pCtx, pVBVA));

        vboxHwBufferPlaceDataAt (pCtx, (uint8_t *)p + cbWritten, cbChunk, pVBVA->off32Free);

        pVBVA->off32Free   = (pVBVA->off32Free + cbChunk) % pVBVA->cbData;
        pRecord->cbRecord += cbChunk;
        cbHwBufferAvail -= cbChunk;

        cb        -= cbChunk;
        cbWritten += cbChunk;
    }

    return true;
}
     427
     428/*
     429 * Public writer to the hardware buffer.
     430 */
     431RTDECL(uint32_t) VBoxVBVAExGetFreeTail(PVBVAEXBUFFERCONTEXT pCtx)
     432{
     433    VBVABUFFER *pVBVA = pCtx->pVBVA;
     434    if (pVBVA->off32Data <= pVBVA->off32Free)
     435        return pVBVA->cbData - pVBVA->off32Free;
     436    return 0;
     437}
     438
     439RTDECL(void*) VBoxVBVAExAllocContiguous(PVBVAEXBUFFERCONTEXT pCtx, PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx, uint32_t cb)
     440{
     441    VBVARECORD *pRecord;
     442    uint32_t cbHwBufferContiguousAvail;
     443    uint32_t offset;
     444
     445    VBVABUFFER *pVBVA = pCtx->pVBVA;
     446    Assert(pVBVA);
     447
     448    if (!pVBVA || pCtx->fHwBufferOverflow)
     449    {
     450        return NULL;
     451    }
     452
     453    Assert(pVBVA->indexRecordFirst != pVBVA->indexRecordFree);
     454    Assert(pCtx->indexRecordFirstUncompleted != pVBVA->indexRecordFree);
     455
     456    pRecord = pCtx->pRecord;
     457    Assert(pRecord && (pRecord->cbRecord & VBVA_F_RECORD_PARTIAL));
     458
     459    // LogFunc(("%d\n", cb));
     460
     461    if (pVBVA->cbData < cb)
     462    {
     463        WARN(("requested to allocate buffer of size %d bigger than the VBVA ring buffer size %d", cb, pVBVA->cbData));
     464        return NULL;
     465    }
     466
     467    cbHwBufferContiguousAvail = vboxHwBufferContiguousAvail(pCtx, pVBVA);
     468
     469    if (cbHwBufferContiguousAvail < cb)
     470    {
     471        if (cb < pVBVA->cbData - pVBVA->off32Free)
     472        {
     473            /* the entire contiguous part is smaller than the requested buffer */
     474            return NULL;
     475        }
     476
     477        vboxVBVAExFlush(pCtx, pHGSMICtx);
     478
     479        cbHwBufferContiguousAvail = vboxHwBufferContiguousAvail(pCtx, pVBVA);
     480        if (cbHwBufferContiguousAvail < cb)
     481        {
     482            /* this is really bad - the host did not clean up buffer even after we requested it to flush */
     483            WARN(("Host did not clean up the buffer!"));
     484            return NULL;
     485        }
     486    }
     487
     488    offset = pVBVA->off32Free;
     489
     490    pVBVA->off32Free = (pVBVA->off32Free + cb) % pVBVA->cbData;
     491    pRecord->cbRecord += cb;
     492
     493    return &pVBVA->au8Data[offset];
     494}
     495
     496RTDECL(bool) VBoxVBVAExIsProcessing(PVBVAEXBUFFERCONTEXT pCtx)
     497{
     498    uint32_t u32HostEvents = pCtx->pVBVA->hostFlags.u32HostEvents;
     499    return !!(u32HostEvents & VBVA_F_STATE_PROCESSING);
     500}
     501
/* Accounts one completed command: advances the guest-side uncompleted
 * record index and data offset past the record the host just finished. */
RTDECL(void) VBoxVBVAExCBufferCompleted(PVBVAEXBUFFERCONTEXT pCtx)
{
    VBVABUFFER *pVBVA = pCtx->pVBVA;
    /* cbRecord no longer carries the PARTIAL flag once completed. */
    uint32_t cbBuffer = pVBVA->aRecords[pCtx->indexRecordFirstUncompleted].cbRecord;
    pCtx->indexRecordFirstUncompleted = (pCtx->indexRecordFirstUncompleted + 1) % VBVA_MAX_RECORDS;
    pCtx->off32DataUncompleted = (pCtx->off32DataUncompleted + cbBuffer) % pVBVA->cbData;
#ifdef DEBUG
    vboxHwBufferVerifyCompleted(pCtx);
#endif
}
     512
     513RTDECL(bool) VBoxVBVAExWrite(PVBVAEXBUFFERCONTEXT pCtx,
     514                           PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx,
     515                           const void *pv, uint32_t cb)
     516{
     517    return vboxHwBufferWrite(pCtx, pHGSMICtx, pv, cb);
     518}
     519
     520RTDECL(bool) VBoxVBVAExOrderSupported(PVBVAEXBUFFERCONTEXT pCtx, unsigned code)
     521{
     522    VBVABUFFER *pVBVA = pCtx->pVBVA;
     523
     524    if (!pVBVA)
     525    {
     526        return false;
     527    }
     528
     529    if (pVBVA->hostFlags.u32SupportedOrders & (1 << code))
     530    {
     531        return true;
     532    }
     533
     534    return false;
     535}
     536
     537RTDECL(void) VBoxVBVAExSetupBufferContext(PVBVAEXBUFFERCONTEXT pCtx,
     538                                        uint32_t offVRAMBuffer,
     539                                        uint32_t cbBuffer,
     540                                        PFNVBVAEXBUFFERFLUSH pfnFlush,
     541                                        void *pvFlush,
     542                                        uint16_t u16EnableOp)
     543{
     544    memset(pCtx, 0, RT_OFFSETOF(VBVAEXBUFFERCONTEXT, pVBVA));
     545    pCtx->offVRAMBuffer = offVRAMBuffer;
     546    pCtx->cbBuffer      = cbBuffer;
     547    pCtx->u16EnableOp   = u16EnableOp;
     548    pCtx->pfnFlush = pfnFlush;
     549    pCtx->pvFlush = pvFlush;
     550}
     551
/* Returns a pointer to the command the iterator currently points at,
 * optionally reporting its size (*pcbBuffer) and whether the host has
 * already processed it (*pfProcessed).  Returns NULL for a freshly
 * opened record that has no data yet.
 *
 * NOTE(review): the '==' test only catches a partial record with zero
 * bytes written; a partial record that already has data (flag | length)
 * is NOT skipped here -- confirm whether a '& VBVA_F_RECORD_PARTIAL'
 * test was intended. */
static void* vboxVBVAExIterCur(PVBVAEXBUFFERITERBASE pIter, struct VBVABUFFER *pVBVA, uint32_t *pcbBuffer, bool *pfProcessed)
{
    uint32_t cbRecord = pVBVA->aRecords[pIter->iCurRecord].cbRecord;
    if (cbRecord == VBVA_F_RECORD_PARTIAL)
        return NULL;
    if (pcbBuffer)
        *pcbBuffer = cbRecord;
    if (pfProcessed)
        /* Processed == no longer inside the host's pending [first, free) range. */
        *pfProcessed = !vboxVBVAExIsEntryInRange(pVBVA->indexRecordFirst, pIter->iCurRecord, pVBVA->indexRecordFree);
    return &pVBVA->au8Data[pIter->off32CurCmd];
}
     563
     564DECLINLINE(uint32_t) vboxVBVAExSubst(uint32_t x, uint32_t val, uint32_t maxVal)
     565{
     566    int32_t result = (int32_t)(x - val);
     567    return result >= 0 ? (uint32_t)result : maxVal - (((uint32_t)(-result)) % maxVal);
     568}
     569
/* Initializes a backward iterator at the most recently submitted
 * command (the record just before indexRecordFree), or at an empty
 * position when there is nothing uncompleted to iterate. */
RTDECL(void) VBoxVBVAExBIterInit(PVBVAEXBUFFERCONTEXT pCtx, PVBVAEXBUFFERBACKWARDITER pIter)
{
    struct VBVABUFFER *pVBVA = pCtx->pVBVA;
    pIter->Base.pCtx = pCtx;
    uint32_t iCurRecord = vboxVBVAExSubst(pVBVA->indexRecordFree, 1, VBVA_MAX_RECORDS);
    if (vboxVBVAExIsEntryInRange(pCtx->indexRecordFirstUncompleted, iCurRecord, pVBVA->indexRecordFree))
    {
        /* even if the command gets completed by the time we're doing the pCtx->pVBVA->aRecords[iCurRecord].cbRecord below,
         * the pCtx->pVBVA->aRecords[iCurRecord].cbRecord will still be valid, as it can only be modified by a submitter,
         * and we are in a submitter context now */
        pIter->Base.iCurRecord = iCurRecord;
        pIter->Base.off32CurCmd = vboxVBVAExSubst(pVBVA->off32Free, pCtx->pVBVA->aRecords[iCurRecord].cbRecord, pVBVA->cbData);
    }
    else
    {
        /* no data */
        pIter->Base.iCurRecord = pVBVA->indexRecordFree;
        pIter->Base.off32CurCmd = pVBVA->off32Free;
    }
}
     590
/* Returns the command the backward iterator points at and steps it one
 * record towards older submissions.  Returns NULL once the iterator
 * leaves the range of uncompleted records. */
RTDECL(void*) VBoxVBVAExBIterNext(PVBVAEXBUFFERBACKWARDITER pIter, uint32_t *pcbBuffer, bool *pfProcessed)
{
    PVBVAEXBUFFERCONTEXT pCtx = pIter->Base.pCtx;
    struct VBVABUFFER *pVBVA = pCtx->pVBVA;
    uint32_t indexRecordFirstUncompleted = pCtx->indexRecordFirstUncompleted;
    if (!vboxVBVAExIsEntryInRange(indexRecordFirstUncompleted, pIter->Base.iCurRecord, pVBVA->indexRecordFree))
        return NULL;

    void *pvBuffer = vboxVBVAExIterCur(&pIter->Base, pVBVA, pcbBuffer, pfProcessed);
    AssertRelease(pvBuffer);

    /* even if the command gets completed by the time we're doing the pCtx->pVBVA->aRecords[pIter->Base.iCurRecord].cbRecord below,
     * the pCtx->pVBVA->aRecords[pIter->Base.iCurRecord].cbRecord will still be valid, as it can only be modified by a submitter,
     * and we are in a submitter context now */
    pIter->Base.iCurRecord = vboxVBVAExSubst(pIter->Base.iCurRecord, 1, VBVA_MAX_RECORDS);
    pIter->Base.off32CurCmd = vboxVBVAExSubst(pIter->Base.off32CurCmd, pCtx->pVBVA->aRecords[pIter->Base.iCurRecord].cbRecord, pVBVA->cbData);

    return pvBuffer;
}
     610
     611RTDECL(void) VBoxVBVAExCFIterInit(PVBVAEXBUFFERCONTEXT pCtx, PVBVAEXBUFFERFORWARDITER pIter)
     612{
     613    pIter->Base.pCtx = pCtx;
     614    pIter->Base.iCurRecord = pCtx->indexRecordFirstUncompleted;
     615    pIter->Base.off32CurCmd = pCtx->off32DataUncompleted;
     616}
     617
/* Returns the command the forward iterator points at and advances it.
 * Returns NULL when the iterator reaches indexRecordFree or hits a
 * record that is still being written (vboxVBVAExIterCur returns NULL). */
RTDECL(void*) VBoxVBVAExCFIterNext(PVBVAEXBUFFERFORWARDITER pIter, uint32_t *pcbBuffer, bool *pfProcessed)
{
    PVBVAEXBUFFERCONTEXT pCtx = pIter->Base.pCtx;
    struct VBVABUFFER *pVBVA = pCtx->pVBVA;
    uint32_t indexRecordFree = pVBVA->indexRecordFree;
    if (!vboxVBVAExIsEntryInRange(pCtx->indexRecordFirstUncompleted, pIter->Base.iCurRecord, indexRecordFree))
        return NULL;

    uint32_t cbBuffer;
    void *pvData = vboxVBVAExIterCur(&pIter->Base, pVBVA, &cbBuffer, pfProcessed);
    if (!pvData)
        return NULL;

    pIter->Base.iCurRecord = (pIter->Base.iCurRecord + 1) % VBVA_MAX_RECORDS;
    pIter->Base.off32CurCmd = (pIter->Base.off32CurCmd + cbBuffer) % pVBVA->cbData;

    /* Report the size only after a successful fetch. */
    if (pcbBuffer)
        *pcbBuffer = cbBuffer;

    return pvData;
}
     639
     640/**/
     641
     642int VBoxCmdVbvaEnable(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva)
     643{
     644    if (VBoxVBVAExEnable(&pVbva->Vbva, &VBoxCommonFromDeviceExt(pDevExt)->guestCtx, pVbva->Vbva.pVBVA))
     645        return VINF_SUCCESS;
     646
     647    WARN(("VBoxVBVAExEnable failed!"));
     648    return VERR_GENERAL_FAILURE;
     649}
     650
     651int VBoxCmdVbvaDisable(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva)
     652{
     653    VBoxVBVAExDisable(&pVbva->Vbva, &VBoxCommonFromDeviceExt(pDevExt)->guestCtx);
     654    return VINF_SUCCESS;
     655}
     656
     657int VBoxCmdVbvaDestroy(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva)
     658{
     659    int rc = VINF_SUCCESS;
     660    VBoxMPCmnUnmapAdapterMemory(VBoxCommonFromDeviceExt(pDevExt), (void**)&pVbva->Vbva.pVBVA);
     661    memset(pVbva, 0, sizeof (*pVbva));
     662    return rc;
     663}
     664
/* Raises the DXGK completion/preemption/fault notification for the
 * given fence.  Runs at interrupt time -- callers synchronize via
 * DxgkCbSynchronizeExecution (see vboxCmdVbvaDdiNotifyComplete). */
static void vboxCmdVbvaDdiNotifyCompleteIrq(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva, UINT u32FenceId, DXGK_INTERRUPT_TYPE enmComplType)
{
    DXGKARGCB_NOTIFY_INTERRUPT_DATA notify;
    memset(&notify, 0, sizeof(DXGKARGCB_NOTIFY_INTERRUPT_DATA));
    switch (enmComplType)
    {
        case DXGK_INTERRUPT_DMA_COMPLETED:
            notify.InterruptType = DXGK_INTERRUPT_DMA_COMPLETED;
            notify.DmaCompleted.SubmissionFenceId = u32FenceId;
            notify.DmaCompleted.NodeOrdinal = pVbva->idNode;
            break;

        case DXGK_INTERRUPT_DMA_PREEMPTED:
            notify.InterruptType = DXGK_INTERRUPT_DMA_PREEMPTED;
            notify.DmaPreempted.PreemptionFenceId = u32FenceId;
            notify.DmaPreempted.NodeOrdinal = pVbva->idNode;
            notify.DmaPreempted.LastCompletedFenceId = pVbva->u32FenceCompleted;
            break;

        case DXGK_INTERRUPT_DMA_FAULTED:
            /* Faults are unexpected in this path. */
            Assert(0);
            notify.InterruptType = DXGK_INTERRUPT_DMA_FAULTED;
            notify.DmaFaulted.FaultedFenceId = u32FenceId;
            notify.DmaFaulted.Status = STATUS_UNSUCCESSFUL; /* @todo: better status ? */
            notify.DmaFaulted.NodeOrdinal = pVbva->idNode;
            break;

        default:
            WARN(("unrecognized completion type %d", enmComplType));
            break;
    }

    pDevExt->u.primary.DxgkInterface.DxgkCbNotifyInterrupt(pDevExt->u.primary.DxgkInterface.DeviceHandle, &notify);
}
     699
/* Context packet for vboxCmdVbvaDdiNotifyCompleteCb, carried through
 * DxgkCbSynchronizeExecution to the synchronized callback. */
typedef struct VBOXCMDVBVA_NOTIFYCOMPLETED_CB
{
    PVBOXMP_DEVEXT pDevExt;           /* device extension */
    VBOXCMDVBVA *pVbva;               /* the command-vbva instance */
    UINT u32FenceId;                  /* fence being reported */
    DXGK_INTERRUPT_TYPE enmComplType; /* which DXGK notification to raise */
} VBOXCMDVBVA_NOTIFYCOMPLETED_CB, *PVBOXCMDVBVA_NOTIFYCOMPLETED_CB;
     707
/* DxgkCbSynchronizeExecution callback: raises the interrupt-time
 * notification and queues the DPC so dxgkrnl processes it. */
static BOOLEAN vboxCmdVbvaDdiNotifyCompleteCb(PVOID pvContext)
{
    PVBOXCMDVBVA_NOTIFYCOMPLETED_CB pData = (PVBOXCMDVBVA_NOTIFYCOMPLETED_CB)pvContext;
    vboxCmdVbvaDdiNotifyCompleteIrq(pData->pDevExt, pData->pVbva, pData->u32FenceId, pData->enmComplType);

    pData->pDevExt->u.primary.DxgkInterface.DxgkCbQueueDpc(pData->pDevExt->u.primary.DxgkInterface.DeviceHandle);
    return TRUE;
}
     716
     717static int vboxCmdVbvaDdiNotifyComplete(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva, UINT u32FenceId, DXGK_INTERRUPT_TYPE enmComplType)
     718{
     719    VBOXCMDVBVA_NOTIFYCOMPLETED_CB Data;
     720    Data.pDevExt = pDevExt;
     721    Data.pVbva = pVbva;
     722    Data.u32FenceId = u32FenceId;
     723    Data.enmComplType = enmComplType;
     724    BOOLEAN bDummy;
     725    NTSTATUS Status = pDevExt->u.primary.DxgkInterface.DxgkCbSynchronizeExecution(
     726            pDevExt->u.primary.DxgkInterface.DeviceHandle,
     727            vboxCmdVbvaDdiNotifyCompleteCb,
     728            &Data,
     729            0, /* IN ULONG MessageNumber */
     730            &bDummy);
     731    if (!NT_SUCCESS(Status))
     732    {
     733        WARN(("DxgkCbSynchronizeExecution failed Status %#x", Status));
     734        return VERR_GENERAL_FAILURE;
     735    }
     736    return Status;
     737}
     738
/* Sends a VBVA_CMDVBVA_FLUSH request asking the host to process and
 * complete pending ring data.  fBufferOverflow tells the host the guest
 * ring is full and needs urgent draining.  Returns an IPRT status. */
static int vboxCmdVbvaFlush(PVBOXMP_DEVEXT pDevExt, HGSMIGUESTCOMMANDCONTEXT *pCtx, bool fBufferOverflow)
{
    /* Issue the flush command. */
    VBVACMDVBVAFLUSH *pFlush = (VBVACMDVBVAFLUSH*)VBoxHGSMIBufferAlloc(pCtx,
                                   sizeof (VBVACMDVBVAFLUSH),
                                   HGSMI_CH_VBVA,
                                   VBVA_CMDVBVA_FLUSH);
    if (!pFlush)
    {
        WARN(("VBoxHGSMIBufferAlloc failed\n"));
        return VERR_OUT_OF_RESOURCES;
    }

    pFlush->u32Flags = fBufferOverflow ?  VBVACMDVBVAFLUSH_F_GUEST_BUFFER_OVERFLOW : 0;

    VBoxHGSMIBufferSubmit(pCtx, pFlush);

    VBoxHGSMIBufferFree(pCtx, pFlush);

    return VINF_SUCCESS;
}
     760
     761static void vboxCmdVbvaCheckCompleted(PVBOXMP_DEVEXT pDevExt, bool fPingHost, HGSMIGUESTCOMMANDCONTEXT *pCtx, bool fBufferOverflow)
     762{
     763    if (fPingHost)
     764        vboxCmdVbvaFlush(pDevExt, pCtx, fBufferOverflow);
     765
     766    vboxWddmCallIsr(pDevExt);
     767}
     768
/* PFNVBVAEXBUFFERFLUSH callback installed by VBoxCmdVbvaCreate; pvFlush
 * is the device extension.
 * NOTE(review): the name is missing a 'b' ("voxCmd...") and the call
 * always reports fBufferOverflow=true to the host even for routine
 * flushes -- confirm both are intended. */
DECLCALLBACK(void) voxCmdVbvaFlushCb(struct VBVAEXBUFFERCONTEXT *pCtx, PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx, void *pvFlush)
{
    PVBOXMP_DEVEXT pDevExt = (PVBOXMP_DEVEXT)pvFlush;

    vboxCmdVbvaCheckCompleted(pDevExt, true /*fPingHost*/, pHGSMICtx, true /*fBufferOverflow*/);
}
     775
/* Maps the adapter memory holding the shared VBVABUFFER and sets up the
 * buffer context with our flush callback and the VBVA_CMDVBVA_ENABLE
 * opcode.  Returns an IPRT status code. */
int VBoxCmdVbvaCreate(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva, ULONG offBuffer, ULONG cbBuffer)
{
    memset(pVbva, 0, sizeof (*pVbva));

    int rc = VBoxMPCmnMapAdapterMemory(VBoxCommonFromDeviceExt(pDevExt),
                                       (void**)&pVbva->Vbva.pVBVA,
                                       offBuffer,
                                       cbBuffer);
    if (RT_SUCCESS(rc))
    {
        Assert(pVbva->Vbva.pVBVA);
        VBoxVBVAExSetupBufferContext(&pVbva->Vbva, offBuffer, cbBuffer, voxCmdVbvaFlushCb, pDevExt, VBVA_CMDVBVA_ENABLE);
    }
    else
    {
        WARN(("VBoxMPCmnMapAdapterMemory failed rc %d", rc));
    }

    return rc;
}
     796
     797int VBoxCmdVbvaSubmit(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva, struct VBOXCMDVBVA_HDR *pCmd, uint32_t cbCmd)
     798{
     799    int rc = VINF_SUCCESS;
     800
     801    pCmd->u8State = VBOXCMDVBVA_STATE_SUBMITTED;
     802    pVbva->u32FenceSubmitted = pCmd->u32FenceID;
     803
     804    if (VBoxVBVAExGetSize(&pVbva->Vbva) > cbCmd)
     805    {
     806        WARN(("buffer does not fit the vbva buffer, we do not support splitting buffers"));
     807        return VERR_NOT_SUPPORTED;
     808    }
     809
     810    if (!VBoxVBVAExBufferBeginUpdate(&pVbva->Vbva, &VBoxCommonFromDeviceExt(pDevExt)->guestCtx))
     811    {
     812        WARN(("VBoxVBVAExBufferBeginUpdate failed!"));
     813        return VERR_GENERAL_FAILURE;
     814    }
     815
     816    void* pvBuffer = VBoxVBVAExAllocContiguous(&pVbva->Vbva, &VBoxCommonFromDeviceExt(pDevExt)->guestCtx, cbCmd);
     817    if (!pvBuffer)
     818    {
     819        WARN(("failed to allocate contiguous buffer, trying nopping the tail"));
     820        uint32_t cbTail = VBoxVBVAExGetFreeTail(&pVbva->Vbva);
     821        if (!cbTail)
     822        {
     823            WARN(("this is not a free tail case, cbTail is NULL"));
     824            return VERR_BUFFER_OVERFLOW;
     825        }
     826
     827        Assert(cbTail < cbCmd);
     828
     829        pvBuffer = VBoxVBVAExAllocContiguous(&pVbva->Vbva, &VBoxCommonFromDeviceExt(pDevExt)->guestCtx, cbTail);
     830
     831        Assert(pvBuffer);
     832
     833        *((uint8_t*)pvBuffer) = VBOXCMDVBVA_OPTYPE_NOP;
     834
     835        VBoxVBVAExBufferEndUpdate(&pVbva->Vbva);
     836
     837        if (!VBoxVBVAExBufferBeginUpdate(&pVbva->Vbva, &VBoxCommonFromDeviceExt(pDevExt)->guestCtx))
     838        {
     839            WARN(("VBoxVBVAExBufferBeginUpdate 2 failed!"));
     840            return VERR_GENERAL_FAILURE;
     841        }
     842
     843        pvBuffer = VBoxVBVAExAllocContiguous(&pVbva->Vbva, &VBoxCommonFromDeviceExt(pDevExt)->guestCtx, cbCmd);
     844        if (!pvBuffer)
     845        {
     846            WARN(("failed to allocate contiguous buffer, failing"));
     847            return VERR_GENERAL_FAILURE;
     848        }
     849    }
     850
     851    Assert(pvBuffer);
     852
     853    memcpy(pvBuffer, pCmd, cbCmd);
     854
     855    VBoxVBVAExBufferEndUpdate(&pVbva->Vbva);
     856
     857    if (!VBoxVBVAExIsProcessing(&pVbva->Vbva))
     858    {
     859        /* Issue the submit command. */
     860        HGSMIGUESTCOMMANDCONTEXT *pCtx = &VBoxCommonFromDeviceExt(pDevExt)->guestCtx;
     861        VBVACMDVBVASUBMIT *pSubmit = (VBVACMDVBVASUBMIT*)VBoxHGSMIBufferAlloc(pCtx,
     862                                       sizeof (VBVACMDVBVASUBMIT),
     863                                       HGSMI_CH_VBVA,
     864                                       VBVA_CMDVBVA_SUBMIT);
     865        if (!pSubmit)
     866        {
     867            WARN(("VBoxHGSMIBufferAlloc failed\n"));
     868            return VERR_OUT_OF_RESOURCES;
     869        }
     870
     871        pSubmit->u32Reserved = 0;
     872
     873        VBoxHGSMIBufferSubmit(pCtx, pSubmit);
     874
     875        VBoxHGSMIBufferFree(pCtx, pSubmit);
     876    }
     877
     878    return VINF_SUCCESS;
     879}
     880
/**
 * Tries to preempt (cancel) the command with the given fence id before the
 * host completes it.
 *
 * Walks the ring buffer backwards from the first uncompleted record (submitter
 * side iterator), skipping NOP padding commands, until a command header with a
 * matching fence id is found.  The cancel is attempted with a lock-free
 * compare-exchange on the command state byte, racing against the host moving
 * the command to the in-progress state.
 *
 * NOTE(review): when the CAS fails the command is already IN_PROGRESS on the
 * host, i.e. it was NOT actually cancelled, yet the code below still reports
 * DXGK_INTERRUPT_DMA_PREEMPTED for it — confirm this is the intended
 * "preempt reported, host completion ignored later" protocol.
 *
 * @param pDevExt    The device extension.
 * @param pVbva      The command VBVA instance to search.
 * @param u32FenceID Fence id of the command to preempt.
 * @returns true if a matching command was found and a preempt notification was
 *          issued, false if no submitted command carries this fence id.
 */
bool VBoxCmdVbvaPreempt(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva, uint32_t u32FenceID)
{
    VBVAEXBUFFERBACKWARDITER Iter;
    VBoxVBVAExBIterInit(&pVbva->Vbva, &Iter);

    uint32_t cbBuffer;
    bool fProcessed;
    uint8_t* pu8Cmd;

    while ((pu8Cmd = (uint8_t*)VBoxVBVAExBIterNext(&Iter, &cbBuffer, &fProcessed)) != NULL)
    {
        /* NOP commands pad the ring buffer tail; they carry no fence. */
        if (*pu8Cmd == VBOXCMDVBVA_OPTYPE_NOP)
            continue;

        VBOXCMDVBVA_HDR *pCmd = (VBOXCMDVBVA_HDR*)pu8Cmd;

        if (pCmd->u32FenceID != u32FenceID)
            continue;

        /* Atomically flip SUBMITTED -> CANCELLED; if that fails the host has
         * already picked the command up (IN_PROGRESS). */
        if (!ASMAtomicCmpXchgU8(&pCmd->u8State, VBOXCMDVBVA_STATE_CANCELLED, VBOXCMDVBVA_STATE_SUBMITTED))
            Assert(pCmd->u8State == VBOXCMDVBVA_STATE_IN_PROGRESS);

        /* we have cancelled the command successfully */
        vboxCmdVbvaDdiNotifyComplete(pDevExt, pVbva, u32FenceID, DXGK_INTERRUPT_DMA_PREEMPTED);
        return true;
    }

    return false;
}
     910
     911bool VBoxCmdVbvaCheckCompletedIrq(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva)
     912{
     913    VBVAEXBUFFERFORWARDITER Iter;
     914    VBoxVBVAExCFIterInit(&pVbva->Vbva, &Iter);
     915
     916    bool fHasCommandsCompletedPreempted = false;
     917    bool fProcessed;
     918    uint8_t* pu8Cmd;
     919
     920
     921    while ((pu8Cmd = (uint8_t*)VBoxVBVAExCFIterNext(&Iter, NULL, &fProcessed)) != NULL)
     922    {
     923        if (!fProcessed)
     924            break;
     925
     926        if (*pu8Cmd == VBOXCMDVBVA_OPTYPE_NOP)
     927            continue;
     928
     929        VBOXCMDVBVA_HDR *pCmd = (VBOXCMDVBVA_HDR*)pu8Cmd;
     930        uint8_t u8State = pCmd->u8State;
     931        uint32_t u32FenceID = pCmd->u32FenceID;
     932
     933        Assert(u8State == VBOXCMDVBVA_STATE_IN_PROGRESS
     934                || u8State == VBOXCMDVBVA_STATE_CANCELLED);
     935        Assert(u32FenceID);
     936        VBoxVBVAExCBufferCompleted(&pVbva->Vbva);
     937        DXGK_INTERRUPT_TYPE enmDdiNotify;
     938
     939        if (u8State == VBOXCMDVBVA_STATE_IN_PROGRESS)
     940        {
     941            pVbva->u32FenceCompleted = u32FenceID;
     942            enmDdiNotify = DXGK_INTERRUPT_DMA_COMPLETED;
     943        }
     944        else
     945            enmDdiNotify = DXGK_INTERRUPT_DMA_PREEMPTED;
     946
     947        vboxCmdVbvaDdiNotifyCompleteIrq(pDevExt, pVbva, pCmd->u32FenceID, enmDdiNotify);
     948
     949        fHasCommandsCompletedPreempted = true;
     950    }
     951
     952    return fHasCommandsCompletedPreempted;
     953}
     954
/**
 * Non-IRQ entry point for checking command completion.
 *
 * Thin wrapper: forwards to the internal vboxCmdVbvaCheckCompleted with the
 * adapter-wide guest HGSMI command context and fBufferOverflow = false.
 *
 * @param pDevExt   The device extension.
 * @param fPingHost Whether to poke the host to flush completions (semantics
 *                  live in vboxCmdVbvaCheckCompleted, not visible here).
 */
void VBoxCmdVbvaCheckCompleted(PVBOXMP_DEVEXT pDevExt, bool fPingHost)
{
    vboxCmdVbvaCheckCompleted(pDevExt, fPingHost, &VBoxCommonFromDeviceExt(pDevExt)->guestCtx, false /* fBufferOverflow */);
}
  • trunk/src/VBox/Additions/WINNT/Graphics/Video/mp/wddm/VBoxMPVbva.h

    r49332 r49365  
    3131int vboxVbvaDestroy(PVBOXMP_DEVEXT pDevExt, VBOXVBVAINFO *pVbva);
    3232int vboxVbvaCreate(PVBOXMP_DEVEXT pDevExt, VBOXVBVAINFO *pVbva, ULONG offBuffer, ULONG cbBuffer, D3DDDI_VIDEO_PRESENT_SOURCE_ID srcId);
    33 int vboxVbvaReportCmdOffset(PVBOXMP_DEVEXT pDevExt, VBOXVBVAINFO *pVbva, uint32_t offCmd);
    3433int vboxVbvaReportDirtyRect(PVBOXMP_DEVEXT pDevExt, struct VBOXWDDM_SOURCE *pSrc, RECT *pRectOrig);
    3534
     
    6059
    6160
     61/* customized VBVA implementation */
     62struct VBVAEXBUFFERCONTEXT;
     63
     64typedef DECLCALLBACKPTR(void, PFNVBVAEXBUFFERFLUSH) (struct VBVAEXBUFFERCONTEXT *pCtx, PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx, void *pvFlush);
     65
     66/**
     67 * Structure grouping the context needed for sending graphics acceleration
     68 * information to the host via VBVA.  Each screen has its own VBVA buffer.
     69 */
     70typedef struct VBVAEXBUFFERCONTEXT
     71{
     72    /** Offset of the buffer in the VRAM section for the screen */
     73    uint32_t    offVRAMBuffer;
     74    /** Length of the buffer in bytes */
     75    uint32_t    cbBuffer;
     76    /** This flag is set if we wrote to the buffer faster than the host could
     77     * read it. */
     78    bool        fHwBufferOverflow;
     79    /* VBVA operation used to enable/disable VBVA */
     80    uint16_t    u16EnableOp;
     81    /* the window between indexRecordFirstUncompleted and pVBVA->::indexRecordFirst represents
     82     * command records processed by the host, but not completed by the guest yet */
     83    volatile uint32_t    indexRecordFirstUncompleted;
     84    /* the window between off32DataUncompleted and pVBVA->::off32Data represents
     85     * command data processed by the host, but not completed by the guest yet */
     86    uint32_t    off32DataUncompleted;
     87    /* flush function */
     88    PFNVBVAEXBUFFERFLUSH pfnFlush;
     89    void *pvFlush;
     90    /** The VBVA record that we are currently preparing for the host, NULL if
     91     * none. */
     92    struct VBVARECORD *pRecord;
     93    /** Pointer to the VBVA buffer mapped into the current address space.  Will
     94     * be NULL if VBVA is not enabled. */
     95    struct VBVABUFFER *pVBVA;
     96} VBVAEXBUFFERCONTEXT, *PVBVAEXBUFFERCONTEXT;
     97
     98typedef struct VBVAEXBUFFERITERBASE
     99{
     100    struct VBVAEXBUFFERCONTEXT *pCtx;
     101    /* index of the current record */
     102    uint32_t iCurRecord;
     103    /* offset of the current command */
     104    uint32_t off32CurCmd;
     105} VBVAEXBUFFERITERBASE, *PVBVAEXBUFFERITERBASE;
     106
     107typedef struct VBVAEXBUFFERFORWARDITER
     108{
     109    VBVAEXBUFFERITERBASE Base;
     110} VBVAEXBUFFERFORWARDITER, *PVBVAEXBUFFERFORWARDITER;
     111
     112typedef struct VBVAEXBUFFERBACKWARDITER
     113{
     114    VBVAEXBUFFERITERBASE Base;
     115} VBVAEXBUFFERBACKWARDITER, *PVBVAEXBUFFERBACKWARDITER;
     116
     117
     118typedef struct VBOXCMDVBVA
     119{
     120    VBVAEXBUFFERCONTEXT Vbva;
     121
      122    /* last completed fence id */
     123    uint32_t u32FenceCompleted;
     124    /* last submitted fence id */
     125    uint32_t u32FenceSubmitted;
     126
     127    /* node ordinal */
     128    uint32_t idNode;
     129} VBOXCMDVBVA;
     130
     131/** @name VBVAEx APIs
     132 * @{ */
     133RTDECL(bool) VBoxVBVAExEnable(PVBVAEXBUFFERCONTEXT pCtx,
     134                            PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx,
     135                            struct VBVABUFFER *pVBVA);
     136RTDECL(void) VBoxVBVAExDisable(PVBVAEXBUFFERCONTEXT pCtx,
     137                             PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx);
     138RTDECL(bool) VBoxVBVAExBufferBeginUpdate(PVBVAEXBUFFERCONTEXT pCtx,
     139                                       PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx);
     140RTDECL(void) VBoxVBVAExBufferEndUpdate(PVBVAEXBUFFERCONTEXT pCtx);
     141RTDECL(bool) VBoxVBVAExWrite(PVBVAEXBUFFERCONTEXT pCtx,
     142                           PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx,
     143                           const void *pv, uint32_t cb);
     144
     145RTDECL(bool) VBoxVBVAExOrderSupported(PVBVAEXBUFFERCONTEXT pCtx, unsigned code);
     146
     147RTDECL(void) VBoxVBVAExSetupBufferContext(PVBVAEXBUFFERCONTEXT pCtx,
     148                                        uint32_t offVRAMBuffer,
     149                                        uint32_t cbBuffer,
     150                                        PFNVBVAEXBUFFERFLUSH pfnFlush,
     151                                        void *pvFlush,
     152                                        uint16_t u16EnableOp);
     153
     154DECLINLINE(uint32_t) VBoxVBVAExGetSize(PVBVAEXBUFFERCONTEXT pCtx)
     155{
     156    return pCtx->pVBVA->cbData;
     157}
     158
     159/* can be used to ensure the command will not cross the ring buffer boundary,
      160 * and thus will not be split */
     161RTDECL(uint32_t) VBoxVBVAExGetFreeTail(PVBVAEXBUFFERCONTEXT pCtx);
      162/* allocates a contiguous buffer of a given size, i.e. one that is not split across ring buffer boundaries */
     163RTDECL(void*) VBoxVBVAExAllocContiguous(PVBVAEXBUFFERCONTEXT pCtx, PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx, uint32_t cb);
      164/* answers whether the host is in the "processing" state now,
      165 * i.e. if "processing" is true after the command is submitted, no notification needs to be posted to the host to make the command be processed,
      166 * otherwise, the host should be notified about the command */
     167RTDECL(bool) VBoxVBVAExIsProcessing(PVBVAEXBUFFERCONTEXT pCtx);
     168
     169/* initializes iterator that starts with free record,
     170 * i.e. VBoxVBVAExIterNext would return the first uncompleted record.
     171 *
     172 * can be used by submitter only */
     173RTDECL(void) VBoxVBVAExBIterInit(PVBVAEXBUFFERCONTEXT pCtx, PVBVAEXBUFFERBACKWARDITER pIter);
     174/* can be used by submitter only */
     175RTDECL(void*) VBoxVBVAExBIterNext(PVBVAEXBUFFERBACKWARDITER pIter, uint32_t *pcbBuffer, bool *pfProcessed);
     176
     177/* completer functions
     178 * completer can only use below ones, and submitter is NOT allowed to use them.
      179 * Completer functions are prefixed with VBoxVBVAExC as opposed to submitter ones,
     180 * that do not have the last "C" in the prefix */
     181/* initializes iterator that starts with completed record,
     182 * i.e. VBoxVBVAExIterPrev would return the first uncompleted record.
     183 * note that we can not have iterator that starts at processed record
     184 * (i.e. the one processed by host, but not completed by guest, since host modifies
     185 * VBVABUFFER::off32Data and VBVABUFFER::indexRecordFirst concurrently,
     186 * and so we may end up with inconsistent index-offData pair
     187 *
      188 * can be used by completer only */
     189RTDECL(void) VBoxVBVAExCFIterInit(PVBVAEXBUFFERCONTEXT pCtx, PVBVAEXBUFFERFORWARDITER pIter);
      190/* can be used by completer only */
     191RTDECL(void*) VBoxVBVAExCFIterNext(PVBVAEXBUFFERFORWARDITER pIter, uint32_t *pcbBuffer, bool *pfProcessed);
     192
     193RTDECL(void) VBoxVBVAExCBufferCompleted(PVBVAEXBUFFERCONTEXT pCtx);
     194
     195/** @}  */
     196
     197struct VBOXCMDVBVA_HDR;
     198
     199int VBoxCmdVbvaEnable(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva);
     200int VBoxCmdVbvaDisable(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva);
     201int VBoxCmdVbvaDestroy(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva);
     202int VBoxCmdVbvaCreate(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva, ULONG offBuffer, ULONG cbBuffer);
     203int VBoxCmdVbvaSubmit(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva, struct VBOXCMDVBVA_HDR *pCmd, uint32_t cbCmd);
     204bool VBoxCmdVbvaPreempt(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva, uint32_t u32FenceID);
     205void VBoxCmdVbvaCheckCompleted(PVBOXMP_DEVEXT pDevExt, bool fPingHost);
     206bool VBoxCmdVbvaCheckCompletedIrq(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva);
     207
    62208#endif /* #ifndef ___VBoxMPVbva_h___ */
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette