VirtualBox

Changeset 92939 in vbox for trunk/src/VBox/Devices/VirtIO


Timestamp:
Dec 15, 2021 3:51:28 PM
Author:
vboxsync
svn:sync-xref-src-repo-rev:
148914
Message:

Improve transitional behavior and the save/load exec code. Optimize some Rx buffer handling code for speed and make it easier to understand and maintain. Add missing function comments and improve others. Make debug logging clearer and more succinct, along with other miscellaneous small improvements. See BugRef(8651) Comment #171

Location:
trunk/src/VBox/Devices/VirtIO
Files:
2 edited

  • trunk/src/VBox/Devices/VirtIO/VirtioCore.cpp

    r92091 r92939  
    4242*   Defined Constants And Macros                                                                                                 *
    4343*********************************************************************************************************************************/
     44
    4445#define INSTANCE(a_pVirtio)                 ((a_pVirtio)->szInstance)
    4546#define VIRTQNAME(a_pVirtio, a_uVirtq)      ((a_pVirtio)->aVirtqueues[(a_uVirtq)].szName)
    4647
    47 
    4848#define IS_VIRTQ_EMPTY(pDevIns, pVirtio, pVirtq) \
    4949            (virtioCoreVirtqAvailCnt(pDevIns, pVirtio, pVirtq) == 0)
    5050
    51 
    5251#define IS_DRIVER_OK(a_pVirtio)             ((a_pVirtio)->fDeviceStatus & VIRTIO_STATUS_DRIVER_OK)
    5352#define WAS_DRIVER_OK(a_pVirtio)            ((a_pVirtio)->fPrevDeviceStatus & VIRTIO_STATUS_DRIVER_OK)
     53
     54/**
      55 * These defines are used to track the guest virtio-net driver writing the accepted driver-feature flags
      56 * in two 32-bit operations (in arbitrary order), with one bit dedicated to ensuring the 'features complete'
      57 * handling is done only once.
     58 */
     59#define DRIVER_FEATURES_0_WRITTEN                        1   /**< fDriverFeatures[0]  written by guest virtio-net */
     60#define DRIVER_FEATURES_1_WRITTEN                        2   /**< fDriverFeatures[1]  written by guest virtio-net */
     61#define DRIVER_FEATURES_0_AND_1_WRITTEN                  3   /**< Both 32-bit parts of fDriverFeatures[] written  */
     62#define DRIVER_FEATURES_COMPLETE_HANDLED                 4   /**< Features negotiation complete handler called    */
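For context on how these flags are meant to be used (see the common-config write handler further down in this diff), here is a small standalone sketch of the same accumulate-then-fire-once pattern. The field and helper names are simplified stand-ins, not the actual device code:

    #include <stdint.h>
    #include <stdio.h>

    #define DRIVER_FEATURES_0_WRITTEN          1   /* low 32 bits written by guest    */
    #define DRIVER_FEATURES_1_WRITTEN          2   /* high 32 bits written by guest   */
    #define DRIVER_FEATURES_0_AND_1_WRITTEN    3   /* both halves written             */
    #define DRIVER_FEATURES_COMPLETE_HANDLED   4   /* completion handler already ran  */

    static uint32_t g_fWritten;        /* simplified stand-in for fDriverFeaturesWritten */
    static uint64_t g_uDriverFeatures; /* simplified stand-in for uDriverFeatures        */

    static void featuresCompleteOnceOnly(void)
    {
        printf("negotiation complete, features=%#llx\n", (unsigned long long)g_uDriverFeatures);
        g_fWritten |= DRIVER_FEATURES_COMPLETE_HANDLED;
    }

    /* Guest writes one 32-bit half of the feature word; uSelect is 0 (low) or 1 (high). */
    static void guestWroteFeatures(unsigned uSelect, uint32_t uVal)
    {
        if (uSelect == 0)
        {
            g_uDriverFeatures = (g_uDriverFeatures & UINT64_C(0xffffffff00000000)) | uVal;
            g_fWritten |= DRIVER_FEATURES_0_WRITTEN;
        }
        else
        {
            g_uDriverFeatures = (g_uDriverFeatures & UINT64_C(0x00000000ffffffff)) | ((uint64_t)uVal << 32);
            g_fWritten |= DRIVER_FEATURES_1_WRITTEN;
        }
        if (   (g_fWritten & DRIVER_FEATURES_0_AND_1_WRITTEN) == DRIVER_FEATURES_0_AND_1_WRITTEN
            && !(g_fWritten & DRIVER_FEATURES_COMPLETE_HANDLED))
            featuresCompleteOnceOnly();
    }

    int main(void)
    {
        guestWroteFeatures(1, 1);          /* halves may arrive in either order...    */
        guestWroteFeatures(0, 0x79000000); /* ...the handler fires here, exactly once */
        guestWroteFeatures(0, 0x79000000); /* a repeated write does not re-run it     */
        return 0;
    }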
    5463
    5564/**
     
    6877
    6978
    70 /** Marks the start of the virtio saved state (just for sanity). */
    71 #define VIRTIO_SAVEDSTATE_MARKER                        UINT64_C(0x1133557799bbddff)
    72 /** The current saved state version for the virtio core. */
    73 #define VIRTIO_SAVEDSTATE_VERSION                       UINT32_C(1)
    74 
    75 
    7679/*********************************************************************************************************************************
    7780*   Structures and Typedefs                                                                                                      *
    7881*********************************************************************************************************************************/
    79 
    8082
    8183/** @name virtq related flags
     
    9092
    9193/**
    92  * virtq related structs
    93  * (struct names follow VirtIO 1.0 spec, typedef use VBox style)
     94 * virtq-related structs
     95 * (struct names follow VirtIO 1.0 spec, field names use VBox styled naming, w/respective spec'd name in comments)
    9496 */
    9597typedef struct virtq_desc
     
    125127} VIRTQ_USED_T, *PVIRTQ_USED_T;
    126128
    127 
    128129const char *virtioCoreGetStateChangeText(VIRTIOVMSTATECHANGED enmState)
    129130{
     
    141142
    142143static void virtioCoreNotifyGuestDriver(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtq);
    143 static int  virtioKick(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint8_t uCause, uint16_t uVec);
     144static int  virtioNudgeGuest(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint8_t uCause, uint16_t uVec);
     145
     146DECLINLINE(uint16_t) virtioCoreR3CountPendingBufs(uint16_t uRingIdx, uint16_t uShadowIdx, uint16_t uQueueSize)
     147{
     148    if (uShadowIdx == uRingIdx)
     149        return 0;
     150    else
     151    if (uShadowIdx > uRingIdx)
     152        return uShadowIdx - uRingIdx;
     153    return uQueueSize - (uRingIdx - uShadowIdx);
     154}
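The newly added virtioCoreR3CountPendingBufs() computes how many used-ring entries the device has written ahead of the index the guest currently sees, accounting for wrap-around at the queue size. A standalone restatement of the same arithmetic with a few worked cases (not the VirtualBox source itself):

    #include <assert.h>
    #include <stdint.h>

    /* Same arithmetic as virtioCoreR3CountPendingBufs(): entries written by the
       device (shadow index) but not yet visible at the guest-visible ring index. */
    static uint16_t countPendingBufs(uint16_t uRingIdx, uint16_t uShadowIdx, uint16_t uQueueSize)
    {
        if (uShadowIdx == uRingIdx)
            return 0;
        if (uShadowIdx > uRingIdx)
            return uShadowIdx - uRingIdx;
        return uQueueSize - (uRingIdx - uShadowIdx);   /* shadow wrapped past the ring index */
    }

    int main(void)
    {
        assert(countPendingBufs(10, 10, 256) == 0);    /* fully synced              */
        assert(countPendingBufs(10, 13, 256) == 3);    /* three buffers pending     */
        assert(countPendingBufs(254, 3, 256) == 5);    /* wrap: 254,255,0,1,2 = 5   */
        return 0;
    }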
    144155
    145156/** @name Internal queue operations
     
    156167    uint16_t const cVirtqItems = RT_MAX(pVirtq->uQueueSize, 1); /* Make sure to avoid div-by-zero. */
    157168
    158         virtioCoreGCPhysRead(pVirtio, pDevIns,
    159                           pVirtq->GCPhysVirtqDesc + sizeof(VIRTQ_DESC_T) * (idxDesc % cVirtqItems),
    160                           pDesc, sizeof(VIRTQ_DESC_T));
     169    virtioCoreGCPhysRead(pVirtio, pDevIns,
     170                         pVirtq->GCPhysVirtqDesc + sizeof(VIRTQ_DESC_T) * (idxDesc % cVirtqItems),
     171                         pDesc, sizeof(VIRTQ_DESC_T));
    161172}
    162173#endif
     
    207218                         pVirtq->GCPhysVirtqAvail + RT_UOFFSETOF(VIRTQ_AVAIL_T, fFlags),
    208219                         &fFlags, sizeof(fFlags));
    209 
    210220    return fFlags;
    211221}
     
    249259}
    250260
    251 
    252261#ifdef IN_RING3
    253262DECLINLINE(uint16_t) virtioReadUsedRingIdx(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTQUEUE pVirtq)
     
    289298
    290299    if (uIdxActual < uIdxShadow)
    291         uIdxDelta = (uIdxActual + VIRTQ_SIZE) - uIdxShadow;
     300        uIdxDelta = (uIdxActual + pVirtq->uQueueSize) - uIdxShadow;
    292301    else
    293302        uIdxDelta = uIdxActual - uIdxShadow;
    294 
    295     LogFunc(("%s, %u %s\n",
    296         pVirtq->szName, uIdxDelta, uIdxDelta == 1 ? "entry" : "entries"));
    297303
    298304    return uIdxDelta;
     
    320326    if (!pVirtio->fLegacyDriver && !pVirtq->uEnable)
    321327    {
    322         LogRelFunc(("virtq: %d (%s) not enabled\n", uVirtq, VIRTQNAME(pVirtio, uVirtq)));
     328        LogRelFunc(("virtq: %s not enabled\n", VIRTQNAME(pVirtio, uVirtq)));
    323329        return 0;
    324330    }
    325 
    326331    return virtioCoreVirtqAvailCnt(pDevIns, pVirtio, pVirtq);
    327332}
     
    440445           else
    441446               cbPrint = RTStrPrintf(pszOut, cbRemain, "%02x %s", c, (col + 1) % 8 ? "" : "  ");
    442             ADJCURSOR(cbPrint);
     447           ADJCURSOR(cbPrint);
    443448        }
    444449        for (uint16_t idx = row * 16; idx < row * 16 + 16; idx++)
     
    456461#undef ADJCURSOR
    457462}
    458 #endif /* LOG_ENABLED */
    459 
    460 /** API function: See header file */
    461 int virtioCoreIsLegacyMode(PVIRTIOCORE pVirtio)
    462 {
    463     Log12Func(("%s", pVirtio->fLegacyDriver ? "Legacy Guest Driver handling mode\n" : ""));
    464     return pVirtio->fLegacyDriver;
    465 }
     463
    466464
    467465/** API function: See header file */
     
    470468                                int fHasIndex, uint32_t idx)
    471469{
    472     if (!LogIs6Enabled())
    473         return;
    474 
    475     char szIdx[16];
    476     if (fHasIndex)
    477         RTStrPrintf(szIdx, sizeof(szIdx), "[%d]", idx);
    478     else
    479         szIdx[0] = '\0';
    480 
    481     if (cb == 1 || cb == 2 || cb == 4 || cb == 8)
    482     {
    483         char szDepiction[64];
    484         size_t cchDepiction;
    485         if (uOffset != 0 || cb != uMemberSize) /* display bounds if partial member access */
    486             cchDepiction = RTStrPrintf(szDepiction, sizeof(szDepiction), "%s%s[%d:%d]",
    487                                        pszMember, szIdx, uOffset, uOffset + cb - 1);
     470    if (LogIs6Enabled())
     471    {
     472        char szIdx[16];
     473        if (fHasIndex)
     474            RTStrPrintf(szIdx, sizeof(szIdx), "[%d]", idx);
    488475        else
    489             cchDepiction = RTStrPrintf(szDepiction, sizeof(szDepiction), "%s%s", pszMember, szIdx);
    490 
    491         /* padding */
    492         if (cchDepiction < 30)
    493             szDepiction[cchDepiction++] = ' ';
    494         while (cchDepiction < 30)
    495             szDepiction[cchDepiction++] = '.';
    496         szDepiction[cchDepiction] = '\0';
    497 
    498         RTUINT64U uValue;
    499         uValue.u = 0;
    500         memcpy(uValue.au8, pv, cb);
    501         Log6(("%-23s: Guest %s %s %#0*RX64\n",
    502                   pszFunc, fWrite ? "wrote" : "read ", szDepiction, 2 + cb * 2, uValue.u));
    503     }
    504     else /* odd number or oversized access, ... log inline hex-dump style */
    505     {
    506         Log6(("%-23s: Guest %s %s%s[%d:%d]: %.*Rhxs\n",
    507                   pszFunc, fWrite ? "wrote" : "read ", pszMember,
    508                   szIdx, uOffset, uOffset + cb, cb, pv));
     476            szIdx[0] = '\0';
     477
     478        if (cb == 1 || cb == 2 || cb == 4 || cb == 8)
     479        {
     480            char szDepiction[64];
     481            size_t cchDepiction;
     482            if (uOffset != 0 || cb != uMemberSize) /* display bounds if partial member access */
     483                cchDepiction = RTStrPrintf(szDepiction, sizeof(szDepiction), "%s%s[%d:%d]",
     484                                           pszMember, szIdx, uOffset, uOffset + cb - 1);
     485            else
     486                cchDepiction = RTStrPrintf(szDepiction, sizeof(szDepiction), "%s%s", pszMember, szIdx);
     487
     488            /* padding */
     489            if (cchDepiction < 30)
     490                szDepiction[cchDepiction++] = ' ';
     491            while (cchDepiction < 30)
     492                szDepiction[cchDepiction++] = '.';
     493            szDepiction[cchDepiction] = '\0';
     494
     495            RTUINT64U uValue;
     496            uValue.u = 0;
     497            memcpy(uValue.au8, pv, cb);
     498            Log6(("%-23s: Guest %s %s %#0*RX64\n",
     499                      pszFunc, fWrite ? "wrote" : "read ", szDepiction, 2 + cb * 2, uValue.u));
     500        }
     501        else /* odd number or oversized access, ... log inline hex-dump style */
     502        {
     503            Log6(("%-23s: Guest %s %s%s[%d:%d]: %.*Rhxs\n",
     504                      pszFunc, fWrite ? "wrote" : "read ", pszMember,
     505                      szIdx, uOffset, uOffset + cb, cb, pv));
     506        }
    509507    }
    510508    RT_NOREF2(fWrite, pszFunc);
     
    512510
    513511/**
    514  * Makes the MMIO-mapped Virtio fDeviceStatus registers non-cryptic (buffers to
    515  * keep the output clean during multi-threaded activity)
     512 * Log MMIO-mapped Virtio fDeviceStatus register bitmask, naming the bits
    516513 */
    517514DECLINLINE(void) virtioCoreFormatDeviceStatus(uint8_t bStatus, char *pszBuf, size_t uSize)
    518515{
    519 
    520 #define ADJCURSOR(len) cp += len; uSize -= len; sep = (char *)" | ";
    521 
     516#   define ADJCURSOR(len) { cp += len; uSize -= len; sep = (char *)" | "; }
    522517    memset(pszBuf, 0, uSize);
    523     size_t len;
    524     char *cp = pszBuf;
    525     char *sep = (char *)"";
    526 
    527     if (bStatus == 0) {
     518    char *cp = pszBuf, *sep = (char *)"";
     519    int len;
     520    if (bStatus == 0)
    528521        RTStrPrintf(cp, uSize, "RESET");
    529         return;
    530     }
    531     if (bStatus & VIRTIO_STATUS_ACKNOWLEDGE)
    532     {
    533         len = RTStrPrintf(cp, uSize, "ACKNOWLEDGE");
    534         ADJCURSOR(len);
    535    }
    536     if (bStatus & VIRTIO_STATUS_DRIVER)
    537     {
    538         len = RTStrPrintf(cp, uSize, "%sDRIVER", sep);
    539         ADJCURSOR(len);
    540     }
    541     if (bStatus & VIRTIO_STATUS_FEATURES_OK)
    542     {
    543         len = RTStrPrintf(cp, uSize, "%sFEATURES_OK", sep);
    544         ADJCURSOR(len);
    545     }
    546     if (bStatus & VIRTIO_STATUS_DRIVER_OK)
    547     {
    548         len = RTStrPrintf(cp, uSize, "%sDRIVER_OK", sep);
    549         ADJCURSOR(len);
    550     }
    551     if (bStatus & VIRTIO_STATUS_FAILED)
    552     {
    553         len = RTStrPrintf(cp, uSize, "%sFAILED", sep);
    554         ADJCURSOR(len);
    555     }
    556     if (bStatus & VIRTIO_STATUS_DEVICE_NEEDS_RESET)
    557         RTStrPrintf(cp, uSize, "%sNEEDS_RESET", sep);
    558 
    559 #undef ADJCURSOR
     522    else
     523    {
     524        if (bStatus & VIRTIO_STATUS_ACKNOWLEDGE)
     525        {
     526            len = RTStrPrintf(cp, uSize, "ACKNOWLEDGE");
     527            ADJCURSOR(len);
     528        }
     529        if (bStatus & VIRTIO_STATUS_DRIVER)
     530        {
     531            len = RTStrPrintf(cp, uSize, "%sDRIVER", sep);
     532            ADJCURSOR(len);
     533        }
     534        if (bStatus & VIRTIO_STATUS_FEATURES_OK)
     535        {
     536            len = RTStrPrintf(cp, uSize, "%sFEATURES_OK", sep);
     537            ADJCURSOR(len);
     538        }
     539        if (bStatus & VIRTIO_STATUS_DRIVER_OK)
     540        {
     541            len = RTStrPrintf(cp, uSize, "%sDRIVER_OK", sep);
     542            ADJCURSOR(len);
     543        }
     544        if (bStatus & VIRTIO_STATUS_FAILED)
     545        {
     546            len = RTStrPrintf(cp, uSize, "%sFAILED", sep);
     547            ADJCURSOR(len);
     548        }
     549        if (bStatus & VIRTIO_STATUS_DEVICE_NEEDS_RESET)
     550            RTStrPrintf(cp, uSize, "%sNEEDS_RESET", sep);
     551    }
     552#   undef ADJCURSOR
     553}
     554
     555#endif /* LOG_ENABLED */
     556
     557/** API function: See header file */
     558int virtioCoreIsLegacyMode(PVIRTIOCORE pVirtio)
     559{
     560    return pVirtio->fLegacyDriver;
    560561}
    561562
     
    570571    pVirtq->uUsedIdxShadow  = 0;
    571572    pVirtq->fUsedRingEvent = false;
     573    pVirtq->fAttached = true;
    572574    RTStrCopy(pVirtq->szName, sizeof(pVirtq->szName), pcszName);
    573575    return VINF_SUCCESS;
     576}
     577
     578int virtioCoreR3VirtqDetach(PVIRTIOCORE pVirtio, uint16_t uVirtqNbr)
     579{
     580    PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtqNbr];
     581    pVirtq->uVirtq          = 0;
     582    pVirtq->uAvailIdxShadow = 0;
     583    pVirtq->uUsedIdxShadow  = 0;
     584    pVirtq->fUsedRingEvent  = false;
     585    pVirtq->fAttached       = false;
     586    memset(pVirtq->szName, 0, sizeof(pVirtq->szName));
     587    return VINF_SUCCESS;
     588}
     589
     590bool virtioCoreR3VirtqIsAttached(PVIRTIOCORE pVirtio, uint16_t uVirtqNbr)
     591{
     592    return pVirtio->aVirtqueues[uVirtqNbr].fAttached;
     593}
     594
     595bool virtioCoreR3VirtqIsEnabled(PVIRTIOCORE pVirtio, uint16_t uVirtqNbr)
     596{
     597    PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtqNbr];
     598    return (bool)pVirtq->uEnable && pVirtq->GCPhysVirtqDesc;
    574599}
    575600
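The new attach/detach and query helpers above give device-specific code a way to tear down or probe per-queue bookkeeping, for example around reset or save/load. A hypothetical fragment using only the signatures visible in this diff (it assumes the VirtioCore.h declarations and is not compilable on its own):

    /* Hypothetical helper in a device implementation (e.g. virtio-net). Detach any
     * queue that is attached but was never enabled by the guest driver. */
    static void devR3DropUnusedVirtqs(PVIRTIOCORE pVirtio, uint16_t cVirtqs)
    {
        for (uint16_t uVirtqNbr = 0; uVirtqNbr < cVirtqs; uVirtqNbr++)
            if (   virtioCoreR3VirtqIsAttached(pVirtio, uVirtqNbr)
                && !virtioCoreR3VirtqIsEnabled(pVirtio, uVirtqNbr))
                virtioCoreR3VirtqDetach(pVirtio, uVirtqNbr);
    }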
     
    582607
    583608    /** @todo add ability to dump physical contents described by any descriptor (using existing VirtIO core API function) */
    584 //    bool fDump      = pszArgs && (*pszArgs == 'd' || *pszArgs == 'D'); /* "dump" (avail phys descriptor)"
     609//  bool fDump      = pszArgs && (*pszArgs == 'd' || *pszArgs == 'D'); /* "dump" (avail phys descriptor)"
    585610
    586611    uint16_t uAvailIdx       = virtioReadAvailRingIdx(pDevIns, pVirtio, pVirtq);
     
    648673        pHlp->pfnPrintf(pHlp,     "      No desc chains available\n");
    649674    pHlp->pfnPrintf(pHlp, "\n");
    650 
    651675}
    652676
     
    661685    return cRefs;
    662686}
    663 
    664687
    665688/** API Function: See header file */
     
    687710void virtioCoreNotifyConfigChanged(PVIRTIOCORE pVirtio)
    688711{
    689     virtioKick(pVirtio->pDevInsR3, pVirtio, VIRTIO_ISR_DEVICE_CONFIG, pVirtio->uMsixConfig);
    690 }
     712    virtioNudgeGuest(pVirtio->pDevInsR3, pVirtio, VIRTIO_ISR_DEVICE_CONFIG, pVirtio->uMsixConfig);
     713}
     714
    691715
    692716/** API Function: See header file */
    693717void virtioCoreVirtqEnableNotify(PVIRTIOCORE pVirtio, uint16_t uVirtq, bool fEnable)
    694718{
    695 
    696719    Assert(uVirtq < RT_ELEMENTS(pVirtio->aVirtqueues));
    697720    PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtq];
     
    719742        if (!pVirtio->fLegacyDriver)
    720743            pVirtio->fGenUpdatePending = true;
    721         virtioKick(pVirtio->pDevInsR3, pVirtio, VIRTIO_ISR_DEVICE_CONFIG, pVirtio->uMsixConfig);
     744        virtioNudgeGuest(pVirtio->pDevInsR3, pVirtio, VIRTIO_ISR_DEVICE_CONFIG, pVirtio->uMsixConfig);
    722745    }
    723746}
     
    748771    return VINF_SUCCESS;
    749772}
    750 
    751773
    752774/** API Function: See header file */
     
    797819    {
    798820        PVIRTIOSGSEG pSeg;
    799 
    800821        /*
    801822         * Malicious guests may go beyond paSegsIn or paSegsOut boundaries by linking
     
    804825         * the following aborts I/O if breach and employs a simple log throttling algorithm to notify.
    805826         */
    806         if (cSegsIn + cSegsOut >= VIRTQ_SIZE)
     827        if (cSegsIn + cSegsOut >= pVirtq->uQueueSize)
    807828        {
    808829            static volatile uint32_t s_cMessages  = 0;
     
    823844        if (desc.fFlags & VIRTQ_DESC_F_WRITE)
    824845        {
    825             Log6Func(("%s IN  idx=%u seg=%u addr=%RGp cb=%u\n", pVirtq->szName, uDescIdx, cSegsIn, desc.GCPhysBuf, desc.cb));
     846            Log6Func(("%s IN  idx=%-4u seg=%-3u addr=%RGp cb=%u\n", pVirtq->szName, uDescIdx, cSegsIn, desc.GCPhysBuf, desc.cb));
    826847            cbIn += desc.cb;
    827848            pSeg = &paSegsIn[cSegsIn++];
     
    829850        else
    830851        {
    831             Log6Func(("%s OUT desc_idx=%u seg=%u addr=%RGp cb=%u\n", pVirtq->szName, uDescIdx, cSegsOut, desc.GCPhysBuf, desc.cb));
     852            Log6Func(("%s OUT desc_idx=%-4u seg=%-3u addr=%RGp cb=%u\n", pVirtq->szName, uDescIdx, cSegsOut, desc.GCPhysBuf, desc.cb));
    832853            cbOut += desc.cb;
    833854            pSeg = &paSegsOut[cSegsOut++];
     
    840861#endif
    841862        }
    842 
    843863        pSeg->GCPhys = desc.GCPhysBuf;
    844864        pSeg->cbSeg = desc.cb;
    845 
    846865        uDescIdx = desc.uDescIdxNext;
    847866    } while (desc.fFlags & VIRTQ_DESC_F_NEXT);
     
    915934    AssertMsgReturn(IS_DRIVER_OK(pVirtio), ("Guest driver not in ready state.\n"), VERR_INVALID_STATE);
    916935
    917     Log6Func(("    Copying device data to %s (%s guest), desc chain idx %d\n",
    918               VIRTQNAME(pVirtio, uVirtq), pVirtio->fLegacyDriver ? "legacy" : "modern", virtioReadUsedRingIdx(pDevIns, pVirtio, pVirtq)));
    919 
    920     /* Copy s/g buf (virtual memory) to guest phys mem (IN direction). */
     936    Log6Func(("    Copying device data to %s, [desc:%u → used ring:%u]\n",
     937              VIRTQNAME(pVirtio, uVirtq), pVirtqBuf->uHeadIdx, pVirtq->uUsedIdxShadow));
     938
     939    /* Copy s/g buf (virtual memory) to guest phys mem (VirtIO "IN" direction). */
    921940
    922941    size_t cbCopy = 0, cbTotal = 0, cbRemain = 0;
     
    944963    }
    945964
    946     /* If this write-ahead crosses threshold where the driver wants to get an event, flag it */
     965    /* Flag if write-ahead crosses threshold where guest driver indicated it wants event notification */
    947966    if (pVirtio->uDriverFeatures & VIRTIO_F_EVENT_IDX)
    948967        if (pVirtq->uUsedIdxShadow == virtioReadAvailUsedEvent(pDevIns, pVirtio, pVirtq))
     
    951970    /*
    952971     * Place used buffer's descriptor in used ring but don't update used ring's slot index.
    953      * That will be done with a subsequent client call to virtioCoreVirtqUsedRingSync() */
     972     * That will be done with a subsequent client call to virtioCoreVirtqUsedRingSync()
     973     */
    954974    virtioWriteUsedElem(pDevIns, pVirtio, pVirtq, pVirtq->uUsedIdxShadow++, pVirtqBuf->uHeadIdx, (uint32_t)cbTotal);
    955975
    956     if (pSgVirtReturn)
    957         Log6Func(("     ... %d segs, %zu bytes, copied to %u byte buf. residual: %zu bytes\n",
    958                   pSgVirtReturn->cSegs, cbTotal - cbRemain,  pVirtqBuf->cbPhysReturn, pVirtqBuf->cbPhysReturn - cbTotal));
    959 
    960     Log6Func(("    %s used_idx=%u\n", VIRTQNAME(pVirtio, uVirtq), virtioReadUsedRingIdx(pDevIns, pVirtio, pVirtq)));
     976#ifdef LOG_ENABLED
     977    if (LogIs6Enabled() && pSgVirtReturn)
     978    {
     979
     980        LogFunc(("     ... %d segs, %zu bytes, copied to %u byte buf@offset=%u. Residual: %zu bytes\n",
     981             pSgVirtReturn->cSegs,  cbTotal - cbRemain,  pVirtqBuf->cbPhysReturn,
     982              ((virtioCoreGCPhysChainCalcBufSize(pVirtqBuf->pSgPhysReturn) -
     983                virtioCoreGCPhysChainCalcLengthLeft(pVirtqBuf->pSgPhysReturn)) - (cbTotal - cbRemain)),
     984                virtioCoreGCPhysChainCalcLengthLeft(pVirtqBuf->pSgPhysReturn) ));
     985
     986        uint16_t uPending = virtioCoreR3CountPendingBufs(
     987                                virtioReadUsedRingIdx(pDevIns, pVirtio, pVirtq),
     988                                pVirtq->uUsedIdxShadow, pVirtq->uQueueSize);
     989
     990        LogFunc(("    %u used buf%s not synced in %s\n", uPending, uPending == 1 ? "" : "s ",
     991                    VIRTQNAME(pVirtio, uVirtq)));
     992    }
     993#endif
     994    return VINF_SUCCESS;
     995}
     996
     997/** API function: See Header file  */
     998int virtioCoreR3VirtqUsedBufPut(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtq,
     999                                size_t cb, void const *pv, PVIRTQBUF pVirtqBuf, uint32_t cbEnqueue, bool fFence)
     1000{
     1001    Assert(uVirtq < RT_ELEMENTS(pVirtio->aVirtqueues));
     1002    Assert(pv);
     1003
     1004    PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtq];
     1005    PVIRTIOSGBUF pSgPhysReturn = pVirtqBuf->pSgPhysReturn;
     1006
     1007    Assert(pVirtqBuf->u32Magic == VIRTQBUF_MAGIC);
     1008    Assert(pVirtqBuf->cRefs > 0);
     1009
     1010    AssertMsgReturn(IS_DRIVER_OK(pVirtio), ("Guest driver not in ready state.\n"), VERR_INVALID_STATE);
     1011
     1012    Log6Func(("    Copying device data to %s, [desc chain head idx:%u]\n",
     1013              VIRTQNAME(pVirtio, uVirtq), pVirtqBuf->uHeadIdx));
     1014
     1015    /*
     1016     * Convert virtual memory simple buffer to guest physical memory (VirtIO descriptor chain)
     1017     */
     1018    uint8_t *pvBuf = (uint8_t *)pv;
     1019    size_t cbRemain = cb, cbCopy = 0;
     1020    while (cbRemain)
     1021    {
     1022        cbCopy = RT_MIN(pSgPhysReturn->cbSegLeft, cbRemain);
     1023        Assert(cbCopy > 0);
     1024        virtioCoreGCPhysWrite(pVirtio, pDevIns, (RTGCPHYS)pSgPhysReturn->GCPhysCur, pvBuf, cbCopy);
     1025        virtioCoreGCPhysChainAdvance(pSgPhysReturn, cbCopy);
     1026        pvBuf += cbCopy;
     1027        cbRemain -= cbCopy;
     1028    }
     1029    LogFunc(("     ...%zu bytes, copied to %u byte buf@offset=%u. Residual: %zu bytes\n",
     1030              cb ,  pVirtqBuf->cbPhysReturn,
     1031              ((virtioCoreGCPhysChainCalcBufSize(pVirtqBuf->pSgPhysReturn) -
     1032                 virtioCoreGCPhysChainCalcLengthLeft(pVirtqBuf->pSgPhysReturn)) - cb),
     1033                 virtioCoreGCPhysChainCalcLengthLeft(pVirtqBuf->pSgPhysReturn)));
     1034
     1035    if (cbEnqueue)
     1036    {
     1037        if (fFence)
     1038        {
     1039            RT_UNTRUSTED_NONVOLATILE_COPY_FENCE(); /* needed? */
     1040            Assert(!(cbCopy >> 32));
     1041        }
     1042        /* Flag if write-ahead crosses threshold where guest driver indicated it wants event notification */
     1043        if (pVirtio->uDriverFeatures & VIRTIO_F_EVENT_IDX)
     1044            if (pVirtq->uUsedIdxShadow == virtioReadAvailUsedEvent(pDevIns, pVirtio, pVirtq))
     1045                pVirtq->fUsedRingEvent = true;
     1046        /*
     1047         * Place used buffer's descriptor in used ring but don't update used ring's slot index.
     1048         * That will be done with a subsequent client call to virtioCoreVirtqUsedRingSync()
     1049         */
     1050        Log6Func(("    Enqueue desc chain head idx %u to %s used ring @ %u\n", pVirtqBuf->uHeadIdx,
     1051                VIRTQNAME(pVirtio, uVirtq), pVirtq->uUsedIdxShadow));
     1052
     1053        virtioWriteUsedElem(pDevIns, pVirtio, pVirtq, pVirtq->uUsedIdxShadow++, pVirtqBuf->uHeadIdx, cbEnqueue);
     1054
     1055#ifdef LOG_ENABLED
     1056        if (LogIs6Enabled())
     1057        {
     1058            uint16_t uPending = virtioCoreR3CountPendingBufs(
     1059                                    virtioReadUsedRingIdx(pDevIns, pVirtio, pVirtq),
     1060                                    pVirtq->uUsedIdxShadow, pVirtq->uQueueSize);
     1061
     1062            LogFunc(("    %u used buf%s not synced in %s\n",
     1063                    uPending, uPending == 1 ? "" : "s ", VIRTQNAME(pVirtio, uVirtq)));
     1064        }
     1065#endif
     1066    } /* fEnqueue */
    9611067
    9621068    return VINF_SUCCESS;
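The new simple-buffer variant of virtioCoreR3VirtqUsedBufPut() copies a flat device-to-guest payload into the descriptor chain and optionally enqueues it in the used ring; visibility to the guest still requires a separate ring-sync call. A hypothetical device-side fragment; the sync function's exact prototype is assumed from the comments above, so treat this only as a sketch:

    /* Hypothetical fragment: return one received frame to the guest on virtq uVirtq.
     * pVirtqBuf was previously obtained from the avail ring by the device code. */
    static int devR3ReturnFrame(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtq,
                                PVIRTQBUF pVirtqBuf, const void *pvFrame, size_t cbFrame)
    {
        /* Copy the frame and enqueue its descriptor in the used ring (not yet guest-visible). */
        int rc = virtioCoreR3VirtqUsedBufPut(pDevIns, pVirtio, uVirtq,
                                             cbFrame, pvFrame, pVirtqBuf,
                                             (uint32_t)cbFrame /* cbEnqueue */, true /* fFence */);
        if (RT_SUCCESS(rc))
            virtioCoreVirtqUsedRingSync(pDevIns, pVirtio, uVirtq); /* assumed prototype */
        return rc;
    }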
     
    9761082            ("Guest driver not in ready state.\n"), VERR_INVALID_STATE);
    9771083
    978     Log6Func(("    %s ++used_idx=%u\n", pVirtq->szName, pVirtq->uUsedIdxShadow));
     1084    Log6Func(("    Sync %s used ring (%u → idx)\n",
     1085                        pVirtq->szName, pVirtq->uUsedIdxShadow));
    9791086
    9801087    virtioWriteUsedRingIdx(pDevIns, pVirtio, pVirtq, pVirtq->uUsedIdxShadow);
     
    9981105    PVIRTIOCORECC pVirtioCC = PDMINS_2_DATA_CC(pDevIns, PVIRTIOCORECC);
    9991106
    1000     /* See VirtIO 1.0, section 4.1.5.2 It implies that uVirtq and uNotifyIdx should match.
    1001      * Disregarding this notification may cause throughput to stop, however there's no way to know
    1002      * which was queue was intended for wake-up if the two parameters disagree. */
    1003 
     1107    /* VirtIO 1.0, section 4.1.5.2 implies uVirtq and uNotifyIdx should match. Disregarding any of
     1108     * these notifications (if those indicies disagree) may break device/driver synchronization,
     1109     * causing eternal throughput starvation, yet there's no specified way to disambiguate
     1110     * which queue to wake-up in any awkward situation where the two parameters differ.
     1111     */
    10041112    AssertMsg(uNotifyIdx == uVirtq,
    10051113              ("Guest kicked virtq %d's notify addr w/non-corresponding virtq idx %d\n",
     
    10101118    PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtq];
    10111119
    1012     Log6Func(("%s (desc chains: %u)\n", pVirtq->szName,
      1120    Log6Func(("%s: (desc chains: %u)\n", pVirtq->szName ? pVirtq->szName : "?UNNAMED QUEUE?",
    10131121        virtioCoreVirtqAvailCnt(pDevIns, pVirtio, pVirtq)));
    10141122
     
    10481156                   pVirtq->szName, (uint16_t)virtioReadAvailUsedEvent(pDevIns, pVirtio, pVirtq)));
    10491157#endif
    1050             virtioKick(pDevIns, pVirtio, VIRTIO_ISR_VIRTQ_INTERRUPT, pVirtq->uMsixVector);
     1158            virtioNudgeGuest(pDevIns, pVirtio, VIRTIO_ISR_VIRTQ_INTERRUPT, pVirtq->uMsixVector);
    10511159            pVirtq->fUsedRingEvent = false;
    10521160            return;
     
    10621170        if (!(virtioReadAvailRingFlags(pDevIns, pVirtio, pVirtq) & VIRTQ_AVAIL_F_NO_INTERRUPT))
    10631171        {
    1064             virtioKick(pDevIns, pVirtio, VIRTIO_ISR_VIRTQ_INTERRUPT, pVirtq->uMsixVector);
     1172            virtioNudgeGuest(pDevIns, pVirtio, VIRTIO_ISR_VIRTQ_INTERRUPT, pVirtq->uMsixVector);
    10651173            return;
    10661174        }
     
    10771185 * @param   uVec        MSI-X vector, if enabled
    10781186 */
    1079 static int virtioKick(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint8_t uCause, uint16_t uMsixVector)
     1187static int virtioNudgeGuest(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint8_t uCause, uint16_t uMsixVector)
    10801188{
    10811189    if (uCause == VIRTIO_ISR_VIRTQ_INTERRUPT)
    1082         Log6Func(("reason: buffer added to 'used' ring.\n"));
     1190        Log6Func(("Reason for interrupt - buffer added to 'used' ring.\n"));
    10831191    else
    10841192    if (uCause == VIRTIO_ISR_DEVICE_CONFIG)
    1085        Log6Func(("reason: device config change\n"));
     1193       Log6Func(("Reason for interrupt - device config change\n"));
    10861194
    10871195    if (!pVirtio->fMsiSupport)
     
    11321240{
    11331241    LogFunc(("Resetting device VirtIO state\n"));
    1134     pVirtio->fLegacyDriver          = 1;   /* Assume this. Cleared if VIRTIO_F_VERSION_1 feature ack'd */
     1242    pVirtio->fLegacyDriver          = pVirtio->fOfferLegacy;   /* Cleared if VIRTIO_F_VERSION_1 feature ack'd */
    11351243    pVirtio->uDeviceFeaturesSelect  = 0;
    11361244    pVirtio->uDriverFeaturesSelect  = 0;
     
    11681276}
    11691277#endif /* IN_RING3 */
     1278
     1279/*
      1280 * Determines whether the guest virtio driver is modern or legacy and invokes the callback
      1281 * informing device-specific code that feature negotiation is complete.
     1282 * Should be called only once (coordinated via the 'toggle' flag)
     1283 */
     1284#ifdef IN_RING3
     1285DECLINLINE(void) virtioR3DoFeaturesCompleteOnceOnly(PVIRTIOCORE pVirtio, PVIRTIOCORECC pVirtioCC)
     1286{
     1287        if (pVirtio->uDriverFeatures & VIRTIO_F_VERSION_1)
     1288        {
     1289            LogFunc(("VIRTIO_F_VERSION_1 feature ack'd by guest\n"));
     1290            pVirtio->fLegacyDriver = 0;
     1291        }
     1292        else
     1293        {
     1294            if (pVirtio->fOfferLegacy)
     1295            {
     1296                pVirtio->fLegacyDriver = 1;
     1297                LogFunc(("VIRTIO_F_VERSION_1 feature was NOT set by guest\n"));
     1298            }
     1299            else
     1300                AssertMsgFailed(("Guest didn't accept VIRTIO_F_VERSION_1, but fLegacyOffered flag not set.\n"));
     1301        }
     1302        pVirtioCC->pfnFeatureNegotiationComplete(pVirtio, pVirtio->uDriverFeatures, pVirtio->fLegacyDriver);
     1303        pVirtio->fDriverFeaturesWritten |= DRIVER_FEATURES_COMPLETE_HANDLED;
     1304}
     1305#endif
    11701306
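Device-specific code learns the negotiation outcome through the pfnFeatureNegotiationComplete callback invoked above (replacing the old pfnGuestVersionHandler). A hypothetical callback body; its exact typedef lives in VirtioCore.h and is assumed here from the call site:

    /* Hypothetical virtio-net style callback; signature assumed from the call site above. */
    static DECLCALLBACK(void) devVirtioFeatureNegotiationComplete(PVIRTIOCORE pVirtio,
                                                                  uint64_t fDriverFeatures,
                                                                  uint32_t fLegacy)
    {
        RT_NOREF(pVirtio);
        LogFunc(("Guest is a %s driver, accepted features %#RX64\n",
                 fLegacy ? "legacy" : "modern (VirtIO 1.0+)", fDriverFeatures));
        /* ... adjust device-specific config layout / offered queues here ... */
    }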
    11711307/**
     
    12281364                case 0:
    12291365                    memcpy(&pVirtio->uDriverFeatures, pv, cb);
     1366                    pVirtio->fDriverFeaturesWritten |= DRIVER_FEATURES_0_WRITTEN;
     1367            LogFunc(("Set DRIVER_FEATURES_0_WRITTEN. pVirtio->fDriverFeaturesWritten=%d\n", pVirtio->fDriverFeaturesWritten));
     1368                    if (     (pVirtio->fDriverFeaturesWritten & DRIVER_FEATURES_0_AND_1_WRITTEN) == DRIVER_FEATURES_0_AND_1_WRITTEN
     1369                        && !(pVirtio->fDriverFeaturesWritten & DRIVER_FEATURES_COMPLETE_HANDLED))
     1370#ifdef IN_RING0
     1371                        return VINF_IOM_R3_MMIO_WRITE;
     1372#endif
     1373#ifdef IN_RING3
     1374                        virtioR3DoFeaturesCompleteOnceOnly(pVirtio, pVirtioCC);
     1375#endif
    12301376                    VIRTIO_DEV_CONFIG_LOG_ACCESS(uDriverFeatures, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess);
    12311377                    break;
    12321378                case 1:
    12331379                    memcpy((char *)&pVirtio->uDriverFeatures + sizeof(uint32_t), pv, cb);
    1234                     if (pVirtio->uDriverFeatures & VIRTIO_F_VERSION_1)
    1235                     {
     1380                    pVirtio->fDriverFeaturesWritten |= DRIVER_FEATURES_1_WRITTEN;
     1381            LogFunc(("Set DRIVER_FEATURES_1_WRITTEN. pVirtio->fDriverFeaturesWritten=%d\n", pVirtio->fDriverFeaturesWritten));
     1382                    if (     (pVirtio->fDriverFeaturesWritten & DRIVER_FEATURES_0_AND_1_WRITTEN) == DRIVER_FEATURES_0_AND_1_WRITTEN
     1383                        && !(pVirtio->fDriverFeaturesWritten & DRIVER_FEATURES_COMPLETE_HANDLED))
    12361384#ifdef IN_RING0
    12371385                        return VINF_IOM_R3_MMIO_WRITE;
    12381386#endif
    12391387#ifdef IN_RING3
    1240                         pVirtio->fLegacyDriver = 0;
    1241                         pVirtioCC->pfnGuestVersionHandler(pVirtio, 1 /* fModern */);
    1242 #endif
    1243                     }
     1388                        virtioR3DoFeaturesCompleteOnceOnly(pVirtio, pVirtioCC);
     1389#endif
    12441390                    VIRTIO_DEV_CONFIG_LOG_ACCESS(uDriverFeatures, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess + sizeof(uint32_t));
    12451391                    break;
     
    13891535 * This I/O handler exists only to handle access from legacy drivers.
    13901536 */
    1391 
    13921537static DECLCALLBACK(VBOXSTRICTRC) virtioLegacyIOPortIn(PPDMDEVINS pDevIns, void *pvUser, RTIOPORT offPort, uint32_t *pu32, unsigned cb)
    13931538{
    1394 
    13951539    PVIRTIOCORE   pVirtio = PDMINS_2_DATA(pDevIns, PVIRTIOCORE);
    13961540    STAM_PROFILE_ADV_START(&pVirtio->CTX_SUFF(StatRead), a);
    13971541
    13981542    RT_NOREF(pvUser);
     1543    Log(("%-23s: Port read at offset=%RTiop, cb=%#x%s",
     1544        __FUNCTION__, offPort, cb,
     1545        VIRTIO_DEV_CONFIG_MATCH_MEMBER(fIsrStatus, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort) ? "" : "\n"));
    13991546
    14001547    void *pv = pu32; /* To use existing macros */
     
    14121559    if (VIRTIO_DEV_CONFIG_MATCH_MEMBER(uDriverFeatures, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort))
    14131560    {
    1414         uint32_t val = pVirtio->uDriverFeatures & 0xffffffff;
     1561        uint32_t val = pVirtio->uDriverFeatures &  UINT32_C(0xffffffff);
    14151562        memcpy(pu32, &val, cb);
    14161563        VIRTIO_DEV_CONFIG_LOG_ACCESS(uDriverFeatures, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort);
     
    14201567    {
    14211568        *(uint8_t *)pu32 = pVirtio->fDeviceStatus;
    1422 
    14231569        if (LogIs7Enabled())
    14241570        {
     
    14351581        pVirtio->uISR = 0;
    14361582        virtioLowerInterrupt( pDevIns,  0);
    1437         Log((" ISR read and cleared\n"));
     1583        Log((" (ISR read and cleared)\n"));
    14381584    }
    14391585    else
     
    14821628        return rc;
    14831629    }
    1484 
    14851630    STAM_PROFILE_ADV_STOP(&pVirtio->CTX_SUFF(StatRead), a);
    14861631    return VINF_SUCCESS;
    14871632}
    1488 
    14891633
    14901634/**
     
    15041648    int fWrite = 1;         /* To use existing macros */
    15051649
    1506 //    LogFunc(("Write to port offset=%RTiop, cb=%#x, u32=%#x\n", offPort, cb, u32));
     1650    Log(("%-23s: Port written at offset=%RTiop, cb=%#x, u32=%#x\n",  __FUNCTION__, offPort, cb, u32));
    15071651
    15081652    if (VIRTIO_DEV_CONFIG_MATCH_MEMBER(   uVirtqSelect,        VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort))
     
    15351679                pVirtio->uDriverFeatures &= VIRTIO_DEV_INDEPENDENT_LEGACY_FEATURES_OFFERED;
    15361680        }
     1681        if (!((pVirtio->fDriverFeaturesWritten ^= 1) & 1))
     1682        {
     1683#ifdef IN_RING0
     1684            Log6(("%-23s: RING0 => RING3 (demote)\n", __FUNCTION__));
     1685            return VINF_IOM_R3_MMIO_WRITE;
     1686#endif
     1687#ifdef IN_RING3
     1688            PVIRTIOCORECC pVirtioCC = PDMINS_2_DATA_CC(pDevIns, PVIRTIOCORECC);
     1689            virtioR3DoFeaturesCompleteOnceOnly(pVirtio, pVirtioCC);
     1690#endif
     1691        }
    15371692        VIRTIO_DEV_CONFIG_LOG_ACCESS(uDriverFeatures,          VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort);
    15381693    }
     
    15561711            Log(("%-23s: Guest wrote fDeviceStatus ................ (%s)\n", __FUNCTION__, szOut));
    15571712        }
    1558 
    15591713        if (fDriverStateImproved  || fDriverInitiatedReset)
    15601714        {
     
    16931847
    16941848        /*
    1695          * Additionally, anytime any part of the device-specific configuration (which our client maintains)
    1696          * is READ it needs to be checked to see if it changed since the last time any part was read, in
    1697          * order to maintain the config generation (see VirtIO 1.0 spec, section 4.1.4.3.1)
     1849         * Anytime any part of the dev-specific dev config (which this virtio core implementation sees
     1850         * as a blob, and virtio dev-specific code separates into fields) is READ, it must be compared
     1851         * for deltas from previous read to maintain a config gen. seq. counter (VirtIO 1.0, section 4.1.4.3.1)
    16981852         */
    16991853        bool fDevSpecificFieldChanged = RT_BOOL(memcmp(pVirtioCC->pbDevSpecificCfg + uOffset,
     
    17061860        {
    17071861            ++pVirtio->uConfigGeneration;
    1708             Log6Func(("Bumped cfg. generation to %d because %s%s\n",
    1709                       pVirtio->uConfigGeneration,
     1862            Log6Func(("Bumped cfg. generation to %d because %s%s\n", pVirtio->uConfigGeneration,
    17101863                      fDevSpecificFieldChanged ? "<dev cfg changed> " : "",
    17111864                      pVirtio->fGenUpdatePending ? "<update was pending>" : ""));
     
    17691922#else
    17701923        STAM_PROFILE_ADV_STOP(&pVirtio->CTX_SUFF(StatWrite), a);
     1924        Log6(("%-23s: RING0 => RING3 (demote)\n", __FUNCTION__));
    17711925        return VINF_IOM_R3_MMIO_WRITE;
    17721926#endif
     
    18191973    if (uAddress == pVirtio->uPciCfgDataOff)
    18201974    {
    1821         /*
    1822          * VirtIO 1.0 spec section 4.1.4.7 describes a required alternative access capability
    1823          * whereby the guest driver can specify a bar, offset, and length via the PCI configuration space
    1824          * (the virtio_pci_cfg_cap capability), and access data items.
    1825          * This is used by BIOS to gain early boot access to the the storage device.
    1826          */
     1975     /* See comments in PCI Cfg capability initialization (in capabilities setup section of this code) */
    18271976        struct virtio_pci_cap *pPciCap = &pVirtioCC->pPciCfgCap->pciCap;
    18281977        uint32_t uLength = pPciCap->uLength;
     
    18632012    if (uAddress == pVirtio->uPciCfgDataOff)
    18642013    {
    1865         /* VirtIO 1.0 spec section 4.1.4.7 describes a required alternative access capability
    1866          * whereby the guest driver can specify a bar, offset, and length via the PCI configuration space
    1867          * (the virtio_pci_cfg_cap capability), and access data items.
    1868          * This is used by BIOS to gain early boot access to the the storage device.*/
    1869 
     2014        /* See comments in PCI Cfg capability initialization (in capabilities setup section of this code) */
    18702015        struct virtio_pci_cap *pPciCap = &pVirtioCC->pPciCfgCap->pciCap;
    18712016        uint32_t uLength = pPciCap->uLength;
     
    18892034
    18902035/*********************************************************************************************************************************
    1891 *   Saved state.                                                                                                                 *
     2036*   Saved state (SSM)                                                                                                            *
    18922037*********************************************************************************************************************************/
     2038
     2039
     2040/**
     2041 * Loads a saved device state (called from device-specific code on SSM final pass)
     2042 *
     2043 * @param   pVirtio     Pointer to the shared virtio state.
     2044 * @param   pHlp        The ring-3 device helpers.
     2045 * @param   pSSM        The saved state handle.
     2046 * @returns VBox status code.
     2047 */
     2048int virtioCoreR3LegacyDeviceLoadExec(PVIRTIOCORE pVirtio, PCPDMDEVHLPR3 pHlp,
     2049                               PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uVirtioLegacy_3_1_Beta)
     2050{
     2051    int rc;
     2052    uint32_t uDriverFeaturesLegacy32bit;
     2053
     2054    rc = pHlp->pfnSSMGetU32(  pSSM, &uDriverFeaturesLegacy32bit);
     2055    AssertRCReturn(rc, rc);
     2056    pVirtio->uDriverFeatures = (uint64_t)uDriverFeaturesLegacy32bit;
     2057
     2058    rc = pHlp->pfnSSMGetU16(  pSSM, &pVirtio->uVirtqSelect);
     2059    AssertRCReturn(rc, rc);
     2060
     2061    rc = pHlp->pfnSSMGetU8(   pSSM, &pVirtio->fDeviceStatus);
     2062    AssertRCReturn(rc, rc);
     2063
     2064    char szOut[80] = { 0 };
     2065    virtioCoreFormatDeviceStatus(pVirtio->fDeviceStatus, szOut, sizeof(szOut));
     2066    Log(("Loaded legacy device status = (%s)\n", szOut));
     2067
     2068    rc = pHlp->pfnSSMGetU8(   pSSM, &pVirtio->uISR);
     2069    AssertRCReturn(rc, rc);
     2070
      2071    uint32_t cQueues = 3; /* This constant default value was copied from the earliest v0.9 code */
     2072    if (uVersion > uVirtioLegacy_3_1_Beta)
     2073    {
     2074        rc = pHlp->pfnSSMGetU32(pSSM, &cQueues);
     2075        AssertRCReturn(rc, rc);
     2076    }
     2077
     2078    AssertLogRelMsgReturn(cQueues <= VIRTQ_MAX_COUNT, ("%#x\n", cQueues), VERR_SSM_LOAD_CONFIG_MISMATCH);
     2079    AssertLogRelMsgReturn(pVirtio->uVirtqSelect < cQueues || (cQueues == 0 && pVirtio->uVirtqSelect),
     2080                          ("uVirtqSelect=%u cQueues=%u\n", pVirtio->uVirtqSelect, cQueues),
     2081                          VERR_SSM_LOAD_CONFIG_MISMATCH);
     2082
     2083    Log(("\nRestoring %d  legacy-only virtio-net device queues from saved state:\n", cQueues));
     2084    for (unsigned uVirtq = 0; uVirtq < cQueues; uVirtq++)
     2085    {
     2086        PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtq];
     2087
     2088        if (uVirtq == cQueues - 1)
     2089            RTStrPrintf(pVirtq->szName, sizeof(pVirtq->szName), "legacy-ctrlq");
     2090        else if (uVirtq % 2)
     2091            RTStrPrintf(pVirtq->szName, sizeof(pVirtq->szName), "legacy-xmitq<%d>", uVirtq / 2);
     2092        else
     2093            RTStrPrintf(pVirtq->szName, sizeof(pVirtq->szName), "legacy-recvq<%d>", uVirtq / 2);
     2094
     2095        rc = pHlp->pfnSSMGetU16(pSSM, &pVirtq->uQueueSize);
     2096        AssertRCReturn(rc, rc);
     2097
     2098        uint32_t uVirtqPfn;
     2099        rc = pHlp->pfnSSMGetU32(pSSM, &uVirtqPfn);
     2100        AssertRCReturn(rc, rc);
     2101
     2102        rc = pHlp->pfnSSMGetU16(pSSM, &pVirtq->uAvailIdxShadow);
     2103        AssertRCReturn(rc, rc);
     2104
     2105        rc = pHlp->pfnSSMGetU16(pSSM, &pVirtq->uUsedIdxShadow);
     2106        AssertRCReturn(rc, rc);
     2107
     2108        if (uVirtqPfn)
     2109        {
     2110            pVirtq->GCPhysVirtqDesc  = (uint64_t)uVirtqPfn * VIRTIO_PAGE_SIZE;
     2111            pVirtq->GCPhysVirtqAvail = pVirtq->GCPhysVirtqDesc + sizeof(VIRTQ_DESC_T) * pVirtq->uQueueSize;
     2112            pVirtq->GCPhysVirtqUsed  =
     2113                RT_ALIGN(pVirtq->GCPhysVirtqAvail + RT_UOFFSETOF_DYN(VIRTQ_AVAIL_T, auRing[pVirtq->uQueueSize]), VIRTIO_PAGE_SIZE);
     2114            pVirtq->uEnable = 1;
     2115        }
     2116        else
     2117        {
     2118            LogFunc(("WARNING: QUEUE \"%s\" PAGE NUMBER ZERO IN SAVED STATE\n", pVirtq->szName));
     2119            pVirtq->uEnable = 0;
     2120        }
     2121        pVirtq->uNotifyOffset = 0;  /* unused in legacy mode */
     2122        pVirtq->uMsixVector   = 0;  /* unused in legacy mode */
     2123    }
     2124    pVirtio->fGenUpdatePending = 0; /* unused in legacy mode */
     2125    pVirtio->uConfigGeneration = 0; /* unused in legacy mode */
     2126    pVirtio->uPciCfgDataOff    = 0; /* unused in legacy mode (port I/O used instead)   */
     2127
     2128    return VINF_SUCCESS;
     2129}
     2130
     2131/**
     2132 * Loads a saved device state (called from device-specific code on SSM final pass)
     2133 *
     2134 * Note: This loads state saved by a Modern (VirtIO 1.0+) device, of which this transitional device is one,
     2135 *       and thus supports both legacy and modern guest virtio drivers.
     2136 *
     2137 * @param   pVirtio     Pointer to the shared virtio state.
     2138 * @param   pHlp        The ring-3 device helpers.
     2139 * @param   pSSM        The saved state handle.
     2140 * @returns VBox status code.
     2141 */
     2142int virtioCoreR3ModernDeviceLoadExec(PVIRTIOCORE pVirtio, PCPDMDEVHLPR3 pHlp, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uTestVersion, uint32_t cQueues)
     2143{
     2144    RT_NOREF2(cQueues, uVersion);
     2145    LogFunc(("\n"));
     2146    /*
     2147     * Check the marker and (embedded) version number.
     2148     */
     2149    uint64_t uMarker = 0;
     2150    int rc;
     2151
     2152    rc = pHlp->pfnSSMGetU64(pSSM, &uMarker);
     2153    AssertRCReturn(rc, rc);
     2154    if (uMarker != VIRTIO_SAVEDSTATE_MARKER)
     2155        return pHlp->pfnSSMSetLoadError(pSSM, VERR_SSM_DATA_UNIT_FORMAT_CHANGED, RT_SRC_POS,
     2156                                        N_("Expected marker value %#RX64 found %#RX64 instead"),
     2157                                        VIRTIO_SAVEDSTATE_MARKER, uMarker);
     2158    uint32_t uVersionSaved = 0;
     2159    rc = pHlp->pfnSSMGetU32(pSSM, &uVersionSaved);
     2160    AssertRCReturn(rc, rc);
     2161    if (uVersionSaved != uTestVersion)
     2162        return pHlp->pfnSSMSetLoadError(pSSM, VERR_SSM_DATA_UNIT_FORMAT_CHANGED, RT_SRC_POS,
     2163                                        N_("Unsupported virtio version: %u"), uVersionSaved);
     2164    /*
     2165     * Load the state.
     2166     */
     2167    rc = pHlp->pfnSSMGetU32(  pSSM, &pVirtio->fLegacyDriver);
     2168    AssertRCReturn(rc, rc);
     2169    rc = pHlp->pfnSSMGetBool( pSSM, &pVirtio->fGenUpdatePending);
     2170    AssertRCReturn(rc, rc);
     2171    rc = pHlp->pfnSSMGetU8(   pSSM, &pVirtio->fDeviceStatus);
     2172    AssertRCReturn(rc, rc);
     2173    rc = pHlp->pfnSSMGetU8(   pSSM, &pVirtio->uConfigGeneration);
     2174    AssertRCReturn(rc, rc);
     2175    rc = pHlp->pfnSSMGetU8(   pSSM, &pVirtio->uPciCfgDataOff);
     2176    AssertRCReturn(rc, rc);
     2177    rc = pHlp->pfnSSMGetU8(   pSSM, &pVirtio->uISR);
     2178    AssertRCReturn(rc, rc);
     2179    rc = pHlp->pfnSSMGetU16(  pSSM, &pVirtio->uVirtqSelect);
     2180    AssertRCReturn(rc, rc);
     2181    rc = pHlp->pfnSSMGetU32(  pSSM, &pVirtio->uDeviceFeaturesSelect);
     2182    AssertRCReturn(rc, rc);
     2183    rc = pHlp->pfnSSMGetU32(  pSSM, &pVirtio->uDriverFeaturesSelect);
     2184    AssertRCReturn(rc, rc);
     2185    rc = pHlp->pfnSSMGetU64(  pSSM, &pVirtio->uDriverFeatures);
     2186    AssertRCReturn(rc, rc);
     2187
     2188    /** @todo Adapt this loop use cQueues argument instead of static queue count (safely with SSM versioning) */
     2189    for (uint32_t i = 0; i < VIRTQ_MAX_COUNT; i++)
     2190    {
     2191        PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[i];
     2192        rc = pHlp->pfnSSMGetGCPhys64( pSSM, &pVirtq->GCPhysVirtqDesc);
     2193        AssertRCReturn(rc, rc);
     2194        rc = pHlp->pfnSSMGetGCPhys64( pSSM, &pVirtq->GCPhysVirtqAvail);
     2195        AssertRCReturn(rc, rc);
     2196        rc = pHlp->pfnSSMGetGCPhys64( pSSM, &pVirtq->GCPhysVirtqUsed);
     2197        AssertRCReturn(rc, rc);
     2198        rc = pHlp->pfnSSMGetU16(      pSSM, &pVirtq->uNotifyOffset);
     2199        AssertRCReturn(rc, rc);
     2200        rc = pHlp->pfnSSMGetU16(      pSSM, &pVirtq->uMsixVector);
     2201        AssertRCReturn(rc, rc);
     2202        rc = pHlp->pfnSSMGetU16(      pSSM, &pVirtq->uEnable);
     2203        AssertRCReturn(rc, rc);
     2204        rc = pHlp->pfnSSMGetU16(      pSSM, &pVirtq->uQueueSize);
     2205        AssertRCReturn(rc, rc);
     2206        rc = pHlp->pfnSSMGetU16(      pSSM, &pVirtq->uAvailIdxShadow);
     2207        AssertRCReturn(rc, rc);
     2208        rc = pHlp->pfnSSMGetU16(      pSSM, &pVirtq->uUsedIdxShadow);
     2209        AssertRCReturn(rc, rc);
     2210        rc = pHlp->pfnSSMGetMem( pSSM, pVirtq->szName,  sizeof(pVirtq->szName));
     2211        AssertRCReturn(rc, rc);
     2212    }
     2213    return VINF_SUCCESS;
     2214}
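The two loaders above are intended to be dispatched from the device's FNSSMDEVLOADEXEC handler depending on the saved-state version encountered. A hypothetical fragment (the VIRTIONET_SAVEDSTATE_* constants and cVirtqs are placeholders for whatever the device-specific code actually defines):

    /* Hypothetical FNSSMDEVLOADEXEC fragment; uVersion, pSSM, pVirtio and cVirtqs are locals
     * of the device's load handler, and the version constants are placeholders. */
    PCPDMDEVHLPR3 pHlp = pDevIns->pHlpR3;
    int rc;
    if (uVersion <= VIRTIONET_SAVEDSTATE_VERSION_LEGACY)
        rc = virtioCoreR3LegacyDeviceLoadExec(pVirtio, pHlp, pSSM, uVersion,
                                              VIRTIONET_SAVEDSTATE_VERSION_3_1_BETA);
    else
        rc = virtioCoreR3ModernDeviceLoadExec(pVirtio, pHlp, pSSM, uVersion,
                                              VIRTIONET_SAVEDSTATE_VERSION, cVirtqs);
    AssertRCReturn(rc, rc);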
    18932215
    18942216/**
     
    19002222 * @returns VBox status code.
    19012223 */
    1902 int virtioCoreR3SaveExec(PVIRTIOCORE pVirtio, PCPDMDEVHLPR3 pHlp, PSSMHANDLE pSSM)
    1903 {
     2224int virtioCoreR3SaveExec(PVIRTIOCORE pVirtio, PCPDMDEVHLPR3 pHlp, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t cQueues)
     2225{
     2226    RT_NOREF(cQueues);
     2227    /** @todo figure out a way to save cQueues (with SSM versioning) */
     2228
    19042229    LogFunc(("\n"));
    19052230    pHlp->pfnSSMPutU64(pSSM, VIRTIO_SAVEDSTATE_MARKER);
    1906     pHlp->pfnSSMPutU32(pSSM, VIRTIO_SAVEDSTATE_VERSION);
    1907 
     2231    pHlp->pfnSSMPutU32(pSSM, uVersion);
     2232
     2233    pHlp->pfnSSMPutU32( pSSM, pVirtio->fLegacyDriver);
    19082234    pHlp->pfnSSMPutBool(pSSM, pVirtio->fGenUpdatePending);
    19092235    pHlp->pfnSSMPutU8(  pSSM, pVirtio->fDeviceStatus);
     
    19322258        AssertRCReturn(rc, rc);
    19332259    }
    1934 
    1935     return VINF_SUCCESS;
    1936 }
    1937 
    1938 /**
    1939  * Called from the FNSSMDEVLOADEXEC function of the device.
    1940  *
    1941  * @param   pVirtio     Pointer to the shared virtio state.
    1942  * @param   pHlp        The ring-3 device helpers.
    1943  * @param   pSSM        The saved state handle.
    1944  * @returns VBox status code.
    1945  */
    1946 int virtioCoreR3LoadExec(PVIRTIOCORE pVirtio, PCPDMDEVHLPR3 pHlp, PSSMHANDLE pSSM)
    1947 {
    1948     LogFunc(("\n"));
    1949     /*
    1950      * Check the marker and (embedded) version number.
    1951      */
    1952     uint64_t uMarker = 0;
    1953     int rc = pHlp->pfnSSMGetU64(pSSM, &uMarker);
    1954     AssertRCReturn(rc, rc);
    1955     if (uMarker != VIRTIO_SAVEDSTATE_MARKER)
    1956         return pHlp->pfnSSMSetLoadError(pSSM, VERR_SSM_DATA_UNIT_FORMAT_CHANGED, RT_SRC_POS,
    1957                                         N_("Expected marker value %#RX64 found %#RX64 instead"),
    1958                                         VIRTIO_SAVEDSTATE_MARKER, uMarker);
    1959     uint32_t uVersion = 0;
    1960     rc = pHlp->pfnSSMGetU32(pSSM, &uVersion);
    1961     AssertRCReturn(rc, rc);
    1962     if (uVersion != VIRTIO_SAVEDSTATE_VERSION)
    1963         return pHlp->pfnSSMSetLoadError(pSSM, VERR_SSM_DATA_UNIT_FORMAT_CHANGED, RT_SRC_POS,
    1964                                         N_("Unsupported virtio version: %u"), uVersion);
    1965     /*
    1966      * Load the state.
    1967      */
    1968     pHlp->pfnSSMGetBool( pSSM, &pVirtio->fGenUpdatePending);
    1969     pHlp->pfnSSMGetU8(   pSSM, &pVirtio->fDeviceStatus);
    1970     pHlp->pfnSSMGetU8(   pSSM, &pVirtio->uConfigGeneration);
    1971     pHlp->pfnSSMGetU8(   pSSM, &pVirtio->uPciCfgDataOff);
    1972     pHlp->pfnSSMGetU8(   pSSM, &pVirtio->uISR);
    1973     pHlp->pfnSSMGetU16(  pSSM, &pVirtio->uVirtqSelect);
    1974     pHlp->pfnSSMGetU32(  pSSM, &pVirtio->uDeviceFeaturesSelect);
    1975     pHlp->pfnSSMGetU32(  pSSM, &pVirtio->uDriverFeaturesSelect);
    1976     pHlp->pfnSSMGetU64(  pSSM, &pVirtio->uDriverFeatures);
    1977 
    1978     for (uint32_t i = 0; i < VIRTQ_MAX_COUNT; i++)
    1979     {
    1980         PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[i];
    1981 
    1982         pHlp->pfnSSMGetGCPhys64( pSSM, &pVirtq->GCPhysVirtqDesc);
    1983         pHlp->pfnSSMGetGCPhys64( pSSM, &pVirtq->GCPhysVirtqAvail);
    1984         pHlp->pfnSSMGetGCPhys64( pSSM, &pVirtq->GCPhysVirtqUsed);
    1985         pHlp->pfnSSMGetU16(      pSSM, &pVirtq->uNotifyOffset);
    1986         pHlp->pfnSSMGetU16(      pSSM, &pVirtq->uMsixVector);
    1987         pHlp->pfnSSMGetU16(      pSSM, &pVirtq->uEnable);
    1988         pHlp->pfnSSMGetU16(      pSSM, &pVirtq->uQueueSize);
    1989         pHlp->pfnSSMGetU16(      pSSM, &pVirtq->uAvailIdxShadow);
    1990         pHlp->pfnSSMGetU16(      pSSM, &pVirtq->uUsedIdxShadow);
    1991         rc = pHlp->pfnSSMGetMem( pSSM, pVirtq->szName,  sizeof(pVirtq->szName));
    1992         AssertRCReturn(rc, rc);
    1993     }
    1994 
    19952260    return VINF_SUCCESS;
    19962261}
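Correspondingly, the device's FNSSMDEVSAVEEXEC handler now passes its own saved-state version and queue count down to the core. A hypothetical fragment with placeholder names:

    /* Hypothetical FNSSMDEVSAVEEXEC fragment; the version constant and cVirtqs are placeholders. */
    int rc = virtioCoreR3SaveExec(pVirtio, pDevIns->pHlpR3, pSSM,
                                  VIRTIONET_SAVEDSTATE_VERSION, cVirtqs);
    AssertRCReturn(rc, rc);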
     
    20022267
    20032268/**
    2004  * This must be called by the client to handle VM state changes
    2005  * after the client takes care of its device-specific tasks for the state change.
    2006  * (i.e. Reset, suspend, power-off, resume)
     2269 * This must be called by the client to handle VM state changes after the client takes care of its device-specific
     2270 * tasks for the state change (i.e. reset, suspend, power-off, resume)
    20072271 *
    20082272 * @param   pDevIns     The device instance.
     
    20572321/** API Function: See header file */
    20582322int virtioCoreR3Init(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTIOCORECC pVirtioCC, PVIRTIOPCIPARAMS pPciParams,
    2059                      const char *pcszInstance, uint64_t fDevSpecificFeatures, void *pvDevSpecificCfg, uint16_t cbDevSpecificCfg)
     2323                     const char *pcszInstance, uint64_t fDevSpecificFeatures, uint32_t fOfferLegacy,
     2324                     void *pvDevSpecificCfg, uint16_t cbDevSpecificCfg)
    20602325{
    20612326    /*
    2062      * The pVirtio state must be the first member of the shared device instance
    2063      * data, otherwise we cannot get our bearings in the PCI configuration callbacks.
     2327     * Virtio state must be the first member of shared device instance data,
     2328     * otherwise can't get our bearings in PCI config callbacks.
    20642329     */
    20652330    AssertLogRelReturn(pVirtio == PDMINS_2_DATA(pDevIns, PVIRTIOCORE), VERR_STATE_CHANGED);
     
    20732338    AssertReturn(pVirtioCC->pfnStatusChanged, VERR_INVALID_POINTER);
    20742339    AssertReturn(pVirtioCC->pfnVirtqNotified, VERR_INVALID_POINTER);
    2075     AssertReturn(pVirtioCC->pfnGuestVersionHandler,  VERR_INVALID_POINTER);
     2340    AssertReturn(pVirtioCC->pfnFeatureNegotiationComplete,  VERR_INVALID_POINTER);
    20762341    AssertReturn(VIRTQ_SIZE > 0 && VIRTQ_SIZE <= 32768,  VERR_OUT_OF_RANGE); /* VirtIO specification-defined limit */
    20772342
    20782343#if 0 /* Until pdmR3DvHlp_PCISetIrq() impl is fixed and Assert that limits vec to 0 is removed
    2079        * The legacy MSI support has not been implemented yet
     2344       * VBox legacy MSI support has not been implemented yet
    20802345       */
    20812346# ifdef VBOX_WITH_MSI_DEVICES
     
    20842349#endif
    20852350
    2086     /* Tell the device-specific code that guest is in legacy mode (for now) */
    2087     pVirtioCC->pfnGuestVersionHandler(pVirtio, false /* fModern */);
    2088 
    20892351    /*
    2090      * The host features offered include both device-specific features
    2091      * and reserved feature bits (device independent)
     2352     * Host features (presented as a smörgasbord for guest to select from)
     2353     * include both dev-specific features & reserved dev-independent features (bitmask).
    20922354     */
    20932355    pVirtio->uDeviceFeatures = VIRTIO_F_VERSION_1
    20942356                             | VIRTIO_DEV_INDEPENDENT_FEATURES_OFFERED
    20952357                             | fDevSpecificFeatures;
     2358
     2359    pVirtio->fLegacyDriver = pVirtio->fOfferLegacy = fOfferLegacy;
    20962360
    20972361    RTStrCopy(pVirtio->szInstance, sizeof(pVirtio->szInstance), pcszInstance);
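With the added fOfferLegacy argument, the device now decides at construction time whether a transitional (legacy-capable) personality is offered at all. A hypothetical construct-time call; pThis/pThisCC and the config member names are made up for illustration:

    /* Hypothetical fragment from a device's pfnConstruct; structure/member names are made up. */
    rc = virtioCoreR3Init(pDevIns, &pThis->Virtio, &pThisCC->Virtio, &PciParams,
                          pThis->szInst,
                          fFeaturesOffered        /* device-specific feature bits       */,
                          true                    /* fOfferLegacy: transitional device  */,
                          &pThis->virtioDevCfg    /* pvDevSpecificCfg                   */,
                          sizeof(pThis->virtioDevCfg));
    AssertRCReturn(rc, rc);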
     
    21382402    uint32_t cbRegion = 0;
    21392403
    2140     /* Common capability (VirtIO 1.0 spec, section 4.1.4.3) */
     2404    /*
     2405     * Common capability (VirtIO 1.0, section 4.1.4.3)
     2406     */
    21412407    pCfg = (PVIRTIO_PCI_CAP_T)&pPciDev->abConfig[0x40];
    21422408    pCfg->uCfgType = VIRTIO_PCI_CAP_COMMON_CFG;
     
    21522418
    21532419    /*
    2154      * Notify capability (VirtIO 1.0 spec, section 4.1.4.4). Note: uLength is based on the choice
    2155      * of this implementation to make each queue's uNotifyOffset equal to (VirtqSelect) ordinal
    2156      * value of the queue (different strategies are possible according to spec).
     2420     * Notify capability (VirtIO 1.0, section 4.1.4.4).
     2421     *
     2422     * The size of the spec-defined subregion described by this VirtIO capability is
     2423     * based on this implementation's choice to make each queue's notification offset equal
     2424     * to the queue's ordinal position (i.e. its queue selector value). The VirtIO
     2425     * specification leaves the queue notification area layout up to the implementation.
    21572426     */
    21582427    pCfg = (PVIRTIO_PCI_CAP_T)&pPciDev->abConfig[pCfg->uCapNext];
     
    21702439    pVirtioCC->pNotifyCap->uNotifyOffMultiplier = VIRTIO_NOTIFY_OFFSET_MULTIPLIER;
    21712440
    2172     /* ISR capability (VirtIO 1.0 spec, section 4.1.4.5)
     2441    /* ISR capability (VirtIO 1.0, section 4.1.4.5)
    21732442     *
    2174      * VirtIO 1.0 spec says 8-bit, unaligned in MMIO space. Example/diagram
    2175      * of spec shows it as a 32-bit field with upper bits 'reserved'
    2176      * Will take spec's words more literally than the diagram for now.
     2443     * VirtIO 1.0 spec says 8-bit, unaligned, in MMIO space. The specification's example/diagram
     2444     * illustrates this capability as a 32-bit field with the upper bits 'reserved'. Those depictions
     2445     * differ; the spec's wording, not the diagram, is what works in practice.
    21772446     */
    21782447    pCfg = (PVIRTIO_PCI_CAP_T)&pPciDev->abConfig[pCfg->uCapNext];
     
    21892458    pVirtioCC->pIsrCap = pCfg;
    21902459
    2191     /*  PCI Cfg capability (VirtIO 1.0 spec, section 4.1.4.7)
    2192      *  This capability doesn't get page-MMIO mapped. Instead uBar, uOffset and uLength are intercepted
    2193      *  by trapping PCI configuration I/O and get modulated by consumers to locate fetch and read/write
    2194      *  values from any region. NOTE: The linux driver not only doesn't use this feature, it will not
    2195      *  even list it as present if uLength isn't non-zero and also 4-byte-aligned as the linux driver is
    2196      *  initializing.
     2460    /*  PCI Cfg capability (VirtIO 1.0, section 4.1.4.7)
     2461     *
     2462     *  This capability facilitates early-boot access to this device (e.g. by the BIOS).
     2463     *  This region isn't page-MMIO mapped. PCI configuration accesses are intercepted,
     2464     *  wherein uBar, uOffset and uLength are set by the accessing guest software to locate
     2465     *  and read/write values in any part of any region. (NOTE: The Linux driver doesn't use
     2466     *  this feature; the capability only appears in lspci output on Linux if uLength is
     2467     *  non-zero and 4-byte aligned when the Linux virtio driver initializes.)
    21972468     */
    21982469    pVirtio->uPciCfgDataOff = pCfg->uCapNext + RT_OFFSETOF(VIRTIO_PCI_CFG_CAP_T, uPciCfgData);
     
    22112482    if (pVirtioCC->pbDevSpecificCfg)
    22122483    {
    2213         /* Device specific config capability (via VirtIO 1.0, section 4.1.4.6).
     2484        /* Device-specific config capability (VirtIO 1.0, section 4.1.4.6).
     2485         *
    22142486         * Client defines the device-specific config struct and passes size to virtioCoreR3Init()
    2215          * to inform this. */
     2487         * to inform this code.
     2488         */
    22162489        pCfg = (PVIRTIO_PCI_CAP_T)&pPciDev->abConfig[pCfg->uCapNext];
    22172490        pCfg->uCfgType = VIRTIO_PCI_CAP_DEVICE_CFG;
     
    22632536        return PDMDEV_SET_ERROR(pDevIns, rc, N_("virtio: out of memory allocating string")); /* can we put params in this error? */
    22642537
    2265     /* As a transitional device that supports legacy VirtIO drivers, this VirtIO device generic implementation presents
    2266      * legacy driver interface in I/O space at BAR0. The following maps the common (e.g. device independent)
    2267      * dev config area as well as device-specific dev config area (whose size is passed to init function of this VirtIO
    2268      * generic device code) for access via Port I/O, since legacy drivers (e.g. pre VirtIO 1.0) don't use MMIO callbacks.
    2269      * (See VirtIO 1.1, Section 4.1.4.8).
    2270      */
    2271     rc = PDMDevHlpPCIIORegionCreateIo(pDevIns, VIRTIO_REGION_LEGACY_IO, sizeof(VIRTIO_LEGACY_PCI_COMMON_CFG_T) + cbDevSpecificCfg,
    2272                                       virtioLegacyIOPortOut, virtioLegacyIOPortIn, NULL /*pvUser*/, pVirtioCC->pcszPortIoName,
    2273                                       NULL /*paExtDescs*/, &pVirtio->hLegacyIoPorts);
    2274     AssertLogRelRCReturn(rc, PDMDEV_SET_ERROR(pDevIns, rc, N_("virtio: cannot register legacy config in I/O space at BAR0 */")));
     2538    if (pVirtio->fOfferLegacy)
     2539    {
     2540        /* As a transitional device that supports legacy VirtIO drivers, this VirtIO device generic implementation presents
     2541         * legacy driver interface in I/O space at BAR0. The following maps the common (e.g. device independent)
     2542         * dev config area as well as device-specific dev config area (whose size is passed to init function of this VirtIO
     2543         * generic device code) for access via Port I/O, since legacy drivers (e.g. pre VirtIO 1.0) don't use MMIO callbacks.
     2544         * (See VirtIO 1.1, Section 4.1.4.8).
     2545         */
     2546        rc = PDMDevHlpPCIIORegionCreateIo(pDevIns, VIRTIO_REGION_LEGACY_IO, sizeof(VIRTIO_LEGACY_PCI_COMMON_CFG_T) + cbDevSpecificCfg,
     2547                                          virtioLegacyIOPortOut, virtioLegacyIOPortIn, NULL /*pvUser*/, pVirtioCC->pcszPortIoName,
     2548                                          NULL /*paExtDescs*/, &pVirtio->hLegacyIoPorts);
     2549        AssertLogRelRCReturn(rc, PDMDEV_SET_ERROR(pDevIns, rc, N_("virtio: cannot register legacy config in I/O space at BAR0")));
     2550    }
    22752551
    22762552    /*  Note: The Linux driver at drivers/virtio/virtio_pci_modern.c tries to map at least a page for the
    2277      * 'unknown' device-specific capability without querying the capability to figure
    2278      *  out size, so pad with an extra page
     2553     * 'unknown' device-specific capability without querying the capability to determine size, so pad w/extra page.
    22792554     */
    22802555    rc = PDMDevHlpPCIIORegionCreateMmio(pDevIns, VIRTIO_REGION_PCI_CAP, RT_ALIGN_32(cbRegion + VIRTIO_PAGE_SIZE, VIRTIO_PAGE_SIZE),
     
    23042579# endif /* VBOX_WITH_STATISTICS */
    23052580
    2306     virtioResetDevice(pDevIns, pVirtio); /* Reset VirtIO specific state of device */
    2307 
    23082581    return VINF_SUCCESS;
    23092582}
  • trunk/src/VBox/Devices/VirtIO/VirtioCore.h

    r92091 r92939  
    3636# define VIRTIO_HEX_DUMP(logLevel, pv, cb, base, title) do { } while (0)
    3737#endif
     38
     39/** Marks the start of the virtio saved state (just for sanity). */
     40#define VIRTIO_SAVEDSTATE_MARKER                        UINT64_C(0x1133557799bbddff)
    3841
    3942/** Pointer to the shared VirtIO state. */
     
    5760#define VIRTIO_PAGE_SIZE                 4096                    /**< Page size used by VirtIO specification   */
    5861
    59 
    60 /* Note: The VirtIO specification, particularly rev. 0.95, and clarified in rev 1.0 for transitional devices,
    61          says the page sized used for Queue Size calculations is usually 4096 bytes, but dependent on the
    62          the transport. In an appendix of the 0.95 spec, the 'mmio device', which has not been
    63          implemented by VBox legacy device in VirtualBox, says guest must report the page size. For now
    64          will set page size to a static 4096 based on the original VBox legacy VirtIO implementation which
    65          tied it to PAGE_SIZE which appears to work (or at least good enough for most practical purposes)      */
    66 
    67 
    68 /** The following virtioCoreGCPhysChain*() functions mimic the functionality of the related RT s/g functions,
    69  *  except they work with the data type GCPhys rather than void *
     62/**
     63 * @todo Move the following virtioCoreGCPhysChain*() functions into some common location
     64 *       in the VirtualBox source tree and out of this code.
     65 *
     66 *       They behave identically to the S/G utilities in the RT library, except they work with
     67 *       the GCPhys data type instead of void *, to avoid a potentially disastrous mismatch
     68 *       between sizeof(void *) and sizeof(GCPhys).
     69 *
    7070 */
    7171typedef struct VIRTIOSGSEG                                      /**< An S/G entry                              */
     
    9191
    9292/**
    93  * VirtIO buffers are descriptor chains (scatter-gather vectors). Each buffer is described
    94  * by the index of its head descriptor, which in optionally chains to another descriptor
    95  * and so on.
    96  *
    97  * Each descriptor, [len, GCPhys] pair in the chain represents either an OUT segment (e.g. guest-to-host)
    98  * or an IN segment (host-to-guest). A VIRTQBUF is created and retured from a call to
    99  * virtioCoreR3VirtqAvailBufPeek() or virtioCoreR3VirtqAvailBufGet(). That function consolodates
    100  * the VirtIO descriptor chain into a representation, where pSgPhysSend is a GCPhys s/g buffer containing
    101  * all of the OUT descriptors and pSgPhysReturn is a GCPhys s/g buffer containing all of IN descriptors
    102  * to be filled with data on the host to return to theguest.
     93 * VirtIO buffers are descriptor chains (e.g. scatter-gather vectors). A VirtIO buffer is referred to by the index
     94 * of its head descriptor. Each descriptor optionally chains to another descriptor, and so on.
     95 *
     96 * Each [length, GCPhys] pair in the chain represents either an OUT segment (e.g. guest-to-host)
     97 * or an IN segment (host-to-guest).
     98 *
     99 * A VIRTQBUF is created and returned from a call to either virtioCoreR3VirtqAvailBufPeek() or virtioCoreR3VirtqAvailBufGet().
     100 *
     101 * Those functions consolidate the VirtIO descriptor chain into a single representation where:
     102 *
     103 *     pSgPhysSend    GCPhys s/g buffer containing all of the (VirtIO) OUT descriptors
     104 *     pSgPhysReturn  GCPhys s/g buffer containing all of the (VirtIO)  IN descriptors
     105 *
     106 * The OUT descriptors carry data sent from guest to host (dev-specific commands and/or data).
     107 * The IN descriptors are to be filled with data on the host and returned to the guest.
     108 *
    103109 */
    104110typedef struct VIRTQBUF
     
    166172static const VIRTIO_FEATURES_LIST s_aCoreFeatures[] =
    167173{
     174    { VIRTIO_F_VERSION_1,               "   VERSION_1            Guest driver supports VirtIO specification V1.0+ (e.g. \"modern\")\n" },
     175    { VIRTIO_F_RING_EVENT_IDX,          "   RING_EVENT_IDX       Enables use_event and avail_event fields described in 2.4.7, 2.4.8\n" },
    168176    { VIRTIO_F_RING_INDIRECT_DESC,      "   RING_INDIRECT_DESC   Driver can use descriptors with VIRTQ_DESC_F_INDIRECT flag set\n" },
    169     { VIRTIO_F_RING_EVENT_IDX,          "   RING_EVENT_IDX       Enables use_event and avail_event fields described in 2.4.7, 2.4.8\n" },
    170     { VIRTIO_F_VERSION_1,               "   VERSION              Used to detect legacy drivers.\n" },
    171177};
    172 
    173178
    174179#define VIRTIO_DEV_INDEPENDENT_FEATURES_OFFERED ( 0 )            /**< TBD: Add VIRTIO_F_INDIRECT_DESC           */
     
    202207    kvirtIoVmStateChangedFor32BitHack = 0x7fffffff
    203208} VIRTIOVMSTATECHANGED;
    204 
    205 
    206209
    207210/** @def Virtio Device PCI Capabilities type codes */
     
    305308typedef struct VIRTQUEUE
    306309{
    307     RTGCPHYS                    GCPhysVirtqDesc;                  /**< (MMIO) PhysAdr per-Q desc structs   GUEST */
    308     RTGCPHYS                    GCPhysVirtqAvail;                 /**< (MMIO) PhysAdr per-Q avail structs  GUEST */
    309     RTGCPHYS                    GCPhysVirtqUsed;                  /**< (MMIO) PhysAdr per-Q used structs   GUEST */
    310     uint16_t                    uMsixVector;                      /**< (MMIO) Per-queue vector for MSI-X   GUEST */
    311     uint16_t                    uEnable;                          /**< (MMIO) Per-queue enable             GUEST */
    312     uint16_t                    uNotifyOffset;                    /**< (MMIO) per-Q notify offset          HOST */
    313     uint16_t                    uQueueSize;                       /**< (MMIO) Per-queue size          HOST/GUEST */
     310    RTGCPHYS                    GCPhysVirtqDesc;                  /**< (MMIO) Addr of virtq's desc  ring   GUEST */
     311    RTGCPHYS                    GCPhysVirtqAvail;                 /**< (MMIO) Addr of virtq's avail ring   GUEST */
     312    RTGCPHYS                    GCPhysVirtqUsed;                  /**< (MMIO) Addr of virtq's used  ring   GUEST */
     313    uint16_t                    uMsixVector;                      /**< (MMIO) MSI-X vector                 GUEST */
     314    uint16_t                    uEnable;                          /**< (MMIO) Queue enable flag            GUEST */
     315    uint16_t                    uNotifyOffset;                    /**< (MMIO) Notification offset for queue HOST */
     316    uint16_t                    uQueueSize;                       /**< (MMIO) Size of queue           HOST/GUEST */
    314317    uint16_t                    uAvailIdxShadow;                  /**< Consumer's position in avail ring         */
    315318    uint16_t                    uUsedIdxShadow;                   /**< Consumer's position in used ring          */
     
    317320    char                        szName[32];                       /**< Dev-specific name of queue                */
    318321    bool                        fUsedRingEvent;                   /**< Flags if used idx to notify guest reached */
    319     uint8_t                     padding[3];
     322    bool                        fAttached;                        /**< Flags if dev-specific client attached     */
    320323} VIRTQUEUE, *PVIRTQUEUE;
    321324
     
    331334    uint64_t                    uDeviceFeatures;                  /**< (MMIO) Host features offered         HOST */
    332335    uint64_t                    uDriverFeatures;                  /**< (MMIO) Host features accepted       GUEST */
     336    uint32_t                    fDriverFeaturesWritten;           /**< (MMIO) Host features complete tracking    */
    333337    uint32_t                    uDeviceFeaturesSelect;            /**< (MMIO) hi/lo select uDeviceFeatures GUEST */
    334338    uint32_t                    uDriverFeaturesSelect;            /**< (MMIO) hi/lo select uDriverFeatures GUEST */
     
    343347    uint8_t                     fMsiSupport;                      /**< Flag set if using MSI instead of ISR      */
    344348    uint16_t                    uVirtqSelect;                     /**< (MMIO) queue selector               GUEST */
    345     uint32_t                    fLegacyDriver;                    /**< Set if guest driver < VirtIO 1.0          */
     349    uint32_t                    fLegacyDriver;                    /**< Set if guest drv < VirtIO 1.0 and allowed */
     350    uint32_t                    fOfferLegacy;                     /**< Set at init call from dev-specific code   */
    346351
    347352    /** @name The locations of the capability structures in PCI config space and the BAR.
     
    354359    /** @} */
    355360
    356 
    357 
    358361    IOMMMIOHANDLE               hMmioPciCap;                      /**< MMIO handle of PCI cap. region (\#2)      */
    359362    IOMIOPORTHANDLE             hLegacyIoPorts;                   /**< Handle of legacy I/O port range.          */
    360 
    361363
    362364#ifdef VBOX_WITH_STATISTICS
     
    374376    STAMPROFILEADV              StatWriteRC;                       /** I/O port and MMIO R3 Write profiling      */
    375377#endif
    376 
    377 
    378378    /** @} */
     379
    379380} VIRTIOCORE;
    380381
     
    389390     * @{  */
    390391    /**
    391      * Implementation-specific client callback to report VirtIO version as modern or legacy.
    392      * That's the only meaningful distinction in the VirtIO specification. Beyond that
    393      * versioning is loosely discernable through feature negotiation. There will be two callbacks,
    394      * the first indicates the guest driver is considered legacy VirtIO, as it is critical to
    395      * assume that initially. A 2nd callback will occur during feature negotiation
    396      * which will indicate the guest is modern, if the guest acknowledges VIRTIO_F_VERSION_1,
    397      * feature, or legacy if the feature isn't negotiated. That 2nd callback allows
    398      * the device-specific code to configure its behavior in terms of both guest version and features.
     392     * Implementation-specific client callback to report that VirtIO feature negotiation is
     393     * complete. It should be invoked by the VirtIO core only once.
    399394     *
    400      * @param   pVirtio    Pointer to the shared virtio state.
    401      * @param   fModern    True if guest driver identified itself as modern (e.g. VirtIO 1.0 featured)
     395     * @param   pVirtio           Pointer to the shared virtio state.
     396     * @param   fDriverFeatures   Bitmask of features the guest driver has accepted/declined.
     397     * @param   fLegacy           true if legacy mode was offered and the guest driver has not (yet)
     398     *                            identified itself as modern (i.e. VirtIO 1.0 featured)
    402399     */
    403     DECLCALLBACKMEMBER(void, pfnGuestVersionHandler,(PVIRTIOCORE pVirtio, uint32_t fModern));
     400    DECLCALLBACKMEMBER(void, pfnFeatureNegotiationComplete, (PVIRTIOCORE pVirtio, uint64_t fDriverFeatures, uint32_t fLegacy));
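As an illustration of how a device-specific consumer might wire up this callback, here is a minimal, hedged sketch; the MYDEVICE type, its members, and the assumption that the shared VIRTIOCORE state is embedded as a member named 'Virtio' are placeholders, not taken from this changeset:

    /* Hypothetical device-specific handler; records what the guest negotiated. */
    static DECLCALLBACK(void) myDevFeatureNegotiationComplete(PVIRTIOCORE pVirtio, uint64_t fDriverFeatures, uint32_t fLegacy)
    {
        MYDEVICE *pThis = RT_FROM_MEMBER(pVirtio, MYDEVICE, Virtio);  /* assumed embedding           */
        pThis->fNegotiatedFeatures = fDriverFeatures;                 /* features the guest accepted */
        pThis->fLegacyGuest        = RT_BOOL(fLegacy);                /* guest never ACKed VERSION_1 */
    }

The callback would be registered in the device constructor (pVirtioCC->pfnFeatureNegotiationComplete = myDevFeatureNegotiationComplete;) before virtioCoreR3Init() is called, since the init code asserts it is non-NULL.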
    404401
    405402    /**
     
    436433    DECLCALLBACKMEMBER(int, pfnDevCapWrite,(PPDMDEVINS pDevIns, uint32_t offCap, const void *pvBuf, uint32_t cbWrite));
    437434
    438 
    439435    /**
    440436     * When guest-to-host queue notifications are enabled, the guest driver notifies the host
     
    469465{
    470466    /**
    471      * When guest-to-host queue notifications are enabled, the guest driver notifies the host
    472      * that the avail queue has buffers, and this callback informs the client.
     467     * This callback notifies the device-specific portion of this device implementation (if guest-to-host
     468     * queue notifications are enabled), that the guest driver has notified the host (this device)
     469     * that the VirtIO "avail" ring of a queue has some new s/g buffers added by the guest VirtIO driver.
    473470     *
    474471     * @param   pVirtio    Pointer to the shared virtio state.
     
    488485} VIRTIOCORERC;
    489486
    490 
    491487/** @typedef VIRTIOCORECC
    492488 * The instance data for the current context. */
    493489typedef CTX_SUFF(VIRTIOCORE) VIRTIOCORECC;
    494490
    495 
    496491/** @name API for VirtIO parent device
    497492 * @{ */
     
    502497 * This should be called from PDMDEVREGR3::pfnConstruct.
    503498 *
    504  * @param   pDevIns                 The device instance.
     499 * @param   pDevIns                 Device instance.
    505500 * @param   pVirtio                 Pointer to the shared virtio state.  This
    506501 *                                  must be the first member in the shared
     
    519514int virtioCoreR3Init(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTIOCORECC pVirtioCC,
    520515                          PVIRTIOPCIPARAMS pPciParams, const char *pcszInstance,
    521                           uint64_t fDevSpecificFeatures, void *pvDevSpecificCfg, uint16_t cbDevSpecificCfg);
    522 
     516                          uint64_t fDevSpecificFeatures, uint32_t fOfferLegacy, void *pvDevSpecificCfg, uint16_t cbDevSpecificCfg);
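A hedged sketch of a constructor call against this revised prototype; pThis, pThisCC, PciParams, the feature mask and the config struct are illustrative device-specific names, not part of this changeset:

    /* Hypothetical constructor fragment for a transitional device offering legacy BAR0 support. */
    rc = virtioCoreR3Init(pDevIns, &pThis->Virtio, &pThisCC->Virtio, &PciParams, pThis->szInst,
                          MYDEV_FEATURES_OFFERED        /* fDevSpecificFeatures              */,
                          1                             /* fOfferLegacy: expose BAR0 I/O cfg */,
                          &pThis->DevSpecificCfg        /* pvDevSpecificCfg                  */,
                          sizeof(pThis->DevSpecificCfg));
    if (RT_FAILURE(rc))
        return PDMDEV_SET_ERROR(pDevIns, rc, N_("virtio: core init failed"));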
    523517/**
    524518 * Initiate orderly reset procedure. This is an exposed API for clients that might need it.
     
    532526 * 'Attaches' host device-specific implementation's queue state to host VirtIO core
    533527 * virtqueue management infrastructure, informing the virtio core of the name of the
    534  * queue associated with the queue number. uVirtqNbr is used as the 'handle' for virt queues
    535  * in this API (and is opaquely the index into the VirtIO core's array of queue state).
    536  *
    537  * Virtqueue numbers are VirtIO specification defined (i.e. they are unique within each
    538  * VirtIO device type).
     528 * queue to associate with the queue number.
     529 *
     530 * Note: uVirtqNbr (ordinal index) is used as the 'handle' for virtqs in this VirtioCore
     531 * implementation's API (as an opaque selector into the VirtIO core's array of queues' states).
     532 *
     533 * Virtqueue numbers are defined per device type by the VirtIO specification (i.e. they are
     534 * unique within each VirtIO device type), but are in some cases scalable, so only the
     535 * pattern of queue numbers is defined by the spec and an implementation may support a
     536 * self-determined number of queues.
    539537 *
    540538 * @param   pVirtio     Pointer to the shared virtio state.
     
    547545
    548546/**
    549  * Enables or disables a virtq
     547 * Detaches host device-specific implementation's queue state from the host VirtIO core
     548 * virtqueue management infrastructure, informing the VirtIO core that the queue is
     549 * not utilized by the device-specific code.
    550550 *
    551551 * @param   pVirtio     Pointer to the shared virtio state.
    552552 * @param   uVirtqNbr   Virtq number
    553  * @param   fEnable     Flags whether to enable or disable the virtq
    554  *
    555  */
    556 void  virtioCoreVirtqEnable(PVIRTIOCORE pVirtio, uint16_t uVirtqNbr, bool fEnable);
     553 *
     554 *
     555 * @returns VBox status code.
     556 */
     557int  virtioCoreR3VirtqDetach(PVIRTIOCORE pVirtio, uint16_t uVirtqNbr);
     558
     559/**
     560 * Checks to see whether queue is attached to core.
     561 *
     562 * @param   pVirtio     Pointer to the shared virtio state.
     563 * @param   uVirtqNbr   Virtq number
     564 *
     565 * Returns boolean true or false indicating whether dev-specific reflection
     566 * of queue is attached to core.
     567 */
     568bool  virtioCoreR3VirtqIsAttached(PVIRTIOCORE pVirtio, uint16_t uVirtqNbr);
     569
     570/**
     571 * Checks to see whether queue is enabled.
     572 *
     573 * @param   pVirtio     Pointer to the shared virtio state.
     574 * @param   uVirtqNbr   Virtq number
     575 *
     576 * Returns boolean true or false indicating core queue enable state.
     577 * There is no API function to enable the queue, because the actual enabling is handled
     578 * by the guest via MMIO.
     579 *
     580 * NOTE: The guest VirtIO driver's claim over this state is overridden (which violates the VirtIO 1.0
     581 * spec in a carefully controlled manner) in the case where the queue MUST be disabled due to observed
     582 * control queue corruption (e.g. a null GCPhys virtq base address) while restoring a legacy-only
     583 * device's (DevVirtioNet.cpp) saved state, as a way to flag that the queue is unusable as saved and
     584 * must be removed. That is all handled in the load/save exec logic. A device reset could potentially,
     585 * depending on parameters passed from the host VirtIO device to the guest VirtIO driver, result in the
     586 * guest re-establishing the queue, but in that situation the queue's operational state would be valid.
     587 */
     588bool  virtioCoreR3VirtqIsEnabled(PVIRTIOCORE pVirtio, uint16_t uVirtqNbr);
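A hedged sketch of how device-specific code might use the attach and query helpers; cMyQueues, pThis and the queue-name format are placeholders, and the virtioCoreR3VirtqAttach() parameter list (pVirtio, uVirtqNbr, pcszName) is assumed from earlier revisions of this API:

    /* Hypothetical constructor fragment: attach queues by ordinal so the core tracks them. */
    for (uint16_t uVirtqNbr = 0; uVirtqNbr < cMyQueues; uVirtqNbr++)
    {
        char szName[32];
        RTStrPrintf(szName, sizeof(szName), "myqueue_%u", uVirtqNbr);
        int rc = virtioCoreR3VirtqAttach(&pThis->Virtio, uVirtqNbr, szName);
        AssertRCReturn(rc, rc);
    }

    /* Hypothetical worker fragment: only service a queue the guest has actually set up. */
    if (   virtioCoreR3VirtqIsAttached(&pThis->Virtio, uVirtqNbr)
        && virtioCoreR3VirtqIsEnabled(&pThis->Virtio, uVirtqNbr))
    {
        /* ...safe to process this queue's avail ring... */
    }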
    557589
    558590/**
    559591 * Enable or disable notification for the specified queue.
    560592 *
    561  * With notification enabled, the guest driver notifies the host device (via MMIO
    562  * to the queue notification offset describe in VirtIO 1.0, 4.1.4.4 "Notification Structure Layout")
    563  * whenever the guest driver adds a new entry to the avail ring of the respective queue.
    564  *
    565  * Note: In the VirtIO world, the device sets flags in the used ring to communicate to the driver how to
    566  * handle notifications for the avail ring and the drivers sets flags in the avail ring to communicate
    567  * to the device how to handle sending interrupts for the used ring.
     593 * When queue notifications are enabled, the guest VirtIO driver notifies host VirtIO device
     594 * (via MMIO, see VirtIO 1.0, 4.1.4.4 "Notification Structure Layout") whenever guest driver adds
     595 * a new s/g buffer to the "avail" ring of the queue.
     596 *
     597 * Note: VirtIO queue layout includes flags the device controls in "used" ring to inform guest
     598 * driver if it should notify host of guest's buffer additions to the "avail" ring, and
     599 * conversely, the guest driver sets flags in the "avail" ring to communicate to host device
     600 * whether or not to interrupt guest when it adds buffers to used ring.
    568601 *
    569602 * @param   pVirtio     Pointer to the shared virtio state.
     
    581614
    582615/**
    583  * Displays the VirtIO spec-related features offered and their accepted/declined status
    584  * by both the VirtIO core and dev-specific device code (which invokes this function).
    585  * The result is a comprehensive list of available features the VirtIO specification
    586  * defines, which ones were actually offered by the device, and which ones were accepted
    587  * by the guest driver, thus providing a legible summary view of the configuration
    588  * the device is operating with.
    589  *
     616 * Displays a well-formatted, human-readable translation of the otherwise inscrutable bitmasks
     617 * that embody the features defined by the VirtIO specification, indicating: the totality of
     618 * features that can be implemented by host and guest, which features were offered by the host, and
     619 * which were actually accepted by the guest. The result is a summary view of the device's
     620 * finalized operational state (the host-guest negotiated architecture) that shows
     621 * which options are available for implementing or enabling.
     622 *
     623 * The non-device-specific VirtIO features list is managed by the core API (i.e. implied).
     624 * Only dev-specific features need be passed as a parameter.
     625 *
    590626 * @param   pVirtio     Pointer to the shared virtio state.
    591627 * @param   pHlp        Pointer to the debug info hlp struct
    592  * @param   s_aDevSpecificFeatures
    593  *                      Features specification lists for device-specific implementation
    594  *                      (i.e: net controller, scsi controller ...)
     628 * @param   s_aDevSpecificFeatures  Dev-specific features (virtio-net, virtio-scsi...)
    595629 * @param   cFeatures   Number of features in aDevSpecificFeatures
    596630 */
     
    599633
    600634/*
    601  * Debuging assist feature displays the state of the VirtIO core code, which includes
     635 * Debug-assist utility function to display state of the VirtIO core code, including
    602636 * an overview of the state of all of the queues.
    603637 *
     
    608642 *
    609643 * This is implemented currently to be invoked by the inheriting device-specific code
    610  * (see DevVirtioNet for an example, which receives the debugvm callback directly).
    611  * DevVirtioNet lists the available sub-options if no arguments are provided. In that
     644 * (see the VirtualBox virtio-net device implementation (the VirtIO network controller)
     645 * for an example of code that receives the debugvm callback directly).
     646 *
     647 * DevVirtioNet lists available sub-options if no arguments are provided. In that
    612648 * example this virtq info related function is invoked hierarchically when virtio-net
    613649 * displays its device-specific queue info.
     
    629665
    630666/**
    631  * This function is identical to virtioCoreR3VirtqAvailBufGet(), except it doesn't 'consume'
    632  * the buffer from the avail ring of the virtq. The peek operation becomes identical to a get
    633  * operation if virtioCoreR3VirtqAvailRingNext() is called to consume the buffer from the avail ring,
    634  * at which point virtioCoreR3VirtqUsedBufPut() must be called to complete the roundtrip
    635  * transaction by putting the descriptor on the used ring.
    636  *
     667 * This function is identical to virtioCoreR3VirtqAvailBufGet(), *except* it doesn't consume the
     668 * peeked buffer from the avail ring of the virtq. The peek becomes equivalent to
     669 * virtioCoreR3VirtqAvailBufGet() only if virtioCoreR3VirtqAvailRingNext() is invoked to
     670 * consume the buffer from the queue's avail ring, followed by a call to virtioCoreR3VirtqUsedBufPut()
     671 * to hand the host-processed buffer back to the guest, completing the guest-initiated virtq buffer circuit.
    637672 *
    638673 * @param   pDevIns     The device instance.
     
    652687/**
    653688 * This function fetches the next buffer (descriptor chain) from the VirtIO "avail" ring of
    654  * indicated queue, and converts the buf's s/g vectors into OUT (e.g. guest-to-host)
     689 * indicated queue, separating the buf's s/g vectors into OUT (e.g. guest-to-host)
    655690 * components and IN (host-to-guest) components.
    656691 *
    657  * The caller is responsible for GCPhys to host virtual memory conversions. If the
     692 * Caller is responsible for GCPhys to host virtual memory conversions. If the
    658693 * virtq buffer being peeked at is "consumed", virtioCoreR3VirtqAvailRingNext() must
    659  * be called and in that case virtioCoreR3VirtqUsedBufPut() must be called to
    660  * complete the roundtrip virtq transaction.
     694 * be called, and after that virtioCoreR3VirtqUsedBufPut() must be called to
     695 * complete the buffer transfer cycle with the guest.
    661696 *
    662697 * @param   pDevIns     The device instance.
     
    678713
    679714/**
    680  * Fetches a specific descriptor chain using avail ring of indicated queue and converts the descriptor
    681  * chain into its OUT (to device) and IN (to guest) components.
     715 * Fetches a specific descriptor chain using avail ring of indicated queue and converts the
     716 * descriptor chain into its OUT (to device) and IN (to guest) components.
    682717 *
    683718 * The caller is responsible for GCPhys to host virtual memory conversions and *must*
     
    704739/**
    705740 * Returns data to the guest to complete a transaction initiated by virtioCoreR3VirtqAvailBufGet(),
    706  * or virtioCoreR3VirtqAvailBufPeek()/virtioCoreR3VirtqBufSync() call pairs to complete each
    707  * intervening a roundtrip transaction, ultimately putting each descriptor chain pulled from the
    708  * avail ring of a queue onto the used ring of the queue. wherein I/O transactions are always
    709  * initiated by the guest and completed by the host. In other words, for the host to send any
    710  * data to the guest, the guest must provide buffers, for the host to fill, via the avail ring
    711  * of the virtq.
     741 * (or virtioCoreR3VirtqAvailBufPeek()/virtioCoreR3VirtqBufSync() call pair), to complete each
     742 * buffer transfer transaction (guest-host buffer cycle), ultimately moving each descriptor chain
     743 * from the avail ring of a queue onto the used ring of the queue. Note that VirtIO buffer
     744 * transactions are *always* initiated by the guest and completed by the host. In other words,
     745 * for the host to send any I/O related data to the guest (and in some cases configuration data),
     746 * the guest must provide buffers via the virtq's avail ring, for the host to fill.
    712747 *
    713748 * At some point virtioCoreR3VirtqUsedRingSync() must be called to return data to the guest,
    714  * completing all pending virtioCoreR3VirtqAvailBufPut() transactions that have accumulated since
    715  * the last call to virtioCoreR3VirtqUsedRingSync()
    716 
    717  * @note This does a write-ahead to the used ring of the guest's queue. The data
    718  *       written won't be seen by the guest until the next call to virtioCoreVirtqUsedRingSync()
    719  *
     749 * completing all pending virtioCoreR3VirtqUsedBufPut() operations that have accumulated since
     750 * the last call to virtioCoreR3VirtqUsedRingSync().
     751 *
     752 * @note This function effectively performs write-ahead to the used ring of the virtq.
     753 *       Data written won't be seen by the guest until the next call to virtioCoreVirtqUsedRingSync()
    720754 *
    721755 * @param   pDevIns         The device instance (for reading).
     
    729763 *                          buffer originally pulled from the queue.
    730764 *
    731  * @param   fFence          If true, put up copy fence (memory barrier) after
     765 * @param   fFence          If true (default), put up copy-fence (memory barrier) after
    732766 *                          copying to guest phys. mem.
    733767 *
     
    741775 */
    742776int virtioCoreR3VirtqUsedBufPut(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtqNbr, PRTSGBUF pSgVirtReturn,
    743                                  PVIRTQBUF pVirtqBuf, bool fFence);
     777                                 PVIRTQBUF pVirtqBuf, bool fFence = true);
     778
     779
     780/**
     781 * Quicker variant of the same-named function (directly above) that it overloads.
     782 * This variant accepts as input a pointer to a buffer and a byte count instead of an
     783 * S/G buffer, thus it doesn't have to copy between two S/G buffers and avoids some overhead.
     784 *
     785 * @param   pDevIns         The device instance (for reading).
     786 * @param   pVirtio         Pointer to the shared virtio state.
     787 * @param   uVirtqNbr       Virtq number
     788 * @param   cb              Number of bytes to copy to the phys. buf.
     789 * @param   pv              Virtual mem buf to copy to phys buf.
     790 * @param   cbEnqueue       How many bytes in packet to enqueue (0 = don't enqueue)
     791 * @param   fFence          If true (default), put up copy-fence (memory barrier) after
     792 *                          copying to guest phys. mem.
     793 *
     794 * @returns VBox status code.
     795 * @retval  VINF_SUCCESS       Success
     796 * @retval  VERR_INVALID_STATE VirtIO not in ready state
     797 * @retval  VERR_NOT_AVAILABLE Virtq is empty
     798 *
     799 * @note    This function will not release any reference to pVirtqBuf.  The
     800 *          caller must take care of that.
     801 */
     802int virtioCoreR3VirtqUsedBufPut(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtq, size_t cb, const void *pv,
     803                            PVIRTQBUF pVirtqBuf, uint32_t cbEnqueue, bool fFence = true);
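To make the guest-initiated buffer cycle concrete, here is a minimal, hedged sketch of a host-to-guest transfer; pThis, uMyVirtq and the payload variables are placeholders, and the exact parameter lists of virtioCoreR3VirtqAvailBufGet(), virtioCoreR3VirtqUsedRingSync() and virtioCoreR3VirtqBufRelease() are assumed from earlier revisions of this API:

    /* Hypothetical worker fragment: pull a guest-posted buffer, fill it, return it, and notify. */
    PVIRTQBUF pVirtqBuf = NULL;
    int rc = virtioCoreR3VirtqAvailBufGet(pDevIns, &pThis->Virtio, uMyVirtq, &pVirtqBuf, true /*fRemove*/);
    if (rc == VERR_NOT_AVAILABLE)
        return VINF_SUCCESS;                       /* guest hasn't posted any buffers yet          */

    RTSGSEG aSeg = { pvPayload, cbPayload };       /* host-virtual data to hand back to the guest  */
    RTSGBUF SgBuf;
    RTSgBufInit(&SgBuf, &aSeg, 1);

    rc = virtioCoreR3VirtqUsedBufPut(pDevIns, &pThis->Virtio, uMyVirtq, &SgBuf, pVirtqBuf, true /*fFence*/);
    virtioCoreR3VirtqUsedRingSync(pDevIns, &pThis->Virtio, uMyVirtq);   /* flush write-ahead, notify guest */
    virtioCoreR3VirtqBufRelease(&pThis->Virtio, pVirtqBuf);             /* drop our reference              */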
     804
     805
    744806/**
    745807 * Advance index of avail ring to next entry in specified virtq (see virtioCoreR3VirtqAvailBufPeek())
     
    751813
    752814/**
    753  * Checks to see if guest has acknowledged device's VIRTIO_F_VERSION_1 feature.
    754  * If not, it's presumed to be a VirtIO legacy guest driver. Note that legacy drivers
    755  * may start using the device prematurely, as opposed to the rigorously sane protocol
    756  * prescribed by the "modern" VirtIO spec. Early access implies a legacy driver.
    757  * Therefore legacy mode is the assumption until feature negotiation.
     815 * Checks to see if the guest has accepted the host device's VIRTIO_F_VERSION_1 (i.e. "modern")
     816 * behavioral modeling, indicating the guest agreed to comply with the modern VirtIO 1.0+ specification.
     817 * Otherwise the unavoidable presumption is that the host device is dealing with a legacy VirtIO
     818 * guest driver, and thus must be prepared to cope with the less mature architecture and behaviors
     819 * from the prototype era of VirtIO. (See comments in the PDM-invoked device constructor for more information.)
    758820 *
    759821 * @param   pVirtio      Pointer to the virtio state.
     
    761823int virtioCoreIsLegacyMode(PVIRTIOCORE pVirtio);
    762824
     825/**
     826 * This VirtIO transitional device supports "modern" (rev 1.0+) as well as "legacy" (e.g. < 1.0) VirtIO drivers.
     827 * Some legacy guest drivers are known to mishandle PCI bus mastering wherein the PCI flavor of GC phys
     828 * access functions can't be used. The following wrappers select the memory access method based on whether the
     829 * device is operating in legacy mode or not.
     830 */
     831DECLINLINE(int) virtioCoreGCPhysWrite(PVIRTIOCORE pVirtio, PPDMDEVINS pDevIns, RTGCPHYS GCPhys, void *pvBuf, size_t cbWrite)
     832{
     833    int rc;
     834    if (virtioCoreIsLegacyMode(pVirtio))
     835        rc = PDMDevHlpPhysWrite(pDevIns, GCPhys, pvBuf, cbWrite);
     836    else
     837        rc = PDMDevHlpPCIPhysWrite(pDevIns, GCPhys, pvBuf, cbWrite);
     838    return rc;
     839}
     840
     841DECLINLINE(int) virtioCoreGCPhysRead(PVIRTIOCORE pVirtio, PPDMDEVINS pDevIns, RTGCPHYS GCPhys, void *pvBuf, size_t cbRead)
     842{
     843    int rc;
     844    if (virtioCoreIsLegacyMode(pVirtio))
     845        rc = PDMDevHlpPhysRead(pDevIns, GCPhys, pvBuf, cbRead);
     846    else
     847        rc = PDMDevHlpPCIPhysRead(pDevIns, GCPhys, pvBuf, cbRead);
     848    return rc;
     849}
     850
     851/*
     852 * (See comments for corresponding function in sg.h)
     853 */
    763854DECLINLINE(void) virtioCoreGCPhysChainInit(PVIRTIOSGBUF pGcSgBuf, PVIRTIOSGSEG paSegs, size_t cSegs)
    764855{
     
    782873}
    783874
     875/*
     876 * (See comments for corresponding function in sg.h)
     877 */
    784878DECLINLINE(RTGCPHYS) virtioCoreGCPhysChainGet(PVIRTIOSGBUF pGcSgBuf, size_t *pcbData)
    785879{
     
    826920}
    827921
     922/*
     923 * (See comments for corresponding function in sg.h)
     924 */
    828925DECLINLINE(void) virtioCoreGCPhysChainReset(PVIRTIOSGBUF pGcSgBuf)
    829926{
     
    843940}
    844941
     942/*
     943 * (See comments for corresponding function in sg.h)
     944 */
    845945DECLINLINE(RTGCPHYS) virtioCoreGCPhysChainAdvance(PVIRTIOSGBUF pGcSgBuf, size_t cbAdvance)
    846946{
     
    860960}
    861961
     962/*
     963 * (See comments for corresponding function in sg.h)
     964 */
    862965DECLINLINE(RTGCPHYS) virtioCoreGCPhysChainGetNextSeg(PVIRTIOSGBUF pGcSgBuf, size_t *pcbSeg)
    863966{
     
    871974}
    872975
    873 DECLINLINE(size_t) virtioCoreGCPhysChainCalcBufSize(PVIRTIOSGBUF pGcSgBuf)
     976/**
     977 * Calculate the length of a GCPhys s/g buffer by tallying the size of each segment.
     978 *
     979 * @param   pGcSgBuf        Guest Context (GCPhys) S/G buffer to calculate length of
     980 */
     981DECLINLINE(size_t) virtioCoreGCPhysChainCalcBufSize(PCVIRTIOSGBUF pGcSgBuf)
    874982{
    875983    size_t   cb = 0;
    876984    unsigned i  = pGcSgBuf->cSegs;
    877      while (i-- > 0)
    878          cb += pGcSgBuf->paSegs[i].cbSeg;
    879      return cb;
    880  }
    881 
     985    while (i-- > 0)
     986        cb += pGcSgBuf->paSegs[i].cbSeg;
     987    return cb;
     988}
     989
     990/*
     991 * (See comments for corresponding function in sg.h)
     992 */
     993DECLINLINE(size_t) virtioCoreGCPhysChainCalcLengthLeft(PVIRTIOSGBUF pGcSgBuf)
     994{
     995    size_t   cb = pGcSgBuf->cbSegLeft;
     996    unsigned i  = pGcSgBuf->cSegs;
     997    while (i-- > pGcSgBuf->idxSeg + 1)
     998        cb += pGcSgBuf->paSegs[i].cbSeg;
     999    return cb;
     1000}
    8821001#define VIRTQNAME(a_pVirtio, a_uVirtq) ((a_pVirtio)->aVirtqueues[(a_uVirtq)].szName)
    8831002
    8841003/**
    885  * Add some bytes to a virtq (s/g) buffer, converting them from virtual memory to GCPhys
    886  *
    887  * To be performant it is left to the caller to validate the size of the buffer with regard
    888  * to data being pulled from it to avoid overruns/underruns.
     1004 * Convert and append bytes from a virtual-memory simple buffer to the VirtIO guest's
     1005 * physical memory described by a buffer pulled from the avail ring of a virtq.
    8891006 *
    8901007 * @param   pVirtio     Pointer to the shared virtio state.
    891  * @param   pVirtqBuf   output: virtq buffer
     1008 * @param   pVirtqBuf   VirtIO buffer to fill
    8921009 * @param   pv          input: virtual memory buffer supplying bytes to copy
    8931010 * @param   cb          number of bytes to add to the s/g buffer.
     
    8951012DECLINLINE(void) virtioCoreR3VirqBufFill(PVIRTIOCORE pVirtio, PVIRTQBUF pVirtqBuf, void *pv, size_t cb)
    8961013{
    897     uint8_t *pb = (uint8_t *)pv;
    898     size_t cbLim = RT_MIN(pVirtqBuf->cbPhysReturn, cb);
    899     while (cbLim)
     1014    uint8_t *pvBuf = (uint8_t *)pv;
     1015    size_t cbRemain = cb, cbTotal = 0;
     1016    PVIRTIOSGBUF pSgPhysReturn = pVirtqBuf->pSgPhysReturn;
     1017    while (cbRemain)
    9001018    {
    901         size_t cbSeg = cbLim;
    902         RTGCPHYS GCPhys = virtioCoreGCPhysChainGetNextSeg(pVirtqBuf->pSgPhysReturn, &cbSeg);
    903         PDMDevHlpPCIPhysWrite(pVirtio->pDevInsR3, GCPhys, pb, cbSeg);
    904         pb += cbSeg;
    905         cbLim -= cbSeg;
    906         pVirtqBuf->cbPhysSend -= cbSeg;
     1019        uint32_t cbBounded = RT_MIN(pSgPhysReturn->cbSegLeft, cbRemain);
     1020        Assert(cbBounded > 0);
     1021        virtioCoreGCPhysWrite(pVirtio, CTX_SUFF(pVirtio->pDevIns), (RTGCPHYS)pSgPhysReturn->GCPhysCur, pvBuf, cbBounded);
     1022        virtioCoreGCPhysChainAdvance(pSgPhysReturn, cbBounded);
     1023        pvBuf += cbBounded;
     1024        cbRemain -= cbBounded;
     1025        cbTotal += cbBounded;
    9071026    }
    908     LogFunc(("Added %d/%d bytes to %s buffer, head idx: %u (%d bytes remain)\n",
    909              cb - cbLim, cb, VIRTQNAME(pVirtio, pVirtqBuf->uVirtq),
    910              pVirtqBuf->uHeadIdx, pVirtqBuf->cbPhysReturn));
    911 }
    912 
    913 /**
    914  * Extract some bytes out of a virtq (s/g) buffer, converting them from GCPhys to virtual memory
    915  *
    916  * To be performant it is left to the caller to validate the size of the buffer with regard
    917  * to data being pulled from it to avoid overruns/underruns.
     1027    LogFunc(("Appended %d bytes to guest phys buf [head: %u]. %d bytes unused in buf.\n",
     1028             cbTotal, pVirtqBuf->uHeadIdx, virtioCoreGCPhysChainCalcLengthLeft(pSgPhysReturn)));
     1029}
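For example (a hedged fragment; the header struct and payload variables are assumed names, not part of this changeset), device-specific code could stage a device header followed by payload into the guest's IN descriptors before enqueueing the buffer:

    /* Hypothetical usage of virtioCoreR3VirqBufFill(): append header, then payload. */
    virtioCoreR3VirqBufFill(pVirtio, pVirtqBuf, &MyPktHdr, sizeof(MyPktHdr));   /* assumed dev-specific header */
    virtioCoreR3VirqBufFill(pVirtio, pVirtqBuf, pvPayload, cbPayload);          /* assumed payload buffer      */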
     1030
     1031/**
     1032 * Extract some bytes out of a virtq s/g buffer, converting them from GCPhys space
     1033 * to ordinary virtual memory (i.e. making data directly accessible to host device code)
     1034 *
     1035 * As a performance optimization, it is left to the caller to validate buffer size.
    9181036 *
    9191037 * @param   pVirtio     Pointer to the shared virtio state.
     
    9371055    LogFunc(("Drained %d/%d bytes from %s buffer, head idx: %u (%d bytes left)\n",
    9381056             cb - cbLim, cb, VIRTQNAME(pVirtio, pVirtqBuf->uVirtq),
    939              pVirtqBuf->uHeadIdx, pVirtqBuf->cbPhysSend));
     1057             pVirtqBuf->uHeadIdx, virtioCoreGCPhysChainCalcLengthLeft(pVirtqBuf->pSgPhysReturn)));
    9401058}
    9411059
     
    10161134 * VirtIO implementation to identify this device's operational configuration after features
    10171135 * have been negotiated with guest VirtIO driver. Feature negotiation entails host indicating
    1018  * to guest which features it supports, then guest accepting among those offered which features
     1136 * to guest which features it supports, then guest accepting, from among those offered, which features
    10191137 * it will enable. That becomes the agreement between the host and guest. The bitmask containing
    10201138 * virtio core features plus device-specific features is provided as a parameter to virtioCoreR3Init()
     
    10311149
    10321150/**
    1033  * Get the the name of the VM state change associated with the enumeration variable
     1151 * Get the name of the VM state change associated with the enumeration variable
    10341152 *
    10351153 * @param enmState       VM state (enumeration value)
     
    10781196/**
    10791197 * Debug assist for any consumer device code
    1080 &
    10811198 * Do a hex dump of memory in guest physical context
    10821199 *
     
    10931210 */
    10941211
    1095 /**
    1096  * Calculate the length of a GCPhys s/g buffer by tallying the size of each segment.
    1097  *
    1098  * @param   pGcSgBuf        Guest Context (GCPhys) S/G buffer to calculate length of
    1099  */
    1100 DECLINLINE(size_t) virtioCoreGCPhysChainCalcBufSize(PCVIRTIOSGBUF pGcSgBuf)
    1101 {
    1102     size_t   cb = 0;
    1103     unsigned i  = pGcSgBuf->cSegs;
    1104     while (i-- > 0)
    1105         cb += pGcSgBuf->paSegs[i].cbSeg;
    1106     return cb;
    1107 }
    1108 
    1109 /**
    1110  * This VirtIO transitional device supports "modern" (rev 1.0+) as well as "legacy" (e.g. < 1.0) VirtIO drivers.
    1111  * Some legacy guest drivers are known to mishandle PCI bus mastering wherein the PCI flavor of GC phys
    1112  * access functions can't be used. The following wrappers select the mem access method based on whether the
    1113  * device is operating in legacy mode or not.
    1114  */
    1115 DECLINLINE(int) virtioCoreGCPhysWrite(PVIRTIOCORE pVirtio, PPDMDEVINS pDevIns, RTGCPHYS GCPhys, void *pvBuf, size_t cbWrite)
    1116 {
    1117     int rc;
    1118     if (virtioCoreIsLegacyMode(pVirtio))
    1119         rc = PDMDevHlpPhysWrite(pDevIns, GCPhys, pvBuf, cbWrite);
    1120     else
    1121         rc = PDMDevHlpPCIPhysWrite(pDevIns, GCPhys, pvBuf, cbWrite);
    1122     return rc;
    1123 }
    1124 
    1125 DECLINLINE(int) virtioCoreGCPhysRead(PVIRTIOCORE pVirtio, PPDMDEVINS pDevIns, RTGCPHYS GCPhys, void *pvBuf, size_t cbRead)
    1126 {
    1127     int rc;
    1128     if (virtioCoreIsLegacyMode(pVirtio))
    1129         rc = PDMDevHlpPhysRead(pDevIns, GCPhys, pvBuf, cbRead);
    1130     else
    1131         rc = PDMDevHlpPCIPhysRead(pDevIns, GCPhys, pvBuf, cbRead);
    1132     return rc;
    1133 }
    1134 
    11351212/** Misc VM and PDM boilerplate */
    1136 int      virtioCoreR3SaveExec(PVIRTIOCORE pVirtio, PCPDMDEVHLPR3 pHlp, PSSMHANDLE pSSM);
    1137 int      virtioCoreR3LoadExec(PVIRTIOCORE pVirtio, PCPDMDEVHLPR3 pHlp, PSSMHANDLE pSSM);
     1213int      virtioCoreR3SaveExec(PVIRTIOCORE pVirtio, PCPDMDEVHLPR3 pHlp, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t cQueues);
     1214int      virtioCoreR3ModernDeviceLoadExec(PVIRTIOCORE pVirtio, PCPDMDEVHLPR3 pHlp, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uTestVersion, uint32_t cQueues);
     1215int      virtioCoreR3LegacyDeviceLoadExec(PVIRTIOCORE pVirtio, PCPDMDEVHLPR3 pHlp, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uVirtioLegacy_3_1_Beta);
    11381216void     virtioCoreR3VmStateChanged(PVIRTIOCORE pVirtio, VIRTIOVMSTATECHANGED enmState);
    11391217void     virtioCoreR3Term(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTIOCORECC pVirtioCC);
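A hedged sketch of how a device's pfnSaveExec callback might chain into the revised core save API; MYDEVICE, MYDEV_SAVEDSTATE_VERSION and MYDEV_NUM_VIRTQS are placeholder names, not part of this changeset:

    /* Hypothetical saved-state callback for a device built on this core. */
    static DECLCALLBACK(int) myDevR3SaveExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
    {
        PMYDEVICE     pThis = PDMDEVINS_2_DATA(pDevIns, PMYDEVICE);
        PCPDMDEVHLPR3 pHlp  = pDevIns->pHlpR3;

        /* ...save device-specific fields first... */

        return virtioCoreR3SaveExec(&pThis->Virtio, pHlp, pSSM,
                                    MYDEV_SAVEDSTATE_VERSION, MYDEV_NUM_VIRTQS);
    }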
     
    11471225 * cb, pv and fWrite are implicit parameters and must be defined by the invoker.
    11481226 */
    1149 
    11501227#ifdef LOG_ENABLED
    11511228
     
    12011278 * the memory described by cb and pv.
    12021279 *
    1203  * cb, pv and fWrite are implicit parameters and must be defined by the invoker.
     1280 * cb, pv and fWrite are implicit parameters and must be defined by invoker.
    12041281 */
    12051282#define VIRTIO_DEV_CONFIG_ACCESS(member, tCfgStruct, uOffsetOfAccess, pCfgStruct) \
     
    12161293/**
    12171294 * Copies bytes into memory described by cb, pv from the specified member field of the config struct.
    1218  * The operation is a nop and logs error if implied parameter fWrite is true.
     1295 * The operation is a NOP, logging an error if an implied parameter, fWrite, is boolean true.
    12191296 *
    12201297 * cb, pv and fWrite are implicit parameters and must be defined by the invoker.
     
    12371314 * the memory described by cb and pv.
    12381315 *
    1239  * cb, pv and fWrite are implicit parameters and must be defined by the invoker.
     1316 * cb, pv and fWrite are implicit parameters and must be defined by invoker.
    12401317 */
    12411318#define VIRTIO_DEV_CONFIG_ACCESS_INDEXED(member, uIdx, tCfgStruct, uOffsetOfAccess, pCfgStruct) \
     
    12541331 * The operation is a nop and logs error if implied parameter fWrite is true.
    12551332 *
    1256  * cb, pv and fWrite are implicit parameters and must be defined by the invoker.
     1333 * cb, pv and fWrite are implicit parameters and must be defined by invoker.
    12571334 */
    12581335#define VIRTIO_DEV_CONFIG_ACCESS_INDEXED_READONLY(member, uidx, tCfgStruct, uOffsetOfAccess, pCfgStruct) \