VirtualBox

Changeset 82961 in vbox for trunk/src/VBox/Devices/Network


Ignore:
Timestamp:
Feb 3, 2020 4:59:10 PM (5 years ago)
Author:
vboxsync
Message:

Network/DevVirtioNet_1_0.cpp: Device appears and performs transactions over VirtIO but snags when the client sets up the MAC filter. See BugRef(#8651) Comment #53

File:
1 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/Devices/Network/DevVirtioNet_1_0.cpp

    r82863 r82961  
    6060
    6161#define VIRTIONET_SAVED_STATE_VERSION          UINT32_C(1)
    62 #define VIRTIONET_MAX_QPAIRS                   512
     62#define VIRTIONET_MAX_QPAIRS                   1
    6363#define VIRTIONET_MAX_QUEUES                   (VIRTIONET_MAX_QPAIRS * 2 + 1)
    6464#define VIRTIONET_MAX_FRAME_SIZE               65535 + 18     /**< Max IP pkt size + Ethernet header with VLAN tag  */
     
    6969#define INSTANCE(pState) pState->szInstanceName
    7070#define QUEUE_NAME(a_pVirtio, a_idxQueue) ((a_pVirtio)->virtqState[(a_idxQueue)].szVirtqName)
    71 #define VIRTQNAME(qIdx)           (pThis->aszVirtqNames[qIdx])
    72 #define CBVIRTQNAME(qIdx)         RTStrNLen(VIRTQNAME(qIdx), sizeof(VIRTQNAME(qIdx)))
     71#define VIRTQNAME(idxQueue)           (pThis->aszVirtqNames[idxQueue])
     72#define CBVIRTQNAME(idxQueue)         RTStrNLen(VIRTQNAME(idxQueue), sizeof(VIRTQNAME(idxQueue)))
    7373#define FEATURE_ENABLED(feature)  (pThis->fNegotiatedFeatures & VIRTIONET_F_##feature)
    7474#define FEATURE_DISABLED(feature) (!FEATURE_ENABLED(feature))
    75 #define FEATURE_OFFERED(feature)  (VIRTIONET_HOST_FEATURES_OFFERED & VIRTIONET_F_##feature)
     75#define FEATURE_OFFERED(feature)  VIRTIONET_HOST_FEATURES_OFFERED & VIRTIONET_F_##feature
    7676
    7777#define SET_LINK_UP(pState) \
     
    9292#define RXQIDX_QPAIR(qPairIdx)  (qPairIdx * 2)
    9393#define TXQIDX_QPAIR(qPairIdx)  (qPairIdx * 2 + 1)
    94 #define CTRLQIDX          ((pThis->fNegotiatedFeatures & VIRTIONET_F_MQ) ? ((VIRTIONET_MAX_QPAIRS - 1) * 2 + 2) : (2))
     94#define CTRLQIDX          (FEATURE_ENABLED(MQ) ? ((VIRTIONET_MAX_QPAIRS - 1) * 2 + 2) : 2)
    9595
    9696#define RXVIRTQNAME(qPairIdx)  (pThis->aszVirtqNames[RXQIDX_QPAIR(qPairIdx)])
     
    150150
    151151#define VIRTIONET_HOST_FEATURES_OFFERED \
    152       VIRTIONET_F_MAC                   \
    153     | VIRTIONET_F_STATUS                \
     152      VIRTIONET_F_STATUS                \
     153    | VIRTIONET_F_GUEST_ANNOUNCE        \
     154    | VIRTIONET_F_MAC                   \
    154155    | VIRTIONET_F_CTRL_VQ               \
    155156    | VIRTIONET_F_CTRL_RX               \
     
    220221    uint8_t uClass;                                             /**< class                                          */
    221222    uint8_t uCmd;                                               /**< command                                        */
    222     uint8_t uCmdSpecific;                                       /**< command specific                               */
    223223};
    224224#pragma pack()
     
    280280uint64_t    uOffloads;                                          /**< offloads                                        */
    281281
    282 /** @name Offload State Configuration Flags (VirtIO 1.0, 5.1.6.5.6.1)
    283  * @{  */
    284 //#define VIRTIONET_F_GUEST_CSUM                      1           /**< Guest offloads Chksum                             */
    285 //#define VIRTIONET_F_GUEST_TSO4                      7           /**< Guest offloads TSO4                             */
    286 //#define VIRTIONET_F_GUEST_TSO6                      8           /**< Guest Offloads TSO6                             */
    287 //#define VIRTIONET_F_GUEST_ECN                       9           /**< Guest Offloads ECN                              */
    288 //#define VIRTIONET_F_GUEST_UFO                      10           /**< Guest Offloads UFO                              */
    289 /** @} */
    290 
    291282/** @name Control virtq: Setting Offloads State (VirtIO 1.0, 5.1.6.5.6.1)
    292283 * @{  */
     
    294285#define VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET         0            /** Apply new offloads configuration                 */
    295286/** @} */
    296 
    297287
    298288/**
     
    374364    bool volatile           fLeafWantsRxBuffers;
    375365
     366    SUPSEMEVENT             hEventRxDescAvail;
     367
    376368    /** Flags whether VirtIO core is in ready state */
    377369    uint8_t                 fVirtioReady;
     
    380372    uint8_t                 fResetting;
    381373
     374    /** Quiescing I/O activity flag */
     375    uint8_t                 fQuiescing;
     376
     377
    382378    /** Promiscuous mode -- RX filter accepts all packets. */
    383379    uint8_t                 fPromiscuous;
     
    414410
    415411    /* Receive-blocking-related fields ***************************************/
    416 
    417     /** EMT: Gets signalled when more RX descriptors become available. */
    418     SUPSEMEVENT             hEventRxDescAvail;
    419412
    420413} VIRTIONET;
     
    520513static DECLCALLBACK(int) virtioNetR3WakeupWorker(PPDMDEVINS pDevIns, PPDMTHREAD pThread)
    521514{
     515    LogFunc(("\n"));
    522516    PVIRTIONET pThis = PDMDEVINS_2_DATA(pDevIns, PVIRTIONET);
    523517    return PDMDevHlpSUPSemEventSignal(pDevIns, pThis->aWorkers[(uintptr_t)pThread->pvUser].hEvtProcess);
     
    532526
    533527    AssertReturnVoid(pThis->hEventRxDescAvail != NIL_SUPSEMEVENT);
    534     AssertReturnVoid(ASMAtomicReadBool(&pThis->fLeafWantsRxBuffers));
    535 
    536     Log(("%s Waking downstream driver's Rx buf waiter thread\n", INSTANCE(pThis)));
     528
     529    LogFunc(("%s Waking downstream driver's Rx buf waiter thread\n", INSTANCE(pThis)));
    537530    int rc = PDMDevHlpSUPSemEventSignal(pDevIns, pThis->hEventRxDescAvail);
    538531    AssertRC(rc);
     
    619612DECLINLINE(bool) virtioNetValidateRequiredFeatures(uint32_t fFeatures)
    620613{
    621     uint32_t fGuestChksumRequired = fFeatures & VIRTIONET_F_GUEST_TSO4
    622                                || fFeatures & VIRTIONET_F_GUEST_TSO6
    623                                || fFeatures & VIRTIONET_F_GUEST_UFO;
    624 
    625     uint32_t fHostChksumRequired =  fFeatures & VIRTIONET_F_HOST_TSO4
    626                                || fFeatures & VIRTIONET_F_HOST_TSO6
    627                                || fFeatures & VIRTIONET_F_HOST_UFO;
    628 
    629     uint32_t fCtrlVqRequired =    fFeatures & VIRTIONET_F_CTRL_RX
    630                                || fFeatures & VIRTIONET_F_CTRL_VLAN
    631                                || fFeatures & VIRTIONET_F_GUEST_ANNOUNCE
    632                                || fFeatures & VIRTIONET_F_MQ
    633                                || fFeatures & VIRTIONET_F_CTRL_MAC_ADDR;
     614    LogFunc(("\n"));
     615    uint32_t fGuestChksumRequired =   fFeatures & VIRTIONET_F_GUEST_TSO4
     616                                   || fFeatures & VIRTIONET_F_GUEST_TSO6
     617                                   || fFeatures & VIRTIONET_F_GUEST_UFO;
     618
     619    uint32_t fHostChksumRequired =    fFeatures & VIRTIONET_F_HOST_TSO4
     620                                   || fFeatures & VIRTIONET_F_HOST_TSO6
     621                                   || fFeatures & VIRTIONET_F_HOST_UFO;
     622
     623    uint32_t fCtrlVqRequired =        fFeatures & VIRTIONET_F_CTRL_RX
     624                                   || fFeatures & VIRTIONET_F_CTRL_VLAN
     625                                   || fFeatures & VIRTIONET_F_GUEST_ANNOUNCE
     626                                   || fFeatures & VIRTIONET_F_MQ
     627                                   || fFeatures & VIRTIONET_F_CTRL_MAC_ADDR;
    634628
    635629    if (fGuestChksumRequired && !(fFeatures & VIRTIONET_F_GUEST_CSUM))
     
    653647    return true;
    654648}
    655 
    656 
    657 
    658649
    659650/*********************************************************************************************************************************
     
    675666                 || offConfig == RT_UOFFSETOF(VIRTIONET_CONFIG_T, member) + sizeof(uint32_t)) \
    676667             && cb == sizeof(uint32_t)) \
    677          || (   offConfig == RT_UOFFSETOF(VIRTIONET_CONFIG_T, member) \
    678              && cb == RT_SIZEOFMEMB(VIRTIONET_CONFIG_T, member)) )
     668         || (   offConfig >= RT_UOFFSETOF(VIRTIONET_CONFIG_T, member) \
     669             && offConfig + cb <= RT_UOFFSETOF(VIRTIONET_CONFIG_T, member) \
     670                                + RT_SIZEOFMEMB(VIRTIONET_CONFIG_T, member)) )
     671
     672/*         || (   offConfig == RT_UOFFSETOF(VIRTIONET_CONFIG_T, member) \
     673               && cb == RT_SIZEOFMEMB(VIRTIONET_CONFIG_T, member)) )
     674*/
    679675
    680676#ifdef LOG_ENABLED
     
    745741static DECLCALLBACK(int) virtioNetR3DevCapRead(PPDMDEVINS pDevIns, uint32_t uOffset, void *pv, uint32_t cb)
    746742{
     743    PVIRTIONET pThis = PDMDEVINS_2_DATA(pDevIns, PVIRTIONET);
     744
     745    LogFunc(("%s: uOffset: %d, cb: %d\n",  INSTANCE(pThis), uOffset, cb));
    747746    return virtioNetR3CfgAccessed(PDMDEVINS_2_DATA(pDevIns, PVIRTIONET), uOffset, pv, cb, false /*fRead*/);
    748747}
     
    753752static DECLCALLBACK(int) virtioNetR3DevCapWrite(PPDMDEVINS pDevIns, uint32_t uOffset, const void *pv, uint32_t cb)
    754753{
     754    PVIRTIONET pThis = PDMDEVINS_2_DATA(pDevIns, PVIRTIONET);
     755
     756    LogFunc(("%s: uOffset: %d, cb: %d: %.*Rhxs\n", INSTANCE(pThis), uOffset, cb, RT_MAX(cb, 8) , pv));
    755757    return virtioNetR3CfgAccessed(PDMDEVINS_2_DATA(pDevIns, PVIRTIONET), uOffset, (void *)pv, cb, true /*fWrite*/);
    756758}
     
    799801
    800802    virtioNetR3SetVirtqNames(pThis);
    801     for (int qIdx = 0; qIdx < pThis->cVirtQueues; qIdx++)
    802         pHlp->pfnSSMGetBool(pSSM, &pThis->afQueueAttached[qIdx]);
     803
     804    for (int idxQueue = 0; idxQueue < pThis->cVirtQueues; idxQueue++)
     805        pHlp->pfnSSMGetBool(pSSM, &pThis->afQueueAttached[idxQueue]);
    803806
    804807    /*
     
    810813     * Nudge queue workers
    811814     */
    812     for (int qIdx = 0; qIdx < pThis->cVirtqPairs; qIdx++)
    813     {
    814         if (pThis->afQueueAttached[qIdx])
     815    for (int idxQueue = 0; idxQueue < pThis->cVirtqPairs; idxQueue++)
     816    {
     817        if (pThis->afQueueAttached[idxQueue])
    815818        {
    816             LogFunc(("Waking %s worker.\n", VIRTQNAME(qIdx)));
    817             rc = PDMDevHlpSUPSemEventSignal(pDevIns, pThis->aWorkers[qIdx].hEvtProcess);
     819            LogFunc(("Waking %s worker.\n", VIRTQNAME(idxQueue)));
     820            rc = PDMDevHlpSUPSemEventSignal(pDevIns, pThis->aWorkers[idxQueue].hEvtProcess);
    818821            AssertRCReturn(rc, rc);
    819822        }
     
    829832    PVIRTIONET     pThis   = PDMDEVINS_2_DATA(pDevIns, PVIRTIONET);
    830833    PVIRTIONETCC   pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PVIRTIONETCC);
    831     PCPDMDEVHLPR3   pHlp    = pDevIns->pHlpR3;
     834    PCPDMDEVHLPR3  pHlp    = pDevIns->pHlpR3;
    832835
    833836    RT_NOREF(pThisCC);
     
    835838    LogFunc(("SAVE EXEC!!\n"));
    836839
    837     for (int qIdx = 0; qIdx < pThis->cVirtQueues; qIdx++)
    838         pHlp->pfnSSMPutBool(pSSM, pThis->afQueueAttached[qIdx]);
     840    for (int idxQueue = 0; idxQueue < pThis->cVirtQueues; idxQueue++)
     841        pHlp->pfnSSMPutBool(pSSM, pThis->afQueueAttached[idxQueue]);
    839842
    840843    /*
     
    857860    PVIRTIONETCC   pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PVIRTIONETCC);
    858861
    859 //    if (ASMAtomicReadu(&pThis->cActiveReqs))
    860 //        return false;
     862    /** @todo create test to conclusively determine I/O has been quiesced and add it here: */
    861863
    862864    LogFunc(("Device I/O activity quiesced: %s\n",
     
    885887    pThisCC->enmQuiescingFor = enmQuiescingFor;
    886888
     889    /*
     890     * Wake downstream network driver thread that's waiting for Rx buffers to be available
     891     * to tell it that's going to happen...
     892     */
     893    virtioNetR3WakeupRxBufWaiter(pDevIns);
     894
    887895    PDMDevHlpSetAsyncNotification(pDevIns, virtioNetR3DeviceQuiesced);
    888896
    889897    /* If already quiesced invoke async callback.  */
    890 //    if (!ASMAtomicReadu(&pThis->cActiveReqs))
    891 //        PDMDevHlpAsyncNotificationCompleted(pDevIns);
     898    if (!ASMAtomicReadBool(&pThis->fLeafWantsRxBuffers))
     899        PDMDevHlpAsyncNotificationCompleted(pDevIns);
     900
     901    /** @todo make sure Rx and Tx are really quiesced (how to we synchronize w/downstream driver?) */
    892902}
    893903
     
    915925    RT_NOREF2(pThis, pThisCC);
    916926
    917     /* VM is halted, thus no new I/O being dumped into queues by the guest.
    918      * Workers have been flagged to stop pulling stuff already queued-up by the guest.
    919      * Now tell lower-level to to suspend reqs (for example, DrvVD suspends all reqs
    920      * on its wait queue, and we will get a callback as the state changes to
    921      * suspended (and later, resumed) for each).
    922      */
    923 
    924     virtioNetR3WakeupRxBufWaiter(pDevIns);
    925 
    926927    virtioNetR3QuiesceDevice(pDevIns, enmType);
    927 
    928928}
    929929
     
    956956
    957957    pThisCC->fQuiescing = false;
     958
     959
     960    /** @todo implement this function properly */
    958961
    959962    /* Wake worker threads flagged to skip pulling queue entries during quiesce
     
    962965     */
    963966/*
    964     for (uint16_t qIdx = 0; qIdx < VIRTIONET_REQ_QUEUE_CNT; qIdx++)
    965     {
    966         if (ASMAtomicReadBool(&pThisCC->aWorkers[qIdx].fSleeping))
     967    for (uint16_t idxQueue = 0; idxQueue < VIRTIONET_REQ_QUEUE_CNT; idxQueue++)
     968    {
     969        if (ASMAtomicReadBool(&pThisCC->aWorkers[idxQueue].fSleeping))
    967970        {
    968             Log6Func(("waking %s worker.\n", VIRTQNAME(qIdx)));
    969             int rc = PDMDevHlpSUPSemEventSignal(pDevIns, pThis->aWorkers[qIdx].hEvtProcess);
     971            Log6Func(("waking %s worker.\n", VIRTQNAME(idxQueue)));
     972            int rc = PDMDevHlpSUPSemEventSignal(pDevIns, pThis->aWorkers[idxQueue].hEvtProcess);
    970973            AssertRC(rc);
    971974        }
     
    976979}
    977980
    978 
    979981#ifdef IN_RING3
    980 
    981982
    982983DECLINLINE(uint16_t) virtioNetR3Checkum16(const void *pvBuf, size_t cb)
     
    10371038
    10381039/**
    1039  * Check if the device can receive data now.
    1040  * This must be called before the pfnRecieve() method is called.
     1040 * Check whether specific queue is ready and has Rx buffers (virtqueue descriptors)
     1041 * available. This must be called before the pfnRecieve() method is called.
    10411042 *
    10421043 * @remarks As a side effect this function enables queue notification
     
    10471048 * @thread  RX
    10481049 */
    1049 static int virtioNetR3IsRxQueuePrimed(PPDMDEVINS pDevIns, PVIRTIONET pThis)
     1050static int virtioNetR3IsRxQueuePrimed(PPDMDEVINS pDevIns, PVIRTIONET pThis, uint16_t idxQueue)
    10501051{
    10511052    int rc;
    10521053
    1053     LogFlowFunc(("%s:\n", INSTANCE(pThis)));
     1054    LogFlowFunc(("%s: idxQueue = %d\n", INSTANCE(pThis), idxQueue));
    10541055
    10551056    if (!pThis->fVirtioReady)
    10561057        rc = VERR_NET_NO_BUFFER_SPACE;
    10571058
    1058     else if (!virtioCoreIsQueueEnabled(&pThis->Virtio, RXQIDX_QPAIR(0)))
     1059    else if (!virtioCoreIsQueueEnabled(&pThis->Virtio, RXQIDX_QPAIR(idxQueue)))
    10591060        rc = VERR_NET_NO_BUFFER_SPACE;
    10601061
    1061     else if (virtioCoreQueueIsEmpty(pDevIns, &pThis->Virtio, RXQIDX_QPAIR(0)))
    1062     {
    1063         virtioCoreQueueSetNotify(&pThis->Virtio, RXQIDX_QPAIR(0), true);
     1062    else if (virtioCoreQueueIsEmpty(pDevIns, &pThis->Virtio, RXQIDX_QPAIR(idxQueue)))
     1063    {
     1064        virtioCoreQueueSetNotify(&pThis->Virtio, RXQIDX_QPAIR(idxQueue), true);
    10641065        rc = VERR_NET_NO_BUFFER_SPACE;
    10651066    }
    10661067    else
    10671068    {
    1068         virtioCoreQueueSetNotify(&pThis->Virtio, RXQIDX_QPAIR(0), false);
     1069        virtioCoreQueueSetNotify(&pThis->Virtio, RXQIDX_QPAIR(idxQueue), false);
    10691070        rc = VINF_SUCCESS;
    10701071    }
    10711072
    1072     LogFlowFunc(("%s: -> %Rrc\n", INSTANCE(pThis), rc));
     1073    LogFlowFunc(("%s: idxQueue = %d -> %Rrc\n", INSTANCE(pThis), idxQueue, rc));
    10731074    return rc;
     1075}
     1076
     1077/*
     1078 * Returns true if VirtIO core and device are in a running and operational state
     1079 */
     1080DECLINLINE(bool) virtioNetAllSystemsGo(PVIRTIONET pThis, PPDMDEVINS pDevIns)
     1081{
     1082    if (!pThis->fVirtioReady)
     1083        return false;
     1084
     1085    if (pThis->fQuiescing)
     1086        return false;
     1087
     1088    VMSTATE enmVMState = PDMDevHlpVMState(pDevIns);
     1089    if (!RT_LIKELY(enmVMState == VMSTATE_RUNNING || enmVMState == VMSTATE_RUNNING_LS))
     1090        return false;
     1091
     1092    return true;
    10741093}
    10751094
     
    10831102    PVIRTIONET   pThis   = PDMDEVINS_2_DATA(pDevIns, PVIRTIONET);
    10841103
    1085     LogFlowFunc(("%s: timeoutMs=%u\n", INSTANCE(pThis), timeoutMs));
     1104    if (!virtioNetAllSystemsGo(pThis, pDevIns))
     1105    {
     1106        LogFunc(("VirtIO not ready\n"));
     1107        return VERR_NET_NO_BUFFER_SPACE;
     1108    }
    10861109
    10871110    if (!timeoutMs)
    10881111        return VERR_NET_NO_BUFFER_SPACE;
    10891112
     1113    LogFlowFunc(("%s: timeoutMs=%u\n", INSTANCE(pThis), timeoutMs));
     1114
    10901115    ASMAtomicXchgBool(&pThis->fLeafWantsRxBuffers, true);
    10911116
    1092     VMSTATE enmVMState;
    1093     while (RT_LIKELY(  (enmVMState = PDMDevHlpVMState(pDevIns)) == VMSTATE_RUNNING
    1094                      || enmVMState == VMSTATE_RUNNING_LS))
    1095     {
    1096 
    1097         if (RT_SUCCESS(virtioNetR3IsRxQueuePrimed(pDevIns, pThis)))
     1117    /** @todo If we ever start using more than one Rx/Tx queue pair, is a random queue
     1118              selection algorithm feasible or even necessary to prevent starvation? */
     1119    do {
     1120        for (int idxQueue = 0; idxQueue < pThis->cVirtQueues; idxQueue += 2) /* Skip odd queue #'s because Rx queues only! */
    10981121        {
    1099             LogFunc(("Rx bufs now available, releasing waiter..."));
    1100             return VINF_SUCCESS;
     1122            if (!IS_RX_QUEUE(idxQueue))
     1123                continue;
     1124
     1125            if (RT_SUCCESS(virtioNetR3IsRxQueuePrimed(pDevIns, pThis, idxQueue)))
     1126            {
     1127                LogFunc(("Rx bufs now available, releasing waiter..."));
     1128                return VINF_SUCCESS;
     1129            }
    11011130        }
    1102         LogFunc(("%s: Starved for guest Rx bufs, waiting %u ms ...\n", INSTANCE(pThis), timeoutMs));
    1103 
    1104         int rc = PDMDevHlpSUPSemEventWaitNoResume(pDevIns, pThis->hEventRxDescAvail, timeoutMs);
    1105         if (RT_FAILURE(rc) && rc != VERR_TIMEOUT && rc != VERR_INTERRUPTED)
     1131        LogFunc(("%s: Starved for guest Rx bufs, waiting %u ms ...\n",
     1132                 INSTANCE(pThis), timeoutMs));
     1133
     1134        int rc = PDMDevHlpSUPSemEventWaitNoResume(pDevIns,
     1135                        pThis->hEventRxDescAvail, timeoutMs);
     1136
     1137        if (rc == VERR_TIMEOUT || rc == VERR_INTERRUPTED)
     1138            continue;
     1139
     1140        if (RT_FAILURE(rc))
    11061141            RTThreadSleep(1);
    1107     }
     1142
     1143    } while (virtioNetAllSystemsGo(pThis, pDevIns));
     1144
    11081145    ASMAtomicXchgBool(&pThis->fLeafWantsRxBuffers, false);
    11091146
     
    12031240static bool virtioNetR3AddressFilter(PVIRTIONET pThis, const void *pvBuf, size_t cb)
    12041241{
     1242    LogFunc(("\n"));
     1243
    12051244    if (pThis->fPromiscuous)
    12061245        return true;
     
    12461285    return false;
    12471286}
    1248 
    1249 
    1250 
    12511287
    12521288/**
     
    12611297 * @param   pvBuf           The available data.
    12621298 * @param   cb              Number of bytes available in the buffer.
     1299 * @param   pGso            Pointer to Global Segmentation Offload structure
     1300 * @param   idxQueue            Queue to work with
    12631301 * @thread  RX
    12641302 */
    1265 
    1266 /*  static void virtioNetR3Receive(PPDMDEVINS pDevIns, PVIRTIONET pThis, PVIRTIONETCC pThisCC, uint16_t qIdx, PVIRTIO_DESC_CHAIN_T pDescChain)
    1267 {
    1268     RT_NOREF5(pDevIns, pThis, pThisCC, qIdx, pDescChain);
    1269 }
    1270 */
    12711303static int virtioNetR3HandleRxPacket(PPDMDEVINS pDevIns, PVIRTIONET pThis, PVIRTIONETCC pThisCC,
    1272                                 const void *pvBuf, size_t cb, PCPDMNETWORKGSO pGso)
     1304                                const void *pvBuf, size_t cb, PCPDMNETWORKGSO pGso, uint16_t idxQueue)
    12731305{
    12741306    RT_NOREF(pThisCC);
    12751307
     1308    LogFunc(("\n"));
    12761309    VIRTIONET_PKT_HDR_T rxPktHdr;
    12771310
     
    13271360    {
    13281361        PVIRTIO_DESC_CHAIN_T pDescChain;
    1329         int rc = virtioCoreR3QueueGet(pDevIns, &pThis->Virtio, RXQIDX_QPAIR(0), &pDescChain, true);
     1362        int rc = virtioCoreR3QueueGet(pDevIns, &pThis->Virtio, RXQIDX_QPAIR(idxQueue), &pDescChain, true);
    13301363
    13311364        AssertRC(rc == VINF_SUCCESS || rc == VERR_NOT_AVAILABLE);
     
    13401373         * Assert it to reduce complexity. Robust solution would entail finding seg idx and offset of
    13411374         * virtio_net_header.num_buffers (to update field *after* hdr & pkts copied to gcPhys) */
     1375
    13421376        AssertMsgReturn(pDescChain->pSgPhysReturn->paSegs[0].cbSeg >= sizeof(VIRTIONET_PKT_HDR_T),
    13431377                        ("Desc chain's first seg has insufficient space for pkt header!\n"),
     
    13601394            if (cSegs++ >= cSegsAllocated)
    13611395            {
    1362                 cSegsAllocated <<= 1;
     1396                cSegsAllocated <<= 1; /* double the allocation size */
    13631397                paVirtSegsToGuest = (PRTSGSEG)RTMemRealloc(paVirtSegsToGuest, sizeof(RTSGSEG) * cSegsAllocated);
    13641398                AssertReturn(paVirtSegsToGuest, VERR_NO_MEMORY);
     
    13701404
    13711405        /* Append remaining Rx pkt or as much current desc chain has room for */
    1372         uint32_t uboundedSize = RT_MIN(cb, cbDescChainLeft);
    1373         paVirtSegsToGuest[cSegs].cbSeg = uboundedSize;
     1406        uint32_t cbLim = RT_MIN(cb, cbDescChainLeft);
     1407        paVirtSegsToGuest[cSegs].cbSeg = cbLim;
    13741408        paVirtSegsToGuest[cSegs++].pvSeg = ((uint8_t *)pvBuf) + uOffset;
    1375         uOffset += uboundedSize;
     1409        uOffset += cbLim;
    13761410        cDescs++;
    13771411
    13781412        RTSgBufInit(pVirtSegBufToGuest, paVirtSegsToGuest, cSegs);
    13791413
    1380         virtioCoreR3QueuePut(pDevIns, &pThis->Virtio, RXQIDX_QPAIR(0),
     1414        virtioCoreR3QueuePut(pDevIns, &pThis->Virtio, RXQIDX_QPAIR(idxQueue),
    13811415                             pVirtSegBufToGuest, pDescChain, true);
    13821416
     
    13921426                      rc);
    13931427
    1394     virtioCoreQueueSync(pDevIns, &pThis->Virtio, RXQIDX_QPAIR(0));
     1428    virtioCoreQueueSync(pDevIns, &pThis->Virtio, RXQIDX_QPAIR(idxQueue));
    13951429
    13961430    for (int i = 0; i < 2; i++)
     
    14181452    PPDMDEVINS      pDevIns = pThisCC->pDevIns;
    14191453    PVIRTIONET      pThis   = PDMDEVINS_2_DATA(pDevIns, PVIRTIONET);
     1454
     1455    LogFunc(("\n"));
     1456
     1457    if (!pThis->fVirtioReady)
     1458    {
     1459        LogRelFunc(("VirtIO not ready, aborting downstream receive\n"));
     1460        return VERR_INTERRUPTED;
     1461    }
     1462    if (pThis->fQuiescing)
     1463    {
     1464        LogRelFunc(("Quiescing I/O for suspend or power off, aborting downstream receive\n"));
     1465        return VERR_INTERRUPTED;
     1466    }
    14201467
    14211468    if (pGso)
     
    14461493    }
    14471494
    1448     Log2Func(("pvBuf=%p cb=%u pGso=%p\n", INSTANCE(pThis), pvBuf, cb, pGso));
    1449 
    1450     int rc = virtioNetR3IsRxQueuePrimed(pDevIns, pThis);
    1451     if (RT_FAILURE(rc))
    1452         return rc;
    1453 
    1454     /* Drop packets if VM is not running or cable is disconnected. */
    1455     VMSTATE enmVMState = PDMDevHlpVMState(pDevIns);
    1456     if ((   enmVMState != VMSTATE_RUNNING
    1457          && enmVMState != VMSTATE_RUNNING_LS)
    1458         || !(pThis->virtioNetConfig.uStatus & VIRTIONET_F_LINK_UP))
    1459         return VINF_SUCCESS;
    1460 
    1461     virtioNetR3SetReadLed(pThisCC, true);
    1462     if (virtioNetR3AddressFilter(pThis, pvBuf, cb))
    1463     {
    1464         rc = virtioNetR3HandleRxPacket(pDevIns, pThis, pThisCC, pvBuf, cb, pGso);
    1465     }
    1466     virtioNetR3SetReadLed(pThisCC, false);
    1467     return rc;
     1495    Log2Func(("%s pvBuf=%p cb=%u pGso=%p\n", INSTANCE(pThis), pvBuf, cb, pGso));
     1496
     1497    /** @todo If we ever start using more than one Rx/Tx queue pair, is a random queue
     1498              selection algorithm feasible or even necessary to prevent starvation? */
     1499
     1500    for (int idxQueue = 0; idxQueue < pThis->cVirtQueues; idxQueue += 2) /* Skip odd queue #'s because Rx queues only */
     1501    {
     1502        if (RT_SUCCESS(!virtioNetR3IsRxQueuePrimed(pDevIns, pThis, idxQueue)))
     1503        {
     1504            /* Drop packets if VM is not running or cable is disconnected. */
     1505            if (!virtioNetAllSystemsGo(pThis, pDevIns) || !IS_LINK_UP(pThis))
     1506                return VINF_SUCCESS;
     1507
     1508            virtioNetR3SetReadLed(pThisCC, true);
     1509
     1510            int rc = VINF_SUCCESS;
     1511            if (virtioNetR3AddressFilter(pThis, pvBuf, cb))
     1512                rc = virtioNetR3HandleRxPacket(pDevIns, pThis, pThisCC, pvBuf, cb, pGso, idxQueue);
     1513
     1514            virtioNetR3SetReadLed(pThisCC, false);
     1515
     1516            return rc;
     1517        }
     1518    }
     1519    return VERR_INTERRUPTED;
    14681520}
    14691521
     
    14751527    return virtioNetR3NetworkDown_ReceiveGso(pInterface, pvBuf, cb, NULL);
    14761528}
    1477 
    1478 
    14791529
    14801530/* Read physical bytes from the out segment(s) of descriptor chain */
     
    14821532{
    14831533    uint8_t *pb = (uint8_t *)pv;
    1484     uint16_t cbMin = RT_MIN(pDescChain->cbPhysSend, cb);
    1485     while (cbMin)
    1486     {
    1487         size_t cbSeg = cbMin;
     1534    uint16_t cbLim = RT_MIN(pDescChain->cbPhysSend, cb);
     1535    while (cbLim)
     1536    {
     1537        size_t cbSeg = cbLim;
    14881538        RTGCPHYS GCPhys = virtioCoreSgBufGetNextSegment(pDescChain->pSgPhysSend, &cbSeg);
    14891539        PDMDevHlpPCIPhysRead(pDevIns, GCPhys, pb, cbSeg);
    14901540        pb += cbSeg;
    1491         cbMin -= cbSeg;
    1492     }
    1493     LogFunc(("Pulled %d bytes out of %d bytes requested from descriptor chain\n", cbMin, cb));
    1494 }
    1495 
     1541        cbLim -= cbSeg;
     1542        pDescChain->cbPhysSend -= cbSeg;
     1543    }
     1544    LogFunc(("Pulled %d / %d bytes from desc chain (%d bytes left in desc chain)\n",
     1545             cb - cbLim, cb, pDescChain->cbPhysSend));
     1546}
    14961547
    14971548static uint8_t virtioNetR3CtrlRx(PPDMDEVINS pDevIns, PVIRTIONET pThis, PVIRTIONETCC pThisCC,
     
    15011552#define LOG_VIRTIONET_FLAG(fld) LogFunc(("%s = %d\n", #fld, pThis->fld))
    15021553
    1503     LogFunc((""));
     1554    LogFunc(("Processing CTRL Rx command\n"));
    15041555    switch(pCtrlPktHdr->uCmd)
    15051556    {
     
    15671618                                  PVIRTIONET_CTRL_HDR_T pCtrlPktHdr, PVIRTIO_DESC_CHAIN_T pDescChain)
    15681619{
    1569 RT_NOREF(pThisCC);
     1620    LogFunc(("Processing CTRL MAC command\n"));
     1621
     1622    RT_NOREF(pThisCC);
    15701623
    15711624#define ASSERT_CTRL_ADDR_SET(v) \
    1572     AssertMsgReturn((v), ("DESC chain too small to process CTRL_MAC_ADDR_SET cmd"), VIRTIONET_ERROR)
     1625    AssertMsgReturn((v), ("DESC chain too small to process CTRL_MAC_ADDR_SET cmd\n"), VIRTIONET_ERROR)
    15731626
    15741627#define ASSERT_CTRL_TABLE_SET(v) \
    1575     AssertMsgReturn((v), ("DESC chain too small to process CTRL_MAC_TABLE_SET cmd"), VIRTIONET_ERROR)
     1628    AssertMsgReturn((v), ("DESC chain too small to process CTRL_MAC_TABLE_SET cmd\n"), VIRTIONET_ERROR)
    15761629
    15771630    AssertMsgReturn(pDescChain->cbPhysSend >= sizeof(*pCtrlPktHdr),
     
    15791632                   VIRTIONET_ERROR);
    15801633
    1581     size_t cbRemaining = pDescChain->cbPhysSend - sizeof(*pCtrlPktHdr);
    1582 
     1634    size_t cbRemaining = pDescChain->cbPhysSend;
     1635Log6Func(("initial:cbRemaining=%d pDescChain->cbPhysSend=%d sizeof(*pCtrlPktHdr)=%d\n",
     1636         cbRemaining, pDescChain->cbPhysSend, sizeof(*pCtrlPktHdr)));
    15831637    switch(pCtrlPktHdr->uCmd)
    15841638    {
     
    15981652            virtioNetR3PullChain(pDevIns, pDescChain, &cMacs, sizeof(cMacs));
    15991653            cbRemaining -= sizeof(cMacs);
    1600             uint32_t cbMacs = cMacs * sizeof(RTMAC);
    1601             ASSERT_CTRL_TABLE_SET(cbRemaining >= cbMacs);
    1602             virtioNetR3PullChain(pDevIns, pDescChain, &pThis->aMacUnicastFilter, cbMacs);
    1603             cbRemaining -= cbMacs;
     1654            Log6Func(("Guest provided %d unicast MAC Table entries\n", cMacs));
     1655            if (cMacs)
     1656            {
     1657                uint32_t cbMacs = cMacs * sizeof(RTMAC);
     1658                ASSERT_CTRL_TABLE_SET(cbRemaining >= cbMacs);
     1659                virtioNetR3PullChain(pDevIns, pDescChain, &pThis->aMacUnicastFilter, cbMacs);
     1660                cbRemaining -= cbMacs;
     1661            }
    16041662            pThis->cUnicastFilterMacs = cMacs;
    16051663
     
    16081666            virtioNetR3PullChain(pDevIns, pDescChain, &cMacs, sizeof(cMacs));
    16091667            cbRemaining -= sizeof(cMacs);
    1610             cbMacs = cMacs * sizeof(RTMAC);
    1611             ASSERT_CTRL_TABLE_SET(cbRemaining >= cbMacs);
    1612             virtioNetR3PullChain(pDevIns, pDescChain, &pThis->aMacMulticastFilter, cbMacs);
    1613             cbRemaining -= cbMacs;
     1668            Log6Func(("Guest provided %d multicast MAC Table entries\n", cMacs));
     1669            if (cMacs)
     1670            {
     1671                uint32_t cbMacs = cMacs * sizeof(RTMAC);
     1672                ASSERT_CTRL_TABLE_SET(cbRemaining >= cbMacs);
     1673                virtioNetR3PullChain(pDevIns, pDescChain, &pThis->aMacMulticastFilter, cbMacs);
     1674                cbRemaining -= cbMacs;
     1675            }
    16141676            pThis->cMulticastFilterMacs = cMacs;
    16151677
     
    16321694                                   PVIRTIONET_CTRL_HDR_T pCtrlPktHdr, PVIRTIO_DESC_CHAIN_T pDescChain)
    16331695{
     1696    LogFunc(("Processing CTRL VLAN command\n"));
     1697
    16341698    RT_NOREF(pThisCC);
    16351699
     
    16591723                            PVIRTIO_DESC_CHAIN_T pDescChain)
    16601724{
    1661 
    1662 #define SIZEOF_SEND(descChain, ctrlHdr) RT_MIN(descChain->cbPhysSend, sizeof(ctrlHdr))
     1725    LogFunc(("Received CTRL packet from guest\n"));
    16631726
    16641727    if (pDescChain->cbPhysSend < 2)
     
    16801743    AssertPtrReturnVoid(pCtrlPktHdr);
    16811744
    1682     AssertMsgReturnVoid(pDescChain->cbPhysSend >= sizeof(*pCtrlPktHdr),
     1745    AssertMsgReturnVoid(pDescChain->cbPhysSend >= sizeof(VIRTIONET_CTRL_HDR_T),
    16831746                        ("DESC chain too small for CTRL pkt header"));
    16841747
    1685     virtioNetR3PullChain(pDevIns, pDescChain, pCtrlPktHdr, SIZEOF_SEND(pDescChain, VIRTIONET_CTRL_HDR_T));
     1748    virtioNetR3PullChain(pDevIns, pDescChain, pCtrlPktHdr,
     1749                         RT_MIN(pDescChain->cbPhysSend, sizeof(VIRTIONET_CTRL_HDR_T)));
     1750
     1751    Log6Func(("CTRL pkt hdr: class=%d cmd=%d\n", pCtrlPktHdr->uClass, pCtrlPktHdr->uCmd));
    16861752
    16871753    uint8_t uAck;
     
    16971763            uAck = virtioNetR3CtrlVlan(pDevIns, pThis, pThisCC, pCtrlPktHdr, pDescChain);
    16981764            break;
     1765        case VIRTIONET_CTRL_ANNOUNCE:
     1766            uAck = VIRTIONET_OK;
     1767            if (FEATURE_DISABLED(STATUS) || FEATURE_DISABLED(GUEST_ANNOUNCE))
     1768            {
     1769                LogFunc(("Ignoring CTRL class VIRTIONET_CTRL_ANNOUNCE. Not configured to handle it\n"));
     1770                virtioNetPrintFeatures(pThis, pThis->fNegotiatedFeatures, "Features");
     1771                break;
     1772            }
     1773            if (pCtrlPktHdr->uCmd != VIRTIONET_CTRL_ANNOUNCE_ACK)
     1774            {
     1775                LogFunc(("Ignoring CTRL class VIRTIONET_CTRL_ANNOUNCE. Unrecognized uCmd\n"));
     1776                break;
     1777            }
     1778            pThis->virtioNetConfig.uStatus &= ~VIRTIONET_F_ANNOUNCE;
     1779            Log6Func(("Clearing VIRTIONET_F_ANNOUNCE in config status\n"));
     1780            break;
     1781
    16991782        default:
     1783            LogRelFunc(("Unrecognized CTRL pkt hdr class (%d)\n", pCtrlPktHdr->uClass));
    17001784            uAck = VIRTIONET_ERROR;
    17011785    }
    17021786
    1703     int cSegs = 2;
     1787    /* Currently CTRL pkt header just returns ack, but keeping segment logic generic/flexible
     1788     * in case that changes to make adapting more straightforward */
     1789    int cSegs = 1;
    17041790
    17051791    /* Return CTRL packet Ack byte (result code) to guest driver */
    1706     PRTSGSEG paSegs = (PRTSGSEG)RTMemAllocZ(sizeof(RTSGSEG) * cSegs);
    1707     AssertMsgReturnVoid(paSegs, ("Out of memory"));
    1708 
    1709     RTSGSEG aSegs[] = { { &uAck, sizeof(uAck) } };
    1710     memcpy(paSegs, aSegs, sizeof(aSegs));
    1711 
    1712     PRTSGBUF pSegBuf = (PRTSGBUF)RTMemAllocZ(sizeof(RTSGBUF));
    1713     AssertMsgReturnVoid(pSegBuf, ("Out of memory"));
    1714 
     1792    PRTSGSEG paReturnSegs = (PRTSGSEG)RTMemAllocZ(sizeof(RTSGSEG));
     1793    AssertMsgReturnVoid(paReturnSegs, ("Out of memory"));
     1794
     1795    RTSGSEG aStaticSegs[] = { { &uAck, sizeof(uAck) } };
     1796    memcpy(paReturnSegs, aStaticSegs, sizeof(RTSGSEG));
     1797
     1798    PRTSGBUF pReturnSegBuf = (PRTSGBUF)RTMemAllocZ(sizeof(RTSGBUF));
     1799    AssertMsgReturnVoid(pReturnSegBuf, ("Out of memory"));
    17151800
    17161801    /* Copy segment data to malloc'd memory to avoid stack out-of-scope errors sanitizer doesn't detect */
    17171802    for (int i = 0; i < cSegs; i++)
    17181803    {
    1719         void *pv = paSegs[i].pvSeg;
    1720         paSegs[i].pvSeg = RTMemAlloc(paSegs[i].cbSeg);
    1721         AssertMsgReturnVoid(paSegs[i].pvSeg, ("Out of memory"));
    1722         memcpy(paSegs[i].pvSeg, pv, paSegs[i].cbSeg);
    1723     }
    1724 
    1725     RTSgBufInit(pSegBuf, paSegs, cSegs);
    1726 
    1727     virtioCoreR3QueuePut(pDevIns, &pThis->Virtio, CTRLQIDX, pSegBuf, pDescChain, true);
     1804        void *pv = paReturnSegs[i].pvSeg;
     1805        paReturnSegs[i].pvSeg = RTMemAlloc(aStaticSegs[i].cbSeg);
     1806        AssertMsgReturnVoid(paReturnSegs[i].pvSeg, ("Out of memory"));
     1807        memcpy(paReturnSegs[i].pvSeg, pv, aStaticSegs[i].cbSeg);
     1808    }
     1809
     1810    RTSgBufInit(pReturnSegBuf, paReturnSegs, cSegs);
     1811
     1812    virtioCoreR3QueuePut(pDevIns, &pThis->Virtio, CTRLQIDX, pReturnSegBuf, pDescChain, true);
    17281813    virtioCoreQueueSync(pDevIns, &pThis->Virtio, CTRLQIDX);
    17291814
    17301815    for (int i = 0; i < cSegs; i++)
    1731         RTMemFree(paSegs[i].pvSeg);
    1732 
    1733     RTMemFree(paSegs);
    1734     RTMemFree(pSegBuf);
    1735 
    1736     LogFunc(("Processed ctrl message class/cmd/subcmd = %u/%u/%u. Ack=%u.\n",
    1737               pCtrlPktHdr->uClass, pCtrlPktHdr->uCmd, pCtrlPktHdr->uCmdSpecific, uAck));
     1816        RTMemFree(paReturnSegs[i].pvSeg);
     1817
     1818    RTMemFree(paReturnSegs);
     1819    RTMemFree(pReturnSegBuf);
     1820
     1821    LogFunc(("Processed ctrl message class/cmd = %u/%u. Ack=%u.\n",
     1822              pCtrlPktHdr->uClass, pCtrlPktHdr->uCmd, uAck));
    17381823
    17391824}
     
    18331918
    18341919static void virtioNetR3TransmitPendingPackets(PPDMDEVINS pDevIns, PVIRTIONET pThis, PVIRTIONETCC pThisCC,
    1835                                          uint16_t qIdx, bool fOnWorkerThread)
     1920                                         uint16_t idxQueue, bool fOnWorkerThread)
    18361921{
    18371922    PVIRTIOCORE pVirtio = &pThis->Virtio;
     
    18731958
    18741959    Log3Func(("%s: About to transmit %d pending packets\n", INSTANCE(pThis),
    1875               virtioCoreR3QueuePendingCount(pVirtio->pDevIns, pVirtio, TXQIDX_QPAIR(0))));
     1960              virtioCoreR3QueuePendingCount(pVirtio->pDevIns, pVirtio, idxQueue)));
    18761961
    18771962    virtioNetR3SetWriteLed(pThisCC, true);
    18781963
    1879 
     1964    int rc;
    18801965    PVIRTIO_DESC_CHAIN_T pDescChain;
    1881     while (virtioCoreR3QueuePeek(pVirtio->pDevIns, pVirtio, TXQIDX_QPAIR(0), &pDescChain))
    1882     {
     1966    while ((rc = virtioCoreR3QueuePeek(pVirtio->pDevIns, pVirtio, idxQueue, &pDescChain)))
     1967    {
     1968        if (RT_SUCCESS(rc))
     1969            Log6Func(("fetched descriptor chain from %s\n", VIRTQNAME(idxQueue)));
     1970        else
     1971        {
     1972            LogFunc(("Failed find expected data on %s, rc = %Rrc\n", VIRTQNAME(idxQueue), rc));
     1973            break;
     1974        }
     1975
    18831976        uint32_t cSegsFromGuest = pDescChain->pSgPhysSend->cSegs;
    18841977        PVIRTIOSGSEG paSegsFromGuest = pDescChain->pSgPhysSend->paSegs;
    1885 
    1886         Log6Func(("fetched descriptor chain from %s\n", VIRTQNAME(qIdx)));
    18871978
    18881979        if (cSegsFromGuest < 2 || paSegsFromGuest[0].cbSeg != cbPktHdr)
     
    19152006            /** @todo Optimize away the extra copying! (lazy bird) */
    19162007            PPDMSCATTERGATHER pSgBufToPdmLeafDevice;
    1917             int rc = pThisCC->pDrv->pfnAllocBuf(pThisCC->pDrv, uSize, pGso, &pSgBufToPdmLeafDevice);
     2008            rc = pThisCC->pDrv->pfnAllocBuf(pThisCC->pDrv, uSize, pGso, &pSgBufToPdmLeafDevice);
    19182009            if (RT_SUCCESS(rc))
    19192010            {
     
    19332024                }
    19342025                rc = virtioNetR3TransmitFrame(pThis, pThisCC, pSgBufToPdmLeafDevice, pGso, &PktHdr);
     2026                if (RT_FAILURE(rc))
     2027                {
     2028                    LogFunc(("Failed to transmit frame, rc = %Rrc\n", rc));
     2029                    pThisCC->pDrv->pfnFreeBuf(pThisCC->pDrv, pSgBufToPdmLeafDevice);
     2030                }
    19352031            }
    19362032            else
    19372033            {
    1938                 Log4Func(("Failed to allocate SG buffer: size=%u rc=%Rrc\n", uSize, rc));
     2034                Log4Func(("Failed to allocate S/G buffer: size=%u rc=%Rrc\n", uSize, rc));
    19392035                /* Stop trying to fetch TX descriptors until we get more bandwidth. */
    19402036                break;
    19412037            }
    19422038        }
    1943 
    19442039        /* Remove this descriptor chain from the available ring */
    1945         virtioCoreR3QueueSkip(pVirtio, TXQIDX_QPAIR(0));
     2040        virtioCoreR3QueueSkip(pVirtio, idxQueue);
    19462041
    19472042        /* No data to return to guest, but call is needed put elem (e.g. desc chain) on used ring */
    1948         virtioCoreR3QueuePut(pVirtio->pDevIns, pVirtio, TXQIDX_QPAIR(0), NULL, pDescChain, false);
    1949 
    1950         virtioCoreQueueSync(pVirtio->pDevIns, pVirtio, TXQIDX_QPAIR(0));
     2043        virtioCoreR3QueuePut(pVirtio->pDevIns, pVirtio, idxQueue, NULL, pDescChain, false);
     2044
     2045        virtioCoreQueueSync(pVirtio->pDevIns, pVirtio, idxQueue);
    19512046
    19522047    }
     
    19552050    if (pDrv)
    19562051        pDrv->pfnEndXmit(pDrv);
     2052
    19572053    ASMAtomicWriteU32(&pThis->uIsTransmitting, 0);
    19582054}
     
    19662062    PPDMDEVINS      pDevIns = pThisCC->pDevIns;
    19672063    PVIRTIONET      pThis   = PDMDEVINS_2_DATA(pThisCC->pDevIns, PVIRTIONET);
     2064
     2065    /** @todo If we ever start using more than one Rx/Tx queue pair, is a random queue
     2066          selection algorithm feasible or even necessary */
     2067
    19682068    virtioNetR3TransmitPendingPackets(pDevIns, pThis, pThisCC, TXQIDX_QPAIR(0), false /*fOnWorkerThread*/);
    19692069}
     
    19722072 * @callback_method_impl{VIRTIOCORER3,pfnQueueNotified}
    19732073 */
    1974 static DECLCALLBACK(void) virtioNetR3QueueNotified(PVIRTIOCORE pVirtio, PVIRTIOCORECC pVirtioCC, uint16_t qIdx)
     2074static DECLCALLBACK(void) virtioNetR3QueueNotified(PVIRTIOCORE pVirtio, PVIRTIOCORECC pVirtioCC, uint16_t idxQueue)
    19752075{
    19762076    PVIRTIONET         pThis     = RT_FROM_MEMBER(pVirtio, VIRTIONET, Virtio);
    19772077    PVIRTIONETCC       pThisCC   = RT_FROM_MEMBER(pVirtioCC, VIRTIONETCC, Virtio);
    19782078    PPDMDEVINS         pDevIns   = pThisCC->pDevIns;
    1979     PVIRTIONETWORKER   pWorker   = &pThis->aWorkers[qIdx];
    1980     PVIRTIONETWORKERR3 pWorkerR3 = &pThisCC->aWorkers[qIdx];
    1981     AssertReturnVoid(qIdx < pThis->cVirtQueues);
     2079    PVIRTIONETWORKER   pWorker   = &pThis->aWorkers[idxQueue];
     2080    PVIRTIONETWORKERR3 pWorkerR3 = &pThisCC->aWorkers[idxQueue];
     2081    AssertReturnVoid(idxQueue < pThis->cVirtQueues);
    19822082
    19832083#ifdef LOG_ENABLED
     
    19852085#endif
    19862086
    1987     Log6Func(("%s has available buffers\n", VIRTQNAME(qIdx)));
    1988 
    1989     if (IS_RX_QUEUE(qIdx))
     2087    Log6Func(("%s has available buffers\n", VIRTQNAME(idxQueue)));
     2088
     2089    if (IS_RX_QUEUE(idxQueue))
    19902090    {
    19912091        LogFunc(("%s Receive buffers has been added, waking up receive thread.\n",
     
    20002100            if (ASMAtomicReadBool(&pWorkerR3->fSleeping))
    20012101            {
    2002                 Log6Func(("waking %s worker.\n", VIRTQNAME(qIdx)));
     2102                Log6Func(("waking %s worker.\n", VIRTQNAME(idxQueue)));
    20032103                int rc = PDMDevHlpSUPSemEventSignal(pDevIns, pWorker->hEvtProcess);
    20042104                AssertRC(rc);
     
    20132113static DECLCALLBACK(int) virtioNetR3WorkerThread(PPDMDEVINS pDevIns, PPDMTHREAD pThread)
    20142114{
    2015     uint16_t const     qIdx      = (uint16_t)(uintptr_t)pThread->pvUser;
     2115    uint16_t const     idxQueue      = (uint16_t)(uintptr_t)pThread->pvUser;
    20162116    PVIRTIONET         pThis     = PDMDEVINS_2_DATA(pDevIns, PVIRTIONET);
    20172117    PVIRTIONETCC       pThisCC   = PDMDEVINS_2_DATA_CC(pDevIns, PVIRTIONETCC);
    2018     PVIRTIONETWORKER   pWorker   = &pThis->aWorkers[qIdx];
    2019     PVIRTIONETWORKERR3 pWorkerR3 = &pThisCC->aWorkers[qIdx];
    2020 
     2118    PVIRTIONETWORKER   pWorker   = &pThis->aWorkers[idxQueue];
     2119    PVIRTIONETWORKERR3 pWorkerR3 = &pThisCC->aWorkers[idxQueue];
    20212120    if (pThread->enmState == PDMTHREADSTATE_INITIALIZING)
     2121    {
    20222122        return VINF_SUCCESS;
    2023 
     2123    }
     2124    LogFunc(("%s\n", VIRTQNAME(idxQueue)));
    20242125    while (pThread->enmState == PDMTHREADSTATE_RUNNING)
    20252126    {
    2026 
    2027         virtioCoreQueueSetNotify(&pThis->Virtio,  qIdx, true);
    2028 
    2029         if (virtioCoreQueueIsEmpty(pDevIns, &pThis->Virtio, qIdx))
     2127        virtioCoreQueueSetNotify(&pThis->Virtio, idxQueue, true);
     2128
     2129        if (virtioCoreQueueIsEmpty(pDevIns, &pThis->Virtio, idxQueue))
    20302130        {
    20312131            /* Atomic interlocks avoid missing alarm while going to sleep & notifier waking the awoken */
     
    20342134            if (!fNotificationSent)
    20352135            {
    2036                 Log6Func(("%s worker sleeping...\n", VIRTQNAME(qIdx)));
     2136                Log6Func(("%s worker sleeping...\n", VIRTQNAME(idxQueue)));
    20372137                Assert(ASMAtomicReadBool(&pWorkerR3->fSleeping));
    20382138                int rc = PDMDevHlpSUPSemEventWaitNoResume(pDevIns, pWorker->hEvtProcess, RT_INDEFINITE_WAIT);
     
    20422142                if (rc == VERR_INTERRUPTED)
    20432143                {
    2044                     virtioCoreQueueSetNotify(&pThis->Virtio, qIdx, false);
     2144                    virtioCoreQueueSetNotify(&pThis->Virtio, idxQueue, false);
    20452145                    continue;
    20462146                }
    2047                 Log6Func(("%s worker woken\n", VIRTQNAME(qIdx)));
     2147                Log6Func(("%s worker woken\n", VIRTQNAME(idxQueue)));
    20482148                ASMAtomicWriteBool(&pWorkerR3->fNotified, false);
    20492149            }
    20502150            ASMAtomicWriteBool(&pWorkerR3->fSleeping, false);
    20512151        }
    2052 
    2053         virtioCoreQueueSetNotify(&pThis->Virtio, qIdx, false);
    2054 
    2055         if (!pThis->afQueueAttached[qIdx])
    2056         {
    2057             LogFunc(("%s queue not attached, worker aborting...\n", VIRTQNAME(qIdx)));
    2058             break;
    2059         }
     2152        virtioCoreQueueSetNotify(&pThis->Virtio, idxQueue, false);
    20602153
    20612154        /* Dispatch to the handler for the queue this worker is set up to drive */
     
    20632156        if (!pThisCC->fQuiescing)
    20642157        {
    2065              if (IS_CTRL_QUEUE(qIdx))
     2158             if (IS_CTRL_QUEUE(idxQueue))
    20662159             {
    2067                  Log6Func(("fetching next descriptor chain from %s\n", VIRTQNAME(qIdx)));
     2160                 Log6Func(("fetching next descriptor chain from %s\n", VIRTQNAME(idxQueue)));
    20682161                 PVIRTIO_DESC_CHAIN_T pDescChain;
    2069                  int rc = virtioCoreR3QueueGet(pDevIns, &pThis->Virtio, qIdx, &pDescChain, true);
     2162                 int rc = virtioCoreR3QueueGet(pDevIns, &pThis->Virtio, idxQueue, &pDescChain, true);
    20702163                 if (rc == VERR_NOT_AVAILABLE)
    20712164                 {
    2072                     Log6Func(("Nothing found in %s\n", VIRTQNAME(qIdx)));
     2165                    Log6Func(("Nothing found in %s\n", VIRTQNAME(idxQueue)));
    20732166                    continue;
    20742167                 }
    20752168                 virtioNetR3Ctrl(pDevIns, pThis, pThisCC, pDescChain);
    20762169             }
    2077              else if (IS_TX_QUEUE(qIdx))
     2170             else if (IS_TX_QUEUE(idxQueue))
    20782171             {
    20792172                 Log6Func(("Notified of data to transmit\n"));
    20802173                 virtioNetR3TransmitPendingPackets(pDevIns, pThis, pThisCC,
    2081                                                    qIdx, true /* fOnWorkerThread */);
     2174                                                   idxQueue, true /* fOnWorkerThread */);
    20822175             }
     2176
    20832177             /* Rx queues aren't handled by our worker threads. Instead, the PDM network
    20842178              * leaf driver invokes PDMINETWORKDOWN.pfnWaitReceiveAvail() callback,
     
    21242218
    21252219    LogFunc(("%s: Link is up\n", INSTANCE(pThis)));
     2220
    21262221    if (pThisCC->pDrv)
    21272222        pThisCC->pDrv->pfnNotifyLinkChanged(pThisCC->pDrv, PDMNETWORKLINKSTATE_UP);
     
    21752270    PVIRTIONET   pThis   = PDMDEVINS_2_DATA(pDevIns, PVIRTIONET);
    21762271
    2177     bool fOldUp = !!(pThis->virtioNetConfig.uStatus & VIRTIONET_F_LINK_UP);
    2178     bool fNewUp = enmState == PDMNETWORKLINKSTATE_UP;
    2179 
    2180     Log(("%s virtioNetR3NetworkConfig_SetLinkState: enmState=%d\n", INSTANCE(pThis), enmState));
     2272    bool fCachedLinkIsUp = IS_LINK_UP(pThis);
     2273    bool fActiveLinkIsUp = (enmState == PDMNETWORKLINKSTATE_UP);
     2274
     2275    LogFunc(("%s: enmState=%d\n", INSTANCE(pThis), enmState));
    21812276    if (enmState == PDMNETWORKLINKSTATE_DOWN_RESUME)
    21822277    {
    2183         if (fOldUp)
     2278        if (fCachedLinkIsUp)
    21842279        {
    21852280            /*
     
    21922287        }
    21932288    }
    2194     else if (fNewUp != fOldUp)
    2195     {
    2196         if (fNewUp)
     2289    else if (fActiveLinkIsUp != fCachedLinkIsUp)
     2290    {
     2291        if (fCachedLinkIsUp)
    21972292        {
    21982293            Log(("%s Link is up\n", INSTANCE(pThis)));
    21992294            pThis->fCableConnected = true;
    2200             pThis->virtioNetConfig.uStatus |= VIRTIONET_F_LINK_UP;
     2295            SET_LINK_UP(pThis);
    22012296            virtioCoreNotifyConfigChanged(&pThis->Virtio);
    22022297        }
    2203         else
     2298        else /* cached Link state is down */
    22042299        {
    22052300            /* The link was brought down explicitly, make sure it won't come up by timer.  */
     
    22072302            Log(("%s Link is down\n", INSTANCE(pThis)));
    22082303            pThis->fCableConnected = false;
    2209             pThis->virtioNetConfig.uStatus &= ~VIRTIONET_F_LINK_UP;
     2304            SET_LINK_DOWN(pThis);
    22102305            virtioCoreNotifyConfigChanged(&pThis->Virtio);
    22112306        }
     
    22162311}
    22172312
     2313static int virtioNetR3DestroyWorkerThreads(PPDMDEVINS pDevIns, PVIRTIONET pThis, PVIRTIONETCC pThisCC)
     2314{
     2315    LogFunc(("\n"));
     2316    int rc = VINF_SUCCESS;
     2317    for (unsigned idxQueue = 0; idxQueue < pThis->cVirtQueues; idxQueue++)
     2318    {
     2319        PVIRTIONETWORKER pWorker = &pThis->aWorkers[idxQueue];
     2320        if (pWorker->hEvtProcess != NIL_SUPSEMEVENT)
     2321        {
     2322            PDMDevHlpSUPSemEventClose(pDevIns, pWorker->hEvtProcess);
     2323            pWorker->hEvtProcess = NIL_SUPSEMEVENT;
     2324        }
     2325        if (pThisCC->aWorkers[idxQueue].pThread)
     2326        {
     2327            int rcThread;
     2328            rc = PDMDevHlpThreadDestroy(pDevIns, pThisCC->aWorkers[idxQueue].pThread, &rcThread);
     2329            if (RT_FAILURE(rc) || RT_FAILURE(rcThread))
     2330                AssertMsgFailed(("%s Failed to destroythread rc=%Rrc rcThread=%Rrc\n", __FUNCTION__, rc, rcThread));
     2331           pThisCC->aWorkers[idxQueue].pThread = NULL;
     2332        }
     2333    }
     2334    return rc;
     2335}
     2336
/**
 * Creates a wakeup semaphore and an I/O worker thread for each TX queue and the
 * control queue.
 *
 * Rx queues get no workers: per the comment below they are serviced via the PDM
 * network leaf driver's receive path instead.  The loop starts at idxQueue = 1,
 * which skips index 0; per RXQIDX_QPAIR(0) that is the first receive queue and
 * would be skipped by IS_RX_QUEUE() anyway.
 *
 * @returns VBox status code; on failure some workers/semaphores may already
 *          exist — caller is expected to run the destroy path for cleanup.
 * @param   pDevIns     The device instance.
 * @param   pThis       Shared device state (receives aWorkers[].hEvtProcess and
 *                      afQueueAttached[] markings).
 * @param   pThisCC     Ring-3 device state (receives aWorkers[].pThread).
 */
static int virtioNetR3CreateWorkerThreads(PPDMDEVINS pDevIns, PVIRTIONET pThis, PVIRTIONETCC pThisCC)
{
    LogFunc(("\n"));

    int rc = VINF_SUCCESS;
    /* Attach the queues and create worker threads for them: */
    for (uint16_t idxQueue = 1; idxQueue < pThis->cVirtQueues; idxQueue++)
    {
        /* Skip creating threads for receive queues, only create for transmit queues & control queue */
        if (IS_RX_QUEUE(idxQueue))
            continue;

        /* Semaphore must exist before the thread, since the worker waits on it. */
        rc = PDMDevHlpSUPSemEventCreate(pDevIns, &pThis->aWorkers[idxQueue].hEvtProcess);

        if (RT_FAILURE(rc))
            return PDMDevHlpVMSetError(pDevIns, rc, RT_SRC_POS,
                                       N_("DevVirtioNET: Failed to create SUP event semaphore"));

        /* pvUser carries the queue index; the worker recovers it from pThread->pvUser. */
        rc = PDMDevHlpThreadCreate(pDevIns, &pThisCC->aWorkers[idxQueue].pThread,
                                   (void *)(uintptr_t)idxQueue, virtioNetR3WorkerThread,
                                   virtioNetR3WakeupWorker, 0, RTTHREADTYPE_IO, VIRTQNAME(idxQueue));
        if (rc != VINF_SUCCESS)
        {
            LogRel(("Error creating thread for Virtual Queue %s: %Rrc\n", VIRTQNAME(idxQueue), rc));
            return rc;
        }

        /* Mark queue attached so the worker loop knows it may service it. */
        pThis->afQueueAttached[idxQueue] = true;
    }
    return rc;
}
    22182368
    22192369/**
     
    22252375    PVIRTIONETCC   pThisCC   = RT_FROM_MEMBER(pVirtioCC, VIRTIONETCC, Virtio);
    22262376
    2227     LogFunc((""));
     2377    LogFunc(("\n"));
    22282378
    22292379    pThis->fVirtioReady = fVirtioReady;
     
    22322382    {
    22332383        LogFunc(("VirtIO ready\n-----------------------------------------------------------------------------------------\n"));
    2234 //        uint64_t fFeatures   = virtioCoreGetNegotiatedFeatures(pThis->Virtio);
     2384
    22352385        pThis->fResetting    = false;
    22362386        pThisCC->fQuiescing  = false;
    2237 
    2238         for (unsigned i = 0; i < VIRTIONET_MAX_QUEUES; i++)
    2239             pThis->afQueueAttached[i] = true;
     2387        pThis->fNegotiatedFeatures = virtioCoreGetAcceptedFeatures(pVirtio);
     2388        for (unsigned idxQueue = 0; idxQueue < pThis->cVirtQueues; idxQueue++)
     2389        {
     2390            (void) virtioCoreR3QueueAttach(&pThis->Virtio, idxQueue, VIRTQNAME(idxQueue));
     2391            pThis->afQueueAttached[idxQueue] = true;
     2392            virtioCoreQueueSetNotify(&pThis->Virtio, idxQueue, true);
     2393        }
    22402394    }
    22412395    else
     
    22622416        pThisCC->pDrv->pfnSetPromiscuousMode(pThisCC->pDrv, true);
    22632417
    2264         for (unsigned i = 0; i < VIRTIONET_MAX_QUEUES; i++)
    2265             pThis->afQueueAttached[i] = false;
     2418        for (unsigned idxQueue = 0; idxQueue < pThis->cVirtQueues; idxQueue++)
     2419            pThis->afQueueAttached[idxQueue] = false;
    22662420    }
    22672421}
     
    22812435    PVIRTIONETCC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PVIRTIONETCC);
    22822436
    2283     LogFunc((""));
     2437    LogFunc(("\n"));
    22842438    AssertLogRelReturnVoid(iLUN == 0);
    22852439
     
    23072461
    23082462    RT_NOREF(fFlags);
     2463
    23092464    LogFunc(("%s",  INSTANCE(pThis)));
    23102465
     
    23692524    PVIRTIONETCC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PVIRTIONETCC);
    23702525
    2371     for (unsigned qIdx = 0; qIdx < pThis->cVirtQueues; qIdx++)
    2372     {
    2373         PVIRTIONETWORKER pWorker = &pThis->aWorkers[qIdx];
    2374         if (pWorker->hEvtProcess != NIL_SUPSEMEVENT)
    2375         {
    2376             PDMDevHlpSUPSemEventClose(pDevIns, pWorker->hEvtProcess);
    2377             pWorker->hEvtProcess = NIL_SUPSEMEVENT;
    2378         }
    2379         if (pThisCC->aWorkers[qIdx].pThread)
    2380         {
    2381             /* Destroy the thread. */
    2382             int rcThread;
    2383             int rc = PDMDevHlpThreadDestroy(pDevIns, pThisCC->aWorkers[qIdx].pThread, &rcThread);
    2384             if (RT_FAILURE(rc) || RT_FAILURE(rcThread))
    2385                 AssertMsgFailed(("%s Failed to destroythread rc=%Rrc rcThread=%Rrc\n", __FUNCTION__, rc, rcThread));
    2386            pThisCC->aWorkers[qIdx].pThread = NULL;
    2387         }
    2388     }
     2526    Log(("%s Destroying instance\n", INSTANCE(pThis)));
     2527
     2528    if (pThis->hEventRxDescAvail != NIL_SUPSEMEVENT)
     2529    {
     2530        PDMDevHlpSUPSemEventSignal(pDevIns, pThis->hEventRxDescAvail);
     2531        PDMDevHlpSUPSemEventClose(pDevIns, pThis->hEventRxDescAvail);
     2532        pThis->hEventRxDescAvail = NIL_SUPSEMEVENT;
     2533    }
     2534
     2535    virtioNetR3DestroyWorkerThreads(pDevIns, pThis, pThisCC);
    23892536
    23902537    virtioCoreR3Term(pDevIns, &pThis->Virtio, &pThisCC->Virtio);
     2538
    23912539    return VINF_SUCCESS;
    23922540}
     
    24832631    VirtioPciParams.uInterruptPin           = 0x01;
    24842632
     2633    /*
     2634     * Initialize VirtIO core. This will result in a "status changed" callback
     2635     * when VirtIO is ready, at which time the Rx queue and ctrl queue worker threads will be created.
     2636     */
    24852637    rc = virtioCoreR3Init(pDevIns, &pThis->Virtio, &pThisCC->Virtio, &VirtioPciParams, INSTANCE(pThis),
    24862638                          VIRTIONET_HOST_FEATURES_OFFERED,
     
    24952647    pThis->cVirtqPairs =   pThis->fNegotiatedFeatures & VIRTIONET_F_MQ
    24962648                         ? pThis->virtioNetConfig.uMaxVirtqPairs : 1;
    2497     pThis->cVirtQueues += pThis->cVirtqPairs * 2;
     2649
     2650    pThis->cVirtQueues += pThis->cVirtqPairs * 2 + 1;
    24982651
    24992652    /* Create Link Up Timer */
     
    25062659    virtioNetR3SetVirtqNames(pThis);
    25072660
    2508     /* Attach the queues and create worker threads for them: */
    2509     for (uint16_t qIdx = 0; qIdx < pThis->cVirtQueues + 1; qIdx++)
    2510     {
    2511 
    2512         rc = virtioCoreR3QueueAttach(&pThis->Virtio, qIdx, VIRTQNAME(qIdx));
    2513         if (RT_FAILURE(rc))
    2514         {
    2515             pThis->afQueueAttached[qIdx] = true;
    2516             continue;
    2517         }
    2518 
    2519         /* Skip creating threads for receive queues, only create for transmit queues & control queue */
    2520         if (IS_RX_QUEUE(qIdx))
    2521             continue;
    2522 
    2523         rc = PDMDevHlpThreadCreate(pDevIns, &pThisCC->aWorkers[qIdx].pThread,
    2524                                    (void *)(uintptr_t)qIdx, virtioNetR3WorkerThread,
    2525                                    virtioNetR3WakeupWorker, 0, RTTHREADTYPE_IO, VIRTQNAME(qIdx));
    2526         if (rc != VINF_SUCCESS)
    2527         {
    2528             LogRel(("Error creating thread for Virtual Queue %s: %Rrc\n", VIRTQNAME(qIdx), rc));
    2529             return rc;
    2530         }
    2531 
    2532         rc = PDMDevHlpSUPSemEventCreate(pDevIns, &pThis->aWorkers[qIdx].hEvtProcess);
    2533         if (RT_FAILURE(rc))
    2534             return PDMDevHlpVMSetError(pDevIns, rc, RT_SRC_POS,
    2535                                        N_("DevVirtioNET: Failed to create SUP event semaphore"));
    2536         pThis->afQueueAttached[qIdx] = true;
    2537     }
    2538 
    25392661    /*
    2540      * Status driver (optional).
     2662     * Create queue workers for life of instance. (I.e. they persist through VirtIO bounces)
     2663     */
     2664    rc = virtioNetR3CreateWorkerThreads(pDevIns, pThis, pThisCC);
     2665    if (RT_FAILURE(rc))
     2666        return PDMDEV_SET_ERROR(pDevIns, rc, N_("Failed to worker threads"));
     2667
     2668    /*
     2669     * Create the semaphore that will be used to synchronize/throttle
     2670     * the downstream LUN's Rx waiter thread.
     2671     */
     2672    rc = PDMDevHlpSUPSemEventCreate(pDevIns, &pThis->hEventRxDescAvail);
     2673    if (RT_FAILURE(rc))
     2674        return PDMDEV_SET_ERROR(pDevIns, rc, N_("Failed to create event semaphore"));
     2675
     2676    /*
     2677     * Attach network driver instance
     2678     */
     2679    rc = PDMDevHlpDriverAttach(pDevIns, 0, &pThisCC->IBase, &pThisCC->pDrvBase, "Network Port");
     2680    if (RT_SUCCESS(rc))
     2681    {
     2682        pThisCC->pDrv = PDMIBASE_QUERY_INTERFACE(pThisCC->pDrvBase, PDMINETWORKUP);
     2683        AssertMsgStmt(pThisCC->pDrv, ("Failed to obtain the PDMINETWORKUP interface!\n"),
     2684                      rc = VERR_PDM_MISSING_INTERFACE_BELOW);
     2685    }
     2686    else if (   rc == VERR_PDM_NO_ATTACHED_DRIVER
     2687             || rc == VERR_PDM_CFG_MISSING_DRIVER_NAME)
     2688                    Log(("%s No attached driver!\n", INSTANCE(pThis)));
     2689
     2690    /*
     2691     * Status driver
    25412692     */
    25422693    PPDMIBASE pUpBase;
     
    25442695    if (RT_FAILURE(rc) && rc != VERR_PDM_NO_ATTACHED_DRIVER)
    25452696        return PDMDEV_SET_ERROR(pDevIns, rc, N_("Failed to attach the status LUN"));
     2697
    25462698    pThisCC->pLedsConnector = PDMIBASE_QUERY_INTERFACE(pUpBase, PDMILEDCONNECTORS);
    25472699
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette