VirtualBox

Changeset 83913 in vbox for trunk/src/VBox/Devices/Network


Timestamp:
Apr 22, 2020 4:52:12 AM (5 years ago)
Author:
vboxsync
Message:

Network/DevVirtioNet_1_0.cpp: Various fixes and improvements, most notably: implemented SSM (save, restore, pause, resume, stop), all tested; implemented STAM similarly to DevVirtioNet.cpp, verified; added support for the VIRTIO_F_EVENT_IDX feature; improved internal handling of Rx/Tx queue pair management (will facilitate the multiqueue (MQ) feature better); fixed various @todo's added by bird
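
For context on the queue pair handling mentioned above: virtio-net lays out its virtqueues in receive/transmit pairs, with the control queue placed after the last pair, and the changeset's RXQIDX/TXQIDX/CTRLQIDX macros (visible in the diff below) encode exactly that layout. A minimal sketch of the mapping, illustrative only and not part of the changeset (the helper names here are hypothetical):

    /* Hypothetical helpers mirroring the RXQIDX/TXQIDX/CTRLQIDX macros in the diff below. */
    static inline uint16_t rxQueueIndex(uint16_t idxPair)   { return idxPair * 2; }      /* receiveqN  */
    static inline uint16_t txQueueIndex(uint16_t idxPair)   { return idxPair * 2 + 1; }  /* transmitqN */
    static inline uint16_t ctrlQueueIndex(uint16_t cPairs)  { return cPairs * 2; }       /* controlq follows the last pair */

    /* Example: one queue pair   -> receiveq0 = 0, transmitq0 = 1, controlq = 2;
     *          four queue pairs -> receiveq3 = 6, transmitq3 = 7, controlq = 8. */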

File:
1 edited

  • trunk/src/VBox/Devices/Network/DevVirtioNet_1_0.cpp

    r83664 r83913  
    3131#define LOG_GROUP LOG_GROUP_DEV_VIRTIO
    3232#define VIRTIONET_WITH_GSO
     33#include <iprt/types.h>
    3334
    3435#include <VBox/vmm/pdmdev.h>
     36#include <VBox/vmm/stam.h>
    3537#include <VBox/vmm/pdmcritsect.h>
    3638#include <VBox/vmm/pdmnetifs.h>
     
    5658#include "../VirtIO/Virtio_1_0.h"
    5759
    58 //#include "VBoxNET.h"
    5960#include "VBoxDD.h"
    60 
    61 /** @todo FIX UP THESE HACKS AFTER DEBUGGING */
    6261
    6362/* After debugging single instance case, restore instance name logging */
    6463#define INSTANCE(pState) (char *)(pState->szInstanceName ? "" : "") // Avoid requiring RT_NOREF in some funcs
    65 
    6664
    6765#define VIRTIONET_SAVED_STATE_VERSION          UINT32_C(1)
     
    7371#define VIRTIONET_PREALLOCATE_RX_SEG_COUNT     32
    7472
    75 
    76 #define QUEUE_NAME(a_pVirtio, a_idxQueue) ((a_pVirtio)->virtqState[(a_idxQueue)].szVirtqName)
    7773#define VIRTQNAME(idxQueue)       (pThis->aszVirtqNames[idxQueue])
    7874#define CBVIRTQNAME(idxQueue)     RTStrNLen(VIRTQNAME(idxQueue), sizeof(VIRTQNAME(idxQueue)))
     
    9894#define IS_RX_QUEUE(n)    ((n) != CTRLQIDX && !IS_TX_QUEUE(n))
    9995#define IS_CTRL_QUEUE(n)  ((n) == CTRLQIDX)
    100 #define RXQIDX_QPAIR(qPairIdx)  (qPairIdx * 2)
    101 #define TXQIDX_QPAIR(qPairIdx)  (qPairIdx * 2 + 1)
     96#define RXQIDX(qPairIdx)  (qPairIdx * 2)
     97#define TXQIDX(qPairIdx)  (qPairIdx * 2 + 1)
    10298#define CTRLQIDX          (FEATURE_ENABLED(MQ) ? ((VIRTIONET_MAX_QPAIRS - 1) * 2 + 2) : 2)
    10399
    104 #define RXVIRTQNAME(qPairIdx)  (pThis->aszVirtqNames[RXQIDX_QPAIR(qPairIdx)])
    105 #define TXVIRTQNAME(qPairIdx)  (pThis->aszVirtqNames[TXQIDX_QPAIR(qPairIdx)])
    106 #define CTLVIRTQNAME(qPairIdx) (pThis->aszVirtqNames[CTRLQIDX])
    107 
    108100#define LUN0    0
    109 
    110101
    111102/*
     
    165156    | VIRTIONET_F_CTRL_VLAN             \
    166157    | VIRTIONET_HOST_FEATURES_GSO       \
    167     | VIRTIONET_F_MRG_RXBUF
     158    | VIRTIONET_F_MRG_RXBUF             \
     159    | VIRTIO_F_EVENT_IDX  /** @todo  Trying this experimentally as potential workaround for bug
     160                           *         where virtio seems to expect interrupt for Rx/Used even though
     161                           *         its set the used ring flag in the Rx queue to skip the notification by device */
    168162
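Regarding the VIRTIO_F_EVENT_IDX experiment noted in the comment just above: when that feature is negotiated, the device is expected to suppress the Rx/Used interrupt unless the used index has passed the event index the driver published in the avail ring. The check defined by the VirtIO 1.0 specification (shown here purely for reference; this is not code from the changeset) is:

    /* Standard event-index check from the VirtIO 1.0 spec (virtio_ring.h carries the same helper).
     * Returns non-zero if the other side needs a notification after moving from old_idx to new_idx,
     * given the event_idx it published. The unsigned 16-bit wrap-around is intentional. */
    static inline int vringNeedEvent(uint16_t event_idx, uint16_t new_idx, uint16_t old_idx)
    {
        return (uint16_t)(new_idx - event_idx - 1) < (uint16_t)(new_idx - old_idx);
    }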
    169163#define PCI_DEVICE_ID_VIRTIONET_HOST               0x1041      /**< Informs guest driver of type of VirtIO device   */
    170 #define PCI_CLASS_BASE_NETWORK_CONTROLLER          0x02        /**< PCI Network device class                   */
     164#define PCI_CLASS_BASE_NETWORK_CONTROLLER          0x02        /**< PCI Network device class                        */
    171165#define PCI_CLASS_SUB_NET_ETHERNET_CONTROLLER      0x00        /**< PCI NET Controller subclass                     */
    172166#define PCI_CLASS_PROG_UNSPECIFIED                 0x00        /**< Programming interface. N/A.                     */
     
    312306    bool volatile                   fSleeping;                  /**< Flags whether worker thread is sleeping or not    */
    313307    bool volatile                   fNotified;                  /**< Flags whether worker thread notified              */
     308    uint16_t                        idxQueue;                   /**< Index of associated queue                         */
    314309} VIRTIONETWORKERR3;
    315310/** Pointer to a VirtIO SCSI worker. */
     
    345340    uint16_t                cVirtQueues;
    346341
     342    uint16_t                cWorkers;
     343
    347344    uint64_t                fNegotiatedFeatures;
    348345
     
    417414    uint8_t                 aVlanFilter[VIRTIONET_MAX_VLAN_ID / sizeof(uint8_t)];
    418415
    419     /* Receive-blocking-related fields ***************************************/
    420 
     416    /** @name Statistic
     417     * @{ */
     418    STAMCOUNTER             StatReceiveBytes;
     419    STAMCOUNTER             StatTransmitBytes;
     420    STAMCOUNTER             StatReceiveGSO;
     421    STAMCOUNTER             StatTransmitPackets;
     422    STAMCOUNTER             StatTransmitGSO;
     423    STAMCOUNTER             StatTransmitCSum;
     424#ifdef VBOX_WITH_STATISTICS
     425    STAMPROFILE             StatReceive;
     426    STAMPROFILE             StatReceiveStore;
     427    STAMPROFILEADV          StatTransmit;
     428    STAMPROFILE             StatTransmitSend;
     429    STAMPROFILE             StatRxOverflow;
     430    STAMCOUNTER             StatRxOverflowWakeup;
     431    STAMCOUNTER             StatTransmitByNetwork;
     432    STAMCOUNTER             StatTransmitByThread;
     433    /** @}  */
     434#endif
    421435} VIRTIONET;
    422436/** Pointer to the shared state of the VirtIO Host NET device. */
     
    537551    AssertReturnVoid(pThis->hEventRxDescAvail != NIL_SUPSEMEVENT);
    538552
     553    STAM_COUNTER_INC(&pThis->StatRxOverflowWakeup);
     554
    539555    Log10Func(("%s Waking downstream driver's Rx buf waiter thread\n", INSTANCE(pThis)));
    540556    int rc = PDMDevHlpSUPSemEventSignal(pDevIns, pThis->hEventRxDescAvail);
     
    546562    for (uint16_t qPairIdx = 0; qPairIdx < pThis->cVirtqPairs; qPairIdx++)
    547563    {
    548         RTStrPrintf(pThis->aszVirtqNames[RXQIDX_QPAIR(qPairIdx)], VIRTIO_MAX_QUEUE_NAME_SIZE, "receiveq<%d>",  qPairIdx);
    549         RTStrPrintf(pThis->aszVirtqNames[TXQIDX_QPAIR(qPairIdx)], VIRTIO_MAX_QUEUE_NAME_SIZE, "transmitq<%d>", qPairIdx);
     564        RTStrPrintf(pThis->aszVirtqNames[RXQIDX(qPairIdx)], VIRTIO_MAX_QUEUE_NAME_SIZE, "receiveq<%d>",  qPairIdx);
     565        RTStrPrintf(pThis->aszVirtqNames[TXQIDX(qPairIdx)], VIRTIO_MAX_QUEUE_NAME_SIZE, "transmitq<%d>", qPairIdx);
    550566    }
    551567    RTStrCopy(pThis->aszVirtqNames[CTRLQIDX], VIRTIO_MAX_QUEUE_NAME_SIZE, "controlq");
     
    596612}
    597613
    598 DECLINLINE(void) virtioNetPrintFeatures(uint32_t fFeatures, const char *pcszText)
     614DECLINLINE(void) virtioNetPrintFeatures(VIRTIONET *pThis)
    599615{
    600616#ifdef LOG_ENABLED
    601617    static struct
    602618    {
    603         uint32_t fMask;
     619        uint64_t fFeatureBit;
    604620        const char *pcszDesc;
    605621    } const s_aFeatures[] =
    606622    {
    607         { VIRTIONET_F_CSUM,                "   CSUM:                Host handles packets with partial checksum.\n" },
    608         { VIRTIONET_F_GUEST_CSUM,          "   GUEST_CSUM:          Guest handles packets with partial checksum.\n" },
    609         { VIRTIONET_F_CTRL_GUEST_OFFLOADS, "   CTRL_GUEST_OFFLOADS: Control channel offloads reconfiguration support.\n" },
    610         { VIRTIONET_F_MAC,                 "   MAC:                 Host has given MAC address.\n" },
    611         { VIRTIONET_F_GUEST_TSO4,          "   GUEST_TSO4:          Guest can receive TSOv4.\n" },
    612         { VIRTIONET_F_GUEST_TSO6,          "   GUEST_TSO6:          Guest can receive TSOv6.\n" },
    613         { VIRTIONET_F_GUEST_ECN,           "   GUEST_ECN:           Guest can receive TSO with ECN.\n" },
    614         { VIRTIONET_F_GUEST_UFO,           "   GUEST_UFO:           Guest can receive UFO.\n" },
    615         { VIRTIONET_F_HOST_TSO4,           "   HOST_TSO4:           Host can receive TSOv4.\n" },
    616         { VIRTIONET_F_HOST_TSO6,           "   HOST_TSO6:           Host can receive TSOv6.\n" },
    617         { VIRTIONET_F_HOST_ECN,            "   HOST_ECN:            Host can receive TSO with ECN.\n" },
    618         { VIRTIONET_F_HOST_UFO,            "   HOST_UFO:            Host can receive UFO.\n" },
    619         { VIRTIONET_F_MRG_RXBUF,           "   MRG_RXBUF:           Guest can merge receive buffers.\n" },
    620         { VIRTIONET_F_STATUS,              "   STATUS:              Configuration status field is available.\n" },
    621         { VIRTIONET_F_CTRL_VQ,             "   CTRL_VQ:             Control channel is available.\n" },
    622         { VIRTIONET_F_CTRL_RX,             "   CTRL_RX:             Control channel RX mode support.\n" },
    623         { VIRTIONET_F_CTRL_VLAN,           "   CTRL_VLAN:           Control channel VLAN filtering.\n" },
    624         { VIRTIONET_F_GUEST_ANNOUNCE,      "   GUEST_ANNOUNCE:      Guest can send gratuitous packets.\n" },
    625         { VIRTIONET_F_MQ,                  "   MQ:                  Host supports multiqueue with automatic receive steering.\n" },
    626         { VIRTIONET_F_CTRL_MAC_ADDR,       "   CTRL_MAC_ADDR:       Set MAC address through control channel.\n" }
     623        { VIRTIONET_F_CSUM,                "   CSUM                 Host handles packets with partial checksum.\n" },
     624        { VIRTIONET_F_GUEST_CSUM,          "   GUEST_CSUM           Guest handles packets with partial checksum.\n" },
     625        { VIRTIONET_F_CTRL_GUEST_OFFLOADS, "   CTRL_GUEST_OFFLOADS  Control channel offloads reconfiguration support.\n" },
     626        { VIRTIONET_F_MAC,                 "   MAC                  Host has given MAC address.\n" },
     627        { VIRTIONET_F_GUEST_TSO4,          "   GUEST_TSO4           Guest can receive TSOv4.\n" },
     628        { VIRTIONET_F_GUEST_TSO6,          "   GUEST_TSO6           Guest can receive TSOv6.\n" },
     629        { VIRTIONET_F_GUEST_ECN,           "   GUEST_ECN            Guest can receive TSO with ECN.\n" },
     630        { VIRTIONET_F_GUEST_UFO,           "   GUEST_UFO            Guest can receive UFO.\n" },
     631        { VIRTIONET_F_HOST_TSO4,           "   HOST_TSO4            Host can receive TSOv4.\n" },
     632        { VIRTIONET_F_HOST_TSO6,           "   HOST_TSO6            Host can receive TSOv6.\n" },
     633        { VIRTIONET_F_HOST_ECN,            "   HOST_ECN             Host can receive TSO with ECN.\n" },
     634        { VIRTIONET_F_HOST_UFO,            "   HOST_UFO             Host can receive UFO.\n" },
     635        { VIRTIONET_F_MRG_RXBUF,           "   MRG_RXBUF            Guest can merge receive buffers.\n" },
     636        { VIRTIONET_F_STATUS,              "   STATUS               Configuration status field is available.\n" },
     637        { VIRTIONET_F_CTRL_VQ,             "   CTRL_VQ              Control channel is available.\n" },
     638        { VIRTIONET_F_CTRL_RX,             "   CTRL_RX              Control channel RX mode support.\n" },
     639        { VIRTIONET_F_CTRL_VLAN,           "   CTRL_VLAN            Control channel VLAN filtering.\n" },
     640        { VIRTIONET_F_GUEST_ANNOUNCE,      "   GUEST_ANNOUNCE       Guest can send gratuitous packets.\n" },
     641        { VIRTIONET_F_MQ,                  "   MQ                   Host supports multiqueue with automatic receive steering.\n" },
     642        { VIRTIONET_F_CTRL_MAC_ADDR,       "   CTRL_MAC_ADDR        Set MAC address through control channel.\n" }
    627643    };
    628644
    629645#define MAXLINE 80
    630646    /* Display as a single buf to prevent interceding log messages */
    631     char *pszBuf = (char *)RTMemAllocZ(RT_ELEMENTS(s_aFeatures) * 80), *cp = pszBuf;
     647    uint64_t fFeaturesOfferedMask = VIRTIONET_HOST_FEATURES_OFFERED;
     648    uint16_t cbBuf = RT_ELEMENTS(s_aFeatures) * 132;
     649    char *pszBuf = (char *)RTMemAllocZ(cbBuf);
    632650    Assert(pszBuf);
     651    char *cp = pszBuf;
    633652    for (unsigned i = 0; i < RT_ELEMENTS(s_aFeatures); ++i)
    634         if (s_aFeatures[i].fMask & fFeatures) {
    635             int len = RTStrNLen(s_aFeatures[i].pcszDesc, MAXLINE);
    636             memcpy(cp, s_aFeatures[i].pcszDesc, len); /* intentionally drop trailing '\0' */
    637             cp += len;
    638         }
    639     Log3(("%s:\n%s\n", pcszText, pszBuf));
     653    {
     654        bool isOffered = fFeaturesOfferedMask & s_aFeatures[i].fFeatureBit;
     655        bool isNegotiated = pThis->fNegotiatedFeatures & s_aFeatures[i].fFeatureBit;
     656        cp += RTStrPrintf(cp, cbBuf - (cp - pszBuf), "        %s       %s   %s",
     657                          isOffered ? "+" : "-", isNegotiated ? "x" : " ", s_aFeatures[i].pcszDesc);
     658    }
     659    Log3(("VirtIO Net Features Configuration\n\n"
     660          "    Offered  Accepted  Feature              Description\n"
     661          "    -------  --------  -------              -----------\n"
     662          "%s\n", pszBuf));
    640663    RTMemFree(pszBuf);
    641664
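For illustration, the reworked virtioNetPrintFeatures() above produces log output roughly like the following (hypothetical excerpt; '+' marks an offered feature, 'x' an accepted one):

    VirtIO Net Features Configuration

        Offered  Accepted  Feature              Description
        -------  --------  -------              -----------
        +        x         CSUM                 Host handles packets with partial checksum.
        +                  MQ                   Host supports multiqueue with automatic receive steering.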
     
    702725             && offConfig + cb <= RT_UOFFSETOF(VIRTIONET_CONFIG_T, member) \
    703726                                + RT_SIZEOFMEMB(VIRTIONET_CONFIG_T, member)) )
    704 
    705 /*         || (   offConfig == RT_UOFFSETOF(VIRTIONET_CONFIG_T, member) \
    706                && cb == RT_SIZEOFMEMB(VIRTIONET_CONFIG_T, member)) )
    707 */
    708727
    709728#ifdef LOG_ENABLED
     
    835854    virtioNetR3SetVirtqNames(pThis);
    836855
     856    pHlp->pfnSSMGetU64(     pSSM, &pThis->fNegotiatedFeatures);
     857
     858    pHlp->pfnSSMGetU16(     pSSM, &pThis->cVirtQueues);
     859    pHlp->pfnSSMGetU16(     pSSM, &pThis->cWorkers);
     860
    837861    for (int idxQueue = 0; idxQueue < pThis->cVirtQueues; idxQueue++)
    838862        pHlp->pfnSSMGetBool(pSSM, &pThis->afQueueAttached[idxQueue]);
     863
     864    int rc;
     865
     866    if (uPass == SSM_PASS_FINAL)
     867    {
     868
     869    /* Load config area */
     870#if FEATURE_OFFERED(STATUS)
     871    /* config checks */
     872    RTMAC macConfigured;
     873    rc = pHlp->pfnSSMGetMem(pSSM, &macConfigured.au8, sizeof(macConfigured.au8));
     874    AssertRCReturn(rc, rc);
     875    if (memcmp(&macConfigured.au8, &pThis->macConfigured.au8, sizeof(macConfigured.au8))
     876        && (uPass == 0 || !PDMDevHlpVMTeleportedAndNotFullyResumedYet(pDevIns)))
     877        LogRel(("%s: The mac address differs: config=%RTmac saved=%RTmac\n",
     878            INSTANCE(pThis), &pThis->macConfigured, &macConfigured));
     879#endif
     880#if FEATURE_OFFERED(MQ)
     881        pHlp->pfnSSMGetU16( pSSM, &pThis->virtioNetConfig.uMaxVirtqPairs);
     882#endif
     883        /* Save device-specific part */
     884        pHlp->pfnSSMGetBool(    pSSM, &pThis->fCableConnected);
     885        pHlp->pfnSSMGetU8(      pSSM, &pThis->fPromiscuous);
     886        pHlp->pfnSSMGetU8(      pSSM, &pThis->fAllMulticast);
     887        pHlp->pfnSSMGetU8(      pSSM, &pThis->fAllUnicast);
     888        pHlp->pfnSSMGetU8(      pSSM, &pThis->fNoMulticast);
     889        pHlp->pfnSSMGetU8(      pSSM, &pThis->fNoUnicast);
     890        pHlp->pfnSSMGetU8(      pSSM, &pThis->fNoBroadcast);
     891
     892        pHlp->pfnSSMGetU32(     pSSM, &pThis->cMulticastFilterMacs);
     893        pHlp->pfnSSMGetMem(     pSSM, pThis->aMacMulticastFilter, pThis->cMulticastFilterMacs * sizeof(RTMAC));
     894
     895        if (pThis->cMulticastFilterMacs < VIRTIONET_MAC_FILTER_LEN)
     896            memset(&pThis->aMacMulticastFilter[pThis->cMulticastFilterMacs], 0,
     897                   (VIRTIONET_MAC_FILTER_LEN - pThis->cMulticastFilterMacs) * sizeof(RTMAC));
     898
     899        pHlp->pfnSSMGetU32(     pSSM, &pThis->cUnicastFilterMacs);
     900        pHlp->pfnSSMGetMem(     pSSM, pThis->aMacUnicastFilter, pThis->cUnicastFilterMacs * sizeof(RTMAC));
     901
     902        if (pThis->cUnicastFilterMacs < VIRTIONET_MAC_FILTER_LEN)
     903            memset(&pThis->aMacUnicastFilter[pThis->cUnicastFilterMacs], 0,
     904                   (VIRTIONET_MAC_FILTER_LEN - pThis->cUnicastFilterMacs) * sizeof(RTMAC));
     905
     906        rc = pHlp->pfnSSMGetMem(pSSM, pThis->aVlanFilter, sizeof(pThis->aVlanFilter));
     907        AssertRCReturn(rc, rc);
     908    }
    839909
    840910    /*
    841911     * Call the virtio core to let it load its state.
    842912     */
    843     int rc = virtioCoreR3LoadExec(&pThis->Virtio, pDevIns->pHlpR3, pSSM);
     913    rc = virtioCoreR3LoadExec(&pThis->Virtio, pDevIns->pHlpR3, pSSM);
    844914
    845915    /*
    846916     * Nudge queue workers
    847917     */
    848     for (int idxQueue = 0; idxQueue < pThis->cVirtqPairs; idxQueue++)
    849     {
     918    for (int idxWorker = 0; idxWorker < pThis->cWorkers; idxWorker++)
     919    {
     920        uint16_t idxQueue = pThisCC->aWorkers[idxWorker].idxQueue;
    850921        if (pThis->afQueueAttached[idxQueue])
    851922        {
    852923            Log7Func(("%s Waking %s worker.\n", INSTANCE(pThis), VIRTQNAME(idxQueue)));
    853             rc = PDMDevHlpSUPSemEventSignal(pDevIns, pThis->aWorkers[idxQueue].hEvtProcess);
     924            rc = PDMDevHlpSUPSemEventSignal(pDevIns, pThis->aWorkers[idxWorker].hEvtProcess);
    854925            AssertRCReturn(rc, rc);
    855926        }
     
    868939
    869940    RT_NOREF(pThisCC);
    870 
    871941    Log7Func(("%s SAVE EXEC!!\n", INSTANCE(pThis)));
     942
     943    pHlp->pfnSSMPutU64(     pSSM, pThis->fNegotiatedFeatures);
     944
     945    pHlp->pfnSSMPutU16(     pSSM, pThis->cVirtQueues);
     946    pHlp->pfnSSMPutU16(     pSSM, pThis->cWorkers);
    872947
    873948    for (int idxQueue = 0; idxQueue < pThis->cVirtQueues; idxQueue++)
    874949        pHlp->pfnSSMPutBool(pSSM, pThis->afQueueAttached[idxQueue]);
     950
     951    /* Save config area */
     952#if FEATURE_OFFERED(STATUS)
     953    pHlp->pfnSSMPutMem(     pSSM, pThis->virtioNetConfig.uMacAddress.au8,
     954                            sizeof(pThis->virtioNetConfig.uMacAddress.au8));
     955#endif
     956#if FEATURE_OFFERED(MQ)
     957    pHlp->pfnSSMPutU16(     pSSM, pThis->virtioNetConfig.uMaxVirtqPairs);
     958#endif
     959
     960    /* Save device-specific part */
     961    pHlp->pfnSSMPutBool(    pSSM, pThis->fCableConnected);
     962    pHlp->pfnSSMPutU8(      pSSM, pThis->fPromiscuous);
     963    pHlp->pfnSSMPutU8(      pSSM, pThis->fAllMulticast);
     964    pHlp->pfnSSMPutU8(      pSSM, pThis->fAllUnicast);
     965    pHlp->pfnSSMPutU8(      pSSM, pThis->fNoMulticast);
     966    pHlp->pfnSSMPutU8(      pSSM, pThis->fNoUnicast);
     967    pHlp->pfnSSMPutU8(      pSSM, pThis->fNoBroadcast);
     968
     969    pHlp->pfnSSMPutU32(     pSSM, pThis->cMulticastFilterMacs);
     970    pHlp->pfnSSMPutMem(     pSSM, pThis->aMacMulticastFilter, pThis->cMulticastFilterMacs * sizeof(RTMAC));
     971
     972    pHlp->pfnSSMPutU32(     pSSM, pThis->cUnicastFilterMacs);
     973    pHlp->pfnSSMPutMem(     pSSM, pThis->aMacUnicastFilter, pThis->cUnicastFilterMacs * sizeof(RTMAC));
     974
     975    int rc = pHlp->pfnSSMPutMem(pSSM, pThis->aVlanFilter, sizeof(pThis->aVlanFilter));
     976    AssertRCReturn(rc, rc);
    875977
    876978    /*
     
    884986*   Device interface.                                                                                                            *
    885987*********************************************************************************************************************************/
    886 
     988/*xx*/
    887989/**
    888990 * @callback_method_impl{FNPDMDEVASYNCNOTIFY}
     
    9321034        PDMDevHlpAsyncNotificationCompleted(pDevIns);
    9331035
    934     /** @todo make sure Rx and Tx are really quiesced (how to we synchronize w/downstream driver?) */
     1036    /** @todo make sure Rx and Tx are really quiesced (how do we synchronize w/downstream driver?) */
    9351037}
    9361038
     
    9591061
    9601062    virtioNetR3QuiesceDevice(pDevIns, enmType);
     1063    virtioNetR3WakeupRxBufWaiter(pDevIns);
    9611064}
    9621065
     
    9841087/**
    9851088 * @interface_method_impl{PDMDEVREGR3,pfnResume}
     1089 *
     1090 * Just process the VM device-related state change itself.
     1091 * Unlike SCSI driver, there are no packets to redo. No I/O was halted or saved while
     1092 * quiescing for pfnSuspend(). Any packets in process were simply dropped by the upper
     1093 * layer driver, presumably to be retried or cause erring out at the upper layers
     1094 * of the network stack.
    9861095 */
    9871096static DECLCALLBACK(void) virtioNetR3Resume(PPDMDEVINS pDevIns)
     
    9931102    pThisCC->fQuiescing = false;
    9941103
    995 
    996     /** @todo implement this function properly */
    997 
    998     /* Wake worker threads flagged to skip pulling queue entries during quiesce
    999      * to ensure they re-check their queues. Active request queues may already
    1000      * be awake due to new reqs coming in.
    1001      */
    1002 /*
    1003     for (uint16_t idxQueue = 0; idxQueue < VIRTIONET_REQ_QUEUE_CNT; idxQueue++)
    1004     {
    1005         if (ASMAtomicReadBool(&pThisCC->aWorkers[idxQueue].fSleeping))
    1006         {
    1007             Log7Func(("%s waking %s worker.\n", INSTANCE(pThis), VIRTQNAME(idxQueue)));
    1008             int rc = PDMDevHlpSUPSemEventSignal(pDevIns, pThis->aWorkers[idxQueue].hEvtProcess);
    1009             AssertRC(rc);
    1010         }
    1011     }
    1012 */
    1013     /* Ensure guest is working the queues too. */
     1104    /* Ensure guest is working the queues */
    10141105    virtioCoreR3VmStateChanged(&pThis->Virtio, kvirtIoVmStateChangedResume);
    10151106}
     
    10841175 * @thread  RX
    10851176 */
    1086 static int virtioNetR3IsRxQueuePrimed(PPDMDEVINS pDevIns, PVIRTIONET pThis, uint16_t idxQueue)
    1087 {
    1088 #define LOGPARAMS INSTANCE(pThis), VIRTQNAME(idxQueue)
     1177static int virtioNetR3IsRxQueuePrimed(PPDMDEVINS pDevIns, PVIRTIONET pThis, uint16_t idxRxQueue)
     1178{
     1179#define LOGPARAMS INSTANCE(pThis), VIRTQNAME(idxRxQueue)
    10891180
    10901181    if (!pThis->fVirtioReady)
     
    10921183        Log8Func(("%s %s VirtIO not ready (rc = VERR_NET_NO_BUFFER_SPACE)\n", LOGPARAMS));
    10931184    }
    1094     else if (!virtioCoreIsQueueEnabled(&pThis->Virtio, RXQIDX_QPAIR(idxQueue)))
     1185    else if (!virtioCoreIsQueueEnabled(&pThis->Virtio, idxRxQueue))
    10951186    {
    10961187        Log8Func(("%s %s queue not enabled (rc = VERR_NET_NO_BUFFER_SPACE)\n", LOGPARAMS));
    10971188    }
    1098     else if (virtioCoreQueueIsEmpty(pDevIns, &pThis->Virtio, RXQIDX_QPAIR(idxQueue)))
     1189    else if (virtioCoreQueueIsEmpty(pDevIns, &pThis->Virtio, idxRxQueue))
    10991190    {
    11001191        Log8Func(("%s %s queue is empty (rc = VERR_NET_NO_BUFFER_SPACE)\n", LOGPARAMS));
    1101         virtioCoreQueueSetNotify(&pThis->Virtio, RXQIDX_QPAIR(idxQueue), true);
     1192        virtioCoreQueueSetNotify(&pThis->Virtio, idxRxQueue, true);
    11021193    }
    11031194    else
    11041195    {
    11051196        Log8Func(("%s %s ready with available buffers\n", LOGPARAMS));
    1106         virtioCoreQueueSetNotify(&pThis->Virtio, RXQIDX_QPAIR(idxQueue), false);
     1197        virtioCoreQueueSetNotify(&pThis->Virtio, idxRxQueue, false);
    11071198        return VINF_SUCCESS;
    11081199    }
     
    11151206    /** @todo If we ever start using more than one Rx/Tx queue pair, is a random queue
    11161207              selection algorithm feasible or even necessary to prevent starvation? */
    1117     for (int idxQueue = 0; idxQueue < pThis->cVirtQueues; idxQueue += 2) /* Skip odd queue #'s because Rx queues only! */
    1118     {
    1119         if (!IS_RX_QUEUE(idxQueue))
    1120             continue;
    1121 
    1122         if (RT_SUCCESS(virtioNetR3IsRxQueuePrimed(pDevIns, pThis, idxQueue)))
     1208    for (int idxQueuePair = 0; idxQueuePair < pThis->cVirtqPairs; idxQueuePair++)
     1209        if (RT_SUCCESS(virtioNetR3IsRxQueuePrimed(pDevIns, pThis, RXQIDX(idxQueuePair))))
    11231210            return true;
    1124     }
    11251211    return false;
    11261212}
     
    11281214 * Returns true if VirtIO core and device are in a running and operational state
    11291215 */
    1130 DECLINLINE(bool) virtioNetAllSystemsGo(PVIRTIONET pThis, PPDMDEVINS pDevIns)
     1216DECLINLINE(bool) virtioNetIsOperational(PVIRTIONET pThis, PPDMDEVINS pDevIns)
    11311217{
    11321218    if (!pThis->fVirtioReady)
     
    11631249
    11641250    ASMAtomicXchgBool(&pThis->fLeafWantsRxBuffers, true);
     1251    STAM_PROFILE_START(&pThis->StatRxOverflow, a);
    11651252
    11661253    do {
     
    11811268            RTThreadSleep(1);
    11821269
    1183     } while (virtioNetAllSystemsGo(pThis, pDevIns));
    1184 
     1270    } while (virtioNetIsOperational(pThis, pDevIns));
     1271
     1272    STAM_PROFILE_STOP(&pThis->StatRxOverflow, a);
    11851273    ASMAtomicXchgBool(&pThis->fLeafWantsRxBuffers, false);
    11861274
     
    12351323}
    12361324
    1237 
    12381325/**
    12391326 * @interface_method_impl{PDMINETWORKCONFIG,pfnGetMac}
     
    13461433    }
    13471434
    1348     /** @todo Original combined unicast & multicast into one table. Should we distinguish? */
    1349 
    13501435    for (uint16_t i = 0; i < pThis->cUnicastFilterMacs; i++)
    13511436        if (!memcmp(&pThis->aMacUnicastFilter[i], pvBuf, sizeof(RTMAC)))
     
    13601445    return false;
    13611446}
     1447
     1448static int virtioNetR3CopyRxPktToGuest(PPDMDEVINS pDevIns, PVIRTIONET pThis, const void *pvBuf, size_t cb,
     1449                                       VIRTIONET_PKT_HDR_T *rxPktHdr, uint16_t cSegsAllocated,
     1450                                       PRTSGBUF pVirtSegBufToGuest, PRTSGSEG paVirtSegsToGuest,
     1451                                       uint16_t idxRxQueue)
     1452{
     1453    uint8_t fAddPktHdr = true;
     1454    RTGCPHYS gcPhysPktHdrNumBuffers;
     1455    uint16_t cDescs;
     1456    uint32_t uOffset;
     1457    for (cDescs = uOffset = 0; uOffset < cb; )
     1458    {
     1459        PVIRTIO_DESC_CHAIN_T pDescChain = NULL;
     1460
     1461        int rc = virtioCoreR3QueueGet(pDevIns, &pThis->Virtio, RXQIDX(idxRxQueue), &pDescChain, true);
     1462        AssertMsgReturn(rc == VINF_SUCCESS || rc == VERR_NOT_AVAILABLE, ("%Rrc\n", rc), rc);
     1463
     1464        /** @todo  Find a better way to deal with this */
     1465        AssertMsgReturnStmt(rc == VINF_SUCCESS && pDescChain->cbPhysReturn,
     1466                            ("Not enough Rx buffers in queue to accomodate ethernet packet\n"),
     1467                            virtioCoreR3DescChainRelease(&pThis->Virtio, pDescChain),
     1468                            VERR_INTERNAL_ERROR);
     1469
     1470        /* Length of first seg of guest Rx buf should never be less than sizeof(virtio_net_pkt_hdr).
     1471         * Otherwise code has to become more complicated, e.g. locate & cache seg idx & offset of
     1472         * virtio_net_header.num_buffers, to defer updating (in gcPhys). Re-visit if needed */
     1473
     1474        AssertMsgReturnStmt(pDescChain->pSgPhysReturn->paSegs[0].cbSeg >= sizeof(VIRTIONET_PKT_HDR_T),
     1475                            ("Desc chain's first seg has insufficient space for pkt header!\n"),
     1476                            virtioCoreR3DescChainRelease(&pThis->Virtio, pDescChain),
     1477                            VERR_INTERNAL_ERROR);
     1478
     1479        uint32_t cbDescChainLeft = pDescChain->cbPhysReturn;
     1480        uint8_t  cbHdr = sizeof(VIRTIONET_PKT_HDR_T);
     1481
     1482        /* Fill the Guest Rx buffer with data received from the interface */
     1483        for (uint16_t cSegs = 0; uOffset < cb && cbDescChainLeft; )
     1484        {
     1485            if (fAddPktHdr)
     1486            {
     1487                /* Lead with packet header */
     1488                paVirtSegsToGuest[0].cbSeg = cbHdr;
     1489                paVirtSegsToGuest[0].pvSeg = RTMemAlloc(cbHdr);
     1490                AssertReturn(paVirtSegsToGuest[0].pvSeg, VERR_NO_MEMORY);
     1491                cbDescChainLeft -= cbHdr;
     1492
     1493                memcpy(paVirtSegsToGuest[0].pvSeg, rxPktHdr, cbHdr);
     1494
     1495                /* Calculate & cache addr of field to update after final value is known, in gcPhys mem */
     1496                gcPhysPktHdrNumBuffers = pDescChain->pSgPhysReturn->paSegs[0].gcPhys
     1497                                         + RT_UOFFSETOF(VIRTIONET_PKT_HDR_T, uNumBuffers);
     1498                fAddPktHdr = false;
     1499                cSegs++;
     1500            }
     1501
     1502            if (cSegs >= cSegsAllocated)
     1503            {
     1504                cSegsAllocated <<= 1; /* double allocation size */
     1505                paVirtSegsToGuest = (PRTSGSEG)RTMemRealloc(paVirtSegsToGuest, sizeof(RTSGSEG) * cSegsAllocated);
     1506                if (!paVirtSegsToGuest)
     1507                    virtioCoreR3DescChainRelease(&pThis->Virtio, pDescChain);
     1508                AssertReturn(paVirtSegsToGuest, VERR_NO_MEMORY);
     1509            }
     1510
     1511            /* Append remaining Rx pkt or as much current desc chain has room for */
     1512            uint32_t cbCropped = RT_MIN(cb, cbDescChainLeft);
     1513            paVirtSegsToGuest[cSegs].cbSeg = cbCropped;
     1514            paVirtSegsToGuest[cSegs].pvSeg = ((uint8_t *)pvBuf) + uOffset;
     1515            cbDescChainLeft -= cbCropped;
     1516            uOffset += cbCropped;
     1517            cDescs++;
     1518            cSegs++;
     1519            RTSgBufInit(pVirtSegBufToGuest, paVirtSegsToGuest, cSegs);
     1520            Log7Func(("Send Rx pkt to guest...\n"));
     1521            STAM_PROFILE_START(&pThis->StatReceiveStore, a);
     1522            virtioCoreR3QueuePut(pDevIns, &pThis->Virtio, idxRxQueue,
     1523                                 pVirtSegBufToGuest, pDescChain, true);
     1524            STAM_PROFILE_STOP(&pThis->StatReceiveStore, a);
     1525
     1526            if (FEATURE_DISABLED(MRG_RXBUF))
     1527                break;
     1528        }
     1529
     1530        virtioCoreR3DescChainRelease(&pThis->Virtio, pDescChain);
     1531    }
     1532
     1533    if (uOffset < cb)
     1534    {
     1535        LogFunc(("%s Packet did not fit into RX queue (packet size=%u)!\n", INSTANCE(pThis), cb));
     1536        return VERR_TOO_MUCH_DATA;
     1537    }
     1538
     1539    /* Fix-up pkthdr (in guest phys. memory) with number buffers (descriptors) processed */
     1540
     1541    int rc = PDMDevHlpPCIPhysWrite(pDevIns, gcPhysPktHdrNumBuffers, &cDescs, sizeof(cDescs));
     1542    AssertMsgRCReturn(rc,
     1543                  ("Failure updating descriptor count in pkt hdr in guest physical memory\n"),
     1544                  rc);
     1545
     1546    /** @todo   WHY *must* we *force* notifying guest that we filled its Rx buffer(s)?
     1547     *          If we don't notify the guest, it doesn't detect it and stalls, even though
     1548     *          guest is responsible for setting the used-ring flag in the Rx queue that tells
     1549     *          us to skip the notification interrupt! Obviously forcing the interrupt is
     1550     *          non-optimal performance-wise and seems to contradict the Virtio spec.
     1551     *          Is that a bug in the linux virtio_net.c driver? */
     1552
     1553    virtioCoreQueueSync(pDevIns, &pThis->Virtio, RXQIDX(idxRxQueue), /* fForce */ true);
     1554
     1555    return VINF_SUCCESS;
     1556}
     1557
    13621558
    13631559/**
     
    13731569 * @param   cb              Number of bytes available in the buffer.
    13741570 * @param   pGso            Pointer to Global Segmentation Offload structure
    1375  * @param   idxQueue            Queue to work with
     1571 * @param   idxRxQueue      Rx queue to work with
    13761572 * @thread  RX
    13771573 */
    13781574static int virtioNetR3HandleRxPacket(PPDMDEVINS pDevIns, PVIRTIONET pThis, PVIRTIONETCC pThisCC,
    1379                                 const void *pvBuf, size_t cb, PCPDMNETWORKGSO pGso, uint16_t idxQueue)
     1575                                const void *pvBuf, size_t cb, PCPDMNETWORKGSO pGso, uint16_t idxRxQueue)
    13801576{
    13811577    RT_NOREF(pThisCC);
     
    14111607        rxPktHdr.uGsoSize = pGso->cbMaxSeg;
    14121608        rxPktHdr.uChksumStart = pGso->offHdr2;
     1609        STAM_REL_COUNTER_INC(&pThis->StatReceiveGSO);
    14131610    }
    14141611    else
     
    14201617    uint16_t cSegsAllocated = VIRTIONET_PREALLOCATE_RX_SEG_COUNT;
    14211618
    1422     /** @todo r=bird: error codepaths below are almost all leaky!  Maybe keep
    1423      *         allocations and cleanup here and put the code doing the complicated
    1424      *         work into a helper that can AssertReturn at will without needing to
    1425      *         care about cleaning stuff up. */
    1426     PRTSGBUF pVirtSegBufToGuest = (PRTSGBUF)RTMemAllocZ(sizeof(RTSGBUF)); /** @todo r=bird: Missing check. */
     1619    PRTSGBUF pVirtSegBufToGuest = (PRTSGBUF)RTMemAllocZ(sizeof(RTSGBUF));
     1620    AssertReturn(pVirtSegBufToGuest, VERR_NO_MEMORY);
     1621
    14271622    PRTSGSEG paVirtSegsToGuest  = (PRTSGSEG)RTMemAllocZ(sizeof(RTSGSEG) * cSegsAllocated);
    1428     AssertReturn(paVirtSegsToGuest, VERR_NO_MEMORY);
    1429 
    1430 
    1431     uint8_t fAddPktHdr = true;
    1432     RTGCPHYS gcPhysPktHdrNumBuffers;
    1433     uint16_t cDescs;
    1434     uint32_t uOffset;
    1435     for (cDescs = uOffset = 0; uOffset < cb; )
    1436     {
    1437         PVIRTIO_DESC_CHAIN_T pDescChain = NULL;
    1438 
    1439         int rc = virtioCoreR3QueueGet(pDevIns, &pThis->Virtio, RXQIDX_QPAIR(idxQueue), &pDescChain, true);
    1440         AssertMsgReturn(rc == VINF_SUCCESS || rc == VERR_NOT_AVAILABLE, ("%Rrc\n", rc), rc);
    1441 
    1442         /** @todo  Find a better way to deal with this */
    1443         AssertMsgReturnStmt(rc == VINF_SUCCESS && pDescChain->cbPhysReturn,
    1444                             ("Not enough Rx buffers in queue to accomodate ethernet packet\n"),
    1445                             virtioCoreR3DescChainRelease(&pThis->Virtio, pDescChain),
    1446                             VERR_INTERNAL_ERROR);
    1447 
    1448         /* Unlikely that len of 1st seg of guest Rx (IN) buf is less than sizeof(virtio_net_pkt_hdr) == 12.
    1449          * Assert it to reduce complexity. Robust solution would entail finding seg idx and offset of
    1450          * virtio_net_header.num_buffers (to update field *after* hdr & pkts copied to gcPhys) */
    1451         AssertMsgReturnStmt(pDescChain->pSgPhysReturn->paSegs[0].cbSeg >= sizeof(VIRTIONET_PKT_HDR_T),
    1452                             ("Desc chain's first seg has insufficient space for pkt header!\n"),
    1453                             virtioCoreR3DescChainRelease(&pThis->Virtio, pDescChain),
    1454                             VERR_INTERNAL_ERROR);
    1455 
    1456         uint32_t cbDescChainLeft = pDescChain->cbPhysReturn;
    1457         uint8_t  cbHdr = sizeof(VIRTIONET_PKT_HDR_T);
    1458         /* Fill the Guest Rx buffer with data received from the interface */
    1459         for (uint16_t cSegs = 0; uOffset < cb && cbDescChainLeft; )
    1460         {
    1461             if (fAddPktHdr)
    1462             {
    1463                 /* Lead with packet header */
    1464                 paVirtSegsToGuest[0].cbSeg = cbHdr;
    1465                 paVirtSegsToGuest[0].pvSeg = RTMemAlloc(cbHdr);
    1466                 AssertReturn(paVirtSegsToGuest[0].pvSeg, VERR_NO_MEMORY);
    1467                 cbDescChainLeft -= cbHdr;
    1468 
    1469                 memcpy(paVirtSegsToGuest[0].pvSeg, &rxPktHdr, cbHdr);
    1470 
    1471                 /* Calculate & cache the field we will need to update later in gcPhys memory */
    1472                 gcPhysPktHdrNumBuffers = pDescChain->pSgPhysReturn->paSegs[0].gcPhys
    1473                                          + RT_UOFFSETOF(VIRTIONET_PKT_HDR_T, uNumBuffers);
    1474                 fAddPktHdr = false;
    1475                 cSegs++;
    1476             }
    1477 
    1478             if (cSegs >= cSegsAllocated)
    1479             {
    1480                 cSegsAllocated <<= 1; /* double the allocation size */
    1481                 paVirtSegsToGuest = (PRTSGSEG)RTMemRealloc(paVirtSegsToGuest, sizeof(RTSGSEG) * cSegsAllocated);
    1482                 AssertReturn(paVirtSegsToGuest, VERR_NO_MEMORY);
    1483             }
    1484 
    1485             /* Append remaining Rx pkt or as much current desc chain has room for */
    1486             uint32_t cbCropped = RT_MIN(cb, cbDescChainLeft);
    1487             paVirtSegsToGuest[cSegs].cbSeg = cbCropped;
    1488             paVirtSegsToGuest[cSegs].pvSeg = ((uint8_t *)pvBuf) + uOffset;
    1489             cbDescChainLeft -= cbCropped;
    1490             uOffset += cbCropped;
    1491             cDescs++;
    1492             cSegs++;
    1493             RTSgBufInit(pVirtSegBufToGuest, paVirtSegsToGuest, cSegs);
    1494             Log7Func(("Send Rx pkt to guest...\n"));
    1495             virtioCoreR3QueuePut(pDevIns, &pThis->Virtio, RXQIDX_QPAIR(idxQueue),
    1496                                  pVirtSegBufToGuest, pDescChain, true);
    1497 
    1498             if (FEATURE_DISABLED(MRG_RXBUF))
    1499                 break;
    1500         }
    1501 
    1502         virtioCoreR3DescChainRelease(&pThis->Virtio, pDescChain);
    1503     }
    1504 
    1505     /* Fix-up pkthdr (in guest phys. memory) with number buffers (descriptors) processed */
    1506 
    1507     int rc = PDMDevHlpPCIPhysWrite(pDevIns, gcPhysPktHdrNumBuffers, &cDescs, sizeof(cDescs));
    1508     AssertMsgRCReturn(rc,
    1509                   ("Failure updating descriptor count in pkt hdr in guest physical memory\n"),
    1510                   rc);
    1511 
    1512     virtioCoreQueueSync(pDevIns, &pThis->Virtio, RXQIDX_QPAIR(idxQueue));
     1623    AssertReturnStmt(paVirtSegsToGuest, RTMemFree(pVirtSegBufToGuest), VERR_NO_MEMORY);
     1624
     1625    int rc = virtioNetR3CopyRxPktToGuest(pDevIns, pThis, pvBuf, cb, &rxPktHdr, cSegsAllocated,
     1626                                        pVirtSegBufToGuest, paVirtSegsToGuest, idxRxQueue);
    15131627
    15141628    RTMemFree(paVirtSegsToGuest);
     
    15161630
    15171631    Log7(("\n"));
    1518     if (uOffset < cb)
    1519     {
    1520         LogFunc(("%s Packet did not fit into RX queue (packet size=%u)!\n", INSTANCE(pThis), cb));
    1521         return VERR_TOO_MUCH_DATA;
    1522     }
    1523     return VINF_SUCCESS;
     1632    return rc;
    15241633}
    15251634
     
    15771686              selection algorithm feasible or even necessary to prevent starvation? */
    15781687
    1579     for (int idxQueue = 0; idxQueue < pThis->cVirtQueues; idxQueue += 2) /* Skip odd queue #'s because Rx queues only */
    1580     {
    1581         if (RT_SUCCESS(!virtioNetR3IsRxQueuePrimed(pDevIns, pThis, idxQueue)))
     1688    for (int idxQueuePair = 0; idxQueuePair < pThis->cVirtqPairs; idxQueuePair++)
     1689    {
     1690        if (RT_SUCCESS(!virtioNetR3IsRxQueuePrimed(pDevIns, pThis, RXQIDX(idxQueuePair))))
    15821691        {
    15831692            /* Drop packets if VM is not running or cable is disconnected. */
    1584             if (!virtioNetAllSystemsGo(pThis, pDevIns) || !IS_LINK_UP(pThis))
     1693            if (!virtioNetIsOperational(pThis, pDevIns) || !IS_LINK_UP(pThis))
    15851694                return VINF_SUCCESS;
    15861695
     1696            STAM_PROFILE_START(&pThis->StatReceive, a);
    15871697            virtioNetR3SetReadLed(pThisCC, true);
    15881698
    15891699            int rc = VINF_SUCCESS;
    15901700            if (virtioNetR3AddressFilter(pThis, pvBuf, cb))
    1591                 rc = virtioNetR3HandleRxPacket(pDevIns, pThis, pThisCC, pvBuf, cb, pGso, idxQueue);
     1701            {
     1702                rc = virtioNetR3HandleRxPacket(pDevIns, pThis, pThisCC, pvBuf, cb, pGso, RXQIDX(idxQueuePair));
     1703                STAM_REL_COUNTER_ADD(&pThis->StatReceiveBytes, cb);
     1704            }
    15921705
    15931706            virtioNetR3SetReadLed(pThisCC, false);
    1594 
     1707            STAM_PROFILE_STOP(&pThis->StatReceive, a);
    15951708            return rc;
    15961709        }
     
    18421955            if (FEATURE_DISABLED(STATUS) || FEATURE_DISABLED(GUEST_ANNOUNCE))
    18431956            {
    1844                 LogFunc(("%s Ignoring CTRL class VIRTIONET_CTRL_ANNOUNCE. Not configured to handle it\n", INSTANCE(pThis)));
    1845                 virtioNetPrintFeatures(pThis->fNegotiatedFeatures, "Features");
     1957                LogFunc(("%s Ignoring CTRL class VIRTIONET_CTRL_ANNOUNCE.\n"
     1958                         "VIRTIO_F_STATUS or VIRTIO_F_GUEST_ANNOUNCE feature not enabled\n", INSTANCE(pThis)));
    18461959                break;
    18471960            }
     
    18861999
    18872000    virtioCoreR3QueuePut(pDevIns, &pThis->Virtio, CTRLQIDX, pReturnSegBuf, pDescChain, true);
    1888     virtioCoreQueueSync(pDevIns, &pThis->Virtio, CTRLQIDX);
     2001    virtioCoreQueueSync(pDevIns, &pThis->Virtio, CTRLQIDX, false);
    18892002
    18902003    for (int i = 0; i < cSegs; i++)
     
    19812094                  INSTANCE(pThis), pGso->u8Type, pGso->cbHdrsTotal, pGso->cbHdrsSeg,
    19822095                  pGso->cbMaxSeg, pGso->offHdr1, pGso->offHdr2));
     2096        STAM_REL_COUNTER_INC(&pThis->StatTransmitGSO);
    19832097    }
    19842098    else if (pPktHdr->uFlags & VIRTIONET_HDR_F_NEEDS_CSUM)
    19852099    {
     2100        STAM_REL_COUNTER_INC(&pThis->StatTransmitCSum);
    19862101        /*
    19872102         * This is not GSO frame but checksum offloading is requested.
     
    19952110
    19962111static void virtioNetR3TransmitPendingPackets(PPDMDEVINS pDevIns, PVIRTIONET pThis, PVIRTIONETCC pThisCC,
    1997                                          uint16_t idxQueue, bool fOnWorkerThread)
     2112                                         uint16_t idxTxQueue, bool fOnWorkerThread)
    19982113{
    19992114
     
    20332148    }
    20342149
    2035     int cPkts = virtioCoreR3QueuePendingCount(pVirtio->pDevIns, pVirtio, idxQueue);
     2150    int cPkts = virtioCoreR3QueuePendingCount(pVirtio->pDevIns, pVirtio, idxTxQueue);
    20362151    if (!cPkts)
    20372152    {
    2038         LogFunc(("%s No packets to send found on %s\n", INSTANCE(pThis), VIRTQNAME(idxQueue)));
     2153        LogFunc(("%s No packets to send found on %s\n", INSTANCE(pThis), VIRTQNAME(idxTxQueue)));
    20392154
    20402155        if (pDrv)
     
    20502165    int rc;
    20512166    PVIRTIO_DESC_CHAIN_T pDescChain = NULL;
    2052     while ((rc = virtioCoreR3QueuePeek(pVirtio->pDevIns, pVirtio, idxQueue, &pDescChain)) == VINF_SUCCESS)
    2053     {
    2054         if (RT_SUCCESS(rc)) /** @todo r=bird: pointless, see loop condition. */
    2055             Log10Func(("%s fetched descriptor chain from %s\n", INSTANCE(pThis), VIRTQNAME(idxQueue)));
    2056         else
    2057         {
    2058             LogFunc(("%s failed to find expected data on %s, rc = %Rrc\n", INSTANCE(pThis), VIRTQNAME(idxQueue), rc));
    2059             virtioCoreR3DescChainRelease(pVirtio, pDescChain);
    2060             break;
    2061         }
     2167    while ((rc = virtioCoreR3QueuePeek(pVirtio->pDevIns, pVirtio, idxTxQueue, &pDescChain)) == VINF_SUCCESS)
     2168    {
     2169        Log10Func(("%s fetched descriptor chain from %s\n", INSTANCE(pThis), VIRTQNAME(idxTxQueue)));
    20622170
    20632171        PVIRTIOSGBUF pSgPhysSend = pDescChain->pSgPhysSend;
     
    20852193            PDMNETWORKGSO  Gso;
    20862194            PPDMNETWORKGSO pGso = virtioNetR3SetupGsoCtx(&Gso, &PktHdr);
     2195            uint64_t uOffset;
    20872196
    20882197            /** @todo Optimize away the extra copying! (lazy bird) */
     
    20912200            if (RT_SUCCESS(rc))
    20922201            {
     2202                STAM_REL_COUNTER_INC(&pThis->StatTransmitPackets);
     2203                STAM_PROFILE_START(&pThis->StatTransmitSend, a);
     2204
    20932205                uSize -= sizeof(PktHdr);
    20942206                rc = virtioNetR3ReadHeader(pDevIns, paSegsFromGuest[0].gcPhys, &PktHdr, uSize);
     
    20972209                virtioCoreSgBufAdvance(pSgPhysSend, sizeof(PktHdr));
    20982210
    2099                 uint64_t uOffset = 0;
    21002211                size_t cbCopied = 0;
    21012212                size_t cbTotal = 0;
    21022213                size_t cbRemain = pSgBufToPdmLeafDevice->cbUsed = uSize;
     2214                uOffset = 0;
    21032215                while (cbRemain)
    21042216                {
     
    21242236                {
    21252237                    LogFunc(("%s Failed to transmit frame, rc = %Rrc\n", INSTANCE(pThis), rc));
     2238                    STAM_PROFILE_STOP(&pThis->StatTransmitSend, a);
     2239                    STAM_PROFILE_ADV_STOP(&pThis->StatTransmit, a);
    21262240                    pThisCC->pDrv->pfnFreeBuf(pThisCC->pDrv, pSgBufToPdmLeafDevice);
    21272241                }
     2242                STAM_PROFILE_STOP(&pThis->StatTransmitSend, a);
     2243                STAM_REL_COUNTER_ADD(&pThis->StatTransmitBytes, uOffset);
    21282244            }
    21292245            else
     
    21362252
    21372253            /* Remove this descriptor chain from the available ring */
    2138             virtioCoreR3QueueSkip(pVirtio, idxQueue);
     2254            virtioCoreR3QueueSkip(pVirtio, idxTxQueue);
    21392255
    21402256            /* No data to return to guest, but call is needed put elem (e.g. desc chain) on used ring */
    2141             virtioCoreR3QueuePut(pVirtio->pDevIns, pVirtio, idxQueue, NULL, pDescChain, false);
    2142 
    2143             virtioCoreQueueSync(pVirtio->pDevIns, pVirtio, idxQueue);
     2257            virtioCoreR3QueuePut(pVirtio->pDevIns, pVirtio, idxTxQueue, NULL, pDescChain, false);
     2258
     2259            /* Update used ring idx and notify guest that we've transmitted the data it sent */
     2260            virtioCoreQueueSync(pVirtio->pDevIns, pVirtio, idxTxQueue, false);
    21442261        }
    21452262
     
    21652282    PVIRTIONET      pThis   = PDMDEVINS_2_DATA(pThisCC->pDevIns, PVIRTIONET);
    21662283
     2284    STAM_COUNTER_INC(&pThis->StatTransmitByNetwork);
     2285
    21672286    /** @todo If we ever start using more than one Rx/Tx queue pair, is a random queue
    21682287          selection algorithm feasible or even necessary */
    2169     virtioNetR3TransmitPendingPackets(pDevIns, pThis, pThisCC, TXQIDX_QPAIR(0), false /*fOnWorkerThread*/);
     2288    virtioNetR3TransmitPendingPackets(pDevIns, pThis, pThisCC, TXQIDX(0), false /*fOnWorkerThread*/);
    21702289}
    21712290
     
    21782297    PVIRTIONETCC       pThisCC   = RT_FROM_MEMBER(pVirtioCC, VIRTIONETCC, Virtio);
    21792298    PPDMDEVINS         pDevIns   = pThisCC->pDevIns;
    2180     PVIRTIONETWORKER   pWorker   = &pThis->aWorkers[idxQueue];
    2181     PVIRTIONETWORKERR3 pWorkerR3 = &pThisCC->aWorkers[idxQueue];
     2299
     2300    uint16_t idxWorker;
     2301    if (idxQueue == CTRLQIDX)
     2302        idxWorker = pThis->cWorkers - 1;
     2303    else
     2304        idxWorker = idxQueue / 2;
     2305
     2306    PVIRTIONETWORKER   pWorker   = &pThis->aWorkers[idxWorker];
     2307    PVIRTIONETWORKERR3 pWorkerR3 = &pThisCC->aWorkers[idxWorker];
    21822308    AssertReturnVoid(idxQueue < pThis->cVirtQueues);
    21832309
     
    21962322    else
    21972323    {
    2198         /* Wake queue's worker thread up if sleeping */
     2324        /* Wake queue's worker thread up if sleeping (e.g. a Tx queue, or the control queue */
    21992325        if (!ASMAtomicXchgBool(&pWorkerR3->fNotified, true))
    22002326        {
     
    22142340static DECLCALLBACK(int) virtioNetR3WorkerThread(PPDMDEVINS pDevIns, PPDMTHREAD pThread)
    22152341{
    2216     uint16_t const     idxQueue  = (uint16_t)(uintptr_t)pThread->pvUser;
    22172342    PVIRTIONET         pThis     = PDMDEVINS_2_DATA(pDevIns, PVIRTIONET);
    22182343    PVIRTIONETCC       pThisCC   = PDMDEVINS_2_DATA_CC(pDevIns, PVIRTIONETCC);
    2219     PVIRTIONETWORKER   pWorker   = &pThis->aWorkers[idxQueue];
    2220     PVIRTIONETWORKERR3 pWorkerR3 = &pThisCC->aWorkers[idxQueue];
     2344    uint16_t const     idxWorker = (uint16_t)(uintptr_t)pThread->pvUser;
     2345    PVIRTIONETWORKER   pWorker   = &pThis->aWorkers[idxWorker];
     2346    PVIRTIONETWORKERR3 pWorkerR3 = &pThisCC->aWorkers[idxWorker];
     2347    uint16_t const     idxQueue  = pWorkerR3->idxQueue;
     2348
    22212349    if (pThread->enmState == PDMTHREADSTATE_INITIALIZING)
    22222350    {
    22232351        return VINF_SUCCESS;
    22242352    }
    2225     LogFunc(("%s %s\n", INSTANCE(pThis), VIRTQNAME(idxQueue)));
     2353    LogFunc(("%s worker thread started for %s\n", INSTANCE(pThis), VIRTQNAME(idxQueue)));
     2354    virtioCoreQueueSetNotify(&pThis->Virtio, idxQueue, false);
    22262355    while (pThread->enmState == PDMTHREADSTATE_RUNNING)
    22272356    {
    2228         virtioCoreQueueSetNotify(&pThis->Virtio, idxQueue, true);
    2229 
    22302357        if (virtioCoreQueueIsEmpty(pDevIns, &pThis->Virtio, idxQueue))
    22312358        {
     2359            virtioCoreQueueSetNotify(&pThis->Virtio, idxQueue, true);
    22322360            /* Atomic interlocks avoid missing alarm while going to sleep & notifier waking the awoken */
    22332361            ASMAtomicWriteBool(&pWorkerR3->fSleeping, true);
     
    22352363            if (!fNotificationSent)
    22362364            {
     2365                virtioCoreQueueSetNotify(&pThis->Virtio, idxQueue, true);
    22372366                Log10Func(("%s %s worker sleeping...\n", INSTANCE(pThis), VIRTQNAME(idxQueue)));
    22382367                Assert(ASMAtomicReadBool(&pWorkerR3->fSleeping));
    22392368                int rc = PDMDevHlpSUPSemEventWaitNoResume(pDevIns, pWorker->hEvtProcess, RT_INDEFINITE_WAIT);
     2369                STAM_COUNTER_INC(&pThis->StatTransmitByThread);
    22402370                AssertLogRelMsgReturn(RT_SUCCESS(rc) || rc == VERR_INTERRUPTED, ("%Rrc\n", rc), rc);
    22412371                if (RT_UNLIKELY(pThread->enmState != PDMTHREADSTATE_RUNNING))
     
    22502380            }
    22512381            ASMAtomicWriteBool(&pWorkerR3->fSleeping, false);
     2382            virtioCoreQueueSetNotify(&pThis->Virtio, idxQueue, false);
    22522383        }
    2253         virtioCoreQueueSetNotify(&pThis->Virtio, idxQueue, false);
    22542384
    22552385        /* Dispatch to the handler for the queue this worker is set up to drive */
     
    22802410              * leaf driver invokes PDMINETWORKDOWN.pfnWaitReceiveAvail() callback,
    22812411              * which waits until notified directly by virtioNetR3QueueNotified()
    2282               * that guest IN buffers have been added to receive virt queue. */
     2412              * that guest IN buffers have been added to receive virt queue.
     2413              */
    22832414        }
    22842415    }
     
    24172548    Log10Func(("%s\n", INSTANCE(pThis)));
    24182549    int rc = VINF_SUCCESS;
    2419     for (unsigned idxQueue = 0; idxQueue < pThis->cVirtQueues; idxQueue++)
    2420     {
    2421         PVIRTIONETWORKER pWorker = &pThis->aWorkers[idxQueue];
     2550    for (unsigned idxWorker = 0; idxWorker < pThis->cWorkers; idxWorker++)
     2551    {
     2552        PVIRTIONETWORKER pWorker = &pThis->aWorkers[idxWorker];
    24222553        if (pWorker->hEvtProcess != NIL_SUPSEMEVENT)
    24232554        {
     
    24252556            pWorker->hEvtProcess = NIL_SUPSEMEVENT;
    24262557        }
    2427         if (pThisCC->aWorkers[idxQueue].pThread)
     2558        if (pThisCC->aWorkers[idxWorker].pThread)
    24282559        {
    24292560            int rcThread;
    2430             rc = PDMDevHlpThreadDestroy(pDevIns, pThisCC->aWorkers[idxQueue].pThread, &rcThread);
     2561            rc = PDMDevHlpThreadDestroy(pDevIns, pThisCC->aWorkers[idxWorker].pThread, &rcThread);
    24312562            if (RT_FAILURE(rc) || RT_FAILURE(rcThread))
    24322563                AssertMsgFailed(("%s Failed to destroythread rc=%Rrc rcThread=%Rrc\n", __FUNCTION__, rc, rcThread));
    2433            pThisCC->aWorkers[idxQueue].pThread = NULL;
     2564           pThisCC->aWorkers[idxWorker].pThread = NULL;
    24342565        }
    24352566    }
     
    24372568}
    24382569
    2439 static int virtioNetR3CreateWorkerThreads(PPDMDEVINS pDevIns, PVIRTIONET pThis, PVIRTIONETCC pThisCC)
     2570static int virtioNetR3CreateOneWorkerThread(PPDMDEVINS pDevIns, PVIRTIONET pThis, PVIRTIONETCC pThisCC,
     2571                                            uint16_t idxWorker, uint16_t idxQueue)
    24402572{
    24412573    Log10Func(("%s\n", INSTANCE(pThis)));
    24422574    int rc = VINF_SUCCESS;
    2443     /* Attach the queues and create worker threads for them: */
    2444     for (uint16_t idxQueue = 1; idxQueue < pThis->cVirtQueues; idxQueue++)
    2445     {
    2446         /* Skip creating threads for receive queues, only create for transmit queues & control queue */
    2447         if (!IS_RX_QUEUE(idxQueue))
    2448         {
    2449             rc = PDMDevHlpSUPSemEventCreate(pDevIns, &pThis->aWorkers[idxQueue].hEvtProcess);
    2450 
    2451             if (RT_FAILURE(rc))
    2452                 return PDMDevHlpVMSetError(pDevIns, rc, RT_SRC_POS,
    2453                                            N_("DevVirtioNET: Failed to create SUP event semaphore"));
    2454 
    2455             rc = PDMDevHlpThreadCreate(pDevIns, &pThisCC->aWorkers[idxQueue].pThread,
    2456                                        (void *)(uintptr_t)idxQueue, virtioNetR3WorkerThread,
    2457                                        virtioNetR3WakeupWorker, 0, RTTHREADTYPE_IO, VIRTQNAME(idxQueue));
    2458             if (rc != VINF_SUCCESS)
    2459             {
    2460                 LogRel(("Error creating thread for Virtual Queue %s: %Rrc\n", VIRTQNAME(idxQueue), rc));
    2461                 return rc;
    2462             }
    2463         }
    2464         pThis->afQueueAttached[idxQueue] = true;
    2465     }
     2575    rc = PDMDevHlpSUPSemEventCreate(pDevIns, &pThis->aWorkers[idxWorker].hEvtProcess);
     2576
     2577    if (RT_FAILURE(rc))
     2578        return PDMDevHlpVMSetError(pDevIns, rc, RT_SRC_POS,
     2579                                   N_("DevVirtioNET: Failed to create SUP event semaphore"));
     2580
     2581    LogFunc(("creating thread, idxWorker=%d, idxQueue=%d\n", idxWorker, idxQueue));
     2582    rc = PDMDevHlpThreadCreate(pDevIns, &pThisCC->aWorkers[idxWorker].pThread,
     2583                               (void *)(uintptr_t)idxWorker, virtioNetR3WorkerThread,
     2584                               virtioNetR3WakeupWorker, 0, RTTHREADTYPE_IO, VIRTQNAME(idxQueue));
     2585    if (rc != VINF_SUCCESS)
     2586    {
     2587        LogRel(("Error creating thread for Virtual Queue %s: %Rrc\n", VIRTQNAME(idxQueue), rc));
     2588        return rc;
     2589    }
     2590    pThisCC->aWorkers[idxWorker].idxQueue = idxQueue;
     2591    pThis->afQueueAttached[idxQueue] = true;
    24662592    return rc;
    24672593}
    24682594
     2595static int virtioNetR3CreateWorkerThreads(PPDMDEVINS pDevIns, PVIRTIONET pThis, PVIRTIONETCC pThisCC)
     2596{
     2597    Log10Func(("%s\n", INSTANCE(pThis)));
     2598
     2599    int rc;
     2600    uint16_t idxWorker = 0;
     2601    for (uint16_t idxQueuePair = 0; idxQueuePair < pThis->cVirtqPairs; idxQueuePair++)
     2602    {
     2603        rc = virtioNetR3CreateOneWorkerThread(pDevIns, pThis, pThisCC, idxWorker, TXQIDX(idxQueuePair));
     2604        AssertRCReturn(rc, rc);
     2605        idxWorker++;
     2606    }
     2607    rc = virtioNetR3CreateOneWorkerThread(pDevIns, pThis, pThisCC, idxWorker++, CTRLQIDX);
     2608    pThis->cWorkers = idxWorker;
     2609    return rc;
     2610}
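As a worked example of the worker/queue bookkeeping introduced above (assuming the default single queue pair; this note is not part of the changeset):

    /* With cVirtqPairs = 1 the virtqueues are: 0 = receiveq0, 1 = transmitq0, 2 = controlq.
     * Worker threads are created only for queues that need one (Tx queues and the control queue):
     *     worker 0 -> transmitq0   (TXQIDX(0) == 1)
     *     worker 1 -> controlq     (CTRLQIDX  == 2)
     * so cWorkers == 2, and virtioNetR3QueueNotified() maps a queue index back to its worker with
     *     idxWorker = (idxQueue == CTRLQIDX) ? cWorkers - 1 : idxQueue / 2;                        */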
    24692611/**
    24702612 * @callback_method_impl{VIRTIOCORER3,pfnStatusChanged}
     
    24862628        pThisCC->fQuiescing  = false;
    24872629        pThis->fNegotiatedFeatures = virtioCoreGetAcceptedFeatures(pVirtio);
    2488         virtioNetPrintFeatures(VIRTIONET_HOST_FEATURES_OFFERED, "Offered Features");
    2489         virtioNetPrintFeatures(pThis->fNegotiatedFeatures, "Negotiated Features");
     2630        virtioPrintFeatures(pVirtio);
     2631        virtioNetPrintFeatures(pThis);
    24902632        for (unsigned idxQueue = 0; idxQueue < pThis->cVirtQueues; idxQueue++)
    24912633        {
     
    25032645        Log7Func(("%s Link is %s\n", INSTANCE(pThis), pThis->fCableConnected ? "up" : "down"));
    25042646
    2505         pThis->fPromiscuous  = true;
    2506         pThis->fAllMulticast = false;
    2507         pThis->fAllUnicast   = false;
    2508         pThis->fNoMulticast  = false;
    2509         pThis->fNoUnicast    = false;
    2510         pThis->fNoBroadcast  = false;
     2647        pThis->fPromiscuous         = true;
     2648        pThis->fAllMulticast        = false;
     2649        pThis->fAllUnicast          = false;
     2650        pThis->fNoMulticast         = false;
     2651        pThis->fNoUnicast           = false;
     2652        pThis->fNoBroadcast         = false;
    25112653        pThis->uIsTransmitting      = 0;
    25122654        pThis->cUnicastFilterMacs   = 0;
     
    28102952                              virtioNetR3SaveExec, virtioNetR3LoadExec);
    28112953    AssertRCReturn(rc, rc);
     2954
     2955
     2956
     2957   /*
     2958     * Statistics and debug stuff.
     2959     * The /Public/ bits are official and used by session info in the GUI.
     2960     */
     2961    PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatReceiveBytes,  STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES,
     2962                           "Amount of data received",    "/Public/NetAdapter/%u/BytesReceived", uStatNo);
     2963    PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTransmitBytes, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES,
     2964                           "Amount of data transmitted", "/Public/NetAdapter/%u/BytesTransmitted", uStatNo);
     2965    PDMDevHlpSTAMRegisterF(pDevIns, &pDevIns->iInstance,       STAMTYPE_U32,     STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
     2966                           "Device instance number",     "/Public/NetAdapter/%u/%s", uStatNo, pDevIns->pReg->szName);
     2967
     2968    PDMDevHlpSTAMRegister(pDevIns, &pThis->StatReceiveBytes,        STAMTYPE_COUNTER, "ReceiveBytes",           STAMUNIT_BYTES,          "Amount of data received");
     2969    PDMDevHlpSTAMRegister(pDevIns, &pThis->StatTransmitBytes,       STAMTYPE_COUNTER, "TransmitBytes",          STAMUNIT_BYTES,          "Amount of data transmitted");
     2970    PDMDevHlpSTAMRegister(pDevIns, &pThis->StatReceiveGSO,          STAMTYPE_COUNTER, "Packets/ReceiveGSO",     STAMUNIT_COUNT,          "Number of received GSO packets");
     2971    PDMDevHlpSTAMRegister(pDevIns, &pThis->StatTransmitPackets,     STAMTYPE_COUNTER, "Packets/Transmit",       STAMUNIT_COUNT,          "Number of sent packets");
     2972    PDMDevHlpSTAMRegister(pDevIns, &pThis->StatTransmitGSO,         STAMTYPE_COUNTER, "Packets/Transmit-Gso",   STAMUNIT_COUNT,          "Number of sent GSO packets");
     2973    PDMDevHlpSTAMRegister(pDevIns, &pThis->StatTransmitCSum,        STAMTYPE_COUNTER, "Packets/Transmit-Csum",  STAMUNIT_COUNT,          "Number of completed TX checksums");
     2974# ifdef VBOX_WITH_STATISTICS
     2975    PDMDevHlpSTAMRegister(pDevIns, &pThis->StatReceive,             STAMTYPE_PROFILE, "Receive/Total",          STAMUNIT_TICKS_PER_CALL, "Profiling receive");
     2976    PDMDevHlpSTAMRegister(pDevIns, &pThis->StatReceiveStore,        STAMTYPE_PROFILE, "Receive/Store",          STAMUNIT_TICKS_PER_CALL, "Profiling receive storing");
     2977    PDMDevHlpSTAMRegister(pDevIns, &pThis->StatRxOverflow,          STAMTYPE_PROFILE, "RxOverflow",             STAMUNIT_TICKS_PER_OCCURENCE, "Profiling RX overflows");
     2978    PDMDevHlpSTAMRegister(pDevIns, &pThis->StatRxOverflowWakeup,    STAMTYPE_COUNTER, "RxOverflowWakeup",       STAMUNIT_OCCURENCES,     "Nr of RX overflow wakeups");
     2979    PDMDevHlpSTAMRegister(pDevIns, &pThis->StatTransmit,            STAMTYPE_PROFILE, "Transmit/Total",         STAMUNIT_TICKS_PER_CALL, "Profiling transmits in HC");
     2980    PDMDevHlpSTAMRegister(pDevIns, &pThis->StatTransmitSend,        STAMTYPE_PROFILE, "Transmit/Send",          STAMUNIT_TICKS_PER_CALL, "Profiling send transmit in HC");
     2981    PDMDevHlpSTAMRegister(pDevIns, &pThis->StatTransmitByNetwork,   STAMTYPE_COUNTER, "Transmit/ByNetwork",     STAMUNIT_COUNT,          "Network-initiated transmissions");
     2982    PDMDevHlpSTAMRegister(pDevIns, &pThis->StatTransmitByThread,    STAMTYPE_COUNTER, "Transmit/ByThread",      STAMUNIT_COUNT,          "Thread-initiated transmissions");
     2983# endif
    28122984
    28132985    /*