Changeset 83913 in vbox for trunk/src/VBox/Devices/Network

Timestamp: Apr 22, 2020 4:52:12 AM
File: 1 edited
trunk/src/VBox/Devices/Network/DevVirtioNet_1_0.cpp
r83664 r83913 31 31 #define LOG_GROUP LOG_GROUP_DEV_VIRTIO 32 32 #define VIRTIONET_WITH_GSO 33 #include <iprt/types.h> 33 34 34 35 #include <VBox/vmm/pdmdev.h> 36 #include <VBox/vmm/stam.h> 35 37 #include <VBox/vmm/pdmcritsect.h> 36 38 #include <VBox/vmm/pdmnetifs.h> … … 56 58 #include "../VirtIO/Virtio_1_0.h" 57 59 58 //#include "VBoxNET.h"59 60 #include "VBoxDD.h" 60 61 /** @todo FIX UP THESE HACKS AFTER DEBUGGING */62 61 63 62 /* After debugging single instance case, restore instance name logging */ 64 63 #define INSTANCE(pState) (char *)(pState->szInstanceName ? "" : "") // Avoid requiring RT_NOREF in some funcs 65 66 64 67 65 #define VIRTIONET_SAVED_STATE_VERSION UINT32_C(1) … … 73 71 #define VIRTIONET_PREALLOCATE_RX_SEG_COUNT 32 74 72 75 76 #define QUEUE_NAME(a_pVirtio, a_idxQueue) ((a_pVirtio)->virtqState[(a_idxQueue)].szVirtqName)77 73 #define VIRTQNAME(idxQueue) (pThis->aszVirtqNames[idxQueue]) 78 74 #define CBVIRTQNAME(idxQueue) RTStrNLen(VIRTQNAME(idxQueue), sizeof(VIRTQNAME(idxQueue))) … … 98 94 #define IS_RX_QUEUE(n) ((n) != CTRLQIDX && !IS_TX_QUEUE(n)) 99 95 #define IS_CTRL_QUEUE(n) ((n) == CTRLQIDX) 100 #define RXQIDX _QPAIR(qPairIdx) (qPairIdx * 2)101 #define TXQIDX _QPAIR(qPairIdx) (qPairIdx * 2 + 1)96 #define RXQIDX(qPairIdx) (qPairIdx * 2) 97 #define TXQIDX(qPairIdx) (qPairIdx * 2 + 1) 102 98 #define CTRLQIDX (FEATURE_ENABLED(MQ) ? ((VIRTIONET_MAX_QPAIRS - 1) * 2 + 2) : 2) 103 99 104 #define RXVIRTQNAME(qPairIdx) (pThis->aszVirtqNames[RXQIDX_QPAIR(qPairIdx)])105 #define TXVIRTQNAME(qPairIdx) (pThis->aszVirtqNames[TXQIDX_QPAIR(qPairIdx)])106 #define CTLVIRTQNAME(qPairIdx) (pThis->aszVirtqNames[CTRLQIDX])107 108 100 #define LUN0 0 109 110 101 111 102 /* … … 165 156 | VIRTIONET_F_CTRL_VLAN \ 166 157 | VIRTIONET_HOST_FEATURES_GSO \ 167 | VIRTIONET_F_MRG_RXBUF 158 | VIRTIONET_F_MRG_RXBUF \ 159 | VIRTIO_F_EVENT_IDX /** @todo Trying this experimentally as potential workaround for bug 160 * where virtio seems to expect interrupt for Rx/Used even though 161 * its set the used ring flag in the Rx queue to skip the notification by device */ 168 162 169 163 #define PCI_DEVICE_ID_VIRTIONET_HOST 0x1041 /**< Informs guest driver of type of VirtIO device */ 170 #define PCI_CLASS_BASE_NETWORK_CONTROLLER 0x02 /**< PCI Network device class */164 #define PCI_CLASS_BASE_NETWORK_CONTROLLER 0x02 /**< PCI Network device class */ 171 165 #define PCI_CLASS_SUB_NET_ETHERNET_CONTROLLER 0x00 /**< PCI NET Controller subclass */ 172 166 #define PCI_CLASS_PROG_UNSPECIFIED 0x00 /**< Programming interface. N/A. */ … … 312 306 bool volatile fSleeping; /**< Flags whether worker thread is sleeping or not */ 313 307 bool volatile fNotified; /**< Flags whether worker thread notified */ 308 uint16_t idxQueue; /**< Index of associated queue */ 314 309 } VIRTIONETWORKERR3; 315 310 /** Pointer to a VirtIO SCSI worker. 
*/ … … 345 340 uint16_t cVirtQueues; 346 341 342 uint16_t cWorkers; 343 347 344 uint64_t fNegotiatedFeatures; 348 345 … … 417 414 uint8_t aVlanFilter[VIRTIONET_MAX_VLAN_ID / sizeof(uint8_t)]; 418 415 419 /* Receive-blocking-related fields ***************************************/ 420 416 /** @name Statistic 417 * @{ */ 418 STAMCOUNTER StatReceiveBytes; 419 STAMCOUNTER StatTransmitBytes; 420 STAMCOUNTER StatReceiveGSO; 421 STAMCOUNTER StatTransmitPackets; 422 STAMCOUNTER StatTransmitGSO; 423 STAMCOUNTER StatTransmitCSum; 424 #ifdef VBOX_WITH_STATISTICS 425 STAMPROFILE StatReceive; 426 STAMPROFILE StatReceiveStore; 427 STAMPROFILEADV StatTransmit; 428 STAMPROFILE StatTransmitSend; 429 STAMPROFILE StatRxOverflow; 430 STAMCOUNTER StatRxOverflowWakeup; 431 STAMCOUNTER StatTransmitByNetwork; 432 STAMCOUNTER StatTransmitByThread; 433 /** @} */ 434 #endif 421 435 } VIRTIONET; 422 436 /** Pointer to the shared state of the VirtIO Host NET device. */ … … 537 551 AssertReturnVoid(pThis->hEventRxDescAvail != NIL_SUPSEMEVENT); 538 552 553 STAM_COUNTER_INC(&pThis->StatRxOverflowWakeup); 554 539 555 Log10Func(("%s Waking downstream driver's Rx buf waiter thread\n", INSTANCE(pThis))); 540 556 int rc = PDMDevHlpSUPSemEventSignal(pDevIns, pThis->hEventRxDescAvail); … … 546 562 for (uint16_t qPairIdx = 0; qPairIdx < pThis->cVirtqPairs; qPairIdx++) 547 563 { 548 RTStrPrintf(pThis->aszVirtqNames[RXQIDX _QPAIR(qPairIdx)], VIRTIO_MAX_QUEUE_NAME_SIZE, "receiveq<%d>", qPairIdx);549 RTStrPrintf(pThis->aszVirtqNames[TXQIDX _QPAIR(qPairIdx)], VIRTIO_MAX_QUEUE_NAME_SIZE, "transmitq<%d>", qPairIdx);564 RTStrPrintf(pThis->aszVirtqNames[RXQIDX(qPairIdx)], VIRTIO_MAX_QUEUE_NAME_SIZE, "receiveq<%d>", qPairIdx); 565 RTStrPrintf(pThis->aszVirtqNames[TXQIDX(qPairIdx)], VIRTIO_MAX_QUEUE_NAME_SIZE, "transmitq<%d>", qPairIdx); 550 566 } 551 567 RTStrCopy(pThis->aszVirtqNames[CTRLQIDX], VIRTIO_MAX_QUEUE_NAME_SIZE, "controlq"); … … 596 612 } 597 613 598 DECLINLINE(void) virtioNetPrintFeatures( uint32_t fFeatures, const char *pcszText)614 DECLINLINE(void) virtioNetPrintFeatures(VIRTIONET *pThis) 599 615 { 600 616 #ifdef LOG_ENABLED 601 617 static struct 602 618 { 603 uint 32_t fMask;619 uint64_t fFeatureBit; 604 620 const char *pcszDesc; 605 621 } const s_aFeatures[] = 606 622 { 607 { VIRTIONET_F_CSUM, " CSUM :Host handles packets with partial checksum.\n" },608 { VIRTIONET_F_GUEST_CSUM, " GUEST_CSUM :Guest handles packets with partial checksum.\n" },609 { VIRTIONET_F_CTRL_GUEST_OFFLOADS, " CTRL_GUEST_OFFLOADS :Control channel offloads reconfiguration support.\n" },610 { VIRTIONET_F_MAC, " MAC :Host has given MAC address.\n" },611 { VIRTIONET_F_GUEST_TSO4, " GUEST_TSO4 :Guest can receive TSOv4.\n" },612 { VIRTIONET_F_GUEST_TSO6, " GUEST_TSO6 :Guest can receive TSOv6.\n" },613 { VIRTIONET_F_GUEST_ECN, " GUEST_ECN :Guest can receive TSO with ECN.\n" },614 { VIRTIONET_F_GUEST_UFO, " GUEST_UFO :Guest can receive UFO.\n" },615 { VIRTIONET_F_HOST_TSO4, " HOST_TSO4 :Host can receive TSOv4.\n" },616 { VIRTIONET_F_HOST_TSO6, " HOST_TSO6 :Host can receive TSOv6.\n" },617 { VIRTIONET_F_HOST_ECN, " HOST_ECN :Host can receive TSO with ECN.\n" },618 { VIRTIONET_F_HOST_UFO, " HOST_UFO :Host can receive UFO.\n" },619 { VIRTIONET_F_MRG_RXBUF, " MRG_RXBUF :Guest can merge receive buffers.\n" },620 { VIRTIONET_F_STATUS, " STATUS :Configuration status field is available.\n" },621 { VIRTIONET_F_CTRL_VQ, " CTRL_VQ :Control channel is available.\n" },622 { VIRTIONET_F_CTRL_RX, " CTRL_RX :Control channel RX mode support.\n" },623 { VIRTIONET_F_CTRL_VLAN, " 
CTRL_VLAN :Control channel VLAN filtering.\n" },624 { VIRTIONET_F_GUEST_ANNOUNCE, " GUEST_ANNOUNCE :Guest can send gratuitous packets.\n" },625 { VIRTIONET_F_MQ, " MQ :Host supports multiqueue with automatic receive steering.\n" },626 { VIRTIONET_F_CTRL_MAC_ADDR, " CTRL_MAC_ADDR :Set MAC address through control channel.\n" }623 { VIRTIONET_F_CSUM, " CSUM Host handles packets with partial checksum.\n" }, 624 { VIRTIONET_F_GUEST_CSUM, " GUEST_CSUM Guest handles packets with partial checksum.\n" }, 625 { VIRTIONET_F_CTRL_GUEST_OFFLOADS, " CTRL_GUEST_OFFLOADS Control channel offloads reconfiguration support.\n" }, 626 { VIRTIONET_F_MAC, " MAC Host has given MAC address.\n" }, 627 { VIRTIONET_F_GUEST_TSO4, " GUEST_TSO4 Guest can receive TSOv4.\n" }, 628 { VIRTIONET_F_GUEST_TSO6, " GUEST_TSO6 Guest can receive TSOv6.\n" }, 629 { VIRTIONET_F_GUEST_ECN, " GUEST_ECN Guest can receive TSO with ECN.\n" }, 630 { VIRTIONET_F_GUEST_UFO, " GUEST_UFO Guest can receive UFO.\n" }, 631 { VIRTIONET_F_HOST_TSO4, " HOST_TSO4 Host can receive TSOv4.\n" }, 632 { VIRTIONET_F_HOST_TSO6, " HOST_TSO6 Host can receive TSOv6.\n" }, 633 { VIRTIONET_F_HOST_ECN, " HOST_ECN Host can receive TSO with ECN.\n" }, 634 { VIRTIONET_F_HOST_UFO, " HOST_UFO Host can receive UFO.\n" }, 635 { VIRTIONET_F_MRG_RXBUF, " MRG_RXBUF Guest can merge receive buffers.\n" }, 636 { VIRTIONET_F_STATUS, " STATUS Configuration status field is available.\n" }, 637 { VIRTIONET_F_CTRL_VQ, " CTRL_VQ Control channel is available.\n" }, 638 { VIRTIONET_F_CTRL_RX, " CTRL_RX Control channel RX mode support.\n" }, 639 { VIRTIONET_F_CTRL_VLAN, " CTRL_VLAN Control channel VLAN filtering.\n" }, 640 { VIRTIONET_F_GUEST_ANNOUNCE, " GUEST_ANNOUNCE Guest can send gratuitous packets.\n" }, 641 { VIRTIONET_F_MQ, " MQ Host supports multiqueue with automatic receive steering.\n" }, 642 { VIRTIONET_F_CTRL_MAC_ADDR, " CTRL_MAC_ADDR Set MAC address through control channel.\n" } 627 643 }; 628 644 629 645 #define MAXLINE 80 630 646 /* Display as a single buf to prevent interceding log messages */ 631 char *pszBuf = (char *)RTMemAllocZ(RT_ELEMENTS(s_aFeatures) * 80), *cp = pszBuf; 647 uint64_t fFeaturesOfferedMask = VIRTIONET_HOST_FEATURES_OFFERED; 648 uint16_t cbBuf = RT_ELEMENTS(s_aFeatures) * 132; 649 char *pszBuf = (char *)RTMemAllocZ(cbBuf); 632 650 Assert(pszBuf); 651 char *cp = pszBuf; 633 652 for (unsigned i = 0; i < RT_ELEMENTS(s_aFeatures); ++i) 634 if (s_aFeatures[i].fMask & fFeatures) { 635 int len = RTStrNLen(s_aFeatures[i].pcszDesc, MAXLINE); 636 memcpy(cp, s_aFeatures[i].pcszDesc, len); /* intentionally drop trailing '\0' */ 637 cp += len; 638 } 639 Log3(("%s:\n%s\n", pcszText, pszBuf)); 653 { 654 bool isOffered = fFeaturesOfferedMask & s_aFeatures[i].fFeatureBit; 655 bool isNegotiated = pThis->fNegotiatedFeatures & s_aFeatures[i].fFeatureBit; 656 cp += RTStrPrintf(cp, cbBuf - (cp - pszBuf), " %s %s %s", 657 isOffered ? "+" : "-", isNegotiated ? 
"x" : " ", s_aFeatures[i].pcszDesc); 658 } 659 Log3(("VirtIO Net Features Configuration\n\n" 660 " Offered Accepted Feature Description\n" 661 " ------- -------- ------- -----------\n" 662 "%s\n", pszBuf)); 640 663 RTMemFree(pszBuf); 641 664 … … 702 725 && offConfig + cb <= RT_UOFFSETOF(VIRTIONET_CONFIG_T, member) \ 703 726 + RT_SIZEOFMEMB(VIRTIONET_CONFIG_T, member)) ) 704 705 /* || ( offConfig == RT_UOFFSETOF(VIRTIONET_CONFIG_T, member) \706 && cb == RT_SIZEOFMEMB(VIRTIONET_CONFIG_T, member)) )707 */708 727 709 728 #ifdef LOG_ENABLED … … 835 854 virtioNetR3SetVirtqNames(pThis); 836 855 856 pHlp->pfnSSMGetU64( pSSM, &pThis->fNegotiatedFeatures); 857 858 pHlp->pfnSSMGetU16( pSSM, &pThis->cVirtQueues); 859 pHlp->pfnSSMGetU16( pSSM, &pThis->cWorkers); 860 837 861 for (int idxQueue = 0; idxQueue < pThis->cVirtQueues; idxQueue++) 838 862 pHlp->pfnSSMGetBool(pSSM, &pThis->afQueueAttached[idxQueue]); 863 864 int rc; 865 866 if (uPass == SSM_PASS_FINAL) 867 { 868 869 /* Load config area */ 870 #if FEATURE_OFFERED(STATUS) 871 /* config checks */ 872 RTMAC macConfigured; 873 rc = pHlp->pfnSSMGetMem(pSSM, &macConfigured.au8, sizeof(macConfigured.au8)); 874 AssertRCReturn(rc, rc); 875 if (memcmp(&macConfigured.au8, &pThis->macConfigured.au8, sizeof(macConfigured.au8)) 876 && (uPass == 0 || !PDMDevHlpVMTeleportedAndNotFullyResumedYet(pDevIns))) 877 LogRel(("%s: The mac address differs: config=%RTmac saved=%RTmac\n", 878 INSTANCE(pThis), &pThis->macConfigured, &macConfigured)); 879 #endif 880 #if FEATURE_OFFERED(MQ) 881 pHlp->pfnSSMGetU16( pSSM, &pThis->virtioNetConfig.uMaxVirtqPairs); 882 #endif 883 /* Save device-specific part */ 884 pHlp->pfnSSMGetBool( pSSM, &pThis->fCableConnected); 885 pHlp->pfnSSMGetU8( pSSM, &pThis->fPromiscuous); 886 pHlp->pfnSSMGetU8( pSSM, &pThis->fAllMulticast); 887 pHlp->pfnSSMGetU8( pSSM, &pThis->fAllUnicast); 888 pHlp->pfnSSMGetU8( pSSM, &pThis->fNoMulticast); 889 pHlp->pfnSSMGetU8( pSSM, &pThis->fNoUnicast); 890 pHlp->pfnSSMGetU8( pSSM, &pThis->fNoBroadcast); 891 892 pHlp->pfnSSMGetU32( pSSM, &pThis->cMulticastFilterMacs); 893 pHlp->pfnSSMGetMem( pSSM, pThis->aMacMulticastFilter, pThis->cMulticastFilterMacs * sizeof(RTMAC)); 894 895 if (pThis->cMulticastFilterMacs < VIRTIONET_MAC_FILTER_LEN) 896 memset(&pThis->aMacMulticastFilter[pThis->cMulticastFilterMacs], 0, 897 (VIRTIONET_MAC_FILTER_LEN - pThis->cMulticastFilterMacs) * sizeof(RTMAC)); 898 899 pHlp->pfnSSMGetU32( pSSM, &pThis->cUnicastFilterMacs); 900 pHlp->pfnSSMGetMem( pSSM, pThis->aMacUnicastFilter, pThis->cUnicastFilterMacs * sizeof(RTMAC)); 901 902 if (pThis->cUnicastFilterMacs < VIRTIONET_MAC_FILTER_LEN) 903 memset(&pThis->aMacUnicastFilter[pThis->cUnicastFilterMacs], 0, 904 (VIRTIONET_MAC_FILTER_LEN - pThis->cUnicastFilterMacs) * sizeof(RTMAC)); 905 906 rc = pHlp->pfnSSMGetMem(pSSM, pThis->aVlanFilter, sizeof(pThis->aVlanFilter)); 907 AssertRCReturn(rc, rc); 908 } 839 909 840 910 /* 841 911 * Call the virtio core to let it load its state. 
842 912 */ 843 intrc = virtioCoreR3LoadExec(&pThis->Virtio, pDevIns->pHlpR3, pSSM);913 rc = virtioCoreR3LoadExec(&pThis->Virtio, pDevIns->pHlpR3, pSSM); 844 914 845 915 /* 846 916 * Nudge queue workers 847 917 */ 848 for (int idxQueue = 0; idxQueue < pThis->cVirtqPairs; idxQueue++) 849 { 918 for (int idxWorker = 0; idxWorker < pThis->cWorkers; idxWorker++) 919 { 920 uint16_t idxQueue = pThisCC->aWorkers[idxWorker].idxQueue; 850 921 if (pThis->afQueueAttached[idxQueue]) 851 922 { 852 923 Log7Func(("%s Waking %s worker.\n", INSTANCE(pThis), VIRTQNAME(idxQueue))); 853 rc = PDMDevHlpSUPSemEventSignal(pDevIns, pThis->aWorkers[idx Queue].hEvtProcess);924 rc = PDMDevHlpSUPSemEventSignal(pDevIns, pThis->aWorkers[idxWorker].hEvtProcess); 854 925 AssertRCReturn(rc, rc); 855 926 } … … 868 939 869 940 RT_NOREF(pThisCC); 870 871 941 Log7Func(("%s SAVE EXEC!!\n", INSTANCE(pThis))); 942 943 pHlp->pfnSSMPutU64( pSSM, pThis->fNegotiatedFeatures); 944 945 pHlp->pfnSSMPutU16( pSSM, pThis->cVirtQueues); 946 pHlp->pfnSSMPutU16( pSSM, pThis->cWorkers); 872 947 873 948 for (int idxQueue = 0; idxQueue < pThis->cVirtQueues; idxQueue++) 874 949 pHlp->pfnSSMPutBool(pSSM, pThis->afQueueAttached[idxQueue]); 950 951 /* Save config area */ 952 #if FEATURE_OFFERED(STATUS) 953 pHlp->pfnSSMPutMem( pSSM, pThis->virtioNetConfig.uMacAddress.au8, 954 sizeof(pThis->virtioNetConfig.uMacAddress.au8)); 955 #endif 956 #if FEATURE_OFFERED(MQ) 957 pHlp->pfnSSMPutU16( pSSM, pThis->virtioNetConfig.uMaxVirtqPairs); 958 #endif 959 960 /* Save device-specific part */ 961 pHlp->pfnSSMPutBool( pSSM, pThis->fCableConnected); 962 pHlp->pfnSSMPutU8( pSSM, pThis->fPromiscuous); 963 pHlp->pfnSSMPutU8( pSSM, pThis->fAllMulticast); 964 pHlp->pfnSSMPutU8( pSSM, pThis->fAllUnicast); 965 pHlp->pfnSSMPutU8( pSSM, pThis->fNoMulticast); 966 pHlp->pfnSSMPutU8( pSSM, pThis->fNoUnicast); 967 pHlp->pfnSSMPutU8( pSSM, pThis->fNoBroadcast); 968 969 pHlp->pfnSSMPutU32( pSSM, pThis->cMulticastFilterMacs); 970 pHlp->pfnSSMPutMem( pSSM, pThis->aMacMulticastFilter, pThis->cMulticastFilterMacs * sizeof(RTMAC)); 971 972 pHlp->pfnSSMPutU32( pSSM, pThis->cUnicastFilterMacs); 973 pHlp->pfnSSMPutMem( pSSM, pThis->aMacUnicastFilter, pThis->cUnicastFilterMacs * sizeof(RTMAC)); 974 975 int rc = pHlp->pfnSSMPutMem(pSSM, pThis->aVlanFilter, sizeof(pThis->aVlanFilter)); 976 AssertRCReturn(rc, rc); 875 977 876 978 /* … … 884 986 * Device interface. * 885 987 *********************************************************************************************************************************/ 886 988 /*xx*/ 887 989 /** 888 990 * @callback_method_impl{FNPDMDEVASYNCNOTIFY} … … 932 1034 PDMDevHlpAsyncNotificationCompleted(pDevIns); 933 1035 934 /** @todo make sure Rx and Tx are really quiesced (how to we synchronize w/downstream driver?) */1036 /** @todo make sure Rx and Tx are really quiesced (how do we synchronize w/downstream driver?) */ 935 1037 } 936 1038 … … 959 1061 960 1062 virtioNetR3QuiesceDevice(pDevIns, enmType); 1063 virtioNetR3WakeupRxBufWaiter(pDevIns); 961 1064 } 962 1065 … … 984 1087 /** 985 1088 * @interface_method_impl{PDMDEVREGR3,pfnResume} 1089 * 1090 * Just process the VM device-related state change itself. 1091 * Unlike SCSI driver, there are no packets to redo. No I/O was halted or saved while 1092 * quiescing for pfnSuspend(). Any packets in process were simply dropped by the upper 1093 * layer driver, presumably to be retried or cause erring out at the upper layers 1094 * of the network stack. 
986 1095 */ 987 1096 static DECLCALLBACK(void) virtioNetR3Resume(PPDMDEVINS pDevIns) … … 993 1102 pThisCC->fQuiescing = false; 994 1103 995 996 /** @todo implement this function properly */ 997 998 /* Wake worker threads flagged to skip pulling queue entries during quiesce 999 * to ensure they re-check their queues. Active request queues may already 1000 * be awake due to new reqs coming in. 1001 */ 1002 /* 1003 for (uint16_t idxQueue = 0; idxQueue < VIRTIONET_REQ_QUEUE_CNT; idxQueue++) 1004 { 1005 if (ASMAtomicReadBool(&pThisCC->aWorkers[idxQueue].fSleeping)) 1006 { 1007 Log7Func(("%s waking %s worker.\n", INSTANCE(pThis), VIRTQNAME(idxQueue))); 1008 int rc = PDMDevHlpSUPSemEventSignal(pDevIns, pThis->aWorkers[idxQueue].hEvtProcess); 1009 AssertRC(rc); 1010 } 1011 } 1012 */ 1013 /* Ensure guest is working the queues too. */ 1104 /* Ensure guest is working the queues */ 1014 1105 virtioCoreR3VmStateChanged(&pThis->Virtio, kvirtIoVmStateChangedResume); 1015 1106 } … … 1084 1175 * @thread RX 1085 1176 */ 1086 static int virtioNetR3IsRxQueuePrimed(PPDMDEVINS pDevIns, PVIRTIONET pThis, uint16_t idx Queue)1087 { 1088 #define LOGPARAMS INSTANCE(pThis), VIRTQNAME(idx Queue)1177 static int virtioNetR3IsRxQueuePrimed(PPDMDEVINS pDevIns, PVIRTIONET pThis, uint16_t idxRxQueue) 1178 { 1179 #define LOGPARAMS INSTANCE(pThis), VIRTQNAME(idxRxQueue) 1089 1180 1090 1181 if (!pThis->fVirtioReady) … … 1092 1183 Log8Func(("%s %s VirtIO not ready (rc = VERR_NET_NO_BUFFER_SPACE)\n", LOGPARAMS)); 1093 1184 } 1094 else if (!virtioCoreIsQueueEnabled(&pThis->Virtio, RXQIDX_QPAIR(idxQueue)))1185 else if (!virtioCoreIsQueueEnabled(&pThis->Virtio, idxRxQueue)) 1095 1186 { 1096 1187 Log8Func(("%s %s queue not enabled (rc = VERR_NET_NO_BUFFER_SPACE)\n", LOGPARAMS)); 1097 1188 } 1098 else if (virtioCoreQueueIsEmpty(pDevIns, &pThis->Virtio, RXQIDX_QPAIR(idxQueue)))1189 else if (virtioCoreQueueIsEmpty(pDevIns, &pThis->Virtio, idxRxQueue)) 1099 1190 { 1100 1191 Log8Func(("%s %s queue is empty (rc = VERR_NET_NO_BUFFER_SPACE)\n", LOGPARAMS)); 1101 virtioCoreQueueSetNotify(&pThis->Virtio, RXQIDX_QPAIR(idxQueue), true);1192 virtioCoreQueueSetNotify(&pThis->Virtio, idxRxQueue, true); 1102 1193 } 1103 1194 else 1104 1195 { 1105 1196 Log8Func(("%s %s ready with available buffers\n", LOGPARAMS)); 1106 virtioCoreQueueSetNotify(&pThis->Virtio, RXQIDX_QPAIR(idxQueue), false);1197 virtioCoreQueueSetNotify(&pThis->Virtio, idxRxQueue, false); 1107 1198 return VINF_SUCCESS; 1108 1199 } … … 1115 1206 /** @todo If we ever start using more than one Rx/Tx queue pair, is a random queue 1116 1207 selection algorithm feasible or even necessary to prevent starvation? */ 1117 for (int idxQueue = 0; idxQueue < pThis->cVirtQueues; idxQueue += 2) /* Skip odd queue #'s because Rx queues only! 
*/ 1118 { 1119 if (!IS_RX_QUEUE(idxQueue)) 1120 continue; 1121 1122 if (RT_SUCCESS(virtioNetR3IsRxQueuePrimed(pDevIns, pThis, idxQueue))) 1208 for (int idxQueuePair = 0; idxQueuePair < pThis->cVirtqPairs; idxQueuePair++) 1209 if (RT_SUCCESS(virtioNetR3IsRxQueuePrimed(pDevIns, pThis, RXQIDX(idxQueuePair)))) 1123 1210 return true; 1124 }1125 1211 return false; 1126 1212 } … … 1128 1214 * Returns true if VirtIO core and device are in a running and operational state 1129 1215 */ 1130 DECLINLINE(bool) virtioNet AllSystemsGo(PVIRTIONET pThis, PPDMDEVINS pDevIns)1216 DECLINLINE(bool) virtioNetIsOperational(PVIRTIONET pThis, PPDMDEVINS pDevIns) 1131 1217 { 1132 1218 if (!pThis->fVirtioReady) … … 1163 1249 1164 1250 ASMAtomicXchgBool(&pThis->fLeafWantsRxBuffers, true); 1251 STAM_PROFILE_START(&pThis->StatRxOverflow, a); 1165 1252 1166 1253 do { … … 1181 1268 RTThreadSleep(1); 1182 1269 1183 } while (virtioNetAllSystemsGo(pThis, pDevIns)); 1184 1270 } while (virtioNetIsOperational(pThis, pDevIns)); 1271 1272 STAM_PROFILE_STOP(&pThis->StatRxOverflow, a); 1185 1273 ASMAtomicXchgBool(&pThis->fLeafWantsRxBuffers, false); 1186 1274 … … 1235 1323 } 1236 1324 1237 1238 1325 /** 1239 1326 * @interface_method_impl{PDMINETWORKCONFIG,pfnGetMac} … … 1346 1433 } 1347 1434 1348 /** @todo Original combined unicast & multicast into one table. Should we distinguish? */1349 1350 1435 for (uint16_t i = 0; i < pThis->cUnicastFilterMacs; i++) 1351 1436 if (!memcmp(&pThis->aMacUnicastFilter[i], pvBuf, sizeof(RTMAC))) … … 1360 1445 return false; 1361 1446 } 1447 1448 static int virtioNetR3CopyRxPktToGuest(PPDMDEVINS pDevIns, PVIRTIONET pThis, const void *pvBuf, size_t cb, 1449 VIRTIONET_PKT_HDR_T *rxPktHdr, uint16_t cSegsAllocated, 1450 PRTSGBUF pVirtSegBufToGuest, PRTSGSEG paVirtSegsToGuest, 1451 uint16_t idxRxQueue) 1452 { 1453 uint8_t fAddPktHdr = true; 1454 RTGCPHYS gcPhysPktHdrNumBuffers; 1455 uint16_t cDescs; 1456 uint32_t uOffset; 1457 for (cDescs = uOffset = 0; uOffset < cb; ) 1458 { 1459 PVIRTIO_DESC_CHAIN_T pDescChain = NULL; 1460 1461 int rc = virtioCoreR3QueueGet(pDevIns, &pThis->Virtio, RXQIDX(idxRxQueue), &pDescChain, true); 1462 AssertMsgReturn(rc == VINF_SUCCESS || rc == VERR_NOT_AVAILABLE, ("%Rrc\n", rc), rc); 1463 1464 /** @todo Find a better way to deal with this */ 1465 AssertMsgReturnStmt(rc == VINF_SUCCESS && pDescChain->cbPhysReturn, 1466 ("Not enough Rx buffers in queue to accomodate ethernet packet\n"), 1467 virtioCoreR3DescChainRelease(&pThis->Virtio, pDescChain), 1468 VERR_INTERNAL_ERROR); 1469 1470 /* Length of first seg of guest Rx buf should never be less than sizeof(virtio_net_pkt_hdr). 1471 * Otherwise code has to become more complicated, e.g. locate & cache seg idx & offset of 1472 * virtio_net_header.num_buffers, to defer updating (in gcPhys). 
Re-visit if needed */ 1473 1474 AssertMsgReturnStmt(pDescChain->pSgPhysReturn->paSegs[0].cbSeg >= sizeof(VIRTIONET_PKT_HDR_T), 1475 ("Desc chain's first seg has insufficient space for pkt header!\n"), 1476 virtioCoreR3DescChainRelease(&pThis->Virtio, pDescChain), 1477 VERR_INTERNAL_ERROR); 1478 1479 uint32_t cbDescChainLeft = pDescChain->cbPhysReturn; 1480 uint8_t cbHdr = sizeof(VIRTIONET_PKT_HDR_T); 1481 1482 /* Fill the Guest Rx buffer with data received from the interface */ 1483 for (uint16_t cSegs = 0; uOffset < cb && cbDescChainLeft; ) 1484 { 1485 if (fAddPktHdr) 1486 { 1487 /* Lead with packet header */ 1488 paVirtSegsToGuest[0].cbSeg = cbHdr; 1489 paVirtSegsToGuest[0].pvSeg = RTMemAlloc(cbHdr); 1490 AssertReturn(paVirtSegsToGuest[0].pvSeg, VERR_NO_MEMORY); 1491 cbDescChainLeft -= cbHdr; 1492 1493 memcpy(paVirtSegsToGuest[0].pvSeg, rxPktHdr, cbHdr); 1494 1495 /* Calculate & cache addr of field to update after final value is known, in gcPhys mem */ 1496 gcPhysPktHdrNumBuffers = pDescChain->pSgPhysReturn->paSegs[0].gcPhys 1497 + RT_UOFFSETOF(VIRTIONET_PKT_HDR_T, uNumBuffers); 1498 fAddPktHdr = false; 1499 cSegs++; 1500 } 1501 1502 if (cSegs >= cSegsAllocated) 1503 { 1504 cSegsAllocated <<= 1; /* double allocation size */ 1505 paVirtSegsToGuest = (PRTSGSEG)RTMemRealloc(paVirtSegsToGuest, sizeof(RTSGSEG) * cSegsAllocated); 1506 if (!paVirtSegsToGuest) 1507 virtioCoreR3DescChainRelease(&pThis->Virtio, pDescChain); 1508 AssertReturn(paVirtSegsToGuest, VERR_NO_MEMORY); 1509 } 1510 1511 /* Append remaining Rx pkt or as much current desc chain has room for */ 1512 uint32_t cbCropped = RT_MIN(cb, cbDescChainLeft); 1513 paVirtSegsToGuest[cSegs].cbSeg = cbCropped; 1514 paVirtSegsToGuest[cSegs].pvSeg = ((uint8_t *)pvBuf) + uOffset; 1515 cbDescChainLeft -= cbCropped; 1516 uOffset += cbCropped; 1517 cDescs++; 1518 cSegs++; 1519 RTSgBufInit(pVirtSegBufToGuest, paVirtSegsToGuest, cSegs); 1520 Log7Func(("Send Rx pkt to guest...\n")); 1521 STAM_PROFILE_START(&pThis->StatReceiveStore, a); 1522 virtioCoreR3QueuePut(pDevIns, &pThis->Virtio, idxRxQueue, 1523 pVirtSegBufToGuest, pDescChain, true); 1524 STAM_PROFILE_STOP(&pThis->StatReceiveStore, a); 1525 1526 if (FEATURE_DISABLED(MRG_RXBUF)) 1527 break; 1528 } 1529 1530 virtioCoreR3DescChainRelease(&pThis->Virtio, pDescChain); 1531 } 1532 1533 if (uOffset < cb) 1534 { 1535 LogFunc(("%s Packet did not fit into RX queue (packet size=%u)!\n", INSTANCE(pThis), cb)); 1536 return VERR_TOO_MUCH_DATA; 1537 } 1538 1539 /* Fix-up pkthdr (in guest phys. memory) with number buffers (descriptors) processed */ 1540 1541 int rc = PDMDevHlpPCIPhysWrite(pDevIns, gcPhysPktHdrNumBuffers, &cDescs, sizeof(cDescs)); 1542 AssertMsgRCReturn(rc, 1543 ("Failure updating descriptor count in pkt hdr in guest physical memory\n"), 1544 rc); 1545 1546 /** @todo WHY *must* we *force* notifying guest that we filled its Rx buffer(s)? 1547 * If we don't notify the guest, it doesn't detect it and stalls, even though 1548 * guest is responsible for setting the used-ring flag in the Rx queue that tells 1549 * us to skip the notification interrupt! Obviously forcing the interrupt is 1550 * non-optimal performance-wise and seems to contradict the Virtio spec. 1551 * Is that a bug in the linux virtio_net.c driver? */ 1552 1553 virtioCoreQueueSync(pDevIns, &pThis->Virtio, RXQIDX(idxRxQueue), /* fForce */ true); 1554 1555 return VINF_SUCCESS; 1556 } 1557 1362 1558 1363 1559 /** … … 1373 1569 * @param cb Number of bytes available in the buffer. 
1374 1570 * @param pGso Pointer to Global Segmentation Offload structure 1375 * @param idx Queue Queue to work with1571 * @param idxRxQueue Rx queue to work with 1376 1572 * @thread RX 1377 1573 */ 1378 1574 static int virtioNetR3HandleRxPacket(PPDMDEVINS pDevIns, PVIRTIONET pThis, PVIRTIONETCC pThisCC, 1379 const void *pvBuf, size_t cb, PCPDMNETWORKGSO pGso, uint16_t idx Queue)1575 const void *pvBuf, size_t cb, PCPDMNETWORKGSO pGso, uint16_t idxRxQueue) 1380 1576 { 1381 1577 RT_NOREF(pThisCC); … … 1411 1607 rxPktHdr.uGsoSize = pGso->cbMaxSeg; 1412 1608 rxPktHdr.uChksumStart = pGso->offHdr2; 1609 STAM_REL_COUNTER_INC(&pThis->StatReceiveGSO); 1413 1610 } 1414 1611 else … … 1420 1617 uint16_t cSegsAllocated = VIRTIONET_PREALLOCATE_RX_SEG_COUNT; 1421 1618 1422 /** @todo r=bird: error codepaths below are almost all leaky! Maybe keep 1423 * allocations and cleanup here and put the code doing the complicated 1424 * work into a helper that can AssertReturn at will without needing to 1425 * care about cleaning stuff up. */ 1426 PRTSGBUF pVirtSegBufToGuest = (PRTSGBUF)RTMemAllocZ(sizeof(RTSGBUF)); /** @todo r=bird: Missing check. */ 1619 PRTSGBUF pVirtSegBufToGuest = (PRTSGBUF)RTMemAllocZ(sizeof(RTSGBUF)); 1620 AssertReturn(pVirtSegBufToGuest, VERR_NO_MEMORY); 1621 1427 1622 PRTSGSEG paVirtSegsToGuest = (PRTSGSEG)RTMemAllocZ(sizeof(RTSGSEG) * cSegsAllocated); 1428 AssertReturn(paVirtSegsToGuest, VERR_NO_MEMORY); 1429 1430 1431 uint8_t fAddPktHdr = true; 1432 RTGCPHYS gcPhysPktHdrNumBuffers; 1433 uint16_t cDescs; 1434 uint32_t uOffset; 1435 for (cDescs = uOffset = 0; uOffset < cb; ) 1436 { 1437 PVIRTIO_DESC_CHAIN_T pDescChain = NULL; 1438 1439 int rc = virtioCoreR3QueueGet(pDevIns, &pThis->Virtio, RXQIDX_QPAIR(idxQueue), &pDescChain, true); 1440 AssertMsgReturn(rc == VINF_SUCCESS || rc == VERR_NOT_AVAILABLE, ("%Rrc\n", rc), rc); 1441 1442 /** @todo Find a better way to deal with this */ 1443 AssertMsgReturnStmt(rc == VINF_SUCCESS && pDescChain->cbPhysReturn, 1444 ("Not enough Rx buffers in queue to accomodate ethernet packet\n"), 1445 virtioCoreR3DescChainRelease(&pThis->Virtio, pDescChain), 1446 VERR_INTERNAL_ERROR); 1447 1448 /* Unlikely that len of 1st seg of guest Rx (IN) buf is less than sizeof(virtio_net_pkt_hdr) == 12. 1449 * Assert it to reduce complexity. 
Robust solution would entail finding seg idx and offset of 1450 * virtio_net_header.num_buffers (to update field *after* hdr & pkts copied to gcPhys) */ 1451 AssertMsgReturnStmt(pDescChain->pSgPhysReturn->paSegs[0].cbSeg >= sizeof(VIRTIONET_PKT_HDR_T), 1452 ("Desc chain's first seg has insufficient space for pkt header!\n"), 1453 virtioCoreR3DescChainRelease(&pThis->Virtio, pDescChain), 1454 VERR_INTERNAL_ERROR); 1455 1456 uint32_t cbDescChainLeft = pDescChain->cbPhysReturn; 1457 uint8_t cbHdr = sizeof(VIRTIONET_PKT_HDR_T); 1458 /* Fill the Guest Rx buffer with data received from the interface */ 1459 for (uint16_t cSegs = 0; uOffset < cb && cbDescChainLeft; ) 1460 { 1461 if (fAddPktHdr) 1462 { 1463 /* Lead with packet header */ 1464 paVirtSegsToGuest[0].cbSeg = cbHdr; 1465 paVirtSegsToGuest[0].pvSeg = RTMemAlloc(cbHdr); 1466 AssertReturn(paVirtSegsToGuest[0].pvSeg, VERR_NO_MEMORY); 1467 cbDescChainLeft -= cbHdr; 1468 1469 memcpy(paVirtSegsToGuest[0].pvSeg, &rxPktHdr, cbHdr); 1470 1471 /* Calculate & cache the field we will need to update later in gcPhys memory */ 1472 gcPhysPktHdrNumBuffers = pDescChain->pSgPhysReturn->paSegs[0].gcPhys 1473 + RT_UOFFSETOF(VIRTIONET_PKT_HDR_T, uNumBuffers); 1474 fAddPktHdr = false; 1475 cSegs++; 1476 } 1477 1478 if (cSegs >= cSegsAllocated) 1479 { 1480 cSegsAllocated <<= 1; /* double the allocation size */ 1481 paVirtSegsToGuest = (PRTSGSEG)RTMemRealloc(paVirtSegsToGuest, sizeof(RTSGSEG) * cSegsAllocated); 1482 AssertReturn(paVirtSegsToGuest, VERR_NO_MEMORY); 1483 } 1484 1485 /* Append remaining Rx pkt or as much current desc chain has room for */ 1486 uint32_t cbCropped = RT_MIN(cb, cbDescChainLeft); 1487 paVirtSegsToGuest[cSegs].cbSeg = cbCropped; 1488 paVirtSegsToGuest[cSegs].pvSeg = ((uint8_t *)pvBuf) + uOffset; 1489 cbDescChainLeft -= cbCropped; 1490 uOffset += cbCropped; 1491 cDescs++; 1492 cSegs++; 1493 RTSgBufInit(pVirtSegBufToGuest, paVirtSegsToGuest, cSegs); 1494 Log7Func(("Send Rx pkt to guest...\n")); 1495 virtioCoreR3QueuePut(pDevIns, &pThis->Virtio, RXQIDX_QPAIR(idxQueue), 1496 pVirtSegBufToGuest, pDescChain, true); 1497 1498 if (FEATURE_DISABLED(MRG_RXBUF)) 1499 break; 1500 } 1501 1502 virtioCoreR3DescChainRelease(&pThis->Virtio, pDescChain); 1503 } 1504 1505 /* Fix-up pkthdr (in guest phys. memory) with number buffers (descriptors) processed */ 1506 1507 int rc = PDMDevHlpPCIPhysWrite(pDevIns, gcPhysPktHdrNumBuffers, &cDescs, sizeof(cDescs)); 1508 AssertMsgRCReturn(rc, 1509 ("Failure updating descriptor count in pkt hdr in guest physical memory\n"), 1510 rc); 1511 1512 virtioCoreQueueSync(pDevIns, &pThis->Virtio, RXQIDX_QPAIR(idxQueue)); 1623 AssertReturnStmt(paVirtSegsToGuest, RTMemFree(pVirtSegBufToGuest), VERR_NO_MEMORY); 1624 1625 int rc = virtioNetR3CopyRxPktToGuest(pDevIns, pThis, pvBuf, cb, &rxPktHdr, cSegsAllocated, 1626 pVirtSegBufToGuest, paVirtSegsToGuest, idxRxQueue); 1513 1627 1514 1628 RTMemFree(paVirtSegsToGuest); … … 1516 1630 1517 1631 Log7(("\n")); 1518 if (uOffset < cb) 1519 { 1520 LogFunc(("%s Packet did not fit into RX queue (packet size=%u)!\n", INSTANCE(pThis), cb)); 1521 return VERR_TOO_MUCH_DATA; 1522 } 1523 return VINF_SUCCESS; 1632 return rc; 1524 1633 } 1525 1634 … … 1577 1686 selection algorithm feasible or even necessary to prevent starvation? 
*/ 1578 1687 1579 for (int idxQueue = 0; idxQueue < pThis->cVirtQueues; idxQueue += 2) /* Skip odd queue #'s because Rx queues only */1580 { 1581 if (RT_SUCCESS(!virtioNetR3IsRxQueuePrimed(pDevIns, pThis, idxQueue)))1688 for (int idxQueuePair = 0; idxQueuePair < pThis->cVirtqPairs; idxQueuePair++) 1689 { 1690 if (RT_SUCCESS(!virtioNetR3IsRxQueuePrimed(pDevIns, pThis, RXQIDX(idxQueuePair)))) 1582 1691 { 1583 1692 /* Drop packets if VM is not running or cable is disconnected. */ 1584 if (!virtioNet AllSystemsGo(pThis, pDevIns) || !IS_LINK_UP(pThis))1693 if (!virtioNetIsOperational(pThis, pDevIns) || !IS_LINK_UP(pThis)) 1585 1694 return VINF_SUCCESS; 1586 1695 1696 STAM_PROFILE_START(&pThis->StatReceive, a); 1587 1697 virtioNetR3SetReadLed(pThisCC, true); 1588 1698 1589 1699 int rc = VINF_SUCCESS; 1590 1700 if (virtioNetR3AddressFilter(pThis, pvBuf, cb)) 1591 rc = virtioNetR3HandleRxPacket(pDevIns, pThis, pThisCC, pvBuf, cb, pGso, idxQueue); 1701 { 1702 rc = virtioNetR3HandleRxPacket(pDevIns, pThis, pThisCC, pvBuf, cb, pGso, RXQIDX(idxQueuePair)); 1703 STAM_REL_COUNTER_ADD(&pThis->StatReceiveBytes, cb); 1704 } 1592 1705 1593 1706 virtioNetR3SetReadLed(pThisCC, false); 1594 1707 STAM_PROFILE_STOP(&pThis->StatReceive, a); 1595 1708 return rc; 1596 1709 } … … 1842 1955 if (FEATURE_DISABLED(STATUS) || FEATURE_DISABLED(GUEST_ANNOUNCE)) 1843 1956 { 1844 LogFunc(("%s Ignoring CTRL class VIRTIONET_CTRL_ANNOUNCE. Not configured to handle it\n", INSTANCE(pThis)));1845 virtioNetPrintFeatures(pThis->fNegotiatedFeatures, "Features");1957 LogFunc(("%s Ignoring CTRL class VIRTIONET_CTRL_ANNOUNCE.\n" 1958 "VIRTIO_F_STATUS or VIRTIO_F_GUEST_ANNOUNCE feature not enabled\n", INSTANCE(pThis))); 1846 1959 break; 1847 1960 } … … 1886 1999 1887 2000 virtioCoreR3QueuePut(pDevIns, &pThis->Virtio, CTRLQIDX, pReturnSegBuf, pDescChain, true); 1888 virtioCoreQueueSync(pDevIns, &pThis->Virtio, CTRLQIDX );2001 virtioCoreQueueSync(pDevIns, &pThis->Virtio, CTRLQIDX, false); 1889 2002 1890 2003 for (int i = 0; i < cSegs; i++) … … 1981 2094 INSTANCE(pThis), pGso->u8Type, pGso->cbHdrsTotal, pGso->cbHdrsSeg, 1982 2095 pGso->cbMaxSeg, pGso->offHdr1, pGso->offHdr2)); 2096 STAM_REL_COUNTER_INC(&pThis->StatTransmitGSO); 1983 2097 } 1984 2098 else if (pPktHdr->uFlags & VIRTIONET_HDR_F_NEEDS_CSUM) 1985 2099 { 2100 STAM_REL_COUNTER_INC(&pThis->StatTransmitCSum); 1986 2101 /* 1987 2102 * This is not GSO frame but checksum offloading is requested. … … 1995 2110 1996 2111 static void virtioNetR3TransmitPendingPackets(PPDMDEVINS pDevIns, PVIRTIONET pThis, PVIRTIONETCC pThisCC, 1997 uint16_t idx Queue, bool fOnWorkerThread)2112 uint16_t idxTxQueue, bool fOnWorkerThread) 1998 2113 { 1999 2114 … … 2033 2148 } 2034 2149 2035 int cPkts = virtioCoreR3QueuePendingCount(pVirtio->pDevIns, pVirtio, idx Queue);2150 int cPkts = virtioCoreR3QueuePendingCount(pVirtio->pDevIns, pVirtio, idxTxQueue); 2036 2151 if (!cPkts) 2037 2152 { 2038 LogFunc(("%s No packets to send found on %s\n", INSTANCE(pThis), VIRTQNAME(idx Queue)));2153 LogFunc(("%s No packets to send found on %s\n", INSTANCE(pThis), VIRTQNAME(idxTxQueue))); 2039 2154 2040 2155 if (pDrv) … … 2050 2165 int rc; 2051 2166 PVIRTIO_DESC_CHAIN_T pDescChain = NULL; 2052 while ((rc = virtioCoreR3QueuePeek(pVirtio->pDevIns, pVirtio, idxQueue, &pDescChain)) == VINF_SUCCESS) 2053 { 2054 if (RT_SUCCESS(rc)) /** @todo r=bird: pointless, see loop condition. 
*/ 2055 Log10Func(("%s fetched descriptor chain from %s\n", INSTANCE(pThis), VIRTQNAME(idxQueue))); 2056 else 2057 { 2058 LogFunc(("%s failed to find expected data on %s, rc = %Rrc\n", INSTANCE(pThis), VIRTQNAME(idxQueue), rc)); 2059 virtioCoreR3DescChainRelease(pVirtio, pDescChain); 2060 break; 2061 } 2167 while ((rc = virtioCoreR3QueuePeek(pVirtio->pDevIns, pVirtio, idxTxQueue, &pDescChain)) == VINF_SUCCESS) 2168 { 2169 Log10Func(("%s fetched descriptor chain from %s\n", INSTANCE(pThis), VIRTQNAME(idxTxQueue))); 2062 2170 2063 2171 PVIRTIOSGBUF pSgPhysSend = pDescChain->pSgPhysSend; … … 2085 2193 PDMNETWORKGSO Gso; 2086 2194 PPDMNETWORKGSO pGso = virtioNetR3SetupGsoCtx(&Gso, &PktHdr); 2195 uint64_t uOffset; 2087 2196 2088 2197 /** @todo Optimize away the extra copying! (lazy bird) */ … … 2091 2200 if (RT_SUCCESS(rc)) 2092 2201 { 2202 STAM_REL_COUNTER_INC(&pThis->StatTransmitPackets); 2203 STAM_PROFILE_START(&pThis->StatTransmitSend, a); 2204 2093 2205 uSize -= sizeof(PktHdr); 2094 2206 rc = virtioNetR3ReadHeader(pDevIns, paSegsFromGuest[0].gcPhys, &PktHdr, uSize); … … 2097 2209 virtioCoreSgBufAdvance(pSgPhysSend, sizeof(PktHdr)); 2098 2210 2099 uint64_t uOffset = 0;2100 2211 size_t cbCopied = 0; 2101 2212 size_t cbTotal = 0; 2102 2213 size_t cbRemain = pSgBufToPdmLeafDevice->cbUsed = uSize; 2214 uOffset = 0; 2103 2215 while (cbRemain) 2104 2216 { … … 2124 2236 { 2125 2237 LogFunc(("%s Failed to transmit frame, rc = %Rrc\n", INSTANCE(pThis), rc)); 2238 STAM_PROFILE_STOP(&pThis->StatTransmitSend, a); 2239 STAM_PROFILE_ADV_STOP(&pThis->StatTransmit, a); 2126 2240 pThisCC->pDrv->pfnFreeBuf(pThisCC->pDrv, pSgBufToPdmLeafDevice); 2127 2241 } 2242 STAM_PROFILE_STOP(&pThis->StatTransmitSend, a); 2243 STAM_REL_COUNTER_ADD(&pThis->StatTransmitBytes, uOffset); 2128 2244 } 2129 2245 else … … 2136 2252 2137 2253 /* Remove this descriptor chain from the available ring */ 2138 virtioCoreR3QueueSkip(pVirtio, idx Queue);2254 virtioCoreR3QueueSkip(pVirtio, idxTxQueue); 2139 2255 2140 2256 /* No data to return to guest, but call is needed put elem (e.g. 
desc chain) on used ring */ 2141 virtioCoreR3QueuePut(pVirtio->pDevIns, pVirtio, idxQueue, NULL, pDescChain, false); 2142 2143 virtioCoreQueueSync(pVirtio->pDevIns, pVirtio, idxQueue); 2257 virtioCoreR3QueuePut(pVirtio->pDevIns, pVirtio, idxTxQueue, NULL, pDescChain, false); 2258 2259 /* Update used ring idx and notify guest that we've transmitted the data it sent */ 2260 virtioCoreQueueSync(pVirtio->pDevIns, pVirtio, idxTxQueue, false); 2144 2261 } 2145 2262 … … 2165 2282 PVIRTIONET pThis = PDMDEVINS_2_DATA(pThisCC->pDevIns, PVIRTIONET); 2166 2283 2284 STAM_COUNTER_INC(&pThis->StatTransmitByNetwork); 2285 2167 2286 /** @todo If we ever start using more than one Rx/Tx queue pair, is a random queue 2168 2287 selection algorithm feasible or even necessary */ 2169 virtioNetR3TransmitPendingPackets(pDevIns, pThis, pThisCC, TXQIDX _QPAIR(0), false /*fOnWorkerThread*/);2288 virtioNetR3TransmitPendingPackets(pDevIns, pThis, pThisCC, TXQIDX(0), false /*fOnWorkerThread*/); 2170 2289 } 2171 2290 … … 2178 2297 PVIRTIONETCC pThisCC = RT_FROM_MEMBER(pVirtioCC, VIRTIONETCC, Virtio); 2179 2298 PPDMDEVINS pDevIns = pThisCC->pDevIns; 2180 PVIRTIONETWORKER pWorker = &pThis->aWorkers[idxQueue]; 2181 PVIRTIONETWORKERR3 pWorkerR3 = &pThisCC->aWorkers[idxQueue]; 2299 2300 uint16_t idxWorker; 2301 if (idxQueue == CTRLQIDX) 2302 idxWorker = pThis->cWorkers - 1; 2303 else 2304 idxWorker = idxQueue / 2; 2305 2306 PVIRTIONETWORKER pWorker = &pThis->aWorkers[idxWorker]; 2307 PVIRTIONETWORKERR3 pWorkerR3 = &pThisCC->aWorkers[idxWorker]; 2182 2308 AssertReturnVoid(idxQueue < pThis->cVirtQueues); 2183 2309 … … 2196 2322 else 2197 2323 { 2198 /* Wake queue's worker thread up if sleeping */2324 /* Wake queue's worker thread up if sleeping (e.g. a Tx queue, or the control queue */ 2199 2325 if (!ASMAtomicXchgBool(&pWorkerR3->fNotified, true)) 2200 2326 { … … 2214 2340 static DECLCALLBACK(int) virtioNetR3WorkerThread(PPDMDEVINS pDevIns, PPDMTHREAD pThread) 2215 2341 { 2216 uint16_t const idxQueue = (uint16_t)(uintptr_t)pThread->pvUser;2217 2342 PVIRTIONET pThis = PDMDEVINS_2_DATA(pDevIns, PVIRTIONET); 2218 2343 PVIRTIONETCC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PVIRTIONETCC); 2219 PVIRTIONETWORKER pWorker = &pThis->aWorkers[idxQueue]; 2220 PVIRTIONETWORKERR3 pWorkerR3 = &pThisCC->aWorkers[idxQueue]; 2344 uint16_t const idxWorker = (uint16_t)(uintptr_t)pThread->pvUser; 2345 PVIRTIONETWORKER pWorker = &pThis->aWorkers[idxWorker]; 2346 PVIRTIONETWORKERR3 pWorkerR3 = &pThisCC->aWorkers[idxWorker]; 2347 uint16_t const idxQueue = pWorkerR3->idxQueue; 2348 2221 2349 if (pThread->enmState == PDMTHREADSTATE_INITIALIZING) 2222 2350 { 2223 2351 return VINF_SUCCESS; 2224 2352 } 2225 LogFunc(("%s %s\n", INSTANCE(pThis), VIRTQNAME(idxQueue))); 2353 LogFunc(("%s worker thread started for %s\n", INSTANCE(pThis), VIRTQNAME(idxQueue))); 2354 virtioCoreQueueSetNotify(&pThis->Virtio, idxQueue, false); 2226 2355 while (pThread->enmState == PDMTHREADSTATE_RUNNING) 2227 2356 { 2228 virtioCoreQueueSetNotify(&pThis->Virtio, idxQueue, true);2229 2230 2357 if (virtioCoreQueueIsEmpty(pDevIns, &pThis->Virtio, idxQueue)) 2231 2358 { 2359 virtioCoreQueueSetNotify(&pThis->Virtio, idxQueue, true); 2232 2360 /* Atomic interlocks avoid missing alarm while going to sleep & notifier waking the awoken */ 2233 2361 ASMAtomicWriteBool(&pWorkerR3->fSleeping, true); … … 2235 2363 if (!fNotificationSent) 2236 2364 { 2365 virtioCoreQueueSetNotify(&pThis->Virtio, idxQueue, true); 2237 2366 Log10Func(("%s %s worker sleeping...\n", INSTANCE(pThis), 
VIRTQNAME(idxQueue))); 2238 2367 Assert(ASMAtomicReadBool(&pWorkerR3->fSleeping)); 2239 2368 int rc = PDMDevHlpSUPSemEventWaitNoResume(pDevIns, pWorker->hEvtProcess, RT_INDEFINITE_WAIT); 2369 STAM_COUNTER_INC(&pThis->StatTransmitByThread); 2240 2370 AssertLogRelMsgReturn(RT_SUCCESS(rc) || rc == VERR_INTERRUPTED, ("%Rrc\n", rc), rc); 2241 2371 if (RT_UNLIKELY(pThread->enmState != PDMTHREADSTATE_RUNNING)) … … 2250 2380 } 2251 2381 ASMAtomicWriteBool(&pWorkerR3->fSleeping, false); 2382 virtioCoreQueueSetNotify(&pThis->Virtio, idxQueue, false); 2252 2383 } 2253 virtioCoreQueueSetNotify(&pThis->Virtio, idxQueue, false);2254 2384 2255 2385 /* Dispatch to the handler for the queue this worker is set up to drive */ … … 2280 2410 * leaf driver invokes PDMINETWORKDOWN.pfnWaitReceiveAvail() callback, 2281 2411 * which waits until notified directly by virtioNetR3QueueNotified() 2282 * that guest IN buffers have been added to receive virt queue. */ 2412 * that guest IN buffers have been added to receive virt queue. 2413 */ 2283 2414 } 2284 2415 } … … 2417 2548 Log10Func(("%s\n", INSTANCE(pThis))); 2418 2549 int rc = VINF_SUCCESS; 2419 for (unsigned idx Queue = 0; idxQueue < pThis->cVirtQueues; idxQueue++)2420 { 2421 PVIRTIONETWORKER pWorker = &pThis->aWorkers[idx Queue];2550 for (unsigned idxWorker = 0; idxWorker < pThis->cWorkers; idxWorker++) 2551 { 2552 PVIRTIONETWORKER pWorker = &pThis->aWorkers[idxWorker]; 2422 2553 if (pWorker->hEvtProcess != NIL_SUPSEMEVENT) 2423 2554 { … … 2425 2556 pWorker->hEvtProcess = NIL_SUPSEMEVENT; 2426 2557 } 2427 if (pThisCC->aWorkers[idx Queue].pThread)2558 if (pThisCC->aWorkers[idxWorker].pThread) 2428 2559 { 2429 2560 int rcThread; 2430 rc = PDMDevHlpThreadDestroy(pDevIns, pThisCC->aWorkers[idx Queue].pThread, &rcThread);2561 rc = PDMDevHlpThreadDestroy(pDevIns, pThisCC->aWorkers[idxWorker].pThread, &rcThread); 2431 2562 if (RT_FAILURE(rc) || RT_FAILURE(rcThread)) 2432 2563 AssertMsgFailed(("%s Failed to destroythread rc=%Rrc rcThread=%Rrc\n", __FUNCTION__, rc, rcThread)); 2433 pThisCC->aWorkers[idx Queue].pThread = NULL;2564 pThisCC->aWorkers[idxWorker].pThread = NULL; 2434 2565 } 2435 2566 } … … 2437 2568 } 2438 2569 2439 static int virtioNetR3CreateWorkerThreads(PPDMDEVINS pDevIns, PVIRTIONET pThis, PVIRTIONETCC pThisCC) 2570 static int virtioNetR3CreateOneWorkerThread(PPDMDEVINS pDevIns, PVIRTIONET pThis, PVIRTIONETCC pThisCC, 2571 uint16_t idxWorker, uint16_t idxQueue) 2440 2572 { 2441 2573 Log10Func(("%s\n", INSTANCE(pThis))); 2442 2574 int rc = VINF_SUCCESS; 2443 /* Attach the queues and create worker threads for them: */ 2444 for (uint16_t idxQueue = 1; idxQueue < pThis->cVirtQueues; idxQueue++) 2445 { 2446 /* Skip creating threads for receive queues, only create for transmit queues & control queue */ 2447 if (!IS_RX_QUEUE(idxQueue)) 2448 { 2449 rc = PDMDevHlpSUPSemEventCreate(pDevIns, &pThis->aWorkers[idxQueue].hEvtProcess); 2450 2451 if (RT_FAILURE(rc)) 2452 return PDMDevHlpVMSetError(pDevIns, rc, RT_SRC_POS, 2453 N_("DevVirtioNET: Failed to create SUP event semaphore")); 2454 2455 rc = PDMDevHlpThreadCreate(pDevIns, &pThisCC->aWorkers[idxQueue].pThread, 2456 (void *)(uintptr_t)idxQueue, virtioNetR3WorkerThread, 2457 virtioNetR3WakeupWorker, 0, RTTHREADTYPE_IO, VIRTQNAME(idxQueue)); 2458 if (rc != VINF_SUCCESS) 2459 { 2460 LogRel(("Error creating thread for Virtual Queue %s: %Rrc\n", VIRTQNAME(idxQueue), rc)); 2461 return rc; 2462 } 2463 } 2464 pThis->afQueueAttached[idxQueue] = true; 2465 } 2575 rc = PDMDevHlpSUPSemEventCreate(pDevIns, 
&pThis->aWorkers[idxWorker].hEvtProcess); 2576 2577 if (RT_FAILURE(rc)) 2578 return PDMDevHlpVMSetError(pDevIns, rc, RT_SRC_POS, 2579 N_("DevVirtioNET: Failed to create SUP event semaphore")); 2580 2581 LogFunc(("creating thread, idxWorker=%d, idxQueue=%d\n", idxWorker, idxQueue)); 2582 rc = PDMDevHlpThreadCreate(pDevIns, &pThisCC->aWorkers[idxWorker].pThread, 2583 (void *)(uintptr_t)idxWorker, virtioNetR3WorkerThread, 2584 virtioNetR3WakeupWorker, 0, RTTHREADTYPE_IO, VIRTQNAME(idxQueue)); 2585 if (rc != VINF_SUCCESS) 2586 { 2587 LogRel(("Error creating thread for Virtual Queue %s: %Rrc\n", VIRTQNAME(idxQueue), rc)); 2588 return rc; 2589 } 2590 pThisCC->aWorkers[idxWorker].idxQueue = idxQueue; 2591 pThis->afQueueAttached[idxQueue] = true; 2466 2592 return rc; 2467 2593 } 2468 2594 2595 static int virtioNetR3CreateWorkerThreads(PPDMDEVINS pDevIns, PVIRTIONET pThis, PVIRTIONETCC pThisCC) 2596 { 2597 Log10Func(("%s\n", INSTANCE(pThis))); 2598 2599 int rc; 2600 uint16_t idxWorker = 0; 2601 for (uint16_t idxQueuePair = 0; idxQueuePair < pThis->cVirtqPairs; idxQueuePair++) 2602 { 2603 rc = virtioNetR3CreateOneWorkerThread(pDevIns, pThis, pThisCC, idxWorker, TXQIDX(idxQueuePair)); 2604 AssertRCReturn(rc, rc); 2605 idxWorker++; 2606 } 2607 rc = virtioNetR3CreateOneWorkerThread(pDevIns, pThis, pThisCC, idxWorker++, CTRLQIDX); 2608 pThis->cWorkers = idxWorker; 2609 return rc; 2610 } 2469 2611 /** 2470 2612 * @callback_method_impl{VIRTIOCORER3,pfnStatusChanged} … … 2486 2628 pThisCC->fQuiescing = false; 2487 2629 pThis->fNegotiatedFeatures = virtioCoreGetAcceptedFeatures(pVirtio); 2488 virtio NetPrintFeatures(VIRTIONET_HOST_FEATURES_OFFERED, "Offered Features");2489 virtioNetPrintFeatures(pThis ->fNegotiatedFeatures, "Negotiated Features");2630 virtioPrintFeatures(pVirtio); 2631 virtioNetPrintFeatures(pThis); 2490 2632 for (unsigned idxQueue = 0; idxQueue < pThis->cVirtQueues; idxQueue++) 2491 2633 { … … 2503 2645 Log7Func(("%s Link is %s\n", INSTANCE(pThis), pThis->fCableConnected ? "up" : "down")); 2504 2646 2505 pThis->fPromiscuous = true;2506 pThis->fAllMulticast = false;2507 pThis->fAllUnicast = false;2508 pThis->fNoMulticast = false;2509 pThis->fNoUnicast = false;2510 pThis->fNoBroadcast = false;2647 pThis->fPromiscuous = true; 2648 pThis->fAllMulticast = false; 2649 pThis->fAllUnicast = false; 2650 pThis->fNoMulticast = false; 2651 pThis->fNoUnicast = false; 2652 pThis->fNoBroadcast = false; 2511 2653 pThis->uIsTransmitting = 0; 2512 2654 pThis->cUnicastFilterMacs = 0; … … 2810 2952 virtioNetR3SaveExec, virtioNetR3LoadExec); 2811 2953 AssertRCReturn(rc, rc); 2954 2955 2956 2957 /* 2958 * Statistics and debug stuff. 2959 * The /Public/ bits are official and used by session info in the GUI. 
2960 */ 2961 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatReceiveBytes, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, 2962 "Amount of data received", "/Public/NetAdapter/%u/BytesReceived", uStatNo); 2963 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTransmitBytes, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, 2964 "Amount of data transmitted", "/Public/NetAdapter/%u/BytesTransmitted", uStatNo); 2965 PDMDevHlpSTAMRegisterF(pDevIns, &pDevIns->iInstance, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, 2966 "Device instance number", "/Public/NetAdapter/%u/%s", uStatNo, pDevIns->pReg->szName); 2967 2968 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatReceiveBytes, STAMTYPE_COUNTER, "ReceiveBytes", STAMUNIT_BYTES, "Amount of data received"); 2969 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatTransmitBytes, STAMTYPE_COUNTER, "TransmitBytes", STAMUNIT_BYTES, "Amount of data transmitted"); 2970 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatReceiveGSO, STAMTYPE_COUNTER, "Packets/ReceiveGSO", STAMUNIT_COUNT, "Number of received GSO packets"); 2971 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatTransmitPackets, STAMTYPE_COUNTER, "Packets/Transmit", STAMUNIT_COUNT, "Number of sent packets"); 2972 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatTransmitGSO, STAMTYPE_COUNTER, "Packets/Transmit-Gso", STAMUNIT_COUNT, "Number of sent GSO packets"); 2973 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatTransmitCSum, STAMTYPE_COUNTER, "Packets/Transmit-Csum", STAMUNIT_COUNT, "Number of completed TX checksums"); 2974 # ifdef VBOX_WITH_STATISTICS 2975 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatReceive, STAMTYPE_PROFILE, "Receive/Total", STAMUNIT_TICKS_PER_CALL, "Profiling receive"); 2976 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatReceiveStore, STAMTYPE_PROFILE, "Receive/Store", STAMUNIT_TICKS_PER_CALL, "Profiling receive storing"); 2977 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatRxOverflow, STAMTYPE_PROFILE, "RxOverflow", STAMUNIT_TICKS_PER_OCCURENCE, "Profiling RX overflows"); 2978 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatRxOverflowWakeup, STAMTYPE_COUNTER, "RxOverflowWakeup", STAMUNIT_OCCURENCES, "Nr of RX overflow wakeups"); 2979 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatTransmit, STAMTYPE_PROFILE, "Transmit/Total", STAMUNIT_TICKS_PER_CALL, "Profiling transmits in HC"); 2980 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatTransmitSend, STAMTYPE_PROFILE, "Transmit/Send", STAMUNIT_TICKS_PER_CALL, "Profiling send transmit in HC"); 2981 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatTransmitByNetwork, STAMTYPE_COUNTER, "Transmit/ByNetwork", STAMUNIT_COUNT, "Network-initiated transmissions"); 2982 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatTransmitByThread, STAMTYPE_COUNTER, "Transmit/ByThread", STAMUNIT_COUNT, "Thread-initiated transmissions"); 2983 # endif 2812 2984 2813 2985 /*
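Several mechanisms touched by this changeset are illustrated below with small standalone sketches. The renamed RXQIDX/TXQIDX/CTRLQIDX macros encode the virtio-net layout in which each queue pair occupies two consecutive virtqueue slots (receiveq<n> at even indices, transmitq<n> at odd) and the control queue sits after the last pair. A minimal sketch of that index math; the pair count and multiqueue flag below are assumptions for the example, not the device's configuration:

// Standalone illustration of the queue-pair index math; constants are
// assumptions for the sketch, not taken from the device.
#include <cstdio>

static const int  MAX_QPAIRS  = 2;     // stands in for VIRTIONET_MAX_QPAIRS
static const bool fMultiQueue = true;  // stands in for FEATURE_ENABLED(MQ)

static int rxqIdx(int qPairIdx)  { return qPairIdx * 2; }      // receiveq<n> at even slots
static int txqIdx(int qPairIdx)  { return qPairIdx * 2 + 1; }  // transmitq<n> at odd slots
static int ctrlqIdx()            { return fMultiQueue ? (MAX_QPAIRS - 1) * 2 + 2 : 2; }

int main()
{
    for (int i = 0; i < MAX_QPAIRS; i++)
        std::printf("pair %d: rxq=%d txq=%d\n", i, rxqIdx(i), txqIdx(i));
    std::printf("ctrlq=%d\n", ctrlqIdx());
    return 0;
}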
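virtioNetPrintFeatures() now renders the offered and negotiated feature bits into one preallocated buffer so the table cannot be interleaved with log output from other threads. The sketch below shows the same pattern with a small invented feature table and plain printf standing in for the VBox logging and feature constants:

// Offered-vs-negotiated feature dump built as a single string; the feature
// bits and descriptions here are illustrative, not the device's constants.
#include <cstdint>
#include <cstdio>
#include <string>

struct FeatureDesc { uint64_t fBit; const char *pszDesc; };

static const FeatureDesc g_aFeatures[] =
{
    { UINT64_C(1) << 0,  "CSUM        Host handles packets with partial checksum." },
    { UINT64_C(1) << 5,  "MAC         Host has given MAC address." },
    { UINT64_C(1) << 15, "MRG_RXBUF   Guest can merge receive buffers." },
};

static std::string dumpFeatures(uint64_t fOffered, uint64_t fNegotiated)
{
    std::string s;
    for (const FeatureDesc &f : g_aFeatures)
    {
        char szLine[160];
        std::snprintf(szLine, sizeof(szLine), "  %s  %s  %s\n",
                      (fOffered    & f.fBit) ? "+" : "-",
                      (fNegotiated & f.fBit) ? "x" : " ",
                      f.pszDesc);
        s += szLine;
    }
    return s;
}

int main()
{
    std::printf("Offered Accepted Feature\n%s", dumpFeatures(0x8021, 0x0021).c_str());
    return 0;
}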
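The changeset also moves from one worker per virtqueue to one worker per transmit queue plus a single control-queue worker, so virtioNetR3QueueNotified() must map a notified queue index back to its worker slot (idxQueue / 2 for a transmit queue, cWorkers - 1 for the control queue). A small sketch of that mapping, with queue counts invented for the example:

// Maps a notified queue index to its worker; counts are assumptions for the sketch.
#include <cstdio>

static const int cVirtqPairs = 2;
static const int ctrlqIdx    = 4;                 // control queue after the last pair
static const int cWorkers    = cVirtqPairs + 1;   // one per Tx queue + one for controlq

static int workerForQueue(int idxQueue)
{
    if (idxQueue == ctrlqIdx)
        return cWorkers - 1;   // last worker drives the control queue
    return idxQueue / 2;       // transmitq<n> (odd index 2n+1) maps to worker n
}

int main()
{
    std::printf("txq0(idx 1) -> worker %d, txq1(idx 3) -> worker %d, ctrlq(idx 4) -> worker %d\n",
                workerForQueue(1), workerForQueue(3), workerForQueue(ctrlqIdx));
    return 0;
}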
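The Rx path is now factored into virtioNetR3CopyRxPktToGuest(), which leads the first descriptor chain with the packet header, crops the payload to whatever each chain can hold, and afterwards patches the number of chains used into the header's num_buffers field. The sketch below mimics that copy/fix-up order with plain byte vectors standing in for descriptor chains and an invented header layout; it is an illustration of the flow, not the device code:

// Merge-buffers style Rx copy: header in the first buffer, payload cropped per
// buffer, buffer count patched into the header afterwards. Sizes are invented.
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstring>
#include <vector>

struct PktHdr { uint8_t flags; uint16_t numBuffers; };    // invented layout for the sketch

int main()
{
    const std::vector<uint8_t> pkt(3000, 0xAB);            // frame received from the wire
    std::vector<std::vector<uint8_t>> chains(4, std::vector<uint8_t>(1536)); // guest Rx buffers

    PktHdr   hdr{};                                        // written ahead of the payload
    size_t   uOffset = 0;
    uint16_t cDescs  = 0;

    for (auto &chain : chains)
    {
        if (uOffset >= pkt.size())
            break;
        uint8_t *pb     = chain.data();
        size_t   cbLeft = chain.size();
        if (cDescs == 0)                                   // header goes only in the first chain
        {
            std::memcpy(pb, &hdr, sizeof(hdr));
            pb     += sizeof(hdr);
            cbLeft -= sizeof(hdr);
        }
        size_t cbCopy = std::min(cbLeft, pkt.size() - uOffset);
        std::memcpy(pb, pkt.data() + uOffset, cbCopy);
        uOffset += cbCopy;
        cDescs++;
    }

    /* Fix-up: patch the final buffer count into the header already placed in the
     * first buffer, mirroring the deferred write of uNumBuffers in the device. */
    std::memcpy(chains[0].data() + offsetof(PktHdr, numBuffers), &cDescs, sizeof(cDescs));

    std::printf("copied %zu bytes into %u guest buffers\n", uOffset, (unsigned)cDescs);
    return 0;
}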
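Finally, the worker loop and virtioNetR3QueueNotified() coordinate through the fSleeping/fNotified flags so that a notification arriving just as the worker decides to sleep is not lost: the worker publishes fSleeping before its final check of fNotified, and the notifier only signals the event when it sees the worker sleeping. A rough equivalent using standard C++ primitives in place of the PDM semaphore; this is an approximation of the idea, not the VBox implementation:

// Sleep/wake interlock between a queue notifier and a worker thread.
#include <atomic>
#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <thread>

static std::atomic<bool>       g_fSleeping{false};
static std::atomic<bool>       g_fNotified{false};
static std::mutex              g_mtx;
static std::condition_variable g_cv;

// Roughly what happens when the guest kicks a queue that has a worker.
static void notifyQueue()
{
    if (!g_fNotified.exchange(true) && g_fSleeping.load())
    {
        std::lock_guard<std::mutex> lock(g_mtx);
        g_cv.notify_one();                      // plays the role of the event-semaphore signal
    }
}

// One iteration of the worker loop: publish "sleeping", re-check for a pending
// notification, and only block if none arrived.
static void workerOnce()
{
    g_fSleeping.store(true);
    if (!g_fNotified.exchange(false))
    {
        std::unique_lock<std::mutex> lock(g_mtx);
        g_cv.wait(lock, [] { return g_fNotified.load(); });
        g_fNotified.store(false);
    }
    g_fSleeping.store(false);
    std::printf("worker woke up, draining queue\n");
}

int main()
{
    std::thread worker(workerOnce);
    notifyQueue();
    worker.join();
    return 0;
}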