Changeset 82961 in vbox for trunk/src/VBox/Devices/Network
Timestamp: Feb 3, 2020 4:59:10 PM (5 years ago)
File: 1 edited
Legend: unchanged context lines carry no prefix, added lines are prefixed with '+', removed lines are prefixed with '-'.
trunk/src/VBox/Devices/Network/DevVirtioNet_1_0.cpp
r82863 → r82961

 #define VIRTIONET_SAVED_STATE_VERSION   UINT32_C(1)
-#define VIRTIONET_MAX_QPAIRS            512
+#define VIRTIONET_MAX_QPAIRS            1
 #define VIRTIONET_MAX_QUEUES            (VIRTIONET_MAX_QPAIRS * 2 + 1)
 #define VIRTIONET_MAX_FRAME_SIZE        65535 + 18  /**< Max IP pkt size + Ethernet header with VLAN tag */
…
 #define INSTANCE(pState)                pState->szInstanceName
 #define QUEUE_NAME(a_pVirtio, a_idxQueue) ((a_pVirtio)->virtqState[(a_idxQueue)].szVirtqName)
-#define VIRTQNAME(qIdx)                 (pThis->aszVirtqNames[qIdx])
-#define CBVIRTQNAME(qIdx)               RTStrNLen(VIRTQNAME(qIdx), sizeof(VIRTQNAME(qIdx)))
+#define VIRTQNAME(idxQueue)             (pThis->aszVirtqNames[idxQueue])
+#define CBVIRTQNAME(idxQueue)           RTStrNLen(VIRTQNAME(idxQueue), sizeof(VIRTQNAME(idxQueue)))
 #define FEATURE_ENABLED(feature)        (pThis->fNegotiatedFeatures & VIRTIONET_F_##feature)
 #define FEATURE_DISABLED(feature)       (!FEATURE_ENABLED(feature))
-#define FEATURE_OFFERED(feature)        (VIRTIONET_HOST_FEATURES_OFFERED & VIRTIONET_F_##feature)
+#define FEATURE_OFFERED(feature)        VIRTIONET_HOST_FEATURES_OFFERED & VIRTIONET_F_##feature
 
 #define SET_LINK_UP(pState) \
…
 #define RXQIDX_QPAIR(qPairIdx)          (qPairIdx * 2)
 #define TXQIDX_QPAIR(qPairIdx)          (qPairIdx * 2 + 1)
-#define CTRLQIDX                        ((pThis->fNegotiatedFeatures & VIRTIONET_F_MQ) ? ((VIRTIONET_MAX_QPAIRS - 1) * 2 + 2) : (2))
+#define CTRLQIDX                        (FEATURE_ENABLED(MQ) ? ((VIRTIONET_MAX_QPAIRS - 1) * 2 + 2) : 2)
 
 #define RXVIRTQNAME(qPairIdx)           (pThis->aszVirtqNames[RXQIDX_QPAIR(qPairIdx)])
…
 
 #define VIRTIONET_HOST_FEATURES_OFFERED \
-      VIRTIONET_F_MAC \
-    | VIRTIONET_F_STATUS \
+      VIRTIONET_F_STATUS \
+    | VIRTIONET_F_GUEST_ANNOUNCE \
+    | VIRTIONET_F_MAC \
     | VIRTIONET_F_CTRL_VQ \
     | VIRTIONET_F_CTRL_RX \
…
     uint8_t uClass;                         /**< class */
     uint8_t uCmd;                           /**< command */
-    uint8_t uCmdSpecific;                   /**< command specific */
 };
 #pragma pack()
…
     uint64_t uOffloads;                     /**< offloads */
 
-/** @name Offload State Configuration Flags (VirtIO 1.0, 5.1.6.5.6.1)
- * @{ */
-//#define VIRTIONET_F_GUEST_CSUM   1         /**< Guest offloads Chksum */
-//#define VIRTIONET_F_GUEST_TSO4   7         /**< Guest offloads TSO4   */
-//#define VIRTIONET_F_GUEST_TSO6   8         /**< Guest Offloads TSO6   */
-//#define VIRTIONET_F_GUEST_ECN    9         /**< Guest Offloads ECN    */
-//#define VIRTIONET_F_GUEST_UFO   10         /**< Guest Offloads UFO    */
-/** @} */
-
 /** @name Control virtq: Setting Offloads State (VirtIO 1.0, 5.1.6.5.6.1)
  * @{ */
…
 #define VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET  0  /** Apply new offloads configuration */
 /** @} */
-
 
 /**
…
     bool volatile fLeafWantsRxBuffers;
 
+    SUPSEMEVENT hEventRxDescAvail;
+
     /** Flags whether VirtIO core is in ready state */
     uint8_t fVirtioReady;
…
     uint8_t fResetting;
 
+    /** Quiescing I/O activity flag */
+    uint8_t fQuiescing;
+
+
     /** Promiscuous mode -- RX filter accepts all packets. */
     uint8_t fPromiscuous;
…
 
     /* Receive-blocking-related fields ***************************************/
-
-    /** EMT: Gets signalled when more RX descriptors become available. */
-    SUPSEMEVENT hEventRxDescAvail;
 
 } VIRTIONET;
…
 static DECLCALLBACK(int) virtioNetR3WakeupWorker(PPDMDEVINS pDevIns, PPDMTHREAD pThread)
 {
+    LogFunc(("\n"));
     PVIRTIONET pThis = PDMDEVINS_2_DATA(pDevIns, PVIRTIONET);
     return PDMDevHlpSUPSemEventSignal(pDevIns, pThis->aWorkers[(uintptr_t)pThread->pvUser].hEvtProcess);
…
 
     AssertReturnVoid(pThis->hEventRxDescAvail != NIL_SUPSEMEVENT);
-    AssertReturnVoid(ASMAtomicReadBool(&pThis->fLeafWantsRxBuffers));
-
-    Log(("%s Waking downstream driver's Rx buf waiter thread\n", INSTANCE(pThis)));
+
+    LogFunc(("%s Waking downstream driver's Rx buf waiter thread\n", INSTANCE(pThis)));
     int rc = PDMDevHlpSUPSemEventSignal(pDevIns, pThis->hEventRxDescAvail);
     AssertRC(rc);
…
 DECLINLINE(bool) virtioNetValidateRequiredFeatures(uint32_t fFeatures)
 {
+    LogFunc(("\n"));
     uint32_t fGuestChksumRequired =   fFeatures & VIRTIONET_F_GUEST_TSO4
                                    || fFeatures & VIRTIONET_F_GUEST_TSO6
                                    || fFeatures & VIRTIONET_F_GUEST_UFO;
 
     uint32_t fHostChksumRequired =    fFeatures & VIRTIONET_F_HOST_TSO4
                                    || fFeatures & VIRTIONET_F_HOST_TSO6
                                    || fFeatures & VIRTIONET_F_HOST_UFO;
 
     uint32_t fCtrlVqRequired =        fFeatures & VIRTIONET_F_CTRL_RX
                                    || fFeatures & VIRTIONET_F_CTRL_VLAN
                                    || fFeatures & VIRTIONET_F_GUEST_ANNOUNCE
                                    || fFeatures & VIRTIONET_F_MQ
                                    || fFeatures & VIRTIONET_F_CTRL_MAC_ADDR;
 
     if (fGuestChksumRequired && !(fFeatures & VIRTIONET_F_GUEST_CSUM))
…
     return true;
 }
-
-
-
 
 /*********************************************************************************************************************************
…
            || offConfig == RT_UOFFSETOF(VIRTIONET_CONFIG_T, member) + sizeof(uint32_t)) \
           && cb == sizeof(uint32_t)) \
-       || (   offConfig == RT_UOFFSETOF(VIRTIONET_CONFIG_T, member) \
-           && cb == RT_SIZEOFMEMB(VIRTIONET_CONFIG_T, member)) )
+       || (   offConfig >= RT_UOFFSETOF(VIRTIONET_CONFIG_T, member) \
+           && offConfig + cb <= RT_UOFFSETOF(VIRTIONET_CONFIG_T, member) \
+                              + RT_SIZEOFMEMB(VIRTIONET_CONFIG_T, member)) )
+
+/*     || (   offConfig == RT_UOFFSETOF(VIRTIONET_CONFIG_T, member) \
+           && cb == RT_SIZEOFMEMB(VIRTIONET_CONFIG_T, member)) )
+*/
 
 #ifdef LOG_ENABLED
…
 static DECLCALLBACK(int) virtioNetR3DevCapRead(PPDMDEVINS pDevIns, uint32_t uOffset, void *pv, uint32_t cb)
 {
+    PVIRTIONET pThis = PDMDEVINS_2_DATA(pDevIns, PVIRTIONET);
+
+    LogFunc(("%s: uOffset: %d, cb: %d\n", INSTANCE(pThis), uOffset, cb));
     return virtioNetR3CfgAccessed(PDMDEVINS_2_DATA(pDevIns, PVIRTIONET), uOffset, pv, cb, false /*fRead*/);
 }
…
 static DECLCALLBACK(int) virtioNetR3DevCapWrite(PPDMDEVINS pDevIns, uint32_t uOffset, const void *pv, uint32_t cb)
 {
+    PVIRTIONET pThis = PDMDEVINS_2_DATA(pDevIns, PVIRTIONET);
+
+    LogFunc(("%s: uOffset: %d, cb: %d: %.*Rhxs\n", INSTANCE(pThis), uOffset, cb, RT_MAX(cb, 8) , pv));
     return virtioNetR3CfgAccessed(PDMDEVINS_2_DATA(pDevIns, PVIRTIONET), uOffset, (void *)pv, cb, true /*fWrite*/);
 }
…
 
     virtioNetR3SetVirtqNames(pThis);
-    for (int qIdx = 0; qIdx < pThis->cVirtQueues; qIdx++)
-        pHlp->pfnSSMGetBool(pSSM, &pThis->afQueueAttached[qIdx]);
+
+    for (int idxQueue = 0; idxQueue < pThis->cVirtQueues; idxQueue++)
+        pHlp->pfnSSMGetBool(pSSM, &pThis->afQueueAttached[idxQueue]);
 
     /*
…
      * Nudge queue workers
      */
-    for (int qIdx = 0; qIdx < pThis->cVirtqPairs; qIdx++)
-    {
-        if (pThis->afQueueAttached[qIdx])
+    for (int idxQueue = 0; idxQueue < pThis->cVirtqPairs; idxQueue++)
+    {
+        if (pThis->afQueueAttached[idxQueue])
         {
-            LogFunc(("Waking %s worker.\n", VIRTQNAME(qIdx)));
-            rc = PDMDevHlpSUPSemEventSignal(pDevIns, pThis->aWorkers[qIdx].hEvtProcess);
+            LogFunc(("Waking %s worker.\n", VIRTQNAME(idxQueue)));
+            rc = PDMDevHlpSUPSemEventSignal(pDevIns, pThis->aWorkers[idxQueue].hEvtProcess);
             AssertRCReturn(rc, rc);
         }
…
     PVIRTIONET pThis = PDMDEVINS_2_DATA(pDevIns, PVIRTIONET);
     PVIRTIONETCC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PVIRTIONETCC);
     PCPDMDEVHLPR3 pHlp = pDevIns->pHlpR3;
 
     RT_NOREF(pThisCC);
…
     LogFunc(("SAVE EXEC!!\n"));
 
-    for (int qIdx = 0; qIdx < pThis->cVirtQueues; qIdx++)
-        pHlp->pfnSSMPutBool(pSSM, pThis->afQueueAttached[qIdx]);
+    for (int idxQueue = 0; idxQueue < pThis->cVirtQueues; idxQueue++)
+        pHlp->pfnSSMPutBool(pSSM, pThis->afQueueAttached[idxQueue]);
 
     /*
…
     PVIRTIONETCC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PVIRTIONETCC);
 
-    // if (ASMAtomicReadu(&pThis->cActiveReqs))
-    //     return false;
+    /** @todo create test to conclusively determine I/O has been quiesced and add it here: */
 
     LogFunc(("Device I/O activity quiesced: %s\n",
…
     pThisCC->enmQuiescingFor = enmQuiescingFor;
 
+    /*
+     * Wake downstream network driver thread that's waiting for Rx buffers to be available
+     * to tell it that's going to happen...
+     */
+    virtioNetR3WakeupRxBufWaiter(pDevIns);
+
     PDMDevHlpSetAsyncNotification(pDevIns, virtioNetR3DeviceQuiesced);
 
     /* If already quiesced invoke async callback. */
-    // if (!ASMAtomicReadu(&pThis->cActiveReqs))
-    //     PDMDevHlpAsyncNotificationCompleted(pDevIns);
+    if (!ASMAtomicReadBool(&pThis->fLeafWantsRxBuffers))
+        PDMDevHlpAsyncNotificationCompleted(pDevIns);
+
+    /** @todo make sure Rx and Tx are really quiesced (how to we synchronize w/downstream driver?) */
 }
…
     RT_NOREF2(pThis, pThisCC);
 
-    /* VM is halted, thus no new I/O being dumped into queues by the guest.
-     * Workers have been flagged to stop pulling stuff already queued-up by the guest.
-     * Now tell lower-level to to suspend reqs (for example, DrvVD suspends all reqs
-     * on its wait queue, and we will get a callback as the state changes to
-     * suspended (and later, resumed) for each).
-     */
-
-    virtioNetR3WakeupRxBufWaiter(pDevIns);
-
     virtioNetR3QuiesceDevice(pDevIns, enmType);
-
 }
…
 
     pThisCC->fQuiescing = false;
+
+    /** @todo implement this function properly */
 
     /* Wake worker threads flagged to skip pulling queue entries during quiesce
…
      */
     /*
-    for (uint16_t qIdx = 0; qIdx < VIRTIONET_REQ_QUEUE_CNT; qIdx++)
-    {
-        if (ASMAtomicReadBool(&pThisCC->aWorkers[qIdx].fSleeping))
+    for (uint16_t idxQueue = 0; idxQueue < VIRTIONET_REQ_QUEUE_CNT; idxQueue++)
+    {
+        if (ASMAtomicReadBool(&pThisCC->aWorkers[idxQueue].fSleeping))
         {
-            Log6Func(("waking %s worker.\n", VIRTQNAME(qIdx)));
-            int rc = PDMDevHlpSUPSemEventSignal(pDevIns, pThis->aWorkers[qIdx].hEvtProcess);
+            Log6Func(("waking %s worker.\n", VIRTQNAME(idxQueue)));
+            int rc = PDMDevHlpSUPSemEventSignal(pDevIns, pThis->aWorkers[idxQueue].hEvtProcess);
             AssertRC(rc);
…
 }
 
-
 #ifdef IN_RING3
-
-
 
 DECLINLINE(uint16_t) virtioNetR3Checkum16(const void *pvBuf, size_t cb)
…
 
 /**
- * Check if the device can receive data now.
- * This must be called before the pfnRecieve() method is called.
+ * Check whether specific queue is ready and has Rx buffers (virtqueue descriptors)
+ * available. This must be called before the pfnRecieve() method is called.
  *
…
  * @remarks As a side effect this function enables queue notification
…
  * @thread  RX
  */
-static int virtioNetR3IsRxQueuePrimed(PPDMDEVINS pDevIns, PVIRTIONET pThis)
+static int virtioNetR3IsRxQueuePrimed(PPDMDEVINS pDevIns, PVIRTIONET pThis, uint16_t idxQueue)
 {
     int rc;
 
-    LogFlowFunc(("%s: \n", INSTANCE(pThis)));
+    LogFlowFunc(("%s: idxQueue = %d\n", INSTANCE(pThis), idxQueue));
 
     if (!pThis->fVirtioReady)
         rc = VERR_NET_NO_BUFFER_SPACE;
 
-    else if (!virtioCoreIsQueueEnabled(&pThis->Virtio, RXQIDX_QPAIR(0)))
+    else if (!virtioCoreIsQueueEnabled(&pThis->Virtio, RXQIDX_QPAIR(idxQueue)))
         rc = VERR_NET_NO_BUFFER_SPACE;
 
-    else if (virtioCoreQueueIsEmpty(pDevIns, &pThis->Virtio, RXQIDX_QPAIR(0)))
-    {
-        virtioCoreQueueSetNotify(&pThis->Virtio, RXQIDX_QPAIR(0), true);
+    else if (virtioCoreQueueIsEmpty(pDevIns, &pThis->Virtio, RXQIDX_QPAIR(idxQueue)))
+    {
+        virtioCoreQueueSetNotify(&pThis->Virtio, RXQIDX_QPAIR(idxQueue), true);
         rc = VERR_NET_NO_BUFFER_SPACE;
     }
     else
     {
-        virtioCoreQueueSetNotify(&pThis->Virtio, RXQIDX_QPAIR(0), false);
+        virtioCoreQueueSetNotify(&pThis->Virtio, RXQIDX_QPAIR(idxQueue), false);
         rc = VINF_SUCCESS;
     }
 
-    LogFlowFunc(("%s: -> %Rrc\n", INSTANCE(pThis), rc));
+    LogFlowFunc(("%s: idxQueue = %d -> %Rrc\n", INSTANCE(pThis), idxQueue, rc));
     return rc;
 }
+
+/*
+ * Returns true if VirtIO core and device are in a running and operational state
+ */
+DECLINLINE(bool) virtioNetAllSystemsGo(PVIRTIONET pThis, PPDMDEVINS pDevIns)
+{
+    if (!pThis->fVirtioReady)
+        return false;
+
+    if (pThis->fQuiescing)
+        return false;
+
+    VMSTATE enmVMState = PDMDevHlpVMState(pDevIns);
+    if (!RT_LIKELY(enmVMState == VMSTATE_RUNNING || enmVMState == VMSTATE_RUNNING_LS))
+        return false;
+
+    return true;
+}
…
     PVIRTIONET pThis = PDMDEVINS_2_DATA(pDevIns, PVIRTIONET);
 
-    LogFlowFunc(("%s: timeoutMs=%u\n", INSTANCE(pThis), timeoutMs));
+    if (!virtioNetAllSystemsGo(pThis, pDevIns))
+    {
+        LogFunc(("VirtIO not ready\n"));
+        return VERR_NET_NO_BUFFER_SPACE;
+    }
 
     if (!timeoutMs)
         return VERR_NET_NO_BUFFER_SPACE;
 
+    LogFlowFunc(("%s: timeoutMs=%u\n", INSTANCE(pThis), timeoutMs));
+
     ASMAtomicXchgBool(&pThis->fLeafWantsRxBuffers, true);
 
-    VMSTATE enmVMState;
-    while (RT_LIKELY(   (enmVMState = PDMDevHlpVMState(pDevIns)) == VMSTATE_RUNNING
-                     ||  enmVMState == VMSTATE_RUNNING_LS))
-    {
-
-        if (RT_SUCCESS(virtioNetR3IsRxQueuePrimed(pDevIns, pThis)))
+    /** @todo If we ever start using more than one Rx/Tx queue pair, is a random queue
+              selection algorithm feasible or even necessary to prevent starvation? */
+    do {
+        for (int idxQueue = 0; idxQueue < pThis->cVirtQueues; idxQueue += 2) /* Skip odd queue #'s because Rx queues only! */
         {
-            LogFunc(("Rx bufs now available, releasing waiter..."));
-            return VINF_SUCCESS;
+            if (!IS_RX_QUEUE(idxQueue))
+                continue;
+
+            if (RT_SUCCESS(virtioNetR3IsRxQueuePrimed(pDevIns, pThis, idxQueue)))
+            {
+                LogFunc(("Rx bufs now available, releasing waiter..."));
+                return VINF_SUCCESS;
+            }
         }
-        LogFunc(("%s: Starved for guest Rx bufs, waiting %u ms ...\n", INSTANCE(pThis), timeoutMs));
-
-        int rc = PDMDevHlpSUPSemEventWaitNoResume(pDevIns, pThis->hEventRxDescAvail, timeoutMs);
-        if (RT_FAILURE(rc) && rc != VERR_TIMEOUT && rc != VERR_INTERRUPTED)
+        LogFunc(("%s: Starved for guest Rx bufs, waiting %u ms ...\n",
+                 INSTANCE(pThis), timeoutMs));
+
+        int rc = PDMDevHlpSUPSemEventWaitNoResume(pDevIns,
+                                                  pThis->hEventRxDescAvail, timeoutMs);
+
+        if (rc == VERR_TIMEOUT || rc == VERR_INTERRUPTED)
+            continue;
+
+        if (RT_FAILURE(rc))
             RTThreadSleep(1);
-    }
+
+    } while (virtioNetAllSystemsGo(pThis, pDevIns));
+
     ASMAtomicXchgBool(&pThis->fLeafWantsRxBuffers, false);
…
 static bool virtioNetR3AddressFilter(PVIRTIONET pThis, const void *pvBuf, size_t cb)
 {
+    LogFunc(("\n"));
+
     if (pThis->fPromiscuous)
         return true;
…
     return false;
 }
-
-
-
 
 /**
…
  * @param   pvBuf           The available data.
  * @param   cb              Number of bytes available in the buffer.
+ * @param   pGso            Pointer to Global Segmentation Offload structure
+ * @param   idxQueue        Queue to work with
  * @thread  RX
  */
-
-/* static void virtioNetR3Receive(PPDMDEVINS pDevIns, PVIRTIONET pThis, PVIRTIONETCC pThisCC, uint16_t qIdx, PVIRTIO_DESC_CHAIN_T pDescChain)
-{
-    RT_NOREF5(pDevIns, pThis, pThisCC, qIdx, pDescChain);
-}
-*/
 static int virtioNetR3HandleRxPacket(PPDMDEVINS pDevIns, PVIRTIONET pThis, PVIRTIONETCC pThisCC,
-                                     const void *pvBuf, size_t cb, PCPDMNETWORKGSO pGso)
+                                     const void *pvBuf, size_t cb, PCPDMNETWORKGSO pGso, uint16_t idxQueue)
 {
     RT_NOREF(pThisCC);
 
+    LogFunc(("\n"));
     VIRTIONET_PKT_HDR_T rxPktHdr;
…
     {
         PVIRTIO_DESC_CHAIN_T pDescChain;
-        int rc = virtioCoreR3QueueGet(pDevIns, &pThis->Virtio, RXQIDX_QPAIR(0), &pDescChain, true);
+        int rc = virtioCoreR3QueueGet(pDevIns, &pThis->Virtio, RXQIDX_QPAIR(idxQueue), &pDescChain, true);
 
         AssertRC(rc == VINF_SUCCESS || rc == VERR_NOT_AVAILABLE);
…
          * Assert it to reduce complexity. Robust solution would entail finding seg idx and offset of
          * virtio_net_header.num_buffers (to update field *after* hdr & pkts copied to gcPhys) */
+
         AssertMsgReturn(pDescChain->pSgPhysReturn->paSegs[0].cbSeg >= sizeof(VIRTIONET_PKT_HDR_T),
                         ("Desc chain's first seg has insufficient space for pkt header!\n"),
…
             if (cSegs++ >= cSegsAllocated)
             {
-                cSegsAllocated <<= 1;
+                cSegsAllocated <<= 1; /* double the allocation size */
                 paVirtSegsToGuest = (PRTSGSEG)RTMemRealloc(paVirtSegsToGuest, sizeof(RTSGSEG) * cSegsAllocated);
                 AssertReturn(paVirtSegsToGuest, VERR_NO_MEMORY);
…
 
         /* Append remaining Rx pkt or as much current desc chain has room for */
-        uint32_t uboundedSize = RT_MIN(cb, cbDescChainLeft);
-        paVirtSegsToGuest[cSegs].cbSeg = uboundedSize;
+        uint32_t cbLim = RT_MIN(cb, cbDescChainLeft);
+        paVirtSegsToGuest[cSegs].cbSeg = cbLim;
         paVirtSegsToGuest[cSegs++].pvSeg = ((uint8_t *)pvBuf) + uOffset;
-        uOffset += uboundedSize;
+        uOffset += cbLim;
         cDescs++;
 
         RTSgBufInit(pVirtSegBufToGuest, paVirtSegsToGuest, cSegs);
 
-        virtioCoreR3QueuePut(pDevIns, &pThis->Virtio, RXQIDX_QPAIR(0),
+        virtioCoreR3QueuePut(pDevIns, &pThis->Virtio, RXQIDX_QPAIR(idxQueue),
                              pVirtSegBufToGuest, pDescChain, true);
…
                     rc);
 
-    virtioCoreQueueSync(pDevIns, &pThis->Virtio, RXQIDX_QPAIR(0));
+    virtioCoreQueueSync(pDevIns, &pThis->Virtio, RXQIDX_QPAIR(idxQueue));
 
     for (int i = 0; i < 2; i++)
…
     PPDMDEVINS pDevIns = pThisCC->pDevIns;
     PVIRTIONET pThis   = PDMDEVINS_2_DATA(pDevIns, PVIRTIONET);
+
+    LogFunc(("\n"));
+
+    if (!pThis->fVirtioReady)
+    {
+        LogRelFunc(("VirtIO not ready, aborting downstream receive\n"));
+        return VERR_INTERRUPTED;
+    }
+    if (pThis->fQuiescing)
+    {
+        LogRelFunc(("Quiescing I/O for suspend or power off, aborting downstream receive\n"));
+        return VERR_INTERRUPTED;
+    }
 
     if (pGso)
…
     }
 
-    Log2Func(("pvBuf=%p cb=%u pGso=%p\n", INSTANCE(pThis), pvBuf, cb, pGso));
-
-    int rc = virtioNetR3IsRxQueuePrimed(pDevIns, pThis);
-    if (RT_FAILURE(rc))
-        return rc;
-
-    /* Drop packets if VM is not running or cable is disconnected. */
-    VMSTATE enmVMState = PDMDevHlpVMState(pDevIns);
-    if ((   enmVMState != VMSTATE_RUNNING
-         && enmVMState != VMSTATE_RUNNING_LS)
-        || !(pThis->virtioNetConfig.uStatus & VIRTIONET_F_LINK_UP))
-        return VINF_SUCCESS;
-
-    virtioNetR3SetReadLed(pThisCC, true);
-    if (virtioNetR3AddressFilter(pThis, pvBuf, cb))
-    {
-        rc = virtioNetR3HandleRxPacket(pDevIns, pThis, pThisCC, pvBuf, cb, pGso);
-    }
-    virtioNetR3SetReadLed(pThisCC, false);
-    return rc;
+    Log2Func(("%s pvBuf=%p cb=%u pGso=%p\n", INSTANCE(pThis), pvBuf, cb, pGso));
+
+    /** @todo If we ever start using more than one Rx/Tx queue pair, is a random queue
+              selection algorithm feasible or even necessary to prevent starvation? */
+
+    for (int idxQueue = 0; idxQueue < pThis->cVirtQueues; idxQueue += 2) /* Skip odd queue #'s because Rx queues only */
+    {
+        if (RT_SUCCESS(!virtioNetR3IsRxQueuePrimed(pDevIns, pThis, idxQueue)))
+        {
+            /* Drop packets if VM is not running or cable is disconnected. */
+            if (!virtioNetAllSystemsGo(pThis, pDevIns) || !IS_LINK_UP(pThis))
+                return VINF_SUCCESS;
+
+            virtioNetR3SetReadLed(pThisCC, true);
+
+            int rc = VINF_SUCCESS;
+            if (virtioNetR3AddressFilter(pThis, pvBuf, cb))
+                rc = virtioNetR3HandleRxPacket(pDevIns, pThis, pThisCC, pvBuf, cb, pGso, idxQueue);
+
+            virtioNetR3SetReadLed(pThisCC, false);
+
+            return rc;
+        }
+    }
+    return VERR_INTERRUPTED;
 }
…
     return virtioNetR3NetworkDown_ReceiveGso(pInterface, pvBuf, cb, NULL);
 }
-
-
-
 
 /* Read physical bytes from the out segment(s) of descriptor chain */
…
 {
     uint8_t *pb = (uint8_t *)pv;
-    uint16_t cbMin = RT_MIN(pDescChain->cbPhysSend, cb);
-    while (cbMin)
-    {
-        size_t cbSeg = cbMin;
+    uint16_t cbLim = RT_MIN(pDescChain->cbPhysSend, cb);
+    while (cbLim)
+    {
+        size_t cbSeg = cbLim;
         RTGCPHYS GCPhys = virtioCoreSgBufGetNextSegment(pDescChain->pSgPhysSend, &cbSeg);
         PDMDevHlpPCIPhysRead(pDevIns, GCPhys, pb, cbSeg);
         pb += cbSeg;
-        cbMin -= cbSeg;
-    }
-    LogFunc(("Pulled %d bytes out of %d bytes requested from descriptor chain\n", cbMin, cb));
-}
-
+        cbLim -= cbSeg;
+        pDescChain->cbPhysSend -= cbSeg;
+    }
+    LogFunc(("Pulled %d / %d bytes from desc chain (%d bytes left in desc chain)\n",
+             cb - cbLim, cb, pDescChain->cbPhysSend));
+}
 
 static uint8_t virtioNetR3CtrlRx(PPDMDEVINS pDevIns, PVIRTIONET pThis, PVIRTIONETCC pThisCC,
…
 #define LOG_VIRTIONET_FLAG(fld) LogFunc(("%s = %d\n", #fld, pThis->fld))
 
-    LogFunc((" "));
+    LogFunc(("Processing CTRL Rx command\n"));
     switch(pCtrlPktHdr->uCmd)
     {
…
                              PVIRTIONET_CTRL_HDR_T pCtrlPktHdr, PVIRTIO_DESC_CHAIN_T pDescChain)
 {
-    RT_NOREF(pThisCC);
+    LogFunc(("Processing CTRL MAC command\n"));
+
+    RT_NOREF(pThisCC);
 
 #define ASSERT_CTRL_ADDR_SET(v) \
-    AssertMsgReturn((v), ("DESC chain too small to process CTRL_MAC_ADDR_SET cmd"), VIRTIONET_ERROR)
+    AssertMsgReturn((v), ("DESC chain too small to process CTRL_MAC_ADDR_SET cmd\n"), VIRTIONET_ERROR)
 
 #define ASSERT_CTRL_TABLE_SET(v) \
-    AssertMsgReturn((v), ("DESC chain too small to process CTRL_MAC_TABLE_SET cmd"), VIRTIONET_ERROR)
+    AssertMsgReturn((v), ("DESC chain too small to process CTRL_MAC_TABLE_SET cmd\n"), VIRTIONET_ERROR)
 
     AssertMsgReturn(pDescChain->cbPhysSend >= sizeof(*pCtrlPktHdr),
…
                     VIRTIONET_ERROR);
 
-    size_t cbRemaining = pDescChain->cbPhysSend - sizeof(*pCtrlPktHdr);
-
+    size_t cbRemaining = pDescChain->cbPhysSend;
+    Log6Func(("initial:cbRemaining=%d pDescChain->cbPhysSend=%d sizeof(*pCtrlPktHdr)=%d\n",
+              cbRemaining, pDescChain->cbPhysSend, sizeof(*pCtrlPktHdr)));
     switch(pCtrlPktHdr->uCmd)
     {
…
         virtioNetR3PullChain(pDevIns, pDescChain, &cMacs, sizeof(cMacs));
         cbRemaining -= sizeof(cMacs);
-        uint32_t cbMacs = cMacs * sizeof(RTMAC);
-        ASSERT_CTRL_TABLE_SET(cbRemaining >= cbMacs);
-        virtioNetR3PullChain(pDevIns, pDescChain, &pThis->aMacUnicastFilter, cbMacs);
-        cbRemaining -= cbMacs;
+        Log6Func(("Guest provided %d unicast MAC Table entries\n", cMacs));
+        if (cMacs)
+        {
+            uint32_t cbMacs = cMacs * sizeof(RTMAC);
+            ASSERT_CTRL_TABLE_SET(cbRemaining >= cbMacs);
+            virtioNetR3PullChain(pDevIns, pDescChain, &pThis->aMacUnicastFilter, cbMacs);
+            cbRemaining -= cbMacs;
+        }
         pThis->cUnicastFilterMacs = cMacs;
…
         virtioNetR3PullChain(pDevIns, pDescChain, &cMacs, sizeof(cMacs));
         cbRemaining -= sizeof(cMacs);
-        cbMacs = cMacs * sizeof(RTMAC);
-        ASSERT_CTRL_TABLE_SET(cbRemaining >= cbMacs);
-        virtioNetR3PullChain(pDevIns, pDescChain, &pThis->aMacMulticastFilter, cbMacs);
-        cbRemaining -= cbMacs;
+        Log6Func(("Guest provided %d multicast MAC Table entries\n", cMacs));
+        if (cMacs)
+        {
+            uint32_t cbMacs = cMacs * sizeof(RTMAC);
+            ASSERT_CTRL_TABLE_SET(cbRemaining >= cbMacs);
+            virtioNetR3PullChain(pDevIns, pDescChain, &pThis->aMacMulticastFilter, cbMacs);
+            cbRemaining -= cbMacs;
+        }
         pThis->cMulticastFilterMacs = cMacs;
…
                              PVIRTIONET_CTRL_HDR_T pCtrlPktHdr, PVIRTIO_DESC_CHAIN_T pDescChain)
 {
+    LogFunc(("Processing CTRL VLAN command\n"));
+
     RT_NOREF(pThisCC);
 
…
                            PVIRTIO_DESC_CHAIN_T pDescChain)
 {
-
-#define SIZEOF_SEND(descChain, ctrlHdr) RT_MIN(descChain->cbPhysSend, sizeof(ctrlHdr))
+    LogFunc(("Received CTRL packet from guest\n"));
 
     if (pDescChain->cbPhysSend < 2)
…
     AssertPtrReturnVoid(pCtrlPktHdr);
 
-    AssertMsgReturnVoid(pDescChain->cbPhysSend >= sizeof(*pCtrlPktHdr),
+    AssertMsgReturnVoid(pDescChain->cbPhysSend >= sizeof(VIRTIONET_CTRL_HDR_T),
                         ("DESC chain too small for CTRL pkt header"));
 
-    virtioNetR3PullChain(pDevIns, pDescChain, pCtrlPktHdr, SIZEOF_SEND(pDescChain, VIRTIONET_CTRL_HDR_T));
+    virtioNetR3PullChain(pDevIns, pDescChain, pCtrlPktHdr,
+                         RT_MIN(pDescChain->cbPhysSend, sizeof(VIRTIONET_CTRL_HDR_T)));
+
+    Log6Func(("CTRL pkt hdr: class=%d cmd=%d\n", pCtrlPktHdr->uClass, pCtrlPktHdr->uCmd));
 
     uint8_t uAck;
…
             uAck = virtioNetR3CtrlVlan(pDevIns, pThis, pThisCC, pCtrlPktHdr, pDescChain);
             break;
+        case VIRTIONET_CTRL_ANNOUNCE:
+            uAck = VIRTIONET_OK;
+            if (FEATURE_DISABLED(STATUS) || FEATURE_DISABLED(GUEST_ANNOUNCE))
+            {
+                LogFunc(("Ignoring CTRL class VIRTIONET_CTRL_ANNOUNCE. Not configured to handle it\n"));
+                virtioNetPrintFeatures(pThis, pThis->fNegotiatedFeatures, "Features");
+                break;
+            }
+            if (pCtrlPktHdr->uCmd != VIRTIONET_CTRL_ANNOUNCE_ACK)
+            {
+                LogFunc(("Ignoring CTRL class VIRTIONET_CTRL_ANNOUNCE. Unrecognized uCmd\n"));
+                break;
+            }
+            pThis->virtioNetConfig.uStatus &= ~VIRTIONET_F_ANNOUNCE;
+            Log6Func(("Clearing VIRTIONET_F_ANNOUNCE in config status\n"));
+            break;
+
         default:
+            LogRelFunc(("Unrecognized CTRL pkt hdr class (%d)\n", pCtrlPktHdr->uClass));
             uAck = VIRTIONET_ERROR;
     }
 
-    int cSegs = 2;
+    /* Currently CTRL pkt header just returns ack, but keeping segment logic generic/flexible
+     * in case that changes to make adapting more straightforward */
+    int cSegs = 1;
 
     /* Return CTRL packet Ack byte (result code) to guest driver */
-    PRTSGSEG paSegs = (PRTSGSEG)RTMemAllocZ(sizeof(RTSGSEG) * cSegs);
-    AssertMsgReturnVoid(paSegs, ("Out of memory"));
-
-    RTSGSEG aSegs[] = { { &uAck, sizeof(uAck) } };
-    memcpy(paSegs, aSegs, sizeof(aSegs));
-
-    PRTSGBUF pSegBuf = (PRTSGBUF)RTMemAllocZ(sizeof(RTSGBUF));
-    AssertMsgReturnVoid(pSegBuf, ("Out of memory"));
-
+    PRTSGSEG paReturnSegs = (PRTSGSEG)RTMemAllocZ(sizeof(RTSGSEG));
+    AssertMsgReturnVoid(paReturnSegs, ("Out of memory"));
+
+    RTSGSEG aStaticSegs[] = { { &uAck, sizeof(uAck) } };
+    memcpy(paReturnSegs, aStaticSegs, sizeof(RTSGSEG));
+
+    PRTSGBUF pReturnSegBuf = (PRTSGBUF)RTMemAllocZ(sizeof(RTSGBUF));
+    AssertMsgReturnVoid(pReturnSegBuf, ("Out of memory"));
 
     /* Copy segment data to malloc'd memory to avoid stack out-of-scope errors sanitizer doesn't detect */
     for (int i = 0; i < cSegs; i++)
     {
-        void *pv = paSegs[i].pvSeg;
-        paSegs[i].pvSeg = RTMemAlloc(paSegs[i].cbSeg);
-        AssertMsgReturnVoid(paSegs[i].pvSeg, ("Out of memory"));
-        memcpy(paSegs[i].pvSeg, pv, paSegs[i].cbSeg);
-    }
-
-    RTSgBufInit(pSegBuf, paSegs, cSegs);
-
-    virtioCoreR3QueuePut(pDevIns, &pThis->Virtio, CTRLQIDX, pSegBuf, pDescChain, true);
+        void *pv = paReturnSegs[i].pvSeg;
+        paReturnSegs[i].pvSeg = RTMemAlloc(aStaticSegs[i].cbSeg);
+        AssertMsgReturnVoid(paReturnSegs[i].pvSeg, ("Out of memory"));
+        memcpy(paReturnSegs[i].pvSeg, pv, aStaticSegs[i].cbSeg);
+    }
+
+    RTSgBufInit(pReturnSegBuf, paReturnSegs, cSegs);
+
+    virtioCoreR3QueuePut(pDevIns, &pThis->Virtio, CTRLQIDX, pReturnSegBuf, pDescChain, true);
     virtioCoreQueueSync(pDevIns, &pThis->Virtio, CTRLQIDX);
 
     for (int i = 0; i < cSegs; i++)
-        RTMemFree(paSegs[i].pvSeg);
-
-    RTMemFree(paSegs);
-    RTMemFree(pSegBuf);
-
-    LogFunc(("Processed ctrl message class/cmd/subcmd = %u/%u/%u. Ack=%u.\n",
-             pCtrlPktHdr->uClass, pCtrlPktHdr->uCmd, pCtrlPktHdr->uCmdSpecific, uAck));
+        RTMemFree(paReturnSegs[i].pvSeg);
+
+    RTMemFree(paReturnSegs);
+    RTMemFree(pReturnSegBuf);
+
+    LogFunc(("Processed ctrl message class/cmd = %u/%u. Ack=%u.\n",
+             pCtrlPktHdr->uClass, pCtrlPktHdr->uCmd, uAck));
 
 }
…
 
 static void virtioNetR3TransmitPendingPackets(PPDMDEVINS pDevIns, PVIRTIONET pThis, PVIRTIONETCC pThisCC,
-                                              uint16_t qIdx, bool fOnWorkerThread)
+                                              uint16_t idxQueue, bool fOnWorkerThread)
 {
     PVIRTIOCORE pVirtio = &pThis->Virtio;
…
 
     Log3Func(("%s: About to transmit %d pending packets\n", INSTANCE(pThis),
-              virtioCoreR3QueuePendingCount(pVirtio->pDevIns, pVirtio, TXQIDX_QPAIR(0))));
+              virtioCoreR3QueuePendingCount(pVirtio->pDevIns, pVirtio, idxQueue)));
 
     virtioNetR3SetWriteLed(pThisCC, true);
 
     int rc;
     PVIRTIO_DESC_CHAIN_T pDescChain;
-    while (virtioCoreR3QueuePeek(pVirtio->pDevIns, pVirtio, TXQIDX_QPAIR(0), &pDescChain))
-    {
+    while ((rc = virtioCoreR3QueuePeek(pVirtio->pDevIns, pVirtio, idxQueue, &pDescChain)))
+    {
+        if (RT_SUCCESS(rc))
+            Log6Func(("fetched descriptor chain from %s\n", VIRTQNAME(idxQueue)));
+        else
+        {
+            LogFunc(("Failed find expected data on %s, rc = %Rrc\n", VIRTQNAME(idxQueue), rc));
+            break;
+        }
+
         uint32_t cSegsFromGuest = pDescChain->pSgPhysSend->cSegs;
         PVIRTIOSGSEG paSegsFromGuest = pDescChain->pSgPhysSend->paSegs;
-
-        Log6Func(("fetched descriptor chain from %s\n", VIRTQNAME(qIdx)));
 
         if (cSegsFromGuest < 2 || paSegsFromGuest[0].cbSeg != cbPktHdr)
…
             /** @todo Optimize away the extra copying! (lazy bird) */
             PPDMSCATTERGATHER pSgBufToPdmLeafDevice;
-            int rc = pThisCC->pDrv->pfnAllocBuf(pThisCC->pDrv, uSize, pGso, &pSgBufToPdmLeafDevice);
+            rc = pThisCC->pDrv->pfnAllocBuf(pThisCC->pDrv, uSize, pGso, &pSgBufToPdmLeafDevice);
             if (RT_SUCCESS(rc))
             {
…
                 }
                 rc = virtioNetR3TransmitFrame(pThis, pThisCC, pSgBufToPdmLeafDevice, pGso, &PktHdr);
+                if (RT_FAILURE(rc))
+                {
+                    LogFunc(("Failed to transmit frame, rc = %Rrc\n", rc));
+                    pThisCC->pDrv->pfnFreeBuf(pThisCC->pDrv, pSgBufToPdmLeafDevice);
+                }
             }
             else
             {
-                Log4Func(("Failed to allocate SG buffer: size=%u rc=%Rrc\n", uSize, rc));
+                Log4Func(("Failed to allocate S/G buffer: size=%u rc=%Rrc\n", uSize, rc));
                 /* Stop trying to fetch TX descriptors until we get more bandwidth. */
                 break;
             }
         }
-
         /* Remove this descriptor chain from the available ring */
-        virtioCoreR3QueueSkip(pVirtio, TXQIDX_QPAIR(0));
+        virtioCoreR3QueueSkip(pVirtio, idxQueue);
 
         /* No data to return to guest, but call is needed put elem (e.g. desc chain) on used ring */
-        virtioCoreR3QueuePut(pVirtio->pDevIns, pVirtio, TXQIDX_QPAIR(0), NULL, pDescChain, false);
-
-        virtioCoreQueueSync(pVirtio->pDevIns, pVirtio, TXQIDX_QPAIR(0));
+        virtioCoreR3QueuePut(pVirtio->pDevIns, pVirtio, idxQueue, NULL, pDescChain, false);
+
+        virtioCoreQueueSync(pVirtio->pDevIns, pVirtio, idxQueue);
 
     }
…
     if (pDrv)
         pDrv->pfnEndXmit(pDrv);
+
     ASMAtomicWriteU32(&pThis->uIsTransmitting, 0);
 }
…
     PPDMDEVINS pDevIns = pThisCC->pDevIns;
     PVIRTIONET pThis   = PDMDEVINS_2_DATA(pThisCC->pDevIns, PVIRTIONET);
+
+    /** @todo If we ever start using more than one Rx/Tx queue pair, is a random queue
+              selection algorithm feasible or even necessary */
+
     virtioNetR3TransmitPendingPackets(pDevIns, pThis, pThisCC, TXQIDX_QPAIR(0), false /*fOnWorkerThread*/);
 }
…
  * @callback_method_impl{VIRTIOCORER3,pfnQueueNotified}
  */
-static DECLCALLBACK(void) virtioNetR3QueueNotified(PVIRTIOCORE pVirtio, PVIRTIOCORECC pVirtioCC, uint16_t qIdx)
+static DECLCALLBACK(void) virtioNetR3QueueNotified(PVIRTIOCORE pVirtio, PVIRTIOCORECC pVirtioCC, uint16_t idxQueue)
 {
     PVIRTIONET pThis     = RT_FROM_MEMBER(pVirtio, VIRTIONET, Virtio);
     PVIRTIONETCC pThisCC = RT_FROM_MEMBER(pVirtioCC, VIRTIONETCC, Virtio);
     PPDMDEVINS pDevIns   = pThisCC->pDevIns;
-    PVIRTIONETWORKER pWorker = &pThis->aWorkers[qIdx];
-    PVIRTIONETWORKERR3 pWorkerR3 = &pThisCC->aWorkers[qIdx];
-    AssertReturnVoid(qIdx < pThis->cVirtQueues);
+    PVIRTIONETWORKER pWorker = &pThis->aWorkers[idxQueue];
+    PVIRTIONETWORKERR3 pWorkerR3 = &pThisCC->aWorkers[idxQueue];
+    AssertReturnVoid(idxQueue < pThis->cVirtQueues);
 
 #ifdef LOG_ENABLED
…
 #endif
 
-    Log6Func(("%s has available buffers\n", VIRTQNAME(qIdx)));
-
-    if (IS_RX_QUEUE(qIdx))
+    Log6Func(("%s has available buffers\n", VIRTQNAME(idxQueue)));
+
+    if (IS_RX_QUEUE(idxQueue))
     {
         LogFunc(("%s Receive buffers has been added, waking up receive thread.\n",
…
         if (ASMAtomicReadBool(&pWorkerR3->fSleeping))
         {
-            Log6Func(("waking %s worker.\n", VIRTQNAME(qIdx)));
+            Log6Func(("waking %s worker.\n", VIRTQNAME(idxQueue)));
             int rc = PDMDevHlpSUPSemEventSignal(pDevIns, pWorker->hEvtProcess);
             AssertRC(rc);
…
 static DECLCALLBACK(int) virtioNetR3WorkerThread(PPDMDEVINS pDevIns, PPDMTHREAD pThread)
 {
-    uint16_t const qIdx      = (uint16_t)(uintptr_t)pThread->pvUser;
+    uint16_t const idxQueue  = (uint16_t)(uintptr_t)pThread->pvUser;
     PVIRTIONET pThis         = PDMDEVINS_2_DATA(pDevIns, PVIRTIONET);
     PVIRTIONETCC pThisCC     = PDMDEVINS_2_DATA_CC(pDevIns, PVIRTIONETCC);
-    PVIRTIONETWORKER pWorker = &pThis->aWorkers[qIdx];
-    PVIRTIONETWORKERR3 pWorkerR3 = &pThisCC->aWorkers[qIdx];
-
+    PVIRTIONETWORKER pWorker = &pThis->aWorkers[idxQueue];
+    PVIRTIONETWORKERR3 pWorkerR3 = &pThisCC->aWorkers[idxQueue];
     if (pThread->enmState == PDMTHREADSTATE_INITIALIZING)
+    {
         return VINF_SUCCESS;
-
+    }
+    LogFunc(("%s\n", VIRTQNAME(idxQueue)));
     while (pThread->enmState == PDMTHREADSTATE_RUNNING)
     {
-
-        virtioCoreQueueSetNotify(&pThis->Virtio, qIdx, true);
-
-        if (virtioCoreQueueIsEmpty(pDevIns, &pThis->Virtio, qIdx))
+        virtioCoreQueueSetNotify(&pThis->Virtio, idxQueue, true);
+
+        if (virtioCoreQueueIsEmpty(pDevIns, &pThis->Virtio, idxQueue))
         {
             /* Atomic interlocks avoid missing alarm while going to sleep & notifier waking the awoken */
…
             if (!fNotificationSent)
             {
-                Log6Func(("%s worker sleeping...\n", VIRTQNAME(qIdx)));
+                Log6Func(("%s worker sleeping...\n", VIRTQNAME(idxQueue)));
                 Assert(ASMAtomicReadBool(&pWorkerR3->fSleeping));
                 int rc = PDMDevHlpSUPSemEventWaitNoResume(pDevIns, pWorker->hEvtProcess, RT_INDEFINITE_WAIT);
…
                 if (rc == VERR_INTERRUPTED)
                 {
-                    virtioCoreQueueSetNotify(&pThis->Virtio, qIdx, false);
+                    virtioCoreQueueSetNotify(&pThis->Virtio, idxQueue, false);
                     continue;
                 }
-                Log6Func(("%s worker woken\n", VIRTQNAME(qIdx)));
+                Log6Func(("%s worker woken\n", VIRTQNAME(idxQueue)));
                 ASMAtomicWriteBool(&pWorkerR3->fNotified, false);
             }
             ASMAtomicWriteBool(&pWorkerR3->fSleeping, false);
         }
-
-        virtioCoreQueueSetNotify(&pThis->Virtio, qIdx, false);
-
-        if (!pThis->afQueueAttached[qIdx])
-        {
-            LogFunc(("%s queue not attached, worker aborting...\n", VIRTQNAME(qIdx)));
-            break;
-        }
+        virtioCoreQueueSetNotify(&pThis->Virtio, idxQueue, false);
 
         /* Dispatch to the handler for the queue this worker is set up to drive */
…
         if (!pThisCC->fQuiescing)
         {
-            if (IS_CTRL_QUEUE(qIdx))
+            if (IS_CTRL_QUEUE(idxQueue))
             {
-                Log6Func(("fetching next descriptor chain from %s\n", VIRTQNAME(qIdx)));
+                Log6Func(("fetching next descriptor chain from %s\n", VIRTQNAME(idxQueue)));
                 PVIRTIO_DESC_CHAIN_T pDescChain;
-                int rc = virtioCoreR3QueueGet(pDevIns, &pThis->Virtio, qIdx, &pDescChain, true);
+                int rc = virtioCoreR3QueueGet(pDevIns, &pThis->Virtio, idxQueue, &pDescChain, true);
                 if (rc == VERR_NOT_AVAILABLE)
                 {
-                    Log6Func(("Nothing found in %s\n", VIRTQNAME(qIdx)));
+                    Log6Func(("Nothing found in %s\n", VIRTQNAME(idxQueue)));
                     continue;
                 }
                 virtioNetR3Ctrl(pDevIns, pThis, pThisCC, pDescChain);
             }
-            else if (IS_TX_QUEUE(qIdx))
+            else if (IS_TX_QUEUE(idxQueue))
             {
                 Log6Func(("Notified of data to transmit\n"));
                 virtioNetR3TransmitPendingPackets(pDevIns, pThis, pThisCC,
-                                                  qIdx, true /* fOnWorkerThread */);
+                                                  idxQueue, true /* fOnWorkerThread */);
             }
+
             /* Rx queues aren't handled by our worker threads. Instead, the PDM network
              * leaf driver invokes PDMINETWORKDOWN.pfnWaitReceiveAvail() callback,
…
 
     LogFunc(("%s: Link is up\n", INSTANCE(pThis)));
+
     if (pThisCC->pDrv)
         pThisCC->pDrv->pfnNotifyLinkChanged(pThisCC->pDrv, PDMNETWORKLINKSTATE_UP);
…
     PVIRTIONET pThis = PDMDEVINS_2_DATA(pDevIns, PVIRTIONET);
 
-    bool fOldUp = !!(pThis->virtioNetConfig.uStatus & VIRTIONET_F_LINK_UP);
-    bool fNewUp = enmState == PDMNETWORKLINKSTATE_UP;
-
-    Log(("%s virtioNetR3NetworkConfig_SetLinkState: enmState=%d\n", INSTANCE(pThis), enmState));
+    bool fCachedLinkIsUp = IS_LINK_UP(pThis);
+    bool fActiveLinkIsUp = (enmState == PDMNETWORKLINKSTATE_UP);
+
+    LogFunc(("%s: enmState=%d\n", INSTANCE(pThis), enmState));
     if (enmState == PDMNETWORKLINKSTATE_DOWN_RESUME)
     {
-        if (fOldUp)
+        if (fCachedLinkIsUp)
         {
             /*
…
         }
     }
-    else if (fNewUp != fOldUp)
-    {
-        if (fNewUp)
+    else if (fActiveLinkIsUp != fCachedLinkIsUp)
+    {
+        if (fCachedLinkIsUp)
         {
             Log(("%s Link is up\n", INSTANCE(pThis)));
             pThis->fCableConnected = true;
-            pThis->virtioNetConfig.uStatus |= VIRTIONET_F_LINK_UP;
+            SET_LINK_UP(pThis);
             virtioCoreNotifyConfigChanged(&pThis->Virtio);
         }
-        else
+        else /* cached Link state is down */
         {
             /* The link was brought down explicitly, make sure it won't come up by timer.  */
…
             Log(("%s Link is down\n", INSTANCE(pThis)));
             pThis->fCableConnected = false;
-            pThis->virtioNetConfig.uStatus &= ~VIRTIONET_F_LINK_UP;
+            SET_LINK_DOWN(pThis);
             virtioCoreNotifyConfigChanged(&pThis->Virtio);
         }
…
 }
 
+static int virtioNetR3DestroyWorkerThreads(PPDMDEVINS pDevIns, PVIRTIONET pThis, PVIRTIONETCC pThisCC)
+{
+    LogFunc(("\n"));
+    int rc = VINF_SUCCESS;
+    for (unsigned idxQueue = 0; idxQueue < pThis->cVirtQueues; idxQueue++)
+    {
+        PVIRTIONETWORKER pWorker = &pThis->aWorkers[idxQueue];
+        if (pWorker->hEvtProcess != NIL_SUPSEMEVENT)
+        {
+            PDMDevHlpSUPSemEventClose(pDevIns, pWorker->hEvtProcess);
+            pWorker->hEvtProcess = NIL_SUPSEMEVENT;
+        }
+        if (pThisCC->aWorkers[idxQueue].pThread)
+        {
+            int rcThread;
+            rc = PDMDevHlpThreadDestroy(pDevIns, pThisCC->aWorkers[idxQueue].pThread, &rcThread);
+            if (RT_FAILURE(rc) || RT_FAILURE(rcThread))
+                AssertMsgFailed(("%s Failed to destroythread rc=%Rrc rcThread=%Rrc\n", __FUNCTION__, rc, rcThread));
+            pThisCC->aWorkers[idxQueue].pThread = NULL;
+        }
+    }
+    return rc;
+}
+
+static int virtioNetR3CreateWorkerThreads(PPDMDEVINS pDevIns, PVIRTIONET pThis, PVIRTIONETCC pThisCC)
+{
+    LogFunc(("\n"));
+
+    int rc = VINF_SUCCESS;
+    /* Attach the queues and create worker threads for them: */
+    for (uint16_t idxQueue = 1; idxQueue < pThis->cVirtQueues; idxQueue++)
+    {
+        /* Skip creating threads for receive queues, only create for transmit queues & control queue */
+        if (IS_RX_QUEUE(idxQueue))
+            continue;
+
+        rc = PDMDevHlpSUPSemEventCreate(pDevIns, &pThis->aWorkers[idxQueue].hEvtProcess);
+
+        if (RT_FAILURE(rc))
+            return PDMDevHlpVMSetError(pDevIns, rc, RT_SRC_POS,
+                                       N_("DevVirtioNET: Failed to create SUP event semaphore"));
+
+        rc = PDMDevHlpThreadCreate(pDevIns, &pThisCC->aWorkers[idxQueue].pThread,
+                                   (void *)(uintptr_t)idxQueue, virtioNetR3WorkerThread,
+                                   virtioNetR3WakeupWorker, 0, RTTHREADTYPE_IO, VIRTQNAME(idxQueue));
+        if (rc != VINF_SUCCESS)
+        {
+            LogRel(("Error creating thread for Virtual Queue %s: %Rrc\n", VIRTQNAME(idxQueue), rc));
+            return rc;
+        }
+
+        pThis->afQueueAttached[idxQueue] = true;
+    }
+    return rc;
+}
 
 /**
…
     PVIRTIONETCC pThisCC = RT_FROM_MEMBER(pVirtioCC, VIRTIONETCC, Virtio);
 
-    LogFunc((" "));
+    LogFunc(("\n"));
 
     pThis->fVirtioReady = fVirtioReady;
…
     {
         LogFunc(("VirtIO ready\n-----------------------------------------------------------------------------------------\n"));
-        // uint64_t fFeatures = virtioCoreGetNegotiatedFeatures(pThis->Virtio);
+
         pThis->fResetting = false;
         pThisCC->fQuiescing = false;
-
-        for (unsigned i = 0; i < VIRTIONET_MAX_QUEUES; i++)
-            pThis->afQueueAttached[i] = true;
+        pThis->fNegotiatedFeatures = virtioCoreGetAcceptedFeatures(pVirtio);
+        for (unsigned idxQueue = 0; idxQueue < pThis->cVirtQueues; idxQueue++)
+        {
+            (void) virtioCoreR3QueueAttach(&pThis->Virtio, idxQueue, VIRTQNAME(idxQueue));
+            pThis->afQueueAttached[idxQueue] = true;
+            virtioCoreQueueSetNotify(&pThis->Virtio, idxQueue, true);
+        }
     }
     else
…
         pThisCC->pDrv->pfnSetPromiscuousMode(pThisCC->pDrv, true);
 
-        for (unsigned i = 0; i < VIRTIONET_MAX_QUEUES; i++)
-            pThis->afQueueAttached[i] = false;
+        for (unsigned idxQueue = 0; idxQueue < pThis->cVirtQueues; idxQueue++)
+            pThis->afQueueAttached[idxQueue] = false;
     }
 }
…
     PVIRTIONETCC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PVIRTIONETCC);
 
-    LogFunc((" "));
+    LogFunc(("\n"));
     AssertLogRelReturnVoid(iLUN == 0);
…
 
     RT_NOREF(fFlags);
+
     LogFunc(("%s", INSTANCE(pThis)));
…
     PVIRTIONETCC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PVIRTIONETCC);
 
-    for (unsigned qIdx = 0; qIdx < pThis->cVirtQueues; qIdx++)
-    {
-        PVIRTIONETWORKER pWorker = &pThis->aWorkers[qIdx];
-        if (pWorker->hEvtProcess != NIL_SUPSEMEVENT)
-        {
-            PDMDevHlpSUPSemEventClose(pDevIns, pWorker->hEvtProcess);
-            pWorker->hEvtProcess = NIL_SUPSEMEVENT;
-        }
-        if (pThisCC->aWorkers[qIdx].pThread)
-        {
-            /* Destroy the thread. */
-            int rcThread;
-            int rc = PDMDevHlpThreadDestroy(pDevIns, pThisCC->aWorkers[qIdx].pThread, &rcThread);
-            if (RT_FAILURE(rc) || RT_FAILURE(rcThread))
-                AssertMsgFailed(("%s Failed to destroythread rc=%Rrc rcThread=%Rrc\n", __FUNCTION__, rc, rcThread));
-            pThisCC->aWorkers[qIdx].pThread = NULL;
-        }
-    }
+    Log(("%s Destroying instance\n", INSTANCE(pThis)));
+
+    if (pThis->hEventRxDescAvail != NIL_SUPSEMEVENT)
+    {
+        PDMDevHlpSUPSemEventSignal(pDevIns, pThis->hEventRxDescAvail);
+        PDMDevHlpSUPSemEventClose(pDevIns, pThis->hEventRxDescAvail);
+        pThis->hEventRxDescAvail = NIL_SUPSEMEVENT;
+    }
+
+    virtioNetR3DestroyWorkerThreads(pDevIns, pThis, pThisCC);
 
     virtioCoreR3Term(pDevIns, &pThis->Virtio, &pThisCC->Virtio);
+
     return VINF_SUCCESS;
 }
…
     VirtioPciParams.uInterruptPin            = 0x01;
 
+    /*
+     * Initialize VirtIO core. This will result in a "status changed" callback
+     * when VirtIO is ready, at which time the Rx queue and ctrl queue worker threads will be created.
+     */
     rc = virtioCoreR3Init(pDevIns, &pThis->Virtio, &pThisCC->Virtio, &VirtioPciParams, INSTANCE(pThis),
                           VIRTIONET_HOST_FEATURES_OFFERED,
…
     pThis->cVirtqPairs =   pThis->fNegotiatedFeatures & VIRTIONET_F_MQ
                          ? pThis->virtioNetConfig.uMaxVirtqPairs : 1;
-    pThis->cVirtQueues += pThis->cVirtqPairs * 2;
+
+    pThis->cVirtQueues += pThis->cVirtqPairs * 2 + 1;
 
     /* Create Link Up Timer */
…
     virtioNetR3SetVirtqNames(pThis);
 
-    /* Attach the queues and create worker threads for them: */
-    for (uint16_t qIdx = 0; qIdx < pThis->cVirtQueues + 1; qIdx++)
-    {
-
-        rc = virtioCoreR3QueueAttach(&pThis->Virtio, qIdx, VIRTQNAME(qIdx));
-        if (RT_FAILURE(rc))
-        {
-            pThis->afQueueAttached[qIdx] = true;
-            continue;
-        }
-
-        /* Skip creating threads for receive queues, only create for transmit queues & control queue */
-        if (IS_RX_QUEUE(qIdx))
-            continue;
-
-        rc = PDMDevHlpThreadCreate(pDevIns, &pThisCC->aWorkers[qIdx].pThread,
-                                   (void *)(uintptr_t)qIdx, virtioNetR3WorkerThread,
-                                   virtioNetR3WakeupWorker, 0, RTTHREADTYPE_IO, VIRTQNAME(qIdx));
-        if (rc != VINF_SUCCESS)
-        {
-            LogRel(("Error creating thread for Virtual Queue %s: %Rrc\n", VIRTQNAME(qIdx), rc));
-            return rc;
-        }
-
-        rc = PDMDevHlpSUPSemEventCreate(pDevIns, &pThis->aWorkers[qIdx].hEvtProcess);
-        if (RT_FAILURE(rc))
-            return PDMDevHlpVMSetError(pDevIns, rc, RT_SRC_POS,
-                                       N_("DevVirtioNET: Failed to create SUP event semaphore"));
-        pThis->afQueueAttached[qIdx] = true;
-    }
-
     /*
-     * Status driver (optional).
+     * Create queue workers for life of instance. (I.e. they persist through VirtIO bounces)
+     */
+    rc = virtioNetR3CreateWorkerThreads(pDevIns, pThis, pThisCC);
+    if (RT_FAILURE(rc))
+        return PDMDEV_SET_ERROR(pDevIns, rc, N_("Failed to worker threads"));
+
+    /*
+     * Create the semaphore that will be used to synchronize/throttle
+     * the downstream LUN's Rx waiter thread.
+     */
+    rc = PDMDevHlpSUPSemEventCreate(pDevIns, &pThis->hEventRxDescAvail);
+    if (RT_FAILURE(rc))
+        return PDMDEV_SET_ERROR(pDevIns, rc, N_("Failed to create event semaphore"));
+
+    /*
+     * Attach network driver instance
+     */
+    rc = PDMDevHlpDriverAttach(pDevIns, 0, &pThisCC->IBase, &pThisCC->pDrvBase, "Network Port");
+    if (RT_SUCCESS(rc))
+    {
+        pThisCC->pDrv = PDMIBASE_QUERY_INTERFACE(pThisCC->pDrvBase, PDMINETWORKUP);
+        AssertMsgStmt(pThisCC->pDrv, ("Failed to obtain the PDMINETWORKUP interface!\n"),
+                      rc = VERR_PDM_MISSING_INTERFACE_BELOW);
+    }
+    else if (   rc == VERR_PDM_NO_ATTACHED_DRIVER
+             || rc == VERR_PDM_CFG_MISSING_DRIVER_NAME)
+        Log(("%s No attached driver!\n", INSTANCE(pThis)));
+
+    /*
+     * Status driver
      */
     PPDMIBASE pUpBase;
…
     if (RT_FAILURE(rc) && rc != VERR_PDM_NO_ATTACHED_DRIVER)
         return PDMDEV_SET_ERROR(pDevIns, rc, N_("Failed to attach the status LUN"));
+
     pThisCC->pLedsConnector = PDMIBASE_QUERY_INTERFACE(pUpBase, PDMILEDCONNECTORS);
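
The recurring theme of this revision is that the receive path no longer hard-codes virtqueue 0: Rx-buffer availability is checked per queue, the wait/receive loops walk the even-numbered queue indices (the Rx queues), and worker threads are created only for Tx and control queues. The following small, self-contained C sketch is not VirtualBox code; it merely mirrors the queue-index layout implied by the RXQIDX_QPAIR/TXQIDX_QPAIR/CTRLQIDX macros and the even-index Rx scan, with the pair count and the printed output made up purely for illustration.

    #include <stdio.h>

    /* Mirrors the device's layout macros: Rx = 2*pair, Tx = 2*pair + 1, control queue last. */
    #define MAX_QPAIRS              1
    #define MAX_QUEUES              (MAX_QPAIRS * 2 + 1)
    #define RXQIDX_QPAIR(qPairIdx)  ((qPairIdx) * 2)
    #define TXQIDX_QPAIR(qPairIdx)  ((qPairIdx) * 2 + 1)
    #define CTRLQIDX                (MAX_QPAIRS * 2)
    #define IS_RX_QUEUE(idx)        ((idx) != CTRLQIDX && ((idx) & 1) == 0)

    int main(void)
    {
        /* Show how queue indices map onto Rx/Tx pairs plus the control queue. */
        for (int pair = 0; pair < MAX_QPAIRS; pair++)
            printf("pair %d: rx queue index %d, tx queue index %d\n",
                   pair, RXQIDX_QPAIR(pair), TXQIDX_QPAIR(pair));
        printf("control queue index %d\n", CTRLQIDX);

        /* The Rx-buffer wait loop in this changeset only visits even (receive) indices. */
        for (int idxQueue = 0; idxQueue < MAX_QUEUES; idxQueue += 2)
        {
            if (!IS_RX_QUEUE(idxQueue))
                continue;
            printf("would poll rx queue %d for available guest buffers\n", idxQueue);
        }
        return 0;
    }

With VIRTIONET_MAX_QPAIRS reduced to 1 in this changeset, the scan visits only queue 0, but the same loop shape generalizes if multi-queue (VIRTIONET_F_MQ) support is enabled later.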