Changeset 91703 in vbox for trunk/src/VBox/Devices
- Timestamp: Oct 13, 2021 2:24:30 AM
- svn:sync-xref-src-repo-rev: 147437
- Location: trunk/src/VBox/Devices
- Files: 4 edited
trunk/src/VBox/Devices/Network/DevVirtioNet_1_0.cpp
r90931 r91703 119 119 */ 120 120 121 #define FEATURE_ENABLED(feature) RT_BOOL( pThis->fNegotiatedFeatures & VIRTIONET_F_##feature)121 #define FEATURE_ENABLED(feature) RT_BOOL(!!(pThis->fNegotiatedFeatures & VIRTIONET_F_##feature)) 122 122 #define FEATURE_DISABLED(feature) (!FEATURE_ENABLED(feature)) 123 123 #define FEATURE_OFFERED(feature) VIRTIONET_HOST_FEATURES_OFFERED & VIRTIONET_F_##feature … … 165 165 (virtioCoreVirtqAvailBufCount(pDevIns, pVirtio, uVirtqNbr) == 0) 166 166 167 #define PCI_DEVICE_ID_VIRTIONET_HOST 0x1041 /**< Informs guest driver of type of VirtIO device */ 168 #define PCI_CLASS_BASE_NETWORK_CONTROLLER 0x02 /**< PCI Network device class */ 167 168 #define PCI_DEVICE_ID_VIRTIONET_HOST 0x1000 /**< VirtIO transitional device ID for network card */ 169 #define PCI_CLASS_BASE_NETWORK_CONTROLLER 0x0200 /**< PCI Network device class */ 169 170 #define PCI_CLASS_SUB_NET_ETHERNET_CONTROLLER 0x00 /**< PCI NET Controller subclass */ 170 171 #define PCI_CLASS_PROG_UNSPECIFIED 0x00 /**< Programming interface. N/A. */ 171 172 #define VIRTIONET_PCI_CLASS 0x01 /**< Base class Mass Storage? */ 172 173 174 /*********************************************************************************************************************************175 * Structures and Typedefs *176 *********************************************************************************************************************************/177 173 178 174 /** … … 373 369 374 370 /** Number of virtqueues total (which includes each queue of each pair plus one control queue */ 375 uint16_t cVirt Virtqs;371 uint16_t cVirtqs; 376 372 377 373 /** Number of worker threads (one for the control queue and one for each Tx queue) */ … … 444 440 bool fCableConnected; 445 441 442 /** True if guest has not reported modern virtio driver */ 443 bool fIsLegacy; 446 444 /** @name Statistic 447 445 * @{ */ … … 553 551 static int virtioNetR3CreateWorkerThreads(PPDMDEVINS, PVIRTIONET, PVIRTIONETCC); 554 552 553 typedef enum VIRTIONETPKTHDRTYPE 554 { 555 kVirtioNetModernPktHdr_1_0 = 0, 556 kVirtioNetLegacyPktHdr = 1, 557 kVirtioNetLegacyPktHdrWithoutMrgRx = 2, 558 kVirtioNetFor32BitHack = 0x7fffffff 559 } VIRTIONETPKTHDRTYPE; 560 561 DECLINLINE(int) virtioNetPktHdrType(PVIRTIOCORE pVirtio, PVIRTIONET pThis) 562 { 563 if (!virtioCoreIsLegacyMode(pVirtio)) 564 return kVirtioNetModernPktHdr_1_0; 565 else /* legacy mode */ 566 if (FEATURE_ENABLED(MRG_RXBUF)) 567 return kVirtioNetLegacyPktHdrWithoutMrgRx; 568 return kVirtioNetLegacyPktHdr; 569 } 570 571 DECLINLINE(size_t) virtioNetCalcPktHdrSize(PVIRTIOCORE pVirtio, PVIRTIONET pThis) 572 { 573 size_t cbHdr = sizeof(VIRTIONETPKTHDR); 574 if (virtioCoreIsLegacyMode(pVirtio) & !FEATURE_ENABLED(MRG_RXBUF)) 575 cbHdr -= RT_SIZEOFMEMB(VIRTIONETPKTHDR, uNumBuffers); 576 return cbHdr; 577 } 578 555 579 DECLINLINE(const char *) virtioNetThreadStateName(PPDMTHREAD pThread) 556 580 { … … 594 618 if (pThis->hEventRxDescAvail != NIL_SUPSEMEVENT) 595 619 { 596 Log10Func((" %sWaking downstream device's Rx buf waiter thread\n", pThis->szInst));620 Log10Func(("[%s] Waking downstream device's Rx buf waiter thread\n", pThis->szInst)); 597 621 int rc = PDMDevHlpSUPSemEventSignal(pDevIns, pThis->hEventRxDescAvail); 598 622 AssertRC(rc); … … 600 624 } 601 625 602 603 604 626 /** 605 627 * @callback_method_impl{VIRTIOCORER0,pfnVirtqNotified} … … 614 636 615 637 #if defined (IN_RING3) && defined (LOG_ENABLED) 616 617 RTLogFlush(NULL); 618 638 RTLogFlush(NULL); 619 639 #endif 620 621 640 if (IS_RX_VIRTQ(uVirtqNbr)) 622 641 { … … 640 
659 if (ASMAtomicReadBool(&pWorker->fSleeping)) 641 660 { 642 Log10Func((" %s%s has available buffers - waking worker.\n", pThis->szInst, pVirtq->szName));661 Log10Func(("[%s] %s has available buffers - waking worker.\n", pThis->szInst, pVirtq->szName)); 643 662 644 663 int rc = PDMDevHlpSUPSemEventSignal(pDevIns, pWorker->hEvtProcess); … … 647 666 else 648 667 { 649 Log10Func((" %s%s has available buffers - worker already awake\n", pThis->szInst, pVirtq->szName));668 Log10Func(("[%s] %s has available buffers - worker already awake\n", pThis->szInst, pVirtq->szName)); 650 669 } 651 670 } 652 671 else 653 672 { 654 Log10Func((" %s%s has available buffers - waking worker.\n", pThis->szInst, pVirtq->szName));673 Log10Func(("[%s] %s has available buffers - waking worker.\n", pThis->szInst, pVirtq->szName)); 655 674 } 656 675 } 657 676 else 658 LogRelFunc((" %sunrecognized queue %s (idx=%d) notified\n", pThis->szInst, pVirtq->szName, uVirtqNbr));677 LogRelFunc(("[%s] unrecognized queue %s (idx=%d) notified\n", pThis->szInst, pVirtq->szName, uVirtqNbr)); 659 678 } 660 679 … … 669 688 PVIRTIONETWORKER pWorker = (PVIRTIONETWORKER)pThread->pvUser; 670 689 671 Log10Func((" %s\n", pThis->szInst));690 Log10Func(("[%s]\n", pThis->szInst)); 672 691 RT_NOREF(pThis); 673 674 692 return PDMDevHlpSUPSemEventSignal(pDevIns, pWorker->hEvtProcess); 675 693 } … … 770 788 { 771 789 LogFunc(("-------------------------------------------------------------------\n")); 772 LogFunc(("rxPktHdr\n" 773 " uFlags ......... %2.2x\n" 774 " uGsoType ....... %2.2x\n" 775 " uHdrLen ........ %4.4x\n" 776 " uGsoSize ....... %4.4x\n" 777 " uChksumStart ... %4.4x\n" 778 " uChksumOffset .. %4.4x\n" 779 " uNumBuffers .... %4.4x\n", 780 pRxPktHdr->uFlags, 781 pRxPktHdr->uGsoType, pRxPktHdr->uHdrLen, pRxPktHdr->uGsoSize, 782 pRxPktHdr->uChksumStart, pRxPktHdr->uChksumOffset, pRxPktHdr->uNumBuffers)); 783 790 if (!virtioCoreIsLegacyMode(&pThis->Virtio) || FEATURE_ENABLED(MRG_RXBUF)) 791 LogFunc(("rxPktHdr\n" 792 " uFlags ......... %2.2x\n uGsoType ....... %2.2x\n uHdrLen ........ %4.4x\n" 793 " uGsoSize ....... %4.4x\n uChksumStart ... %4.4x\n uChksumOffset .. %4.4x\n" 794 " uNumBuffers .... %4.4x\n", 795 pRxPktHdr->uFlags, pRxPktHdr->uGsoType, pRxPktHdr->uHdrLen, pRxPktHdr->uGsoSize, 796 pRxPktHdr->uChksumStart, pRxPktHdr->uChksumOffset, pRxPktHdr->uNumBuffers)); 797 else 798 LogFunc(("rxPktHdr\n" 799 " uFlags ......... %2.2x\n uGsoType ....... %2.2x\n uHdrLen ........ %4.4x\n" 800 " uGsoSize ....... %4.4x\n uChksumStart ... %4.4x\n uChksumOffset .. %4.4x\n", 801 pRxPktHdr->uFlags, pRxPktHdr->uGsoType, pRxPktHdr->uHdrLen, pRxPktHdr->uGsoSize, 802 pRxPktHdr->uChksumStart, pRxPktHdr->uChksumOffset)); 784 803 virtioCoreHexDump((uint8_t *)pRxPktHdr, sizeof(VIRTIONETPKTHDR), 0, "Dump of virtual rPktHdr"); 785 804 } … … 833 852 { 834 853 pHlp->pfnPrintf(pHlp, "Virtq information:\n\n"); 835 836 for (int uVirtqNbr = 0; uVirtqNbr < pThis->cVirtVirtqs; uVirtqNbr++) 854 for (int uVirtqNbr = 0; uVirtqNbr < pThis->cVirtqs; uVirtqNbr++) 837 855 { 838 856 PVIRTIONETVIRTQ pVirtq = &pThis->aVirtqs[uVirtqNbr]; … … 879 897 if (fAll || fPointers) 880 898 { 881 882 899 pHlp->pfnPrintf(pHlp, "Internal Pointers:\n\n"); 883 884 900 pHlp->pfnPrintf(pHlp, " pDevIns ................... %p\n", pDevIns); 885 901 pHlp->pfnPrintf(pHlp, " PVIRTIONET ................ %p\n", pThis); … … 889 905 pHlp->pfnPrintf(pHlp, " pDrv ...................... 
%p\n", pThisCC->pDrv); 890 906 pHlp->pfnPrintf(pHlp, "\n"); 891 892 907 } 893 908 … … 908 923 pHlp->pfnPrintf(pHlp, " uDeviceStatus ............. 0x%x\n", pThis->Virtio.fDeviceStatus); 909 924 pHlp->pfnPrintf(pHlp, " cVirtqPairs .,............. %d\n", pThis->cVirtqPairs); 910 pHlp->pfnPrintf(pHlp, " cVirt Virtqs .,............. %d\n", pThis->cVirtVirtqs);925 pHlp->pfnPrintf(pHlp, " cVirtqs .,................. %d\n", pThis->cVirtqs); 911 926 pHlp->pfnPrintf(pHlp, " cWorkers .................. %d\n", pThis->cWorkers); 912 927 pHlp->pfnPrintf(pHlp, " MMIO mapping name ......... %d\n", pThisCC->Virtio.pcszMmioName); … … 918 933 { 919 934 pHlp->pfnPrintf(pHlp, "Network configuration:\n\n"); 920 921 935 pHlp->pfnPrintf(pHlp, " MAC: ...................... %RTmac\n", &pThis->macConfigured); 922 936 pHlp->pfnPrintf(pHlp, "\n"); … … 952 966 pHlp->pfnPrintf(pHlp, " Leaf starved: ............. %s\n", pThis->fLeafWantsEmptyRxBufs ? "true" : "false"); 953 967 pHlp->pfnPrintf(pHlp, "\n"); 954 955 968 } 956 969 /** @todo implement this … … 1001 1014 } 1002 1015 1003 static int virtioNetR3 CfgAccessed(PVIRTIONET pThis, uint32_t uOffsetOfAccess, void *pv, uint32_t cb, bool fWrite)1016 static int virtioNetR3DevCfgAccess(PVIRTIONET pThis, uint32_t uOffsetOfAccess, void *pv, uint32_t cb, bool fWrite) 1004 1017 { 1005 1018 AssertReturn(pv && cb <= sizeof(uint32_t), fWrite ? VINF_SUCCESS : VINF_IOM_MMIO_UNUSED_00); … … 1033 1046 PVIRTIONET pThis = PDMDEVINS_2_DATA(pDevIns, PVIRTIONET); 1034 1047 1035 LogFunc((" %s uOffset: %d, cb: %d\n", pThis->szInst, uOffset, cb));1036 1048 RT_NOREF(pThis); 1037 return virtioNetR3 CfgAccessed(PDMDEVINS_2_DATA(pDevIns, PVIRTIONET), uOffset, pv, cb, false /*fRead*/);1049 return virtioNetR3DevCfgAccess(PDMDEVINS_2_DATA(pDevIns, PVIRTIONET), uOffset, pv, cb, false /*fRead*/); 1038 1050 } 1039 1051 … … 1045 1057 PVIRTIONET pThis = PDMDEVINS_2_DATA(pDevIns, PVIRTIONET); 1046 1058 1047 Log10Func((" %suOffset: %d, cb: %d: %.*Rhxs\n", pThis->szInst, uOffset, cb, RT_MAX(cb, 8) , pv));1059 Log10Func(("[%s] uOffset: %d, cb: %d: %.*Rhxs\n", pThis->szInst, uOffset, cb, RT_MAX(cb, 8) , pv)); 1048 1060 RT_NOREF(pThis); 1049 return virtioNetR3 CfgAccessed(PDMDEVINS_2_DATA(pDevIns, PVIRTIONET), uOffset, (void *)pv, cb, true /*fWrite*/);1061 return virtioNetR3DevCfgAccess(PDMDEVINS_2_DATA(pDevIns, PVIRTIONET), uOffset, (void *)pv, cb, true /*fWrite*/); 1050 1062 } 1051 1063 … … 1065 1077 1066 1078 RT_NOREF(pThisCC); 1067 Log7Func((" %sLOAD EXEC!!\n", pThis->szInst));1079 Log7Func(("[%s] LOAD EXEC!!\n", pThis->szInst)); 1068 1080 1069 1081 AssertReturn(uPass == SSM_PASS_FINAL, VERR_SSM_UNEXPECTED_PASS); … … 1075 1087 pHlp->pfnSSMGetU64( pSSM, &pThis->fNegotiatedFeatures); 1076 1088 1077 pHlp->pfnSSMGetU16( pSSM, &pThis->cVirt Virtqs);1078 AssertReturn(pThis->cVirt Virtqs <= (VIRTIONET_MAX_QPAIRS * 2), VERR_OUT_OF_RANGE);1089 pHlp->pfnSSMGetU16( pSSM, &pThis->cVirtqs); 1090 AssertReturn(pThis->cVirtqs <= (VIRTIONET_MAX_QPAIRS * 2), VERR_OUT_OF_RANGE); 1079 1091 1080 1092 pHlp->pfnSSMGetU16( pSSM, &pThis->cWorkers); … … 1082 1094 1083 1095 1084 for (int uVirtqNbr = 0; uVirtqNbr < pThis->cVirt Virtqs; uVirtqNbr++)1096 for (int uVirtqNbr = 0; uVirtqNbr < pThis->cVirtqs; uVirtqNbr++) 1085 1097 pHlp->pfnSSMGetBool(pSSM, &pThis->aVirtqs[uVirtqNbr].fAttachedToVirtioCore); 1086 1098 … … 1148 1160 if (pVirtq->fAttachedToVirtioCore) 1149 1161 { 1150 Log7Func((" %sWaking %s worker.\n", pThis->szInst, pVirtq->szName));1162 Log7Func(("[%s] Waking %s worker.\n", pThis->szInst, pVirtq->szName)); 1151 1163 rc = 
PDMDevHlpSUPSemEventSignal(pDevIns, pWorker->hEvtProcess); 1152 1164 AssertRCReturn(rc, rc); … … 1166 1178 1167 1179 RT_NOREF(pThisCC); 1168 Log7Func((" %sSAVE EXEC!!\n", pThis->szInst));1180 Log7Func(("[%s] SAVE EXEC!!\n", pThis->szInst)); 1169 1181 1170 1182 pHlp->pfnSSMPutU64( pSSM, pThis->fNegotiatedFeatures); 1171 1183 1172 pHlp->pfnSSMPutU16( pSSM, pThis->cVirt Virtqs);1184 pHlp->pfnSSMPutU16( pSSM, pThis->cVirtqs); 1173 1185 pHlp->pfnSSMPutU16( pSSM, pThis->cWorkers); 1174 1186 1175 for (int uVirtqNbr = 0; uVirtqNbr < pThis->cVirt Virtqs; uVirtqNbr++)1187 for (int uVirtqNbr = 0; uVirtqNbr < pThis->cVirtqs; uVirtqNbr++) 1176 1188 pHlp->pfnSSMPutBool(pSSM, pThis->aVirtqs[uVirtqNbr].fAttachedToVirtioCore); 1177 1189 … … 1249 1261 void virtioNetR3SetReadLed(PVIRTIONETR3 pThisR3, bool fOn) 1250 1262 { 1251 Log10Func(("%s\n", fOn ? "on" : "off"));1252 1263 if (fOn) 1253 1264 pThisR3->led.Asserted.s.fReading = pThisR3->led.Actual.s.fReading = 1; … … 1301 1312 1302 1313 if (!virtioNetIsOperational(pThis, pDevIns)) 1303 Log8Func((" %sNo Rx bufs available. (VirtIO core not ready)\n", pThis->szInst));1314 Log8Func(("[%s] No Rx bufs available. (VirtIO core not ready)\n", pThis->szInst)); 1304 1315 1305 1316 else if (!virtioCoreIsVirtqEnabled(&pThis->Virtio, pRxVirtq->uIdx)) 1306 Log8Func((" %sNo Rx bufs available. (%s not enabled)\n", pThis->szInst, pRxVirtq->szName));1317 Log8Func(("[%s] No Rx bufs available. (%s not enabled)\n", pThis->szInst, pRxVirtq->szName)); 1307 1318 1308 1319 else if (IS_VIRTQ_EMPTY(pDevIns, &pThis->Virtio, pRxVirtq->uIdx)) 1309 Log8Func((" %sNo Rx bufs available. (%s empty)\n", pThis->szInst, pRxVirtq->szName));1320 Log8Func(("[%s] No Rx bufs available. (%s empty)\n", pThis->szInst, pRxVirtq->szName)); 1310 1321 1311 1322 else 1312 1323 { 1313 Log8Func((" %s Empty guest buffers available in %s\n", pThis->szInst,pRxVirtq->szName));1324 Log8Func(("[%s] %s has empty guest bufs avail\n", pThis->szInst, pRxVirtq->szName)); 1314 1325 rc = VINF_SUCCESS; 1315 1326 } … … 1347 1358 if (virtioNetR3RxBufsAvail(pDevIns, pThis, NULL /* pRxVirtq */)) 1348 1359 { 1349 Log10Func((" %sRx bufs now available, releasing waiter...\n", pThis->szInst));1360 Log10Func(("[%s] Rx bufs now available, releasing waiter...\n", pThis->szInst)); 1350 1361 return VINF_SUCCESS; 1351 1362 } … … 1353 1364 return VERR_NET_NO_BUFFER_SPACE; 1354 1365 1355 LogFunc((" %s%s\n", pThis->szInst, timeoutMs == RT_INDEFINITE_WAIT ? "<indefinite wait>" : ""));1366 LogFunc(("[%s] %s\n", pThis->szInst, timeoutMs == RT_INDEFINITE_WAIT ? "<indefinite wait>" : "")); 1356 1367 1357 1368 … … 1362 1373 if (virtioNetR3RxBufsAvail(pDevIns, pThis, NULL /* pRxVirtq */)) 1363 1374 { 1364 Log10Func((" %sRx bufs now available, releasing waiter...\n", pThis->szInst));1375 Log10Func(("[%s] Rx bufs now available, releasing waiter...\n", pThis->szInst)); 1365 1376 ASMAtomicXchgBool(&pThis->fLeafWantsEmptyRxBufs, false); 1366 1377 return VINF_SUCCESS; 1367 1378 } 1368 Log9Func((" %sStarved for empty guest Rx bufs. Waiting...\n", pThis->szInst));1379 Log9Func(("[%s] Starved for empty guest Rx bufs. 
Waiting...\n", pThis->szInst)); 1369 1380 1370 1381 int rc = PDMDevHlpSUPSemEventWaitNoResume(pDevIns, pThis->hEventRxDescAvail, timeoutMs); … … 1388 1399 ASMAtomicXchgBool(&pThis->fLeafWantsEmptyRxBufs, false); 1389 1400 1390 Log7Func((" %sWait for Rx buffers available was interrupted\n", pThis->szInst));1401 Log7Func(("[%s] Wait for Rx buffers available was interrupted\n", pThis->szInst)); 1391 1402 return VERR_INTERRUPTED; 1392 1403 } … … 1487 1498 char *pszType; 1488 1499 if (virtioNetR3IsMulticast(pvBuf)) 1489 pszType = (char *)" Multicast";1500 pszType = (char *)"mcast"; 1490 1501 else if (virtioNetR3IsBroadcast(pvBuf)) 1491 pszType = (char *)" Broadcast";1502 pszType = (char *)"bcast"; 1492 1503 else 1493 pszType = (char *)" Unicast";1494 1495 LogFunc((" %s node(%RTmac %s%s), pkt(%RTmac %s)\n",1496 pThis-> szInst, pThis->virtioNetConfig.uMacAddress.au8,1497 pThis->fPromiscuous ? " promiscuous" : "",1498 pThis->fAllMulticast ? " all-m ulticast" : "",1504 pszType = (char *)"ucast"; 1505 1506 LogFunc(("node(%RTmac%s%s), pkt(%RTmac, %s) ", 1507 pThis->virtioNetConfig.uMacAddress.au8, 1508 pThis->fPromiscuous ? " promisc" : "", 1509 pThis->fAllMulticast ? " all-mcast" : "", 1499 1510 pvBuf, pszType)); 1500 1511 } 1501 1512 1502 if (pThis->fPromiscuous) 1513 if (pThis->fPromiscuous) { 1514 Log11(("\n")); 1503 1515 return true; 1516 } 1504 1517 1505 1518 /* Ignore everything outside of our VLANs */ … … 1510 1523 && !ASMBitTest(pThis->aVlanFilter, RT_BE2H_U16(uPtr[7]) & 0xFFF)) 1511 1524 { 1512 Log11Func(("\n %snot our VLAN, returning false\n", pThis->szInst));1525 Log11Func(("\n[%s] not our VLAN, returning false\n", pThis->szInst)); 1513 1526 return false; 1514 1527 } … … 1516 1529 if (virtioNetR3IsBroadcast(pvBuf)) 1517 1530 { 1518 Log11((" ... accept (broadcast)\n"));1531 Log11(("acpt (bcast)\n")); 1519 1532 if (LogIs12Enabled()) 1520 1533 virtioNetR3PacketDump(pThis, (const uint8_t *)pvBuf, cb, "<-- Incoming"); … … 1523 1536 if (pThis->fAllMulticast && virtioNetR3IsMulticast(pvBuf)) 1524 1537 { 1525 Log11((" ... accept (all-multicast mode)\n"));1538 Log11(("acpt (all-mcast)\n")); 1526 1539 if (LogIs12Enabled()) 1527 1540 virtioNetR3PacketDump(pThis, (const uint8_t *)pvBuf, cb, "<-- Incoming"); … … 1531 1544 if (!memcmp(pThis->virtioNetConfig.uMacAddress.au8, pvBuf, sizeof(RTMAC))) 1532 1545 { 1533 Log11((" . . . accept (direct to thisnode)\n"));1546 Log11(("acpt (to-node)\n")); 1534 1547 if (LogIs12Enabled()) 1535 1548 virtioNetR3PacketDump(pThis, (const uint8_t *)pvBuf, cb, "<-- Incoming"); … … 1541 1554 if (!memcmp(&pThis->aMacMulticastFilter[i], pvBuf, sizeof(RTMAC))) 1542 1555 { 1543 Log11((" ... accept (in multicast array)\n"));1556 Log11(("acpt (mcast whitelist)\n")); 1544 1557 if (LogIs12Enabled()) 1545 1558 virtioNetR3PacketDump(pThis, (const uint8_t *)pvBuf, cb, "<-- Incoming"); … … 1551 1564 if (!memcmp(&pThis->aMacUnicastFilter[i], pvBuf, sizeof(RTMAC))) 1552 1565 { 1553 Log11((" ... accept (in unicast array)\n"));1566 Log11(("acpt (ucast whitelist)\n")); 1554 1567 return true; 1555 1568 } 1556 1569 1557 if (LogIs1 2Enabled())1570 if (LogIs11Enabled()) 1558 1571 Log(("... 
reject\n")); 1559 1572 … … 1570 1583 uint16_t cVirtqBufs = 0; 1571 1584 uint64_t uOffset = 0; 1585 1586 int uPktHdrType = virtioNetPktHdrType(&pThis->Virtio, pThis); 1572 1587 1573 1588 while (uOffset < cb) … … 1584 1599 VERR_INTERNAL_ERROR); 1585 1600 1586 /* Length of first seg of guest Rx S/G buf should never be less than s izeof(virtio_net_pkt_hdr).1601 /* Length of first seg of guest Rx S/G buf should never be less than sthe packet header. 1587 1602 * Otherwise code has to become more complicated, e.g. locate & cache seg idx & offset of 1588 * virtio_net_header.num_buffers, to facilitate deferring updating GCPhys memory. Re-visit if needed */ 1589 1590 AssertMsgReturnStmt(pVirtqBuf->pSgPhysReturn->paSegs[0].cbSeg >= sizeof(VIRTIONETPKTHDR), 1591 ("Desc chain's first seg has insufficient space for pkt header!\n"), 1592 virtioCoreR3VirtqBufRelease(&pThis->Virtio, pVirtqBuf), 1593 VERR_INTERNAL_ERROR); 1603 * virtio_net_header.num_buffers, to facilitate deferring updating GCPhys memory. 1604 * Re-visit if needed */ 1605 1606 size_t cbPktHdr = virtioNetCalcPktHdrSize(&pThis->Virtio, pThis); 1607 1608 AssertMsgReturn(pVirtqBuf->pSgPhysReturn->paSegs[0].cbSeg >= cbPktHdr, 1609 ("Out of Memory"), VERR_NO_MEMORY); 1594 1610 1595 1611 size_t cbBufRemaining = pVirtqBuf->cbPhysReturn; 1596 uint8_t cbHdr = sizeof(VIRTIONETPKTHDR); 1612 1597 1613 1598 1614 /* Fill the Guest Rx buffer with data received from the interface */ … … 1602 1618 { 1603 1619 /* Lead with packet header */ 1604 paVirtSegsToGuest[0].cbSeg = cb Hdr;1605 paVirtSegsToGuest[0].pvSeg = RTMemAlloc(cb Hdr);1620 paVirtSegsToGuest[0].cbSeg = cbPktHdr; 1621 paVirtSegsToGuest[0].pvSeg = RTMemAlloc(cbPktHdr); 1606 1622 AssertReturn(paVirtSegsToGuest[0].pvSeg, VERR_NO_MEMORY); 1607 cbBufRemaining -= cbHdr; 1608 1609 memcpy(paVirtSegsToGuest[0].pvSeg, rxPktHdr, cbHdr); 1610 1611 /* Calculate & cache GCPhys addr of field to update after final value is known */ 1612 GCPhysPktHdrNumBuffers = pVirtqBuf->pSgPhysReturn->paSegs[0].GCPhys 1613 + RT_UOFFSETOF(VIRTIONETPKTHDR, uNumBuffers); 1623 cbBufRemaining -= cbPktHdr; 1624 1625 memcpy(paVirtSegsToGuest[0].pvSeg, rxPktHdr, cbPktHdr); 1626 1627 if (uPktHdrType != kVirtioNetLegacyPktHdrWithoutMrgRx) 1628 { 1629 /* Calculate & cache GCPhys addr of field to update after final value is known */ 1630 GCPhysPktHdrNumBuffers = pVirtqBuf->pSgPhysReturn->paSegs[0].GCPhys 1631 + RT_UOFFSETOF(VIRTIONETPKTHDR, uNumBuffers); 1632 } 1614 1633 fAddPktHdr = false; 1615 1634 cSegs++; … … 1648 1667 if (uOffset < cb) 1649 1668 { 1650 LogFunc((" %sPacket did not fit into RX queue (packet size=%u)!\n", pThis->szInst, cb));1669 LogFunc(("[%s] Packet did not fit into RX queue (packet size=%u)!\n", pThis->szInst, cb)); 1651 1670 return VERR_TOO_MUCH_DATA; 1652 1671 } 1653 1672 1654 /* Fix-up pkthdr (in guest phys. memory) with number buffers (descriptors) processed */ 1655 1656 int rc = PDMDevHlpPCIPhysWrite(pDevIns, GCPhysPktHdrNumBuffers, &cVirtqBufs, sizeof(cVirtqBufs)); 1657 AssertMsgRCReturn(rc, ("Failure updating descriptor count in pkt hdr in guest physical memory\n"), rc); 1673 1674 if (uPktHdrType != kVirtioNetLegacyPktHdrWithoutMrgRx) 1675 { 1676 /* Fix-up pkthdr (in guest phys. 
memory) with number buffers (descriptors) processed */ 1677 int rc = virtioCoreGCPhysWrite(&pThis->Virtio, pDevIns, GCPhysPktHdrNumBuffers, &cVirtqBufs, sizeof(cVirtqBufs)); 1678 AssertMsgRCReturn(rc, ("Failure updating descriptor count in pkt hdr in guest physical memory\n"), rc); 1679 } 1658 1680 1659 1681 virtioCoreVirtqUsedRingSync(pDevIns, &pThis->Virtio, pRxVirtq->uIdx); … … 1683 1705 RT_NOREF(pThisCC); 1684 1706 1685 LogFunc((" %s(%RTmac) pGso %s\n", pThis->szInst, pvBuf, pGso ? "present" : "not present"));1707 LogFunc(("[%s] (%RTmac) pGso %s\n", pThis->szInst, pvBuf, pGso ? "present" : "not present")); 1686 1708 VIRTIONETPKTHDR rxPktHdr; 1687 1709 1688 1710 if (pGso) 1689 1711 { 1690 Log2Func((" %sgso type=%x cbPktHdrsTotal=%u cbPktHdrsSeg=%u mss=%u off1=0x%x off2=0x%x\n",1712 Log2Func(("[%s] gso type=%x cbPktHdrsTotal=%u cbPktHdrsSeg=%u mss=%u off1=0x%x off2=0x%x\n", 1691 1713 pThis->szInst, pGso->u8Type, pGso->cbHdrsTotal, 1692 1714 pGso->cbHdrsSeg, pGso->cbMaxSeg, pGso->offHdr1, pGso->offHdr2)); … … 1783 1805 if (!uFeatures) 1784 1806 { 1785 LogFunc((" %sGSO type (0x%x) not supported\n", pThis->szInst, pGso->u8Type));1807 LogFunc(("[%s] GSO type (0x%x) not supported\n", pThis->szInst, pGso->u8Type)); 1786 1808 return VERR_NOT_SUPPORTED; 1787 1809 } 1788 1810 } 1789 1811 1790 Log10Func((" %spvBuf=%p cb=%3u pGso=%p ...\n", pThis->szInst, pvBuf, cb, pGso));1812 Log10Func(("[%s] pvBuf=%p cb=%3u pGso=%p ...\n", pThis->szInst, pvBuf, cb, pGso)); 1791 1813 1792 1814 /** @todo If we ever start using more than one Rx/Tx queue pair, is a random queue … … 1830 1852 { 1831 1853 1832 #define LOG_VIRTIONET_FLAG(fld) LogFunc((" %sSetting %s=%d\n", pThis->szInst, #fld, pThis->fld))1833 1834 LogFunc((" %sProcessing CTRL Rx command\n", pThis->szInst));1854 #define LOG_VIRTIONET_FLAG(fld) LogFunc(("[%s] Setting %s=%d\n", pThis->szInst, #fld, pThis->fld)) 1855 1856 LogFunc(("[%s] Processing CTRL Rx command\n", pThis->szInst)); 1835 1857 switch(pCtrlPktHdr->uCmd) 1836 1858 { … … 1893 1915 static uint8_t virtioNetR3CtrlMac(PVIRTIONET pThis, PVIRTIONET_CTRL_HDR_T pCtrlPktHdr, PVIRTQBUF pVirtqBuf) 1894 1916 { 1895 LogFunc(("%s Processing CTRL MAC command\n", pThis->szInst)); 1917 LogFunc(("[%s] Processing CTRL MAC command\n", pThis->szInst)); 1918 1896 1919 1897 1920 AssertMsgReturn(pVirtqBuf->cbPhysSend >= sizeof(*pCtrlPktHdr), … … 1925 1948 cbRemaining -= sizeof(cMacs); 1926 1949 1927 Log7Func((" %sGuest provided %d unicast MAC Table entries\n", pThis->szInst, cMacs));1950 Log7Func(("[%s] Guest provided %d unicast MAC Table entries\n", pThis->szInst, cMacs)); 1928 1951 1929 1952 if (cMacs) … … 1950 1973 cbRemaining -= sizeof(cMacs); 1951 1974 1952 Log10Func((" %sGuest provided %d multicast MAC Table entries\n", pThis->szInst, cMacs));1975 Log10Func(("[%s] Guest provided %d multicast MAC Table entries\n", pThis->szInst, cMacs)); 1953 1976 1954 1977 … … 1968 1991 1969 1992 #ifdef LOG_ENABLED 1970 LogFunc((" %sunicast MACs:\n", pThis->szInst));1993 LogFunc(("[%s] unicast MACs:\n", pThis->szInst)); 1971 1994 for(unsigned i = 0; i < cMacs; i++) 1972 1995 LogFunc((" %RTmac\n", &pThis->aMacUnicastFilter[i])); 1973 1996 1974 LogFunc((" %smulticast MACs:\n", pThis->szInst));1997 LogFunc(("[%s] multicast MACs:\n", pThis->szInst)); 1975 1998 for(unsigned i = 0; i < cMacs; i++) 1976 1999 LogFunc((" %RTmac\n", &pThis->aMacMulticastFilter[i])); … … 1987 2010 static uint8_t virtioNetR3CtrlMultiQueue(PVIRTIONET pThis, PVIRTIONETCC pThisCC, PPDMDEVINS pDevIns, PVIRTIONET_CTRL_HDR_T pCtrlPktHdr, PVIRTQBUF pVirtqBuf) 
1988 2011 { 1989 LogFunc((" %sProcessing CTRL MQ command\n", pThis->szInst));2012 LogFunc(("[%s] Processing CTRL MQ command\n", pThis->szInst)); 1990 2013 1991 2014 uint16_t cVirtqPairs; … … 2003 2026 2004 2027 AssertMsgReturn(cVirtqPairs > VIRTIONET_MAX_QPAIRS, 2005 (" %sGuest CTRL MQ virtq pair count out of range)\n", pThis->szInst, cVirtqPairs), VIRTIONET_ERROR);2006 2007 LogFunc((" %sGuest specifies %d VQ pairs in use\n", pThis->szInst, cVirtqPairs));2028 ("[%s] Guest CTRL MQ virtq pair count out of range)\n", pThis->szInst, cVirtqPairs), VIRTIONET_ERROR); 2029 2030 LogFunc(("[%s] Guest specifies %d VQ pairs in use\n", pThis->szInst, cVirtqPairs)); 2008 2031 pThis->cVirtqPairs = cVirtqPairs; 2009 2032 break; … … 2035 2058 static uint8_t virtioNetR3CtrlVlan(PVIRTIONET pThis, PVIRTIONET_CTRL_HDR_T pCtrlPktHdr, PVIRTQBUF pVirtqBuf) 2036 2059 { 2037 LogFunc((" %sProcessing CTRL VLAN command\n", pThis->szInst));2060 LogFunc(("[%s] Processing CTRL VLAN command\n", pThis->szInst)); 2038 2061 2039 2062 uint16_t uVlanId; … … 2049 2072 ("%s VLAN ID out of range (VLAN ID=%u)\n", pThis->szInst, uVlanId), VIRTIONET_ERROR); 2050 2073 2051 LogFunc((" %suCommand=%u VLAN ID=%u\n", pThis->szInst, pCtrlPktHdr->uCmd, uVlanId));2074 LogFunc(("[%s] uCommand=%u VLAN ID=%u\n", pThis->szInst, pCtrlPktHdr->uCmd, uVlanId)); 2052 2075 2053 2076 switch (pCtrlPktHdr->uCmd) … … 2069 2092 PVIRTQBUF pVirtqBuf) 2070 2093 { 2071 LogFunc((" %sReceived CTRL packet from guest\n", pThis->szInst));2094 LogFunc(("[%s] Received CTRL packet from guest\n", pThis->szInst)); 2072 2095 2073 2096 if (pVirtqBuf->cbPhysSend < 2) 2074 2097 { 2075 LogFunc((" %sCTRL packet from guest driver incomplete. Skipping ctrl cmd\n", pThis->szInst));2098 LogFunc(("[%s] CTRL packet from guest driver incomplete. Skipping ctrl cmd\n", pThis->szInst)); 2076 2099 return; 2077 2100 } 2078 2101 else if (pVirtqBuf->cbPhysReturn < sizeof(VIRTIONET_CTRL_HDR_T_ACK)) 2079 2102 { 2080 LogFunc((" %sGuest driver didn't allocate memory to receive ctrl pkt ACK. Skipping ctrl cmd\n", pThis->szInst));2103 LogFunc(("[%s] Guest driver didn't allocate memory to receive ctrl pkt ACK. Skipping ctrl cmd\n", pThis->szInst)); 2081 2104 return; 2082 2105 } … … 2095 2118 RT_MIN(pVirtqBuf->cbPhysSend, sizeof(VIRTIONET_CTRL_HDR_T))); 2096 2119 2097 Log7Func((" %sCTRL COMMAND: class=%d command=%d\n", pThis->szInst, pCtrlPktHdr->uClass, pCtrlPktHdr->uCmd));2120 Log7Func(("[%s] CTRL COMMAND: class=%d command=%d\n", pThis->szInst, pCtrlPktHdr->uClass, pCtrlPktHdr->uCmd)); 2098 2121 2099 2122 uint8_t uAck; … … 2122 2145 if (pCtrlPktHdr->uCmd != VIRTIONET_CTRL_ANNOUNCE_ACK) 2123 2146 { 2124 LogFunc((" %sIgnoring CTRL class VIRTIONET_CTRL_ANNOUNCE. Unrecognized uCmd\n", pThis->szInst));2147 LogFunc(("[%s] Ignoring CTRL class VIRTIONET_CTRL_ANNOUNCE. 
Unrecognized uCmd\n", pThis->szInst)); 2125 2148 break; 2126 2149 } 2127 2150 pThis->virtioNetConfig.uStatus &= ~VIRTIONET_F_ANNOUNCE; 2128 Log7Func((" %sClearing VIRTIONET_F_ANNOUNCE in config status\n", pThis->szInst));2151 Log7Func(("[%s] Clearing VIRTIONET_F_ANNOUNCE in config status\n", pThis->szInst)); 2129 2152 break; 2130 2153 default: … … 2171 2194 } 2172 2195 2173 static int virtioNetR3ReadHeader(PPDMDEVINS pDevIns, RTGCPHYS GCPhys, PVIRTIONETPKTHDR pPktHdr, size_t cbFrame) 2174 { 2175 int rc = PDMDevHlpPCIPhysRead(pDevIns, GCPhys, pPktHdr, sizeof(*pPktHdr)); 2196 static int virtioNetR3ReadHeader(PVIRTIOCORE pVirtio, PVIRTIONET pThis, PPDMDEVINS pDevIns, RTGCPHYS GCPhys, PVIRTIONETPKTHDR pPktHdr, size_t cbFrame) 2197 { 2198 size_t cbPktHdr = virtioNetCalcPktHdrSize(pVirtio, pThis); 2199 int rc = virtioCoreGCPhysRead(pVirtio, pDevIns, GCPhys, pPktHdr, cbPktHdr); 2176 2200 if (RT_FAILURE(rc)) 2177 2201 return rc; … … 2273 2297 } 2274 2298 2275 static voidvirtioNetR3TransmitPendingPackets(PPDMDEVINS pDevIns, PVIRTIONET pThis, PVIRTIONETCC pThisCC,2299 static int virtioNetR3TransmitPendingPackets(PPDMDEVINS pDevIns, PVIRTIONET pThis, PVIRTIONETCC pThisCC, 2276 2300 PVIRTIONETVIRTQ pTxVirtq, bool fOnWorkerThread) 2277 2301 { … … 2282 2306 if (!pThis->fVirtioReady) 2283 2307 { 2284 LogFunc(("%s Ignoring Tx requests. VirtIO not ready (status=0x%x) .\n",2308 LogFunc(("%s Ignoring Tx requests. VirtIO not ready (status=0x%x)\n", 2285 2309 pThis->szInst, pThis->virtioNetConfig.uStatus)); 2286 return ;2310 return VERR_IGNORED; 2287 2311 } 2288 2312 2289 2313 if (!pThis->fCableConnected) 2290 2314 { 2291 Log((" %sIgnoring transmit requests while cable is disconnected.\n", pThis->szInst));2292 return ;2315 Log(("[%s] Ignoring transmit requests while cable is disconnected.\n", pThis->szInst)); 2316 return VERR_IGNORED; 2293 2317 } 2294 2318 … … 2299 2323 */ 2300 2324 if (!ASMAtomicCmpXchgU32(&pThis->uIsTransmitting, 1, 0)) 2301 return ;2325 return VERR_IGNORED; 2302 2326 2303 2327 … … 2311 2335 { 2312 2336 ASMAtomicWriteU32(&pThis->uIsTransmitting, 0); 2313 return ;2337 return VERR_TRY_AGAIN; 2314 2338 } 2315 2339 } … … 2318 2342 if (!cPkts) 2319 2343 { 2320 LogFunc((" %sNo packets to send found on %s\n", pThis->szInst, pTxVirtq->szName));2344 LogFunc(("[%s] No packets to send found on %s\n", pThis->szInst, pTxVirtq->szName)); 2321 2345 2322 2346 if (pDrv) … … 2324 2348 2325 2349 ASMAtomicWriteU32(&pThis->uIsTransmitting, 0); 2326 return ;2327 } 2328 LogFunc((" %sAbout to transmit %d pending packet%c\n", pThis->szInst, cPkts, cPkts == 1 ? ' ' : 's'));2350 return VERR_MISSING; 2351 } 2352 LogFunc(("[%s] About to transmit %d pending packet%c\n", pThis->szInst, cPkts, cPkts == 1 ? 
' ' : 's')); 2329 2353 2330 2354 virtioNetR3SetWriteLed(pThisCC, true); … … 2334 2358 while ((rc = virtioCoreR3VirtqAvailBufPeek(pVirtio->pDevInsR3, pVirtio, pTxVirtq->uIdx, &pVirtqBuf)) == VINF_SUCCESS) 2335 2359 { 2336 Log10Func((" %sfetched descriptor chain from %s\n", pThis->szInst, pTxVirtq->szName));2360 Log10Func(("[%s] fetched descriptor chain from %s\n", pThis->szInst, pTxVirtq->szName)); 2337 2361 2338 2362 PVIRTIOSGBUF pSgPhysSend = pVirtqBuf->pSgPhysSend; 2339 2363 PVIRTIOSGSEG paSegsFromGuest = pSgPhysSend->paSegs; 2340 2364 uint32_t cSegsFromGuest = pSgPhysSend->cSegs; 2341 2342 VIRTIONETPKTHDR PktHdr;2343 2365 size_t uSize = 0; 2344 2366 2345 Assert(paSegsFromGuest[0].cbSeg >= sizeof(PktHdr)); 2367 size_t cbPktHdr = virtioNetCalcPktHdrSize(pVirtio, pThis); 2368 2369 AssertMsgReturn(paSegsFromGuest[0].cbSeg >= cbPktHdr, 2370 ("Desc chain's first seg has insufficient space for pkt header!\n"), 2371 VERR_INTERNAL_ERROR); 2372 2373 PVIRTIONETPKTHDR pPktHdr = (PVIRTIONETPKTHDR)RTMemAllocZ(cbPktHdr); 2374 AssertMsgReturn(pPktHdr, ("Out of Memory\n"), VERR_NO_MEMORY); 2346 2375 2347 2376 /* Compute total frame size. */ … … 2349 2378 uSize += paSegsFromGuest[i].cbSeg; 2350 2379 2351 Log5Func((" %scomplete frame is %u bytes.\n", pThis->szInst, uSize));2380 Log5Func(("[%s] complete frame is %u bytes.\n", pThis->szInst, uSize)); 2352 2381 Assert(uSize <= VIRTIONET_MAX_FRAME_SIZE); 2353 2382 … … 2360 2389 uint64_t uOffset; 2361 2390 2362 uSize -= sizeof(PktHdr);2363 rc = virtioNetR3ReadHeader(p DevIns, paSegsFromGuest[0].GCPhys, &PktHdr, uSize);2391 uSize -= cbPktHdr; 2392 rc = virtioNetR3ReadHeader(pVirtio, pThis, pDevIns, paSegsFromGuest[0].GCPhys, pPktHdr, uSize); 2364 2393 if (RT_FAILURE(rc)) 2365 return ;2366 virtioCoreGCPhysChainAdvance(pSgPhysSend, sizeof(PktHdr));2367 2368 PDMNETWORKGSO Gso, *pGso = virtioNetR3SetupGsoCtx(&Gso, &PktHdr);2394 return rc; 2395 virtioCoreGCPhysChainAdvance(pSgPhysSend, cbPktHdr); 2396 2397 PDMNETWORKGSO Gso, *pGso = virtioNetR3SetupGsoCtx(&Gso, pPktHdr); 2369 2398 2370 2399 PPDMSCATTERGATHER pSgBufToPdmLeafDevice; … … 2386 2415 uint64_t srcSgCur = (uint64_t)pSgPhysSend->GCPhysCur; 2387 2416 cbCopied = RT_MIN((uint64_t)cbRemain, srcSgLen - (srcSgCur - srcSgStart)); 2388 PDMDevHlpPCIPhysRead(pDevIns,2417 virtioCoreGCPhysRead(pVirtio, pDevIns, 2389 2418 (RTGCPHYS)pSgPhysSend->GCPhysCur, 2390 2419 ((uint8_t *)pSgBufToPdmLeafDevice->aSegs[0].pvSeg) + uOffset, cbCopied); … … 2398 2427 cbTotal, pVirtqBuf->cbPhysSend, pVirtqBuf->cbPhysSend - cbTotal)); 2399 2428 2400 rc = virtioNetR3TransmitFrame(pThis, pThisCC, pSgBufToPdmLeafDevice, pGso, &PktHdr);2429 rc = virtioNetR3TransmitFrame(pThis, pThisCC, pSgBufToPdmLeafDevice, pGso, pPktHdr); 2401 2430 if (RT_FAILURE(rc)) 2402 2431 { 2403 LogFunc((" %sFailed to transmit frame, rc = %Rrc\n", pThis->szInst, rc));2432 LogFunc(("[%s] Failed to transmit frame, rc = %Rrc\n", pThis->szInst, rc)); 2404 2433 STAM_PROFILE_STOP(&pThis->StatTransmitSend, a); 2405 2434 STAM_PROFILE_ADV_STOP(&pThis->StatTransmit, a); … … 2436 2465 2437 2466 ASMAtomicWriteU32(&pThis->uIsTransmitting, 0); 2467 return VINF_SUCCESS; 2438 2468 } 2439 2469 … … 2450 2480 STAM_COUNTER_INC(&pThis->StatTransmitByNetwork); 2451 2481 2452 virtioNetR3TransmitPendingPackets(pDevIns, pThis, pThisCC, pTxVirtq, true /*fOnWorkerThread*/);2482 (void)virtioNetR3TransmitPendingPackets(pDevIns, pThis, pThisCC, pTxVirtq, true /*fOnWorkerThread*/); 2453 2483 } 2454 2484 … … 2466 2496 virtioNetWakeupRxBufWaiter(pDevIns); 2467 2497 2468 LogFunc((" %sLink is up\n", 
pThis->szInst));2498 LogFunc(("[%s] Link is up\n", pThis->szInst)); 2469 2499 2470 2500 if (pThisCC->pDrv) … … 2496 2526 AssertRC(rc); 2497 2527 2498 LogFunc((" %sLink is down temporarily\n", pThis->szInst));2528 LogFunc(("[%s] Link is down temporarily\n", pThis->szInst)); 2499 2529 } 2500 2530 } … … 2513 2543 if (LogIs7Enabled()) 2514 2544 { 2515 LogFunc((" %s", pThis->szInst));2545 LogFunc(("[%s]", pThis->szInst)); 2516 2546 switch(enmState) 2517 2547 { … … 2548 2578 if (fRequestedLinkStateIsUp) 2549 2579 { 2550 Log((" %sLink is up\n", pThis->szInst));2580 Log(("[%s] Link is up\n", pThis->szInst)); 2551 2581 pThis->fCableConnected = true; 2552 2582 SET_LINK_UP(pThis); … … 2557 2587 /* The link was brought down explicitly, make sure it won't come up by timer. */ 2558 2588 PDMDevHlpTimerStop(pDevIns, pThisCC->hLinkUpTimer); 2559 Log((" %sLink is down\n", pThis->szInst));2589 Log(("[%s] Link is down\n", pThis->szInst)); 2560 2590 pThis->fCableConnected = false; 2561 2591 SET_LINK_DOWN(pThis); … … 2580 2610 static int virtioNetR3DestroyWorkerThreads(PPDMDEVINS pDevIns, PVIRTIONET pThis, PVIRTIONETCC pThisCC) 2581 2611 { 2582 Log10Func((" %s\n", pThis->szInst));2612 Log10Func(("[%s]\n", pThis->szInst)); 2583 2613 int rc = VINF_SUCCESS; 2584 2614 for (unsigned uIdxWorker = 0; uIdxWorker < pThis->cWorkers; uIdxWorker++) … … 2608 2638 PVIRTIONETVIRTQ pVirtq) 2609 2639 { 2610 Log10Func((" %s\n", pThis->szInst));2640 Log10Func(("[%s]\n", pThis->szInst)); 2611 2641 RT_NOREF(pThis); 2612 2642 … … 2637 2667 static int virtioNetR3CreateWorkerThreads(PPDMDEVINS pDevIns, PVIRTIONET pThis, PVIRTIONETCC pThisCC) 2638 2668 { 2639 2640 2641 Log10Func(("%s\n", pThis->szInst)); 2669 Log10Func(("[%s]\n", pThis->szInst)); 2670 int rc; 2671 2672 2673 /* Create the Control Queue worker anyway whether or not it is feature-negotiated or utilized by the guest, 2674 * as it's relatively low overhead resource-wise. This is for two reasons: First, at the time of this comment 2675 * queues and workers are configured pre-feature negotiation; secondly, legacy guest drivers are allowed to start 2676 * using the device prior to feature negotiation, and we can only know we are dealing with a modern guest driver 2677 * after feature negotiation. */ 2642 2678 2643 2679 PVIRTIONETVIRTQ pCtlVirtq = &pThis->aVirtqs[CTRLQIDX]; 2644 intrc = virtioNetR3CreateOneWorkerThread(pDevIns, pThis,2645 2680 rc = virtioNetR3CreateOneWorkerThread(pDevIns, pThis, 2681 &pThis->aWorkers[CTRLQIDX], &pThisCC->aWorkers[CTRLQIDX], pCtlVirtq); 2646 2682 AssertRCReturn(rc, rc); 2647 2683 … … 2654 2690 2655 2691 rc = virtioNetR3CreateOneWorkerThread(pDevIns, pThis, &pThis->aWorkers[TXQIDX(uVirtqPair)], 2656 2692 &pThisCC->aWorkers[TXQIDX(uVirtqPair)], pTxVirtq); 2657 2693 AssertRCReturn(rc, rc); 2658 2694 … … 2665 2701 2666 2702 pThis->cWorkers = pThis->cVirtqPairs + 1 /* One control virtq */; 2703 2667 2704 return rc; 2668 2705 } … … 2686 2723 return VINF_SUCCESS; 2687 2724 2688 LogFunc((" %sworker thread idx=%d started for %s (virtq idx=%d)\n", pThis->szInst, pWorker->uIdx, pVirtq->szName, pVirtq->uIdx));2725 LogFunc(("[%s] worker thread idx=%d started for %s (virtq idx=%d)\n", pThis->szInst, pWorker->uIdx, pVirtq->szName, pVirtq->uIdx)); 2689 2726 2690 2727 /** @todo Race w/guest enabling/disabling guest notifications cyclically. 
… … 2703 2740 if (!fNotificationSent) 2704 2741 { 2705 Log10Func((" %s%s worker sleeping...\n\n", pThis->szInst, pVirtq->szName));2742 Log10Func(("[%s] %s worker sleeping...\n\n", pThis->szInst, pVirtq->szName)); 2706 2743 Assert(ASMAtomicReadBool(&pWorker->fSleeping)); 2707 2744 … … 2723 2760 if (pVirtq->fCtlVirtq) 2724 2761 { 2725 Log10Func((" %s%s worker woken. Fetching desc chain\n", pThis->szInst, pVirtq->szName));2762 Log10Func(("[%s] %s worker woken. Fetching desc chain\n", pThis->szInst, pVirtq->szName)); 2726 2763 PVIRTQBUF pVirtqBuf = NULL; 2727 2764 int rc = virtioCoreR3VirtqAvailBufGet(pDevIns, &pThis->Virtio, pVirtq->uIdx, &pVirtqBuf, true); 2728 2765 if (rc == VERR_NOT_AVAILABLE) 2729 2766 { 2730 Log10Func((" %s%s worker woken. Nothing found in queue/n", pThis->szInst, pVirtq->szName));2767 Log10Func(("[%s] %s worker woken. Nothing found in queue/n", pThis->szInst, pVirtq->szName)); 2731 2768 continue; 2732 2769 } … … 2736 2773 else /* Must be Tx queue */ 2737 2774 { 2738 Log10Func((" %s%s worker woken. Virtq has data to transmit\n", pThis->szInst, pVirtq->szName));2775 Log10Func(("[%s] %s worker woken. Virtq has data to transmit\n", pThis->szInst, pVirtq->szName)); 2739 2776 virtioNetR3TransmitPendingPackets(pDevIns, pThis, pThisCC, pVirtq, false /* fOnWorkerThread */); 2740 2777 } … … 2746 2783 */ 2747 2784 } 2748 Log10((" %s%s worker thread exiting\n", pThis->szInst, pVirtq->szName));2785 Log10(("[%s] %s worker thread exiting\n", pThis->szInst, pVirtq->szName)); 2749 2786 return VINF_SUCCESS; 2750 2787 } … … 2753 2790 * @callback_method_impl{VIRTIOCORER3,pfnStatusChanged} 2754 2791 */ 2755 static DECLCALLBACK(void) virtioNetR3StatusCh anged(PVIRTIOCORE pVirtio, PVIRTIOCORECC pVirtioCC, uint32_t fVirtioReady)2792 static DECLCALLBACK(void) virtioNetR3StatusChg(PVIRTIOCORE pVirtio, PVIRTIOCORECC pVirtioCC, uint32_t fVirtioReady) 2756 2793 { 2757 2794 PVIRTIONET pThis = RT_FROM_MEMBER(pVirtio, VIRTIONET, Virtio); … … 2762 2799 if (fVirtioReady) 2763 2800 { 2764 LogFunc(("%s VirtIO ready\n-----------------------------------------------------------------------------------------\n", 2765 pThis->szInst)); 2801 Log(("-----------------------------------------------------------------------------------------\n")); 2802 Log(("%-23s: %s *** VirtIO Ready ***\n-----------------------------------------------------------------------------------------\n", 2803 __FUNCTION__, pThis->szInst)); 2766 2804 2767 2805 pThis->fNegotiatedFeatures = virtioCoreGetNegotiatedFeatures(pVirtio); … … 2776 2814 pThis->fResetting = false; 2777 2815 2778 for (unsigned uVirtqNbr = 0; uVirtqNbr < pThis->cVirt Virtqs; uVirtqNbr++)2816 for (unsigned uVirtqNbr = 0; uVirtqNbr < pThis->cVirtqs; uVirtqNbr++) 2779 2817 { 2780 2818 PVIRTIONETVIRTQ pVirtq = &pThis->aVirtqs[uVirtqNbr]; … … 2794 2832 else 2795 2833 { 2796 Log Func(("%s VirtIO is resetting\n", pThis->szInst));2834 Log(("%-23s: %s VirtIO is resetting\n", __FUNCTION__, pThis->szInst)); 2797 2835 2798 2836 pThis->virtioNetConfig.uStatus = pThis->fCableConnected ? VIRTIONET_F_LINK_UP : 0; 2799 Log7 Func(("%s Link is %s\n", pThis->szInst, pThis->fCableConnected ? "up" : "down"));2837 Log7(("%-23s: %s Link is %s\n", __FUNCTION__, pThis->szInst, pThis->fCableConnected ? 
"up" : "down")); 2800 2838 2801 2839 pThis->fPromiscuous = true; … … 2815 2853 pThisCC->pDrv->pfnSetPromiscuousMode(pThisCC->pDrv, true); 2816 2854 2817 for (uint16_t uVirtqNbr = 0; uVirtqNbr < pThis->cVirt Virtqs; uVirtqNbr++)2855 for (uint16_t uVirtqNbr = 0; uVirtqNbr < pThis->cVirtqs; uVirtqNbr++) 2818 2856 pThis->aVirtqs[uVirtqNbr].fAttachedToVirtioCore = false; 2819 2857 } … … 2834 2872 PVIRTIONETCC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PVIRTIONETCC); 2835 2873 2836 Log7Func(("%s\n", pThis->szInst)); 2874 Log7Func(("[%s]\n", pThis->szInst)); 2875 RT_NOREF(pThis); 2876 2837 2877 AssertLogRelReturnVoid(iLUN == 0); 2838 2878 2839 RT_NOREF(pThis);2840 2841 /*2842 * Zero important members.2843 */2844 2879 pThisCC->pDrvBase = NULL; 2845 2880 pThisCC->pDrv = NULL; … … 2858 2893 RT_NOREF(fFlags); 2859 2894 2860 Log7Func((" %s", pThis->szInst));2895 Log7Func(("[%s]", pThis->szInst)); 2861 2896 2862 2897 AssertLogRelReturn(iLUN == 0, VERR_PDM_NO_SUCH_LUN); … … 2873 2908 else if ( rc == VERR_PDM_NO_ATTACHED_DRIVER 2874 2909 || rc == VERR_PDM_CFG_MISSING_DRIVER_NAME) 2875 Log((" %sNo attached driver!\n", pThis->szInst));2910 Log(("[%s] No attached driver!\n", pThis->szInst)); 2876 2911 2877 2912 return rc; … … 2897 2932 { 2898 2933 PVIRTIONETR3 pThisCC = RT_FROM_MEMBER(pInterface, VIRTIONETCC, IBase); 2899 LogFunc(("pInterface=%p %s\n", pInterface, pszIID));2900 2934 PDMIBASE_RETURN_INTERFACE(pszIID, PDMINETWORKDOWN, &pThisCC->INetworkDown); 2901 2935 PDMIBASE_RETURN_INTERFACE(pszIID, PDMINETWORKCONFIG, &pThisCC->INetworkConfig); … … 2915 2949 PVIRTIONETCC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PVIRTIONETCC); 2916 2950 2917 Log((" %sDestroying instance\n", pThis->szInst));2951 Log(("[%s] Destroying instance\n", pThis->szInst)); 2918 2952 2919 2953 if (pThis->hEventRxDescAvail != NIL_SUPSEMEVENT) … … 2945 2979 */ 2946 2980 Log7Func(("PDM device instance: %d\n", iInstance)); 2947 2948 RTStrPrintf(pThis->szInst, sizeof(pThis->szInst), "VNET%d", iInstance); 2949 2950 /** @todo Remove next line (temporary hack used for less logging clutter for single-instance debugging) */ 2951 *pThis->szInst = '\0'; 2981 RTStrPrintf(pThis->szInst, sizeof(pThis->szInst), "virtio-net #%d", iInstance); 2952 2982 2953 2983 pThisCC->pDevIns = pDevIns; 2954 2955 2984 pThisCC->IBase.pfnQueryInterface = virtioNetR3QueryInterface; 2956 2985 pThisCC->ILeds.pfnQueryStatusLed = virtioNetR3QueryStatusLed; … … 2997 3026 pThis->szInst, pThis->cMsLinkUpDelay / 1000)); 2998 3027 2999 Log((" %sLink up delay is set to %u seconds\n", pThis->szInst, pThis->cMsLinkUpDelay / 1000));3028 Log(("[%s] Link up delay is set to %u seconds\n", pThis->szInst, pThis->cMsLinkUpDelay / 1000)); 3000 3029 3001 3030 /* Copy the MAC address configured for the VM to the MMIO accessible Virtio dev-specific config area */ 3002 3031 memcpy(pThis->virtioNetConfig.uMacAddress.au8, pThis->macConfigured.au8, sizeof(pThis->virtioNetConfig.uMacAddress)); /* TBD */ 3003 3032 3033 Log(("Using MAC address for %s: %2x:%2x:%2x:%2x:%2x:%2x\n", pThis->szInst, 3034 pThis->macConfigured.au8[0], pThis->macConfigured.au8[1], pThis->macConfigured.au8[2], 3035 pThis->macConfigured.au8[3], pThis->macConfigured.au8[4], pThis->macConfigured.au8[5])); 3004 3036 3005 3037 LogFunc(("RC=%RTbool R0=%RTbool\n", pDevIns->fRCEnabled, pDevIns->fR0Enabled)); … … 3015 3047 3016 3048 pThisCC->Virtio.pfnVirtqNotified = virtioNetVirtqNotified; 3017 pThisCC->Virtio.pfnStatusChanged = virtioNetR3StatusCh anged;3049 pThisCC->Virtio.pfnStatusChanged = virtioNetR3StatusChg; 3018 3050 
pThisCC->Virtio.pfnDevCapRead = virtioNetR3DevCapRead; 3019 3051 pThisCC->Virtio.pfnDevCapWrite = virtioNetR3DevCapWrite; … … 3033 3065 return PDMDEV_SET_ERROR(pDevIns, rc, N_("Failed to create event semaphore")); 3034 3066 3067 3035 3068 /* Initialize VirtIO core. (pfnStatusChanged callback when both host VirtIO core & guest driver are ready) */ 3036 3069 rc = virtioCoreR3Init(pDevIns, &pThis->Virtio, &pThisCC->Virtio, &VirtioPciParams, pThis->szInst, … … 3044 3077 return PDMDEV_SET_ERROR(pDevIns, rc, N_("virtio-net: Required features not successfully negotiated.")); 3045 3078 3079 3080 /* 3081 * Initialize queues. Due to this being a transitional device (e.g. accommodating both modern 3082 * and legacy drivers), the control queue must be created whether or not the VIRTIO_NET_F_CTRL_VQ 3083 * is negotiated, because legacy drivers are not bound to begin configuration and I/O until 3084 * feature negotiation is complete. In the final analysis, there may be no good reason to 3085 * enforce VIRTIO_NET_F_CTRL_VQ as a prerequisite to handling guest control queue transactions, 3086 * but merely to log violations (e.g. control transactions without feature explicitly enabled), 3087 * once, thus not being strict with regard to misbehaving modern drivers. 3088 */ 3089 3046 3090 pThis->cVirtqPairs = 1; /* default, VirtIO 1.0, 5.1.6.5.5 */ 3047 3048 pThis->cVirtVirtqs += pThis->cVirtqPairs * 2 + 1; 3049 3050 /* Create Link Up Timer */ 3051 rc = PDMDevHlpTimerCreate(pDevIns, TMCLOCK_VIRTUAL, virtioNetR3LinkUpTimer, NULL, 3052 TMTIMER_FLAGS_NO_CRIT_SECT | TMTIMER_FLAGS_NO_RING0, 3053 "VirtioNet Link Up", &pThisCC->hLinkUpTimer); 3054 3055 /* 3056 * Initialize queues. 3057 */ 3091 pThis->cVirtqs += pThis->cVirtqPairs * 2 + 1; 3092 pThis->aVirtqs[CTRLQIDX].fCtlVirtq = true; 3093 3058 3094 virtioNetR3SetVirtqNames(pThis); 3059 pThis->aVirtqs[CTRLQIDX].fCtlVirtq = true; 3060 for (unsigned uVirtqNbr = 0; uVirtqNbr < pThis->cVirtVirtqs; uVirtqNbr++) 3095 for (unsigned uVirtqNbr = 0; uVirtqNbr < pThis->cVirtqs; uVirtqNbr++) 3061 3096 { 3062 3097 PVIRTIONETVIRTQ pVirtq = &pThis->aVirtqs[uVirtqNbr]; … … 3067 3102 pWorkerR3->uIdx = uVirtqNbr; 3068 3103 } 3069 3070 3104 /* 3071 3105 * Create queue workers for life of instance. (I.e. they persist through VirtIO bounces) … … 3075 3109 return PDMDEV_SET_ERROR(pDevIns, rc, N_("Failed to create worker threads")); 3076 3110 3077 3111 /* Create Link Up Timer */ 3112 rc = PDMDevHlpTimerCreate(pDevIns, TMCLOCK_VIRTUAL, virtioNetR3LinkUpTimer, NULL, 3113 TMTIMER_FLAGS_NO_CRIT_SECT | TMTIMER_FLAGS_NO_RING0, 3114 "VirtioNet Link Up", &pThisCC->hLinkUpTimer); 3078 3115 /* 3079 3116 * Attach network driver instance … … 3088 3125 else if ( rc == VERR_PDM_NO_ATTACHED_DRIVER 3089 3126 || rc == VERR_PDM_CFG_MISSING_DRIVER_NAME) 3090 Log(("%s No attached driver!\n", pThis->szInst)); 3091 3127 Log(("[%s] No attached driver!\n", pThis->szInst)); 3092 3128 /* 3093 3129 * Status driver … … 3111 3147 * The /Public/ bits are official and used by session info in the GUI. 
3112 3148 */ 3149 # ifdef VBOX_WITH_STATISTICS 3113 3150 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatReceiveBytes, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, 3114 3151 "Amount of data received", "/Public/NetAdapter/%u/BytesReceived", uStatNo); … … 3124 3161 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatTransmitGSO, STAMTYPE_COUNTER, "Packets/Transmit-Gso", STAMUNIT_COUNT, "Number of sent GSO packets"); 3125 3162 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatTransmitCSum, STAMTYPE_COUNTER, "Packets/Transmit-Csum", STAMUNIT_COUNT, "Number of completed TX checksums"); 3126 # ifdef VBOX_WITH_STATISTICS3127 3163 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatReceive, STAMTYPE_PROFILE, "Receive/Total", STAMUNIT_TICKS_PER_CALL, "Profiling receive"); 3128 3164 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatReceiveStore, STAMTYPE_PROFILE, "Receive/Store", STAMUNIT_TICKS_PER_CALL, "Profiling receive storing"); -
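The bulk of the DevVirtioNet_1_0.cpp changes above add handling for transitional (legacy) guest drivers, where the virtio-net packet header only carries a num_buffers field if VIRTIO_NET_F_MRG_RXBUF was negotiated, while a modern (VirtIO 1.0) driver always receives the full header. A minimal sketch of that size rule follows; the struct layout mirrors VIRTIONETPKTHDR from the diff, but the standalone helper and its bool parameters are assumptions made to keep the example self-contained, not the device's actual API.

```c
/*
 * Size of the virtio-net packet header as seen by the guest. Layout mirrors
 * VIRTIONETPKTHDR in the diff; the helper name and flag parameters are
 * illustrative assumptions, not the device's real interface.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdbool.h>

typedef struct PKTHDR
{
    uint8_t  uFlags;
    uint8_t  uGsoType;
    uint16_t uHdrLen;
    uint16_t uGsoSize;
    uint16_t uChksumStart;
    uint16_t uChksumOffset;
    uint16_t uNumBuffers;  /* not on the wire for legacy drivers w/o MRG_RXBUF */
} PKTHDR;

static size_t pktHdrSize(bool fLegacyDriver, bool fMrgRxBuf)
{
    size_t cbHdr = sizeof(PKTHDR);          /* 12 bytes: the modern header */
    if (fLegacyDriver && !fMrgRxBuf)
        cbHdr -= sizeof(uint16_t);          /* drop the trailing uNumBuffers */
    return cbHdr;                           /* 10 bytes for plain legacy */
}
```

That size then determines how much of the first guest S/G segment must be reserved for the header and whether a GCPhys offset for num_buffers needs to be cached at all, which is why the Rx path above only computes GCPhysPktHdrNumBuffers when the header type is not kVirtioNetLegacyPktHdrWithoutMrgRx.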
trunk/src/VBox/Devices/Storage/DevVirtioSCSI.cpp
r90791 r91703 358 358 { 359 359 R3PTRTYPE(PPDMTHREAD) pThread; /**< pointer to worker thread's handle */ 360 uint16_t auRedoDescs[VIRTQ_ MAX_ENTRIES];/**< List of previously suspended reqs to re-submit */360 uint16_t auRedoDescs[VIRTQ_SIZE];/**< List of previously suspended reqs to re-submit */ 361 361 uint16_t cRedoDescs; /**< Number of redo desc chain head desc idxes in list */ 362 362 } VIRTIOSCSIWORKERR3; … … 457 457 uint32_t fHasT10pi; 458 458 459 /** True if VIRTIO_SCSI_F_ T10_PIwas negotiated */459 /** True if VIRTIO_SCSI_F_HOTPLUG was negotiated */ 460 460 uint32_t fHasHotplug; 461 461 462 /** True if VIRTIO_SCSI_F_ T10_PIwas negotiated */462 /** True if VIRTIO_SCSI_F_INOUT was negotiated */ 463 463 uint32_t fHasInOutBufs; 464 464 465 /** True if VIRTIO_SCSI_F_ T10_PIwas negotiated */465 /** True if VIRTIO_SCSI_F_CHANGE was negotiated */ 466 466 uint32_t fHasLunChange; 467 467 … … 559 559 uint16_t uVirtqNbr; /**< Index of queue this request arrived on */ 560 560 PVIRTQBUF pVirtqBuf; /**< Prepared desc chain pulled from virtq avail ring */ 561 size_t cbDataIn; /**< size of data out buffer*/561 size_t cbDataIn; /**< size of datain buffer */ 562 562 size_t cbDataOut; /**< size of dataout buffer */ 563 563 uint16_t uDataInOff; /**< Fixed size of respHdr + sense (precede datain) */ 564 uint16_t uDataOutOff; /**< Fixed size of re spHdr + sense (precede datain)*/564 uint16_t uDataOutOff; /**< Fixed size of reqhdr + cdb (precede dataout) */ 565 565 uint32_t cbSenseAlloc; /**< Size of sense buffer */ 566 566 size_t cbSenseLen; /**< Receives \# bytes written into sense buffer */ … … 828 828 RTSGSEG aReqSegs[2]; 829 829 830 /* Segment #1: Re questheader*/830 /* Segment #1: Response header*/ 831 831 aReqSegs[0].pvSeg = pRespHdr; 832 832 aReqSegs[0].cbSeg = sizeof(*pRespHdr); … … 1164 1164 */ 1165 1165 size_t const cbReqHdr = sizeof(REQ_CMD_HDR_T) + cbCdb; 1166 AssertReturn(pVirtqBuf ->cbPhysSend >= cbReqHdr, VERR_INVALID_PARAMETER);1166 AssertReturn(pVirtqBuf && pVirtqBuf->cbPhysSend >= cbReqHdr, VERR_INVALID_PARAMETER); 1167 1167 1168 1168 AssertCompile(VIRTIOSCSI_CDB_SIZE_MAX < 4096); … … 1714 1714 /* 1715 1715 * BIOS may change these values. When the OS comes up, and KVM driver accessed 1716 * through the Windows,assumes they are the default size. So as per the VirtIO 1.0 spec,1716 * through Windows, it assumes they are the default size. So as per the VirtIO 1.0 spec, 1717 1717 * 5.6.4, these device configuration values must be set to default upon device reset. 
1718 1718 */ … … 1958 1958 rc = pHlp->pfnSSMGetU16(pSSM, &cReqsRedo); 1959 1959 AssertRCReturn(rc, rc); 1960 AssertReturn(cReqsRedo < VIRTQ_ MAX_ENTRIES,1960 AssertReturn(cReqsRedo < VIRTQ_SIZE, 1961 1961 pHlp->pfnSSMSetLoadError(pSSM, VERR_SSM_DATA_UNIT_FORMAT_CHANGED, RT_SRC_POS, 1962 1962 N_("Bad count of I/O transactions to re-do in saved state (%#x, max %#x - 1)"), 1963 cReqsRedo, VIRTQ_ MAX_ENTRIES));1963 cReqsRedo, VIRTQ_SIZE)); 1964 1964 1965 1965 for (uint16_t uVirtqNbr = VIRTQ_REQ_BASE; uVirtqNbr < VIRTIOSCSI_VIRTQ_CNT; uVirtqNbr++) … … 1982 1982 rc = pHlp->pfnSSMGetU16(pSSM, &idxHead); 1983 1983 AssertRCReturn(rc, rc); 1984 AssertReturn(idxHead < VIRTQ_ MAX_ENTRIES,1984 AssertReturn(idxHead < VIRTQ_SIZE, 1985 1985 pHlp->pfnSSMSetLoadError(pSSM, VERR_SSM_DATA_UNIT_FORMAT_CHANGED, RT_SRC_POS, 1986 1986 N_("Bad queue element index for re-do in saved state (%#x, max %#x)"), 1987 idxHead, VIRTQ_ MAX_ENTRIES- 1));1987 idxHead, VIRTQ_SIZE - 1)); 1988 1988 1989 1989 PVIRTIOSCSIWORKERR3 pWorkerR3 = &pThisCC->aWorkers[uVirtqNbr]; 1990 1990 pWorkerR3->auRedoDescs[pWorkerR3->cRedoDescs++] = idxHead; 1991 pWorkerR3->cRedoDescs %= VIRTQ_ MAX_ENTRIES;1991 pWorkerR3->cRedoDescs %= VIRTQ_SIZE; 1992 1992 } 1993 1993 } -
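The DevVirtioSCSI.cpp changes are mostly the VIRTQ_MAX_ENTRIES to VIRTQ_SIZE rename plus comment corrections, and the rename flows into the saved-state checks: the redo count and every descriptor-chain head index restored from a saved state are validated against the ring size before being replayed. A small sketch of that validation pattern, with illustrative names and an assumed ring size (the real constant lives in the VirtIO core headers), looks like this:

```c
/*
 * Validate descriptor-chain head indices restored from saved state before
 * queueing them for redo. VIRTQ_SIZE and the struct/function names here are
 * assumptions for the sketch; the device defines its own.
 */
#include <stdint.h>
#include <stdbool.h>

#define VIRTQ_SIZE 1024u

typedef struct REDOLIST
{
    uint16_t auRedoDescs[VIRTQ_SIZE];  /* head indices of suspended requests */
    uint16_t cRedoDescs;
} REDOLIST;

static bool redoListAdd(REDOLIST *pList, uint16_t idxHead)
{
    if (idxHead >= VIRTQ_SIZE)         /* reject out-of-range saved-state data */
        return false;
    pList->auRedoDescs[pList->cRedoDescs++] = idxHead;
    pList->cRedoDescs %= VIRTQ_SIZE;   /* same wrap behavior as the load path */
    return true;
}
```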
trunk/src/VBox/Devices/VirtIO/VirtioCore.cpp
r88828 r91703 45 45 #define VIRTQNAME(a_pVirtio, a_uVirtq) ((a_pVirtio)->aVirtqueues[(a_uVirtq)].szName) 46 46 47 48 #define IS_VIRTQ_EMPTY(pDevIns, pVirtio, pVirtq) \ 49 (virtioCoreVirtqAvailCnt(pDevIns, pVirtio, pVirtq) == 0) 50 51 47 52 #define IS_DRIVER_OK(a_pVirtio) ((a_pVirtio)->fDeviceStatus & VIRTIO_STATUS_DRIVER_OK) 48 #define IS_VIRTQ_EMPTY(pDevIns, pVirtio, pVirtq) \ 49 (virtioCoreVirtqAvailBufCount_inline(pDevIns, pVirtio, pVirtq) == 0) 53 #define WAS_DRIVER_OK(a_pVirtio) ((a_pVirtio)->fPrevDeviceStatus & VIRTIO_STATUS_DRIVER_OK) 50 54 51 55 /** … … 149 153 uint32_t idxDesc, PVIRTQ_DESC_T pDesc) 150 154 { 151 AssertMsg( pVirtio->fDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));152 RT_NOREF(pVirtio);153 uint16_t const cVirtqItems = RT_MAX(pVirtq->uSize, 1); /* Make sure to avoid div-by-zero. */ 154 PDMDevHlpPCIPhysRead(pDevIns,155 pVirtq->GCPhysVirtqDesc + sizeof(VIRTQ_DESC_T) * (idxDesc % cVirtqItems),156 pDesc, sizeof(VIRTQ_DESC_T));155 AssertMsg(IS_DRIVER_OK(pVirtio), ("Called with guest driver not ready\n")); 156 uint16_t const cVirtqItems = RT_MAX(pVirtq->uQueueSize, 1); /* Make sure to avoid div-by-zero. */ 157 158 virtioCoreGCPhysRead(pVirtio, pDevIns, 159 pVirtq->GCPhysVirtqDesc + sizeof(VIRTQ_DESC_T) * (idxDesc % cVirtqItems), 160 pDesc, sizeof(VIRTQ_DESC_T)); 157 161 } 158 162 #endif … … 165 169 { 166 170 uint16_t uDescIdx; 167 AssertMsg(pVirtio->fDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n")); 168 RT_NOREF(pVirtio);169 uint16_t const cVirtqItems = RT_MAX(pVirtq->u Size, 1); /* Make sure to avoid div-by-zero. */170 PDMDevHlpPCIPhysRead(pDevIns,171 172 AssertMsg(pVirtio->fLegacyDriver || IS_DRIVER_OK(pVirtio), ("Called with guest driver not ready\n")); 173 uint16_t const cVirtqItems = RT_MAX(pVirtq->uQueueSize, 1); /* Make sure to avoid div-by-zero. 
*/ 174 virtioCoreGCPhysRead(pVirtio, pDevIns, 171 175 pVirtq->GCPhysVirtqAvail + RT_UOFFSETOF_DYN(VIRTQ_AVAIL_T, auRing[availIdx % cVirtqItems]), 172 176 &uDescIdx, sizeof(uDescIdx)); … … 178 182 uint16_t uUsedEventIdx; 179 183 /* VirtIO 1.0 uUsedEventIdx (used_event) immediately follows ring */ 180 AssertMsg(pVirtio->fDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n")); 181 RT_NOREF(pVirtio); 182 PDMDevHlpPCIPhysRead(pDevIns, 183 pVirtq->GCPhysVirtqAvail + RT_UOFFSETOF_DYN(VIRTQ_AVAIL_T, auRing[pVirtq->uSize]), 184 AssertMsg(pVirtio->fLegacyDriver || IS_DRIVER_OK(pVirtio), ("Called with guest driver not ready\n")); 185 virtioCoreGCPhysRead(pVirtio, pDevIns, 186 pVirtq->GCPhysVirtqAvail + RT_UOFFSETOF_DYN(VIRTQ_AVAIL_T, auRing[pVirtq->uQueueSize]), 184 187 &uUsedEventIdx, sizeof(uUsedEventIdx)); 185 188 return uUsedEventIdx; … … 190 193 { 191 194 uint16_t uIdx = 0; 192 AssertMsg(pVirtio->fDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n")); 193 RT_NOREF(pVirtio); 194 PDMDevHlpPCIPhysRead(pDevIns, 195 AssertMsg(pVirtio->fLegacyDriver || IS_DRIVER_OK(pVirtio), ("Called with guest driver not ready\n")); 196 virtioCoreGCPhysRead(pVirtio, pDevIns, 195 197 pVirtq->GCPhysVirtqAvail + RT_UOFFSETOF(VIRTQ_AVAIL_T, uIdx), 196 198 &uIdx, sizeof(uIdx)); … … 201 203 { 202 204 uint16_t fFlags = 0; 203 AssertMsg(pVirtio->fDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n")); 204 RT_NOREF(pVirtio); 205 PDMDevHlpPCIPhysRead(pDevIns, 205 AssertMsg(pVirtio->fLegacyDriver || IS_DRIVER_OK(pVirtio), ("Called with guest driver not ready\n")); 206 virtioCoreGCPhysRead(pVirtio, pDevIns, 206 207 pVirtq->GCPhysVirtqAvail + RT_UOFFSETOF(VIRTQ_AVAIL_T, fFlags), 207 208 &fFlags, sizeof(fFlags)); 209 208 210 return fFlags; 209 211 } … … 220 222 { 221 223 VIRTQ_USED_ELEM_T elem = { uDescIdx, uLen }; 222 AssertMsg(pVirtio->fDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n")); 223 RT_NOREF(pVirtio); 224 uint16_t const cVirtqItems = RT_MAX(pVirtq->uSize, 1); /* Make sure to avoid div-by-zero. */ 225 PDMDevHlpPCIPhysWrite(pDevIns, 224 AssertMsg(pVirtio->fLegacyDriver || IS_DRIVER_OK(pVirtio), ("Called with guest driver not ready\n")); 225 uint16_t const cVirtqItems = RT_MAX(pVirtq->uQueueSize, 1); /* Make sure to avoid div-by-zero. 
*/ 226 virtioCoreGCPhysWrite(pVirtio, pDevIns, 226 227 pVirtq->GCPhysVirtqUsed 227 228 + RT_UOFFSETOF_DYN(VIRTQ_USED_T, aRing[usedIdx % cVirtqItems]), … … 231 232 DECLINLINE(void) virtioWriteUsedRingFlags(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTQUEUE pVirtq, uint16_t fFlags) 232 233 { 233 AssertMsg(pVirtio->fDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n")); 234 RT_NOREF(pVirtio); 234 AssertMsg(pVirtio->fLegacyDriver || IS_DRIVER_OK(pVirtio), ("Called with guest driver not ready\n")); 235 235 RT_UNTRUSTED_VALIDATED_FENCE(); /* VirtIO 1.0, Section 3.2.1.4.1 */ 236 PDMDevHlpPCIPhysWrite(pDevIns,236 virtioCoreGCPhysWrite(pVirtio, pDevIns, 237 237 pVirtq->GCPhysVirtqUsed + RT_UOFFSETOF(VIRTQ_USED_T, fFlags), 238 238 &fFlags, sizeof(fFlags)); … … 242 242 DECLINLINE(void) virtioWriteUsedRingIdx(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTQUEUE pVirtq, uint16_t uIdx) 243 243 { 244 AssertMsg(pVirtio->f DeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));245 RT_ NOREF(pVirtio);246 PDMDevHlpPCIPhysWrite(pDevIns,244 AssertMsg(pVirtio->fLegacyDriver || IS_DRIVER_OK(pVirtio), ("Called with guest driver not ready\n")); 245 RT_UNTRUSTED_VALIDATED_FENCE(); /* VirtIO 1.0, Section 3.2.1.4.1 */ 246 virtioCoreGCPhysWrite(pVirtio, pDevIns, 247 247 pVirtq->GCPhysVirtqUsed + RT_UOFFSETOF(VIRTQ_USED_T, uIdx), 248 248 &uIdx, sizeof(uIdx)); … … 254 254 { 255 255 uint16_t uIdx = 0; 256 AssertMsg(pVirtio->fDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n")); 257 RT_NOREF(pVirtio); 258 PDMDevHlpPCIPhysRead(pDevIns, 256 AssertMsg(pVirtio->fLegacyDriver || IS_DRIVER_OK(pVirtio), ("Called with guest driver not ready\n")); 257 virtioCoreGCPhysRead(pVirtio, pDevIns, 259 258 pVirtq->GCPhysVirtqUsed + RT_UOFFSETOF(VIRTQ_USED_T, uIdx), 260 259 &uIdx, sizeof(uIdx)); … … 265 264 { 266 265 uint16_t fFlags = 0; 267 AssertMsg(pVirtio->fDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n")); 268 RT_NOREF(pVirtio); 269 PDMDevHlpPCIPhysRead(pDevIns, 266 AssertMsg(pVirtio->fLegacyDriver || IS_DRIVER_OK(pVirtio), ("Called with guest driver not ready\n")); 267 virtioCoreGCPhysRead(pVirtio, pDevIns, 270 268 pVirtq->GCPhysVirtqUsed + RT_UOFFSETOF(VIRTQ_USED_T, fFlags), 271 269 &fFlags, sizeof(fFlags)); … … 276 274 { 277 275 /** VirtIO 1.0 uAvailEventIdx (avail_event) immediately follows ring */ 278 AssertMsg(pVirtio->fDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n")); 279 RT_NOREF(pVirtio); 280 PDMDevHlpPCIPhysWrite(pDevIns, 276 AssertMsg(pVirtio->fLegacyDriver || IS_DRIVER_OK(pVirtio), ("Called with guest driver not ready\n")); 277 virtioCoreGCPhysWrite(pVirtio, pDevIns, 281 278 pVirtq->GCPhysVirtqUsed 282 + RT_UOFFSETOF_DYN(VIRTQ_USED_T, aRing[pVirtq->u Size]),279 + RT_UOFFSETOF_DYN(VIRTQ_USED_T, aRing[pVirtq->uQueueSize]), 283 280 &uAvailEventIdx, sizeof(uAvailEventIdx)); 284 281 } 285 282 #endif 286 283 287 DECLINLINE(uint16_t) virtioCoreVirtqAvail BufCount_inline(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTQUEUE pVirtq)284 DECLINLINE(uint16_t) virtioCoreVirtqAvailCnt(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTQUEUE pVirtq) 288 285 { 289 286 uint16_t uIdxActual = virtioReadAvailRingIdx(pDevIns, pVirtio, pVirtq); … … 292 289 293 290 if (uIdxActual < uIdxShadow) 294 uIdxDelta = (uIdxActual + VIRTQ_ MAX_ENTRIES) - uIdxShadow;291 uIdxDelta = (uIdxActual + VIRTQ_SIZE) - uIdxShadow; 295 292 else 296 293 uIdxDelta = uIdxActual - uIdxShadow; 297 294 298 LogFunc(("%s has %u %s 
(idx=%u shadow=%u)\n", 299 pVirtq->szName, uIdxDelta, uIdxDelta == 1 ? "entry" : "entries", 300 uIdxActual, uIdxShadow)); 295 LogFunc(("%s, %u %s\n", 296 pVirtq->szName, uIdxDelta, uIdxDelta == 1 ? "entry" : "entries")); 301 297 302 298 return uIdxDelta; … … 316 312 AssertMsgReturn(uVirtq < RT_ELEMENTS(pVirtio->aVirtqueues), ("uVirtq out of range"), 0); 317 313 PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtq]; 318 if (!IS_DRIVER_OK(pVirtio) || !pVirtq->uEnable) 319 { 320 LogRelFunc(("Driver not ready or queue not enabled\n")); 314 315 if (!IS_DRIVER_OK(pVirtio)) 316 { 317 LogRelFunc(("Driver not ready\n")); 321 318 return 0; 322 319 } 323 324 return virtioCoreVirtqAvailBufCount_inline(pDevIns, pVirtio, pVirtq); 320 if (!pVirtio->fLegacyDriver && !pVirtq->uEnable) 321 { 322 LogRelFunc(("virtq: %d (%s) not enabled\n", uVirtq, VIRTQNAME(pVirtio, uVirtq))); 323 return 0; 324 } 325 326 return virtioCoreVirtqAvailCnt(pDevIns, pVirtio, pVirtq); 325 327 } 326 328 … … 414 416 void virtioCoreGCPhysHexDump(PPDMDEVINS pDevIns, RTGCPHYS GCPhys, uint16_t cb, uint32_t uBase, const char *pszTitle) 415 417 { 418 PVIRTIOCORE pVirtio = PDMDEVINS_2_DATA(pDevIns, PVIRTIOCORE); 416 419 #define ADJCURSOR(cb) pszOut += cb; cbRemain -= cb; 417 420 size_t cbPrint = 0, cbRemain = ((cb / 16) + 1) * 80; … … 431 434 { 432 435 uint32_t idx = row * 16 + col; 433 PDMDevHlpPCIPhysRead(pDevIns, GCPhys + idx, &c, 1);436 virtioCoreGCPhysRead(pVirtio, pDevIns, GCPhys + idx, &c, 1); 434 437 if (idx >= cb) 435 438 cbPrint = RTStrPrintf(pszOut, cbRemain, "-- %s", (col + 1) % 8 ? "" : " "); … … 440 443 for (uint16_t idx = row * 16; idx < row * 16 + 16; idx++) 441 444 { 442 PDMDevHlpPCIPhysRead(pDevIns, GCPhys + idx, &c, 1);445 virtioCoreGCPhysRead(pVirtio, pDevIns, GCPhys + idx, &c, 1); 443 446 cbPrint = RTStrPrintf(pszOut, cbRemain, "%c", (idx >= cb) ? ' ' : (c >= 0x20 && c <= 0x7e ? c : '.')); 444 447 ADJCURSOR(cbPrint); … … 455 458 456 459 /** API function: See header file */ 460 int virtioCoreIsLegacyMode(PVIRTIOCORE pVirtio) 461 { 462 Log12Func(("%s", pVirtio->fLegacyDriver ? "Legacy Guest Driver handling mode\n" : "")); 463 return pVirtio->fLegacyDriver; 464 } 465 466 /** API function: See header file */ 457 467 void virtioCoreLogMappedIoValue(const char *pszFunc, const char *pszMember, uint32_t uMemberSize, 458 468 const void *pv, uint32_t cb, uint32_t uOffset, int fWrite, … … 553 563 int virtioCoreR3VirtqAttach(PVIRTIOCORE pVirtio, uint16_t uVirtq, const char *pcszName) 554 564 { 555 LogFunc((" %s\n", pcszName));565 LogFunc(("Attaching %s to VirtIO core\n", pcszName)); 556 566 PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtq]; 557 567 pVirtq->uVirtq = uVirtq; … … 596 606 bool fUsedNoNotify = virtioReadUsedRingFlags(pDevIns, pVirtio, pVirtq) & VIRTQ_USED_F_NO_NOTIFY; 597 607 598 599 608 pHlp->pfnPrintf(pHlp, " queue enabled: ........... %s\n", pVirtq->uEnable ? "true" : "false"); 600 pHlp->pfnPrintf(pHlp, " size: .................... %d\n", pVirtq->u Size);609 pHlp->pfnPrintf(pHlp, " size: .................... %d\n", pVirtq->uQueueSize); 601 610 pHlp->pfnPrintf(pHlp, " notify offset: ........... %d\n", pVirtq->uNotifyOffset); 602 611 if (pVirtio->fMsiSupport) 603 pHlp->pfnPrintf(pHlp, " MSIX vector: ....... %4.4x\n", pVirtq->uMsix );612 pHlp->pfnPrintf(pHlp, " MSIX vector: ....... 
%4.4x\n", pVirtq->uMsixVector); 604 613 pHlp->pfnPrintf(pHlp, "\n"); 605 614 pHlp->pfnPrintf(pHlp, " avail ring (%d entries):\n", uAvailIdx - uAvailIdxShadow); … … 684 693 PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtq]; 685 694 686 if ( pVirtio->fDeviceStatus & VIRTIO_STATUS_DRIVER_OK)695 if (IS_DRIVER_OK(pVirtio)) 687 696 { 688 697 uint16_t fFlags = virtioReadUsedRingFlags(pVirtio->pDevInsR3, pVirtio, pVirtq); … … 702 711 LogFunc(("\n")); 703 712 pVirtio->fDeviceStatus |= VIRTIO_STATUS_DEVICE_NEEDS_RESET; 704 if (pVirtio->fDeviceStatus & VIRTIO_STATUS_DRIVER_OK) 705 { 706 pVirtio->fGenUpdatePending = true; 713 if (IS_DRIVER_OK(pVirtio)) 714 { 715 if (!pVirtio->fLegacyDriver) 716 pVirtio->fGenUpdatePending = true; 707 717 virtioKick(pVirtio->pDevInsR3, pVirtio, VIRTIO_ISR_DEVICE_CONFIG, pVirtio->uMsixConfig); 708 718 } … … 722 732 PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtq]; 723 733 724 AssertMsgReturn(IS_DRIVER_OK(pVirtio) && pVirtq->uEnable, 725 ("Guest driver not in ready state.\n"), VERR_INVALID_STATE); 734 if (!pVirtio->fLegacyDriver) 735 AssertMsgReturn((pVirtio->fDeviceStatus & VIRTIO_STATUS_DRIVER_OK) && pVirtq->uEnable, 736 ("Guest driver not in ready state.\n"), VERR_INVALID_STATE); 726 737 727 738 if (IS_VIRTQ_EMPTY(pVirtio->pDevInsR3, pVirtio, pVirtq)) … … 747 758 PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtq]; 748 759 749 AssertMsgReturn(IS_DRIVER_OK(pVirtio) && pVirtq->uEnable, 750 ("Guest driver not in ready state.\n"), VERR_INVALID_STATE); 760 if (!pVirtio->fLegacyDriver) 761 AssertMsgReturn((pVirtio->fDeviceStatus & VIRTIO_STATUS_DRIVER_OK) && pVirtq->uEnable, 762 ("Guest driver not in ready state.\n"), VERR_INVALID_STATE); 751 763 752 764 uint16_t uDescIdx = uHeadIdx; 753 765 754 Log6Func(("%s DESC CHAIN: (head ) desc_idx=%u\n", pVirtio->aVirtqueues[uVirtq].szName, uHeadIdx));766 Log6Func(("%s DESC CHAIN: (head idx = %u)\n", pVirtio->aVirtqueues[uVirtq].szName, uHeadIdx)); 755 767 756 768 /* … … 788 800 * the following aborts I/O if breach and employs a simple log throttling algorithm to notify. 789 801 */ 790 if (cSegsIn + cSegsOut >= VIRTQ_ MAX_ENTRIES)802 if (cSegsIn + cSegsOut >= VIRTQ_SIZE) 791 803 { 792 804 static volatile uint32_t s_cMessages = 0; … … 807 819 if (desc.fFlags & VIRTQ_DESC_F_WRITE) 808 820 { 809 Log6Func(("%s IN desc_idx=%u seg=%u addr=%RGp cb=%u\n", pVirtq->szName, uDescIdx, cSegsIn, desc.GCPhysBuf, desc.cb));821 Log6Func(("%s IN idx=%u seg=%u addr=%RGp cb=%u\n", pVirtq->szName, uDescIdx, cSegsIn, desc.GCPhysBuf, desc.cb)); 810 822 cbIn += desc.cb; 811 823 pSeg = &paSegsIn[cSegsIn++]; … … 893 905 AssertMsgReturn(IS_DRIVER_OK(pVirtio), ("Guest driver not in ready state.\n"), VERR_INVALID_STATE); 894 906 895 Log6Func((" Copying client data to %s, desc chain (head desc_idx %d)\n",896 VIRTQNAME(pVirtio, uVirtq), virtioReadUsedRingIdx(pDevIns, pVirtio, pVirtq)));907 Log6Func((" Copying device data to %s (%s guest), desc chain idx %d\n", 908 VIRTQNAME(pVirtio, uVirtq), pVirtio->fLegacyDriver ? "legacy" : "modern", virtioReadUsedRingIdx(pDevIns, pVirtio, pVirtq))); 897 909 898 910 /* Copy s/g buf (virtual memory) to guest phys mem (IN direction). 
*/ … … 910 922 cbCopy = RT_MIN(pSgVirtReturn->cbSegLeft, pSgPhysReturn->cbSegLeft); 911 923 Assert(cbCopy > 0); 912 PDMDevHlpPhysWrite(pDevIns, (RTGCPHYS)pSgPhysReturn->GCPhysCur, pSgVirtReturn->pvSegCur, cbCopy);924 virtioCoreGCPhysWrite(pVirtio, pDevIns, (RTGCPHYS)pSgPhysReturn->GCPhysCur, pSgVirtReturn->pvSegCur, cbCopy); 913 925 RTSgBufAdvance(pSgVirtReturn, cbCopy); 914 926 virtioCoreGCPhysChainAdvance(pSgPhysReturn, cbCopy); … … 922 934 } 923 935 924 /* If this write-ahead crosses threshold where the driver wants to get an event flag it */936 /* If this write-ahead crosses threshold where the driver wants to get an event, flag it */ 925 937 if (pVirtio->uDriverFeatures & VIRTIO_F_EVENT_IDX) 926 938 if (pVirtq->uUsedIdxShadow == virtioReadAvailUsedEvent(pDevIns, pVirtio, pVirtq)) … … 933 945 934 946 if (pSgVirtReturn) 935 Log6Func((".... Copied %zu bytes in %d segs to %u byte buffer, residual=%zu\n", 936 cbTotal - cbRemain, pSgVirtReturn->cSegs, pVirtqBuf->cbPhysReturn, pVirtqBuf->cbPhysReturn - cbTotal)); 937 938 Log6Func(("Write ahead used_idx=%u, %s used_idx=%u\n", 939 pVirtq->uUsedIdxShadow, VIRTQNAME(pVirtio, uVirtq), virtioReadUsedRingIdx(pDevIns, pVirtio, pVirtq))); 947 Log6Func((" ... %d segs, %zu bytes, copied to %u byte buf. residual: %zu bytes\n", 948 pSgVirtReturn->cSegs, cbTotal - cbRemain, pVirtqBuf->cbPhysReturn, pVirtqBuf->cbPhysReturn - cbTotal)); 949 950 Log6Func((" %s used_idx=%u\n", VIRTQNAME(pVirtio, uVirtq), virtioReadUsedRingIdx(pDevIns, pVirtio, pVirtq))); 940 951 941 952 return VINF_SUCCESS; … … 951 962 PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtq]; 952 963 953 AssertMsgReturn(IS_DRIVER_OK(pVirtio) && pVirtq->uEnable, 954 ("Guest driver not in ready state.\n"), VERR_INVALID_STATE); 955 956 Log6Func(("Updating %s used_idx to %u\n", pVirtq->szName, pVirtq->uUsedIdxShadow)); 964 if (!pVirtio->fLegacyDriver) 965 AssertMsgReturn((pVirtio->fDeviceStatus & VIRTIO_STATUS_DRIVER_OK) && pVirtq->uEnable, 966 ("Guest driver not in ready state.\n"), VERR_INVALID_STATE); 967 968 Log6Func((" %s ++used_idx=%u\n", pVirtq->szName, pVirtq->uUsedIdxShadow)); 957 969 958 970 virtioWriteUsedRingIdx(pDevIns, pVirtio, pVirtq, pVirtq->uUsedIdxShadow); … … 976 988 PVIRTIOCORECC pVirtioCC = PDMINS_2_DATA_CC(pDevIns, PVIRTIOCORECC); 977 989 978 979 990 /* See VirtIO 1.0, section 4.1.5.2 It implies that uVirtq and uNotifyIdx should match. 
980 991 * Disregarding this notification may cause throughput to stop, however there's no way to know … … 990 1001 991 1002 Log6Func(("%s (desc chains: %u)\n", pVirtq->szName, 992 virtioCoreVirtqAvail BufCount_inline(pDevIns, pVirtio, pVirtq)));1003 virtioCoreVirtqAvailCnt(pDevIns, pVirtio, pVirtq))); 993 1004 994 1005 /* Inform client */ … … 1027 1038 pVirtq->szName, (uint16_t)virtioReadAvailUsedEvent(pDevIns, pVirtio, pVirtq))); 1028 1039 #endif 1029 virtioKick(pDevIns, pVirtio, VIRTIO_ISR_VIRTQ_INTERRUPT, pVirtq->uMsix );1040 virtioKick(pDevIns, pVirtio, VIRTIO_ISR_VIRTQ_INTERRUPT, pVirtq->uMsixVector); 1030 1041 pVirtq->fUsedRingEvent = false; 1031 1042 return; … … 1041 1052 if (!(virtioReadAvailRingFlags(pDevIns, pVirtio, pVirtq) & VIRTQ_AVAIL_F_NO_INTERRUPT)) 1042 1053 { 1043 virtioKick(pDevIns, pVirtio, VIRTIO_ISR_VIRTQ_INTERRUPT, pVirtq->uMsix );1054 virtioKick(pDevIns, pVirtio, VIRTIO_ISR_VIRTQ_INTERRUPT, pVirtq->uMsixVector); 1044 1055 return; 1045 1056 } … … 1056 1067 * @param uVec MSI-X vector, if enabled 1057 1068 */ 1058 static int virtioKick(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint8_t uCause, uint16_t uMsix tor)1069 static int virtioKick(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint8_t uCause, uint16_t uMsixVector) 1059 1070 { 1060 1071 if (uCause == VIRTIO_ISR_VIRTQ_INTERRUPT) … … 1069 1080 PDMDevHlpPCISetIrq(pDevIns, 0, PDM_IRQ_LEVEL_HIGH); 1070 1081 } 1071 else if (uMsix tor != VIRTIO_MSI_NO_VECTOR)1072 PDMDevHlpPCISetIrq(pDevIns, uMsix tor, 1);1082 else if (uMsixVector != VIRTIO_MSI_NO_VECTOR) 1083 PDMDevHlpPCISetIrq(pDevIns, uMsixVector, 1); 1073 1084 return VINF_SUCCESS; 1074 1085 } … … 1079 1090 * @param pDevIns The device instance. 1080 1091 */ 1081 static void virtioLowerInterrupt(PPDMDEVINS pDevIns, uint16_t uMsix tor)1092 static void virtioLowerInterrupt(PPDMDEVINS pDevIns, uint16_t uMsixVector) 1082 1093 { 1083 1094 PVIRTIOCORE pVirtio = PDMINS_2_DATA(pDevIns, PVIRTIOCORE); 1084 1095 if (!pVirtio->fMsiSupport) 1085 1096 PDMDevHlpPCISetIrq(pDevIns, 0, PDM_IRQ_LEVEL_LOW); 1086 else if (uMsix tor != VIRTIO_MSI_NO_VECTOR)1097 else if (uMsixVector != VIRTIO_MSI_NO_VECTOR) 1087 1098 PDMDevHlpPCISetIrq(pDevIns, pVirtio->uMsixConfig, PDM_IRQ_LEVEL_LOW); 1088 1099 } … … 1094 1105 PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtq]; 1095 1106 1107 pVirtq->uQueueSize = VIRTQ_SIZE; 1108 pVirtq->uEnable = false; 1109 pVirtq->uNotifyOffset = uVirtq; 1110 pVirtq->fUsedRingEvent = false; 1096 1111 pVirtq->uAvailIdxShadow = 0; 1097 1112 pVirtq->uUsedIdxShadow = 0; 1098 pVirtq->uEnable = false; 1099 pVirtq->uSize = VIRTQ_MAX_ENTRIES; 1100 pVirtq->uNotifyOffset = uVirtq; 1101 pVirtq->uMsix = uVirtq + 2; 1102 pVirtq->fUsedRingEvent = false; 1113 pVirtq->uMsixVector = uVirtq + 2; 1103 1114 1104 1115 if (!pVirtio->fMsiSupport) /* VirtIO 1.0, 4.1.4.3 and 4.1.5.1.2 */ 1105 pVirtq->uMsix = VIRTIO_MSI_NO_VECTOR;1106 1107 virtioLowerInterrupt(pVirtio->pDevInsR3, pVirtq->uMsix );1116 pVirtq->uMsixVector = VIRTIO_MSI_NO_VECTOR; 1117 1118 virtioLowerInterrupt(pVirtio->pDevInsR3, pVirtq->uMsixVector); 1108 1119 } 1109 1120 1110 1121 static void virtioResetDevice(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio) 1111 1122 { 1112 Log2Func(("\n")); 1123 LogFunc(("Resetting device VirtIO state\n")); 1124 pVirtio->fLegacyDriver = 1; /* Assume this. 
Cleared if VIRTIO_F_VERSION_1 feature ack'd */ 1113 1125 pVirtio->uDeviceFeaturesSelect = 0; 1114 1126 pVirtio->uDriverFeaturesSelect = 0; … … 1123 1135 virtioLowerInterrupt(pDevIns, pVirtio->uMsixConfig); 1124 1136 for (int i = 0; i < VIRTQ_MAX_COUNT; i++) 1125 virtioLowerInterrupt(pDevIns, pVirtio->aVirtqueues[i].uMsix );1137 virtioLowerInterrupt(pDevIns, pVirtio->aVirtqueues[i].uMsixVector); 1126 1138 } 1127 1139 … … 1139 1151 static void virtioGuestR3WasReset(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTIOCORECC pVirtioCC) 1140 1152 { 1141 Log Func(("Guest reset the device\n"));1153 Log(("%-23s: Guest reset the device\n", __FUNCTION__)); 1142 1154 1143 1155 /* Let the client know */ 1144 pVirtioCC->pfnStatusChanged(pVirtio, pVirtioCC, 0 );1156 pVirtioCC->pfnStatusChanged(pVirtio, pVirtioCC, 0 /* fDriverOk */); 1145 1157 virtioResetDevice(pDevIns, pVirtio); 1146 1158 } … … 1210 1222 case 1: 1211 1223 memcpy((char *)&pVirtio->uDriverFeatures + sizeof(uint32_t), pv, cb); 1224 if (pVirtio->uDriverFeatures & VIRTIO_F_VERSION_1) 1225 pVirtio->fLegacyDriver = 0; 1212 1226 VIRTIO_DEV_CONFIG_LOG_ACCESS(uDriverFeatures, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess + sizeof(uint32_t)); 1213 1227 break; … … 1218 1232 } 1219 1233 } 1220 /* Guest READ pCommonCfg->udriverFeatures */1234 else /* Guest READ pCommonCfg->udriverFeatures */ 1221 1235 { 1222 1236 switch (pVirtio->uDriverFeaturesSelect) … … 1262 1276 char szOut[80] = { 0 }; 1263 1277 virtioCoreFormatDeviceStatus(pVirtio->fDeviceStatus, szOut, sizeof(szOut)); 1264 Log Func(("Guest wrote fDeviceStatus ................ (%s)\n", szOut));1278 Log(("%-23s: Guest wrote fDeviceStatus ................ (%s)\n", __FUNCTION__, szOut)); 1265 1279 } 1266 bool const fStatusChanged = 1267 (pVirtio->fDeviceStatus & VIRTIO_STATUS_DRIVER_OK) != (pVirtio->uPrevDeviceStatus & VIRTIO_STATUS_DRIVER_OK); 1280 bool const fStatusChanged = IS_DRIVER_OK(pVirtio) != WAS_DRIVER_OK(pVirtio); 1268 1281 1269 1282 if (fDeviceReset || fStatusChanged) … … 1272 1285 /* Since VirtIO status changes are cumbersome by nature, e.g. not a benchmark priority, 1273 1286 * handle the rest in R3 to facilitate logging or whatever dev-specific client needs to do */ 1274 Log6 Func(("RING0 => RING3 (demote)\n"));1287 Log6(("%-23s: RING0 => RING3 (demote)\n", __FUNCTION__)); 1275 1288 return VINF_IOM_R3_MMIO_WRITE; 1276 1289 #endif … … 1285 1298 1286 1299 if (fStatusChanged) 1287 pVirtioCC->pfnStatusChanged(pVirtio, pVirtioCC, pVirtio->fDeviceStatus & VIRTIO_STATUS_DRIVER_OK);1300 pVirtioCC->pfnStatusChanged(pVirtio, pVirtioCC, IS_DRIVER_OK(pVirtio)); 1288 1301 #endif 1289 1302 /* 1290 1303 * Save the current status for the next write so we can see what changed. 
1291 1304 */ 1292 pVirtio-> uPrevDeviceStatus = pVirtio->fDeviceStatus;1305 pVirtio->fPrevDeviceStatus = pVirtio->fDeviceStatus; 1293 1306 } 1294 1307 else /* Guest READ pCommonCfg->fDeviceStatus */ … … 1329 1342 VIRTIO_DEV_CONFIG_ACCESS_INDEXED( GCPhysVirtqUsed, uVirtq, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess, pVirtio->aVirtqueues); 1330 1343 else 1331 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( u Size,VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))1332 VIRTIO_DEV_CONFIG_ACCESS_INDEXED( u Size,uVirtq, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess, pVirtio->aVirtqueues);1344 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uQueueSize, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess)) 1345 VIRTIO_DEV_CONFIG_ACCESS_INDEXED( uQueueSize, uVirtq, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess, pVirtio->aVirtqueues); 1333 1346 else 1334 1347 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uEnable, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess)) … … 1338 1351 VIRTIO_DEV_CONFIG_ACCESS_INDEXED( uNotifyOffset, uVirtq, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess, pVirtio->aVirtqueues); 1339 1352 else 1340 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uMsix ,VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))1341 VIRTIO_DEV_CONFIG_ACCESS_INDEXED( uMsix ,uVirtq, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess, pVirtio->aVirtqueues);1353 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uMsixVector, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess)) 1354 VIRTIO_DEV_CONFIG_ACCESS_INDEXED( uMsixVector, uVirtq, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess, pVirtio->aVirtqueues); 1342 1355 else 1343 1356 { … … 1352 1365 return rc; 1353 1366 } 1367 1368 /** 1369 * @callback_method_impl{FNIOMIOPORTNEWIN) 1370 * 1371 * This I/O handler exists only to handle access from legacy drivers. 1372 */ 1373 1374 static DECLCALLBACK(VBOXSTRICTRC) virtioLegacyIOPortIn(PPDMDEVINS pDevIns, void *pvUser, RTIOPORT offPort, uint32_t *pu32, unsigned cb) 1375 { 1376 1377 PVIRTIOCORE pVirtio = PDMINS_2_DATA(pDevIns, PVIRTIOCORE); 1378 STAM_PROFILE_ADV_START(&pVirtio->CTX_SUFF(StatRead), a); 1379 1380 RT_NOREF(pvUser); 1381 // LogFunc((" Read from port offset=%RTiop cb=%#x\n", offPort, cb)); 1382 1383 void *pv = pu32; /* To use existing macros */ 1384 int fWrite = 0; /* To use existing macros */ 1385 1386 uint16_t uVirtq = pVirtio->uVirtqSelect; 1387 1388 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER(uDeviceFeatures, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort)) 1389 { 1390 uint32_t val = pVirtio->uDeviceFeatures & UINT32_C(0xffffffff); 1391 memcpy(pu32, &val, cb); 1392 VIRTIO_DEV_CONFIG_LOG_ACCESS(uDeviceFeatures, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort); 1393 } 1394 else 1395 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER(uDriverFeatures, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort)) 1396 { 1397 uint32_t val = pVirtio->uDriverFeatures & 0xffffffff; 1398 memcpy(pu32, &val, cb); 1399 VIRTIO_DEV_CONFIG_LOG_ACCESS(uDriverFeatures, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort); 1400 } 1401 else 1402 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER(fDeviceStatus, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort)) 1403 { 1404 *(uint8_t *)pu32 = pVirtio->fDeviceStatus; 1405 1406 if (LogIs7Enabled()) 1407 { 1408 char szOut[80] = { 0 }; 1409 virtioCoreFormatDeviceStatus(pVirtio->fDeviceStatus, szOut, sizeof(szOut)); 1410 Log(("%-23s: Guest read fDeviceStatus ................ 
(%s)\n", __FUNCTION__, szOut)); 1411 } 1412 } 1413 else 1414 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER(fIsrStatus, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort)) 1415 { 1416 ASSERT_GUEST_MSG(cb == 1, ("%d\n", cb)); 1417 *(uint8_t *)pu32 = pVirtio->uISR; 1418 pVirtio->uISR = 0; 1419 virtioLowerInterrupt( pDevIns, 0); 1420 Log((" ISR read and cleared\n")); 1421 } 1422 else 1423 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uVirtqSelect, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort)) 1424 VIRTIO_DEV_CONFIG_ACCESS( uVirtqSelect, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort, pVirtio); 1425 else 1426 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uVirtqPfn, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort)) 1427 { 1428 PVIRTQUEUE pVirtQueue = &pVirtio->aVirtqueues[uVirtq]; 1429 *pu32 = pVirtQueue->GCPhysVirtqDesc >> PAGE_SHIFT; 1430 Log(("%-23s: Guest read uVirtqPfn .................... %#x\n", __FUNCTION__, *pu32)); 1431 } 1432 else 1433 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uQueueSize, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort)) 1434 VIRTIO_DEV_CONFIG_ACCESS_INDEXED( uQueueSize, uVirtq, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort, pVirtio->aVirtqueues); 1435 else 1436 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uQueueNotify, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort)) 1437 VIRTIO_DEV_CONFIG_ACCESS( uQueueNotify, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort, pVirtio); 1438 #if LEGACY_MSIX_SUPPORTED 1439 else 1440 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uMsixConfig, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort)) 1441 VIRTIO_DEV_CONFIG_ACCESS( uMsixConfig, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort, pVirtio); 1442 else 1443 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uMsixVector, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort)) 1444 VIRTIO_DEV_CONFIG_ACCESS_INDEXED( uMsixVector, uVirtq, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort, pVirtio->aVirtqueues); 1445 #endif 1446 else if (offPort >= sizeof(VIRTIO_LEGACY_PCI_COMMON_CFG_T)) 1447 { 1448 STAM_PROFILE_ADV_STOP(&pVirtio->CTX_SUFF(StatRead), a); 1449 #if IN_RING3 1450 /* Access device-specific configuration */ 1451 PVIRTIOCORECC pVirtioCC = PDMINS_2_DATA_CC(pDevIns, PVIRTIOCORECC); 1452 int rc = pVirtioCC->pfnDevCapRead(pDevIns, offPort - sizeof(VIRTIO_LEGACY_PCI_COMMON_CFG_T), pv, cb); 1453 return rc; 1454 #else 1455 return VINF_IOM_R3_IOPORT_READ; 1456 #endif 1457 } 1458 else 1459 { 1460 STAM_PROFILE_ADV_STOP(&pVirtio->CTX_SUFF(StatRead), a); 1461 Log2Func(("Bad guest read access to virtio_legacy_pci_common_cfg: offset=%#x, cb=%x\n", 1462 offPort, cb)); 1463 int rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, 1464 "virtioLegacyIOPortIn: no valid port at offset offset=%RTiop cb=%#x\n", offPort, cb); 1465 return rc; 1466 } 1467 1468 STAM_PROFILE_ADV_STOP(&pVirtio->CTX_SUFF(StatRead), a); 1469 return VINF_SUCCESS; 1470 } 1471 1472 1473 /** 1474 * @callback_method_impl{ * @callback_method_impl{FNIOMIOPORTNEWOUT} 1475 * 1476 * This I/O Port interface exists only to handle access from legacy drivers. 
1477 */ 1478 static DECLCALLBACK(VBOXSTRICTRC) virtioLegacyIOPortOut(PPDMDEVINS pDevIns, void *pvUser, RTIOPORT offPort, uint32_t u32, unsigned cb) 1479 { 1480 PVIRTIOCORE pVirtio = PDMINS_2_DATA(pDevIns, PVIRTIOCORE); 1481 STAM_PROFILE_ADV_START(&pVirtio->CTX_SUFF(StatWrite), a); 1482 RT_NOREF(pvUser); 1483 1484 uint16_t uVirtq = pVirtio->uVirtqSelect; 1485 uint32_t u32OnStack = u32; /* allows us to use this impl's MMIO parsing macros */ 1486 void *pv = &u32OnStack; /* To use existing macros */ 1487 int fWrite = 1; /* To use existing macros */ 1488 1489 // LogFunc(("Write to port offset=%RTiop, cb=%#x, u32=%#x\n", offPort, cb, u32)); 1490 1491 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uVirtqSelect, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort)) 1492 VIRTIO_DEV_CONFIG_ACCESS( uVirtqSelect, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort, pVirtio); 1493 else 1494 #if LEGACY_MSIX_SUPPORTED 1495 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uMsixConfig, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort)) 1496 VIRTIO_DEV_CONFIG_ACCESS( uMsixConfig, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort, pVirtio); 1497 else 1498 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uMsixVector, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort)) 1499 VIRTIO_DEV_CONFIG_ACCESS_INDEXED( uMsixVector, uVirtq, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort, pVirtio->aVirtqueues); 1500 else 1501 #endif 1502 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER(uDeviceFeatures, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort)) 1503 { 1504 /* Check to see if guest acknowledged unsupported features */ 1505 VIRTIO_DEV_CONFIG_LOG_ACCESS(uDeviceFeatures, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort); 1506 LogFunc(("... WARNING: Guest attempted to write readonly virtio_pci_common_cfg.device_feature (ignoring)\n")); 1507 STAM_PROFILE_ADV_STOP(&pVirtio->CTX_SUFF(StatWrite), a); 1508 return VINF_SUCCESS; 1509 } 1510 else 1511 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER(uDriverFeatures, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort)) 1512 { 1513 memcpy(&pVirtio->uDriverFeatures, pv, cb); 1514 if ((pVirtio->uDriverFeatures & ~VIRTIO_DEV_INDEPENDENT_LEGACY_FEATURES_OFFERED) == 0) 1515 { 1516 Log(("Guest asked for features host does not support! (host=%x guest=%x)\n", 1517 VIRTIO_DEV_INDEPENDENT_LEGACY_FEATURES_OFFERED, pVirtio->uDriverFeatures)); 1518 pVirtio->uDriverFeatures &= VIRTIO_DEV_INDEPENDENT_LEGACY_FEATURES_OFFERED; 1519 } 1520 VIRTIO_DEV_CONFIG_LOG_ACCESS(uDriverFeatures, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort); 1521 } 1522 else 1523 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER(uQueueSize, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort)) 1524 { 1525 VIRTIO_DEV_CONFIG_LOG_ACCESS(uQueueSize, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort); 1526 LogFunc(("... WARNING: Guest attempted to write readonly device_feature (queue size) (ignoring)\n")); 1527 return VINF_SUCCESS; 1528 } 1529 else 1530 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER(fDeviceStatus, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort)) 1531 { 1532 bool const fDriverInitiatedReset = (pVirtio->fDeviceStatus = (uint8_t)u32) == 0; 1533 bool const fDriverStateImproved = IS_DRIVER_OK(pVirtio) && !WAS_DRIVER_OK(pVirtio); 1534 1535 if (LogIs7Enabled()) 1536 { 1537 char szOut[80] = { 0 }; 1538 virtioCoreFormatDeviceStatus(pVirtio->fDeviceStatus, szOut, sizeof(szOut)); 1539 Log(("%-23s: Guest wrote fDeviceStatus ................ 
(%s)\n", __FUNCTION__, szOut)); 1540 } 1541 1542 if (fDriverStateImproved || fDriverInitiatedReset) 1543 { 1544 #ifdef IN_RING0 1545 Log6(("%-23s: RING0 => RING3 (demote)\n", __FUNCTION__)); 1546 STAM_PROFILE_ADV_STOP(&pVirtio->CTX_SUFF(StatWrite), a); 1547 return VINF_IOM_R3_IOPORT_WRITE; 1548 #endif 1549 } 1550 1551 #ifdef IN_RING3 1552 PVIRTIOCORECC pVirtioCC = PDMINS_2_DATA_CC(pDevIns, PVIRTIOCORECC); 1553 if (fDriverInitiatedReset) 1554 virtioGuestR3WasReset(pDevIns, pVirtio, pVirtioCC); 1555 1556 else if (fDriverStateImproved) 1557 pVirtioCC->pfnStatusChanged(pVirtio, pVirtioCC, 1 /* fDriverOk */); 1558 1559 #endif 1560 pVirtio->fPrevDeviceStatus = pVirtio->fDeviceStatus; 1561 } 1562 else 1563 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER(uVirtqPfn, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort)) 1564 { 1565 PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtq]; 1566 uint64_t uVirtqPfn = (uint64_t)u32; 1567 1568 if (uVirtqPfn) 1569 { 1570 /* Transitional devices calculate ring physical addresses using rigid spec-defined formulae, 1571 * instead of guest conveying respective address of each ring, as "modern" VirtIO drivers do, 1572 * thus there is no virtq PFN or single base queue address stored in instance data for 1573 * this transitional device, but rather it is derived, when read back, from GCPhysVirtqDesc */ 1574 1575 pVirtq->GCPhysVirtqDesc = uVirtqPfn * VIRTIO_PAGE_SIZE; 1576 pVirtq->GCPhysVirtqAvail = pVirtq->GCPhysVirtqDesc + sizeof(VIRTQ_DESC_T) * pVirtq->uQueueSize; 1577 pVirtq->GCPhysVirtqUsed = 1578 RT_ALIGN(pVirtq->GCPhysVirtqAvail + RT_UOFFSETOF_DYN(VIRTQ_AVAIL_T, auRing[pVirtq->uQueueSize]), VIRTIO_PAGE_SIZE); 1579 } 1580 else 1581 { 1582 /* Don't set ring addresses for queue (to meaningless values), when guest resets the virtq's PFN */ 1583 pVirtq->GCPhysVirtqDesc = 0; 1584 pVirtq->GCPhysVirtqAvail = 0; 1585 pVirtq->GCPhysVirtqUsed = 0; 1586 } 1587 Log(("%-23s: Guest wrote uVirtqPfn .................... %#x:\n" 1588 "%68s... %p -> GCPhysVirtqDesc\n%68s... %p -> GCPhysVirtqAvail\n%68s... %p -> GCPhysVirtqUsed\n", 1589 __FUNCTION__, u32, " ", pVirtq->GCPhysVirtqDesc, " ", pVirtq->GCPhysVirtqAvail, " ", pVirtq->GCPhysVirtqUsed)); 1590 } 1591 else 1592 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER(uQueueNotify, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort)) 1593 { 1594 #ifdef IN_RING3 1595 ASSERT_GUEST_MSG(cb == 2, ("cb=%u\n", cb)); 1596 pVirtio->uQueueNotify = u32 & 0xFFFF; 1597 if (uVirtq < VIRTQ_MAX_COUNT) 1598 { 1599 RT_UNTRUSTED_VALIDATED_FENCE(); 1600 1601 /* Need to check that queue is configured. Legacy spec didn't have a queue enabled flag */ 1602 if (pVirtio->aVirtqueues[pVirtio->uQueueNotify].GCPhysVirtqDesc) 1603 virtioCoreVirtqNotified(pDevIns, pVirtio, pVirtio->uQueueNotify, pVirtio->uQueueNotify /* uNotifyIdx */); 1604 else 1605 Log(("The queue (#%d) being notified has not been initialized.\n", pVirtio->uQueueNotify)); 1606 } 1607 else 1608 Log(("Invalid queue number (%d)\n", pVirtio->uQueueNotify)); 1609 #else 1610 STAM_PROFILE_ADV_STOP(&pVirtio->CTX_SUFF(StatWrite), a); 1611 return VINF_IOM_R3_IOPORT_WRITE; 1612 #endif 1613 } 1614 else 1615 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER(fIsrStatus, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort)) 1616 { 1617 VIRTIO_DEV_CONFIG_LOG_ACCESS( fIsrStatus, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort); 1618 LogFunc(("... 
WARNING: Guest attempted to write readonly device_feature (ISR status) (ignoring)\n")); 1619 STAM_PROFILE_ADV_STOP(&pVirtio->CTX_SUFF(StatWrite), a); 1620 return VINF_SUCCESS; 1621 } 1622 else if (offPort >= sizeof(VIRTIO_LEGACY_PCI_COMMON_CFG_T)) 1623 { 1624 #if IN_RING3 1625 1626 /* Access device-specific configuration */ 1627 PVIRTIOCORECC pVirtioCC = PDMINS_2_DATA_CC(pDevIns, PVIRTIOCORECC); 1628 return pVirtioCC->pfnDevCapWrite(pDevIns, offPort - sizeof(VIRTIO_LEGACY_PCI_COMMON_CFG_T), pv, cb); 1629 #else 1630 STAM_PROFILE_ADV_STOP(&pVirtio->CTX_SUFF(StatWrite), a); 1631 return VINF_IOM_R3_IOPORT_WRITE; 1632 #endif 1633 } 1634 else 1635 { 1636 Log2Func(("Bad guest write access to virtio_legacy_pci_common_cfg: offset=%#x, cb=0x%x\n", 1637 offPort, cb)); 1638 STAM_PROFILE_ADV_STOP(&pVirtio->CTX_SUFF(StatWrite), a); 1639 int rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, 1640 "virtioLegacyIOPortOut: no valid port at offset offset=%RTiop cb=0x%#x\n", offPort, cb); 1641 return rc; 1642 } 1643 1644 RT_NOREF(uVirtq); 1645 STAM_PROFILE_ADV_STOP(&pVirtio->CTX_SUFF(StatWrite), a); 1646 return VINF_SUCCESS; 1647 } 1648 1354 1649 1355 1650 /** … … 1368 1663 AssertReturn(cb == 1 || cb == 2 || cb == 4, VERR_INVALID_PARAMETER); 1369 1664 Assert(pVirtio == (PVIRTIOCORE)pvUser); RT_NOREF(pvUser); 1665 STAM_PROFILE_ADV_START(&pVirtio->CTX_SUFF(StatRead), a); 1666 1370 1667 1371 1668 uint32_t uOffset; … … 1400 1697 1401 1698 virtioLowerInterrupt(pDevIns, 0); 1699 STAM_PROFILE_ADV_STOP(&pVirtio->CTX_SUFF(StatRead), a); 1402 1700 return rcStrict; 1403 1701 #else 1702 STAM_PROFILE_ADV_STOP(&pVirtio->CTX_SUFF(StatRead), a); 1404 1703 return VINF_IOM_R3_MMIO_READ; 1405 1704 #endif … … 1415 1714 pVirtio->uISR = 0; /* VirtIO spec requires reads of ISR to clear it */ 1416 1715 virtioLowerInterrupt(pDevIns, 0); 1716 STAM_PROFILE_ADV_STOP(&pVirtio->CTX_SUFF(StatRead), a); 1417 1717 return VINF_SUCCESS; 1418 1718 } 1419 1719 1420 ASSERT_GUEST_MSG_FAILED(("Bad read access to mapped capabilities region: off=%RGp cb=%u\n", 1421 off, cb)); 1422 return VINF_IOM_MMIO_UNUSED_00; 1720 ASSERT_GUEST_MSG_FAILED(("Bad read access to mapped capabilities region: off=%RGp cb=%u\n", off, cb)); 1721 STAM_PROFILE_ADV_STOP(&pVirtio->CTX_SUFF(StatRead), a); 1722 int rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, 1723 "virtioMmioRead: Bad MMIO access to capabilities, offset=%RTiop cb=%08x\n", off, cb); 1724 return rc; 1423 1725 } 1424 1726 … … 1435 1737 PVIRTIOCORE pVirtio = PDMINS_2_DATA(pDevIns, PVIRTIOCORE); 1436 1738 PVIRTIOCORECC pVirtioCC = PDMINS_2_DATA_CC(pDevIns, PVIRTIOCORECC); 1437 1438 1739 AssertReturn(cb == 1 || cb == 2 || cb == 4, VERR_INVALID_PARAMETER); 1439 1440 1740 Assert(pVirtio == (PVIRTIOCORE)pvUser); RT_NOREF(pvUser); 1741 STAM_PROFILE_ADV_START(&pVirtio->CTX_SUFF(StatWrite), a); 1742 1441 1743 uint32_t uOffset; 1442 1744 if (MATCHES_VIRTIO_CAP_STRUCT(off, cb, uOffset, pVirtio->LocDeviceCap)) … … 1446 1748 * Foreward this MMIO write access for client to deal with. 
1447 1749 */ 1750 STAM_PROFILE_ADV_STOP(&pVirtio->CTX_SUFF(StatWrite), a); 1448 1751 return pVirtioCC->pfnDevCapWrite(pDevIns, uOffset, pv, cb); 1449 1752 #else 1753 STAM_PROFILE_ADV_STOP(&pVirtio->CTX_SUFF(StatWrite), a); 1450 1754 return VINF_IOM_R3_MMIO_WRITE; 1451 1755 #endif … … 1453 1757 1454 1758 if (MATCHES_VIRTIO_CAP_STRUCT(off, cb, uOffset, pVirtio->LocCommonCfgCap)) 1759 { 1760 STAM_PROFILE_ADV_STOP(&pVirtio->CTX_SUFF(StatWrite), a); 1455 1761 return virtioCommonCfgAccessed(pDevIns, pVirtio, pVirtioCC, true /* fWrite */, uOffset, cb, (void *)pv); 1762 } 1456 1763 1457 1764 if (MATCHES_VIRTIO_CAP_STRUCT(off, cb, uOffset, pVirtio->LocIsrCap) && cb == sizeof(uint8_t)) … … 1462 1769 pVirtio->uISR & VIRTIO_ISR_VIRTQ_INTERRUPT, 1463 1770 RT_BOOL(pVirtio->uISR & VIRTIO_ISR_DEVICE_CONFIG))); 1771 STAM_PROFILE_ADV_STOP(&pVirtio->CTX_SUFF(StatWrite), a); 1464 1772 return VINF_SUCCESS; 1465 1773 } … … 1469 1777 { 1470 1778 virtioCoreVirtqNotified(pDevIns, pVirtio, uOffset / VIRTIO_NOTIFY_OFFSET_MULTIPLIER, *(uint16_t *)pv); 1779 STAM_PROFILE_ADV_STOP(&pVirtio->CTX_SUFF(StatWrite), a); 1471 1780 return VINF_SUCCESS; 1472 1781 } 1473 1782 1474 1783 ASSERT_GUEST_MSG_FAILED(("Bad write access to mapped capabilities region: off=%RGp pv=%#p{%.*Rhxs} cb=%u\n", off, pv, cb, pv, cb)); 1475 return VINF_SUCCESS; 1784 STAM_PROFILE_ADV_STOP(&pVirtio->CTX_SUFF(StatWrite), a); 1785 int rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, 1786 "virtioMmioRead: Bad MMIO access to capabilities, offset=%RTiop cb=%08x\n", off, cb); 1787 return rc; 1476 1788 } 1477 1789 … … 1494 1806 * whereby the guest driver can specify a bar, offset, and length via the PCI configuration space 1495 1807 * (the virtio_pci_cfg_cap capability), and access data items. 1808 * This is used by BIOS to gain early boot access to the the storage device. 1496 1809 */ 1497 1810 struct virtio_pci_cap *pPciCap = &pVirtioCC->pPciCfgCap->pciCap; … … 1535 1848 /* VirtIO 1.0 spec section 4.1.4.7 describes a required alternative access capability 1536 1849 * whereby the guest driver can specify a bar, offset, and length via the PCI configuration space 1537 * (the virtio_pci_cfg_cap capability), and access data items. */ 1850 * (the virtio_pci_cfg_cap capability), and access data items. 
1851 * This is used by BIOS to gain early boot access to the the storage device.*/ 1538 1852 1539 1853 struct virtio_pci_cap *pPciCap = &pVirtioCC->pPciCfgCap->pciCap; … … 1593 1907 pHlp->pfnSSMPutGCPhys64( pSSM, pVirtq->GCPhysVirtqUsed); 1594 1908 pHlp->pfnSSMPutU16( pSSM, pVirtq->uNotifyOffset); 1595 pHlp->pfnSSMPutU16( pSSM, pVirtq->uMsix );1909 pHlp->pfnSSMPutU16( pSSM, pVirtq->uMsixVector); 1596 1910 pHlp->pfnSSMPutU16( pSSM, pVirtq->uEnable); 1597 pHlp->pfnSSMPutU16( pSSM, pVirtq->u Size);1911 pHlp->pfnSSMPutU16( pSSM, pVirtq->uQueueSize); 1598 1912 pHlp->pfnSSMPutU16( pSSM, pVirtq->uAvailIdxShadow); 1599 1913 pHlp->pfnSSMPutU16( pSSM, pVirtq->uUsedIdxShadow); … … 1653 1967 pHlp->pfnSSMGetGCPhys64( pSSM, &pVirtq->GCPhysVirtqUsed); 1654 1968 pHlp->pfnSSMGetU16( pSSM, &pVirtq->uNotifyOffset); 1655 pHlp->pfnSSMGetU16( pSSM, &pVirtq->uMsix );1969 pHlp->pfnSSMGetU16( pSSM, &pVirtq->uMsixVector); 1656 1970 pHlp->pfnSSMGetU16( pSSM, &pVirtq->uEnable); 1657 pHlp->pfnSSMGetU16( pSSM, &pVirtq->u Size);1971 pHlp->pfnSSMGetU16( pSSM, &pVirtq->uQueueSize); 1658 1972 pHlp->pfnSSMGetU16( pSSM, &pVirtq->uAvailIdxShadow); 1659 1973 pHlp->pfnSSMGetU16( pSSM, &pVirtq->uUsedIdxShadow); … … 1695 2009 for (int uVirtq = 0; uVirtq < VIRTQ_MAX_COUNT; uVirtq++) 1696 2010 { 1697 if ( pVirtio->aVirtqueues[uVirtq].uEnable)2011 if (!pVirtio->fLegacyDriver || pVirtio->aVirtqueues[uVirtq].uEnable) 1698 2012 virtioCoreNotifyGuestDriver(pVirtio->pDevInsR3, pVirtio, uVirtq); 1699 2013 } … … 1719 2033 pVirtioCC->pbPrevDevSpecificCfg = NULL; 1720 2034 } 2035 1721 2036 RT_NOREF(pDevIns, pVirtio); 1722 2037 } … … 1726 2041 const char *pcszInstance, uint64_t fDevSpecificFeatures, void *pvDevSpecificCfg, uint16_t cbDevSpecificCfg) 1727 2042 { 2043 2044 1728 2045 /* 1729 2046 * The pVirtio state must be the first member of the shared device instance … … 1740 2057 AssertReturn(pVirtioCC->pfnStatusChanged, VERR_INVALID_POINTER); 1741 2058 AssertReturn(pVirtioCC->pfnVirtqNotified, VERR_INVALID_POINTER); 1742 1743 #if 0 /* Until pdmR3DvHlp_PCISetIrq() impl is fixed and Assert that limits vec to 0 is removed */ 2059 AssertReturn(VIRTQ_SIZE > 0 && VIRTQ_SIZE <= 32768, VERR_OUT_OF_RANGE); /* VirtIO specification-defined limit */ 2060 2061 #if 0 /* Until pdmR3DvHlp_PCISetIrq() impl is fixed and Assert that limits vec to 0 is removed 2062 * The legacy MSI support has not been implemented yet 2063 */ 1744 2064 # ifdef VBOX_WITH_MSI_DEVICES 1745 2065 pVirtio->fMsiSupport = true; 1746 2066 # endif 1747 2067 #endif 2068 1748 2069 1749 2070 /* … … 1756 2077 1757 2078 RTStrCopy(pVirtio->szInstance, sizeof(pVirtio->szInstance), pcszInstance); 1758 1759 pVirtio->fDeviceStatus = 0;1760 2079 pVirtioCC->cbDevSpecificCfg = cbDevSpecificCfg; 1761 2080 pVirtioCC->pbDevSpecificCfg = (uint8_t *)pvDevSpecificCfg; … … 1769 2088 PDMPciDevSetRevisionId(pPciDev, DEVICE_PCI_REVISION_ID_VIRTIO); 1770 2089 PDMPciDevSetVendorId(pPciDev, DEVICE_PCI_VENDOR_ID_VIRTIO); 2090 PDMPciDevSetDeviceId(pPciDev, pPciParams->uDeviceId); 2091 PDMPciDevSetSubSystemId(pPciDev, DEVICE_PCI_NETWORK_SUBSYSTEM); 1771 2092 PDMPciDevSetSubSystemVendorId(pPciDev, DEVICE_PCI_VENDOR_ID_VIRTIO); 1772 PDMPciDevSetDeviceId(pPciDev, pPciParams->uDeviceId);1773 2093 PDMPciDevSetClassBase(pPciDev, pPciParams->uClassBase); 1774 2094 PDMPciDevSetClassSub(pPciDev, pPciParams->uClassSub); 1775 2095 PDMPciDevSetClassProg(pPciDev, pPciParams->uClassProg); 1776 PDMPciDevSetSubSystemId(pPciDev, pPciParams->uSubsystemId);1777 2096 PDMPciDevSetInterruptLine(pPciDev, pPciParams->uInterruptLine); 1778 
2097 PDMPciDevSetInterruptPin(pPciDev, pPciParams->uInterruptPin); … … 1785 2104 rc = PDMDevHlpPCIInterceptConfigAccesses(pDevIns, pPciDev, virtioR3PciConfigRead, virtioR3PciConfigWrite); 1786 2105 AssertRCReturn(rc, rc); 1787 1788 2106 1789 2107 /* Construct & map PCI vendor-specific capabilities for virtio host negotiation with guest driver */ … … 1918 2236 PDMPciDevSetStatus(pPciDev, VBOX_PCI_STATUS_CAP_LIST); 1919 2237 1920 size_t cbSize = RTStrPrintf(pVirtioCC->pcszMmioName, sizeof(pVirtioCC->pcszMmioName), "%s MMIO", pcszInstance);2238 size_t cbSize = RTStrPrintf(pVirtioCC->pcszMmioName, sizeof(pVirtioCC->pcszMmioName), "%s (modern)", pcszInstance); 1921 2239 if (cbSize <= 0) 1922 2240 return PDMDEV_SET_ERROR(pDevIns, rc, N_("virtio: out of memory allocating string")); /* can we put params in this error? */ 1923 2241 1924 /* Note: The Linux driver at drivers/virtio/virtio_pci_modern.c tries to map at least a page for the 2242 cbSize = RTStrPrintf(pVirtioCC->pcszPortIoName, sizeof(pVirtioCC->pcszPortIoName), "%s (legacy)", pcszInstance); 2243 if (cbSize <= 0) 2244 return PDMDEV_SET_ERROR(pDevIns, rc, N_("virtio: out of memory allocating string")); /* can we put params in this error? */ 2245 2246 /* As a transitional device that supports legacy VirtIO drivers, this VirtIO device generic implementation presents 2247 * legacy driver interface in I/O space at BAR0. The following maps the common (e.g. device independent) 2248 * dev config area as well as device-specific dev config area (whose size is passed to init function of this VirtIO 2249 * generic device code) for access via Port I/O, since legacy drivers (e.g. pre VirtIO 1.0) don't use MMIO callbacks. 2250 * (See VirtIO 1.1, Section 4.1.4.8). 2251 */ 2252 rc = PDMDevHlpPCIIORegionCreateIo(pDevIns, VIRTIO_REGION_LEGACY_IO, sizeof(VIRTIO_LEGACY_PCI_COMMON_CFG_T) + cbDevSpecificCfg, 2253 virtioLegacyIOPortOut, virtioLegacyIOPortIn, NULL /*pvUser*/, pVirtioCC->pcszPortIoName, 2254 NULL /*paExtDescs*/, &pVirtio->hLegacyIoPorts); 2255 AssertLogRelRCReturn(rc, PDMDEV_SET_ERROR(pDevIns, rc, N_("virtio: cannot register legacy config in I/O space at BAR0 */"))); 2256 2257 /* Note: The Linux driver at drivers/virtio/virtio_pci_modern.c tries to map at least a page for the 1925 2258 * 'unknown' device-specific capability without querying the capability to figure 1926 2259 * out size, so pad with an extra page 1927 2260 */ 1928 rc = PDMDevHlpPCIIORegionCreateMmio(pDevIns, VIRTIO_REGION_PCI_CAP, RT_ALIGN_32(cbRegion + PAGE_SIZE,PAGE_SIZE),2261 rc = PDMDevHlpPCIIORegionCreateMmio(pDevIns, VIRTIO_REGION_PCI_CAP, RT_ALIGN_32(cbRegion + VIRTIO_PAGE_SIZE, VIRTIO_PAGE_SIZE), 1929 2262 PCI_ADDRESS_SPACE_MEM, virtioMmioWrite, virtioMmioRead, pVirtio, 1930 2263 IOMMMIO_FLAGS_READ_PASSTHRU | IOMMMIO_FLAGS_WRITE_PASSTHRU, … … 1935 2268 * Statistics. 
1936 2269 */ 2270 # ifdef VBOX_WITH_STATISTICS 1937 2271 PDMDevHlpSTAMRegisterF(pDevIns, &pVirtio->StatDescChainsAllocated, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, 1938 2272 "Total number of allocated descriptor chains", "DescChainsAllocated"); … … 1943 2277 PDMDevHlpSTAMRegisterF(pDevIns, &pVirtio->StatDescChainsSegsOut, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, 1944 2278 "Total number of outbound segments", "DescChainsSegsOut"); 2279 PDMDevHlpSTAMRegister(pDevIns, &pVirtio->StatReadR3, STAMTYPE_PROFILE, "IO/ReadR3", STAMUNIT_TICKS_PER_CALL, "Profiling IO reads in R3"); 2280 PDMDevHlpSTAMRegister(pDevIns, &pVirtio->StatReadR0, STAMTYPE_PROFILE, "IO/ReadR0", STAMUNIT_TICKS_PER_CALL, "Profiling IO reads in R0"); 2281 PDMDevHlpSTAMRegister(pDevIns, &pVirtio->StatReadRC, STAMTYPE_PROFILE, "IO/ReadRC", STAMUNIT_TICKS_PER_CALL, "Profiling IO reads in RC"); 2282 PDMDevHlpSTAMRegister(pDevIns, &pVirtio->StatWriteR3, STAMTYPE_PROFILE, "IO/WriteR3", STAMUNIT_TICKS_PER_CALL, "Profiling IO writes in R3"); 2283 PDMDevHlpSTAMRegister(pDevIns, &pVirtio->StatWriteR0, STAMTYPE_PROFILE, "IO/WriteR0", STAMUNIT_TICKS_PER_CALL, "Profiling IO writes in R0"); 2284 PDMDevHlpSTAMRegister(pDevIns, &pVirtio->StatWriteRC, STAMTYPE_PROFILE, "IO/WriteRC", STAMUNIT_TICKS_PER_CALL, "Profiling IO writes in RC"); 2285 # endif /* VBOX_WITH_STATISTICS */ 2286 2287 virtioResetDevice(pDevIns, pVirtio); /* Reset VirtIO specific state of device */ 1945 2288 1946 2289 return VINF_SUCCESS; … … 1967 2310 int rc = PDMDevHlpMmioSetUpContext(pDevIns, pVirtio->hMmioPciCap, virtioMmioWrite, virtioMmioRead, pVirtio); 1968 2311 AssertRCReturn(rc, rc); 2312 2313 rc = PDMDevHlpIoPortSetUpContext(pDevIns, pVirtio->hLegacyIoPorts, virtioLegacyIOPortOut, virtioLegacyIOPortIn, NULL /*pvUser*/); 2314 AssertRCReturn(rc, rc); 2315 1969 2316 return rc; 1970 2317 } -
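
Before moving on to the header changes, a minimal standalone sketch (not VirtualBox source) of the legacy split-ring address arithmetic that the virtioLegacyIOPortOut() hunk above applies when a legacy guest writes uVirtqPfn. The struct, macro, and variable names here (DescStub, ALIGN_UP, the sample PFN and queue size) are simplified illustrative stand-ins, not the VBox VIRTQ_DESC_T / VIRTQ_AVAIL_T definitions; the 16-byte descriptor and 4-byte avail-ring header reflect the VirtIO split-ring layout the real code relies on via sizeof(VIRTQ_DESC_T) and RT_UOFFSETOF_DYN.

    /* Sketch only: derive desc/avail/used ring addresses from a legacy queue PFN. */
    #include <stdint.h>
    #include <stdio.h>

    #define VIRTIO_PAGE_SIZE 4096u                                     /* page size fixed by the legacy spec */
    #define ALIGN_UP(x, a)   (((x) + ((a) - 1)) & ~(uint64_t)((a) - 1))

    typedef struct { uint64_t addr; uint32_t len; uint16_t fFlags, uNext; } DescStub; /* 16 bytes, like a split-ring descriptor */

    int main(void)
    {
        uint32_t uVirtqPfn  = 0x12345;   /* example value a legacy driver might write */
        uint16_t uQueueSize = 256;       /* example queue size */

        uint64_t gcPhysDesc  = (uint64_t)uVirtqPfn * VIRTIO_PAGE_SIZE;
        uint64_t gcPhysAvail = gcPhysDesc + sizeof(DescStub) * uQueueSize;
        /* avail header (flags + idx = 4 bytes) plus 2-byte ring entries, then page-align for the used ring */
        uint64_t gcPhysUsed  = ALIGN_UP(gcPhysAvail + 4u + 2u * uQueueSize, VIRTIO_PAGE_SIZE);

        printf("desc=%#llx avail=%#llx used=%#llx\n",
               (unsigned long long)gcPhysDesc,
               (unsigned long long)gcPhysAvail,
               (unsigned long long)gcPhysUsed);
        return 0;
    }

Running this with the sample values shows why a transitional device never stores a separate PFN: all three ring addresses are recoverable from GCPhysVirtqDesc alone, which is exactly what the uVirtqPfn read path above reads back (GCPhysVirtqDesc >> PAGE_SHIFT).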
trunk/src/VBox/Devices/VirtIO/VirtioCore.h
r90791 r91703 25 25 #include <iprt/ctype.h> 26 26 #include <iprt/sg.h> 27 #include <iprt/types.h> 27 28 28 29 #ifdef LOG_ENABLED … … 48 49 49 50 #define VIRTIO_MAX_VIRTQ_NAME_SIZE 32 /**< Maximum length of a queue name */ 50 #define VIRTQ_ MAX_ENTRIES 1024 /**< Max size (# desc elements) of a virtq*/51 #define VIRTQ_SIZE 1024 /**< Max size (# entries) of a virtq */ 51 52 #define VIRTQ_MAX_COUNT 24 /**< Max queues we allow guest to create */ 52 53 #define VIRTIO_NOTIFY_OFFSET_MULTIPLIER 2 /**< VirtIO Notify Cap. MMIO config param */ 54 #define VIRTIO_REGION_LEGACY_IO 0 /**< BAR for VirtIO legacy drivers MBZ */ 53 55 #define VIRTIO_REGION_PCI_CAP 2 /**< BAR for VirtIO Cap. MMIO (impl specific) */ 54 56 #define VIRTIO_REGION_MSIX_CAP 0 /**< Bar for MSI-X handling */ 57 #define VIRTIO_PAGE_SIZE 4096 /**< Page size used by VirtIO specification */ 58 59 60 /* Note: The VirtIO specification, particularly rev. 0.95, and clarified in rev 1.0 for transitional devices, 61 says the page sized used for Queue Size calculations is usually 4096 bytes, but dependent on the 62 the transport. In an appendix of the 0.95 spec, the 'mmio device', which has not been 63 implemented by VBox legacy device in VirtualBox, says guest must report the page size. For now 64 will set page size to a static 4096 based on the original VBox legacy VirtIO implementation which 65 tied it to PAGE_SIZE which appears to work (or at least good enough for most practical purposes) */ 55 66 56 67 … … 107 118 VIRTIOSGBUF SgBufIn; 108 119 VIRTIOSGBUF SgBufOut; 109 VIRTIOSGSEG aSegsIn[VIRTQ_ MAX_ENTRIES];110 VIRTIOSGSEG aSegsOut[VIRTQ_ MAX_ENTRIES];120 VIRTIOSGSEG aSegsIn[VIRTQ_SIZE]; 121 VIRTIOSGSEG aSegsOut[VIRTQ_SIZE]; 111 122 /** @} */ 112 123 } VIRTQBUF_T; … … 129 140 } VIRTIOPCIPARAMS, *PVIRTIOPCIPARAMS; 130 141 142 /* Virtio Platform Indepdented Reserved Feature Bits (see 1.1 specification section 6) */ 143 144 #define VIRTIO_F_NOTIFY_ON_EMPTY RT_BIT_64(24) /**< Legacy feature: Force intr if no AVAIL */ 145 #define VIRTIO_F_ANY_LAYOUT RT_BIT_64(27) /**< Doc bug: Goes under two names in spec */ 146 #define VIRTIO_F_INDIRECT_DESC RT_BIT_64(28) /**< Allow descs to point to list of descs */ 147 #define VIRTIO_F_RING_INDIRECT_DESC RT_BIT_64(28) /**< Doc bug: Goes under two names in spec */ 148 #define VIRTIO_F_EVENT_IDX RT_BIT_64(29) /**< Allow notification disable for n elems */ 149 #define VIRTIO_F_RING_EVENT_IDX RT_BIT_64(29) /**< Doc bug: Goes under two names in spec */ 150 #define VIRTIO_F_BAD_FEATURE RT_BIT_64(30) /**< QEMU kludge. 
UNUSED as of >= VirtIO 1.0 */ 131 151 #define VIRTIO_F_VERSION_1 RT_BIT_64(32) /**< Required feature bit for 1.0 devices */ 132 #define VIRTIO_F_INDIRECT_DESC RT_BIT_64(28) /**< Allow descs to point to list of descs */ 133 #define VIRTIO_F_EVENT_IDX RT_BIT_64(29) /**< Allow notification disable for n elems */ 134 #define VIRTIO_F_RING_INDIRECT_DESC RT_BIT_64(28) /**< Doc bug: Goes under two names in spec */ 135 #define VIRTIO_F_RING_EVENT_IDX RT_BIT_64(29) /**< Doc bug: Goes under two names in spec */ 152 #define VIRTIO_F_ACCESS_PLATFORM RT_BIT_64(33) /**< Funky guest mem access (VirtIO 1.1 NYI) */ 153 #define VIRTIO_F_RING_PACKED RT_BIT_64(34) /**< Packed Queue Layout (VirtIO 1.1 NYI) */ 154 #define VIRTIO_F_IN_ORDER RT_BIT_64(35) /**< Honor guest buf order (VirtIO 1.1 NYI) */ 155 #define VIRTIO_F_ORDER_PLATFORM RT_BIT_64(36) /**< Host mem access honored (VirtIO 1.1 NYI) */ 156 #define VIRTIO_F_SR_IOV RT_BIT_64(37) /**< Dev Single Root I/O virt (VirtIO 1.1 NYI) */ 157 #define VIRTIO_F_NOTIFICAITON_DATA RT_BIT_64(38) /**< Driver passes extra data (VirtIO 1.1 NYI) */ 136 158 137 159 #define VIRTIO_DEV_INDEPENDENT_FEATURES_OFFERED ( 0 ) /**< TBD: Add VIRTIO_F_INDIRECT_DESC */ 160 #define VIRTIO_DEV_INDEPENDENT_LEGACY_FEATURES_OFFERED ( 0 ) /**< Only offered to legacy drivers */ 138 161 139 162 #define VIRTIO_ISR_VIRTQ_INTERRUPT RT_BIT_32(0) /**< Virtq interrupt bit of ISR register */ 140 163 #define VIRTIO_ISR_DEVICE_CONFIG RT_BIT_32(1) /**< Device configuration changed bit of ISR */ 164 #define DEVICE_PCI_NETWORK_SUBSYSTEM 1 /**< Network Card, per VirtIO legacy spec. */ 141 165 #define DEVICE_PCI_VENDOR_ID_VIRTIO 0x1AF4 /**< Guest driver locates dev via (mandatory) */ 142 #define DEVICE_PCI_REVISION_ID_VIRTIO 1 /**< VirtIO 1.0 non-transitional drivers >= 1*/166 #define DEVICE_PCI_REVISION_ID_VIRTIO 0 /**< VirtIO Modern Transitional driver rev MBZ */ 143 167 144 168 /** Reserved (*negotiated*) Feature Bits (e.g. device independent features, VirtIO 1.0 spec,section 6) */ … … 190 214 191 215 /** 216 * VirtIO Legacy Capabilities' related MMIO-mapped structs (see virtio-0.9.5 spec) 217 * 218 * Note: virtio_pci_device_cap is dev-specific, implemented by client. Definition unknown here. 219 */ 220 typedef struct virtio_legacy_pci_common_cfg 221 { 222 /* Device-specific fields */ 223 uint32_t uDeviceFeatures; /**< RO (device reports features to driver) */ 224 uint32_t uDriverFeatures; /**< RW (driver-accepted device features) */ 225 uint32_t uVirtqPfn; /**< RW (driver writes queue page number) */ 226 uint16_t uQueueSize; /**< RW (queue size, 0 - 2^n) */ 227 uint16_t uVirtqSelect; /**< RW (selects queue focus for these fields) */ 228 uint16_t uQueueNotify; /**< RO (offset into virtqueue; see spec) */ 229 uint8_t fDeviceStatus; /**< RW (driver writes device status, 0=reset) */ 230 uint8_t fIsrStatus; /**< RW (driver writes ISR status, 0=reset) */ 231 // uint16_t uMsixConfig; /**< RW (driver sets MSI-X config vector) */ 232 // uint16_t uMsixVector; /**< RW (driver sets MSI-X config vector) */ 233 } VIRTIO_LEGACY_PCI_COMMON_CFG_T, *PVIRTIO_LEGACY_PCI_COMMON_CFG_T; 234 235 /** 192 236 * VirtIO 1.0 Capabilities' related MMIO-mapped structs: 193 237 * … … 208 252 /* Virtq-specific fields (values reflect (via MMIO) info related to queue indicated by uVirtqSelect. 
*/ 209 253 uint16_t uVirtqSelect; /**< RW (selects queue focus for these fields) */ 210 uint16_t u Size;/**< RW (queue size, 0 - 2^n) */211 uint16_t uMsix ;/**< RW (driver selects MSI-X queue vector) */254 uint16_t uQueueSize; /**< RW (queue size, 0 - 2^n) */ 255 uint16_t uMsixVector; /**< RW (driver selects MSI-X queue vector) */ 212 256 uint16_t uEnable; /**< RW (driver controls usability of queue) */ 213 257 uint16_t uNotifyOffset; /**< RO (offset into virtqueue; see spec) */ … … 245 289 RTGCPHYS GCPhysVirtqAvail; /**< (MMIO) PhysAdr per-Q avail structs GUEST */ 246 290 RTGCPHYS GCPhysVirtqUsed; /**< (MMIO) PhysAdr per-Q used structs GUEST */ 247 uint16_t uMsix ;/**< (MMIO) Per-queue vector for MSI-X GUEST */291 uint16_t uMsixVector; /**< (MMIO) Per-queue vector for MSI-X GUEST */ 248 292 uint16_t uEnable; /**< (MMIO) Per-queue enable GUEST */ 249 293 uint16_t uNotifyOffset; /**< (MMIO) per-Q notify offset HOST */ 250 uint16_t u Size;/**< (MMIO) Per-queue size HOST/GUEST */294 uint16_t uQueueSize; /**< (MMIO) Per-queue size HOST/GUEST */ 251 295 uint16_t uAvailIdxShadow; /**< Consumer's position in avail ring */ 252 296 uint16_t uUsedIdxShadow; /**< Consumer's position in used ring */ … … 272 316 uint32_t uMsixConfig; /**< (MMIO) MSI-X vector GUEST */ 273 317 uint8_t fDeviceStatus; /**< (MMIO) Device Status GUEST */ 274 uint8_t uPrevDeviceStatus; /**< (MMIO) Prev Device Status GUEST */318 uint8_t fPrevDeviceStatus; /**< (MMIO) Prev Device Status GUEST */ 275 319 uint8_t uConfigGeneration; /**< (MMIO) Device config sequencer HOST */ 320 uint16_t uQueueNotify; /**< Caches queue idx in legacy mode GUEST */ 321 bool fGenUpdatePending; /**< If set, update cfg gen after driver reads */ 322 uint8_t uPciCfgDataOff; /**< Offset to PCI configuration data area */ 323 uint8_t uISR; /**< Interrupt Status Register. */ 324 uint8_t fMsiSupport; /**< Flag set if using MSI instead of ISR */ 325 uint8_t fLegacyDriver; /**< Set if guest driver < VirtIO 1.0 */ 326 uint16_t uVirtqSelect; /**< (MMIO) queue selector GUEST */ 276 327 277 328 /** @name The locations of the capability structures in PCI config space and the BAR. … … 284 335 /** @} */ 285 336 286 uint16_t uVirtqSelect; /**< (MMIO) queue selector GUEST */ 287 bool fGenUpdatePending; /**< If set, update cfg gen after driver reads */ 288 uint8_t uPciCfgDataOff; /**< Offset to PCI configuration data area */ 289 uint8_t uISR; /**< Interrupt Status Register. */ 290 uint8_t fMsiSupport; /**< Flag set if using MSI instead of ISR */ 291 /** The MMIO handle for the PCI capability region (\#2). */ 292 IOMMMIOHANDLE hMmioPciCap; 293 337 338 339 IOMMMIOHANDLE hMmioPciCap; /**< MMIO handle of PCI cap. region (\#2) */ 340 IOMIOPORTHANDLE hLegacyIoPorts; /**< Handle of legacy I/O port range. 
*/ 341 342 343 #ifdef VBOX_WITH_STATISTICS 294 344 /** @name Statistics 295 345 * @{ */ … … 298 348 STAMCOUNTER StatDescChainsSegsIn; 299 349 STAMCOUNTER StatDescChainsSegsOut; 350 STAMPROFILEADV StatReadR3; /** I/O port and MMIO R3 Read profiling */ 351 STAMPROFILEADV StatReadR0; /** I/O port and MMIO R0 Read profiling */ 352 STAMPROFILEADV StatReadRC; /** I/O port and MMIO R3 Read profiling */ 353 STAMPROFILEADV StatWriteR3; /** I/O port and MMIO R3 Write profiling */ 354 STAMPROFILEADV StatWriteR0; /** I/O port and MMIO R3 Write profiling */ 355 STAMPROFILEADV StatWriteRC; /** I/O port and MMIO R3 Write profiling */ 356 #endif 357 300 358 /** @} */ 301 359 } VIRTIOCORE; … … 367 425 bool fGenUpdatePending; /**< If set, update cfg gen after driver reads */ 368 426 char pcszMmioName[MAX_NAME]; /**< MMIO mapping name */ 427 char pcszPortIoName[MAX_NAME]; /**< PORT mapping name */ 369 428 } VIRTIOCORER3; 370 429 … … 648 707 int virtioCoreR3VirtqAvailBufNext(PVIRTIOCORE pVirtio, uint16_t uVirtqNbr); 649 708 709 /** 710 * Checks to see if guest has acknowledged device's VIRTIO_F_VERSION_1 feature. 711 * If not, it's presumed to be a VirtIO legacy guest driver. Note that legacy drivers 712 * may start using the device prematurely, as opposed to the rigorously sane protocol 713 * prescribed by the "modern" VirtIO spec. Doing so is suggestive of a legacy driver. 714 * Therefore legacy mode is the assumption un proven otherwise. 715 * 716 * @param pVirtio Pointer to the virtio state. 717 */ 718 int virtioCoreIsLegacyMode(PVIRTIOCORE pVirtio); 650 719 651 720 DECLINLINE(void) virtioCoreGCPhysChainInit(PVIRTIOSGBUF pGcSgBuf, PVIRTIOSGSEG paSegs, size_t cSegs) … … 881 950 { 882 951 Assert(uVirtqNbr < RT_ELEMENTS(pVirtio->aVirtqueues)); 952 if (pVirtio->fLegacyDriver) 953 return pVirtio->aVirtqueues[uVirtqNbr].GCPhysVirtqDesc != 0; 883 954 return pVirtio->aVirtqueues[uVirtqNbr].uEnable != 0; 884 955 } … … 958 1029 * @param uBase base address of per-row address prefixing of hex output 959 1030 * @param pszTitle Optional title. If present displays title that lists 960 * provided text with value of cb to indicate sizenext to it.1031 * provided text with value of cb to indicate VIRTQ_SIZE next to it. 961 1032 */ 962 1033 void virtioCoreHexDump(uint8_t *pv, uint32_t cb, uint32_t uBase, const char *pszTitle); … … 993 1064 } 994 1065 1066 /** 1067 * This VirtIO transitional device supports "modern" (rev 1.0+) as well as "legacy" (e.g. < 1.0) VirtIO drivers. 1068 * Some legacy guest drivers are known to mishandle PCI bus mastering wherein the PCI flavor of GC phys 1069 * access functions can't be used. The following wrappers select the mem access method based on whether the 1070 * device is operating in legacy mode or not. 
1071 */ 1072 DECLINLINE(int) virtioCoreGCPhysWrite(PVIRTIOCORE pVirtio, PPDMDEVINS pDevIns, RTGCPHYS GCPhys, void *pvBuf, size_t cbWrite) 1073 { 1074 int rc; 1075 if (virtioCoreIsLegacyMode(pVirtio)) 1076 rc = PDMDevHlpPhysWrite(pDevIns, GCPhys, pvBuf, cbWrite); 1077 else 1078 rc = PDMDevHlpPCIPhysWrite(pDevIns, GCPhys, pvBuf, cbWrite); 1079 return rc; 1080 } 1081 1082 DECLINLINE(int) virtioCoreGCPhysRead(PVIRTIOCORE pVirtio, PPDMDEVINS pDevIns, RTGCPHYS GCPhys, void *pvBuf, size_t cbRead) 1083 { 1084 int rc; 1085 if (virtioCoreIsLegacyMode(pVirtio)) 1086 rc = PDMDevHlpPhysRead(pDevIns, GCPhys, pvBuf, cbRead); 1087 else 1088 rc = PDMDevHlpPCIPhysRead(pDevIns, GCPhys, pvBuf, cbRead); 1089 return rc; 1090 } 995 1091 996 1092 /** Misc VM and PDM boilerplate */ … … 1055 1151 RT_UOFFSETOF(tCfgStruct, member), \ 1056 1152 RT_SIZEOFMEMB(tCfgStruct, member), false /* fSubfieldMatch */) 1153 1154 1057 1155 1058 1156 /**
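
As a closing illustration, a minimal standalone sketch (not VirtualBox source) of how accepting VIRTIO_F_VERSION_1 in the upper feature dword is what moves the core out of its default legacy-driver assumption, mirroring the uDriverFeatures write handling added in virtioCommonCfgAccessed(). CoreStub and driverFeaturesWrite are illustrative stand-ins; the high-dword memcpy assumes a little-endian host, as the original code path does.

    /* Sketch only: legacy assumption is cleared once the driver acks VIRTIO_F_VERSION_1. */
    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    #define VIRTIO_F_VERSION_1  (UINT64_C(1) << 32)

    typedef struct {
        uint64_t uDriverFeatures;        /* features the guest driver accepted */
        uint32_t uDriverFeaturesSelect;  /* which 32-bit half is being written */
        int      fLegacyDriver;          /* assumed legacy until proven modern */
    } CoreStub;

    static void driverFeaturesWrite(CoreStub *p, uint32_t u32)
    {
        if (p->uDriverFeaturesSelect == 0)
            memcpy(&p->uDriverFeatures, &u32, sizeof(u32));                               /* low dword */
        else
        {
            memcpy((uint8_t *)&p->uDriverFeatures + sizeof(uint32_t), &u32, sizeof(u32)); /* high dword */
            if (p->uDriverFeatures & VIRTIO_F_VERSION_1)
                p->fLegacyDriver = 0;    /* a modern (1.0+) driver has identified itself */
        }
    }

    int main(void)
    {
        CoreStub core = { 0, 0, 1 };                                  /* reset state: legacy assumed */
        driverFeaturesWrite(&core, 0);                                /* low half: no effect on mode */
        core.uDriverFeaturesSelect = 1;
        driverFeaturesWrite(&core, (uint32_t)(VIRTIO_F_VERSION_1 >> 32));
        printf("fLegacyDriver=%d\n", core.fLegacyDriver);             /* prints 0 */
        return 0;
    }

Until that high-dword write arrives, every guest-physical access routed through the virtioCoreGCPhysRead/Write wrappers above takes the non-bus-master PDMDevHlpPhys path, which is the point of keeping legacy mode as the default.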