Changeset 92939 in vbox for trunk/src/VBox/Devices/VirtIO
- Timestamp: Dec 15, 2021 3:51:28 PM
- svn:sync-xref-src-repo-rev: 148914
- Location: trunk/src/VBox/Devices/VirtIO
- Files: 2 edited
Legend: unchanged context lines are unprefixed; lines removed in r92939 are prefixed "-"; lines added are prefixed "+"; "…" marks elided unchanged code.
trunk/src/VBox/Devices/VirtIO/VirtioCore.cpp
(diff r92091 → r92939)

 * Defined Constants And Macros *
 *********************************************************************************************************************************/

+#define INSTANCE(a_pVirtio)                 ((a_pVirtio)->szInstance)
 #define VIRTQNAME(a_pVirtio, a_uVirtq)      ((a_pVirtio)->aVirtqueues[(a_uVirtq)].szName)

 #define IS_VIRTQ_EMPTY(pDevIns, pVirtio, pVirtq) \
             (virtioCoreVirtqAvailCnt(pDevIns, pVirtio, pVirtq) == 0)

 #define IS_DRIVER_OK(a_pVirtio)             ((a_pVirtio)->fDeviceStatus & VIRTIO_STATUS_DRIVER_OK)
 #define WAS_DRIVER_OK(a_pVirtio)            ((a_pVirtio)->fPrevDeviceStatus & VIRTIO_STATUS_DRIVER_OK)

+/**
+ * These defines are used to track the guest virtio-net driver writing the driver-features-accepted
+ * flags in two 32-bit operations (in arbitrary order), plus one bit dedicated to ensuring the
+ * 'features complete' event is handled only once.
+ */
+#define DRIVER_FEATURES_0_WRITTEN           1   /**< fDriverFeatures[0] written by guest virtio-net */
+#define DRIVER_FEATURES_1_WRITTEN           2   /**< fDriverFeatures[1] written by guest virtio-net */
+#define DRIVER_FEATURES_0_AND_1_WRITTEN     3   /**< Both 32-bit parts of fDriverFeatures[] written */
+#define DRIVER_FEATURES_COMPLETE_HANDLED    4   /**< Features negotiation complete handler called   */
…
-/** Marks the start of the virtio saved state (just for sanity). */
-#define VIRTIO_SAVEDSTATE_MARKER            UINT64_C(0x1133557799bbddff)
-/** The current saved state version for the virtio core. */
-#define VIRTIO_SAVEDSTATE_VERSION           UINT32_C(1)

 /*********************************************************************************************************************************
 *   Structures and Typedefs                                                                                                      *
 *********************************************************************************************************************************/

 /** @name virtq related flags
…
 /**
- * virtq related structs
- * (struct names follow VirtIO 1.0 spec, typedef use VBox style)
+ * virtq-related structs
+ * (struct names follow VirtIO 1.0 spec, field names use VBox-styled naming, w/respective spec'd name in comments)
  */
 typedef struct virtq_desc
…
 } VIRTQ_USED_T, *PVIRTQ_USED_T;

 const char *virtioCoreGetStateChangeText(VIRTIOVMSTATECHANGED enmState)
 {
…
 static void virtioCoreNotifyGuestDriver(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtq);
-static int  virtioKick(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint8_t uCause, uint16_t uVec);
+static int  virtioNudgeGuest(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint8_t uCause, uint16_t uVec);
+
+DECLINLINE(uint16_t) virtioCoreR3CountPendingBufs(uint16_t uRingIdx, uint16_t uShadowIdx, uint16_t uQueueSize)
+{
+    if (uShadowIdx == uRingIdx)
+        return 0;
+    else
+    if (uShadowIdx > uRingIdx)
+        return uShadowIdx - uRingIdx;
+    return uQueueSize - (uRingIdx - uShadowIdx);
+}
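The modulo-free wraparound arithmetic in the new virtioCoreR3CountPendingBufs() can be sanity-checked in isolation. A minimal self-contained sketch (the index values are illustrative, not from this changeset):

    #include <stdint.h>
    #include <stdio.h>

    /* Same wraparound logic as virtioCoreR3CountPendingBufs() above. */
    static uint16_t countPending(uint16_t uRingIdx, uint16_t uShadowIdx, uint16_t uQueueSize)
    {
        if (uShadowIdx == uRingIdx)
            return 0;
        if (uShadowIdx > uRingIdx)
            return uShadowIdx - uRingIdx;
        return uQueueSize - (uRingIdx - uShadowIdx);
    }

    int main(void)
    {
        /* No wrap: the shadow index ran 3 entries ahead of the ring index. */
        printf("%u\n", countPending(5, 8, 256));    /* prints 3 */
        /* Wrapped: the shadow index passed the end of a 256-entry ring.   */
        printf("%u\n", countPending(254, 2, 256));  /* prints 4 */
        return 0;
    }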
…
 /** @name Internal queue operations
…
     uint16_t const cVirtqItems = RT_MAX(pVirtq->uQueueSize, 1); /* Make sure to avoid div-by-zero. */

     virtioCoreGCPhysRead(pVirtio, pDevIns,
                          pVirtq->GCPhysVirtqDesc + sizeof(VIRTQ_DESC_T) * (idxDesc % cVirtqItems),
                          pDesc, sizeof(VIRTQ_DESC_T));
 }
 #endif
…
                          pVirtq->GCPhysVirtqAvail + RT_UOFFSETOF(VIRTQ_AVAIL_T, fFlags),
                          &fFlags, sizeof(fFlags));
-
     return fFlags;
 }
…
 #ifdef IN_RING3
 DECLINLINE(uint16_t) virtioReadUsedRingIdx(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTQUEUE pVirtq)
…
     if (uIdxActual < uIdxShadow)
-        uIdxDelta = (uIdxActual + VIRTQ_SIZE) - uIdxShadow;
+        uIdxDelta = (uIdxActual + pVirtq->uQueueSize) - uIdxShadow;
     else
         uIdxDelta = uIdxActual - uIdxShadow;
-
-    LogFunc(("%s, %u %s\n",
-             pVirtq->szName, uIdxDelta, uIdxDelta == 1 ? "entry" : "entries"));

     return uIdxDelta;
…
     if (!pVirtio->fLegacyDriver && !pVirtq->uEnable)
     {
-        LogRelFunc(("virtq: %d (%s) not enabled\n", uVirtq, VIRTQNAME(pVirtio, uVirtq)));
+        LogRelFunc(("virtq: %s not enabled\n", VIRTQNAME(pVirtio, uVirtq)));
         return 0;
     }
-
     return virtioCoreVirtqAvailCnt(pDevIns, pVirtio, pVirtq);
 }
…
     else
         cbPrint = RTStrPrintf(pszOut, cbRemain, "%02x %s", c, (col + 1) % 8 ? "" : " ");
     ADJCURSOR(cbPrint);
 }
 for (uint16_t idx = row * 16; idx < row * 16 + 16; idx++)
…
 #undef ADJCURSOR
 }
-#endif /* LOG_ENABLED */
-
-/** API function: See header file */
-int virtioCoreIsLegacyMode(PVIRTIOCORE pVirtio)
-{
-    Log12Func(("%s", pVirtio->fLegacyDriver ? "Legacy Guest Driver handling mode\n" : ""));
-    return pVirtio->fLegacyDriver;
-}

 /** API function: See header file */
"wrote" : "read ", pszMember, 508 szIdx, uOffset, uOffset + cb, cb, pv)); 476 szIdx[0] = '\0'; 477 478 if (cb == 1 || cb == 2 || cb == 4 || cb == 8) 479 { 480 char szDepiction[64]; 481 size_t cchDepiction; 482 if (uOffset != 0 || cb != uMemberSize) /* display bounds if partial member access */ 483 cchDepiction = RTStrPrintf(szDepiction, sizeof(szDepiction), "%s%s[%d:%d]", 484 pszMember, szIdx, uOffset, uOffset + cb - 1); 485 else 486 cchDepiction = RTStrPrintf(szDepiction, sizeof(szDepiction), "%s%s", pszMember, szIdx); 487 488 /* padding */ 489 if (cchDepiction < 30) 490 szDepiction[cchDepiction++] = ' '; 491 while (cchDepiction < 30) 492 szDepiction[cchDepiction++] = '.'; 493 szDepiction[cchDepiction] = '\0'; 494 495 RTUINT64U uValue; 496 uValue.u = 0; 497 memcpy(uValue.au8, pv, cb); 498 Log6(("%-23s: Guest %s %s %#0*RX64\n", 499 pszFunc, fWrite ? "wrote" : "read ", szDepiction, 2 + cb * 2, uValue.u)); 500 } 501 else /* odd number or oversized access, ... log inline hex-dump style */ 502 { 503 Log6(("%-23s: Guest %s %s%s[%d:%d]: %.*Rhxs\n", 504 pszFunc, fWrite ? "wrote" : "read ", pszMember, 505 szIdx, uOffset, uOffset + cb, cb, pv)); 506 } 509 507 } 510 508 RT_NOREF2(fWrite, pszFunc); … … 512 510 513 511 /** 514 * Makes the MMIO-mapped Virtio fDeviceStatus registers non-cryptic (buffers to 515 * keep the output clean during multi-threaded activity) 512 * Log MMIO-mapped Virtio fDeviceStatus register bitmask, naming the bits 516 513 */ 517 514 DECLINLINE(void) virtioCoreFormatDeviceStatus(uint8_t bStatus, char *pszBuf, size_t uSize) 518 515 { 519 520 #define ADJCURSOR(len) cp += len; uSize -= len; sep = (char *)" | "; 521 516 # define ADJCURSOR(len) { cp += len; uSize -= len; sep = (char *)" | "; } 522 517 memset(pszBuf, 0, uSize); 523 size_t len; 524 char *cp = pszBuf; 525 char *sep = (char *)""; 526 527 if (bStatus == 0) { 518 char *cp = pszBuf, *sep = (char *)""; 519 int len; 520 if (bStatus == 0) 528 521 RTStrPrintf(cp, uSize, "RESET"); 529 return; 530 } 531 if (bStatus & VIRTIO_STATUS_ACKNOWLEDGE) 532 { 533 len = RTStrPrintf(cp, uSize, "ACKNOWLEDGE"); 534 ADJCURSOR(len); 535 } 536 if (bStatus & VIRTIO_STATUS_DRIVER) 537 { 538 len = RTStrPrintf(cp, uSize, "%sDRIVER", sep); 539 ADJCURSOR(len); 540 } 541 if (bStatus & VIRTIO_STATUS_FEATURES_OK) 542 { 543 len = RTStrPrintf(cp, uSize, "%sFEATURES_OK", sep); 544 ADJCURSOR(len); 545 } 546 if (bStatus & VIRTIO_STATUS_DRIVER_OK) 547 { 548 len = RTStrPrintf(cp, uSize, "%sDRIVER_OK", sep); 549 ADJCURSOR(len); 550 } 551 if (bStatus & VIRTIO_STATUS_FAILED) 552 { 553 len = RTStrPrintf(cp, uSize, "%sFAILED", sep); 554 ADJCURSOR(len); 555 } 556 if (bStatus & VIRTIO_STATUS_DEVICE_NEEDS_RESET) 557 RTStrPrintf(cp, uSize, "%sNEEDS_RESET", sep); 558 559 #undef ADJCURSOR 522 else 523 { 524 if (bStatus & VIRTIO_STATUS_ACKNOWLEDGE) 525 { 526 len = RTStrPrintf(cp, uSize, "ACKNOWLEDGE"); 527 ADJCURSOR(len); 528 } 529 if (bStatus & VIRTIO_STATUS_DRIVER) 530 { 531 len = RTStrPrintf(cp, uSize, "%sDRIVER", sep); 532 ADJCURSOR(len); 533 } 534 if (bStatus & VIRTIO_STATUS_FEATURES_OK) 535 { 536 len = RTStrPrintf(cp, uSize, "%sFEATURES_OK", sep); 537 ADJCURSOR(len); 538 } 539 if (bStatus & VIRTIO_STATUS_DRIVER_OK) 540 { 541 len = RTStrPrintf(cp, uSize, "%sDRIVER_OK", sep); 542 ADJCURSOR(len); 543 } 544 if (bStatus & VIRTIO_STATUS_FAILED) 545 { 546 len = RTStrPrintf(cp, uSize, "%sFAILED", sep); 547 ADJCURSOR(len); 548 } 549 if (bStatus & VIRTIO_STATUS_DEVICE_NEEDS_RESET) 550 RTStrPrintf(cp, uSize, "%sNEEDS_RESET", sep); 551 } 552 # undef ADJCURSOR 553 } 554 555 
…
     pVirtq->uUsedIdxShadow  = 0;
     pVirtq->fUsedRingEvent  = false;
+    pVirtq->fAttached       = true;
     RTStrCopy(pVirtq->szName, sizeof(pVirtq->szName), pcszName);
     return VINF_SUCCESS;
 }
+
+int virtioCoreR3VirtqDetach(PVIRTIOCORE pVirtio, uint16_t uVirtqNbr)
+{
+    PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtqNbr];
+    pVirtq->uVirtq          = 0;
+    pVirtq->uAvailIdxShadow = 0;
+    pVirtq->uUsedIdxShadow  = 0;
+    pVirtq->fUsedRingEvent  = false;
+    pVirtq->fAttached       = false;
+    memset(pVirtq->szName, 0, sizeof(pVirtq->szName));
+    return VINF_SUCCESS;
+}
+
+bool virtioCoreR3VirtqIsAttached(PVIRTIOCORE pVirtio, uint16_t uVirtqNbr)
+{
+    return pVirtio->aVirtqueues[uVirtqNbr].fAttached;
+}
+
+bool virtioCoreR3VirtqIsEnabled(PVIRTIOCORE pVirtio, uint16_t uVirtqNbr)
+{
+    PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtqNbr];
+    return (bool)pVirtq->uEnable && pVirtq->GCPhysVirtqDesc;
+}
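A minimal sketch of how a device-specific client might drive the new attach/detach bookkeeping (the two-queue device, function names and queue names are illustrative, not part of this changeset):

    /* Hypothetical device-side helpers using the new API. */
    static int myDevAttachVirtqs(PVIRTIOCORE pVirtio)
    {
        int rc = virtioCoreR3VirtqAttach(pVirtio, 0 /*uVirtqNbr*/, "recvq");
        if (RT_SUCCESS(rc))
            rc = virtioCoreR3VirtqAttach(pVirtio, 1 /*uVirtqNbr*/, "xmitq");
        return rc;
    }

    static void myDevDetachVirtqs(PVIRTIOCORE pVirtio)
    {
        for (uint16_t uVirtqNbr = 0; uVirtqNbr < 2; uVirtqNbr++)
            if (virtioCoreR3VirtqIsAttached(pVirtio, uVirtqNbr))
                virtioCoreR3VirtqDetach(pVirtio, uVirtqNbr);
    }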
805 826 */ 806 if (cSegsIn + cSegsOut >= VIRTQ_SIZE)827 if (cSegsIn + cSegsOut >= pVirtq->uQueueSize) 807 828 { 808 829 static volatile uint32_t s_cMessages = 0; … … 823 844 if (desc.fFlags & VIRTQ_DESC_F_WRITE) 824 845 { 825 Log6Func(("%s IN idx=% u seg=%u addr=%RGp cb=%u\n", pVirtq->szName, uDescIdx, cSegsIn, desc.GCPhysBuf, desc.cb));846 Log6Func(("%s IN idx=%-4u seg=%-3u addr=%RGp cb=%u\n", pVirtq->szName, uDescIdx, cSegsIn, desc.GCPhysBuf, desc.cb)); 826 847 cbIn += desc.cb; 827 848 pSeg = &paSegsIn[cSegsIn++]; … … 829 850 else 830 851 { 831 Log6Func(("%s OUT desc_idx=% u seg=%u addr=%RGp cb=%u\n", pVirtq->szName, uDescIdx, cSegsOut, desc.GCPhysBuf, desc.cb));852 Log6Func(("%s OUT desc_idx=%-4u seg=%-3u addr=%RGp cb=%u\n", pVirtq->szName, uDescIdx, cSegsOut, desc.GCPhysBuf, desc.cb)); 832 853 cbOut += desc.cb; 833 854 pSeg = &paSegsOut[cSegsOut++]; … … 840 861 #endif 841 862 } 842 843 863 pSeg->GCPhys = desc.GCPhysBuf; 844 864 pSeg->cbSeg = desc.cb; 845 846 865 uDescIdx = desc.uDescIdxNext; 847 866 } while (desc.fFlags & VIRTQ_DESC_F_NEXT); … … 915 934 AssertMsgReturn(IS_DRIVER_OK(pVirtio), ("Guest driver not in ready state.\n"), VERR_INVALID_STATE); 916 935 917 Log6Func((" Copying device data to %s (%s guest), desc chain idx %d\n",918 VIRTQNAME(pVirtio, uVirtq), pVirt io->fLegacyDriver ? "legacy" : "modern", virtioReadUsedRingIdx(pDevIns, pVirtio, pVirtq)));919 920 /* Copy s/g buf (virtual memory) to guest phys mem ( INdirection). */936 Log6Func((" Copying device data to %s, [desc:%u → used ring:%u]\n", 937 VIRTQNAME(pVirtio, uVirtq), pVirtqBuf->uHeadIdx, pVirtq->uUsedIdxShadow)); 938 939 /* Copy s/g buf (virtual memory) to guest phys mem (VirtIO "IN" direction). */ 921 940 922 941 size_t cbCopy = 0, cbTotal = 0, cbRemain = 0; … … 944 963 } 945 964 946 /* If this write-ahead crosses threshold where the driver wants to get an event, flag it*/965 /* Flag if write-ahead crosses threshold where guest driver indicated it wants event notification */ 947 966 if (pVirtio->uDriverFeatures & VIRTIO_F_EVENT_IDX) 948 967 if (pVirtq->uUsedIdxShadow == virtioReadAvailUsedEvent(pDevIns, pVirtio, pVirtq)) … … 951 970 /* 952 971 * Place used buffer's descriptor in used ring but don't update used ring's slot index. 953 * That will be done with a subsequent client call to virtioCoreVirtqUsedRingSync() */ 972 * That will be done with a subsequent client call to virtioCoreVirtqUsedRingSync() 973 */ 954 974 virtioWriteUsedElem(pDevIns, pVirtio, pVirtq, pVirtq->uUsedIdxShadow++, pVirtqBuf->uHeadIdx, (uint32_t)cbTotal); 955 975 956 if (pSgVirtReturn) 957 Log6Func((" ... %d segs, %zu bytes, copied to %u byte buf. residual: %zu bytes\n", 958 pSgVirtReturn->cSegs, cbTotal - cbRemain, pVirtqBuf->cbPhysReturn, pVirtqBuf->cbPhysReturn - cbTotal)); 959 960 Log6Func((" %s used_idx=%u\n", VIRTQNAME(pVirtio, uVirtq), virtioReadUsedRingIdx(pDevIns, pVirtio, pVirtq))); 976 #ifdef LOG_ENABLED 977 if (LogIs6Enabled() && pSgVirtReturn) 978 { 979 980 LogFunc((" ... %d segs, %zu bytes, copied to %u byte buf@offset=%u. 
+#ifdef LOG_ENABLED
+    if (LogIs6Enabled() && pSgVirtReturn)
+    {
+        LogFunc(("    ... %d segs, %zu bytes, copied to %u byte buf@offset=%u. Residual: %zu bytes\n",
+                 pSgVirtReturn->cSegs, cbTotal - cbRemain, pVirtqBuf->cbPhysReturn,
+                 ((virtioCoreGCPhysChainCalcBufSize(pVirtqBuf->pSgPhysReturn)
+                   - virtioCoreGCPhysChainCalcLengthLeft(pVirtqBuf->pSgPhysReturn)) - (cbTotal - cbRemain)),
+                 virtioCoreGCPhysChainCalcLengthLeft(pVirtqBuf->pSgPhysReturn)));
+
+        uint16_t uPending = virtioCoreR3CountPendingBufs(
+                                virtioReadUsedRingIdx(pDevIns, pVirtio, pVirtq),
+                                pVirtq->uUsedIdxShadow, pVirtq->uQueueSize);
+
+        LogFunc(("    %u used buf%s not synced in %s\n", uPending, uPending == 1 ? "" : "s ",
+                 VIRTQNAME(pVirtio, uVirtq)));
+    }
+#endif
     return VINF_SUCCESS;
 }

+/** API function: See Header file  */
+int virtioCoreR3VirtqUsedBufPut(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtq,
+                                size_t cb, void const *pv, PVIRTQBUF pVirtqBuf, uint32_t cbEnqueue, bool fFence)
+{
+    Assert(uVirtq < RT_ELEMENTS(pVirtio->aVirtqueues));
+    Assert(pv);
+
+    PVIRTQUEUE   pVirtq        = &pVirtio->aVirtqueues[uVirtq];
+    PVIRTIOSGBUF pSgPhysReturn = pVirtqBuf->pSgPhysReturn;
+
+    Assert(pVirtqBuf->u32Magic == VIRTQBUF_MAGIC);
+    Assert(pVirtqBuf->cRefs > 0);
+
+    AssertMsgReturn(IS_DRIVER_OK(pVirtio), ("Guest driver not in ready state.\n"), VERR_INVALID_STATE);
+
+    Log6Func(("    Copying device data to %s, [desc chain head idx:%u]\n",
+              VIRTQNAME(pVirtio, uVirtq), pVirtqBuf->uHeadIdx));
+
+    /*
+     * Convert virtual memory simple buffer to guest physical memory (VirtIO descriptor chain)
+     */
+    uint8_t *pvBuf = (uint8_t *)pv;
+    size_t cbRemain = cb, cbCopy = 0;
+    while (cbRemain)
+    {
+        cbCopy = RT_MIN(pSgPhysReturn->cbSegLeft, cbRemain);
+        Assert(cbCopy > 0);
+        virtioCoreGCPhysWrite(pVirtio, pDevIns, (RTGCPHYS)pSgPhysReturn->GCPhysCur, pvBuf, cbCopy);
+        virtioCoreGCPhysChainAdvance(pSgPhysReturn, cbCopy);
+        pvBuf += cbCopy;
+        cbRemain -= cbCopy;
+    }
+    LogFunc(("    ...%zu bytes, copied to %u byte buf@offset=%u. Residual: %zu bytes\n",
+             cb, pVirtqBuf->cbPhysReturn,
+             ((virtioCoreGCPhysChainCalcBufSize(pVirtqBuf->pSgPhysReturn)
+               - virtioCoreGCPhysChainCalcLengthLeft(pVirtqBuf->pSgPhysReturn)) - cb),
+             virtioCoreGCPhysChainCalcLengthLeft(pVirtqBuf->pSgPhysReturn)));
+
+    if (cbEnqueue)
+    {
+        if (fFence)
+        {
+            RT_UNTRUSTED_NONVOLATILE_COPY_FENCE(); /* needed? */
+            Assert(!(cbCopy >> 32));
+        }
+        /* Flag if write-ahead crosses threshold where guest driver indicated it wants event notification */
+        if (pVirtio->uDriverFeatures & VIRTIO_F_EVENT_IDX)
+            if (pVirtq->uUsedIdxShadow == virtioReadAvailUsedEvent(pDevIns, pVirtio, pVirtq))
+                pVirtq->fUsedRingEvent = true;
+        /*
+         * Place used buffer's descriptor in used ring but don't update used ring's slot index.
+         * That will be done with a subsequent client call to virtioCoreVirtqUsedRingSync()
+         */
+        Log6Func(("    Enqueue desc chain head idx %u to %s used ring @ %u\n", pVirtqBuf->uHeadIdx,
+                  VIRTQNAME(pVirtio, uVirtq), pVirtq->uUsedIdxShadow));
+
+        virtioWriteUsedElem(pDevIns, pVirtio, pVirtq, pVirtq->uUsedIdxShadow++, pVirtqBuf->uHeadIdx, cbEnqueue);
+
+#ifdef LOG_ENABLED
+        if (LogIs6Enabled())
+        {
+            uint16_t uPending = virtioCoreR3CountPendingBufs(
+                                    virtioReadUsedRingIdx(pDevIns, pVirtio, pVirtq),
+                                    pVirtq->uUsedIdxShadow, pVirtq->uQueueSize);
+
+            LogFunc(("    %u used buf%s not synced in %s\n",
+                     uPending, uPending == 1 ? "" : "s ", VIRTQNAME(pVirtio, uVirtq)));
+        }
+#endif
+    } /* fEnqueue */

     return VINF_SUCCESS;
 }
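A minimal sketch of a device-side caller of the new virtioCoreR3VirtqUsedBufPut(): copy a device-built response into the guest's IN chain, enqueue it, then expose it by syncing the used ring per the contract stated in the comments above (function and variable names are illustrative; the virtioCoreVirtqUsedRingSync() signature is assumed from its use elsewhere in this code):

    static int myDevReturnResponse(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio,
                                   uint16_t uVirtqNbr, PVIRTQBUF pVirtqBuf,
                                   const uint8_t *pabResponse, size_t cbResponse)
    {
        int rc = virtioCoreR3VirtqUsedBufPut(pDevIns, pVirtio, uVirtqNbr,
                                             cbResponse, pabResponse, pVirtqBuf,
                                             (uint32_t)cbResponse /*cbEnqueue*/, true /*fFence*/);
        if (RT_SUCCESS(rc))
            virtioCoreVirtqUsedRingSync(pDevIns, pVirtio, uVirtqNbr); /* make buffer visible to guest */
        return rc;
    }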
"" : "s ", VIRTQNAME(pVirtio, uVirtq))); 1064 } 1065 #endif 1066 } /* fEnqueue */ 961 1067 962 1068 return VINF_SUCCESS; … … 976 1082 ("Guest driver not in ready state.\n"), VERR_INVALID_STATE); 977 1083 978 Log6Func((" %s ++used_idx=%u\n", pVirtq->szName, pVirtq->uUsedIdxShadow)); 1084 Log6Func((" Sync %s used ring (%u → idx)\n", 1085 pVirtq->szName, pVirtq->uUsedIdxShadow)); 979 1086 980 1087 virtioWriteUsedRingIdx(pDevIns, pVirtio, pVirtq, pVirtq->uUsedIdxShadow); … … 998 1105 PVIRTIOCORECC pVirtioCC = PDMINS_2_DATA_CC(pDevIns, PVIRTIOCORECC); 999 1106 1000 /* See VirtIO 1.0, section 4.1.5.2 It implies that uVirtq and uNotifyIdx should match. 1001 * Disregarding this notification may cause throughput to stop, however there's no way to know 1002 * which was queue was intended for wake-up if the two parameters disagree. */ 1003 1107 /* VirtIO 1.0, section 4.1.5.2 implies uVirtq and uNotifyIdx should match. Disregarding any of 1108 * these notifications (if those indicies disagree) may break device/driver synchronization, 1109 * causing eternal throughput starvation, yet there's no specified way to disambiguate 1110 * which queue to wake-up in any awkward situation where the two parameters differ. 1111 */ 1004 1112 AssertMsg(uNotifyIdx == uVirtq, 1005 1113 ("Guest kicked virtq %d's notify addr w/non-corresponding virtq idx %d\n", … … 1010 1118 PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtq]; 1011 1119 1012 Log6Func(("%s (desc chains: %u)\n", pVirtq->szName,1120 Log6Func(("%s: (desc chains: %u)\n", pVirtq->szName ? pVirtq->szName : "?UNAMED QUEUE?", 1013 1121 virtioCoreVirtqAvailCnt(pDevIns, pVirtio, pVirtq))); 1014 1122 … … 1048 1156 pVirtq->szName, (uint16_t)virtioReadAvailUsedEvent(pDevIns, pVirtio, pVirtq))); 1049 1157 #endif 1050 virtio Kick(pDevIns, pVirtio, VIRTIO_ISR_VIRTQ_INTERRUPT, pVirtq->uMsixVector);1158 virtioNudgeGuest(pDevIns, pVirtio, VIRTIO_ISR_VIRTQ_INTERRUPT, pVirtq->uMsixVector); 1051 1159 pVirtq->fUsedRingEvent = false; 1052 1160 return; … … 1062 1170 if (!(virtioReadAvailRingFlags(pDevIns, pVirtio, pVirtq) & VIRTQ_AVAIL_F_NO_INTERRUPT)) 1063 1171 { 1064 virtio Kick(pDevIns, pVirtio, VIRTIO_ISR_VIRTQ_INTERRUPT, pVirtq->uMsixVector);1172 virtioNudgeGuest(pDevIns, pVirtio, VIRTIO_ISR_VIRTQ_INTERRUPT, pVirtq->uMsixVector); 1065 1173 return; 1066 1174 } … … 1077 1185 * @param uVec MSI-X vector, if enabled 1078 1186 */ 1079 static int virtio Kick(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint8_t uCause, uint16_t uMsixVector)1187 static int virtioNudgeGuest(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint8_t uCause, uint16_t uMsixVector) 1080 1188 { 1081 1189 if (uCause == VIRTIO_ISR_VIRTQ_INTERRUPT) 1082 Log6Func((" reason:buffer added to 'used' ring.\n"));1190 Log6Func(("Reason for interrupt - buffer added to 'used' ring.\n")); 1083 1191 else 1084 1192 if (uCause == VIRTIO_ISR_DEVICE_CONFIG) 1085 Log6Func((" reason:device config change\n"));1193 Log6Func(("Reason for interrupt - device config change\n")); 1086 1194 1087 1195 if (!pVirtio->fMsiSupport) … … 1132 1240 { 1133 1241 LogFunc(("Resetting device VirtIO state\n")); 1134 pVirtio->fLegacyDriver = 1; /* Assume this.Cleared if VIRTIO_F_VERSION_1 feature ack'd */1242 pVirtio->fLegacyDriver = pVirtio->fOfferLegacy; /* Cleared if VIRTIO_F_VERSION_1 feature ack'd */ 1135 1243 pVirtio->uDeviceFeaturesSelect = 0; 1136 1244 pVirtio->uDriverFeaturesSelect = 0; … … 1168 1276 } 1169 1277 #endif /* IN_RING3 */ 1278 1279 /* 1280 * Determines whether guest virtio driver is modern or legacy and does callback 1281 * informing 
+/*
+ * Determines whether the guest virtio driver is modern or legacy and does the callback
+ * informing device-specific code that feature negotiation is complete.
+ * Should be called only once (coordinated via the 'toggle' flag).
+ */
+#ifdef IN_RING3
+DECLINLINE(void) virtioR3DoFeaturesCompleteOnceOnly(PVIRTIOCORE pVirtio, PVIRTIOCORECC pVirtioCC)
+{
+    if (pVirtio->uDriverFeatures & VIRTIO_F_VERSION_1)
+    {
+        LogFunc(("VIRTIO_F_VERSION_1 feature ack'd by guest\n"));
+        pVirtio->fLegacyDriver = 0;
+    }
+    else
+    {
+        if (pVirtio->fOfferLegacy)
+        {
+            pVirtio->fLegacyDriver = 1;
+            LogFunc(("VIRTIO_F_VERSION_1 feature was NOT set by guest\n"));
+        }
+        else
+            AssertMsgFailed(("Guest didn't accept VIRTIO_F_VERSION_1, but fLegacyOffered flag not set.\n"));
+    }
+    pVirtioCC->pfnFeatureNegotiationComplete(pVirtio, pVirtio->uDriverFeatures, pVirtio->fLegacyDriver);
+    pVirtio->fDriverFeaturesWritten |= DRIVER_FEATURES_COMPLETE_HANDLED;
+}
+#endif
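A minimal sketch of a device-side handler matching the new pfnFeatureNegotiationComplete callback shape invoked above (the device structure, containing-struct layout and cached flags are illustrative, not from this changeset):

    /* Hypothetical device-specific negotiation-complete handler. */
    static DECLCALLBACK(void) myDevFeatureNegotiationComplete(PVIRTIOCORE pVirtio,
                                                              uint64_t fDriverFeatures,
                                                              uint32_t fLegacy)
    {
        PMYDEV pThis = RT_FROM_MEMBER(pVirtio, MYDEV, Virtio); /* hypothetical layout */
        pThis->fIsLegacyGuest = RT_BOOL(fLegacy);
        pThis->fHasEventIdx   = RT_BOOL(fDriverFeatures & VIRTIO_F_EVENT_IDX);
        /* ...configure device-specific behavior from the accepted feature set... */
    }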
"" : "\n")); 1399 1546 1400 1547 void *pv = pu32; /* To use existing macros */ … … 1412 1559 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER(uDriverFeatures, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort)) 1413 1560 { 1414 uint32_t val = pVirtio->uDriverFeatures & 0xffffffff;1561 uint32_t val = pVirtio->uDriverFeatures & UINT32_C(0xffffffff); 1415 1562 memcpy(pu32, &val, cb); 1416 1563 VIRTIO_DEV_CONFIG_LOG_ACCESS(uDriverFeatures, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort); … … 1420 1567 { 1421 1568 *(uint8_t *)pu32 = pVirtio->fDeviceStatus; 1422 1423 1569 if (LogIs7Enabled()) 1424 1570 { … … 1435 1581 pVirtio->uISR = 0; 1436 1582 virtioLowerInterrupt( pDevIns, 0); 1437 Log((" ISR read and cleared\n"));1583 Log((" (ISR read and cleared)\n")); 1438 1584 } 1439 1585 else … … 1482 1628 return rc; 1483 1629 } 1484 1485 1630 STAM_PROFILE_ADV_STOP(&pVirtio->CTX_SUFF(StatRead), a); 1486 1631 return VINF_SUCCESS; 1487 1632 } 1488 1489 1633 1490 1634 /** … … 1504 1648 int fWrite = 1; /* To use existing macros */ 1505 1649 1506 // LogFunc(("Write to port offset=%RTiop, cb=%#x, u32=%#x\n",offPort, cb, u32));1650 Log(("%-23s: Port written at offset=%RTiop, cb=%#x, u32=%#x\n", __FUNCTION__, offPort, cb, u32)); 1507 1651 1508 1652 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uVirtqSelect, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort)) … … 1535 1679 pVirtio->uDriverFeatures &= VIRTIO_DEV_INDEPENDENT_LEGACY_FEATURES_OFFERED; 1536 1680 } 1681 if (!((pVirtio->fDriverFeaturesWritten ^= 1) & 1)) 1682 { 1683 #ifdef IN_RING0 1684 Log6(("%-23s: RING0 => RING3 (demote)\n", __FUNCTION__)); 1685 return VINF_IOM_R3_MMIO_WRITE; 1686 #endif 1687 #ifdef IN_RING3 1688 PVIRTIOCORECC pVirtioCC = PDMINS_2_DATA_CC(pDevIns, PVIRTIOCORECC); 1689 virtioR3DoFeaturesCompleteOnceOnly(pVirtio, pVirtioCC); 1690 #endif 1691 } 1537 1692 VIRTIO_DEV_CONFIG_LOG_ACCESS(uDriverFeatures, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort); 1538 1693 } … … 1556 1711 Log(("%-23s: Guest wrote fDeviceStatus ................ (%s)\n", __FUNCTION__, szOut)); 1557 1712 } 1558 1559 1713 if (fDriverStateImproved || fDriverInitiatedReset) 1560 1714 { … … 1693 1847 1694 1848 /* 1695 * A dditionally, anytime any part of the device-specific configuration (which our client maintains)1696 * is READ it needs to be checked to see if it changed since the last time any part was read, in1697 * order to maintain the config generation (see VirtIO 1.0 spec, section 4.1.4.3.1)1849 * Anytime any part of the dev-specific dev config (which this virtio core implementation sees 1850 * as a blob, and virtio dev-specific code separates into fields) is READ, it must be compared 1851 * for deltas from previous read to maintain a config gen. seq. counter (VirtIO 1.0, section 4.1.4.3.1) 1698 1852 */ 1699 1853 bool fDevSpecificFieldChanged = RT_BOOL(memcmp(pVirtioCC->pbDevSpecificCfg + uOffset, … … 1706 1860 { 1707 1861 ++pVirtio->uConfigGeneration; 1708 Log6Func(("Bumped cfg. generation to %d because %s%s\n", 1709 pVirtio->uConfigGeneration, 1862 Log6Func(("Bumped cfg. generation to %d because %s%s\n", pVirtio->uConfigGeneration, 1710 1863 fDevSpecificFieldChanged ? "<dev cfg changed> " : "", 1711 1864 pVirtio->fGenUpdatePending ? 
"<update was pending>" : "")); … … 1769 1922 #else 1770 1923 STAM_PROFILE_ADV_STOP(&pVirtio->CTX_SUFF(StatWrite), a); 1924 Log6(("%-23s: RING0 => RING3 (demote)\n", __FUNCTION__)); 1771 1925 return VINF_IOM_R3_MMIO_WRITE; 1772 1926 #endif … … 1819 1973 if (uAddress == pVirtio->uPciCfgDataOff) 1820 1974 { 1821 /* 1822 * VirtIO 1.0 spec section 4.1.4.7 describes a required alternative access capability 1823 * whereby the guest driver can specify a bar, offset, and length via the PCI configuration space 1824 * (the virtio_pci_cfg_cap capability), and access data items. 1825 * This is used by BIOS to gain early boot access to the the storage device. 1826 */ 1975 /* See comments in PCI Cfg capability initialization (in capabilities setup section of this code) */ 1827 1976 struct virtio_pci_cap *pPciCap = &pVirtioCC->pPciCfgCap->pciCap; 1828 1977 uint32_t uLength = pPciCap->uLength; … … 1863 2012 if (uAddress == pVirtio->uPciCfgDataOff) 1864 2013 { 1865 /* VirtIO 1.0 spec section 4.1.4.7 describes a required alternative access capability 1866 * whereby the guest driver can specify a bar, offset, and length via the PCI configuration space 1867 * (the virtio_pci_cfg_cap capability), and access data items. 1868 * This is used by BIOS to gain early boot access to the the storage device.*/ 1869 2014 /* See comments in PCI Cfg capability initialization (in capabilities setup section of this code) */ 1870 2015 struct virtio_pci_cap *pPciCap = &pVirtioCC->pPciCfgCap->pciCap; 1871 2016 uint32_t uLength = pPciCap->uLength; … … 1889 2034 1890 2035 /********************************************************************************************************************************* 1891 * Saved state .*2036 * Saved state (SSM) * 1892 2037 *********************************************************************************************************************************/ 2038 2039 2040 /** 2041 * Loads a saved device state (called from device-specific code on SSM final pass) 2042 * 2043 * @param pVirtio Pointer to the shared virtio state. 2044 * @param pHlp The ring-3 device helpers. 2045 * @param pSSM The saved state handle. 2046 * @returns VBox status code. 
2047 */ 2048 int virtioCoreR3LegacyDeviceLoadExec(PVIRTIOCORE pVirtio, PCPDMDEVHLPR3 pHlp, 2049 PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uVirtioLegacy_3_1_Beta) 2050 { 2051 int rc; 2052 uint32_t uDriverFeaturesLegacy32bit; 2053 2054 rc = pHlp->pfnSSMGetU32( pSSM, &uDriverFeaturesLegacy32bit); 2055 AssertRCReturn(rc, rc); 2056 pVirtio->uDriverFeatures = (uint64_t)uDriverFeaturesLegacy32bit; 2057 2058 rc = pHlp->pfnSSMGetU16( pSSM, &pVirtio->uVirtqSelect); 2059 AssertRCReturn(rc, rc); 2060 2061 rc = pHlp->pfnSSMGetU8( pSSM, &pVirtio->fDeviceStatus); 2062 AssertRCReturn(rc, rc); 2063 2064 char szOut[80] = { 0 }; 2065 virtioCoreFormatDeviceStatus(pVirtio->fDeviceStatus, szOut, sizeof(szOut)); 2066 Log(("Loaded legacy device status = (%s)\n", szOut)); 2067 2068 rc = pHlp->pfnSSMGetU8( pSSM, &pVirtio->uISR); 2069 AssertRCReturn(rc, rc); 2070 2071 uint32_t cQueues = 3; /* Thes constant default value copied from earliest v0.9 code */ 2072 if (uVersion > uVirtioLegacy_3_1_Beta) 2073 { 2074 rc = pHlp->pfnSSMGetU32(pSSM, &cQueues); 2075 AssertRCReturn(rc, rc); 2076 } 2077 2078 AssertLogRelMsgReturn(cQueues <= VIRTQ_MAX_COUNT, ("%#x\n", cQueues), VERR_SSM_LOAD_CONFIG_MISMATCH); 2079 AssertLogRelMsgReturn(pVirtio->uVirtqSelect < cQueues || (cQueues == 0 && pVirtio->uVirtqSelect), 2080 ("uVirtqSelect=%u cQueues=%u\n", pVirtio->uVirtqSelect, cQueues), 2081 VERR_SSM_LOAD_CONFIG_MISMATCH); 2082 2083 Log(("\nRestoring %d legacy-only virtio-net device queues from saved state:\n", cQueues)); 2084 for (unsigned uVirtq = 0; uVirtq < cQueues; uVirtq++) 2085 { 2086 PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtq]; 2087 2088 if (uVirtq == cQueues - 1) 2089 RTStrPrintf(pVirtq->szName, sizeof(pVirtq->szName), "legacy-ctrlq"); 2090 else if (uVirtq % 2) 2091 RTStrPrintf(pVirtq->szName, sizeof(pVirtq->szName), "legacy-xmitq<%d>", uVirtq / 2); 2092 else 2093 RTStrPrintf(pVirtq->szName, sizeof(pVirtq->szName), "legacy-recvq<%d>", uVirtq / 2); 2094 2095 rc = pHlp->pfnSSMGetU16(pSSM, &pVirtq->uQueueSize); 2096 AssertRCReturn(rc, rc); 2097 2098 uint32_t uVirtqPfn; 2099 rc = pHlp->pfnSSMGetU32(pSSM, &uVirtqPfn); 2100 AssertRCReturn(rc, rc); 2101 2102 rc = pHlp->pfnSSMGetU16(pSSM, &pVirtq->uAvailIdxShadow); 2103 AssertRCReturn(rc, rc); 2104 2105 rc = pHlp->pfnSSMGetU16(pSSM, &pVirtq->uUsedIdxShadow); 2106 AssertRCReturn(rc, rc); 2107 2108 if (uVirtqPfn) 2109 { 2110 pVirtq->GCPhysVirtqDesc = (uint64_t)uVirtqPfn * VIRTIO_PAGE_SIZE; 2111 pVirtq->GCPhysVirtqAvail = pVirtq->GCPhysVirtqDesc + sizeof(VIRTQ_DESC_T) * pVirtq->uQueueSize; 2112 pVirtq->GCPhysVirtqUsed = 2113 RT_ALIGN(pVirtq->GCPhysVirtqAvail + RT_UOFFSETOF_DYN(VIRTQ_AVAIL_T, auRing[pVirtq->uQueueSize]), VIRTIO_PAGE_SIZE); 2114 pVirtq->uEnable = 1; 2115 } 2116 else 2117 { 2118 LogFunc(("WARNING: QUEUE \"%s\" PAGE NUMBER ZERO IN SAVED STATE\n", pVirtq->szName)); 2119 pVirtq->uEnable = 0; 2120 } 2121 pVirtq->uNotifyOffset = 0; /* unused in legacy mode */ 2122 pVirtq->uMsixVector = 0; /* unused in legacy mode */ 2123 } 2124 pVirtio->fGenUpdatePending = 0; /* unused in legacy mode */ 2125 pVirtio->uConfigGeneration = 0; /* unused in legacy mode */ 2126 pVirtio->uPciCfgDataOff = 0; /* unused in legacy mode (port I/O used instead) */ 2127 2128 return VINF_SUCCESS; 2129 } 2130 2131 /** 2132 * Loads a saved device state (called from device-specific code on SSM final pass) 2133 * 2134 * Note: This loads state saved by a Modern (VirtIO 1.0+) device, of which this transitional device is one, 2135 * and thus supports both legacy and modern guest virtio drivers. 
2136 * 2137 * @param pVirtio Pointer to the shared virtio state. 2138 * @param pHlp The ring-3 device helpers. 2139 * @param pSSM The saved state handle. 2140 * @returns VBox status code. 2141 */ 2142 int virtioCoreR3ModernDeviceLoadExec(PVIRTIOCORE pVirtio, PCPDMDEVHLPR3 pHlp, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uTestVersion, uint32_t cQueues) 2143 { 2144 RT_NOREF2(cQueues, uVersion); 2145 LogFunc(("\n")); 2146 /* 2147 * Check the marker and (embedded) version number. 2148 */ 2149 uint64_t uMarker = 0; 2150 int rc; 2151 2152 rc = pHlp->pfnSSMGetU64(pSSM, &uMarker); 2153 AssertRCReturn(rc, rc); 2154 if (uMarker != VIRTIO_SAVEDSTATE_MARKER) 2155 return pHlp->pfnSSMSetLoadError(pSSM, VERR_SSM_DATA_UNIT_FORMAT_CHANGED, RT_SRC_POS, 2156 N_("Expected marker value %#RX64 found %#RX64 instead"), 2157 VIRTIO_SAVEDSTATE_MARKER, uMarker); 2158 uint32_t uVersionSaved = 0; 2159 rc = pHlp->pfnSSMGetU32(pSSM, &uVersionSaved); 2160 AssertRCReturn(rc, rc); 2161 if (uVersionSaved != uTestVersion) 2162 return pHlp->pfnSSMSetLoadError(pSSM, VERR_SSM_DATA_UNIT_FORMAT_CHANGED, RT_SRC_POS, 2163 N_("Unsupported virtio version: %u"), uVersionSaved); 2164 /* 2165 * Load the state. 2166 */ 2167 rc = pHlp->pfnSSMGetU32( pSSM, &pVirtio->fLegacyDriver); 2168 AssertRCReturn(rc, rc); 2169 rc = pHlp->pfnSSMGetBool( pSSM, &pVirtio->fGenUpdatePending); 2170 AssertRCReturn(rc, rc); 2171 rc = pHlp->pfnSSMGetU8( pSSM, &pVirtio->fDeviceStatus); 2172 AssertRCReturn(rc, rc); 2173 rc = pHlp->pfnSSMGetU8( pSSM, &pVirtio->uConfigGeneration); 2174 AssertRCReturn(rc, rc); 2175 rc = pHlp->pfnSSMGetU8( pSSM, &pVirtio->uPciCfgDataOff); 2176 AssertRCReturn(rc, rc); 2177 rc = pHlp->pfnSSMGetU8( pSSM, &pVirtio->uISR); 2178 AssertRCReturn(rc, rc); 2179 rc = pHlp->pfnSSMGetU16( pSSM, &pVirtio->uVirtqSelect); 2180 AssertRCReturn(rc, rc); 2181 rc = pHlp->pfnSSMGetU32( pSSM, &pVirtio->uDeviceFeaturesSelect); 2182 AssertRCReturn(rc, rc); 2183 rc = pHlp->pfnSSMGetU32( pSSM, &pVirtio->uDriverFeaturesSelect); 2184 AssertRCReturn(rc, rc); 2185 rc = pHlp->pfnSSMGetU64( pSSM, &pVirtio->uDriverFeatures); 2186 AssertRCReturn(rc, rc); 2187 2188 /** @todo Adapt this loop use cQueues argument instead of static queue count (safely with SSM versioning) */ 2189 for (uint32_t i = 0; i < VIRTQ_MAX_COUNT; i++) 2190 { 2191 PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[i]; 2192 rc = pHlp->pfnSSMGetGCPhys64( pSSM, &pVirtq->GCPhysVirtqDesc); 2193 AssertRCReturn(rc, rc); 2194 rc = pHlp->pfnSSMGetGCPhys64( pSSM, &pVirtq->GCPhysVirtqAvail); 2195 AssertRCReturn(rc, rc); 2196 rc = pHlp->pfnSSMGetGCPhys64( pSSM, &pVirtq->GCPhysVirtqUsed); 2197 AssertRCReturn(rc, rc); 2198 rc = pHlp->pfnSSMGetU16( pSSM, &pVirtq->uNotifyOffset); 2199 AssertRCReturn(rc, rc); 2200 rc = pHlp->pfnSSMGetU16( pSSM, &pVirtq->uMsixVector); 2201 AssertRCReturn(rc, rc); 2202 rc = pHlp->pfnSSMGetU16( pSSM, &pVirtq->uEnable); 2203 AssertRCReturn(rc, rc); 2204 rc = pHlp->pfnSSMGetU16( pSSM, &pVirtq->uQueueSize); 2205 AssertRCReturn(rc, rc); 2206 rc = pHlp->pfnSSMGetU16( pSSM, &pVirtq->uAvailIdxShadow); 2207 AssertRCReturn(rc, rc); 2208 rc = pHlp->pfnSSMGetU16( pSSM, &pVirtq->uUsedIdxShadow); 2209 AssertRCReturn(rc, rc); 2210 rc = pHlp->pfnSSMGetMem( pSSM, pVirtq->szName, sizeof(pVirtq->szName)); 2211 AssertRCReturn(rc, rc); 2212 } 2213 return VINF_SUCCESS; 2214 } 1893 2215 1894 2216 /** … … 1900 2222 * @returns VBox status code. 
1901 2223 */ 1902 int virtioCoreR3SaveExec(PVIRTIOCORE pVirtio, PCPDMDEVHLPR3 pHlp, PSSMHANDLE pSSM) 1903 { 2224 int virtioCoreR3SaveExec(PVIRTIOCORE pVirtio, PCPDMDEVHLPR3 pHlp, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t cQueues) 2225 { 2226 RT_NOREF(cQueues); 2227 /** @todo figure out a way to save cQueues (with SSM versioning) */ 2228 1904 2229 LogFunc(("\n")); 1905 2230 pHlp->pfnSSMPutU64(pSSM, VIRTIO_SAVEDSTATE_MARKER); 1906 pHlp->pfnSSMPutU32(pSSM, VIRTIO_SAVEDSTATE_VERSION); 1907 2231 pHlp->pfnSSMPutU32(pSSM, uVersion); 2232 2233 pHlp->pfnSSMPutU32( pSSM, pVirtio->fLegacyDriver); 1908 2234 pHlp->pfnSSMPutBool(pSSM, pVirtio->fGenUpdatePending); 1909 2235 pHlp->pfnSSMPutU8( pSSM, pVirtio->fDeviceStatus); … … 1932 2258 AssertRCReturn(rc, rc); 1933 2259 } 1934 1935 return VINF_SUCCESS;1936 }1937 1938 /**1939 * Called from the FNSSMDEVLOADEXEC function of the device.1940 *1941 * @param pVirtio Pointer to the shared virtio state.1942 * @param pHlp The ring-3 device helpers.1943 * @param pSSM The saved state handle.1944 * @returns VBox status code.1945 */1946 int virtioCoreR3LoadExec(PVIRTIOCORE pVirtio, PCPDMDEVHLPR3 pHlp, PSSMHANDLE pSSM)1947 {1948 LogFunc(("\n"));1949 /*1950 * Check the marker and (embedded) version number.1951 */1952 uint64_t uMarker = 0;1953 int rc = pHlp->pfnSSMGetU64(pSSM, &uMarker);1954 AssertRCReturn(rc, rc);1955 if (uMarker != VIRTIO_SAVEDSTATE_MARKER)1956 return pHlp->pfnSSMSetLoadError(pSSM, VERR_SSM_DATA_UNIT_FORMAT_CHANGED, RT_SRC_POS,1957 N_("Expected marker value %#RX64 found %#RX64 instead"),1958 VIRTIO_SAVEDSTATE_MARKER, uMarker);1959 uint32_t uVersion = 0;1960 rc = pHlp->pfnSSMGetU32(pSSM, &uVersion);1961 AssertRCReturn(rc, rc);1962 if (uVersion != VIRTIO_SAVEDSTATE_VERSION)1963 return pHlp->pfnSSMSetLoadError(pSSM, VERR_SSM_DATA_UNIT_FORMAT_CHANGED, RT_SRC_POS,1964 N_("Unsupported virtio version: %u"), uVersion);1965 /*1966 * Load the state.1967 */1968 pHlp->pfnSSMGetBool( pSSM, &pVirtio->fGenUpdatePending);1969 pHlp->pfnSSMGetU8( pSSM, &pVirtio->fDeviceStatus);1970 pHlp->pfnSSMGetU8( pSSM, &pVirtio->uConfigGeneration);1971 pHlp->pfnSSMGetU8( pSSM, &pVirtio->uPciCfgDataOff);1972 pHlp->pfnSSMGetU8( pSSM, &pVirtio->uISR);1973 pHlp->pfnSSMGetU16( pSSM, &pVirtio->uVirtqSelect);1974 pHlp->pfnSSMGetU32( pSSM, &pVirtio->uDeviceFeaturesSelect);1975 pHlp->pfnSSMGetU32( pSSM, &pVirtio->uDriverFeaturesSelect);1976 pHlp->pfnSSMGetU64( pSSM, &pVirtio->uDriverFeatures);1977 1978 for (uint32_t i = 0; i < VIRTQ_MAX_COUNT; i++)1979 {1980 PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[i];1981 1982 pHlp->pfnSSMGetGCPhys64( pSSM, &pVirtq->GCPhysVirtqDesc);1983 pHlp->pfnSSMGetGCPhys64( pSSM, &pVirtq->GCPhysVirtqAvail);1984 pHlp->pfnSSMGetGCPhys64( pSSM, &pVirtq->GCPhysVirtqUsed);1985 pHlp->pfnSSMGetU16( pSSM, &pVirtq->uNotifyOffset);1986 pHlp->pfnSSMGetU16( pSSM, &pVirtq->uMsixVector);1987 pHlp->pfnSSMGetU16( pSSM, &pVirtq->uEnable);1988 pHlp->pfnSSMGetU16( pSSM, &pVirtq->uQueueSize);1989 pHlp->pfnSSMGetU16( pSSM, &pVirtq->uAvailIdxShadow);1990 pHlp->pfnSSMGetU16( pSSM, &pVirtq->uUsedIdxShadow);1991 rc = pHlp->pfnSSMGetMem( pSSM, pVirtq->szName, sizeof(pVirtq->szName));1992 AssertRCReturn(rc, rc);1993 }1994 1995 2260 return VINF_SUCCESS; 1996 2261 } … … 2002 2267 2003 2268 /** 2004 * This must be called by the client to handle VM state changes 2005 * after the client takes care of its device-specific tasks for the state change. 2006 * (i.e. 
 /**
- * This must be called by the client to handle VM state changes
- * after the client takes care of its device-specific tasks for the state change.
- * (i.e. Reset, suspend, power-off, resume)
+ * This must be called by the client to handle VM state changes after the client takes care of its device-specific
+ * tasks for the state change (i.e. reset, suspend, power-off, resume)
  *
  * @param   pDevIns     The device instance.
…
 /** API Function: See header file */
 int virtioCoreR3Init(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTIOCORECC pVirtioCC, PVIRTIOPCIPARAMS pPciParams,
-                     const char *pcszInstance, uint64_t fDevSpecificFeatures, void *pvDevSpecificCfg, uint16_t cbDevSpecificCfg)
+                     const char *pcszInstance, uint64_t fDevSpecificFeatures, uint32_t fOfferLegacy,
+                     void *pvDevSpecificCfg, uint16_t cbDevSpecificCfg)
 {
     /*
-     * The pVirtio state must be the first member of the shared device instance
-     * data, otherwise we cannot get our bearings in the PCI configuration callbacks.
+     * Virtio state must be the first member of shared device instance data,
+     * otherwise we can't get our bearings in the PCI config callbacks.
      */
     AssertLogRelReturn(pVirtio == PDMINS_2_DATA(pDevIns, PVIRTIOCORE), VERR_STATE_CHANGED);
…
     AssertReturn(pVirtioCC->pfnStatusChanged, VERR_INVALID_POINTER);
     AssertReturn(pVirtioCC->pfnVirtqNotified, VERR_INVALID_POINTER);
-    AssertReturn(pVirtioCC->pfnGuestVersionHandler, VERR_INVALID_POINTER);
+    AssertReturn(pVirtioCC->pfnFeatureNegotiationComplete, VERR_INVALID_POINTER);
     AssertReturn(VIRTQ_SIZE > 0 && VIRTQ_SIZE <= 32768, VERR_OUT_OF_RANGE); /* VirtIO specification-defined limit */

 #if 0 /* Until pdmR3DvHlp_PCISetIrq() impl is fixed and Assert that limits vec to 0 is removed
-       * The legacy MSI support has not been implemented yet
+       * VBox legacy MSI support has not been implemented yet
        */
 # ifdef VBOX_WITH_MSI_DEVICES
…
 #endif

-    /* Tell the device-specific code that guest is in legacy mode (for now) */
-    pVirtioCC->pfnGuestVersionHandler(pVirtio, false /* fModern */);
-
     /*
-     * The host features offered include both device-specific features
-     * and reserved feature bits (device independent)
+     * Host features (presented as a smörgasbord for the guest to select from)
+     * include both dev-specific features & reserved dev-independent features (bitmask).
      */
     pVirtio->uDeviceFeatures = VIRTIO_F_VERSION_1
                              | VIRTIO_DEV_INDEPENDENT_FEATURES_OFFERED
                              | fDevSpecificFeatures;
+
+    pVirtio->fLegacyDriver = pVirtio->fOfferLegacy = fOfferLegacy;

     RTStrCopy(pVirtio->szInstance, sizeof(pVirtio->szInstance), pcszInstance);
…
     uint32_t cbRegion = 0;

-    /* Common capability (VirtIO 1.0 spec, section 4.1.4.3) */
+    /*
+     * Common capability (VirtIO 1.0, section 4.1.4.3)
+     */
     pCfg = (PVIRTIO_PCI_CAP_T)&pPciDev->abConfig[0x40];
     pCfg->uCfgType = VIRTIO_PCI_CAP_COMMON_CFG;
…
2421 * 2422 * The size of the spec-defined subregion described by this VirtIO capability is 2423 * based-on the choice of this implementation to make the notification area of each 2424 * queue equal to queue's ordinal position (e.g. queue selector value). The VirtIO 2425 * specification leaves it up to implementation to define queue notification area layout. 2157 2426 */ 2158 2427 pCfg = (PVIRTIO_PCI_CAP_T)&pPciDev->abConfig[pCfg->uCapNext]; … … 2170 2439 pVirtioCC->pNotifyCap->uNotifyOffMultiplier = VIRTIO_NOTIFY_OFFSET_MULTIPLIER; 2171 2440 2172 /* ISR capability (VirtIO 1.0 spec, section 4.1.4.5)2441 /* ISR capability (VirtIO 1.0, section 4.1.4.5) 2173 2442 * 2174 * VirtIO 1.0 spec says 8-bit, unaligned in MMIO space. Example/diagram2175 * of spec shows it as a 32-bit field with upper bits 'reserved'2176 * Will take spec's words more literally than the diagram for now.2443 * VirtIO 1.0 spec says 8-bit, unaligned in MMIO space. The specification example/diagram 2444 * illustrates this capability as 32-bit field with upper bits 'reserved'. Those depictions 2445 * differ. The spec's wording, not the diagram, is seen to work in practice. 2177 2446 */ 2178 2447 pCfg = (PVIRTIO_PCI_CAP_T)&pPciDev->abConfig[pCfg->uCapNext]; … … 2189 2458 pVirtioCC->pIsrCap = pCfg; 2190 2459 2191 /* PCI Cfg capability (VirtIO 1.0 spec, section 4.1.4.7) 2192 * This capability doesn't get page-MMIO mapped. Instead uBar, uOffset and uLength are intercepted 2193 * by trapping PCI configuration I/O and get modulated by consumers to locate fetch and read/write 2194 * values from any region. NOTE: The linux driver not only doesn't use this feature, it will not 2195 * even list it as present if uLength isn't non-zero and also 4-byte-aligned as the linux driver is 2196 * initializing. 2460 /* PCI Cfg capability (VirtIO 1.0, section 4.1.4.7) 2461 * 2462 * This capability facilitates early-boot access to this device (BIOS). 2463 * This region isn't page-MMIO mapped. PCI configuration accesses are intercepted, 2464 * wherein uBar, uOffset and uLength are modulated by consumers to locate and read/write 2465 * values in any part of any region. (NOTE: Linux driver doesn't utilize this feature. 2466 * This capability only appears in lspci output on Linux if uLength is non-zero, 4-byte aligned, 2467 * during initialization of linux virtio driver). 2197 2468 */ 2198 2469 pVirtio->uPciCfgDataOff = pCfg->uCapNext + RT_OFFSETOF(VIRTIO_PCI_CFG_CAP_T, uPciCfgData); … … 2211 2482 if (pVirtioCC->pbDevSpecificCfg) 2212 2483 { 2213 /* Device specific config capability (via VirtIO 1.0, section 4.1.4.6). 2484 /* Device-specific config capability (VirtIO 1.0, section 4.1.4.6). 2485 * 2214 2486 * Client defines the device-specific config struct and passes size to virtioCoreR3Init() 2215 * to inform this. */ 2487 * to inform this. 2488 */ 2216 2489 pCfg = (PVIRTIO_PCI_CAP_T)&pPciDev->abConfig[pCfg->uCapNext]; 2217 2490 pCfg->uCfgType = VIRTIO_PCI_CAP_DEVICE_CFG; … … 2263 2536 return PDMDEV_SET_ERROR(pDevIns, rc, N_("virtio: out of memory allocating string")); /* can we put params in this error? */ 2264 2537 2265 /* As a transitional device that supports legacy VirtIO drivers, this VirtIO device generic implementation presents 2266 * legacy driver interface in I/O space at BAR0. The following maps the common (e.g. device independent) 2267 * dev config area as well as device-specific dev config area (whose size is passed to init function of this VirtIO 2268 * generic device code) for access via Port I/O, since legacy drivers (e.g. 
+    if (pVirtio->fOfferLegacy)
+    {
+        /* As a transitional device that supports legacy VirtIO drivers, this VirtIO device generic implementation presents
+         * a legacy driver interface in I/O space at BAR0. The following maps the common (e.g. device-independent)
+         * dev config area as well as the device-specific dev config area (whose size is passed to the init function of this
+         * VirtIO generic device code) for access via port I/O, since legacy drivers (e.g. pre VirtIO 1.0) don't use MMIO
+         * callbacks. (See VirtIO 1.1, section 4.1.4.8.)
+         */
+        rc = PDMDevHlpPCIIORegionCreateIo(pDevIns, VIRTIO_REGION_LEGACY_IO, sizeof(VIRTIO_LEGACY_PCI_COMMON_CFG_T) + cbDevSpecificCfg,
+                                          virtioLegacyIOPortOut, virtioLegacyIOPortIn, NULL /*pvUser*/, pVirtioCC->pcszPortIoName,
+                                          NULL /*paExtDescs*/, &pVirtio->hLegacyIoPorts);
+        AssertLogRelRCReturn(rc, PDMDEV_SET_ERROR(pDevIns, rc, N_("virtio: cannot register legacy config in I/O space at BAR0 */")));
+    }

     /* Note: The Linux driver at drivers/virtio/virtio_pci_modern.c tries to map at least a page for the
-     * 'unknown' device-specific capability without querying the capability to figure
-     * out size, so pad with an extra page
+     * 'unknown' device-specific capability without querying the capability to determine size, so pad w/extra page.
      */
     rc = PDMDevHlpPCIIORegionCreateMmio(pDevIns, VIRTIO_REGION_PCI_CAP, RT_ALIGN_32(cbRegion + VIRTIO_PAGE_SIZE, VIRTIO_PAGE_SIZE),
…
 # endif /* VBOX_WITH_STATISTICS */

-    virtioResetDevice(pDevIns, pVirtio); /* Reset VirtIO specific state of device */
-
     return VINF_SUCCESS;
 }
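A minimal sketch of a device constructor driving the changed init API with the new fOfferLegacy argument (the device type, feature mask and config struct are illustrative, not from this changeset; pPciParams is assumed to be filled in by the caller):

    /* Hypothetical device constructor fragment. */
    static int myDevConstructVirtio(PPDMDEVINS pDevIns, PMYDEV pThis, PMYDEVCC pThisCC,
                                    PVIRTIOPCIPARAMS pPciParams)
    {
        return virtioCoreR3Init(pDevIns, &pThis->Virtio, &pThisCC->Virtio, pPciParams,
                                "my-virtio-dev", MYDEV_FEATURES_OFFERED,
                                1 /*fOfferLegacy: also expose the legacy interface at BAR0*/,
                                &pThis->DevSpecificCfg, sizeof(pThis->DevSpecificCfg));
    }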
trunk/src/VBox/Devices/VirtIO/VirtioCore.h
(diff r92091 → r92939)

…
 #  define VIRTIO_HEX_DUMP(logLevel, pv, cb, base, title) do { } while (0)
 #endif
+
+/** Marks the start of the virtio saved state (just for sanity). */
+#define VIRTIO_SAVEDSTATE_MARKER                        UINT64_C(0x1133557799bbddff)

 /** Pointer to the shared VirtIO state. */
…
 #define VIRTIO_PAGE_SIZE                 4096            /**< Page size used by VirtIO specification */

-/* Note: The VirtIO specification, particularly rev. 0.95, and clarified in rev 1.0 for transitional devices,
-         says the page size used for Queue Size calculations is usually 4096 bytes, but dependent on the
-         transport. In an appendix of the 0.95 spec, the 'mmio device', which has not been
-         implemented by the VBox legacy device in VirtualBox, says the guest must report the page size. For now
-         will set page size to a static 4096 based on the original VBox legacy VirtIO implementation which
-         tied it to PAGE_SIZE which appears to work (or at least good enough for most practical purposes) */
-
-/** The following virtioCoreGCPhysChain*() functions mimic the functionality of the related RT s/g functions,
- *  except they work with the data type GCPhys rather than void *
+/**
+ * @todo Move the following virtioCoreGCPhysChain*() functions into some VirtualBox
+ *       source-tree common location, out of this code.
+ *
+ *       They behave identically to the S/G utilities in the RT library, except they work with the
+ *       GCPhys data type specifically instead of void *, to avoid a potentially disastrous mismatch
+ *       between sizeof(void *) and sizeof(GCPhys).
  */
 typedef struct VIRTIOSGSEG /**< An S/G entry */
…
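The two chain-size helpers from this family are used throughout the .cpp diff above to derive the current write offset within a guest IN chain; a short sketch (assuming a valid pVirtqBuf as produced by virtioCoreR3VirtqAvailBufGet()):

    size_t cbChain  = virtioCoreGCPhysChainCalcBufSize(pVirtqBuf->pSgPhysReturn);
    size_t cbLeft   = virtioCoreGCPhysChainCalcLengthLeft(pVirtqBuf->pSgPhysReturn);
    size_t offWrite = cbChain - cbLeft;  /* the "buf@offset" value logged in the .cpp above */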
100 * 101 * Those functions consolidate the VirtIO descriptor chain into a single representation where: 102 * 103 * pSgPhysSend GCPhys s/g buffer containing all of the (VirtIO) OUT descriptors 104 * pSgPhysReturn GCPhys s/g buffer containing all of the (VirtIO) IN descriptors 105 * 106 * The OUT descriptors are data sent from guest to host (dev-specific commands and/or data) 107 * The IN are to be filled with data (converted to physical) on host, to be returned to guest 108 * 103 109 */ 104 110 typedef struct VIRTQBUF … … 166 172 static const VIRTIO_FEATURES_LIST s_aCoreFeatures[] = 167 173 { 174 { VIRTIO_F_VERSION_1, " VERSION_1 Guest driver supports VirtIO specification V1.0+ (e.g. \"modern\")\n" }, 175 { VIRTIO_F_RING_EVENT_IDX, " RING_EVENT_IDX Enables use_event and avail_event fields described in 2.4.7, 2.4.8\n" }, 168 176 { VIRTIO_F_RING_INDIRECT_DESC, " RING_INDIRECT_DESC Driver can use descriptors with VIRTQ_DESC_F_INDIRECT flag set\n" }, 169 { VIRTIO_F_RING_EVENT_IDX, " RING_EVENT_IDX Enables use_event and avail_event fields described in 2.4.7, 2.4.8\n" },170 { VIRTIO_F_VERSION_1, " VERSION Used to detect legacy drivers.\n" },171 177 }; 172 173 178 174 179 #define VIRTIO_DEV_INDEPENDENT_FEATURES_OFFERED ( 0 ) /**< TBD: Add VIRTIO_F_INDIRECT_DESC */ … … 202 207 kvirtIoVmStateChangedFor32BitHack = 0x7fffffff 203 208 } VIRTIOVMSTATECHANGED; 204 205 206 209 207 210 /** @def Virtio Device PCI Capabilities type codes */ … … 305 308 typedef struct VIRTQUEUE 306 309 { 307 RTGCPHYS GCPhysVirtqDesc; /**< (MMIO) PhysAdr per-Q desc structsGUEST */308 RTGCPHYS GCPhysVirtqAvail; /**< (MMIO) PhysAdr per-Q avail structsGUEST */309 RTGCPHYS GCPhysVirtqUsed; /**< (MMIO) PhysAdr per-Q used structsGUEST */310 uint16_t uMsixVector; /**< (MMIO) Per-queue vector for MSI-XGUEST */311 uint16_t uEnable; /**< (MMIO) Per-queue enableGUEST */312 uint16_t uNotifyOffset; /**< (MMIO) per-Q notify offsetHOST */313 uint16_t uQueueSize; /**< (MMIO) Per-queue sizeHOST/GUEST */310 RTGCPHYS GCPhysVirtqDesc; /**< (MMIO) Addr of virtq's desc ring GUEST */ 311 RTGCPHYS GCPhysVirtqAvail; /**< (MMIO) Addr of virtq's avail ring GUEST */ 312 RTGCPHYS GCPhysVirtqUsed; /**< (MMIO) Addr of virtq's used ring GUEST */ 313 uint16_t uMsixVector; /**< (MMIO) MSI-X vector GUEST */ 314 uint16_t uEnable; /**< (MMIO) Queue enable flag GUEST */ 315 uint16_t uNotifyOffset; /**< (MMIO) Notification offset for queue HOST */ 316 uint16_t uQueueSize; /**< (MMIO) Size of queue HOST/GUEST */ 314 317 uint16_t uAvailIdxShadow; /**< Consumer's position in avail ring */ 315 318 uint16_t uUsedIdxShadow; /**< Consumer's position in used ring */ … … 317 320 char szName[32]; /**< Dev-specific name of queue */ 318 321 bool fUsedRingEvent; /**< Flags if used idx to notify guest reached */ 319 uint8_t padding[3];322 bool fAttached; /**< Flags if dev-specific client attached */ 320 323 } VIRTQUEUE, *PVIRTQUEUE; 321 324 … … 331 334 uint64_t uDeviceFeatures; /**< (MMIO) Host features offered HOST */ 332 335 uint64_t uDriverFeatures; /**< (MMIO) Host features accepted GUEST */ 336 uint32_t fDriverFeaturesWritten; /**< (MMIO) Host features complete tracking */ 333 337 uint32_t uDeviceFeaturesSelect; /**< (MMIO) hi/lo select uDeviceFeatures GUEST */ 334 338 uint32_t uDriverFeaturesSelect; /**< (MMIO) hi/lo select uDriverFeatures GUEST */ … … 343 347 uint8_t fMsiSupport; /**< Flag set if using MSI instead of ISR */ 344 348 uint16_t uVirtqSelect; /**< (MMIO) queue selector GUEST */ 345 uint32_t fLegacyDriver; /**< Set if guest driver < VirtIO 1.0 */ 349 uint32_t 
fLegacyDriver; /**< Set if guest drv < VirtIO 1.0 and allowed */ 350 uint32_t fOfferLegacy; /**< Set at init call from dev-specific code */ 346 351 347 352 /** @name The locations of the capability structures in PCI config space and the BAR. … 354 359 /** @} */ 355 360 356 357 358 361 IOMMMIOHANDLE hMmioPciCap; /**< MMIO handle of PCI cap. region (\#2) */ 359 362 IOMIOPORTHANDLE hLegacyIoPorts; /**< Handle of legacy I/O port range. */ 360 361 363 362 364 #ifdef VBOX_WITH_STATISTICS … 374 376 STAMPROFILEADV StatWriteRC; /** I/O port and MMIO R3 Write profiling */ 375 377 #endif 376 377 378 378 /** @} */ 379 379 380 } VIRTIOCORE; 380 381 … 389 390 * @{ */ 390 391 /** 391 * Implementation-specific client callback to report VirtIO version as modern or legacy. 392 * That's the only meaningful distinction in the VirtIO specification. Beyond that 393 * versioning is loosely discernable through feature negotiation. There will be two callbacks, 394 * the first indicates the guest driver is considered legacy VirtIO, as it is critical to 395 * assume that initially. A 2nd callback will occur during feature negotiation 396 * which will indicate the guest is modern, if the guest acknowledges VIRTIO_F_VERSION_1, 397 * feature, or legacy if the feature isn't negotiated. That 2nd callback allows 398 * the device-specific code to configure its behavior in terms of both guest version and features. 392 * Implementation-specific client callback to report that VirtIO feature negotiation is 393 * complete. It should be invoked by the VirtIO core only once. 399 394 * 400 395 * @param pVirtio Pointer to the shared virtio state. 401 396 * @param fModern True if guest driver identified itself as modern (e.g. VirtIO 1.0 featured) 397 * @param fDriverFeatures Bitmask of features the guest driver has accepted/declined. 398 * @param fLegacy True if legacy mode was offered and the guest driver has not yet identified 399 * itself as modern (e.g. VirtIO 1.0 featured) 402 400 */ 403 DECLCALLBACKMEMBER(void, pfnGuestVersionHandler,(PVIRTIOCORE pVirtio, uint32_t fModern)); 401 DECLCALLBACKMEMBER(void, pfnFeatureNegotiationComplete, (PVIRTIOCORE pVirtio, uint64_t fDriverFeatures, uint32_t fLegacy)); 404 402 405 403 /** … 436 433 DECLCALLBACKMEMBER(int, pfnDevCapWrite,(PPDMDEVINS pDevIns, uint32_t offCap, const void *pvBuf, uint32_t cbWrite)); 438 439 435 /** 440 436 * When guest-to-host queue notifications are enabled, the guest driver notifies the host … 469 465 { 470 466 /** 471 * When guest-to-host queue notifications are enabled, the guest driver notifies the host 472 * that the avail queue has buffers, and this callback informs the client. 467 * This callback notifies the device-specific portion of this device implementation (if guest-to-host 468 * queue notifications are enabled) that the guest driver has notified the host (this device) 469 * that the VirtIO "avail" ring of a queue has some new s/g buffers added by the guest VirtIO driver. 473 470 * 474 471 * @param pVirtio Pointer to the shared virtio state. … 488 485 } VIRTIOCORERC; 489 487 490 486 /** @typedef VIRTIOCORECC 492 488 * The instance data for the current context. */ 493 489 typedef CTX_SUFF(VIRTIOCORE) VIRTIOCORECC; 494 490 495 491 /** @name API for VirtIO parent device 496 492 * @{ */ … 502 497 * This should be called from PDMDEVREGR3::pfnConstruct. 503 498 * 504 499 * @param pDevIns Device instance.
505 500 * @param pVirtio Pointer to the shared virtio state. This 506 501 * must be the first member in the shared … 519 514 int virtioCoreR3Init(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTIOCORECC pVirtioCC, 520 515 PVIRTIOPCIPARAMS pPciParams, const char *pcszInstance, 521 uint64_t fDevSpecificFeatures, void *pvDevSpecificCfg, uint16_t cbDevSpecificCfg); 522 516 uint64_t fDevSpecificFeatures, uint32_t fOfferLegacy, void *pvDevSpecificCfg, uint16_t cbDevSpecificCfg); 523 517 /** 524 518 * Initiate orderly reset procedure. This is an exposed API for clients that might need it. … 532 526 * 'Attaches' host device-specific implementation's queue state to host VirtIO core 533 527 * virtqueue management infrastructure, informing the virtio core of the name of the 534 * queue associated with the queue number. uVirtqNbr is used as the 'handle' for virt queues 535 * in this API (and is opaquely the index into the VirtIO core's array of queue state). 536 * 537 * Virtqueue numbers are VirtIO specification defined (i.e. they are unique within each 538 * VirtIO device type). 528 * queue to associate with the queue number. 529 * 530 * Note: uVirtqNbr (ordinal index) is used as the 'handle' for virtqs in this VirtioCore 531 * implementation's API (as an opaque selector into the VirtIO core's array of queues' states). 532 * 533 * Virtqueue numbers are defined per device type by the VirtIO specification 534 * (i.e. they are unique within each VirtIO device type), but are in some cases scalable, 535 * so only the pattern of queue numbers is fixed by the spec and an implementation may provide 536 * a self-determined number of queues. 539 537 * 540 538 * @param pVirtio Pointer to the shared virtio state. … 547 545 548 546 /** 549 * Enables or disables a virtq 547 * Detaches host device-specific implementation's queue state from the host VirtIO core 548 * virtqueue management infrastructure, informing the VirtIO core that the queue is 549 * not utilized by the device-specific code. 550 550 * 551 551 * @param pVirtio Pointer to the shared virtio state. 552 552 * @param uVirtqNbr Virtq number 553 * @param fEnable Flags whether to enable or disable the virtq 554 * 555 */ 556 void virtioCoreVirtqEnable(PVIRTIOCORE pVirtio, uint16_t uVirtqNbr, bool fEnable); 554 * 555 * @returns VBox status code. 556 */ 557 int virtioCoreR3VirtqDetach(PVIRTIOCORE pVirtio, uint16_t uVirtqNbr); 558 559 /** 560 * Checks to see whether queue is attached to core. 561 * 562 * @param pVirtio Pointer to the shared virtio state. 563 * @param uVirtqNbr Virtq number 564 * 565 * Returns true if the dev-specific reflection of the queue is attached 566 * to the core, false otherwise. 567 */ 568 bool virtioCoreR3VirtqIsAttached(PVIRTIOCORE pVirtio, uint16_t uVirtqNbr); 569 570 /** 571 * Checks to see whether queue is enabled. 572 * 573 * @param pVirtio Pointer to the shared virtio state. 574 * @param uVirtqNbr Virtq number 575 * 576 * Returns boolean true or false indicating core queue enable state. 577 * There is no API function to enable the queue, because the actual enabling is handled 578 * by the guest via MMIO. 579 * 580 * NOTE: Guest VirtIO driver's claim over this state is overridden (which violates the VirtIO 1.0 spec 581 * in a carefully controlled manner) in the case where the queue MUST be disabled, due to observed 582 * control queue corruption (e.g.
null GCPhys virtq base addr) while restoring a legacy-only device's 583 * (DevVirtioNet.cpp) saved state, as a way to flag that the queue is unusable-as-saved and must be removed. 584 * That is all handled in the load/save exec logic. Device reset could potentially, depending on 585 * parameters passed from host VirtIO device to guest VirtIO driver, result in the guest re-establishing 586 * the queue, in which case the queue's operational state would again be valid. 587 */ 588 bool virtioCoreR3VirtqIsEnabled(PVIRTIOCORE pVirtio, uint16_t uVirtqNbr); 557 589 558 590 /** 559 591 * Enable or disable notification for the specified queue. 560 592 * 561 * With notification enabled, the guest driver notifies the host device (via MMIO 562 * to the queue notification offset describe in VirtIO 1.0, 4.1.4.4 "Notification Structure Layout") 563 * whenever the guest driver adds a new entry to the avail ring of the respective queue. 564 * 565 * Note: In the VirtIO world, the device sets flags in the used ring to communicate to the driver how to 566 * handle notifications for the avail ring and the drivers sets flags in the avail ring to communicate 567 * to the device how to handle sending interrupts for the used ring. 593 * When queue notifications are enabled, the guest VirtIO driver notifies the host VirtIO device 594 * (via MMIO, see VirtIO 1.0, 4.1.4.4 "Notification Structure Layout") whenever the guest driver adds 595 * a new s/g buffer to the "avail" ring of the queue. 596 * 597 * Note: The VirtIO queue layout includes flags the device controls in the "used" ring to inform the guest 598 * driver whether it should notify the host of the guest's buffer additions to the "avail" ring, and, 599 * conversely, the guest driver sets flags in the "avail" ring to communicate to the host device 600 * whether or not to interrupt the guest when the device adds buffers to the used ring. 568 601 * 569 602 * @param pVirtio Pointer to the shared virtio state. … 581 614 582 615 /** 583 * Displays the VirtIO spec-related features offered and their accepted/declined status 584 * by both the VirtIO core and dev-specific device code (which invokes this function). 585 * The result is a comprehensive list of available features the VirtIO specification 586 * defines, which ones were actually offered by the device, and which ones were accepted 587 * by the guest driver, thus providing a legible summary view of the configuration 588 * the device is operating with. 589 * 616 * Displays a well-formatted, human-readable translation of otherwise inscrutable bitmasks 617 * that embody the VirtIO specification's feature definitions, indicating: the totality of features 618 * that can be implemented by host and guest, which features were offered by the host, and 619 * which were actually accepted by the guest. The result is a summary view of the device's 620 * finalized operational state (host-guest negotiated architecture) that shows 621 * which options are available for implementing or enabling. 622 * 623 * The non-device-specific VirtIO features list is managed by the core API (i.e. implied). 624 * Only dev-specific features must be passed as a parameter. 625
590 626 * @param pVirtio Pointer to the shared virtio state. 591 627 * @param pHlp Pointer to the debug info hlp struct 592 * @param s_aDevSpecificFeatures 593 * Features specification lists for device-specific implementation 594 * (i.e: net controller, scsi controller ...) 628 * @param s_aDevSpecificFeatures Dev-specific features (virtio-net, virtio-scsi...)
595 629 * @param cFeatures Number of features in aDevSpecificFeatures 596 630 */ … 599 633 600 634 /* 601 * Debugging assist feature displays the state of the VirtIO core code, which includes 635 * Debug-assist utility function to display the state of the VirtIO core code, including 602 636 * an overview of the state of all of the queues. … 608 642 * 609 643 * This is implemented currently to be invoked by the inheriting device-specific code 610 * (see DevVirtioNet for an example, which receives the debugvm callback directly). 611 * DevVirtioNet lists the available sub-options if no arguments are provided. In that 644 * (see the VirtualBox virtio-net (VirtIO network controller) device implementation 645 * for an example of code that receives the debugvm callback directly). 646 * 647 * DevVirtioNet lists available sub-options if no arguments are provided. In that 612 648 * example this virtq info related function is invoked hierarchically when virtio-net 613 649 * displays its device-specific queue info. … 629 665 630 666 /** 631 * This function is identical to virtioCoreR3VirtqAvailBufGet(), except it doesn't 'consume' 632 * the buffer from the avail ring of the virtq. The peek operation becomes identical to a get 633 * operation if virtioCoreR3VirtqAvailRingNext() is called to consume the buffer from the avail ring, 634 * at which point virtioCoreR3VirtqUsedBufPut() must be called to complete the roundtrip 635 * transaction by putting the descriptor on the used ring. 636 * 667 * This function is identical to virtioCoreR3VirtqAvailBufGet(), *except* it doesn't consume 668 * the peeked buffer from the avail ring of the virtq. The function *becomes* identical to 669 * virtioCoreR3VirtqAvailBufGet() only if virtioCoreR3VirtqAvailRingNext() is invoked to 670 * consume the buf from the queue's avail ring, followed by invocation of virtioCoreR3VirtqUsedBufPut(), 671 * to hand the host-processed buffer back to the guest, which completes the guest-initiated virtq buffer circuit. 637 672 * 638 673 * @param pDevIns The device instance. … 652 687 /** 653 688 * This function fetches the next buffer (descriptor chain) from the VirtIO "avail" ring of the 654 689 * indicated queue, separating the buf's s/g vectors into OUT (e.g. guest-to-host) 655 690 * components and IN (host-to-guest) components. 656 691 * 657 692 * Caller is responsible for GCPhys to host virtual memory conversions. If the 658 693 * virtq buffer being peeked at is "consumed", virtioCoreR3VirtqAvailRingNext() must 659 694 * be called, and after that virtioCoreR3VirtqUsedBufPut() must be called to 660 695 * complete the buffer transfer cycle with the guest. 661 696 * … 678 713 679 714 /** 680 715 * Fetches a specific descriptor chain using the avail ring of the indicated queue and converts the 681 716 * descriptor chain into its OUT (to device) and IN (to guest) components.
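 *
 * For illustration, a minimal sketch of the avail-to-used cycle these functions implement
 * (hypothetical device worker code using the sibling get variant declared above; error
 * handling, locking and buffer release are elided for brevity):
 * @code
 *    PVIRTQBUF pVirtqBuf = NULL;
 *    int rc = virtioCoreR3VirtqAvailBufGet(pDevIns, pVirtio, uVirtqNbr, &pVirtqBuf, true /*fRemove*/);
 *    if (rc == VINF_SUCCESS)
 *    {
 *        // ... drain pVirtqBuf->pSgPhysSend, build the response in an RTSGBUF (pSgVirtReturn) ...
 *        virtioCoreR3VirtqUsedBufPut(pDevIns, pVirtio, uVirtqNbr, pSgVirtReturn, pVirtqBuf, true /*fFence*/);
 *        virtioCoreR3VirtqUsedRingSync(pDevIns, pVirtio, uVirtqNbr);
 *    }
 * @endcode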
682 717 * 683 718 * The caller is responsible for GCPhys to host virtual memory conversions and *must* … 704 739 /** 705 740 * Returns data to the guest to complete a transaction initiated by virtioCoreR3VirtqAvailBufGet(), 706 * or virtioCoreR3VirtqAvailBufPeek()/virtioCoreR3VirtqBufSync() call pairs to complete each 707 * intervening a roundtrip transaction, ultimately putting each descriptor chain pulled from the 708 * avail ring of a queue onto the used ring of the queue. wherein I/O transactions are always 709 * initiated by the guest and completed by the host. In other words, for the host to send any 710 * data to the guest, the guest must provide buffers, for the host to fill, via the avail ring 711 * of the virtq. 741 * (or a virtioCoreR3VirtqAvailBufPeek()/virtioCoreR3VirtqBufSync() call pair), to complete each 742 * buffer transfer transaction (guest-host buffer cycle), ultimately moving each descriptor chain 743 * from the avail ring of a queue onto the used ring of the queue. Note that VirtIO buffer 744 * transactions are *always* initiated by the guest and completed by the host. In other words, 745 * for the host to send any I/O related data to the guest (and in some cases configuration data), 746 * the guest must provide buffers via the virtq's avail ring, for the host to fill. 712 747 * 713 748 * At some point virtioCoreR3VirtqUsedRingSync() must be called to return data to the guest, 714 * completing all pending virtioCoreR3VirtqAvailBufPut() transactions that have accumulated since 715 * the last call to virtioCoreR3VirtqUsedRingSync() 716 717 * @note This does a write-ahead to the used ring of the guest's queue. The data 718 * written won't be seen by the guest until the next call to virtioCoreVirtqUsedRingSync() 719 749 * completing all pending virtioCoreR3VirtqUsedBufPut() operations that have accumulated since 750 * the last call to virtioCoreR3VirtqUsedRingSync(). 751 * 752 * @note This function effectively performs write-ahead to the used ring of the virtq. 753 * Data written won't be seen by the guest until the next call to virtioCoreVirtqUsedRingSync() 720 754 * 721 755 * @param pDevIns The device instance (for reading). … 729 763 * buffer originally pulled from the queue. 730 764 * 731 765 * @param fFence If true (default), put up copy-fence (memory barrier) after 732 766 * copying to guest phys. mem. 733 767 * … 741 775 */ 742 776 int virtioCoreR3VirtqUsedBufPut(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtqNbr, PRTSGBUF pSgVirtReturn, 743 PVIRTQBUF pVirtqBuf, bool fFence); 777 PVIRTQBUF pVirtqBuf, bool fFence = true); 778 779 780 /** 781 * Quicker variant of the same-named function (directly above) that it overloads. 782 * This variant accepts as input a pointer to a buffer and a count, 783 * instead of an S/G buffer, thus it doesn't have to copy between two S/G buffers and avoids some overhead. 784 * 785 * @param pDevIns The device instance (for reading). 786 * @param pVirtio Pointer to the shared virtio state. 787 * @param uVirtqNbr Virtq number 788 * @param cb Number of bytes to copy to the phys. buf. 789 * @param pv Virtual mem buf to copy to phys buf. 790 * @param cbEnqueue How many bytes in packet to enqueue (0 = don't enqueue) 791 * @param fFence If true (default), put up copy-fence (memory barrier) after 792 * copying to guest phys. mem. 793 * 794 * @returns VBox status code.
795 * @retval VINF_SUCCESS Success 796 * @retval VERR_INVALID_STATE VirtIO not in ready state 797 * @retval VERR_NOT_AVAILABLE Virtq is empty 798 * 799 * @note This function will not release any reference to pVirtqBuf. The 800 * caller must take care of that. 801 */ 802 int virtioCoreR3VirtqUsedBufPut(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtq, size_t cb, const void *pv, 803 PVIRTQBUF pVirtqBuf, uint32_t cbEnqueue, bool fFence = true); 804 805 744 806 /** 745 807 * Advance index of avail ring to next entry in specified virtq (see virtioCoreR3VirtqAvailBufPeek()) … 751 813 752 814 /** 753 * Checks to see if guest has acknowledged device's VIRTIO_F_VERSION_1 feature. 754 * If not, it's presumed to be a VirtIO legacy guest driver. Note that legacy drivers 755 * may start using the device prematurely, as opposed to the rigorously sane protocol 756 * prescribed by the "modern" VirtIO spec. Early access implies a legacy driver. 757 * Therefore legacy mode is the assumption until feature negotiation. 815 * Checks to see if the guest has accepted the host device's VIRTIO_F_VERSION_1 (i.e. "modern") 816 * behavioral modeling, indicating the guest agreed to comply with the modern VirtIO 1.0+ specification. 817 * Otherwise the unavoidable presumption is that the host device is dealing with a legacy VirtIO 818 * guest driver, and thus must be prepared to cope with the less mature architecture and behaviors 819 * from the prototype era of VirtIO (see comments in the PDM-invoked device constructor for more information). 758 820 * 759 821 * @param pVirtio Pointer to the virtio state. 760 822 */ 761 823 int virtioCoreIsLegacyMode(PVIRTIOCORE pVirtio); 762 824 825 /** 826 * This VirtIO transitional device supports "modern" (rev 1.0+) as well as "legacy" (e.g. < 1.0) VirtIO drivers. 827 * Some legacy guest drivers are known to mishandle PCI bus mastering, wherein the PCI flavor of GC phys 828 * access functions can't be used. The following wrappers select the memory access method based on whether the 829 * device is operating in legacy mode or not.
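 *
 * A minimal usage sketch (illustrative only; assumes an already-validated GCPhys and an
 * initialized pVirtio/pDevIns pair, and that the caller checks the returned status code):
 * @code
 *    uint8_t abScratch[16];
 *    int rc = virtioCoreGCPhysRead(pVirtio, pDevIns, GCPhys, abScratch, sizeof(abScratch));
 *    if (RT_SUCCESS(rc))
 *        rc = virtioCoreGCPhysWrite(pVirtio, pDevIns, GCPhys, abScratch, sizeof(abScratch));
 * @endcode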
830 */ 831 DECLINLINE(int) virtioCoreGCPhysWrite(PVIRTIOCORE pVirtio, PPDMDEVINS pDevIns, RTGCPHYS GCPhys, void *pvBuf, size_t cbWrite) 832 { 833 int rc; 834 if (virtioCoreIsLegacyMode(pVirtio)) 835 rc = PDMDevHlpPhysWrite(pDevIns, GCPhys, pvBuf, cbWrite); 836 else 837 rc = PDMDevHlpPCIPhysWrite(pDevIns, GCPhys, pvBuf, cbWrite); 838 return rc; 839 } 840 841 DECLINLINE(int) virtioCoreGCPhysRead(PVIRTIOCORE pVirtio, PPDMDEVINS pDevIns, RTGCPHYS GCPhys, void *pvBuf, size_t cbRead) 842 { 843 int rc; 844 if (virtioCoreIsLegacyMode(pVirtio)) 845 rc = PDMDevHlpPhysRead(pDevIns, GCPhys, pvBuf, cbRead); 846 else 847 rc = PDMDevHlpPCIPhysRead(pDevIns, GCPhys, pvBuf, cbRead); 848 return rc; 849 } 850 851 /* 852 * (See comments for corresponding function in sg.h) 853 */ 763 855 DECLINLINE(void) virtioCoreGCPhysChainInit(PVIRTIOSGBUF pGcSgBuf, PVIRTIOSGSEG paSegs, size_t cSegs) 764 856 { … 782 873 } 783 874 875 /* 876 * (See comments for corresponding function in sg.h) 877 */ 784 878 DECLINLINE(RTGCPHYS) virtioCoreGCPhysChainGet(PVIRTIOSGBUF pGcSgBuf, size_t *pcbData) 785 879 { … 826 920 } 827 921 922 /* 923 * (See comments for corresponding function in sg.h) 924 */ 828 925 DECLINLINE(void) virtioCoreGCPhysChainReset(PVIRTIOSGBUF pGcSgBuf) 829 926 { … 843 940 } 844 941 942 /* 943 * (See comments for corresponding function in sg.h) 944 */ 845 945 DECLINLINE(RTGCPHYS) virtioCoreGCPhysChainAdvance(PVIRTIOSGBUF pGcSgBuf, size_t cbAdvance) 846 946 { … 860 960 } 861 961 962 /* 963 * (See comments for corresponding function in sg.h) 964 */ 862 965 DECLINLINE(RTGCPHYS) virtioCoreGCPhysChainGetNextSeg(PVIRTIOSGBUF pGcSgBuf, size_t *pcbSeg) 863 966 { … 871 974 } 872 975 976 /** 977 * Calculate the length of a GCPhys s/g buffer by tallying the size of each segment. 978 * 979 * @param pGcSgBuf Guest Context (GCPhys) S/G buffer to calculate length of 980 */ 873 981 DECLINLINE(size_t) virtioCoreGCPhysChainCalcBufSize(PCVIRTIOSGBUF pGcSgBuf) 874 982 { 875 983 size_t cb = 0; 876 984 unsigned i = pGcSgBuf->cSegs; 877 985 while (i-- > 0) 878 986 cb += pGcSgBuf->paSegs[i].cbSeg; 879 987 return cb; 880 988 } 881 989 990 /* 991 * (See comments for corresponding function in sg.h) 992 */ 993 DECLINLINE(size_t) virtioCoreGCPhysChainCalcLengthLeft(PVIRTIOSGBUF pGcSgBuf) 994 { 995 size_t cb = pGcSgBuf->cbSegLeft; 996 unsigned i = pGcSgBuf->cSegs; 997 while (i-- > pGcSgBuf->idxSeg + 1) 998 cb += pGcSgBuf->paSegs[i].cbSeg; 999 return cb; 1000 } 882 1001 #define VIRTQNAME(a_pVirtio, a_uVirtq) ((a_pVirtio)->aVirtqueues[(a_uVirtq)].szName) 883 1002 884 1003 /** 885 * Add some bytes to a virtq (s/g) buffer, converting them from virtual memory to GCPhys 886 * 887 * To be performant it is left to the caller to validate the size of the buffer with regard 888 * to data being pulled from it to avoid overruns/underruns. 1004 * Convert and append bytes from a virtual-memory simple buffer to the VirtIO guest's 1005 * physical memory described by a buffer pulled from the avail ring of a virtq. 889 1006 * 890 1007 * @param pVirtio Pointer to the shared virtio state. 891 * @param pVirtqBuf output: virtq buffer 1008 * @param pVirtqBuf VirtIO buffer to fill 892 1009 * @param pv input: virtual memory buffer supplying the bytes 893 1010 * @param cb number of bytes to add to the s/g buffer.
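 *
 * A hedged usage sketch (MYDEVRESPHDR is a hypothetical device-specific struct;
 * pVirtqBuf is assumed to come from one of the AvailBufGet/Peek functions above):
 * @code
 *    MYDEVRESPHDR hdr;
 *    RT_ZERO(hdr);
 *    virtioCoreR3VirqBufFill(pVirtio, pVirtqBuf, &hdr, sizeof(hdr));
 * @endcode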
… 895 1012 DECLINLINE(void) virtioCoreR3VirqBufFill(PVIRTIOCORE pVirtio, PVIRTQBUF pVirtqBuf, void *pv, size_t cb) 896 1013 { 897 uint8_t *pb = (uint8_t *)pv; 898 size_t cbLim = RT_MIN(pVirtqBuf->cbPhysReturn, cb); 899 while (cbLim) 1014 uint8_t *pvBuf = (uint8_t *)pv; 1015 size_t cbRemain = cb, cbTotal = 0; 1016 PVIRTIOSGBUF pSgPhysReturn = pVirtqBuf->pSgPhysReturn; 1017 while (cbRemain) 900 1018 { 901 size_t cbSeg = cbLim; 902 RTGCPHYS GCPhys = virtioCoreGCPhysChainGetNextSeg(pVirtqBuf->pSgPhysReturn, &cbSeg); 903 PDMDevHlpPCIPhysWrite(pVirtio->pDevInsR3, GCPhys, pb, cbSeg); 904 pb += cbSeg; 905 cbLim -= cbSeg; 906 pVirtqBuf->cbPhysSend -= cbSeg; 1019 uint32_t cbBounded = RT_MIN(pSgPhysReturn->cbSegLeft, cbRemain); 1020 Assert(cbBounded > 0); 1021 virtioCoreGCPhysWrite(pVirtio, CTX_SUFF(pVirtio->pDevIns), (RTGCPHYS)pSgPhysReturn->GCPhysCur, pvBuf, cbBounded); 1022 virtioCoreGCPhysChainAdvance(pSgPhysReturn, cbBounded); 1023 pvBuf += cbBounded; 1024 cbRemain -= cbBounded; 1025 cbTotal += cbBounded; 907 1026 } 908 LogFunc(("Added %d/%d bytes to %s buffer, head idx: %u (%d bytes remain)\n", 909 cb - cbLim, cb, VIRTQNAME(pVirtio, pVirtqBuf->uVirtq), 910 pVirtqBuf->uHeadIdx, pVirtqBuf->cbPhysReturn)); 1027 LogFunc(("Appended %d bytes to guest phys buf [head: %u]. %d bytes unused in buf.)\n", 1028 cbTotal, pVirtqBuf->uHeadIdx, virtioCoreGCPhysChainCalcLengthLeft(pSgPhysReturn))); 1029 } 1030 1031 /** 1032 * Extract some bytes from a virtq s/g buffer, converting them from GCPhys space 1033 * to ordinary virtual memory (i.e. making the data directly accessible to host device code) 1034 * 1035 * As a performance optimization, it is left to the caller to validate the buffer size. 918 1036 * 919 1037 * @param pVirtio Pointer to the shared virtio state. … 937 1055 LogFunc(("Drained %d/%d bytes from %s buffer, head idx: %u (%d bytes left)\n", 938 1056 cb - cbLim, cb, VIRTQNAME(pVirtio, pVirtqBuf->uVirtq), 939 1057 pVirtqBuf->uHeadIdx, virtioCoreGCPhysChainCalcLengthLeft(pVirtqBuf->pSgPhysReturn))); 940 1058 } 941 1059 … 1016 1134 * VirtIO implementation to identify this device's operational configuration after features 1017 1135 * have been negotiated with the guest VirtIO driver. Feature negotiation entails the host indicating 1018 * to guest which features it supports, then guest accepting among those offered which features 1136 * to the guest which features it supports, then the guest accepting, from among those offered, which features 1019 1137 * it will enable. That becomes the agreement between the host and guest.
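 *
 * For illustration, a device-specific feature table in the same form as the core's
 * s_aCoreFeatures list above (the two virtio-net feature names here are merely examples):
 * @code
 *    static const VIRTIO_FEATURES_LIST s_aDevSpecificFeatures[] =
 *    {
 *        { VIRTIONET_F_MAC,    "   MAC      Host has given MAC address\n" },
 *        { VIRTIONET_F_STATUS, "   STATUS   Configuration status field is available\n" },
 *    };
 * @endcode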
The bitmask containing 1020 1138 * virtio core features plus device-specific features is provided as a parameter to virtioCoreR3Init() … 1031 1149 1032 1150 /** 1033 * Get the thename of the VM state change associated with the enumeration variable 1151 * Get the name of the VM state change associated with the enumeration variable 1034 1152 * 1035 1153 * @param enmState VM state (enumeration value) … 1078 1196 /** 1079 1197 * Debug assist for any consumer device code 1080 & 1081 1198 * Do a hex dump of memory in guest physical context 1082 1199 * … 1093 1210 */ 1094 1211 1095 /** 1096 * Calculate the length of a GCPhys s/g buffer by tallying the size of each segment. 1097 * 1098 * @param pGcSgBuf Guest Context (GCPhys) S/G buffer to calculate length of 1099 */ 1100 DECLINLINE(size_t) virtioCoreGCPhysChainCalcBufSize(PCVIRTIOSGBUF pGcSgBuf) 1101 { 1102 size_t cb = 0; 1103 unsigned i = pGcSgBuf->cSegs; 1104 while (i-- > 0) 1105 cb += pGcSgBuf->paSegs[i].cbSeg; 1106 return cb; 1107 } 1108 1109 /** 1110 * This VirtIO transitional device supports "modern" (rev 1.0+) as well as "legacy" (e.g. < 1.0) VirtIO drivers. 1111 * Some legacy guest drivers are known to mishandle PCI bus mastering wherein the PCI flavor of GC phys 1112 * access functions can't be used. The following wrappers select the mem access method based on whether the 1113 * device is operating in legacy mode or not. 1114 */ 1115 DECLINLINE(int) virtioCoreGCPhysWrite(PVIRTIOCORE pVirtio, PPDMDEVINS pDevIns, RTGCPHYS GCPhys, void *pvBuf, size_t cbWrite) 1116 { 1117 int rc; 1118 if (virtioCoreIsLegacyMode(pVirtio)) 1119 rc = PDMDevHlpPhysWrite(pDevIns, GCPhys, pvBuf, cbWrite); 1120 else 1121 rc = PDMDevHlpPCIPhysWrite(pDevIns, GCPhys, pvBuf, cbWrite); 1122 return rc; 1123 } 1124 1125 DECLINLINE(int) virtioCoreGCPhysRead(PVIRTIOCORE pVirtio, PPDMDEVINS pDevIns, RTGCPHYS GCPhys, void *pvBuf, size_t cbRead) 1126 { 1127 int rc; 1128 if (virtioCoreIsLegacyMode(pVirtio)) 1129 rc = PDMDevHlpPhysRead(pDevIns, GCPhys, pvBuf, cbRead); 1130 else 1131 rc = PDMDevHlpPCIPhysRead(pDevIns, GCPhys, pvBuf, cbRead); 1132 return rc; 1133 } 1134 1135 1212 /** Misc VM and PDM boilerplate */ 1136 int virtioCoreR3SaveExec(PVIRTIOCORE pVirtio, PCPDMDEVHLPR3 pHlp, PSSMHANDLE pSSM); 1137 int virtioCoreR3LoadExec(PVIRTIOCORE pVirtio, PCPDMDEVHLPR3 pHlp, PSSMHANDLE pSSM); 1213 int virtioCoreR3SaveExec(PVIRTIOCORE pVirtio, PCPDMDEVHLPR3 pHlp, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t cQueues); 1214 int virtioCoreR3ModernDeviceLoadExec(PVIRTIOCORE pVirtio, PCPDMDEVHLPR3 pHlp, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uTestVersion, uint32_t cQueues); 1215 int virtioCoreR3LegacyDeviceLoadExec(PVIRTIOCORE pVirtio, PCPDMDEVHLPR3 pHlp, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uVirtioLegacy_3_1_Beta); 1138 1216 void virtioCoreR3VmStateChanged(PVIRTIOCORE pVirtio, VIRTIOVMSTATECHANGED enmState); 1139 1217 void virtioCoreR3Term(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTIOCORECC pVirtioCC); … 1147 1225 * cb, pv and fWrite are implicit parameters and must be defined by the invoker. 1148 1226 */ 1149 1150 1227 #ifdef LOG_ENABLED … 1201 1278 * the memory described by cb and pv. 1202 1279 * 1203 * cb, pv and fWrite are implicit parameters and must be defined by the invoker. 1280 * cb, pv and fWrite are implicit parameters and must be defined by the invoker. 1204 1281 */ 1205 1282 #define VIRTIO_DEV_CONFIG_ACCESS(member, tCfgStruct, uOffsetOfAccess, pCfgStruct) \ … 1216 1293 /** 1217 1294 * Copies bytes into memory described by cb, pv from the specified member field of the config struct.
1218 * The operation is a nop and logs error if implied parameter fWrite istrue.1295 * The operation is a NOP, logging an error if an implied parameter, fWrite, is boolean true. 1219 1296 * 1220 1297 * cb, pv and fWrite are implicit parameters and must be defined by the invoker. … … 1237 1314 * the memory described by cb and pv. 1238 1315 * 1239 * cb, pv and fWrite are implicit parameters and must be defined by theinvoker.1316 * cb, pv and fWrite are implicit parameters and must be defined by invoker. 1240 1317 */ 1241 1318 #define VIRTIO_DEV_CONFIG_ACCESS_INDEXED(member, uIdx, tCfgStruct, uOffsetOfAccess, pCfgStruct) \ … … 1254 1331 * The operation is a nop and logs error if implied parameter fWrite is true. 1255 1332 * 1256 * cb, pv and fWrite are implicit parameters and must be defined by theinvoker.1333 * cb, pv and fWrite are implicit parameters and must be defined by invoker. 1257 1334 */ 1258 1335 #define VIRTIO_DEV_CONFIG_ACCESS_INDEXED_READONLY(member, uidx, tCfgStruct, uOffsetOfAccess, pCfgStruct) \