Timestamp:
    Jul 5, 2021, 2:25:35 PM
svn:sync-xref-src-repo-rev:
    145521
Location:
    trunk/src/VBox
Files:
    3 edited
trunk/src/VBox/Devices/Bus/DevIommuAmd.cpp
--- r89554
+++ r90028

@@ -48 +48 @@
 /** Enable the IOTLBE cache only in ring-3 for now, see @bugref{9654#c95}. */
 #ifdef IN_RING3
-# define IOMMU_WITH_IOTLBE_CACHE
+//# define IOMMU_WITH_IOTLBE_CACHE   /* Disabled for now, see @bugref{9654#c107}. */
 #endif
 /** Enable the interrupt cache. */

@@ -219 +219 @@
 # define IOMMU_UNLOCK(a_pDevIns, a_pThisCC)  (a_pThisCC)->CTX_SUFF(pIommuHlp)->pfnUnlock((a_pDevIns))

+/** Gets the maximum valid IOVA for the given I/O page-table level. */
+#define IOMMU_GET_MAX_VALID_IOVA(a_Level)    ((X86_PAGE_4K_SIZE << ((a_Level) * 9)) - 1)
+

@@ -353 +356 @@
     bool volatile       fCmdThreadSignaled;
     /** Padding. */
-    bool                afPadding0[7];
+    bool                afPadding0[3];
+    /** The IOMMU PCI address. */
+    PCIBDF              uPciAddress;

 #ifdef IOMMU_WITH_DTE_CACHE

@@ -536 +541 @@
     STAMCOUNTER         StatIntrCacheHit;      /**< Number of interrupt cache hits. */
     STAMCOUNTER         StatIntrCacheMiss;     /**< Number of interrupt cache misses. */
+
+    STAMCOUNTER         StatNonStdPageSize;    /**< Number of non-standard page size translations. */
+    STAMCOUNTER         StatIopfs;             /**< Number of I/O page faults. */
     /** @} */
 #endif

@@ -691 +699 @@
 typedef DECLCALLBACKTYPE(int, FNIOPAGELOOKUP,(PPDMDEVINS pDevIns, uint64_t uIovaPage, uint8_t fPerm, PCIOMMUOPAUX pAux,
                                               PIOPAGELOOKUP pPageLookup));
-typedef FNIOPAGELOOKUP  *PFNIOPAGELOOKUP;
+typedef FNIOPAGELOOKUP *PFNIOPAGELOOKUP;

@@ -803 +811 @@
 static bool iommuAmdLookupIsAccessContig(PCIOPAGELOOKUP pPageLookupPrev, PCIOPAGELOOKUP pPageLookup)
 {
-    Assert(pPageLookupPrev->fPerm == pPageLookup->fPerm);
     size_t const   cbPrev     = RT_BIT_64(pPageLookupPrev->cShift);
     RTGCPHYS const GCPhysPrev = pPageLookupPrev->GCPhysSpa;
     RTGCPHYS const GCPhys     = pPageLookup->GCPhysSpa;
-#ifdef RT_STRICT
+
     /* Paranoia: Ensure offset bits are 0. */
-    {
-        uint64_t const fOffMaskPrev = X86_GET_PAGE_OFFSET_MASK(pPageLookupPrev->cShift);
-        uint64_t const fOffMask     = X86_GET_PAGE_OFFSET_MASK(pPageLookup->cShift);
-        Assert(!(GCPhysPrev & fOffMaskPrev));
-        Assert(!(GCPhys & fOffMask));
-    }
-#endif
+    Assert(!(GCPhysPrev & X86_GET_PAGE_OFFSET_MASK(pPageLookupPrev->cShift)));
+    Assert(!(GCPhys     & X86_GET_PAGE_OFFSET_MASK(pPageLookup->cShift)));
+
+    /* Paranoia: Ensure permissions are identical. */
+    Assert(pPageLookupPrev->fPerm == pPageLookup->fPerm);
+
     return GCPhysPrev + cbPrev == GCPhys;
 }

@@ -821 +828 @@
+#ifdef IOMMU_WITH_DTE_CACHE
 /**
  * Gets the basic I/O device flags for the given device table entry.
 …
     return fFlags;
 }
+#endif

@@ -1081 +1089 @@
     AssertPtr(pArgs->pIommuR3);
     AssertPtr(pArgs->pHlp);
-    //Assert(pArgs->pIommuCC->u32Magic == IOMMU_MAGIC);
+    //Assert(pArgs->pIommuR3->u32Magic == IOMMU_MAGIC);

     uint16_t const idDomain = IOMMU_IOTLB_KEY_GET_DOMAIN_ID(pNode->Key);

@@ -1137 +1145 @@
+/**
+ * Destroys an IOTLB entry that's in the tree.
+ *
+ * @returns VINF_SUCCESS.
+ * @param   pNode   Pointer to an IOTLBE.
+ * @param   pvUser  Opaque data. Currently not used, will be NULL.
+ */
+static DECLCALLBACK(int) iommuAmdIotlbEntryDestroy(PAVLU64NODECORE pNode, void *pvUser)
+{
+    RT_NOREF(pvUser);
+    PIOTLBE pIotlbe = (PIOTLBE)pNode;
+    Assert(pIotlbe);
+    pIotlbe->NdLru.pNext = NULL;
+    pIotlbe->NdLru.pPrev = NULL;
+    RT_ZERO(pIotlbe->PageLookup);
+    pIotlbe->fEvictPending = false;
+    return VINF_SUCCESS;
+}
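The destroy callback above scrubs an entry without freeing it: IOTLBEs live in a pre-allocated pool, so a flush only has to empty the AVL tree (RTAvlU64Destroy also resets the tree root to NULL) and re-initialize the LRU list, which is what the next hunk does. A minimal sketch of the IPRT destroy-callback pattern, not part of the changeset (MYNODE and the helper names are illustrative):

#include <iprt/avl.h>
#include <iprt/errcore.h>

/* Hypothetical node type: the AVL core must be the first member so a node
   pointer can be cast from PAVLU64NODECORE. */
typedef struct MYNODE
{
    AVLU64NODECORE Core;        /* Key and left/right links used by the tree. */
    uint64_t       uPayload;    /* Per-node state to scrub on flush. */
} MYNODE;

/* Invoked once per node by RTAvlU64Destroy while it empties the tree. */
static DECLCALLBACK(int) myNodeDestroy(PAVLU64NODECORE pNode, void *pvUser)
{
    RT_NOREF(pvUser);
    MYNODE *pMyNode = (MYNODE *)pNode;
    pMyNode->uPayload = 0;      /* Scrub, don't free: nodes are pooled and reused. */
    return VINF_SUCCESS;
}

static void myCacheFlush(PAVLU64TREE pTree)
{
    RTAvlU64Destroy(pTree, myNodeDestroy, NULL /* pvParam */);
}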
@@ -1306 +1334 @@
     if (pThisR3->cCachedIotlbes > 0)
     {
-        size_t const cbIotlbes = sizeof(IOTLBE) * IOMMU_IOTLBE_MAX;
-        RT_BZERO(pThisR3->paIotlbes, cbIotlbes);
+        RTAvlU64Destroy(&pThisR3->TreeIotlbe, iommuAmdIotlbEntryDestroy, NULL /* pvParam */);
+        RTListInit(&pThisR3->LstLruIotlbe);
         pThisR3->idxUnusedIotlbe = 0;
         pThisR3->cCachedIotlbes  = 0;
         STAM_COUNTER_RESET(&pThis->StatIotlbeCached);
-        RTListInit(&pThisR3->LstLruIotlbe);
     }

@@ -1409 +1436 @@
     size_t cPages = cbIova / X86_PAGE_4K_SIZE;
     cPages = RT_MIN(cPages, IOMMU_IOTLBE_MAX);
+
+    Assert((cbIova % X86_PAGE_4K_SIZE) == 0);
+    Assert(cPages > 0);

     IOMMU_CACHE_LOCK(pDevIns, pThis);

@@ -1556 +1586 @@
     int rc = VINF_SUCCESS;
     PIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
+    Assert(idDevice != pThis->uPciAddress);
     IOMMU_CACHE_LOCK(pDevIns, pThis);

@@ -2452 +2483 @@
     pThis->EvtLogHeadPtr.au32[0] = offBuf;

-    LogFlowFunc(("Set EvtLogHeadPtr to %#RX32\n", offBuf));
+    Log4Func(("Set EvtLogHeadPtr to %#RX32\n", offBuf));
     return VINF_SUCCESS;
 }

@@ -2492 +2523 @@
     pThis->EvtLogTailPtr.au32[0] = offBuf;

-    LogFlowFunc(("Set EvtLogTailPtr to %#RX32\n", offBuf));
+    Log4Func(("Set EvtLogTailPtr to %#RX32\n", offBuf));
     return VINF_SUCCESS;
 }

@@ -3318 +3349 @@
     AssertCompile(sizeof(EVT_GENERIC_T) == sizeof(EVT_IO_PAGE_FAULT_T));
     PCEVT_GENERIC_T pEvent = (PCEVT_GENERIC_T)pEvtIoPageFault;
+    PIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
+    STAM_COUNTER_INC(&pThis->StatIopfs);

 #ifdef IOMMU_WITH_DTE_CACHE

@@ -3645 +3678 @@
                                  IOMMUOP enmOp, PIOPAGELOOKUP pPageLookup)
 {
+    PIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
     Assert(pDte->n.u1Valid);
     Assert(!(uIova & X86_PAGE_4K_OFFSET_MASK));

     /* The virtual address bits indexing table. */
     static uint8_t const  s_acIovaLevelShifts[] = { 0, 12, 21, 30, 39, 48, 57, 0 };
-    static uint64_t const s_auIovaLevelMasks[]  = { UINT64_C(0x0000000000000000),
-                                                    UINT64_C(0x00000000001ff000),
-                                                    UINT64_C(0x000000003fe00000),
-                                                    UINT64_C(0x0000007fc0000000),
-                                                    UINT64_C(0x0000ff8000000000),
-                                                    UINT64_C(0x01ff000000000000),
-                                                    UINT64_C(0xfe00000000000000),
-                                                    UINT64_C(0x0000000000000000) };
-    AssertCompile(RT_ELEMENTS(s_acIovaLevelShifts) == RT_ELEMENTS(s_auIovaLevelMasks));
     AssertCompile(RT_ELEMENTS(s_acIovaLevelShifts) > IOMMU_MAX_HOST_PT_LEVEL);

-    /* Traverse the I/O page table starting with the page directory in the DTE. */
+    /*
+     * Traverse the I/O page table starting with the page directory in the DTE.
+     *
+     * The Valid (Present bit), Translation Valid and Mode (Next-Level bits) in
+     * the DTE have been validated already, see iommuAmdPreTranslateChecks.
+     */
     IOPTENTITY_T PtEntity;
     PtEntity.u64 = pDte->au64[0];
     for (;;)
     {
-        /* Figure out the system physical address of the page table at the current level. */
         uint8_t const uLevel = PtEntity.n.u3NextLevel;
 …
         uint64_t const offPte         = idxPte << 3;
         RTGCPHYS const GCPhysPtEntity = (PtEntity.u64 & IOMMU_PTENTITY_ADDR_MASK) + offPte;
-        int rc = PDMDevHlpPCIPhysRead(pDevIns, GCPhysPtEntity, &PtEntity.u64, sizeof(PtEntity));
+        int rc = PDMDevHlpPhysRead(pDevIns, GCPhysPtEntity, &PtEntity.u64, sizeof(PtEntity));
         if (RT_FAILURE(rc))
         {
 …
         }

+        /* Validate the encoding of the next level. */
+        uint8_t const uNextLevel = PtEntity.n.u3NextLevel;
+#if IOMMU_MAX_HOST_PT_LEVEL < 6
+        if (uNextLevel <= IOMMU_MAX_HOST_PT_LEVEL)
+        { /* likely */ }
+        else
+        {
+            LogFunc(("Next-level/paging-mode field of the paging entity invalid. uNextLevel=%#x -> IOPF\n", uNextLevel));
+            EVT_IO_PAGE_FAULT_T EvtIoPageFault;
+            iommuAmdIoPageFaultEventInit(idDevice, pDte->n.u16DomainId, uIova, true /* fPresent */, true /* fRsvdNotZero */,
+                                         false /* fPermDenied */, enmOp, &EvtIoPageFault);
+            iommuAmdIoPageFaultEventRaiseWithDte(pDevIns, pDte, NULL /* pIrte */, enmOp, &EvtIoPageFault,
+                                                 kIoPageFaultType_PteInvalidLvlEncoding);
+            return VERR_IOMMU_ADDR_TRANSLATION_FAILED;
+        }
+#endif
+
+        /* Check reserved bits. */
+        uint64_t const fRsvdMask = uNextLevel == 0 || uNextLevel == 7 ? IOMMU_PTE_RSVD_MASK : IOMMU_PDE_RSVD_MASK;
+        if (!(PtEntity.u64 & fRsvdMask))
+        { /* likely */ }
+        else
+        {
+            LogFunc(("Page table entity (%#RX64 level=%u) reserved bits set -> IOPF\n", PtEntity.u64, uNextLevel));
+            EVT_IO_PAGE_FAULT_T EvtIoPageFault;
+            iommuAmdIoPageFaultEventInit(idDevice, pDte->n.u16DomainId, uIova, true /* fPresent */, true /* fRsvdNotZero */,
+                                         false /* fPermDenied */, enmOp, &EvtIoPageFault);
+            iommuAmdIoPageFaultEventRaiseWithDte(pDevIns, pDte, NULL /* pIrte */, enmOp, &EvtIoPageFault,
+                                                 kIoPageFaultType_PteRsvdNotZero);
+            return VERR_IOMMU_ADDR_TRANSLATION_FAILED;
+        }
+
         /* Check permission bits. */
-        uint8_t const fPtePerm  = (PtEntity.u64 >> IOMMU_IO_PERM_SHIFT) & IOMMU_IO_PERM_MASK;
+        uint8_t const fPtePerm = (PtEntity.u64 >> IOMMU_IO_PERM_SHIFT) & IOMMU_IO_PERM_MASK;
         if ((fPerm & fPtePerm) == fPerm)
         { /* likely */ }
 …
         }

-        /* If this is a PTE, we're at the final level and we're done. */
-        uint8_t const uNextLevel = PtEntity.n.u3NextLevel;
+        /* If the next level is 0 or 7, this is the final level PTE. */
         if (uNextLevel == 0)
         {
-            /* The page size of the translation is the default (4K). */
-            pPageLookup->GCPhysSpa = PtEntity.u64 & IOMMU_PTENTITY_ADDR_MASK;
-            pPageLookup->cShift    = X86_PAGE_4K_SHIFT;
+            /* The page size of the translation is the default size for the level. */
+            uint8_t const  cShift    = s_acIovaLevelShifts[uLevel];
+            RTGCPHYS const GCPhysPte = PtEntity.u64 & IOMMU_PTENTITY_ADDR_MASK;
+            pPageLookup->GCPhysSpa = GCPhysPte & X86_GET_PAGE_BASE_MASK(cShift);
+            pPageLookup->cShift    = cShift;
             pPageLookup->fPerm     = fPtePerm;
             return VINF_SUCCESS;
         }
         if (uNextLevel == 7)
         {
             /* The default page size of the translation is overridden. */
+            uint8_t        cShift    = X86_PAGE_4K_SHIFT;
             RTGCPHYS const GCPhysPte = PtEntity.u64 & IOMMU_PTENTITY_ADDR_MASK;
-            uint8_t        cShift    = X86_PAGE_4K_SHIFT;
             while (GCPhysPte & RT_BIT_64(cShift++))
                 ;

             /* The page size must be larger than the default size and lower than the default size of the higher level. */
-            Assert(uLevel < IOMMU_MAX_HOST_PT_LEVEL); /* PTE at level 6 handled outside the loop, uLevel should be <= 5. */
             if (   cShift > s_acIovaLevelShifts[uLevel]
                 && cShift < s_acIovaLevelShifts[uLevel + 1])
             {
-                pPageLookup->GCPhysSpa = GCPhysPte;
+                pPageLookup->GCPhysSpa = GCPhysPte & X86_GET_PAGE_BASE_MASK(cShift);
                 pPageLookup->cShift    = cShift;
                 pPageLookup->fPerm     = fPtePerm;
+                STAM_COUNTER_INC(&pThis->StatNonStdPageSize);
                 return VINF_SUCCESS;
             }

-            LogFunc(("Page size invalid cShift=%#x -> IOPF\n", cShift));
+            LogFunc(("Page size invalid cShift=%u -> IOPF\n", cShift));
             EVT_IO_PAGE_FAULT_T EvtIoPageFault;
             iommuAmdIoPageFaultEventInit(idDevice, pDte->n.u16DomainId, uIova, true /* fPresent */, false /* fRsvdNotZero */,
 …
             return VERR_IOMMU_ADDR_TRANSLATION_FAILED;
         }

-        /* Validate the next level encoding of the PDE. */
-#if IOMMU_MAX_HOST_PT_LEVEL < 6
-        if (uNextLevel <= IOMMU_MAX_HOST_PT_LEVEL)
-        { /* likely */ }
-        else
-        {
-            LogFunc(("Next level of PDE invalid uNextLevel=%#x -> IOPF\n", uNextLevel));
-            EVT_IO_PAGE_FAULT_T EvtIoPageFault;
-            iommuAmdIoPageFaultEventInit(idDevice, pDte->n.u16DomainId, uIova, true /* fPresent */, false /* fRsvdNotZero */,
-                                         false /* fPermDenied */, enmOp, &EvtIoPageFault);
-            iommuAmdIoPageFaultEventRaiseWithDte(pDevIns, pDte, NULL /* pIrte */, enmOp, &EvtIoPageFault,
-                                                 kIoPageFaultType_PteInvalidLvlEncoding);
-            return VERR_IOMMU_ADDR_TRANSLATION_FAILED;
-        }
-#else
-        Assert(uNextLevel <= IOMMU_MAX_HOST_PT_LEVEL);
-#endif

         /* Validate level transition. */
 …

-        /* Ensure IOVA bits of skipped levels are zero. */
-        Assert(uLevel > 0);
-        uint64_t uIovaSkipMask = 0;
-        for (unsigned idxLevel = uLevel - 1; idxLevel > uNextLevel; idxLevel--)
-            uIovaSkipMask |= s_auIovaLevelMasks[idxLevel];
-        if (!(uIova & uIovaSkipMask))
+        /* Ensure IOVA bits of skipped levels (if any) are zero. */
+        uint64_t const fIovaSkipMask = IOMMU_GET_MAX_VALID_IOVA(uLevel - 1) - IOMMU_GET_MAX_VALID_IOVA(uNextLevel);
+        if (!(uIova & fIovaSkipMask))
         { /* likely */ }
         else
         {
-            LogFunc(("IOVA of skipped levels are not zero %#RX64 (SkipMask=%#RX64) -> IOPF\n", uIova, uIovaSkipMask));
+            LogFunc(("IOVA of skipped levels are not zero. uIova=%#RX64 fSkipMask=%#RX64 -> IOPF\n", uIova, fIovaSkipMask));
             EVT_IO_PAGE_FAULT_T EvtIoPageFault;
             iommuAmdIoPageFaultEventInit(idDevice, pDte->n.u16DomainId, uIova, true /* fPresent */, false /* fRsvdNotZero */,
 …
             return VERR_IOMMU_ADDR_TRANSLATION_FAILED;
         }

-        /* Continue with traversing the page directory at this level. */
+        /* Traverse to the next level. */
     }
 }
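The skip-level check above replaces the old per-level mask table with pure arithmetic: IOMMU_GET_MAX_VALID_IOVA(n) = (4K << (n * 9)) - 1 is a contiguous all-ones mask covering every IOVA bit translated by levels 1 through n, so subtracting the masks of two levels isolates exactly the bits decoded by the levels being skipped. A standalone sketch of the arithmetic, not part of the changeset:

#include <stdint.h>
#include <stdio.h>

#define PAGE_4K_SIZE               UINT64_C(0x1000)
#define GET_MAX_VALID_IOVA(a_Lvl)  ((PAGE_4K_SIZE << ((a_Lvl) * 9)) - 1) /* mirrors IOMMU_GET_MAX_VALID_IOVA */

int main(void)
{
    /* Example: a level-4 PDE pointing straight at a level-1 page table
       skips levels 3 and 2, so IOVA bits 38:21 must all be zero. */
    unsigned const uLevel     = 4;
    unsigned const uNextLevel = 1;
    uint64_t const fIovaSkipMask = GET_MAX_VALID_IOVA(uLevel - 1) - GET_MAX_VALID_IOVA(uNextLevel);
    printf("%#llx\n", (unsigned long long)fIovaSkipMask);  /* 0x7fffe00000 = bits 38:21 */
    return 0;
}

The subtraction is safe because both operands have the form 2^k - 1 and the minuend always covers the subtrahend, so no borrow can corrupt the mask.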
@@ -3886 +3927 @@
             uint64_t const offMask = X86_GET_PAGE_OFFSET_MASK(PageLookup.cShift);
             uint64_t const offSpa  = uIova & offMask;
-            Assert(!(PageLookup.GCPhysSpa & offMask));
+            AssertMsg(!(PageLookup.GCPhysSpa & offMask), ("GCPhysSpa=%#RX64 offMask=%#RX64\n",
+                                                          PageLookup.GCPhysSpa, offMask));
             GCPhysSpa = PageLookup.GCPhysSpa | offSpa;
         }

@@ -3908 +3950 @@
             cbRemaining -= (cbPage - offIova);  /* Calculate how much more we need to access. */
             uIovaPage   += cbPage;              /* Update address of the next access. */
-            offIova      = 0;                   /* After first page, all pages are accessed from off 0. */
+            offIova      = 0;                   /* After first page, remaining pages are accessed from offset 0. */
         }
         else

@@ -3925 +3967 @@
     pAddrOut->fPerm     = PageLookupPrev.fPerm; /* Update the allowed permissions for this access. */
     if (pcbPages)
-        *pcbPages = cbPages;                    /* Update the size of the pages accessed. */
+        *pcbPages = cbPages;                    /* Update the size (in bytes) of the pages accessed. */
     return rc;
 }

@@ -4749 +4791 @@
     PIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);

+    /* If this MSI was generated by the IOMMU itself, it's not subject to remapping, see @bugref{9654#c104}. */
+    if (idDevice == pThis->uPciAddress)
+        return VERR_IOMMU_CANNOT_CALL_SELF;
+
     /* Interrupts are forwarded with remapping when the IOMMU is disabled. */
     IOMMU_CTRL_T const Ctrl = iommuAmdGetCtrlUnlocked(pThis);

@@ -5193 +5239 @@
     RT_NOREF(pThread);
-    LogFlowFunc(("\n"));
+    Log4Func(("\n"));
     PCIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
     return PDMDevHlpSUPSemEventSignal(pDevIns, pThis->hEvtCmdThread);

@@ -6787 +6833 @@
     PDMDEV_CHECK_VERSIONS_RETURN(pDevIns);
-    RT_NOREF(pCfg);
-
-    PIOMMU   pThis   = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
-    PIOMMUR3 pThisR3 = PDMDEVINS_2_DATA_CC(pDevIns, PIOMMUR3);
+
+    PIOMMU        pThis   = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
+    PIOMMUR3      pThisR3 = PDMDEVINS_2_DATA_CC(pDevIns, PIOMMUR3);
+    PCPDMDEVHLPR3 pHlp    = pDevIns->pHlpR3;
+
     pThis->u32Magic    = IOMMU_MAGIC;
     pThisR3->pDevInsR3 = pDevIns;

     LogFlowFunc(("iInstance=%d\n", iInstance));
+
+    /*
+     * Validate and read the configuration.
+     */
+    PDMDEV_VALIDATE_CONFIG_RETURN(pDevIns, "PCIAddress", "");
+    int rc = pHlp->pfnCFGMQueryU32Def(pCfg, "PCIAddress", &pThis->uPciAddress, NIL_PCIBDF);
+    if (RT_FAILURE(rc))
+        return PDMDEV_SET_ERROR(pDevIns, rc, N_("Configuration error: Failed to query 32-bit integer \"PCIAddress\""));
+    if (!PCIBDF_IS_VALID(pThis->uPciAddress))
+        return PDMDEV_SET_ERROR(pDevIns, rc, N_("Configuration error: \"PCIAddress\" of the AMD IOMMU cannot be invalid"));

@@ -6806 +6863 @@
     IommuReg.pfnMsiRemap = iommuAmdMsiRemap;
     IommuReg.u32TheEnd   = PDM_IOMMUREGCC_VERSION;
-    int rc = PDMDevHlpIommuRegister(pDevIns, &IommuReg, &pThisR3->CTX_SUFF(pIommuHlp), &pThis->idxIommu);
+    rc = PDMDevHlpIommuRegister(pDevIns, &IommuReg, &pThisR3->CTX_SUFF(pIommuHlp), &pThis->idxIommu);
     if (RT_FAILURE(rc))
         return PDMDEV_SET_ERROR(pDevIns, rc, N_("Failed to register ourselves as an IOMMU device"));

@@ -7025 +7082 @@
     PDMDevHlpSTAMRegister(pDevIns, &pThis->StatIntrCacheHit,  STAMTYPE_COUNTER, "Interrupt/CacheHit",  STAMUNIT_OCCURENCES, "Number of cache hits.");
     PDMDevHlpSTAMRegister(pDevIns, &pThis->StatIntrCacheMiss, STAMTYPE_COUNTER, "Interrupt/CacheMiss", STAMUNIT_OCCURENCES, "Number of cache misses.");
+
+    PDMDevHlpSTAMRegister(pDevIns, &pThis->StatNonStdPageSize, STAMTYPE_COUNTER, "MemAccess/NonStdPageSize", STAMUNIT_OCCURENCES, "Number of non-standard page size translations.");
+    PDMDevHlpSTAMRegister(pDevIns, &pThis->StatIopfs,          STAMTYPE_COUNTER, "MemAccess/IOPFs",          STAMUNIT_OCCURENCES, "Number of I/O page faults.");
 # endif
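The new MemAccess/NonStdPageSize counter tracks hits on the uNextLevel == 7 path in the page-table walker above, where a PTE overrides the default page size: the size is encoded as a run of one-bits in the low address bits, ended by the first clear bit at or above bit 12. A standalone decode example, not part of the changeset:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* A next-level-7 PTE for a 2MB page: address bits 19:12 are all ones
       and bit 20 is the first clear bit, so the decode loop below exits
       with cShift == 21 (1 << 21 == 2MB). */
    uint64_t const GCPhysPte = UINT64_C(0x40000000) | UINT64_C(0xff000);

    uint8_t cShift = 12;                                /* X86_PAGE_4K_SHIFT */
    while (GCPhysPte & (UINT64_C(1) << cShift++))
        ;

    printf("page size: %llu bytes\n", 1ULL << cShift);  /* 2097152 */
    return 0;
}

This also illustrates why the walker now masks the PTE address with X86_GET_PAGE_BASE_MASK(cShift): the size-encoding one-bits sit below the page base and must not leak into GCPhysSpa.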
trunk/src/VBox/Main/src-client/ConsoleImpl2.cpp
--- r89953
+++ r90028

@@ -1603 +1603 @@
     InsertConfigNode(pInst, "Config", &pCfg);
     hrc = pBusMgr->assignPCIDevice("iommu-amd", pInst);                         H();
+
+    /* The AMD IOMMU device needs to know which PCI slot it's in, see @bugref{9654#c104}. */
+    {
+        PCIBusAddress Address;
+        if (pBusMgr->findPCIAddress("iommu-amd", 0, Address))
+        {
+            uint32_t const u32IommuAddress = (Address.miDevice << 16) | Address.miFn;
+            InsertConfigInteger(pCfg, "PCIAddress", u32IommuAddress);
+        }
+        else
+            return VMR3SetError(pUVM, VERR_INVALID_PARAMETER, RT_SRC_POS,
+                                N_("Failed to find PCI address of the assigned IOMMU device!"));
+    }

     PCIBusAddress PCIAddr = PCIBusAddress((int32_t)uIoApicPciAddress);
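The configuration value packs the device number into the high 16 bits and the function number into the low 16 bits; the device constructor reads it back through the "PCIAddress" CFGM key (stored in uPciAddress) and uses it to recognize MSIs originating from the IOMMU itself. A sketch of the packing, with illustrative helper names that are not VirtualBox API:

#include <cstdint>

// Mirrors the (miDevice << 16) | miFn packing used above.
static inline uint32_t iommuPackPciAddress(int32_t iDevice, int32_t iFn)
{
    return (static_cast<uint32_t>(iDevice) << 16) | static_cast<uint32_t>(iFn);
}

static inline void iommuUnpackPciAddress(uint32_t uAddr, int32_t &iDevice, int32_t &iFn)
{
    iDevice = static_cast<int32_t>(uAddr >> 16);
    iFn     = static_cast<int32_t>(uAddr & 0xffffU);
}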
trunk/src/VBox/VMM/VMMAll/PDMAllIommu.cpp
--- r88638
+++ r90028

@@ -133 +133 @@
         {
             /** @todo Handle strict return codes from PGMPhysRead. */
-            rc = pDevIns->CTX_SUFF(pHlp)->pfnPhysRead(pDevIns, GCPhysOut, pvBuf, cbRead, fFlags);
+            rc = pDevIns->CTX_SUFF(pHlp)->pfnPhysRead(pDevIns, GCPhysOut, pvBuf, cbContig, fFlags);
             if (RT_SUCCESS(rc))
             {

@@ -201 +201 @@
         {
             /** @todo Handle strict return codes from PGMPhysWrite. */
-            rc = pDevIns->CTX_SUFF(pHlp)->pfnPhysWrite(pDevIns, GCPhysOut, pvBuf, cbWrite, fFlags);
+            rc = pDevIns->CTX_SUFF(pHlp)->pfnPhysWrite(pDevIns, GCPhysOut, pvBuf, cbContig, fFlags);
             if (RT_SUCCESS(rc))
             {
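Both hunks fix the same bug: the IOMMU access path translates a request in physically-contiguous chunks, so each pfnPhysRead/pfnPhysWrite call must use the size of the translated chunk (cbContig), not the size of the whole request, otherwise an access can spill past the region the translation actually covered. A sketch of the chunked loop shape, with hypothetical helpers standing in for the real translation and physical-access calls:

#include <stddef.h>
#include <stdint.h>

/* Hypothetical stand-ins; 0 means success, non-zero means failure. */
int iommuTranslateRange(uint64_t uIova, size_t cbMax, uint64_t *pGCPhys, size_t *pcbContig);
int physAccess(uint64_t GCPhys, uint8_t *pbBuf, size_t cb);

/* Each iteration accesses only the bytes the last translation proved
   physically contiguous, which is exactly what the two hunks above fix. */
int bounceRead(uint64_t uIova, uint8_t *pbBuf, size_t cbRead)
{
    size_t cbLeft = cbRead;
    while (cbLeft > 0)
    {
        uint64_t GCPhysOut = 0;
        size_t   cbContig  = 0;
        int rc = iommuTranslateRange(uIova, cbLeft, &GCPhysOut, &cbContig);
        if (rc != 0)
            return rc;
        rc = physAccess(GCPhysOut, pbBuf, cbContig);  /* Bug was passing cbRead here. */
        if (rc != 0)
            return rc;
        uIova  += cbContig;
        pbBuf  += cbContig;
        cbLeft -= cbContig;
    }
    return 0;
}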