Changeset 87714 in vbox
- Timestamp: Feb 11, 2021 7:12:58 AM (4 years ago)
- svn:sync-xref-src-repo-rev: 142758
- File: 1 edited
Legend:
- Unmodified
- Added
- Removed
trunk/src/VBox/Devices/Bus/DevIommuAmd.cpp
r87713 r87714 204 204 205 205 /** 206 * I/O page walk result.207 */ 208 typedef struct IO WALKRESULT206 * I/O page lookup. 207 */ 208 typedef struct IOPAGELOOKUP 209 209 { 210 210 /** The translated system physical address. */ … … 214 214 /** The I/O permissions for this translation, see IOMMU_IO_PERM_XXX. */ 215 215 uint8_t fPerm; 216 } IO WALKRESULT;217 /** Pointer to an I/O walk result struct. */218 typedef IO WALKRESULT *PIOWALKRESULT;219 /** Pointer to a const I/O walk result struct. */220 typedef IO WALKRESULT const *PCIOWALKRESULT;216 } IOPAGELOOKUP; 217 /** Pointer to an I/O page lookup. */ 218 typedef IOPAGELOOKUP *PIOPAGELOOKUP; 219 /** Pointer to a const I/O page lookup. */ 220 typedef IOPAGELOOKUP const *PCIOPAGELOOKUP; 221 221 222 222 /** … … 248 248 /** The least recently used (LRU) list node. */ 249 249 RTLISTNODE NdLru; 250 /** The I/O walk resultof the translation. */251 IO WALKRESULT WalkResult;250 /** The I/O page lookup of the translation. */ 251 IOPAGELOOKUP PageLookup; 252 252 /** Whether the entry needs to be evicted from the cache. */ 253 253 bool fEvictPending; … … 456 456 STAMCOUNTER StatIotlbeCacheMiss; /**< Number of IOTLB cache misses. */ 457 457 STAMCOUNTER StatIotlbeLazyEvictReuse; /**< Number of IOTLB entries re-used after lazy eviction. */ 458 STAMPROFILEADV StatIotlbeLookup; /**< Profiling of IOTLB entry lookup ( cached). */458 STAMPROFILEADV StatIotlbeLookup; /**< Profiling of IOTLB entry lookup (from cache). */ 459 459 460 460 STAMCOUNTER StatDteLookupNonContig; /**< Number of DTE lookups that result in non-contiguous regions. */ 461 STAMPROFILEADV StatIoPageWalkLookup; /**< Profiling of I/O page walk ( uncached). */461 STAMPROFILEADV StatIoPageWalkLookup; /**< Profiling of I/O page walk (from memory). 
*/ 462 462 /** @} */ 463 463 #endif … … 691 691 692 692 /** 693 * Checks whether two consecutive I/O page walkresults translates to a physically693 * Checks whether two consecutive I/O page lookup results translates to a physically 694 694 * contiguous region. 695 695 * 696 696 * @returns @c true if they are contiguous, @c false otherwise. 697 * @param p WalkResultPrev The I/O walkresult of the previous page.698 * @param p WalkResult The I/O walkresult of the current page.699 */ 700 static bool iommuAmdLookupIsAccessContig(PCIO WALKRESULT pWalkResultPrev, PCIOWALKRESULT pWalkResult)701 { 702 Assert(p WalkResultPrev->fPerm == pWalkResult->fPerm);703 size_t const cbPrev = RT_BIT_64(p WalkResultPrev->cShift);704 RTGCPHYS const GCPhysPrev = p WalkResultPrev->GCPhysSpa;705 RTGCPHYS const GCPhys = p WalkResult->GCPhysSpa;706 uint64_t const offMaskPrev = IOMMU_GET_PAGE_OFF_MASK(p WalkResultPrev->cShift);707 uint64_t const offMask = IOMMU_GET_PAGE_OFF_MASK(p WalkResult->cShift);697 * @param pPageLookupPrev The I/O page lookup result of the previous page. 698 * @param pPageLookup The I/O page lookup result of the current page. 699 */ 700 static bool iommuAmdLookupIsAccessContig(PCIOPAGELOOKUP pPageLookupPrev, PCIOPAGELOOKUP pPageLookup) 701 { 702 Assert(pPageLookupPrev->fPerm == pPageLookup->fPerm); 703 size_t const cbPrev = RT_BIT_64(pPageLookupPrev->cShift); 704 RTGCPHYS const GCPhysPrev = pPageLookupPrev->GCPhysSpa; 705 RTGCPHYS const GCPhys = pPageLookup->GCPhysSpa; 706 uint64_t const offMaskPrev = IOMMU_GET_PAGE_OFF_MASK(pPageLookupPrev->cShift); 707 uint64_t const offMask = IOMMU_GET_PAGE_OFF_MASK(pPageLookup->cShift); 708 708 709 709 /* Paranoia: Ensure offset bits are 0. 
*/ … … 808 808 AVLU64KEY const uKey = pIotlbe->Core.Key; 809 809 uint64_t const uIova = IOMMU_IOTLB_KEY_GET_IOVA(uKey); 810 RTGCPHYS const GCPhysSpa = pIotlbe-> WalkResult.GCPhysSpa;811 uint8_t const cShift = pIotlbe-> WalkResult.cShift;810 RTGCPHYS const GCPhysSpa = pIotlbe->PageLookup.GCPhysSpa; 811 uint8_t const cShift = pIotlbe->PageLookup.cShift; 812 812 size_t const cbPage = RT_BIT_64(cShift); 813 uint8_t const fPerm = pIotlbe-> WalkResult.fPerm;813 uint8_t const fPerm = pIotlbe->PageLookup.fPerm; 814 814 const char *pszPerm = iommuAmdMemAccessGetPermName(fPerm); 815 815 bool const fEvictPending = pIotlbe->fEvictPending; … … 862 862 * @param uDomainId The domain ID. 863 863 * @param uIova The I/O virtual address. 864 * @param p WalkResult The I/O page walkresult of the access.864 * @param pPageLookup The I/O page lookup result of the access. 865 865 */ 866 866 static void iommuAmdIotlbEntryInsert(PIOMMU pThis, PIOTLBE pIotlbe, uint16_t uDomainId, uint64_t uIova, 867 PCIO WALKRESULT pWalkResult)867 PCIOPAGELOOKUP pPageLookup) 868 868 { 869 869 /* Initialize the IOTLB entry with results of the I/O page walk. */ 870 870 pIotlbe->Core.Key = IOMMU_IOTLB_KEY_MAKE(uDomainId, uIova); 871 pIotlbe-> WalkResult = *pWalkResult;871 pIotlbe->PageLookup = *pPageLookup; 872 872 873 873 /* Validate. */ … … 894 894 STAM_COUNTER_INC(&pThis->StatIotlbeLazyEvictReuse); 895 895 } 896 Assert(pFound-> WalkResult.cShift == pWalkResult->cShift);897 pFound-> WalkResult.fPerm = pWalkResult->fPerm;898 pFound-> WalkResult.GCPhysSpa = pWalkResult->GCPhysSpa;896 Assert(pFound->PageLookup.cShift == pPageLookup->cShift); 897 pFound->PageLookup.fPerm = pPageLookup->fPerm; 898 pFound->PageLookup.GCPhysSpa = pPageLookup->GCPhysSpa; 899 899 } 900 900 } … … 918 918 919 919 RT_ZERO(pIotlbe->Core); 920 RT_ZERO(pIotlbe-> WalkResult);920 RT_ZERO(pIotlbe->PageLookup); 921 921 /* We must not erase the LRU node connections here! 
*/ 922 922 pIotlbe->fEvictPending = false; … … 962 962 * @param uDomainId The domain ID. 963 963 * @param uIova The I/O virtual address. 964 * @param p WalkResult The I/O page walkresult of the access.965 */ 966 static void iommuAmdIotlbAdd(PIOMMU pThis, uint16_t uDomainId, uint64_t uIova, PCIO WALKRESULT pWalkResult)964 * @param pPageLookup The I/O page lookup result of the access. 965 */ 966 static void iommuAmdIotlbAdd(PIOMMU pThis, uint16_t uDomainId, uint64_t uIova, PCIOPAGELOOKUP pPageLookup) 967 967 { 968 968 Assert(!(uIova & X86_PAGE_4K_OFFSET_MASK)); 969 Assert(p WalkResult);970 Assert(p WalkResult->cShift <= 31);971 Assert(p WalkResult->fPerm != IOMMU_IO_PERM_NONE);969 Assert(pPageLookup); 970 Assert(pPageLookup->cShift <= 31); 971 Assert(pPageLookup->fPerm != IOMMU_IO_PERM_NONE); 972 972 973 973 /* … … 986 986 987 987 /* Initialize and insert the IOTLB entry into the cache. */ 988 iommuAmdIotlbEntryInsert(pThis, pIotlbe, uDomainId, uIova, p WalkResult);988 iommuAmdIotlbEntryInsert(pThis, pIotlbe, uDomainId, uIova, pPageLookup); 989 989 990 990 /* Move the entry to the most recently used slot. */ … … 998 998 999 999 /* Initialize and insert the IOTLB entry into the cache. */ 1000 iommuAmdIotlbEntryInsert(pThis, pIotlbe, uDomainId, uIova, p WalkResult);1000 iommuAmdIotlbEntryInsert(pThis, pIotlbe, uDomainId, uIova, pPageLookup); 1001 1001 1002 1002 /* Add the entry to the most recently used slot. */ … … 1090 1090 1091 1091 /** 1092 * Adds or updates an IOTLB entry for the given I/O page walk result.1092 * Adds or updates IOTLB entries for the given range of I/O virtual addresses. 1093 1093 * 1094 1094 * @param pDevIns The IOMMU instance data. … … 1099 1099 * @param fPerm The I/O permissions for the access, see IOMMU_IO_PERM_XXX. 
1100 1100 */ 1101 static void iommuAmdIotlb Update(PPDMDEVINS pDevIns, uint16_t uDomainId, uint64_t uIova, size_t cbAccess, RTGCPHYS GCPhysSpa,1102 uint8_t fPerm)1101 static void iommuAmdIotlbAddRange(PPDMDEVINS pDevIns, uint16_t uDomainId, uint64_t uIova, size_t cbAccess, RTGCPHYS GCPhysSpa, 1102 uint8_t fPerm) 1103 1103 { 1104 1104 Assert(!(uIova & X86_PAGE_4K_OFFSET_MASK)); … … 1110 1110 1111 1111 /* Add IOTLB entries for every page in the access. */ 1112 IO WALKRESULT WalkResult;1113 RT_ZERO( WalkResult);1114 WalkResult.cShift = X86_PAGE_4K_SHIFT;1115 WalkResult.fPerm = fPerm;1116 WalkResult.GCPhysSpa = GCPhysSpa;1112 IOPAGELOOKUP PageLookup; 1113 RT_ZERO(PageLookup); 1114 PageLookup.cShift = X86_PAGE_4K_SHIFT; 1115 PageLookup.fPerm = fPerm; 1116 PageLookup.GCPhysSpa = GCPhysSpa; 1117 1117 1118 1118 size_t cPages = cbAccess / X86_PAGE_4K_SIZE; … … 1122 1122 do 1123 1123 { 1124 iommuAmdIotlbAdd(pThis, uDomainId, uIova, & WalkResult);1124 iommuAmdIotlbAdd(pThis, uDomainId, uIova, &PageLookup); 1125 1125 uIova += X86_PAGE_4K_SIZE; 1126 WalkResult.GCPhysSpa += X86_PAGE_4K_SIZE;1126 PageLookup.GCPhysSpa += X86_PAGE_4K_SIZE; 1127 1127 --cPages; 1128 1128 } while (cPages > 0); … … 3182 3182 * @param pDte The device table entry. 3183 3183 * @param enmOp The IOMMU operation being performed. 3184 * @param p WalkResult Where to store the results of the I/O page walk. Thisis3185 * only updated when VINF_SUCCESS is returned.3184 * @param pPageLookup Where to store the results of the I/O page lookup. This 3185 * is only updated when VINF_SUCCESS is returned. 3186 3186 * 3187 3187 * @thread Any. 3188 3188 */ 3189 3189 static int iommuAmdIoPageTableWalk(PPDMDEVINS pDevIns, uint16_t uDevId, uint64_t uIova, uint8_t fPerm, PCDTE_T pDte, 3190 IOMMUOP enmOp, PIO WALKRESULT pWalkResult)3190 IOMMUOP enmOp, PIOPAGELOOKUP pPageLookup) 3191 3191 { 3192 3192 Assert(pDte->n.u1Valid); … … 3266 3266 { 3267 3267 /* The page size of the translation is the default (4K). 
*/ 3268 p WalkResult->GCPhysSpa = PtEntity.u64 & IOMMU_PTENTITY_ADDR_MASK;3269 p WalkResult->cShift = X86_PAGE_4K_SHIFT;3270 p WalkResult->fPerm = fPtePerm;3268 pPageLookup->GCPhysSpa = PtEntity.u64 & IOMMU_PTENTITY_ADDR_MASK; 3269 pPageLookup->cShift = X86_PAGE_4K_SHIFT; 3270 pPageLookup->fPerm = fPtePerm; 3271 3271 return VINF_SUCCESS; 3272 3272 } … … 3284 3284 && cShift < s_acIovaLevelShifts[uLevel + 1]) 3285 3285 { 3286 p WalkResult->GCPhysSpa = GCPhysPte;3287 p WalkResult->cShift = cShift;3288 p WalkResult->fPerm = fPtePerm;3286 pPageLookup->GCPhysSpa = GCPhysPte; 3287 pPageLookup->cShift = cShift; 3288 pPageLookup->fPerm = fPtePerm; 3289 3289 return VINF_SUCCESS; 3290 3290 } … … 3400 3400 uint64_t offIova = uIova & X86_PAGE_4K_OFFSET_MASK; 3401 3401 uint64_t cbPages = 0; 3402 IO WALKRESULT WalkResultPrev;3403 RT_ZERO( WalkResultPrev);3402 IOPAGELOOKUP PageLookupPrev; 3403 RT_ZERO(PageLookupPrev); 3404 3404 for (;;) 3405 3405 { 3406 3406 /** @todo split this into a separate function and reuse from 3407 3407 * iommuAmdCacheLookup(). */ 3408 IO WALKRESULT WalkResult;3409 RT_ZERO( WalkResult);3408 IOPAGELOOKUP PageLookup; 3409 RT_ZERO(PageLookup); 3410 3410 STAM_PROFILE_ADV_START(&pThis->StatIoPageWalkLookup, a); 3411 rc = iommuAmdIoPageTableWalk(pDevIns, uDevId, uIovaPage, fPerm, &Dte, enmOp, & WalkResult);3411 rc = iommuAmdIoPageTableWalk(pDevIns, uDevId, uIovaPage, fPerm, &Dte, enmOp, &PageLookup); 3412 3412 STAM_PROFILE_ADV_STOP(&pThis->StatIoPageWalkLookup, a); 3413 3413 if (RT_SUCCESS(rc)) 3414 3414 { 3415 3415 /* Store the translated address before continuing to access more pages. 
*/ 3416 Assert( WalkResult.cShift >= X86_PAGE_4K_SHIFT);3416 Assert(PageLookup.cShift >= X86_PAGE_4K_SHIFT); 3417 3417 if (cbRemaining == cbAccess) 3418 3418 { 3419 uint64_t const offMask = IOMMU_GET_PAGE_OFF_MASK( WalkResult.cShift);3419 uint64_t const offMask = IOMMU_GET_PAGE_OFF_MASK(PageLookup.cShift); 3420 3420 uint64_t const offSpa = uIova & offMask; 3421 Assert(!( WalkResult.GCPhysSpa & offMask));3422 GCPhysSpa = WalkResult.GCPhysSpa | offSpa;3421 Assert(!(PageLookup.GCPhysSpa & offMask)); 3422 GCPhysSpa = PageLookup.GCPhysSpa | offSpa; 3423 3423 } 3424 3424 /* Check if addresses translated so far result in a physically contiguous region. */ 3425 else if (!iommuAmdLookupIsAccessContig(& WalkResultPrev, &WalkResult))3425 else if (!iommuAmdLookupIsAccessContig(&PageLookupPrev, &PageLookup)) 3426 3426 { 3427 3427 STAM_COUNTER_INC(&pThis->StatDteLookupNonContig); … … 3429 3429 } 3430 3430 3431 /* Store the walkresult from the first/previous page. */3432 WalkResultPrev = WalkResult;3431 /* Store the page lookup result from the first/previous page. */ 3432 PageLookupPrev = PageLookup; 3433 3433 3434 3434 /* Update size of all pages read thus far. */ 3435 uint64_t const cbPage = RT_BIT_64( WalkResult.cShift);3435 uint64_t const cbPage = RT_BIT_64(PageLookup.cShift); 3436 3436 cbPages += cbPage; 3437 3437 … … 3468 3468 3469 3469 /* Update IOTLB for the contiguous range of I/O virtual addresses. 
*/ 3470 iommuAmdIotlb Update(pDevIns, Dte.n.u16DomainId, uIova & X86_PAGE_4K_BASE_MASK, cbPages,3471 GCPhysSpa & X86_PAGE_4K_BASE_MASK, WalkResultPrev.fPerm);3470 iommuAmdIotlbAddRange(pDevIns, Dte.n.u16DomainId, uIova & X86_PAGE_4K_BASE_MASK, cbPages, 3471 GCPhysSpa & X86_PAGE_4K_BASE_MASK, PageLookupPrev.fPerm); 3472 3472 } 3473 3473 #endif … … 3562 3562 { 3563 3563 PIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU); 3564 int rc = VERR_NOT_FOUND; 3565 3566 /* 3567 * We hold the cache lock across both the device and the IOTLB lookups (if any) because 3568 * we don't want the device cache to be invalidate while we perform IOTBL lookups. 3569 */ 3564 3570 IOMMU_LOCK_CACHE(pDevIns, pThis); 3565 3571 3566 3572 /* Lookup the device from the level 1 cache. */ 3567 int rc = VERR_NOT_FOUND;3568 3573 PCIODEVICE pDevice = &pThis->paDevices[uDevId]; 3569 3574 if ((pDevice->fFlags & (IOMMU_DEV_F_PRESENT | IOMMU_DEV_F_VALID | IOMMU_DEV_F_ADDR_TRANSLATE)) … … 3576 3581 uint64_t uIovaPage = uIova & X86_PAGE_4K_BASE_MASK; 3577 3582 uint64_t offIova = uIova & X86_PAGE_4K_OFFSET_MASK; 3578 IO WALKRESULT WalkResultPrev;3579 RT_ZERO( WalkResultPrev);3583 IOPAGELOOKUP PageLookupPrev; 3584 RT_ZERO(PageLookupPrev); 3580 3585 for (;;) 3581 3586 { … … 3585 3590 if (pIotlbe) 3586 3591 { 3587 PCIO WALKRESULT pWalkResult = &pIotlbe->WalkResult;3588 if ((p WalkResult->fPerm & fPerm) == fPerm)3592 PCIOPAGELOOKUP pPageLookup = &pIotlbe->PageLookup; 3593 if ((pPageLookup->fPerm & fPerm) == fPerm) 3589 3594 { /* likely */ } 3590 3595 else … … 3600 3605 3601 3606 /* Store the translated address before continuing to translate more pages. 
*/ 3602 Assert(p WalkResult->cShift >= X86_PAGE_4K_SHIFT);3607 Assert(pPageLookup->cShift >= X86_PAGE_4K_SHIFT); 3603 3608 if (cbRemaining == cbAccess) 3604 3609 { 3605 uint64_t const offMask = IOMMU_GET_PAGE_OFF_MASK(p WalkResult->cShift);3610 uint64_t const offMask = IOMMU_GET_PAGE_OFF_MASK(pPageLookup->cShift); 3606 3611 uint64_t const offSpa = uIova & offMask; 3607 Assert(!(p WalkResult->GCPhysSpa & offMask));3608 GCPhysSpa = p WalkResult->GCPhysSpa | offSpa;3612 Assert(!(pPageLookup->GCPhysSpa & offMask)); 3613 GCPhysSpa = pPageLookup->GCPhysSpa | offSpa; 3609 3614 } 3610 3615 /* Check if addresses translated so far result in a physically contiguous region. */ 3611 else if (!iommuAmdLookupIsAccessContig(& WalkResultPrev, pWalkResult))3616 else if (!iommuAmdLookupIsAccessContig(&PageLookupPrev, pPageLookup)) 3612 3617 { 3613 3618 STAM_COUNTER_INC(&pThis->StatIotlbeLookupNonContig); … … 3616 3621 } 3617 3622 3618 /* Store the walkresult from the first/previous page. */3619 WalkResultPrev = *pWalkResult;3623 /* Store the page lookup result from the first/previous page. */ 3624 PageLookupPrev = *pPageLookup; 3620 3625 3621 3626 /* Check if we need to access more pages. 
*/ 3622 uint64_t const cbPage = RT_BIT_64(p WalkResult->cShift);3627 uint64_t const cbPage = RT_BIT_64(pPageLookup->cShift); 3623 3628 if (cbRemaining > cbPage - offIova) 3624 3629 { … … 5880 5885 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatIotlbeCacheMiss, STAMTYPE_COUNTER, "IOTLB/CacheMiss", STAMUNIT_OCCURENCES, "Number of IOTLB cache misses."); 5881 5886 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatIotlbeLazyEvictReuse, STAMTYPE_COUNTER, "IOTLB/LazyEvictReuse", STAMUNIT_OCCURENCES, "Number of IOTLB entries reused after lazy eviction."); 5882 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatIotlbeLookup, STAMTYPE_PROFILE, "Profile/IotlbeLookup", STAMUNIT_TICKS_PER_CALL, "Profiling IOTLB entry lookup .");5887 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatIotlbeLookup, STAMTYPE_PROFILE, "Profile/IotlbeLookup", STAMUNIT_TICKS_PER_CALL, "Profiling IOTLB entry lookup (from cache)."); 5883 5888 5884 5889 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatDteLookupNonContig, STAMTYPE_COUNTER, "DTE/LookupNonContig", STAMUNIT_OCCURENCES, "DTE lookups that resulted in non-contiguous translated regions."); 5885 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatIoPageWalkLookup, STAMTYPE_PROFILE, "Profile/IoPageWalk", STAMUNIT_TICKS_PER_CALL, "Profiling I/O page walk lookup.");5890 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatIoPageWalkLookup, STAMTYPE_PROFILE, "Profile/IoPageWalk", STAMUNIT_TICKS_PER_CALL, "Profiling I/O page walk (from memory)."); 5886 5891 # endif 5887 5892
Note: See TracChangeset for help on using the changeset viewer.