- Timestamp:
- Jul 30, 2021 9:21:29 AM
- Files:
- 1 edited
Legend:
- Unmodified: leading space
- Added: leading '+'
- Removed: leading '-'
- Elided context: '…'
trunk/src/VBox/Devices/Bus/DevIommuAmd.cpp
Diff r90257 → r90421:

@@ -908 +908 @@
 
 /**
- * Adds or updates the I/O device flags for the given device ID.
+ * Adds a device-table entry to the cache.
  *
  * @returns VBox status code.
…
  * @param   idDevice    The device ID (bus, device, function).
  * @param   pDte        The device table entry.
- * @param   fOrMask     The device flags (usually compound flags) to OR in with the
- *                      basic flags, see IOMMU_DTE_CACHE_F_XXX.
- */
-static int iommuAmdDteCacheAdd(PPDMDEVINS pDevIns, uint16_t idDevice, PCDTE_T pDte, uint16_t fOrMask)
-{
-    Assert(pDte);
-    Assert(idDevice);
-
+ */
+static int iommuAmdDteCacheAdd(PPDMDEVINS pDevIns, uint16_t idDevice, PCDTE_T pDte)
+{
     int rc = VINF_SUCCESS;
-    uint16_t const fFlags   = iommuAmdGetBasicDevFlags(pDte) | IOMMU_DTE_CACHE_F_PRESENT | fOrMask;
+    uint16_t const fFlags   = iommuAmdGetBasicDevFlags(pDte) | IOMMU_DTE_CACHE_F_PRESENT;
     uint16_t const idDomain = pDte->n.u16DomainId;
…
     uint16_t const cDteCache = RT_ELEMENTS(pThis->aDteCache);
     uint16_t idxDte = iommuAmdDteCacheEntryLookup(pThis, idDevice);
-    if (idxDte < cDteCache)
-    {
-        pThis->aDteCache[idxDte].fFlags   = fFlags;
-        pThis->aDteCache[idxDte].idDomain = idDomain;
-    }
-    else if ((idxDte = iommuAmdDteCacheEntryGetUnused(pThis)) < cDteCache)
-    {
-        pThis->aDeviceIds[idxDte]         = idDevice;
-        pThis->aDteCache[idxDte].fFlags   = fFlags;
-        pThis->aDteCache[idxDte].idDomain = idDomain;
-    }
-    else
-        rc = VERR_OUT_OF_RESOURCES;
+    if (idxDte >= cDteCache)
+    {
+        idxDte = iommuAmdDteCacheEntryGetUnused(pThis);
+        if (idxDte < cDteCache)
+        {
+            pThis->aDeviceIds[idxDte]         = idDevice;
+            pThis->aDteCache[idxDte].fFlags   = fFlags;
+            pThis->aDteCache[idxDte].idDomain = idDomain;
+        }
+        else
+            rc = VERR_OUT_OF_RESOURCES;
+    }
+    /* else: A DTE cache entry already exists, do nothing. */
 
     IOMMU_CACHE_UNLOCK(pDevIns, pThis);
…

@@ -957 +952 @@
  * @param   pDevIns     The IOMMU instance data.
  * @param   idDevice    The device ID (bus, device, function).
- * @param   fFlags      Additional device flags to OR with existing flags, see
+ * @param   fOrMask     Device flags to add to the existing flags, see
  *                      IOMMU_DTE_CACHE_F_XXX.
- */
-static void iommuAmdDteCacheAddFlags(PPDMDEVINS pDevIns, uint16_t idDevice, uint16_t fFlags)
+ * @param   fAndMask    Device flags to remove from the existing flags, see
+ *                      IOMMU_DTE_CACHE_F_XXX.
+ */
+static void iommuAmdDteCacheUpdateFlags(PPDMDEVINS pDevIns, uint16_t idDevice, uint16_t fOrMask, uint16_t fAndMask)
 {
     PIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
…
     if (   idxDte < cDteCache
         && (pThis->aDteCache[idxDte].fFlags & IOMMU_DTE_CACHE_F_PRESENT))
-        pThis->aDteCache[idxDte].fFlags |= fFlags;
+    {
+        uint16_t const fNewFlags = (pThis->aDteCache[idxDte].fFlags | fOrMask) & ~fAndMask;
+        pThis->aDteCache[idxDte].fFlags = fNewFlags;
+    }
 
     IOMMU_CACHE_UNLOCK(pDevIns, pThis);
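Note on the iommuAmdDteCacheAdd() rework above: r90257 overwrote the flags of an already-cached entry on every call, while r90421 only claims a free slot when the device is not cached yet and otherwise leaves the entry alone. Below is a minimal standalone sketch of that insert-if-absent behaviour; the cache size, the zero-means-unused convention and the helper names are invented for illustration, not taken from the source.

#include <stdint.h>
#include <stdio.h>

/* Toy model of the reworked iommuAmdDteCacheAdd(): an existing entry is left
 * untouched, and a new entry only claims a free slot. */
#define CACHE_SIZE 4

static uint16_t g_aDeviceIds[CACHE_SIZE]; /* 0 = unused slot (invented convention). */
static uint16_t g_aFlags[CACHE_SIZE];

static int cacheAdd(uint16_t idDevice, uint16_t fFlags)
{
    /* Existing entry?  r90421 now does nothing (r90257 overwrote the flags). */
    for (unsigned i = 0; i < CACHE_SIZE; i++)
        if (g_aDeviceIds[i] == idDevice)
            return 0;

    /* Otherwise claim an unused slot, akin to iommuAmdDteCacheEntryGetUnused(). */
    for (unsigned i = 0; i < CACHE_SIZE; i++)
        if (g_aDeviceIds[i] == 0)
        {
            g_aDeviceIds[i] = idDevice;
            g_aFlags[i]     = fFlags;
            return 0;
        }
    return -1; /* Cache full, akin to VERR_OUT_OF_RESOURCES. */
}

int main(void)
{
    cacheAdd(0x10, 0x1);                           /* Inserted into slot 0. */
    cacheAdd(0x10, 0x3);                           /* Already present: left untouched. */
    printf("flags=%#x\n", (unsigned)g_aFlags[0]);  /* Prints flags=0x1, not 0x3. */
    return 0;
}

Flag changes for an already-cached device now go through iommuAmdDteCacheUpdateFlags() from the second hunk above.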
@@ -1153 +1153 @@
     {
         /* Initialize the IOTLB entry with results of the I/O page walk. */
-        pIotlbe->Core.Key   = IOMMU_IOTLB_KEY_MAKE(idDomain, uIova);
-        pIotlbe->PageLookup = *pPageLookup;
-
-        /* Validate. */
-        Assert(pIotlbe->Core.Key != IOMMU_IOTLB_KEY_NIL);
-        Assert(!pIotlbe->fEvictPending);
+        AVLU64KEY const uKey = IOMMU_IOTLB_KEY_MAKE(idDomain, uIova);
+        Assert(uKey != IOMMU_IOTLB_KEY_NIL);
 
         /* Check if the entry already exists. */
-        PIOTLBE pFound = (PIOTLBE)RTAvlU64Get(&pThisR3->TreeIotlbe, pIotlbe->Core.Key);
+        PIOTLBE pFound = (PIOTLBE)RTAvlU64Get(&pThisR3->TreeIotlbe, uKey);
         if (!pFound)
         {
             /* Insert the entry into the cache. */
+            pIotlbe->Core.Key   = uKey;
+            pIotlbe->PageLookup = *pPageLookup;
+            Assert(!pIotlbe->fEvictPending);
+
             bool const fInserted = RTAvlU64Insert(&pThisR3->TreeIotlbe, &pIotlbe->Core);
             Assert(fInserted); NOREF(fInserted);
…
         {
             /* Update the existing entry. */
+            Assert(pFound->Core.Key == uKey);
             if (pFound->fEvictPending)
             {
…
                 STAM_COUNTER_INC(&pThis->StatIotlbeLazyEvictReuse); NOREF(pThis);
             }
-            Assert(pFound->PageLookup.cShift == pPageLookup->cShift);
-            pFound->PageLookup.fPerm     = pPageLookup->fPerm;
-            pFound->PageLookup.GCPhysSpa = pPageLookup->GCPhysSpa;
+            pFound->PageLookup = *pPageLookup;
         }
     }

@@ -1402 +1401 @@
 
     IOPAGELOOKUP PageLookup;
-    PageLookup.GCPhysSpa = pAddrOut->GCPhysSpa & X86_PAGE_4K_BASE_MASK; ;
+    PageLookup.GCPhysSpa = pAddrOut->GCPhysSpa & X86_PAGE_4K_BASE_MASK;
     PageLookup.cShift    = pAddrOut->cShift;
     PageLookup.fPerm     = pAddrOut->fPerm;

@@ -3331 +3330 @@
 
 #ifdef IOMMU_WITH_DTE_CACHE
-# define IOMMU_DTE_CACHE_SET_PF_RAISED(a_pDevIns, a_DevId)    iommuAmdDteCacheAddFlags((a_pDevIns), (a_DevId), \
-                                                                                       IOMMU_DTE_CACHE_F_IO_PAGE_FAULT_RAISED)
+# define IOMMU_DTE_CACHE_SET_PF_RAISED(a_pDevIns, a_DevId)    iommuAmdDteCacheUpdateFlags((a_pDevIns), (a_DevId), \
+                                                                                          IOMMU_DTE_CACHE_F_IO_PAGE_FAULT_RAISED, \
+                                                                                          0 /* fAndMask */)
 #else
 # define IOMMU_DTE_CACHE_SET_PF_RAISED(a_pDevIns, a_DevId)    do { } while (0)

@@ -3896 +3896 @@
         if (RT_SUCCESS(rc))
         {
-            /* Validate results of the translation. */
+            /*
+             * Validate results of the translation.
+             */
+            /* The IOTLB cache preserves the original page sizes even though the IOVAs are split into 4K pages. */
             Assert(PageLookup.cShift >= X86_PAGE_4K_SHIFT && PageLookup.cShift <= 51);
-            Assert(!(PageLookup.GCPhysSpa & X86_GET_PAGE_OFFSET_MASK(PageLookup.cShift)));
+            Assert(   pfnIoPageLookup != iommuAmdDteLookupPage
+                   || !(PageLookup.GCPhysSpa & X86_GET_PAGE_OFFSET_MASK(PageLookup.cShift)));
             Assert((PageLookup.fPerm & fPerm) == fPerm);
 

@@ -3987 +3991 @@
     if (RT_SUCCESS(rc))
     {
+#ifdef IOMMU_WITH_IOTLBE_CACHE
+        iommuAmdDteCacheAdd(pDevIns, idDevice, &Dte);
+#endif
         if (Dte.n.u1Valid)
         {

@@ -4036 +4043 @@
                     {
                         /* Update that addresses requires translation (cumulative permissions of DTE and I/O page tables). */
-                        iommuAmdDteCacheAdd(pDevIns, Aux.idDevice, &Dte, IOMMU_DTE_CACHE_F_ADDR_TRANSLATE);
+                        iommuAmdDteCacheUpdateFlags(pDevIns, idDevice, IOMMU_DTE_CACHE_F_ADDR_TRANSLATE, 0 /* fAndMask */);
                         /* Update IOTLB for the contiguous range of I/O virtual addresses. */
                         iommuAmdIotlbAddRange(pDevIns, Aux.idDomain, uIova & X86_PAGE_4K_BASE_MASK, cbContiguous, &AddrOut);
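The iommuAmdIotlbAdd() change (hunk @@ -1153 above) switches to a probe-before-initialize pattern: compute the AVL key, look it up, and fill in the pre-allocated node only when the key is absent; an existing node is updated in place by copying the whole PageLookup, which also retires the old assertion that the page shift never changes (the IOTLB now keeps original page sizes even for 4K-split IOVAs, per the comment added in the @@ -3896 hunk). A short sketch of the same pattern against the IPRT AVL calls used in the diff; the MYTLBE type and the tlbAddOrUpdate() helper are invented for illustration:

#include <iprt/avl.h>
#include <iprt/assert.h>

/* Simplified IOTLB entry: an AVL node keyed on (domain, IOVA) plus a payload
 * standing in for the page-lookup result. */
typedef struct MYTLBE
{
    AVLU64NODECORE Core;      /* Core.Key holds the lookup key. */
    uint64_t       uPayload;
} MYTLBE;

/* Insert-or-update in the style of the reworked iommuAmdIotlbAdd(): probe the
 * tree first and only initialize the pre-allocated node when the key is new. */
static void tlbAddOrUpdate(PAVLU64TREE pTree, MYTLBE *pNew, uint64_t uKey, uint64_t uPayload)
{
    MYTLBE *pFound = (MYTLBE *)RTAvlU64Get(pTree, uKey);
    if (!pFound)
    {
        pNew->Core.Key = uKey;
        pNew->uPayload = uPayload;
        bool const fInserted = RTAvlU64Insert(pTree, &pNew->Core);
        Assert(fInserted); NOREF(fInserted);
    }
    else
        pFound->uPayload = uPayload; /* Key already cached: update in place. */
}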
@@ -4054 +4061 @@
 #ifdef IOMMU_WITH_IOTLBE_CACHE
                 /* Update that addresses permissions of DTE apply (but omit address translation). */
-                iommuAmdDteCacheAdd(pDevIns, idDevice, &Dte, IOMMU_DTE_CACHE_F_IO_PERM);
+                iommuAmdDteCacheUpdateFlags(pDevIns, idDevice, IOMMU_DTE_CACHE_F_IO_PERM, IOMMU_DTE_CACHE_F_ADDR_TRANSLATE);
 #endif
             }

@@ -4084 +4091 @@
             GCPhysSpa    = uIova;
             cbContiguous = cbIova;
-
-#ifdef IOMMU_WITH_IOTLBE_CACHE
-            /* Update that addresses don't require translation (nor permission checks) but a DTE is present. */
-            iommuAmdDteCacheAdd(pDevIns, idDevice, &Dte, 0 /* fFlags */);
-#endif
         }
     }

@@ -4601 +4603 @@
     {
 #ifdef IOMMU_WITH_IRTE_CACHE
-        iommuAmdDteCacheAdd(pDevIns, idDevice, &Dte, 0 /* fFlags */);
+        iommuAmdDteCacheAdd(pDevIns, idDevice, &Dte);
 #endif
         /* If the DTE is not valid, all interrupts are forwarded without remapping. */
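Taken together, the call-site hunks centralize DTE caching: r90421 adds the cache entry once, right after the DTE is read (the @@ -3987 hunk), and the individual translation paths merely set or clear flags, so the pass-through path could drop its own cache call entirely (the @@ -4084 hunk). A toy model of that flow, runnable stand-alone; all names and flag values are invented and only mirror the shape of the real code:

#include <stdio.h>

/* Toy model of the r90421 caching flow: the DTE is cached once after being
 * read, then each translation path only adjusts flags. */
#define F_PRESENT        0x1u
#define F_ADDR_TRANSLATE 0x2u
#define F_IO_PERM        0x4u

static unsigned g_fCachedFlags; /* Stand-in for aDteCache[idx].fFlags. */

static void cacheAdd(void) /* Mirrors the new iommuAmdDteCacheAdd(). */
{
    if (!(g_fCachedFlags & F_PRESENT)) /* No-op if an entry already exists. */
        g_fCachedFlags = F_PRESENT;
}

static void cacheUpdateFlags(unsigned fOrMask, unsigned fAndMask) /* Mirrors iommuAmdDteCacheUpdateFlags(). */
{
    if (g_fCachedFlags & F_PRESENT)
        g_fCachedFlags = (g_fCachedFlags | fOrMask) & ~fAndMask;
}

int main(void)
{
    cacheAdd();                                    /* DTE read: cache it once. */
    cacheUpdateFlags(F_ADDR_TRANSLATE, 0);         /* Full-translation path. */
    printf("%#x\n", g_fCachedFlags);               /* 0x3 */
    cacheUpdateFlags(F_IO_PERM, F_ADDR_TRANSLATE); /* Permissions-only path. */
    printf("%#x\n", g_fCachedFlags);               /* 0x5: translate flag cleared. */
    return 0;
}

The OR-then-AND-NOT rule is what lets the permissions-only path clear IOMMU_DTE_CACHE_F_ADDR_TRANSLATE in the same call that sets IOMMU_DTE_CACHE_F_IO_PERM, which the old OR-only iommuAmdDteCacheAddFlags() could not express.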