Changeset 89407 in vbox for trunk/src/VBox
- Timestamp: May 31, 2021 4:32:28 PM (4 years ago)
- svn:sync-xref-src-repo-rev: 144759
- File: 1 edited
Legend:
- Unmodified lines are shown with a leading space
- Added lines are prefixed with '+'
- Removed lines are prefixed with '-'
trunk/src/VBox/Devices/Bus/DevIommuIntel.cpp
--- trunk/src/VBox/Devices/Bus/DevIommuIntel.cpp	(r89375)
+++ trunk/src/VBox/Devices/Bus/DevIommuIntel.cpp	(r89407)
@@ -54,4 +54,7 @@
 #define DMAR_IS_MMIO_OFF_VALID(a_off)       (   (a_off) < DMAR_MMIO_GROUP_0_OFF_END \
                                              || (a_off) - DMAR_MMIO_GROUP_1_OFF_FIRST < DMAR_MMIO_GROUP_1_SIZE)
+
+/** Gets the page offset mask given the number of bits to shift. */
+#define DMAR_GET_PAGE_OFF_MASK(a_cShift)    (~(UINT64_C(0xffffffffffffffff) << (a_cShift)))
 
 /** Acquires the DMAR lock but returns with the given busy error code on failure. */
@@ -139,4 +142,21 @@
 #define DMAR_ND                             6
 
+/** @name DMAR_PERM_XXX: DMA request permissions.
+ * The order of R, W, X bits is important as it corresponds to those bits in
+ * page-table entries.
+ *
+ * @{ */
+/** DMA request permission: Read. */
+#define DMAR_PERM_READ                      RT_BIT(0)
+/** DMA request permission: Write. */
+#define DMAR_PERM_WRITE                     RT_BIT(1)
+/** DMA request permission: Execute. */
+#define DMAR_PERM_EXE                       RT_BIT(2)
+/** DMA request permission: Supervisor privilege. */
+#define DMAR_PERM_PRIV                      RT_BIT(3)
+/** DMA request permissions: All. */
+#define DMAR_PERM_ALL                       (DMAR_PERM_READ | DMAR_PERM_WRITE | DMAR_PERM_EXE | DMAR_PERM_PRIV)
+/** @} */
+
 /** Release log prefix string. */
 #define DMAR_LOG_PFX                        "Intel-IOMMU"
@@ -174,7 +194,12 @@
     kDmarDiag_Atf_Lsl_1,
     kDmarDiag_Atf_Lsl_2,
+    kDmarDiag_Atf_Lsl_2_LargePage,
     kDmarDiag_Atf_Rta_1_1,
     kDmarDiag_Atf_Rta_1_2,
     kDmarDiag_Atf_Rta_1_3,
+    kDmarDiag_Atf_Ssl_1,
+    kDmarDiag_Atf_Ssl_2,
+    kDmarDiag_Atf_Ssl_3,
+    kDmarDiag_Atf_Ssl_3_LargePage,
 
     /* CCMD_REG faults. */
@@ -242,7 +267,12 @@
     DMARDIAG_DESC(Atf_Lsl_1                 ),
     DMARDIAG_DESC(Atf_Lsl_2                 ),
+    DMARDIAG_DESC(Atf_Lsl_2_LargePage       ),
     DMARDIAG_DESC(Atf_Rta_1_1               ),
     DMARDIAG_DESC(Atf_Rta_1_2               ),
     DMARDIAG_DESC(Atf_Rta_1_3               ),
+    DMARDIAG_DESC(Atf_Ssl_1                 ),
+    DMARDIAG_DESC(Atf_Ssl_2                 ),
+    DMARDIAG_DESC(Atf_Ssl_3                 ),
+    DMARDIAG_DESC(Atf_Ssl_3_LargePage       ),
     DMARDIAG_DESC(CcmdReg_NotSupported      ),
     DMARDIAG_DESC(CcmdReg_Qi_Enabled        ),
@@ -318,4 +348,6 @@
     /** Maximum supported paging level (3, 4 or 5). */
     uint8_t                     uMaxPagingLevel;
+    /** DMA request valid permissions mask. */
+    uint8_t                     fPermValidMask;
 
     /** The event semaphore the invalidation-queue thread waits on. */
@@ -432,4 +464,20 @@
 
 /**
+ * I/O TLB entry.
+ */
+typedef struct DMARIOTLBE
+{
+    RTGCPHYS        GCPhysBase;
+    uint8_t         cShift;
+    uint8_t         fPerm;
+    uint16_t        idDomain;
+    uint16_t        uPadding0;
+} DMARIOTLBE;
+/** Pointer to an IOTLB entry. */
+typedef DMARIOTLBE *PDMARIOTLBE;
+/** Pointer to a const IOTLB entry. */
+typedef DMARIOTLBE const *PCDMARIOTLBE;
+
+/**
  * DMA Address Remapping Information.
  */
@@ -438,6 +486,6 @@
     /** The device ID (bus, device, function). */
     uint16_t        idDevice;
-    /** The extended attributes of the request (VTD_REQ_ATTR_XXX). */
-    uint8_t         fReqAttr;
+    /** The requested permissions (DMAR_PERM_XXX). */
+    uint8_t         fReqPerm;
     /** The fault processing disabled (FPD) bit. */
     uint8_t         fFpd;
@@ -456,12 +504,8 @@
     size_t          cbDma;
 
-    /** @todo Might have to split the result fields below into a separate structure and
-     *        store extra info like cPageShift, permissions and attributes. */
-    /** The translated system-physical address (HPA). */
-    RTGCPHYS        GCPhysSpa;
+    /** The IOTLBE for this remapping. */
+    DMARIOTLBE      Iotlbe;
     /** The size of the contiguous translated region (in bytes). */
     size_t          cbContiguous;
-    /** The domain ID. */
-    uint16_t        idDomain;
 } DMARADDRMAP;
 /** Pointer to a DMA address remapping object. */
@@ -1446,6 +1490,6 @@
     uint8_t const fType1  = pAddrRemap->enmReqType & RT_BIT(1);
     uint8_t const fType2  = pAddrRemap->enmReqType & RT_BIT(0);
-    uint8_t const fExec   = pAddrRemap->fReqAttr & VTD_REQ_ATTR_EXE;
-    uint8_t const fPriv   = pAddrRemap->fReqAttr & VTD_REQ_ATTR_PRIV;
+    uint8_t const fExec   = pAddrRemap->fReqPerm & DMAR_PERM_EXE;
+    uint8_t const fPriv   = pAddrRemap->fReqPerm & DMAR_PERM_PRIV;
     uint64_t const uFrcdHi = RT_BF_MAKE(VTD_BF_1_FRCD_REG_SID, pAddrRemap->idDevice)
                            | RT_BF_MAKE(VTD_BF_1_FRCD_REG_T2,  fType2)
@@ -1924,10 +1968,30 @@
  * @param   SlpEntry        The second-level paging entry.
  * @param   uPagingLevel    The paging level.
+ * @param   idDomain        The domain ID for the translation.
  * @param   pAddrRemap      The DMA address remap info.
  */
-static int dmarDrSecondLevelTranslate(PPDMDEVINS pDevIns, VTD_SLP_ENTRY_T SlpEntry, uint8_t uPagingLevel, PDMARADDRMAP pAddrRemap)
+static int dmarDrSecondLevelTranslate(PPDMDEVINS pDevIns, VTD_SLP_ENTRY_T SlpEntry, uint8_t uPagingLevel, uint16_t idDomain,
+                                      PDMARADDRMAP pAddrRemap)
 {
     PCDMAR pThis = PDMDEVINS_2_DATA(pDevIns, PCDMAR);
+
+    /* Mask of valid paging entry bits. */
+    static uint64_t const s_auPtEntityRsvd[]  = { VTD_SL_PTE_VALID_MASK,
+                                                  VTD_SL_PDE_VALID_MASK,
+                                                  VTD_SL_PDPE_VALID_MASK,
+                                                  VTD_SL_PML4E_VALID_MASK,
+                                                  VTD_SL_PML5E_VALID_MASK };
+
+    /* Mask of valid large-page paging entry bits. */
+    static uint64_t const s_auLargePageRsvd[] = { 0,
+                                                  VTD_SL_PDE2M_VALID_MASK,
+                                                  VTD_SL_PDPE1G_VALID_MASK,
+                                                  0,
+                                                  0 };
+
+    /* Paranoia. */
     Assert(uPagingLevel >= 3 && uPagingLevel <= 5);
+    AssertCompile(RT_ELEMENTS(s_auPtEntityRsvd) == RT_ELEMENTS(s_auLargePageRsvd));
+    AssertCompile(RT_ELEMENTS(s_auPtEntityRsvd) == 5);
 
     /*
@@ -1940,7 +2004,9 @@
     for (int8_t iLevel = uPagingLevel - 1; iLevel >= 0; iLevel--)
     {
-        /* Read the paging entry for the current level. */
+        /*
+         * Read the paging entry for the current level.
+         */
+        uint8_t const cLevelShift = 12 + ((iLevel - 1) * 9);
         {
-            uint8_t const cLevelShift = 12 + ((iLevel - 1) * 9);
             uint16_t const idxPte = (uDmaAddr >> cLevelShift) & UINT64_C(0x1ff);
             uint64_t const offPte = idxPte << 3;
@@ -1952,19 +2018,85 @@
             else
             {
-                /** @todo If this function is going to be used for scalable-mode second-level
-                 *        translation, we need to report different error codes. The TTM is
-                 *        available in pAddrRemap->fTtm, but how cleanly we can handle this is
-                 *        something to be decided later. For now we just use legacy mode error
-                 *        codes. Asserted as such below. */
-                Assert(pAddrRemap->fTtm == VTD_TTM_LEGACY_MODE);
-                dmarAtFaultRecord(pDevIns, kDmarDiag_Atf_Lsl_1, VTDATFAULT_LSL_1, pAddrRemap);
+                if (pAddrRemap->fTtm == VTD_TTM_LEGACY_MODE)
+                    dmarAtFaultRecord(pDevIns, kDmarDiag_Atf_Lsl_1, VTDATFAULT_LSL_1, pAddrRemap);
+                else
+                    dmarAtFaultRecord(pDevIns, kDmarDiag_Atf_Ssl_1, VTDATFAULT_SSL_1, pAddrRemap);
                 return VERR_IOMMU_ADDR_TRANSLATION_FAILED;
             }
         }
 
-        /** @todo validate page table entity. */
-        /** @todo once we reach the level 1, compute final address with page offset. */
-    }
-    return VERR_NOT_IMPLEMENTED;
+        /*
+         * Check I/O permissions.
+         * This must be done prior to check reserved bits for properly reporting errors SSL.2 and SSL.3.
+         *
+         * See Intel spec. 7.1.3 "Fault conditions and Remapping hardware behavior for various request".
+         */
+        uint8_t const fReqPerm = pAddrRemap->fReqPerm & pThis->fPermValidMask;
+        uint8_t const fPtPerm  = uPtEntity & pThis->fPermValidMask;
+        if ((fPtPerm & fReqPerm) == fReqPerm)
+        { /* likely */ }
+        else
+        {
+            if (pAddrRemap->fTtm == VTD_TTM_LEGACY_MODE)
+                dmarAtFaultRecord(pDevIns, kDmarDiag_Atf_Lsl_2, VTDATFAULT_LSL_2, pAddrRemap);
+            else
+                dmarAtFaultRecord(pDevIns, kDmarDiag_Atf_Ssl_2, VTDATFAULT_SSL_2, pAddrRemap);
+            return VERR_IOMMU_ADDR_TRANSLATION_FAILED;
+        }
+
+        /*
+         * Validate reserved bits of the current paging entry.
+         */
+        if (!(uPtEntity & ~s_auPtEntityRsvd[iLevel]))
+        { /* likely */ }
+        else
+        {
+            if (pAddrRemap->fTtm == VTD_TTM_LEGACY_MODE)
+                dmarAtFaultRecord(pDevIns, kDmarDiag_Atf_Lsl_2, VTDATFAULT_LSL_2, pAddrRemap);
+            else
+                dmarAtFaultRecord(pDevIns, kDmarDiag_Atf_Ssl_3, VTDATFAULT_SSL_3, pAddrRemap);
+            return VERR_IOMMU_ADDR_TRANSLATION_FAILED;
+        }
+
+        /*
+         * Check if this is a 1GB page or a 2MB page.
+         */
+        AssertCompile(VTD_BF_SL_PDE_PS_MASK == VTD_BF_SL_PDPE_PS_MASK);
+        uint8_t const fLargePage = RT_BF_GET(uPtEntity, VTD_BF_SL_PDE_PS);
+        if (fLargePage && iLevel > 0)
+        {
+            Assert(iLevel == 1 || iLevel == 2);
+            uint8_t const fSllpsMask = RT_BF_GET(pThis->fCapReg, VTD_BF_CAP_REG_SLLPS);
+            if (fSllpsMask & RT_BIT(iLevel - 1))
+            {
+                pAddrRemap->Iotlbe.GCPhysBase = uPtEntity & ~(RT_BIT_64(cLevelShift) - 1);
+                pAddrRemap->Iotlbe.cShift     = cLevelShift;
+                pAddrRemap->Iotlbe.fPerm      = fPtPerm;
+                pAddrRemap->Iotlbe.idDomain   = idDomain;
+                return VINF_SUCCESS;
+            }
+
+            if (pAddrRemap->fTtm == VTD_TTM_LEGACY_MODE)
+                dmarAtFaultRecord(pDevIns, kDmarDiag_Atf_Lsl_2_LargePage, VTDATFAULT_LSL_2, pAddrRemap);
+            else
+                dmarAtFaultRecord(pDevIns, kDmarDiag_Atf_Ssl_3_LargePage, VTDATFAULT_SSL_3, pAddrRemap);
+            return VERR_IOMMU_ADDR_TRANSLATION_FAILED;
+        }
+
+        /*
+         * If this is the final PTE, compute the translation address and we're done.
+         */
+        if (iLevel == 0)
+        {
+            pAddrRemap->Iotlbe.GCPhysBase = uPtEntity & ~(RT_BIT_64(cLevelShift) - 1);
+            pAddrRemap->Iotlbe.cShift     = cLevelShift;
+            pAddrRemap->Iotlbe.fPerm      = fPtPerm;
+            pAddrRemap->Iotlbe.idDomain   = idDomain;
+            return VINF_SUCCESS;
+        }
+    }
+
+    /* Shouldn't ever reach here. */
+    return VERR_IOMMU_IPE_0;
 }
 
@@ -2019,4 +2151,7 @@
         && !(uCtxEntryQword1 & ~VTD_CONTEXT_ENTRY_1_VALID_MASK))
     {
+        /* Get the domain ID for this mapping. */
+        uint16_t const idDomain = RT_BF_GET(uCtxEntryQword1, VTD_BF_1_CONTEXT_ENTRY_DID);
+
         /* Validate the translation type (TT). */
         PCDMAR pThis = PDMDEVINS_2_DATA(pDevIns, PCDMAR);
@@ -2042,9 +2177,7 @@
                 if (RT_SUCCESS(rc))
                 {
-                    /* Note the domain ID this context-entry maps to. */
-                    pAddrRemap->idDomain = RT_BF_GET(uCtxEntryQword1, VTD_BF_1_CONTEXT_ENTRY_DID);
-
                     /* Finally... perform second-level translation. */
-                    return dmarDrSecondLevelTranslate(pDevIns, SlpEntry, uPagingLevel, pAddrRemap);
+                    return dmarDrSecondLevelTranslate(pDevIns, SlpEntry, uPagingLevel, idDomain,
+                                                      pAddrRemap);
                 }
                 dmarAtFaultRecord(pDevIns, kDmarDiag_Atf_Lct_4_3, VTDATFAULT_LCT_4_3, pAddrRemap);
@@ -2070,6 +2203,9 @@
                 {
                     /** @todo Check AW == maximum SAGAW bit? */
-                    pAddrRemap->GCPhysSpa    = pAddrRemap->uDmaAddr;
-                    pAddrRemap->cbContiguous = pAddrRemap->cbDma;
+                    pAddrRemap->Iotlbe.GCPhysBase = pAddrRemap->uDmaAddr & X86_PAGE_4K_BASE_MASK;
+                    pAddrRemap->Iotlbe.cShift     = X86_PAGE_4K_SHIFT;
+                    pAddrRemap->Iotlbe.fPerm      = DMAR_PERM_ALL;
+                    pAddrRemap->Iotlbe.idDomain   = idDomain;
+                    pAddrRemap->cbContiguous      = pAddrRemap->cbDma;
                     return VINF_SUCCESS;
                 }
@@ -2198,7 +2334,9 @@
     {
         VTDREQTYPE enmReqType;
+        uint8_t    fReqPerm;
         if (fFlags & PDMIOMMU_MEM_F_READ)
         {
             enmReqType = VTDREQTYPE_READ;
+            fReqPerm   = DMAR_PERM_READ;
             STAM_COUNTER_INC(&pThis->CTX_SUFF_Z(StatMemRead));
         }
@@ -2206,4 +2344,5 @@
         {
             enmReqType = VTDREQTYPE_WRITE;
+            fReqPerm   = DMAR_PERM_WRITE;
             STAM_COUNTER_INC(&pThis->CTX_SUFF_Z(StatMemWrite));
         }
@@ -2211,13 +2350,14 @@
         uint8_t const fTtm = RT_BF_GET(uRtaddrReg, VTD_BF_RTADDR_REG_TTM);
         DMARADDRMAP AddrRemap;
-        AddrRemap.idDevice     = idDevice;
-        AddrRemap.Pasid        = NIL_PCIPASID;
-        AddrRemap.enmAddrType  = PCIADDRTYPE_UNTRANSLATED;
-        AddrRemap.enmReqType   = enmReqType;
-        AddrRemap.fTtm         = fTtm;
-        AddrRemap.uDmaAddr     = uIova;
-        AddrRemap.cbDma        = cbIova;
-        AddrRemap.GCPhysSpa    = NIL_RTGCPHYS;
-        AddrRemap.cbContiguous = 0;
+        RT_ZERO(AddrRemap);
+        AddrRemap.idDevice          = idDevice;
+        AddrRemap.fReqPerm          = fReqPerm;
+        AddrRemap.Pasid             = NIL_PCIPASID;
+        AddrRemap.enmAddrType       = PCIADDRTYPE_UNTRANSLATED;
+        AddrRemap.enmReqType        = enmReqType;
+        AddrRemap.fTtm              = fTtm;
+        AddrRemap.uDmaAddr          = uIova;
+        AddrRemap.cbDma             = cbIova;
+        AddrRemap.Iotlbe.GCPhysBase = NIL_RTGCPHYS;
 
         int rc;
@@ -2255,5 +2395,5 @@
 
         *pcbContiguous = AddrRemap.cbContiguous;
-        *pGCPhysSpa    = AddrRemap.GCPhysSpa;
+        *pGCPhysSpa    = AddrRemap.Iotlbe.GCPhysBase | DMAR_GET_PAGE_OFF_MASK(AddrRemap.Iotlbe.cShift);
         return rc;
     }
@@ -3334,4 +3474,5 @@
         uint8_t const fEim  = 1;  /* Extended interrupt mode.*/
         uint8_t const fAdms = 1;  /* Abort DMA mode support. */
+        uint8_t const fErs  = 0;  /* Execute Request (not supported). */
 
         pThis->fExtCapReg = RT_BF_MAKE(VTD_BF_ECAP_REG_C,      0)     /* Accesses don't snoop CPU cache. */
@@ -3347,5 +3488,5 @@
                           | RT_BF_MAKE(VTD_BF_ECAP_REG_NEST,   fNest)
                           | RT_BF_MAKE(VTD_BF_ECAP_REG_PRS,    0)     /* 0 as DT not supported. */
-                          | RT_BF_MAKE(VTD_BF_ECAP_REG_ERS,    0)     /* Execute request not supported. */
+                          | RT_BF_MAKE(VTD_BF_ECAP_REG_ERS,    fErs)
                           | RT_BF_MAKE(VTD_BF_ECAP_REG_SRS,    0)     /* Supervisor request not supported. */
                           | RT_BF_MAKE(VTD_BF_ECAP_REG_NWFS,   0)     /* 0 as DT not supported. */
@@ -3365,4 +3506,8 @@
                           | RT_BF_MAKE(VTD_BF_ECAP_REG_RPRIVS, 0);    /* 0 as SRS not supported. */
         dmarRegWriteRaw64(pThis, VTD_MMIO_OFF_ECAP_REG, pThis->fExtCapReg);
+
+        pThis->fPermValidMask = DMAR_PERM_READ | DMAR_PERM_WRITE;
+        if (fErs)
+            pThis->fPermValidMask = DMAR_PERM_EXE;
     }
 
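As a side note, here is a minimal standalone C sketch (not part of the changeset, no VBox headers required) of what the newly added DMAR_GET_PAGE_OFF_MASK macro evaluates to for 4K and 2M page shifts, and of one common way a translated address can be composed from an IOTLB-style (GCPhysBase, cShift) pair plus the low bits of the untranslated DMA address; the addresses used are made up purely for illustration.

#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

/* Same definition as the macro added in this changeset. */
#define DMAR_GET_PAGE_OFF_MASK(a_cShift)  (~(UINT64_C(0xffffffffffffffff) << (a_cShift)))

int main(void)
{
    /* 4K page (shift 12) and 2M large page (shift 21). */
    printf("4K offset mask: 0x%" PRIx64 "\n", DMAR_GET_PAGE_OFF_MASK(12)); /* 0xfff */
    printf("2M offset mask: 0x%" PRIx64 "\n", DMAR_GET_PAGE_OFF_MASK(21)); /* 0x1fffff */

    /* Combining a 4K-aligned page base with the page-offset bits of a DMA
       address (hypothetical values, for illustration only). */
    uint64_t const GCPhysBase = UINT64_C(0x123456000);  /* Translated page base. */
    uint64_t const uDmaAddr   = UINT64_C(0xabc);        /* Untranslated DMA address. */
    uint64_t const GCPhysSpa  = GCPhysBase | (uDmaAddr & DMAR_GET_PAGE_OFF_MASK(12));
    printf("Composed address: 0x%" PRIx64 "\n", GCPhysSpa); /* 0x123456abc */
    return 0;
}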
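Similarly, a small self-contained sketch (again not part of the changeset; the DMAR_PERM_XXX values are redefined locally instead of using RT_BIT) that mirrors the shape of the permission check added to dmarDrSecondLevelTranslate(): a request passes only when every requested permission bit is also granted by the paging entry, after both sides are masked with the permissions the emulated hardware actually validates (fPermValidMask, which this changeset initialises to read/write while ERS and SRS stay 0).

#include <stdint.h>
#include <stdio.h>

/* Local stand-ins for the DMAR_PERM_XXX constants added in this changeset. */
#define DMAR_PERM_READ   (1U << 0)
#define DMAR_PERM_WRITE  (1U << 1)
#define DMAR_PERM_EXE    (1U << 2)
#define DMAR_PERM_PRIV   (1U << 3)

/* Returns non-zero when the request is permitted by the paging-entry bits. */
static int isDmaPermitted(uint8_t fReqPerm, uint8_t fPtPerm, uint8_t fPermValidMask)
{
    uint8_t const fReq = fReqPerm & fPermValidMask; /* Ignore bits the device does not validate (e.g. EXE when ERS=0). */
    uint8_t const fPt  = fPtPerm  & fPermValidMask;
    return (fPt & fReq) == fReq;                    /* Every requested bit must be present in the entry. */
}

int main(void)
{
    uint8_t const fPermValidMask = DMAR_PERM_READ | DMAR_PERM_WRITE; /* ERS=0, SRS=0: only R/W are checked. */

    /* A write request against a read-only second-level entry is rejected... */
    printf("write vs R-only entry: %d\n",
           isDmaPermitted(DMAR_PERM_WRITE, DMAR_PERM_READ, fPermValidMask));                  /* 0 */
    /* ...while a read request against a read/write entry is allowed. */
    printf("read  vs R/W entry   : %d\n",
           isDmaPermitted(DMAR_PERM_READ, DMAR_PERM_READ | DMAR_PERM_WRITE, fPermValidMask)); /* 1 */
    return 0;
}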