Changeset 87817 in vbox
- Timestamp: Feb 20, 2021 8:24:14 AM (4 years ago)
- Location: trunk
- Files: 2 edited
Legend:
- Unmodified: unprefixed context lines ('…' marks elided unchanged code)
- Added: lines prefixed with '+'
- Removed: lines prefixed with '-'
trunk/include/VBox/iommu-amd.h
(diff r87786 → r87817)

…
 /** Mask of valid bits for EPHSUP (Enhanced Peripheral Page Request Handling
  * Support) feature (bits 52:53). */
 #define IOMMU_DTE_QWORD_0_FEAT_EPHSUP_MASK          UINT64_C(0x0030000000000000)

 /** Mask of valid bits for GTSup (Guest Translation Support) feature (bits 55:60,
  * bits 80:95). */
 #define IOMMU_DTE_QWORD_0_FEAT_GTSUP_MASK           UINT64_C(0x1f80000000000000)
 #define IOMMU_DTE_QWORD_1_FEAT_GTSUP_MASK           UINT64_C(0x00000000ffff0000)

 /** Mask of valid bits for GIoSup (Guest I/O Protection Support) feature (bit 54). */
 #define IOMMU_DTE_QWORD_0_FEAT_GIOSUP_MASK          UINT64_C(0x0040000000000000)

 /** Mask of valid DTE feature bits. */
 #define IOMMU_DTE_QWORD_0_FEAT_MASK                 (  IOMMU_DTE_QWORD_0_FEAT_EPHSUP_MASK \
                                                      | IOMMU_DTE_QWORD_0_FEAT_GTSUP_MASK  \
                                                      | IOMMU_DTE_QWORD_0_FEAT_GIOSUP_MASK)
-#define IOMMU_DTE_QWORD_1_FEAT_MASK                 (IOMMU_DTE_QWORD_0_FEAT_GIOSUP_MASK)
+#define IOMMU_DTE_QWORD_1_FEAT_MASK                 IOMMU_DTE_QWORD_0_FEAT_GIOSUP_MASK

 /** Mask of all valid DTE bits (including all feature bits). */
 #define IOMMU_DTE_QWORD_0_VALID_MASK                UINT64_C(0x7fffffffffffff83)
 #define IOMMU_DTE_QWORD_1_VALID_MASK                UINT64_C(0xfffffbffffffffff)
 #define IOMMU_DTE_QWORD_2_VALID_MASK                UINT64_C(0xff0fffffffffffff)
 #define IOMMU_DTE_QWORD_3_VALID_MASK                UINT64_C(0xffc0000000000000)

 /** Mask of the interrupt table root pointer. */
 #define IOMMU_DTE_IRTE_ROOT_PTR_MASK                UINT64_C(0x000fffffffffffc0)
 /** Number of bits to shift to get the interrupt root table pointer at
     qword 2 (qword 0 being the first one) - 128-byte aligned. */
 #define IOMMU_DTE_IRTE_ROOT_PTR_SHIFT               6

 /** Maximum encoded IRTE length (exclusive). */
 #define IOMMU_DTE_INTR_TAB_LEN_MAX                  12
 /** Gets the interrupt table entries (in bytes) given the DTE pointer. */
-#define IOMMU_GET_INTR_TAB_ENTRIES(a_pDte)          (UINT64_C(1) << (a_pDte)->n.u4IntrTableLength)
+#define IOMMU_DTE_GET_INTR_TAB_ENTRIES(a_pDte)      (UINT64_C(1) << (a_pDte)->n.u4IntrTableLength)
 /** Gets the interrupt table length (in bytes) given the DTE pointer. */
-#define IOMMU_GET_INTR_TAB_LEN(a_pDte)              (IOMMU_GET_INTR_TAB_ENTRIES(a_pDte) * sizeof(IRTE_T))
+#define IOMMU_DTE_GET_INTR_TAB_LEN(a_pDte)          (IOMMU_DTE_GET_INTR_TAB_ENTRIES(a_pDte) * sizeof(IRTE_T))
 /** Mask of interrupt control bits. */
 #define IOMMU_DTE_INTR_CTRL_MASK                    0x3
-/** Gets the interrupt control bits given the DTE pointer. */
-#define IOMMU_GET_INTR_CTRL(a_pDte)                 (((a_pDte)->au64[2] >> 60) & IOMMU_DTE_INTR_CTRL_MASK)
+/** Gets the interrupt control bits from the DTE. */
+#define IOMMU_DTE_GET_INTR_CTRL(a_pDte)             (((a_pDte)->au64[2] >> 60) & IOMMU_DTE_INTR_CTRL_MASK)
+/** Gets the ignore unmapped interrupt bit from DTE. */
+#define IOMMU_DTE_GET_IG(a_pDte)                    (((a_pDte)->au64[2] >> 5) & 0x1)
…
 #define IOMMU_MSI_DATA_IRTE_OFFSET_MASK             UINT32_C(0x000007ff)
 /** Gets the IRTE offset from the originating MSI interrupt message. */
-#define IOMMU_GET_IRTE_OFF(a_u32MsiData)            (((a_u32MsiData) & IOMMU_MSI_DATA_IRTE_OFFSET_MASK) * sizeof(IRTE_T));
+#define IOMMU_GET_IRTE_OFF(a_u32MsiData)            (((a_u32MsiData) & IOMMU_MSI_DATA_IRTE_OFFSET_MASK) * sizeof(IRTE_T))
…

(The remaining differences in this file are whitespace-only: the macro values were re-aligned into a single column.)
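The rename sweep also removed a latent bug in IOMMU_GET_IRTE_OFF: the old definition ended in a semicolon, which breaks any use of the macro inside a larger expression. A minimal sketch of the interrupt-table macros in use; the values are illustrative and uMsiData is a hypothetical MSI data word (IRTE_T is the 32-bit IRTE format accessed via Irte.u32 elsewhere in this changeset):

    DTE_T Dte;
    Dte.n.u4IntrTableLength = 6;                                      /* encodes 2^6 = 64 IRTEs */
    uint16_t const cEntries = IOMMU_DTE_GET_INTR_TAB_ENTRIES(&Dte);   /* 64 */
    uint16_t const cbTable  = IOMMU_DTE_GET_INTR_TAB_LEN(&Dte);       /* 64 * sizeof(IRTE_T) = 256 bytes */

    /* With the old trailing ';', expression uses failed to compile, e.g.: */
    if (IOMMU_GET_IRTE_OFF(uMsiData) != 0)   /* expanded to 'if ((...); != 0)' -> syntax error */
        { /* ... */ }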
trunk/src/VBox/Devices/Bus/DevIommuAmd.cpp
(diff r87786 → r87817)

…
 #define IOMMU_MAGIC                             0x10acce55

-/** Enable the IOTLBE cache. */
-#define IOMMU_WITH_IOTLBE_CACHE
+/** Enable the IOTLBE cache only in ring-3 for now, see @bugref{9654#c95}. */
+#ifdef IN_RING3
+# define IOMMU_WITH_IOTLBE_CACHE
+#endif
 /** Enable the interrupt cache. */
 #define IOMMU_WITH_IRTE_CACHE
…
 #if defined(IOMMU_WITH_IOTLBE_CACHE) || defined(IOMMU_WITH_IRTE_CACHE)
 # define IOMMU_WITH_DTE_CACHE
+/** The maximum number of device IDs in the cache. */
+# define IOMMU_DEV_CACHE_COUNT                  16
+/** An empty device ID. */
+# define IOMMU_DTE_CACHE_KEY_NIL                0
 #endif

 #ifdef IOMMU_WITH_IRTE_CACHE
-/** The maximum number of interrupt cache entries configurable through CFGM. */
-# define IOMMU_IRTE_CACHE_MAX                   32
-/** The default number of interrupt cache entries. */
-# define IOMMU_IRTE_CACHE_DEFAULT               16
-/** The minimum number of interrupt cache entries configurable through CFGM. */
-# define IOMMU_IRTE_CACHE_MIN                   8
+/** The maximum number of IRTE cache entries. */
+# define IOMMU_IRTE_CACHE_COUNT                 32
 /** A NIL IRTE cache entry key. */
 # define IOMMU_IRTE_CACHE_KEY_NIL               (~(uint32_t)0U)
…
 #ifdef IOMMU_WITH_DTE_CACHE
-/** The maximum number of DTE entries. */
-# define IOMMU_DTE_CACHE_MAX                    UINT16_MAX
-
-/** @name IOMMU_DTECACHE_F_XXX: DTE cache flags.
+/** @name IOMMU_DTE_CACHE_F_XXX: DTE cache flags.
  *
  * Some of these flags are "basic" i.e. they correspond directly to their bits in
…
  * @{ */
 /** The DTE is present. */
-# define IOMMU_DTECACHE_F_PRESENT               RT_BIT(0)
+# define IOMMU_DTE_CACHE_F_PRESENT              RT_BIT(0)
…
 /** @} */

(Every remaining IOMMU_DTECACHE_F_XXX identifier is renamed to IOMMU_DTE_CACHE_F_XXX with its bit unchanged: VALID (bit 1), IO_PERM (2), IO_PERM_READ (3), IO_PERM_WRITE (4), IO_PERM_RSVD (5), ADDR_TRANSLATE (6), SUPPRESS_ALL_IOPF (7), SUPPRESS_IOPF (8), INTR_MAP_VALID (9), IGNORE_UNMAPPED_INTR (10), IO_PAGE_FAULT_RAISED (11), INTR_CTRL_TARGET_ABORT (12), INTR_CTRL_FWD_UNMAPPED (13), INTR_CTRL_REMAPPED (14), INTR_CTRL_RSVD (15), as are the IO_PERM_SHIFT (3), IO_PERM_MASK (0x3), INTR_CTRL_SHIFT (12) and INTR_CTRL_MASK (0x3) helpers. One definition is new:)

+/** The number of bits to shift for ignore-unmapped interrupts bit. */
+# define IOMMU_DTE_CACHE_F_IGNORE_UNMAPPED_INTR_SHIFT   10
…
 typedef struct DTECACHE
 {
-    /** This device's flags, see IOMMU_DTECACHE_F_XXX. */
+    /** This device's flags, see IOMMU_DTE_CACHE_F_XXX. */
     uint16_t        fFlags;
     /** The domain ID assigned for this device by software. */
…
 typedef struct IRTECACHE
 {
+    /** The key, see IOMMU_IRTE_CACHE_KEY_MAKE. */
+    uint32_t        uKey;
     /** The IRTE. */
     IRTE_T          Irte;
-    /** The key, see IOMMU_IRTE_CACHE_KEY_MAKE. */
-    uint32_t        uKey;
 } IRTECACHE;
 /** Pointer to an IRTE cache struct. */
…
     /** The critsect that protects the cache from concurrent access. */
     PDMCRITSECT     CritSectCache;
-    /** Maps [DeviceId] to [DomainId]. */
-    PDTECACHE       paDteCache;
-#endif
-#ifdef IOMMU_WITH_IOTLBE_CACHE
-    /** Pointer to array of pre-allocated IOTLBEs. */
-    PIOTLBE         paIotlbes;
-    /** Maps [DomainId,Iova] to [IOTLBE]. */
-    AVLU64TREE      TreeIotlbe;
-    /** LRU list anchor for IOTLB entries. */
-    RTLISTANCHOR    LstLruIotlbe;
-    /** Index of the next unused IOTLB. */
-    uint32_t        idxUnusedIotlbe;
-    /** Number of cached IOTLB entries in the tree. */
-    uint32_t        cCachedIotlbes;
+    /** Array of device IDs. */
+    uint16_t        aDeviceIds[IOMMU_DEV_CACHE_COUNT];
+    /** Array of DTE cache entries. */
+    DTECACHE        aDteCache[IOMMU_DEV_CACHE_COUNT];
 #endif
 #ifdef IOMMU_WITH_IRTE_CACHE
-    /** Maps [DeviceId] to [IRTE]. */
-    PIRTECACHE      paIrteCache;
-    /** Maximum number of entries in the IRTE cache. */
-    uint16_t        cIrteCache;
-    /** Padding. */
-    uint16_t        auPadding[3];
+    /** Array of IRTE cache entries. */
+    IRTECACHE       aIrteCache[IOMMU_IRTE_CACHE_COUNT];
 #endif
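The "basic" flags are laid out so that 2-bit DTE fields can be copied to and from the cache flags by shift and mask alone, which is what iommuAmdGetBasicDevFlags and the cache lookups below rely on. A minimal sketch of the round trip, assuming pDte points at a valid DTE_T:

    /* Pack: copy the DTE's 2-bit I/O permission field into cache-flag bits 3-4. */
    uint16_t const fDtePerm = (uint16_t)((pDte->au64[0] >> IOMMU_IO_PERM_SHIFT) & IOMMU_IO_PERM_MASK);
    uint16_t fFlags = IOMMU_DTE_CACHE_F_PRESENT | IOMMU_DTE_CACHE_F_VALID
                    | (uint16_t)(fDtePerm << IOMMU_DTE_CACHE_F_IO_PERM_SHIFT);

    /* Unpack: recover the field later without touching the DTE again. */
    uint8_t const fPerm = (fFlags >> IOMMU_DTE_CACHE_F_IO_PERM_SHIFT) & IOMMU_DTE_CACHE_F_IO_PERM_MASK;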
…
 AssertCompileMemberAlignment(IOMMU, hMmio, 8);
 #ifdef IOMMU_WITH_DTE_CACHE
-AssertCompileMemberAlignment(IOMMU, paDteCache, 8);
-#endif
-#ifdef IOMMU_WITH_IOTLBE_CACHE
-AssertCompileMemberAlignment(IOMMU, paIotlbes, 8);
-AssertCompileMemberAlignment(IOMMU, TreeIotlbe, 8);
-AssertCompileMemberAlignment(IOMMU, LstLruIotlbe, 8);
+AssertCompileMemberAlignment(IOMMU, aDeviceIds, 8);
+AssertCompileMemberAlignment(IOMMU, aDteCache, 8);
 #endif
 #ifdef IOMMU_WITH_IRTE_CACHE
-AssertCompileMemberAlignment(IOMMU, paIrteCache, 8);
+AssertCompileMemberAlignment(IOMMU, aIrteCache, 8);
 #endif
 AssertCompileMemberAlignment(IOMMU, IommuBar, 8);
…
     /** The command thread handle. */
     R3PTRTYPE(PPDMTHREAD)   pCmdThread;
+#ifdef IOMMU_WITH_IOTLBE_CACHE
+    /** Pointer to array of pre-allocated IOTLBEs. */
+    PIOTLBE                 paIotlbes;
+    /** Maps [DomainId,Iova] to [IOTLBE]. */
+    AVLU64TREE              TreeIotlbe;
+    /** LRU list anchor for IOTLB entries. */
+    RTLISTANCHOR            LstLruIotlbe;
+    /** Index of the next unused IOTLB. */
+    uint32_t                idxUnusedIotlbe;
+    /** Number of cached IOTLB entries in the tree. */
+    uint32_t                cCachedIotlbes;
+#endif
 } IOMMUR3;
 /** Pointer to the ring-3 IOMMU device state. */
 typedef IOMMUR3 *PIOMMUR3;
+#ifdef IOMMU_WITH_IOTLBE_CACHE
+AssertCompileMemberAlignment(IOMMUR3, paIotlbes, 8);
+AssertCompileMemberAlignment(IOMMUR3, TreeIotlbe, 8);
+AssertCompileMemberAlignment(IOMMUR3, LstLruIotlbe, 8);
+#endif
…
 typedef struct IOTLBEFLUSHARG
 {
-    /** The IOMMU device state. */
-    PIOMMU      pIommu;
+    /** The ring-3 IOMMU device state. */
+    PIOMMUR3    pIommuR3;
     /** The domain ID to flush. */
     uint16_t    uDomainId;
…
 typedef struct IOTLBEINFOARG
 {
-    /** The IOMMU device state. */
-    PIOMMU          pIommu;
+    /** The ring-3 IOMMU device state. */
+    PIOMMUR3        pIommuR3;
     /** The info helper. */
     PCDBGFINFOHLP   pHlp;
…
  * @returns The number of entries in the event log.
- * @param   pThis   The IOMMU device state.
+ * @param   pThis   The shared IOMMU device state.
  */
 static uint32_t iommuAmdGetEvtLogEntryCount(PIOMMU pThis)
…
     if (pDte->n.u1Valid)
     {
-        fFlags |= IOMMU_DTECACHE_F_VALID;
-
+        fFlags |= IOMMU_DTE_CACHE_F_VALID;
+
+        /** @todo Skip the if checks here (shift/mask the relevant bits over). */
         if (pDte->n.u1SuppressAllPfEvents)
-            fFlags |= IOMMU_DTECACHE_F_SUPPRESS_ALL_IOPF;
+            fFlags |= IOMMU_DTE_CACHE_F_SUPPRESS_ALL_IOPF;
         if (pDte->n.u1SuppressPfEvents)
-            fFlags |= IOMMU_DTECACHE_F_SUPPRESS_IOPF;
+            fFlags |= IOMMU_DTE_CACHE_F_SUPPRESS_IOPF;

         uint16_t const fDtePerm = (pDte->au64[0] >> IOMMU_IO_PERM_SHIFT) & IOMMU_IO_PERM_MASK;
-        AssertCompile(IOMMU_DTECACHE_F_IO_PERM_MASK == IOMMU_IO_PERM_MASK);
-        fFlags |= fDtePerm << IOMMU_DTECACHE_F_IO_PERM_SHIFT;
+        AssertCompile(IOMMU_DTE_CACHE_F_IO_PERM_MASK == IOMMU_IO_PERM_MASK);
+        fFlags |= fDtePerm << IOMMU_DTE_CACHE_F_IO_PERM_SHIFT;
     }
…
     if (pDte->n.u1IntrMapValid)
     {
-        fFlags |= IOMMU_DTECACHE_F_INTR_MAP_VALID;
+        fFlags |= IOMMU_DTE_CACHE_F_INTR_MAP_VALID;
+
+        /** @todo Skip the if check here (shift/mask the relevant bit over). */
         if (pDte->n.u1IgnoreUnmappedIntrs)
-            fFlags |= IOMMU_DTECACHE_F_IGNORE_UNMAPPED_INTR;
-
-        uint16_t const fIntrCtrl = IOMMU_GET_INTR_CTRL(pDte);
-        AssertCompile(IOMMU_DTECACHE_F_INTR_CTRL_MASK == IOMMU_DTE_INTR_CTRL_MASK);
-        fFlags |= fIntrCtrl << IOMMU_DTECACHE_F_INTR_CTRL_SHIFT;
+            fFlags |= IOMMU_DTE_CACHE_F_IGNORE_UNMAPPED_INTR;
+
+        uint16_t const fIntrCtrl = IOMMU_DTE_GET_INTR_CTRL(pDte);
+        AssertCompile(IOMMU_DTE_CACHE_F_INTR_CTRL_MASK == IOMMU_DTE_INTR_CTRL_MASK);
+        fFlags |= fIntrCtrl << IOMMU_DTE_CACHE_F_INTR_CTRL_SHIFT;
     }
     return fFlags;
…
(New DTE cache primitives, added under #ifdef IOMMU_WITH_DTE_CACHE:)

+/**
+ * Looks up an entry in the DTE cache for the given device ID.
+ *
+ * @returns The index of the entry, or the cache capacity if no entry was found.
+ * @param   pThis   The shared IOMMU device state.
+ * @param   uDevId  The device ID (bus, device, function).
+ */
+DECLINLINE(uint16_t) iommuAmdDteCacheEntryLookup(PIOMMU pThis, uint16_t uDevId)
+{
+    uint16_t const cDeviceIds = RT_ELEMENTS(pThis->aDeviceIds);
+    for (uint16_t i = 0; i < cDeviceIds; i++)
+    {
+        if (pThis->aDeviceIds[i] == uDevId)
+            return i;
+    }
+    return cDeviceIds;
+}
+
+
+/**
+ * Gets a free/unused DTE cache entry.
+ *
+ * @returns The index of an unused entry, or cache capacity if the cache is full.
+ * @param   pThis   The shared IOMMU device state.
+ */
+DECLINLINE(uint16_t) iommuAmdDteCacheEntryGetUnused(PCIOMMU pThis)
+{
+    /*
+     * ASSUMES device ID 0 is the PCI host bridge or the IOMMU itself
+     * (the latter being an ugly hack) and cannot be a valid device ID.
+     */
+    uint16_t const cDeviceIds = RT_ELEMENTS(pThis->aDeviceIds);
+    for (uint16_t i = 0; i < cDeviceIds; i++)
+    {
+        if (!pThis->aDeviceIds[i])
+            return i;
+    }
+    return cDeviceIds;
+}
+
+
+/**
+ * Adds or updates the I/O device flags for the given device ID.
+ *
+ * @returns VBox status code.
+ * @retval  VERR_OUT_OF_RESOURCES if the cache is full.
+ *
+ * @param   pDevIns     The IOMMU instance data.
+ * @param   uDevId      The device ID (bus, device, function).
+ * @param   pDte        The device table entry.
+ * @param   fOrMask     The device flags (usually compound flags) to OR in with the
+ *                      basic flags, see IOMMU_DTE_CACHE_F_XXX.
+ */
+static int iommuAmdDteCacheAdd(PPDMDEVINS pDevIns, uint16_t uDevId, PCDTE_T pDte, uint16_t fOrMask)
+{
+    Assert(pDte);
+    Assert(uDevId);
+
+    int rc = VINF_SUCCESS;
+    uint16_t const fFlags    = iommuAmdGetBasicDevFlags(pDte) | IOMMU_DTE_CACHE_F_PRESENT | fOrMask;
+    uint16_t const uDomainId = pDte->n.u16DomainId;
+
+    PIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
+    IOMMU_LOCK_CACHE_NORET(pDevIns, pThis);
+
+    uint16_t const cDteCache = RT_ELEMENTS(pThis->aDteCache);
+    uint16_t idxDte = iommuAmdDteCacheEntryLookup(pThis, uDevId);
+    if (idxDte < cDteCache)
+    {
+        pThis->aDteCache[idxDte].fFlags    = fFlags;
+        pThis->aDteCache[idxDte].uDomainId = uDomainId;
+    }
+    else if ((idxDte = iommuAmdDteCacheEntryGetUnused(pThis)) < cDteCache)
+    {
+        pThis->aDeviceIds[idxDte]          = uDevId;
+        pThis->aDteCache[idxDte].fFlags    = fFlags;
+        pThis->aDteCache[idxDte].uDomainId = uDomainId;
+    }
+    else
+        rc = VERR_OUT_OF_RESOURCES;
+
+    IOMMU_UNLOCK_CACHE(pDevIns, pThis);
+    return rc;
+}
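Throughout this file a 16-bit device ID encodes the PCI bus/device/function triple. A small sketch of the decode used here and by the debug handlers further down; the VBOX_PCI_* shift/mask constants come from the PCI headers and the sample value is arbitrary:

    uint16_t const uDevId = 0x0168;                                     /* bus 1, device 13, function 0 */
    uint8_t  const uBus   = (uDevId >> VBOX_PCI_BUS_SHIFT)       & VBOX_PCI_BUS_MASK;        /* 0x01 */
    uint8_t  const uDev   = (uDevId >> VBOX_PCI_DEVFN_DEV_SHIFT) & VBOX_PCI_DEVFN_DEV_MASK;  /* 13   */
    uint8_t  const uFn    =  uDevId                              & VBOX_PCI_DEVFN_FUN_MASK;  /* 0    */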
+/**
+ * Adds one or more I/O device flags if the device is already present in the cache.
+ *
+ * @param   pDevIns     The IOMMU instance data.
+ * @param   uDevId      The device ID (bus, device, function).
+ * @param   fFlags      Additional device flags to OR with existing flags, see
+ *                      IOMMU_DTE_CACHE_F_XXX.
+ */
+static void iommuAmdDteCacheAddFlags(PPDMDEVINS pDevIns, uint16_t uDevId, uint16_t fFlags)
+{
+    PIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
+    IOMMU_LOCK_CACHE_NORET(pDevIns, pThis);
+
+    uint16_t const cDteCache = RT_ELEMENTS(pThis->aDteCache);
+    uint16_t const idxDte    = iommuAmdDteCacheEntryLookup(pThis, uDevId);
+    if (   idxDte < cDteCache
+        && (pThis->aDteCache[idxDte].fFlags & IOMMU_DTE_CACHE_F_PRESENT))
+        pThis->aDteCache[idxDte].fFlags |= fFlags;
+
+    IOMMU_UNLOCK_CACHE(pDevIns, pThis);
+}
+
+
+/**
+ * Removes a DTE cache entry.
+ *
+ * @param   pDevIns     The IOMMU instance data.
+ * @param   uDevId      The device ID to remove cache entries for.
+ */
+static void iommuAmdDteCacheRemove(PPDMDEVINS pDevIns, uint16_t uDevId)
+{
+    PIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
+    IOMMU_LOCK_CACHE_NORET(pDevIns, pThis);
+
+    uint16_t const cDteCache = RT_ELEMENTS(pThis->aDteCache);
+    uint16_t const idxDte    = iommuAmdDteCacheEntryLookup(pThis, uDevId);
+    if (idxDte < cDteCache)
+    {
+        pThis->aDteCache[idxDte].fFlags    = 0;
+        pThis->aDteCache[idxDte].uDomainId = 0;
+    }
+
+    IOMMU_UNLOCK_CACHE(pDevIns, pThis);
+}
+
+
+/**
+ * Removes all entries in the device table entry cache.
+ *
+ * @param   pDevIns     The IOMMU instance data.
+ */
+static void iommuAmdDteCacheRemoveAll(PPDMDEVINS pDevIns)
+{
+    PIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
+    IOMMU_LOCK_CACHE_NORET(pDevIns, pThis);
+    RT_ZERO(pThis->aDeviceIds);
+    RT_ZERO(pThis->aDteCache);
+    IOMMU_UNLOCK_CACHE(pDevIns, pThis);
+}
+#endif /* IOMMU_WITH_DTE_CACHE */
…
 #ifdef IOMMU_WITH_IOTLBE_CACHE
 /**
  * Moves the IOTLB entry to the least recently used slot.
  *
- * @param   pThis       The IOMMU device state.
- * @param   pIotlbe     The IOTLB entry.
- */
-static void iommuAmdIotlbEntryMoveToLru(PIOMMU pThis, PIOTLBE pIotlbe)
-{
-    if (!RTListNodeIsFirst(&pThis->LstLruIotlbe, &pIotlbe->NdLru))
+ * @param   pThisR3     The ring-3 IOMMU device state.
+ * @param   pIotlbe     The IOTLB entry to move.
+ */
+static void iommuAmdIotlbEntryMoveToLru(PIOMMUR3 pThisR3, PIOTLBE pIotlbe)
+{
+    if (!RTListNodeIsFirst(&pThisR3->LstLruIotlbe, &pIotlbe->NdLru))
     {
         RTListNodeRemove(&pIotlbe->NdLru);
-        RTListPrepend(&pThis->LstLruIotlbe, &pIotlbe->NdLru);
+        RTListPrepend(&pThisR3->LstLruIotlbe, &pIotlbe->NdLru);
     }
 }
…
 /**
  * Moves the IOTLB entry to the most recently used slot.
  *
- * @param   pThis       The IOMMU device state.
- * @param   pIotlbe     The IOTLB entry.
- */
-static void iommuAmdIotlbEntryMoveToMru(PIOMMU pThis, PIOTLBE pIotlbe)
-{
-    if (!RTListNodeIsLast(&pThis->LstLruIotlbe, &pIotlbe->NdLru))
+ * @param   pThisR3     The ring-3 IOMMU device state.
+ * @param   pIotlbe     The IOTLB entry to move.
+ */
+DECLINLINE(void) iommuAmdIotlbEntryMoveToMru(PIOMMUR3 pThisR3, PIOTLBE pIotlbe)
+{
+    if (!RTListNodeIsLast(&pThisR3->LstLruIotlbe, &pIotlbe->NdLru))
     {
         RTListNodeRemove(&pIotlbe->NdLru);
-        RTListAppend(&pThis->LstLruIotlbe, &pIotlbe->NdLru);
+        RTListAppend(&pThisR3->LstLruIotlbe, &pIotlbe->NdLru);
     }
 }
…
  * @returns VINF_SUCCESS.
- * @param   pNode   Pointer to an IOTLBE.
+ * @param   pNode   Pointer to an IOTLB entry to dump info.
  * @param   pvUser  Pointer to an IOTLBEINFOARG.
  */
…
     PCIOTLBEINFOARG pArgs = (PCIOTLBEINFOARG)pvUser;
     AssertPtr(pArgs);
-    AssertPtr(pArgs->pIommu);
+    AssertPtr(pArgs->pIommuR3);
     AssertPtr(pArgs->pHlp);
-    Assert(pArgs->pIommu->u32Magic == IOMMU_MAGIC);
+    //Assert(pArgs->pIommuCC->u32Magic == IOMMU_MAGIC);

     uint16_t const uDomainId = IOMMU_IOTLB_KEY_GET_DOMAIN_ID(pNode->Key);
…
     PCIOTLBEFLUSHARG pArgs = (PCIOTLBEFLUSHARG)pvUser;
     AssertPtr(pArgs);
-    AssertPtr(pArgs->pIommu);
-    Assert(pArgs->pIommu->u32Magic == IOMMU_MAGIC);
+    AssertPtr(pArgs->pIommuR3);
+    //Assert(pArgs->pIommuR3->u32Magic == IOMMU_MAGIC);

     uint16_t const uDomainId = IOMMU_IOTLB_KEY_GET_DOMAIN_ID(pNode->Key);
…
         PIOTLBE pIotlbe = (PIOTLBE)pNode;
         pIotlbe->fEvictPending = true;
-        iommuAmdIotlbEntryMoveToLru(pArgs->pIommu, (PIOTLBE)pNode);
+        iommuAmdIotlbEntryMoveToLru(pArgs->pIommuR3, (PIOTLBE)pNode);
     }
     return VINF_SUCCESS;
…
 /**
  * Inserts an IOTLB entry into the cache.
  *
- * @param   pThis           The IOMMU device state.
+ * @param   pThis           The shared IOMMU device state.
+ * @param   pThisR3         The ring-3 IOMMU device state.
  * @param   pIotlbe         The IOTLB entry to initialize and insert.
  * @param   uDomainId       The domain ID.
…
  * @param   pPageLookup     The I/O page lookup result of the access.
  */
-static void iommuAmdIotlbEntryInsert(PIOMMU pThis, PIOTLBE pIotlbe, uint16_t uDomainId, uint64_t uIova,
+static void iommuAmdIotlbEntryInsert(PIOMMU pThis, PIOMMUR3 pThisR3, PIOTLBE pIotlbe, uint16_t uDomainId, uint64_t uIova,
                                      PCIOPAGELOOKUP pPageLookup)
…
     /* Check if the entry already exists. */
-    PIOTLBE pFound = (PIOTLBE)RTAvlU64Get(&pThis->TreeIotlbe, pIotlbe->Core.Key);
+    PIOTLBE pFound = (PIOTLBE)RTAvlU64Get(&pThisR3->TreeIotlbe, pIotlbe->Core.Key);
     if (!pFound)
     {
         /* Insert the entry into the cache. */
-        bool const fInserted = RTAvlU64Insert(&pThis->TreeIotlbe, &pIotlbe->Core);
+        bool const fInserted = RTAvlU64Insert(&pThisR3->TreeIotlbe, &pIotlbe->Core);
         Assert(fInserted); NOREF(fInserted);
-        Assert(pThis->cCachedIotlbes < IOMMU_IOTLBE_MAX);
-        ++pThis->cCachedIotlbes;
+        Assert(pThisR3->cCachedIotlbes < IOMMU_IOTLBE_MAX);
+        ++pThisR3->cCachedIotlbes;
         STAM_COUNTER_INC(&pThis->StatIotlbeCached);
     }
…
  * @returns Pointer to the removed IOTLB entry, NULL if the entry wasn't found in
  *          the tree.
- * @param   pThis   The IOMMU device state.
- * @param   uKey    The key of the IOTLB entry to remove.
- */
-static PIOTLBE iommuAmdIotlbEntryRemove(PIOMMU pThis, AVLU64KEY uKey)
-{
-    PIOTLBE pIotlbe = (PIOTLBE)RTAvlU64Remove(&pThis->TreeIotlbe, uKey);
+ * @param   pThis   The shared IOMMU device state.
+ * @param   pThisR3 The ring-3 IOMMU device state.
+ * @param   uKey    The key of the IOTLB entry to remove.
+ */
+static PIOTLBE iommuAmdIotlbEntryRemove(PIOMMU pThis, PIOMMUR3 pThisR3, AVLU64KEY uKey)
+{
+    PIOTLBE pIotlbe = (PIOTLBE)RTAvlU64Remove(&pThisR3->TreeIotlbe, uKey);
     if (pIotlbe)
     {
…
         Assert(pIotlbe->Core.Key == IOMMU_IOTLB_KEY_NIL);

-        Assert(pThis->cCachedIotlbes > 0);
-        --pThis->cCachedIotlbes;
+        Assert(pThisR3->cCachedIotlbes > 0);
+        --pThisR3->cCachedIotlbes;
         STAM_COUNTER_DEC(&pThis->StatIotlbeCached);
     }
…
  * @returns Pointer to IOTLB entry if found, NULL otherwise.
- * @param   pThis       The IOMMU device state.
- * @param   uDomainId   The domain ID.
- * @param   uIova       The I/O virtual address.
- */
-static PIOTLBE iommuAmdIotlbLookup(PIOMMU pThis, uint64_t uDomainId, uint64_t uIova)
-{
+ * @param   pThis       The shared IOMMU device state.
+ * @param   pThisR3     The ring-3 IOMMU device state.
+ * @param   uDomainId   The domain ID.
+ * @param   uIova       The I/O virtual address.
+ */
+static PIOTLBE iommuAmdIotlbLookup(PIOMMU pThis, PIOMMUR3 pThisR3, uint64_t uDomainId, uint64_t uIova)
+{
+    RT_NOREF(pThis);
+
     uint64_t const uKey = IOMMU_IOTLB_KEY_MAKE(uDomainId, uIova);
-    PIOTLBE pIotlbe = (PIOTLBE)RTAvlU64Get(&pThis->TreeIotlbe, uKey);
+    PIOTLBE pIotlbe = (PIOTLBE)RTAvlU64Get(&pThisR3->TreeIotlbe, uKey);
     if (   pIotlbe
         && !pIotlbe->fEvictPending)
…
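The AVL tree is keyed by a single 64-bit value combining the domain ID with the 4K-aligned IOVA, so one RTAvlU64Get resolves both dimensions of the lookup. The real IOMMU_IOTLB_KEY_MAKE lives in this file's defines and its exact bit layout is not visible in the hunks above; a plausible sketch, assuming the domain ID occupies the upper bits and the page frame number the lower bits:

    /* Illustrative only -- the real IOMMU_IOTLB_KEY_MAKE layout may differ. */
    #define MY_IOTLB_KEY_MAKE(a_DomainId, a_uIova) \
        (  ((uint64_t)(a_DomainId) << 48) \
         | (((a_uIova) >> X86_PAGE_4K_SHIFT) & UINT64_C(0x0000ffffffffffff)))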
 /**
  * Adds an IOTLB entry to the cache.
  *
- * @param   pThis           The IOMMU device state.
+ * @param   pThis           The shared IOMMU device state.
+ * @param   pThisR3         The ring-3 IOMMU device state.
  * @param   uDomainId       The domain ID.
  * @param   uIova           The I/O virtual address.
  * @param   pPageLookup     The I/O page lookup result of the access.
  */
-static void iommuAmdIotlbAdd(PIOMMU pThis, uint16_t uDomainId, uint64_t uIova, PCIOPAGELOOKUP pPageLookup)
+static void iommuAmdIotlbAdd(PIOMMU pThis, PIOMMUR3 pThisR3, uint16_t uDomainId, uint64_t uIova, PCIOPAGELOOKUP pPageLookup)
 {
     Assert(!(uIova & X86_PAGE_4K_OFFSET_MASK));
…
     /*
      * If there are no unused IOTLB entries, evict the LRU entry.
      * Otherwise, get a new IOTLB entry from the pre-allocated list.
      */
-    if (pThis->idxUnusedIotlbe == IOMMU_IOTLBE_MAX)
+    if (pThisR3->idxUnusedIotlbe == IOMMU_IOTLBE_MAX)
     {
         /* Grab the least recently used entry. */
-        PIOTLBE pIotlbe = RTListGetFirst(&pThis->LstLruIotlbe, IOTLBE, NdLru);
+        PIOTLBE pIotlbe = RTListGetFirst(&pThisR3->LstLruIotlbe, IOTLBE, NdLru);
         Assert(pIotlbe);

         /* If the entry is in the cache, remove it. */
         if (pIotlbe->Core.Key != IOMMU_IOTLB_KEY_NIL)
-            iommuAmdIotlbEntryRemove(pThis, pIotlbe->Core.Key);
+            iommuAmdIotlbEntryRemove(pThis, pThisR3, pIotlbe->Core.Key);

         /* Initialize and insert the IOTLB entry into the cache. */
-        iommuAmdIotlbEntryInsert(pThis, pIotlbe, uDomainId, uIova, pPageLookup);
+        iommuAmdIotlbEntryInsert(pThis, pThisR3, pIotlbe, uDomainId, uIova, pPageLookup);

         /* Move the entry to the most recently used slot. */
-        iommuAmdIotlbEntryMoveToMru(pThis, pIotlbe);
+        iommuAmdIotlbEntryMoveToMru(pThisR3, pIotlbe);
     }
     else
     {
         /* Grab an unused IOTLB entry from the pre-allocated list. */
-        PIOTLBE pIotlbe = &pThis->paIotlbes[pThis->idxUnusedIotlbe];
-        ++pThis->idxUnusedIotlbe;
+        PIOTLBE pIotlbe = &pThisR3->paIotlbes[pThisR3->idxUnusedIotlbe];
+        ++pThisR3->idxUnusedIotlbe;

         /* Initialize and insert the IOTLB entry into the cache. */
-        iommuAmdIotlbEntryInsert(pThis, pIotlbe, uDomainId, uIova, pPageLookup);
+        iommuAmdIotlbEntryInsert(pThis, pThisR3, pIotlbe, uDomainId, uIova, pPageLookup);

         /* Add the entry to the most recently used slot. */
-        RTListAppend(&pThis->LstLruIotlbe, &pIotlbe->NdLru);
+        RTListAppend(&pThisR3->LstLruIotlbe, &pIotlbe->NdLru);
     }
 }
…
 static void iommuAmdIotlbRemoveAll(PPDMDEVINS pDevIns)
 {
-    PIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
+    PIOMMU   pThis   = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
+    PIOMMUCC pThisR3 = PDMDEVINS_2_DATA_CC(pDevIns, PIOMMUR3);
     IOMMU_LOCK_CACHE_NORET(pDevIns, pThis);

-    if (pThis->cCachedIotlbes > 0)
-    {
-        pThis->idxUnusedIotlbe = 0;
+    if (pThisR3->cCachedIotlbes > 0)
+    {
         size_t const cbIotlbes = sizeof(IOTLBE) * IOMMU_IOTLBE_MAX;
-        RT_BZERO(pThis->paIotlbes, cbIotlbes);
-        pThis->cCachedIotlbes = 0;
+        RT_BZERO(pThisR3->paIotlbes, cbIotlbes);
+        pThisR3->idxUnusedIotlbe = 0;
+        pThisR3->cCachedIotlbes  = 0;
         STAM_COUNTER_RESET(&pThis->StatIotlbeCached);
-        RTListInit(&pThis->LstLruIotlbe);
+        RTListInit(&pThisR3->LstLruIotlbe);
     }
…
     Assert(cbInvalidate >= X86_PAGE_4K_SIZE);

-    PIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
+    PIOMMU   pThis   = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
+    PIOMMUR3 pThisR3 = PDMDEVINS_2_DATA_CC(pDevIns, PIOMMUR3);
     IOMMU_LOCK_CACHE_NORET(pDevIns, pThis);

     do
     {
         uint64_t const uKey = IOMMU_IOTLB_KEY_MAKE(uDomainId, uIova);
-        PIOTLBE pIotlbe = iommuAmdIotlbEntryRemove(pThis, uKey);
+        PIOTLBE pIotlbe = iommuAmdIotlbEntryRemove(pThis, pThisR3, uKey);
         if (pIotlbe)
-            iommuAmdIotlbEntryMoveToLru(pThis, pIotlbe);
+            iommuAmdIotlbEntryMoveToLru(pThisR3, pIotlbe);
         uIova        += X86_PAGE_4K_SIZE;
         cbInvalidate -= X86_PAGE_4K_SIZE;
…
      * so they will eventually get evicted and re-cycled as the cache gets re-populated.
      */
-    PIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
+    PIOMMU   pThis   = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
+    PIOMMUR3 pThisR3 = PDMDEVINS_2_DATA_CC(pDevIns, PIOMMUR3);
     IOMMU_LOCK_CACHE_NORET(pDevIns, pThis);

     IOTLBEFLUSHARG Args;
-    Args.pIommu    = pThis;
+    Args.pIommuR3  = pThisR3;
     Args.uDomainId = uDomainId;
-    RTAvlU64DoWithAll(&pThis->TreeIotlbe, true /* fFromLeft */, iommuAmdIotlbEntryRemoveDomainId, &Args);
+    RTAvlU64DoWithAll(&pThisR3->TreeIotlbe, true /* fFromLeft */, iommuAmdIotlbEntryRemoveDomainId, &Args);

     IOMMU_UNLOCK_CACHE(pDevIns, pThis);
…
     Assert(cbAccess >= X86_PAGE_4K_SIZE);

-    PIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
+    PIOMMU   pThis   = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
+    PIOMMUR3 pThisR3 = PDMDEVINS_2_DATA_CC(pDevIns, PIOMMUR3);

     /* Add IOTLB entries for every page in the access. */
…
     do
     {
-        iommuAmdIotlbAdd(pThis, uDomainId, uIova, &PageLookup);
+        iommuAmdIotlbAdd(pThis, pThisR3, uDomainId, uIova, &PageLookup);
         uIova                += X86_PAGE_4K_SIZE;
         PageLookup.GCPhysSpa += X86_PAGE_4K_SIZE;
…
  * @returns Index of the found entry, or cache capacity if not found.
- * @param   pThis       The IOMMU device state.
+ * @param   pThis       The shared IOMMU device state.
  * @param   uDevId      The device ID (bus, device, function).
  * @param   offIrte     The offset into the interrupt remap table.
…
     uint32_t const uKey = IOMMU_IRTE_CACHE_KEY_MAKE(uDevId, offIrte);
-    uint16_t const cIrteCache = pThis->cIrteCache;
+    uint16_t const cIrteCache = RT_ELEMENTS(pThis->aIrteCache);
     for (uint16_t i = 0; i < cIrteCache; i++)
-    {
-        PCIRTECACHE pIrteCache = &pThis->paIrteCache[i];
-        if (pIrteCache->uKey == uKey)
+        if (pThis->aIrteCache[i].uKey == uKey)
             return i;
-    }
     return cIrteCache;
 }
…
 /**
- * Gets an free/unused IRTE cache entry.
+ * Gets a free/unused IRTE cache entry.
  *
  * @returns The index of an unused entry, or cache capacity if the cache is full.
- * @param   pThis   The IOMMU device state.
+ * @param   pThis   The shared IOMMU device state.
  */
 static uint16_t iommuAmdIrteCacheEntryGetUnused(PCIOMMU pThis)
 {
-    uint16_t const cIrteCache = pThis->cIrteCache;
+    uint16_t const cIrteCache = RT_ELEMENTS(pThis->aIrteCache);
     for (uint16_t i = 0; i < cIrteCache; i++)
-    {
-        PCIRTECACHE pIrteCache = &pThis->paIrteCache[i];
-        if (pIrteCache->uKey == IOMMU_IRTE_CACHE_KEY_NIL)
-        {
-            Assert(!pIrteCache->Irte.u32);
+        if (pThis->aIrteCache[i].uKey == IOMMU_IRTE_CACHE_KEY_NIL)
+        {
+            Assert(!pThis->aIrteCache[i].Irte.u32);
             return i;
         }
-    }
     return cIrteCache;
 }
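The IRTE cache key packs the 16-bit device ID together with the IRTE table offset into one 32-bit value, which is why IOMMU_IRTE_CACHE_KEY_NIL (~0U) can never collide with a real entry: offset 0xffff is impossible since sizeof(IRTE) is a multiple of 4 (asserted further down). The exact packing is not shown in these hunks; a plausible sketch:

    /* Illustrative only -- the real IOMMU_IRTE_CACHE_KEY_MAKE/GET macros may differ. */
    #define MY_IRTE_CACHE_KEY_MAKE(a_uDevId, a_offIrte)   (((uint32_t)(a_uDevId) << 16) | (a_offIrte))
    #define MY_IRTE_CACHE_KEY_GET_DEVICE_ID(a_uKey)       ((uint16_t)((a_uKey) >> 16))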
(iommuAmdIrteCacheLookup: after taking the cache lock, the old body indexed
pThis->paDteCache[uDevId] directly; it is replaced wholesale by a lookup into
the fixed arrays:)

+    uint16_t const idxDteCache = iommuAmdDteCacheEntryLookup(pThis, uDevId);
+    if (idxDteCache < RT_ELEMENTS(pThis->aDteCache))
+    {
+        PCDTECACHE pDteCache = &pThis->aDteCache[idxDteCache];
+        if ((pDteCache->fFlags & (IOMMU_DTE_CACHE_F_PRESENT | IOMMU_DTE_CACHE_F_INTR_MAP_VALID))
+                              == (IOMMU_DTE_CACHE_F_PRESENT | IOMMU_DTE_CACHE_F_INTR_MAP_VALID))
+        {
+            Assert((pMsiIn->Addr.u64 & VBOX_MSI_ADDR_ADDR_MASK) == VBOX_MSI_ADDR_BASE);   /* Paranoia. */
+
+            /* Currently, we only cache remapping of fixed and arbitrated interrupts. */
+            uint8_t const u8DeliveryMode = pMsiIn->Data.n.u3DeliveryMode;
+            if (u8DeliveryMode <= VBOX_MSI_DELIVERY_MODE_LOWEST_PRIO)
+            {
+                uint8_t const uIntrCtrl = (pDteCache->fFlags >> IOMMU_DTE_CACHE_F_INTR_CTRL_SHIFT)
+                                        & IOMMU_DTE_CACHE_F_INTR_CTRL_MASK;
+                if (uIntrCtrl == IOMMU_INTR_CTRL_REMAP)
+                {
+                    /* Interrupt table length has been verified prior to adding entries to the cache. */
+                    uint16_t const offIrte      = IOMMU_GET_IRTE_OFF(pMsiIn->Data.u32);
+                    uint16_t const idxIrteCache = iommuAmdIrteCacheEntryLookup(pThis, uDevId, offIrte);
+                    if (idxIrteCache < RT_ELEMENTS(pThis->aIrteCache))
+                    {
+                        PCIRTE_T pIrte = &pThis->aIrteCache[idxIrteCache].Irte;
+                        Assert(pIrte->n.u1RemapEnable);
+                        Assert(pIrte->n.u3IntrType <= VBOX_MSI_DELIVERY_MODE_LOWEST_PRIO);
+                        iommuAmdIrteRemapMsi(pMsiIn, pMsiOut, pIrte);
+                        rc = VINF_SUCCESS;
+                    }
+                }
+                else if (uIntrCtrl == IOMMU_INTR_CTRL_FWD_UNMAPPED)
+                {
+                    *pMsiOut = *pMsiIn;
+                    rc = VINF_SUCCESS;
+                }
+            }
+        }
+        else if (pDteCache->fFlags & IOMMU_DTE_CACHE_F_PRESENT)
+        {
+            *pMsiOut = *pMsiIn;
+            rc = VINF_SUCCESS;
+        }
+    }
…
(iommuAmdIrteCacheAdd:)
     Assert(offIrte != 0xffff);  /* Shouldn't be a valid IRTE table offset since sizeof(IRTE) is a multiple of 4. */

+    int rc = VINF_SUCCESS;
     PIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
     IOMMU_LOCK_CACHE_NORET(pDevIns, pThis);

     /* Find an existing entry or get an unused slot. */
-    uint16_t const cIrteCache = pThis->cIrteCache;
-    uint16_t idxIrteCache = iommuAmdIrteCacheEntryLookup(pThis, uDevId, offIrte);
-    if (idxIrteCache == pThis->cIrteCache)
-        idxIrteCache = iommuAmdIrteCacheEntryGetUnused(pThis);
-
-    /* Update the cache entry. */
-    int rc;
-    if (idxIrteCache < cIrteCache)
-    {
-        PIRTECACHE pIrteCache = &pThis->paIrteCache[idxIrteCache];
-        pIrteCache->uKey     = IOMMU_IRTE_CACHE_KEY_MAKE(uDevId, offIrte);
-        pIrteCache->Irte.u32 = pIrte->u32;
-        rc = VINF_SUCCESS;
+    uint16_t const cIrteCache   = RT_ELEMENTS(pThis->aIrteCache);
+    uint16_t       idxIrteCache = iommuAmdIrteCacheEntryLookup(pThis, uDevId, offIrte);
+    if (   idxIrteCache < cIrteCache
+        || (idxIrteCache = iommuAmdIrteCacheEntryGetUnused(pThis)) < cIrteCache)
+    {
+        pThis->aIrteCache[idxIrteCache].uKey = IOMMU_IRTE_CACHE_KEY_MAKE(uDevId, offIrte);
+        pThis->aIrteCache[idxIrteCache].Irte = *pIrte;
     }
     else
…
(iommuAmdIrteCacheRemove:)
     PIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
     IOMMU_LOCK_CACHE_NORET(pDevIns, pThis);
-    uint16_t const cIrteCache = pThis->cIrteCache;
+    uint16_t const cIrteCache = RT_ELEMENTS(pThis->aIrteCache);
     for (uint16_t i = 0; i < cIrteCache; i++)
     {
-        PIRTECACHE pIrteCache = &pThis->paIrteCache[i];
+        PIRTECACHE pIrteCache = &pThis->aIrteCache[i];
         if (uDevId == IOMMU_IRTE_CACHE_KEY_GET_DEVICE_ID(pIrteCache->uKey))
         {
             pIrteCache->uKey     = IOMMU_IRTE_CACHE_KEY_NIL;
             pIrteCache->Irte.u32 = 0;
+            /* There could be multiple IRTE entries for a device ID, continue searching. */
         }
     }
…
(iommuAmdIrteCacheRemoveAll:)
-    uint16_t const cIrteCache = pThis->cIrteCache;
+    uint16_t const cIrteCache = RT_ELEMENTS(pThis->aIrteCache);
     for (uint16_t i = 0; i < cIrteCache; i++)
     {
-        PIRTECACHE pIrteCache = &pThis->paIrteCache[i];
-        pIrteCache->uKey     = IOMMU_IRTE_CACHE_KEY_NIL;
-        pIrteCache->Irte.u32 = 0;
+        pThis->aIrteCache[i].uKey     = IOMMU_IRTE_CACHE_KEY_NIL;
+        pThis->aIrteCache[i].Irte.u32 = 0;
     }
     IOMMU_UNLOCK_CACHE(pDevIns, pThis);
…
(The old dynamic-cache updaters are deleted; their callers now use the
iommuAmdDteCacheAdd/AddFlags/Remove/RemoveAll primitives introduced above:)

-#ifdef IOMMU_WITH_DTE_CACHE
-/**
- * Updates the I/O device flags for the given device ID.
- * …  Pass 0 as fOrMask to flush the DTE from the cache.
- */
-static void iommuAmdDteCacheUpdate(PPDMDEVINS pDevIns, uint16_t uDevId, PCDTE_T pDte, uint16_t fOrMask)
-{ … wrote pThis->paDteCache[uDevId], or cleared it when fOrMask lacked IOMMU_DTECACHE_F_PRESENT … }
-
-/**
- * Sets one or more I/O device flags if the device is present in the cache.
- */
-static void iommuAmdDteCacheSetFlags(PPDMDEVINS pDevIns, uint16_t uDevId, uint16_t fDevIoFlags)
-{ … ORed fDevIoFlags into pThis->paDteCache[uDevId].fFlags … }
-
-/**
- * Removes all entries in the device table entry cache.
- */
-static void iommuAmdDteCacheRemoveAll(PPDMDEVINS pDevIns)
-{ … RT_BZERO of the paDteCache array (IOMMU_DTE_CACHE_MAX entries) … }
-#endif /* IOMMU_WITH_DTE_CACHE */
…
 /**
  * Atomically reads the control register without locking the IOMMU device.
  *
  * @returns The control register.
- * @param   pThis   The IOMMU device state.
+ * @param   pThis   The shared IOMMU device state.
  */
 DECL_FORCE_INLINE(IOMMU_CTRL_T) iommuAmdGetCtrlUnlocked(PCIOMMU pThis)
…
  * @param   pDevIns     The IOMMU instance data.
- * @param   fIoDevFlags The I/O device flags, see IOMMU_DTECACHE_F_XXX.
+ * @param   fIoDevFlags The I/O device flags, see IOMMU_DTE_CACHE_F_XXX.
  * @param   pIrte       The interrupt remapping table entry, can be NULL.
  * @param   enmOp       The IOMMU operation being performed.
…
     PCEVT_GENERIC_T pEvent = (PCEVT_GENERIC_T)pEvtIoPageFault;

-#ifdef IOMMU_WITH_IOTLBE_CACHE
-# define IOMMU_DTE_CACHE_SET_PF_RAISED(a_pDevIns, a_DevId)  iommuAmdDteCacheSetFlags((a_pDevIns), (a_DevId), \
-                                                                                    IOMMU_DTECACHE_F_IO_PAGE_FAULT_RAISED)
+#ifdef IOMMU_WITH_DTE_CACHE
+# define IOMMU_DTE_CACHE_SET_PF_RAISED(a_pDevIns, a_DevId)  iommuAmdDteCacheAddFlags((a_pDevIns), (a_DevId), \
+                                                                                    IOMMU_DTE_CACHE_F_IO_PAGE_FAULT_RAISED)
 #else
 # define IOMMU_DTE_CACHE_SET_PF_RAISED(a_pDevIns, a_DevId)  do { } while (0)
…
         || enmOp == IOMMUOP_MEM_WRITE)
     {
-        uint16_t const fSuppressIopf    = IOMMU_DTECACHE_F_VALID
-                                        | IOMMU_DTECACHE_F_SUPPRESS_IOPF
-                                        | IOMMU_DTECACHE_F_IO_PAGE_FAULT_RAISED;
-        uint16_t const fSuppressAllIopf = IOMMU_DTECACHE_F_VALID
-                                        | IOMMU_DTECACHE_F_SUPPRESS_ALL_IOPF;
+        uint16_t const fSuppressIopf    = IOMMU_DTE_CACHE_F_VALID
+                                        | IOMMU_DTE_CACHE_F_SUPPRESS_IOPF | IOMMU_DTE_CACHE_F_IO_PAGE_FAULT_RAISED;
+        uint16_t const fSuppressAllIopf = IOMMU_DTE_CACHE_F_VALID | IOMMU_DTE_CACHE_F_SUPPRESS_ALL_IOPF;
         if (   (fIoDevFlags & fSuppressAllIopf) == fSuppressAllIopf
             || (fIoDevFlags & fSuppressIopf) == fSuppressIopf)
…
     else if (enmOp == IOMMUOP_INTR_REQ)
     {
-        uint16_t const fSuppressIopf = IOMMU_DTECACHE_F_VALID
-                                     | IOMMU_DTECACHE_F_INTR_MAP_VALID
-                                     | IOMMU_DTECACHE_F_IGNORE_UNMAPPED_INTR;
+        uint16_t const fSuppressIopf = IOMMU_DTE_CACHE_F_INTR_MAP_VALID | IOMMU_DTE_CACHE_F_IGNORE_UNMAPPED_INTR;
         if ((fIoDevFlags & fSuppressIopf) == fSuppressIopf)
             fSuppressEvtLogging = true;
-        else if (pIrte)
+        else if (pIrte)     /** @todo Make this compulsory and assert if it isn't provided. */
             fSuppressEvtLogging = pIrte->n.u1SuppressIoPf;
     }
…
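Both suppression checks above use the all-bits-set idiom: (fFlags & fMask) == fMask is true only when every flag in fMask is set, whereas a plain (fFlags & fMask) fires when any one of them is. A short illustration with the flags from this changeset:

    uint16_t const fMask  = IOMMU_DTE_CACHE_F_VALID | IOMMU_DTE_CACHE_F_SUPPRESS_ALL_IOPF;
    uint16_t const fFlags = IOMMU_DTE_CACHE_F_VALID;      /* suppress-all not set */
    bool const fAny = RT_BOOL(fFlags & fMask);            /* true  */
    bool const fAll = (fFlags & fMask) == fMask;          /* false -- the fault is not suppressed */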
             STAM_COUNTER_INC(&pThis->StatAccessDtePermDenied);

-#if defined(IN_RING3) && defined(IOMMU_WITH_IOTLBE_CACHE)
+#ifdef IOMMU_WITH_IOTLBE_CACHE
             if (RT_SUCCESS(rc))
             {
                 /* Update that addresses requires translation (cumulative permissions of DTE and I/O page tables). */
-                iommuAmdDteCacheUpdate(pDevIns, uDevId, &Dte, IOMMU_DTECACHE_F_PRESENT | IOMMU_DTECACHE_F_ADDR_TRANSLATE);
+                iommuAmdDteCacheAdd(pDevIns, uDevId, &Dte, IOMMU_DTE_CACHE_F_ADDR_TRANSLATE);
                 /* Update IOTLB for the contiguous range of I/O virtual addresses. */
                 iommuAmdIotlbAddRange(pDevIns, Dte.n.u16DomainId, uIova & X86_PAGE_4K_BASE_MASK, cbPages,
…
             rc = VINF_SUCCESS;

-#if defined(IN_RING3) && defined(IOMMU_WITH_IOTLBE_CACHE)
+#ifdef IOMMU_WITH_IOTLBE_CACHE
             /* Update that addresses permissions of DTE apply (but omit address translation). */
-            iommuAmdDteCacheUpdate(pDevIns, uDevId, &Dte, IOMMU_DTECACHE_F_PRESENT | IOMMU_DTECACHE_F_IO_PERM);
+            iommuAmdDteCacheAdd(pDevIns, uDevId, &Dte, IOMMU_DTE_CACHE_F_IO_PERM);
 #endif
…
         cbContiguous = cbAccess;

-#if defined(IN_RING3) && defined(IOMMU_WITH_IOTLBE_CACHE)
+#ifdef IOMMU_WITH_IOTLBE_CACHE
         /* Update that addresses don't require translation (nor permission checks) but a DTE is present. */
-        iommuAmdDteCacheUpdate(pDevIns, uDevId, &Dte, IOMMU_DTECACHE_F_PRESENT);
+        iommuAmdDteCacheAdd(pDevIns, uDevId, &Dte, 0 /* fFlags */);
 #endif
…
-#if defined(IN_RING3) && defined(IOMMU_WITH_IOTLBE_CACHE)
+#ifdef IOMMU_WITH_IOTLBE_CACHE
 /**
  * I/O page lookup callback for finding an I/O page from the IOTLB.
…
     Assert(!(uIovaPage & X86_PAGE_4K_OFFSET_MASK));

-    PIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
+    PIOMMU   pThis   = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
+    PIOMMUR3 pThisR3 = PDMDEVINS_2_DATA_CC(pDevIns, PIOMMUR3);

     STAM_PROFILE_ADV_START(&pThis->StatProfIotlbeLookup, a);
-    PCIOTLBE pIotlbe = iommuAmdIotlbLookup(pThis, pAux->uDomainId, uIovaPage);
+    PCIOTLBE pIotlbe = iommuAmdIotlbLookup(pThis, pThisR3, pAux->uDomainId, uIovaPage);
     STAM_PROFILE_ADV_STOP(&pThis->StatProfIotlbeLookup, a);
     if (pIotlbe)
…
(iommuAmdCacheLookup:)
     PIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);

+#define IOMMU_IOTLB_LOOKUP_FAILED(a_rc) \
+    do { \
+        *pGCPhysSpa    = NIL_RTGCPHYS; \
+        *pcbContiguous = 0; \
+        rc = (a_rc); \
+    } while (0)
+
     /*
      * We hold the cache lock across both the DTE and the IOTLB lookups (if any) because
…
     /* Lookup the DTE cache entry. */
(The old unguarded block dereferenced pThis->paDteCache[uDevId]; it is replaced wholesale:)
+    uint16_t const idxDteCache = iommuAmdDteCacheEntryLookup(pThis, uDevId);
+    if (idxDteCache < RT_ELEMENTS(pThis->aDteCache))
+    {
+        PCDTECACHE pDteCache = &pThis->aDteCache[idxDteCache];
+        if ((pDteCache->fFlags & (IOMMU_DTE_CACHE_F_PRESENT | IOMMU_DTE_CACHE_F_VALID | IOMMU_DTE_CACHE_F_ADDR_TRANSLATE))
+                              == (IOMMU_DTE_CACHE_F_PRESENT | IOMMU_DTE_CACHE_F_VALID | IOMMU_DTE_CACHE_F_ADDR_TRANSLATE))
+        {
+            /* Lookup IOTLB entries. */
+            IOADDRRANGE AddrIn;
+            AddrIn.uAddr = uIova;
+            AddrIn.cb    = cbAccess;
+            AddrIn.fPerm = fPerm;
+
+            IOMMUOPAUX Aux;
+            Aux.enmOp     = enmOp;
+            Aux.pDte      = NULL;
+            Aux.uDeviceId = uDevId;
+            Aux.uDomainId = pDteCache->uDomainId;
+
+            IOADDRRANGE AddrOut;
+            rc = iommuAmdLookupIoAddrRange(pDevIns, iommuAmdCacheLookupPage, &AddrIn, &Aux, &AddrOut, NULL /* pcbPages */);
+            Assert(AddrOut.cb <= cbAccess);
+            *pGCPhysSpa    = AddrOut.uAddr;
+            *pcbContiguous = AddrOut.cb;
+        }
+        else if ((pDteCache->fFlags & (IOMMU_DTE_CACHE_F_PRESENT | IOMMU_DTE_CACHE_F_VALID | IOMMU_DTE_CACHE_F_IO_PERM))
+                                   == (IOMMU_DTE_CACHE_F_PRESENT | IOMMU_DTE_CACHE_F_VALID | IOMMU_DTE_CACHE_F_IO_PERM))
+        {
+            /* Address translation is disabled, but DTE permissions apply. */
+            Assert(!(pDteCache->fFlags & IOMMU_DTE_CACHE_F_ADDR_TRANSLATE));
+            uint8_t const fDtePerm = (pDteCache->fFlags >> IOMMU_DTE_CACHE_F_IO_PERM_SHIFT) & IOMMU_DTE_CACHE_F_IO_PERM_MASK;
+            if ((fDtePerm & fPerm) == fPerm)
+            {
+                *pGCPhysSpa    = uIova;
+                *pcbContiguous = cbAccess;
+                rc = VINF_SUCCESS;
+            }
+            else
+                IOMMU_IOTLB_LOOKUP_FAILED(VERR_IOMMU_ADDR_ACCESS_DENIED);
+        }
+        else if (pDteCache->fFlags & IOMMU_DTE_CACHE_F_PRESENT)
+        {
+            /* Forward addresses untranslated, without checking permissions. */
+            *pGCPhysSpa    = uIova;
+            *pcbContiguous = cbAccess;
+            rc = VINF_SUCCESS;
+        }
+        else
+            IOMMU_IOTLB_LOOKUP_FAILED(VERR_NOT_FOUND);
+    }
+    else
+        IOMMU_IOTLB_LOOKUP_FAILED(VERR_NOT_FOUND);

     IOMMU_UNLOCK_CACHE(pDevIns, pThis);

-    /* Raise event if address translation resulted in a permission failure. */
-    if (rc == VERR_IOMMU_ADDR_ACCESS_DENIED)
-    {
-        EVT_IO_PAGE_FAULT_T EvtIoPageFault;
-        iommuAmdIoPageFaultEventInit(uDevId, pDteCache->uDomainId, uIova, true /* fPresent */,
-                                     false /* fRsvdNotZero */, true /* fPermDenied */, enmOp, &EvtIoPageFault);
-        iommuAmdIoPageFaultEventRaise(pDevIns, pDteCache->fFlags, NULL /* pIrte */, enmOp, &EvtIoPageFault,
-                                      kIoPageFaultType_PermDenied);
-    }
-
     return rc;
-}
-#endif /* IN_RING3 && IOMMU_WITH_IOTLBE_CACHE */
+
+#undef IOMMU_IOTLB_LOOKUP_FAILED
+}
+#endif /* IOMMU_WITH_IOTLBE_CACHE */
…
     int rc;
-#if defined(IN_RING3) && defined(IOMMU_WITH_IOTLBE_CACHE)
+#ifdef IOMMU_WITH_IOTLBE_CACHE
     /* Lookup the IOVA from the cache. */
     rc = iommuAmdCacheLookup(pDevIns, uDevId, uIova, cbAccess, fPerm, enmOp, pGCPhysSpa, pcbContiguous);
…
         return rc;
     }
-    if (rc == VERR_OUT_OF_RANGE)
+    if (rc != VERR_OUT_OF_RANGE)
+    { /* likely */ }
+    else
     {
         /* Access stopped when translations resulted in non-contiguous memory, let caller resume access. */
…
         return VINF_SUCCESS;
     }
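IOMMU_IOTLB_LOOKUP_FAILED is wrapped in do { ... } while (0) so that the three statements behave as a single statement wherever the macro is used. Without the wrapper, an un-braced else body would only bind the first statement. A minimal sketch of the pitfall, with a hypothetical unwrapped variant:

    /* Hypothetical macro WITHOUT the do/while wrapper: */
    #define BAD_LOOKUP_FAILED(a_rc)  *pGCPhysSpa = NIL_RTGCPHYS; *pcbContiguous = 0; rc = (a_rc)

    if (fFound)
        rc = VINF_SUCCESS;
    else
        BAD_LOOKUP_FAILED(VERR_NOT_FOUND);   /* only '*pGCPhysSpa = ...' is inside the else branch! */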
-    if (rc == VERR_IOMMU_ADDR_ACCESS_DENIED)
-    {
-        /* Access denied due to insufficient permissions. */
-        STAM_COUNTER_INC(&pThis->StatAccessCachePermDenied);
-        return rc;
-    }
-
-    /* Access incomplete as not all pages were in the cache. Lookup the rest from the device table. */
-    AssertMsg(rc == VERR_NOT_FOUND, ("Invalid cache lookup result: %Rrc\n", rc));
+
+    /*
+     * Access incomplete as not all pages were in the cache.
+     * Or permissions were denied for the access (which typically doesn't happen)
+     * so go through the slower path and raise the required event.
+     */
     AssertMsg(*pcbContiguous < cbAccess, ("Invalid size: cbContiguous=%zu cbAccess=%zu\n", *pcbContiguous, cbAccess));
     uIova    += *pcbContiguous;
     cbAccess -= *pcbContiguous;
+    /* Note: permission denials are currently also counted as cache misses. */
     STAM_COUNTER_INC(&pThis->StatAccessCacheMiss);
 #endif
…
     RTGCPHYS const GCPhysIntrTable = pDte->au64[2] & IOMMU_DTE_IRTE_ROOT_PTR_MASK;
-    uint16_t const cbIntrTable     = IOMMU_GET_INTR_TAB_LEN(pDte);
+    uint16_t const cbIntrTable     = IOMMU_DTE_GET_INTR_TAB_LEN(pDte);
     uint16_t const offIrte         = IOMMU_GET_IRTE_OFF(uDataIn);
     RTGCPHYS const GCPhysIrte      = GCPhysIntrTable + offIrte;
…
         iommuAmdIrteRemapMsi(pMsiIn, pMsiOut, &Irte);
 #ifdef IOMMU_WITH_IRTE_CACHE
-        /* Add/Update the interrupt cache with the remapped results. */
-        uint16_t const offIrte = IOMMU_GET_IRTE_OFF(uMsiInData);
-        int const rcUpdate = iommuAmdIrteCacheAdd(pDevIns, uDevId, offIrte, &Irte);
-        if (RT_FAILURE(rcUpdate))
-            LogRelMax(1, ("%s: Warning! Interrupt cache full. Consider increasing cache capacity.\n", IOMMU_LOG_PFX));
+        iommuAmdIrteCacheAdd(pDevIns, uDevId, IOMMU_GET_IRTE_OFF(uMsiInData), &Irte);
 #endif
         return VINF_SUCCESS;
…
 static int iommuAmdIntrTableLookup(PPDMDEVINS pDevIns, uint16_t uDevId, IOMMUOP enmOp, PCMSIMSG pMsiIn, PMSIMSG pMsiOut)
 {
-    /* Read the device table entry from memory. */
     LogFlowFunc(("uDevId=%#x (%#x:%#x:%#x) enmOp=%u\n", uDevId,
                  ((uDevId >> VBOX_PCI_BUS_SHIFT) & VBOX_PCI_BUS_MASK),
                  ((uDevId >> VBOX_PCI_DEVFN_DEV_SHIFT) & VBOX_PCI_DEVFN_DEV_MASK), (uDevId & VBOX_PCI_DEVFN_FUN_MASK), enmOp));

+    /* Read the device table entry from memory. */
     DTE_T Dte;
     int rc = iommuAmdDteRead(pDevIns, uDevId, enmOp, &Dte);
     if (RT_SUCCESS(rc))
     {
+#ifdef IOMMU_WITH_IRTE_CACHE
+        int rc2 = iommuAmdDteCacheAdd(pDevIns, uDevId, &Dte, 0 /* fFlags */);
+        if (RT_FAILURE(rc2))
+        {
+            LogRelMax(10, ("%s: IOMMU DTE cache is full.\n", IOMMU_LOG_PFX));
+        }
+#endif
         /* If the DTE is not valid, all interrupts are forwarded without remapping. */
         if (Dte.n.u1IntrMapValid)
…
             uint64_t const fRsvd0 = Dte.au64[2] & ~IOMMU_DTE_QWORD_2_VALID_MASK;
             uint64_t const fRsvd1 = Dte.au64[3] & ~IOMMU_DTE_QWORD_3_VALID_MASK;
-            if (RT_LIKELY(   !fRsvd0
-                          && !fRsvd1))
+            if (RT_LIKELY(!fRsvd0 && !fRsvd1))
             { /* likely */ }
             else
             {
-                LogFunc(("Invalid reserved bits in DTE (u64[2]=%#RX64 u64[3]=%#RX64) -> Illegal DTE\n", fRsvd0,
-                         fRsvd1));
+                LogFunc(("Invalid reserved bits in DTE (u64[2]=%#RX64 u64[3]=%#RX64) -> Illegal DTE\n", fRsvd0, fRsvd1));
                 EVT_ILLEGAL_DTE_T Event;
                 iommuAmdIllegalDteEventInit(uDevId, pMsiIn->Addr.u64, true /* fRsvdNotZero */, enmOp, &Event);
…
                 return VERR_IOMMU_INTR_REMAP_FAILED;
             }
-
-#ifdef IOMMU_WITH_IRTE_CACHE
-            /* Update the DTE cache -after- we've checked reserved bits (above) when the interrupt map is valid. */
-            iommuAmdDteCacheUpdate(pDevIns, uDevId, &Dte, IOMMU_DTECACHE_F_PRESENT);
-#endif
…
         else
         {
-#ifdef IOMMU_WITH_IRTE_CACHE
-            /* Update the DTE cache that the interrupt map isn't valid. */
-            iommuAmdDteCacheUpdate(pDevIns, uDevId, &Dte, IOMMU_DTECACHE_F_PRESENT);
-#endif
             LogFlowFunc(("DTE interrupt map not valid\n"));
             *pMsiOut = *pMsiIn;
…
     {
         STAM_COUNTER_INC(&pThis->StatCmdInvDte);
-#ifdef IOMMU_WITH_IOTLBE_CACHE
         PCCMD_INV_DTE_T pCmdInvDte = (PCCMD_INV_DTE_T)pCmd;
         AssertCompile(sizeof(*pCmdInvDte) == sizeof(*pCmd));
…
             && !(pCmdInvDte->au64[1] & ~IOMMU_CMD_INV_DTE_QWORD_1_VALID_MASK))
         {
-            iommuAmdDteCacheUpdate(pDevIns, pCmdInvDte->n.u16DevId, NULL /* pDte */, 0 /* fFlags */);
+#ifdef IOMMU_WITH_DTE_CACHE
+            iommuAmdDteCacheRemove(pDevIns, pCmdInvDte->n.u16DevId);
+#endif
             return VINF_SUCCESS;
         }
         iommuAmdIllegalCmdEventInit(GCPhysCmd, (PEVT_ILLEGAL_CMD_ERR_T)pEvtError);
         return VERR_IOMMU_CMD_INVALID_FORMAT;
-#else
-        return VINF_SUCCESS;
-#endif
     }
…
     {
         STAM_COUNTER_INC(&pThis->StatCmdInvIommuPages);
-#ifdef IOMMU_WITH_IOTLBE_CACHE
         PCCMD_INV_IOMMU_PAGES_T pCmdInvPages = (PCCMD_INV_IOMMU_PAGES_T)pCmd;
         AssertCompile(sizeof(*pCmdInvPages) == sizeof(*pCmd));
…
             && !(pCmdInvPages->au64[1] & ~IOMMU_CMD_INV_IOMMU_PAGES_QWORD_1_VALID_MASK))
         {
+#ifdef IOMMU_WITH_IOTLBE_CACHE
             uint64_t const uIova     = RT_MAKE_U64(pCmdInvPages->n.u20AddrLo << X86_PAGE_4K_SHIFT, pCmdInvPages->n.u32AddrHi);
             uint16_t const uDomainId = pCmdInvPages->n.u16DomainId;
…
                 iommuAmdIotlbRemoveDomainId(pDevIns, uDomainId);
             }
-
+#endif
             return VINF_SUCCESS;
         }
         iommuAmdIllegalCmdEventInit(GCPhysCmd, (PEVT_ILLEGAL_CMD_ERR_T)pEvtError);
         return VERR_IOMMU_CMD_INVALID_FORMAT;
-#else
-        return VINF_SUCCESS;
-#endif
     }
…
         if (pThis->ExtFeat.n.u1InvAllSup)
         {
-#ifdef IOMMU_WITH_IOTLBE_CACHE
             PCCMD_INV_IOMMU_ALL_T pCmdInvAll = (PCCMD_INV_IOMMU_ALL_T)pCmd;
             AssertCompile(sizeof(*pCmdInvAll) == sizeof(*pCmd));
…
                 && !(pCmdInvAll->au64[1] & ~IOMMU_CMD_INV_IOMMU_ALL_QWORD_1_VALID_MASK))
             {
+#ifdef IOMMU_WITH_DTE_CACHE
                 iommuAmdDteCacheRemoveAll(pDevIns);
+#endif
+#ifdef IOMMU_WITH_IOTLBE_CACHE
                 iommuAmdIotlbRemoveAll(pDevIns);
+#endif
                 return
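The invalidate-pages command carries its 64-bit IOVA split across two fields: a 20-bit low part that becomes bits 31:12 once page-shifted, and a 32-bit high part for bits 63:32. RT_MAKE_U64(Lo, Hi) composes a 64-bit value from two 32-bit halves, so for example:

    /* u20AddrLo = 0x12345 (page frame bits 31:12), u32AddrHi = 0x1 (bits 63:32). */
    uint32_t const uLo   = UINT32_C(0x12345) << X86_PAGE_4K_SHIFT;   /* 0x12345000 */
    uint64_t const uIova = RT_MAKE_U64(uLo, UINT32_C(0x1));          /* 0x0000000112345000 */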
…
  if (pThis->ExtFeat.n.u1InvAllSup)
  {
- #ifdef IOMMU_WITH_IOTLBE_CACHE
      PCCMD_INV_IOMMU_ALL_T pCmdInvAll = (PCCMD_INV_IOMMU_ALL_T)pCmd;
      AssertCompile(sizeof(*pCmdInvAll) == sizeof(*pCmd));
…
          && !(pCmdInvAll->au64[1] & ~IOMMU_CMD_INV_IOMMU_ALL_QWORD_1_VALID_MASK))
      {
+ #ifdef IOMMU_WITH_DTE_CACHE
          iommuAmdDteCacheRemoveAll(pDevIns);
+ #endif
+ #ifdef IOMMU_WITH_IOTLBE_CACHE
          iommuAmdIotlbRemoveAll(pDevIns);
+ #endif
          return VINF_SUCCESS;
      }
      iommuAmdIllegalCmdEventInit(GCPhysCmd, (PEVT_ILLEGAL_CMD_ERR_T)pEvtError);
      return VERR_IOMMU_CMD_INVALID_FORMAT;
- #else
-     return VINF_SUCCESS;
- #endif
  }
  iommuAmdIllegalCmdEventInit(GCPhysCmd, (PEVT_ILLEGAL_CMD_ERR_T)pEvtError);
…
  if (uIntrTabLen < IOMMU_DTE_INTR_TAB_LEN_MAX)
  {
-     uint16_t const cEntries    = IOMMU_GET_INTR_TAB_ENTRIES(pDte);
-     uint16_t const cbIntrTable = IOMMU_GET_INTR_TAB_LEN(pDte);
+     uint16_t const cEntries    = IOMMU_DTE_GET_INTR_TAB_ENTRIES(pDte);
+     uint16_t const cbIntrTable = IOMMU_DTE_GET_INTR_TAB_LEN(pDte);
      pHlp->pfnPrintf(pHlp, "%sInterrupt Table Length = %#x (%u entries, %u bytes)\n", pszPrefix, uIntrTabLen, cEntries,
                      cbIntrTable);
…

+ # ifdef IOMMU_WITH_DTE_CACHE
+ /**
+  * @callback_method_impl{FNDBGFHANDLERDEV}
+  */
+ static DECLCALLBACK(void) iommuAmdR3DbgInfoDteCache(PPDMDEVINS pDevIns, PCDBGFINFOHLP pHlp, const char *pszArgs)
+ {
+     RT_NOREF(pszArgs);
+     PIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
+     IOMMU_LOCK_CACHE_NORET(pDevIns, pThis);
+
+     uint16_t const cDteCache = RT_ELEMENTS(pThis->aDeviceIds);
+     pHlp->pfnPrintf(pHlp, "DTE Cache: Capacity=%u entries\n", cDteCache);
+     for (uint16_t i = 0; i < cDteCache; i++)
+     {
+         uint16_t const uDeviceId = pThis->aDeviceIds[i];
+         if (uDeviceId)
+         {
+             pHlp->pfnPrintf(pHlp, " Entry[%u]: Device=%#x (BDF %02x:%02x.%d)\n", i, uDeviceId,
+                             (uDeviceId >> VBOX_PCI_BUS_SHIFT) & VBOX_PCI_BUS_MASK,
+                             (uDeviceId >> VBOX_PCI_DEVFN_DEV_SHIFT) & VBOX_PCI_DEVFN_DEV_MASK,
+                             uDeviceId & VBOX_PCI_DEVFN_FUN_MASK);
+
+             PCDTECACHE pDteCache = &pThis->aDteCache[i];
+             pHlp->pfnPrintf(pHlp, "   Flags     = %#x\n", pDteCache->fFlags);
+             pHlp->pfnPrintf(pHlp, "   Domain Id = %u\n", pDteCache->uDomainId);
+             pHlp->pfnPrintf(pHlp, "\n");
+         }
+     }
+     IOMMU_UNLOCK_CACHE(pDevIns, pThis);
+ }
+ # endif /* IOMMU_WITH_DTE_CACHE */
+
+
  # ifdef IOMMU_WITH_IOTLBE_CACHE
  /**
…
  {
      pHlp->pfnPrintf(pHlp, "IOTLBEs for domain %u (%#x):\n", uDomainId, uDomainId);
-     PIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
+     PIOMMU   pThis   = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
+     PIOMMUCC pThisR3 = PDMDEVINS_2_DATA_CC(pDevIns, PIOMMUR3);
      IOTLBEINFOARG Args;
-     Args.pIommu    = pThis;
+     Args.pIommuR3  = pThisR3;
      Args.pHlp      = pHlp;
      Args.uDomainId = uDomainId;

      IOMMU_LOCK_CACHE_NORET(pDevIns, pThis);
-     RTAvlU64DoWithAll(&pThis->TreeIotlbe, true /* fFromLeft */, iommuAmdR3IotlbEntryInfo, &Args);
+     RTAvlU64DoWithAll(&pThisR3->TreeIotlbe, true /* fFromLeft */, iommuAmdR3IotlbEntryInfo, &Args);
      IOMMU_UNLOCK_CACHE(pDevIns, pThis);
  }
…
      pHlp->pfnPrintf(pHlp, "Missing domain ID.\n");
  }
- # endif
+ # endif /* IOMMU_WITH_IOTLBE_CACHE */

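The DTE cache dump above decodes the 16-bit device ID into PCI bus/device/function form. The standard PCI encoding places the bus in bits 15:8, the device in bits 7:3 and the function in bits 2:0; the following standalone snippet shows the same decode without the VBOX_PCI_* constants.

/* Standalone PCI BDF decode: bus = bits 15:8, device = bits 7:3,
 * function = bits 2:0 of the 16-bit device (requester) ID. */
#include <stdint.h>
#include <stdio.h>

static void printBdf(uint16_t uDevId)
{
    unsigned const uBus = (uDevId >> 8) & 0xff;
    unsigned const uDev = (uDevId >> 3) & 0x1f;
    unsigned const uFn  =  uDevId       & 0x7;
    printf("Device=%#x (BDF %02x:%02x.%u)\n", uDevId, uBus, uDev, uFn);
}

int main(void)
{
    printBdf(0x0010);   /* 00:02.0 */
    printBdf(0x0109);   /* 01:01.1 */
    return 0;
}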
  # ifdef IOMMU_WITH_IRTE_CACHE
  /**
   * Gets the interrupt type name for an interrupt type in the IRTE.
…
   * @callback_method_impl{FNDBGFHANDLERDEV}
   */
- static DECLCALLBACK(void) iommuAmdR3DbgInfoIrtes(PPDMDEVINS pDevIns, PCDBGFINFOHLP pHlp, const char *pszArgs)
+ static DECLCALLBACK(void) iommuAmdR3DbgInfoIrteCache(PPDMDEVINS pDevIns, PCDBGFINFOHLP pHlp, const char *pszArgs)
  {
      RT_NOREF(pszArgs);
…
      IOMMU_LOCK_CACHE_NORET(pDevIns, pThis);

-     uint16_t const cIrteCache = pThis->cIrteCache;
+     uint16_t const cIrteCache = RT_ELEMENTS(pThis->aIrteCache);
      pHlp->pfnPrintf(pHlp, "IRTE Cache: Capacity=%u entries\n", cIrteCache);
      for (uint16_t idxIrte = 0; idxIrte < cIrteCache; idxIrte++)
      {
-         PCIRTECACHE pIrteCache = &pThis->paIrteCache[idxIrte];
+         PCIRTECACHE pIrteCache = &pThis->aIrteCache[idxIrte];
          uint32_t const uKey = pIrteCache->uKey;
          if (uKey != IOMMU_IRTE_CACHE_KEY_NIL)
…
      IOMMU_UNLOCK_CACHE(pDevIns, pThis);
  }
- # endif
+ # endif /* IOMMU_WITH_IRTE_CACHE */


…
  {
      PDMDEV_CHECK_VERSIONS_RETURN_QUIET(pDevIns);
-     PIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
+     PIOMMU   pThis   = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
      PIOMMUCC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PIOMMUCC);
      LogFlowFunc(("\n"));
…
  }

- #ifdef IOMMU_WITH_DTE_CACHE
-     /* Destroy the DTE cache. */
-     if (pThis->paDteCache)
-     {
-         PDMDevHlpMMHeapFree(pDevIns, pThis->paDteCache);
-         pThis->paDteCache = NULL;
-     }
- #endif
-
  #ifdef IOMMU_WITH_IOTLBE_CACHE
      /* Destroy the IOTLB cache. */
-     if (pThis->paIotlbes)
-     {
-         PDMDevHlpMMHeapFree(pDevIns, pThis->paIotlbes);
-         pThis->paIotlbes = NULL;
-         pThis->idxUnusedIotlbe = 0;
-     }
- #endif
-
- #ifdef IOMMU_WITH_IRTE_CACHE
-     /* Destroy the interrupt cache. */
-     if (pThis->paIrteCache)
-     {
-         PDMDevHlpMMHeapFree(pDevIns, pThis->paIrteCache);
-         pThis->paIrteCache = NULL;
+     if (pThisCC->paIotlbes)
+     {
+         PDMDevHlpMMHeapFree(pDevIns, pThisCC->paIotlbes);
+         pThisCC->paIotlbes = NULL;
+         pThisCC->idxUnusedIotlbe = 0;
      }
  #endif
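The iommudte dump shown earlier expands the DTE's encoded 4-bit interrupt-table length into an entry count of 2^encoded and a byte size of entries times the IRTE size. A standalone sketch of that expansion follows; the 4-byte IRTE size is an assumption (the basic IRTE format; other formats are larger).

/* Expands the encoded interrupt-table length: 2^encoded entries,
 * entries * IRTE size bytes. IRTE_SIZE is an assumed value. */
#include <stdint.h>
#include <stdio.h>

#define INTR_TAB_LEN_MAX 12   /* First invalid encoding (exclusive bound). */
#define IRTE_SIZE        4    /* Assumed size of one IRTE in bytes. */

int main(void)
{
    for (unsigned uEncoded = 0; uEncoded < INTR_TAB_LEN_MAX; uEncoded++)
    {
        uint32_t const cEntries = UINT32_C(1) << uEncoded;
        uint32_t const cbTable  = cEntries * IRTE_SIZE;
        printf("encoded=%2u -> %4u entries, %5u bytes\n", uEncoded, (unsigned)cEntries, (unsigned)cbTable);
    }
    return 0;
}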
…
   */
  PDMDevHlpDBGFInfoRegister(pDevIns, "iommu", "Display IOMMU state.", iommuAmdR3DbgInfo);
- PDMDevHlpDBGFInfoRegister(pDevIns, "iommudte", "Display the DTE for a device. Arguments: DeviceID.", iommuAmdR3DbgInfoDte);
- PDMDevHlpDBGFInfoRegister(pDevIns, "iommudevtabs", "Display active IOMMU device tables.", iommuAmdR3DbgInfoDevTabs);
+ PDMDevHlpDBGFInfoRegister(pDevIns, "iommudte", "Display the DTE for a device (from memory). Arguments: DeviceID.", iommuAmdR3DbgInfoDte);
+ PDMDevHlpDBGFInfoRegister(pDevIns, "iommudevtabs", "Display I/O device tables with translation enabled.", iommuAmdR3DbgInfoDevTabs);
  #ifdef IOMMU_WITH_IOTLBE_CACHE
  PDMDevHlpDBGFInfoRegister(pDevIns, "iommutlb", "Display IOTLBs for a domain. Arguments: DomainID.", iommuAmdR3DbgInfoIotlb);
  #endif
+ #ifdef IOMMU_WITH_DTE_CACHE
+ PDMDevHlpDBGFInfoRegister(pDevIns, "iommudtecache", "Display the DTE cache.", iommuAmdR3DbgInfoDteCache);
+ #endif
  #ifdef IOMMU_WITH_IRTE_CACHE
- PDMDevHlpDBGFInfoRegister(pDevIns, "iommuirtes", "Display the IRTE cache.", iommuAmdR3DbgInfoIrtes);
+ PDMDevHlpDBGFInfoRegister(pDevIns, "iommuirtecache", "Display the IRTE cache.", iommuAmdR3DbgInfoIrteCache);
  #endif
…
  PDMDevHlpSTAMRegister(pDevIns, &pThis->StatProfIrteCacheLookup, STAMTYPE_PROFILE, "Profile/IrteCacheLookup", STAMUNIT_TICKS_PER_CALL, "Profiling IRTE cache lookup.");

- PDMDevHlpSTAMRegister(pDevIns, &pThis->StatAccessCacheHit,        STAMTYPE_COUNTER, "Access/CacheHit",        STAMUNIT_OCCURENCES, "Number of cache hits.");
- PDMDevHlpSTAMRegister(pDevIns, &pThis->StatAccessCacheMiss,       STAMTYPE_COUNTER, "Access/CacheMiss",       STAMUNIT_OCCURENCES, "Number of cache misses.");
- PDMDevHlpSTAMRegister(pDevIns, &pThis->StatAccessCacheHitFull,    STAMTYPE_COUNTER, "Access/CacheHitFull",    STAMUNIT_OCCURENCES, "Number of accesses that were entirely in the cache.");
- PDMDevHlpSTAMRegister(pDevIns, &pThis->StatAccessCacheNonContig,  STAMTYPE_COUNTER, "Access/CacheNonContig",  STAMUNIT_OCCURENCES, "Number of cache accesses that resulted in non-contiguous translated regions.");
- PDMDevHlpSTAMRegister(pDevIns, &pThis->StatAccessCachePermDenied, STAMTYPE_COUNTER, "Access/CacheAddrDenied", STAMUNIT_OCCURENCES, "Number of cache accesses that resulted in denied permissions.");
- PDMDevHlpSTAMRegister(pDevIns, &pThis->StatAccessDteNonContig,    STAMTYPE_COUNTER, "Access/DteNonContig",    STAMUNIT_OCCURENCES, "Number of DTE accesses that resulted in non-contiguous translated regions.");
- PDMDevHlpSTAMRegister(pDevIns, &pThis->StatAccessDtePermDenied,   STAMTYPE_COUNTER, "Access/DtePermDenied",   STAMUNIT_OCCURENCES, "Number of DTE accesses that resulted in denied permissions.");
+ PDMDevHlpSTAMRegister(pDevIns, &pThis->StatAccessCacheHit,        STAMTYPE_COUNTER, "MemAccess/CacheHit",        STAMUNIT_OCCURENCES, "Number of cache hits.");
+ PDMDevHlpSTAMRegister(pDevIns, &pThis->StatAccessCacheMiss,       STAMTYPE_COUNTER, "MemAccess/CacheMiss",       STAMUNIT_OCCURENCES, "Number of cache misses.");
+ PDMDevHlpSTAMRegister(pDevIns, &pThis->StatAccessCacheHitFull,    STAMTYPE_COUNTER, "MemAccess/CacheHitFull",    STAMUNIT_OCCURENCES, "Number of accesses that were entirely in the cache.");
+ PDMDevHlpSTAMRegister(pDevIns, &pThis->StatAccessCacheNonContig,  STAMTYPE_COUNTER, "MemAccess/CacheNonContig",  STAMUNIT_OCCURENCES, "Number of cache accesses that resulted in non-contiguous translated regions.");
+ PDMDevHlpSTAMRegister(pDevIns, &pThis->StatAccessCachePermDenied, STAMTYPE_COUNTER, "MemAccess/CacheAddrDenied", STAMUNIT_OCCURENCES, "Number of cache accesses that resulted in denied permissions.");
+ PDMDevHlpSTAMRegister(pDevIns, &pThis->StatAccessDteNonContig,    STAMTYPE_COUNTER, "MemAccess/DteNonContig",    STAMUNIT_OCCURENCES, "Number of DTE accesses that resulted in non-contiguous translated regions.");
+ PDMDevHlpSTAMRegister(pDevIns, &pThis->StatAccessDtePermDenied,   STAMTYPE_COUNTER, "MemAccess/DtePermDenied",   STAMUNIT_OCCURENCES, "Number of DTE accesses that resulted in denied permissions.");

- PDMDevHlpSTAMRegister(pDevIns, &pThis->StatIntrCacheHit,  STAMTYPE_COUNTER, "Intr/CacheHit",  STAMUNIT_OCCURENCES, "Number of cache hits.");
- PDMDevHlpSTAMRegister(pDevIns, &pThis->StatIntrCacheMiss, STAMTYPE_COUNTER, "Intr/CacheMiss", STAMUNIT_OCCURENCES, "Number of cache misses.");
+ PDMDevHlpSTAMRegister(pDevIns, &pThis->StatIntrCacheHit,  STAMTYPE_COUNTER, "Interrupt/CacheHit",  STAMUNIT_OCCURENCES, "Number of cache hits.");
+ PDMDevHlpSTAMRegister(pDevIns, &pThis->StatIntrCacheMiss, STAMTYPE_COUNTER, "Interrupt/CacheMiss", STAMUNIT_OCCURENCES, "Number of cache misses.");
  # endif
…
  AssertLogRelRCReturn(rc, rc);

- /*
-  * Allocate the device table entry cache.
-  * PCI devices are hotpluggable and we don't have a way of querying the bus for all
-  * assigned PCI BDF slots. So while this wastes some memory, it should work regardless
-  * of how code, features and devices around the IOMMU change.
-  */
- size_t cbCache = 0;
- size_t const cbDteCache = sizeof(DTECACHE) * IOMMU_DTE_CACHE_MAX;
- AssertCompile(IOMMU_DTE_CACHE_MAX >= UINT16_MAX);
- pThis->paDteCache = (PDTECACHE)PDMDevHlpMMHeapAllocZ(pDevIns, cbDteCache);
- if (!pThis->paDteCache)
-     return PDMDevHlpVMSetError(pDevIns, VERR_NO_MEMORY, RT_SRC_POS,
-                                N_("Failed to allocate %zu bytes from the hyperheap for the DTE cache."), cbDteCache);
- cbCache += cbDteCache;
+ /* Several places in this code rely on this basic assumption - assert it! */
+ AssertCompile(RT_ELEMENTS(pThis->aDeviceIds) == RT_ELEMENTS(pThis->aDteCache));
  #endif
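The new AssertCompile above documents that the DTE cache now lives in two fixed, parallel arrays: aDeviceIds[] holds the key (the device ID) and aDteCache[] holds the cached data at the same index. The following sketch illustrates that lookup scheme; the capacity, the field names and the use of 0 as a free-slot marker are assumptions for illustration only.

/* Sketch of a parallel-array cache: device IDs in one array, cached DTE
 * data at the same index in the other. */
#include <stdint.h>
#include <stdio.h>

#define DTE_CACHE_CAPACITY 16

typedef struct DTECACHEENTRY
{
    uint16_t fFlags;
    uint16_t uDomainId;
} DTECACHEENTRY;

static uint16_t      g_aDeviceIds[DTE_CACHE_CAPACITY];           /* Keys; 0 = free slot (assumption). */
static DTECACHEENTRY g_aDteCache[DTE_CACHE_CAPACITY];            /* Parallel to g_aDeviceIds. */

/* Returns the cached entry for a device ID, or NULL when not cached. */
static DTECACHEENTRY *dteCacheLookup(uint16_t uDevId)
{
    for (unsigned i = 0; i < DTE_CACHE_CAPACITY; i++)
        if (g_aDeviceIds[i] == uDevId)
            return &g_aDteCache[i];
    return NULL;
}

int main(void)
{
    g_aDeviceIds[0]          = 0x0010;   /* BDF 00:02.0 */
    g_aDteCache[0].uDomainId = 1;
    DTECACHEENTRY const *p   = dteCacheLookup(0x0010);
    printf("domain=%u\n", p ? (unsigned)p->uDomainId : 0u);
    return 0;
}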
…
      */
      size_t const cbIotlbes = sizeof(IOTLBE) * IOMMU_IOTLBE_MAX;
-     pThis->paIotlbes = (PIOTLBE)PDMDevHlpMMHeapAllocZ(pDevIns, cbIotlbes);
-     if (!pThis->paIotlbes)
+     pThisCC->paIotlbes = (PIOTLBE)PDMDevHlpMMHeapAllocZ(pDevIns, cbIotlbes);
+     if (!pThisCC->paIotlbes)
          return PDMDevHlpVMSetError(pDevIns, VERR_NO_MEMORY, RT_SRC_POS,
                                     N_("Failed to allocate %zu bytes from the hyperheap for the IOTLB cache."), cbIotlbes);
-     RTListInit(&pThis->LstLruIotlbe);
-     cbCache += cbIotlbes;
- #endif
-
- #ifdef IOMMU_WITH_IRTE_CACHE
-     /* Maximum number of elements in the IRTE cache. */
-     PCPDMDEVHLPR3 pHlp = pDevIns->pHlpR3;
-     rc = pHlp->pfnCFGMQueryU16Def(pCfg, "InterruptCacheCount", &pThis->cIrteCache, IOMMU_IRTE_CACHE_DEFAULT);
-     if (RT_FAILURE(rc))
-         return PDMDevHlpVMSetError(pDevIns, rc, RT_SRC_POS, N_("IOMMU: failed to read InterruptCacheCount as integer"));
-     AssertCompile(IOMMU_IRTE_CACHE_DEFAULT >= IOMMU_IRTE_CACHE_MIN);
-     AssertCompile(IOMMU_IRTE_CACHE_DEFAULT <= IOMMU_IRTE_CACHE_MAX);
-     if (   pThis->cIrteCache < IOMMU_IRTE_CACHE_MIN
-         || pThis->cIrteCache > IOMMU_IRTE_CACHE_MAX)
-         return PDMDevHlpVMSetError(pDevIns, VERR_INVALID_PARAMETER, RT_SRC_POS,
-                                    N_("IOMMU: InterruptCacheCount invalid (must be between %u and %u)."),
-                                    IOMMU_IRTE_CACHE_MIN, IOMMU_IRTE_CACHE_MAX);
-
-     /*
-      * Allocate the interrupt remapping cache.
-      * This is an array of devices and their corresponding interrupt remap table entries.
-      * Typically only a handful of PCI devices are used in VMs so this is kept rather small.
-      * If we ever need to support a vast number of interrupt-remapped devices, we can
-      * implement a more sophisticated cache solution then.
-      *
-      * NOTE: IRTE cache entry keys are initialized later in this function by calling
-      *       iommuAmdR3Reset() -> iommuAmdIrteCacheRemoveAll().
-      */
-     size_t const cbIrteCache = sizeof(IRTECACHE) * pThis->cIrteCache;
-     pThis->paIrteCache = (PIRTECACHE)PDMDevHlpMMHeapAllocZ(pDevIns, cbIrteCache);
-     if (!pThis->paIrteCache)
-         return PDMDevHlpVMSetError(pDevIns, VERR_NO_MEMORY, RT_SRC_POS,
-                                    N_("Failed to allocate %zu bytes from the hyperheap for the interrupt cache."), cbIrteCache);
-     cbCache += cbIrteCache;
- #endif
-
- #ifdef IOMMU_WITH_DTE_CACHE
-     LogRel(("%s: Allocated %zu bytes from the hyperheap for the IOMMU cache\n", IOMMU_LOG_PFX, cbCache));
+     RTListInit(&pThisCC->LstLruIotlbe);
+     LogRel(("%s: Allocated %zu bytes from the hyperheap for the IOTLB cache\n", IOMMU_LOG_PFX, cbIotlbes));
  #endif

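RTListInit(&pThisCC->LstLruIotlbe) above initializes the LRU list that lets the IOTLB cache pick an eviction victim once the preallocated pool is exhausted. The following simplified sketch shows the bookkeeping such a list enables (move to the head on use, evict from the tail); it uses a hand-rolled doubly-linked list rather than RTList and is not the device's actual code.

/* LRU bookkeeping sketch: most recently used at the head, eviction
 * candidate at the tail. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef struct IOTLBNODE
{
    struct IOTLBNODE *pPrev;
    struct IOTLBNODE *pNext;
    uint64_t          uKey;   /* E.g. domain ID + I/O virtual page number. */
} IOTLBNODE;

static IOTLBNODE *g_pLruHead;   /* Most recently used. */
static IOTLBNODE *g_pLruTail;   /* Least recently used -> evict this one. */

static void lruInsertFront(IOTLBNODE *pNode)
{
    pNode->pPrev = NULL;
    pNode->pNext = g_pLruHead;
    if (g_pLruHead)
        g_pLruHead->pPrev = pNode;
    else
        g_pLruTail = pNode;
    g_pLruHead = pNode;
}

static void lruUnlink(IOTLBNODE *pNode)   /* pNode must currently be linked. */
{
    if (pNode->pPrev) pNode->pPrev->pNext = pNode->pNext; else g_pLruHead = pNode->pNext;
    if (pNode->pNext) pNode->pNext->pPrev = pNode->pPrev; else g_pLruTail = pNode->pPrev;
    pNode->pPrev = pNode->pNext = NULL;
}

int main(void)
{
    IOTLBNODE aNodes[3] = { { NULL, NULL, 1 }, { NULL, NULL, 2 }, { NULL, NULL, 3 } };
    for (unsigned i = 0; i < 3; i++)
        lruInsertFront(&aNodes[i]);     /* List: 3, 2, 1 */
    lruUnlink(&aNodes[0]);              /* Touch key 1 ...           */
    lruInsertFront(&aNodes[0]);         /* ... list becomes: 1, 3, 2 */
    printf("evict key=%llu\n", (unsigned long long)g_pLruTail->uKey);   /* -> 2 */
    return 0;
}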