Changeset 87691 in vbox
- Timestamp: Feb 10, 2021 4:20:11 PM (4 years ago)
- Location: trunk
- Files: 4 edited
trunk/include/VBox/vmm/pdmdev.h
(diff r87477 → r87691)

     /** Structure version. PDM_IOMMUHLP_VERSION defines the current version. */
     uint32_t                u32Version;
+
+    /**
+     * Acquires the PDM lock.
+     *
+     * @returns VINF_SUCCESS on success.
+     * @returns rc if we failed to acquire the lock.
+     * @param   pDevIns     The PCI device instance.
+     * @param   rc          What to return if we fail to acquire the lock.
+     */
+    DECLR0CALLBACKMEMBER(int,  pfnLock,(PPDMDEVINS pDevIns, int rc));
+
+    /**
+     * Releases the PDM lock.
+     *
+     * @param   pDevIns     The PCI device instance.
+     */
+    DECLR0CALLBACKMEMBER(void, pfnUnlock,(PPDMDEVINS pDevIns));
+
     /** Just a safety precaution. */
     uint32_t                u32TheEnd;
…
 /** Current PDMIOMMUHLPR0 version number. */
-#define PDM_IOMMUHLPR0_VERSION                  PDM_VERSION_MAKE(0xff13, 1, 0)
+#define PDM_IOMMUHLPR0_VERSION                  PDM_VERSION_MAKE(0xff13, 2, 0)
…
     /** Structure version. PDM_IOMMUHLP_VERSION defines the current version. */
     uint32_t                u32Version;
+
+    /**
+     * Acquires the PDM lock.
+     *
+     * @returns VINF_SUCCESS on success.
+     * @returns rc if we failed to acquire the lock.
+     * @param   pDevIns     The PCI device instance.
+     * @param   rc          What to return if we fail to acquire the lock.
+     */
+    DECLRCCALLBACKMEMBER(int,  pfnLock,(PPDMDEVINS pDevIns, int rc));
+
+    /**
+     * Releases the PDM lock.
+     *
+     * @param   pDevIns     The PCI device instance.
+     */
+    DECLRCCALLBACKMEMBER(void, pfnUnlock,(PPDMDEVINS pDevIns));
+
     /** Just a safety precaution. */
     uint32_t                u32TheEnd;
…
 /** Current PDMIOMMUHLPRC version number. */
-#define PDM_IOMMUHLPRC_VERSION                  PDM_VERSION_MAKE(0xff14, 1, 0)
+#define PDM_IOMMUHLPRC_VERSION                  PDM_VERSION_MAKE(0xff14, 2, 0)
…
     /** Structure version. PDM_IOMMUHLP_VERSION defines the current version. */
     uint32_t                u32Version;
+
+    /**
+     * Acquires the PDM lock.
+     *
+     * @returns VINF_SUCCESS on success.
+     * @returns rc if we failed to acquire the lock.
+     * @param   pDevIns     The PCI device instance.
+     * @param   rc          What to return if we fail to acquire the lock.
+     */
+    DECLR3CALLBACKMEMBER(int,  pfnLock,(PPDMDEVINS pDevIns, int rc));
+
+    /**
+     * Releases the PDM lock.
+     *
+     * @param   pDevIns     The PCI device instance.
+     */
+    DECLR3CALLBACKMEMBER(void, pfnUnlock,(PPDMDEVINS pDevIns));
+
     /** Just a safety precaution. */
     uint32_t                u32TheEnd;
…
 /** Current PDMIOMMUHLPR3 version number. */
-#define PDM_IOMMUHLPR3_VERSION                  PDM_VERSION_MAKE(0xff15, 1, 0)
+#define PDM_IOMMUHLPR3_VERSION                  PDM_VERSION_MAKE(0xff15, 2, 0)
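For orientation (not part of the changeset): a minimal sketch of how a device would call the two new helper members from its current-context state. The pfnLock/pfnUnlock signatures and the VERR_SEM_BUSY convention come from the header above and mirror the IOMMU_LOCK/IOMMU_UNLOCK macros introduced in DevIommuAmd.cpp further down; the function name and the PMYDEVCC type are hypothetical.

    /* Illustrative only: take and release the PDM lock through the new helper
       members of a context-specific device state (pThisCC).  On contention in
       ring-0/raw-mode the helper returns the supplied status (here VERR_SEM_BUSY)
       so the caller can retry or defer the work to ring-3. */
    static int myDevDoLockedWork(PPDMDEVINS pDevIns, PMYDEVCC pThisCC)
    {
        int rc = pThisCC->CTX_SUFF(pIommuHlp)->pfnLock(pDevIns, VERR_SEM_BUSY);
        if (rc == VINF_SUCCESS)
        {
            /* ... access state protected by the PDM lock ... */
            pThisCC->CTX_SUFF(pIommuHlp)->pfnUnlock(pDevIns);
        }
        return rc;
    }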
trunk/src/VBox/Devices/Bus/DevIommuAmd.cpp
(diff r87671 → r87691)

 # define IOMMU_IOTLB_KEY_MAKE(a_DomainId, a_uIova)  (  ((uint64_t)(a_DomainId) << IOMMU_IOTLB_DOMAIN_ID_SHIFT) \
                                                      | (((a_uIova) >> X86_PAGE_4K_SHIFT) & IOMMU_IOTLB_IOVA_MASK))
+
+/** Acquires the cache lock. */
+# define IOMMU_LOCK_CACHE(a_pDevIns, a_pThis) \
+    do { \
+        int const rcLock = PDMDevHlpCritSectEnter((a_pDevIns), &(a_pThis)->CritSectCache, VERR_SEM_BUSY); \
+        if (rcLock == VINF_SUCCESS) \
+        { /* likely */ } \
+        else \
+        { \
+            AssertRC(rcLock); \
+            return rcLock; \
+        } \
+    } while (0)
+
+/** Acquires the cache lock (asserts on failure). */
+# define IOMMU_LOCK_CACHE_NORET(a_pDevIns, a_pThis) \
+    do { \
+        int const rcLock = PDMDevHlpCritSectEnter((a_pDevIns), &(a_pThis)->CritSectCache, VERR_SEM_BUSY); \
+        AssertRC(rcLock); \
+    } while (0)
+
+/** Releases the cache lock. */
+# define IOMMU_UNLOCK_CACHE(a_pDevIns, a_pThis)     PDMDevHlpCritSectLeave((a_pDevIns), &(a_pThis)->CritSectCache)
 #endif

…
 #define IOMMU_GET_PAGE_OFF_MASK(a_cShift)           (~(UINT64_C(0xffffffffffffffff) << (a_cShift)))

-
-/*********************************************************************************************************************************
-*   Structures and Typedefs                                                                                                      *
-*********************************************************************************************************************************/
-/**
- * Acquires the IOMMU PDM lock.
- * This will make a long jump to ring-3 to acquire the lock if necessary.
- */
-#define IOMMU_LOCK(a_pDevIns) \
+/** Acquires the PDM lock. */
+#define IOMMU_LOCK(a_pDevIns, a_pThisCC) \
     do { \
-        int rcLock = PDMDevHlpCritSectEnter((a_pDevIns), (a_pDevIns)->CTX_SUFF(pCritSectRo), VINF_SUCCESS); \
+        int const rcLock = (a_pThisCC)->CTX_SUFF(pIommuHlp)->pfnLock((a_pDevIns), VERR_SEM_BUSY); \
         if (RT_LIKELY(rcLock == VINF_SUCCESS)) \
         { /* likely */ } \
…
     } while (0)

-/**
- * Acquires the IOMMU PDM lock (asserts on failure rather than returning an error).
- * This will make a long jump to ring-3 to acquire the lock if necessary.
- */
-#define IOMMU_LOCK_NORET(a_pDevIns) \
+/** Acquires the PDM lock (asserts on failure). */
+#define IOMMU_LOCK_NORET(a_pDevIns, a_pThisCC) \
     do { \
-        int rcLock = PDMDevHlpCritSectEnter((a_pDevIns), (a_pDevIns)->CTX_SUFF(pCritSectRo), VINF_SUCCESS); \
+        int const rcLock = (a_pThisCC)->CTX_SUFF(pIommuHlp)->pfnLock((a_pDevIns), VERR_SEM_BUSY); \
         AssertRC(rcLock); \
     } while (0)

-/**
- * Releases the IOMMU PDM lock.
- */
-#define IOMMU_UNLOCK(a_pDevIns) \
-    do { \
-        PDMDevHlpCritSectLeave((a_pDevIns), (a_pDevIns)->CTX_SUFF(pCritSectRo)); \
-    } while (0)
-
-/**
- * Asserts that the critsect is owned by this thread.
- */
-#define IOMMU_ASSERT_LOCKED(a_pDevIns) \
-    do { \
-        Assert(PDMDevHlpCritSectIsOwner((a_pDevIns), (a_pDevIns)->CTX_SUFF(pCritSectRo))); \
-    } while (0)
-
-/**
- * Asserts that the critsect is not owned by this thread.
- */
-#define IOMMU_ASSERT_NOT_LOCKED(a_pDevIns) \
-    do { \
-        Assert(!PDMDevHlpCritSectIsOwner((a_pDevIns), (a_pDevIns)->CTX_SUFF(pCritSectRo))); \
-    } while (0)
-
+/** Releases the PDM lock. */
+# define IOMMU_UNLOCK(a_pDevIns, a_pThisCC)         (a_pThisCC)->CTX_SUFF(pIommuHlp)->pfnUnlock((a_pDevIns))
+
+/** Asserts that the lock is owned by this thread. */
+#define IOMMU_ASSERT_LOCKED(a_pDevIns)              do { } while (0)
+
+/** Asserts that the lock isn't owned by this thread. */
+#define IOMMU_ASSERT_NOT_LOCKED(a_pDevIns)          do { } while (0)
+
+
+/*********************************************************************************************************************************
+*   Structures and Typedefs                                                                                                      *
+*********************************************************************************************************************************/
 /**
  * IOMMU operation (transaction).
…

 #ifdef IOMMU_WITH_IOTLBE_CACHE
+    /** The critsect that protects the cache from concurrent access. */
+    PDMCRITSECT                 CritSectCache;
     /** L1 Cache - Maps [DeviceId] to [DomainId]. */
     PIODEVICE                   paDevices;
…
     PPDMDEVINSR3                pDevInsR3;
     /** The IOMMU helpers. */
-    PCPDMIOMMUHLPR3             pIommuHlpR3;
+    R3PTRTYPE(PCPDMIOMMUHLPR3)  pIommuHlpR3;
     /** The command thread handle. */
     R3PTRTYPE(PPDMTHREAD)       pCmdThread;
…
     PPDMDEVINSR0                pDevInsR0;
     /** The IOMMU helpers. */
-    PCPDMIOMMUHLPR0             pIommuHlpR0;
+    R0PTRTYPE(PCPDMIOMMUHLPR0)  pIommuHlpR0;
 } IOMMUR0;
 /** Pointer to the ring-0 IOMMU device state. */
…
 {
     /** Device instance. */
-    PPDMDEVINSR0                pDevInsRC;
+    PPDMDEVINSRC                pDevInsRC;
     /** The IOMMU helpers. */
-    PCPDMIOMMUHLPRC             pIommuHlpRC;
+    RCPTRTYPE(PCPDMIOMMUHLPRC)  pIommuHlpRC;
 } IOMMURC;
 /** Pointer to the raw-mode IOMMU device state. */
…
 {
     PIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
-    IOMMU_ASSERT_LOCKED(pDevIns);
+    IOMMU_LOCK_CACHE_NORET(pDevIns, pThis);

     if (pThis->cCachedIotlbes > 0)
…
         RTListInit(&pThis->LstLruIotlbe);
     }
+
+    IOMMU_UNLOCK_CACHE(pDevIns, pThis);
 }
…
  * domain ID from the cache.
  *
- * @param   pThis           The IOMMU device state.
+ * @param   pDevIns         The IOMMU instance data.
  * @param   uDomainId       The domain ID.
  * @param   uIova           The I/O virtual address to invalidate.
  * @param   cbInvalidate    The size of the invalidation (must be 4K aligned).
  */
-static void iommuAmdIotlbRemoveRange(PIOMMU pThis, uint16_t uDomainId, uint64_t uIova, size_t cbInvalidate)
+static void iommuAmdIotlbRemoveRange(PPDMDEVINS pDevIns, uint16_t uDomainId, uint64_t uIova, size_t cbInvalidate)
 {
     /* Validate. */
…
     Assert(cbInvalidate >= X86_PAGE_4K_SIZE);

+    PIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
+    IOMMU_LOCK_CACHE_NORET(pDevIns, pThis);
+
     do
     {
…
         cbInvalidate -= X86_PAGE_4K_SIZE;
     } while (cbInvalidate > 0);
+
+    IOMMU_UNLOCK_CACHE(pDevIns, pThis);
 }
…
  * Removes all IOTLB entries for the specified domain ID.
  *
- * @param   pThis       The IOMMU device state.
+ * @param   pDevIns     The IOMMU instance data.
  * @param   uDomainId   The domain ID.
  */
-static void iommuAmdIotlbRemoveDomainId(PIOMMU pThis, uint16_t uDomainId)
+static void iommuAmdIotlbRemoveDomainId(PPDMDEVINS pDevIns, uint16_t uDomainId)
 {
     /*
…
      * so they will eventually get evicted and re-cycled as the cache gets re-populated.
      */
+    PIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
+    IOMMU_LOCK_CACHE_NORET(pDevIns, pThis);
+
     IOTLBEFLUSHARG Args;
     Args.pIommu    = pThis;
     Args.uDomainId = uDomainId;
     RTAvlU64DoWithAll(&pThis->TreeIotlbe, true /* fFromLeft */, iommuAmdIotlbEntryRemoveDomainId, &Args);
+
+    IOMMU_UNLOCK_CACHE(pDevIns, pThis);
 }
…
     Assert(!(cbAccess & X86_PAGE_4K_OFFSET_MASK));
     Assert(cbAccess >= X86_PAGE_4K_SIZE);
-    IOMMU_ASSERT_LOCKED(pDevIns);

     PIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
…
     size_t cPages = cbAccess / X86_PAGE_4K_SIZE;
     cPages = RT_MIN(cPages, IOMMU_IOTLBE_MAX);
+
+    IOMMU_LOCK_CACHE_NORET(pDevIns, pThis);
     do
     {
…
         --cPages;
     } while (cPages > 0);
+    IOMMU_UNLOCK_CACHE(pDevIns, pThis);
 }
…
 {
     PIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
-    IOMMU_ASSERT_LOCKED(pDevIns);
+    IOMMU_LOCK_CACHE_NORET(pDevIns, pThis);

     if (fOrMask & IOMMU_DEV_F_PRESENT)
…
         pThis->paDevices[uDevId].uDomainId = 0;
     }
+
+    IOMMU_UNLOCK_CACHE(pDevIns, pThis);
 }
…
 {
     PIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
-    IOMMU_ASSERT_LOCKED(pDevIns);
+    IOMMU_LOCK_CACHE_NORET(pDevIns, pThis);

     if (fDevIoFlags & IOMMU_DEV_F_PRESENT)
         pThis->paDevices[uDevId].fFlags |= fDevIoFlags;
+
+    IOMMU_UNLOCK_CACHE(pDevIns, pThis);
 }
…
 {
     PIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
-    IOMMU_ASSERT_LOCKED(pDevIns);
+    IOMMU_LOCK_CACHE_NORET(pDevIns, pThis);

     size_t const cbDevices = sizeof(IODEVICE) * IOMMU_DTE_CACHE_MAX;
     RT_BZERO(pThis->paDevices, cbDevices);
+
+    IOMMU_UNLOCK_CACHE(pDevIns, pThis);
 }
 #endif /* IOMMU_WITH_IOTLBE_CACHE */
…
  *
  * @param   pDevIns     The IOMMU device instance.
+ *
+ * @remarks The IOMMU lock must be held.
  */
 static void iommuAmdCmdThreadWakeUpIfNeeded(PPDMDEVINS pDevIns)
…
  *
  * @thread  Any.
+ * @remarks The IOMMU lock must be held while calling this function.
  */
 static int iommuAmdEvtLogEntryWrite(PPDMDEVINS pDevIns, PCEVT_GENERIC_T pEvent)
 {
-    PIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
-
-    IOMMU_ASSERT_LOCKED(pDevIns);
+    PIOMMU   pThis   = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
+    PIOMMUCC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PIOMMUCC);
+
+    IOMMU_LOCK_NORET(pDevIns, pThisCC);

     /* Check if event logging is active and the log has not overflowed. */
…
         }
     }
+
+    IOMMU_UNLOCK(pDevIns, pThisCC);

     return VINF_SUCCESS;
…
     PCEVT_GENERIC_T pEvent = (PCEVT_GENERIC_T)pEvtPageTabHwErr;

-    IOMMU_LOCK_NORET(pDevIns);
+    PIOMMUCC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PIOMMUCC);
+    IOMMU_LOCK_NORET(pDevIns, pThisCC);

     iommuAmdHwErrorSet(pDevIns, (PCEVT_GENERIC_T)pEvent);
…
         iommuAmdSetPciTargetAbort(pDevIns);

-    IOMMU_UNLOCK(pDevIns);
+    IOMMU_UNLOCK(pDevIns, pThisCC);

     LogFunc(("Raised PAGE_TAB_HARDWARE_ERROR. uDevId=%#x uDomainId=%#x GCPhysPtEntity=%#RGp enmOp=%u u2Type=%u\n",
…
     PIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);

-    IOMMU_LOCK_NORET(pDevIns);
+    PIOMMUCC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PIOMMUCC);
+    IOMMU_LOCK_NORET(pDevIns, pThisCC);

     iommuAmdHwErrorSet(pDevIns, (PCEVT_GENERIC_T)pEvent);
…
     ASMAtomicAndU64(&pThis->Status.u64, ~IOMMU_STATUS_CMD_BUF_RUNNING);

-    IOMMU_UNLOCK(pDevIns);
+    IOMMU_UNLOCK(pDevIns, pThisCC);

     LogFunc(("Raised COMMAND_HARDWARE_ERROR. GCPhysCmd=%#RGp u2Type=%u\n", pEvtCmdHwErr->n.u64Addr, pEvtCmdHwErr->n.u2Type));
…
     PCEVT_GENERIC_T pEvent = (PCEVT_GENERIC_T)pEvtDevTabHwErr;

-    IOMMU_LOCK_NORET(pDevIns);
+    PIOMMUCC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PIOMMUCC);
+    IOMMU_LOCK_NORET(pDevIns, pThisCC);

     iommuAmdHwErrorSet(pDevIns, (PCEVT_GENERIC_T)pEvent);
…
     iommuAmdSetPciTargetAbort(pDevIns);

-    IOMMU_UNLOCK(pDevIns);
+    IOMMU_UNLOCK(pDevIns, pThisCC);

     LogFunc(("Raised DEV_TAB_HARDWARE_ERROR. uDevId=%#x GCPhysDte=%#RGp enmOp=%u u2Type=%u\n", pEvtDevTabHwErr->n.u16DevId,
…
     PIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);

-    IOMMU_LOCK_NORET(pDevIns);
-
     iommuAmdEvtLogEntryWrite(pDevIns, pEvent);
     ASMAtomicAndU64(&pThis->Status.u64, ~IOMMU_STATUS_CMD_BUF_RUNNING);
-
-    IOMMU_UNLOCK(pDevIns);

     LogFunc(("Raised ILLEGAL_COMMAND_ERROR. Addr=%#RGp\n", pEvtIllegalCmd->n.u64Addr));
…
     PCEVT_GENERIC_T pEvent = (PCEVT_GENERIC_T)pEvtIllegalDte;

-    IOMMU_LOCK_NORET(pDevIns);
-
     iommuAmdEvtLogEntryWrite(pDevIns, pEvent);
     if (enmOp != IOMMUOP_CMD)
         iommuAmdSetPciTargetAbort(pDevIns);
-
-    IOMMU_UNLOCK(pDevIns);

     LogFunc(("Raised ILLEGAL_DTE_EVENT. uDevId=%#x uIova=%#RX64 enmOp=%u enmEvtType=%u\n", pEvtIllegalDte->n.u16DevId,
…
 #endif

-    IOMMU_LOCK_NORET(pDevIns);
-
     bool fSuppressEvtLogging = false;
     if (   enmOp == IOMMUOP_MEM_READ
…
     }

-    IOMMU_UNLOCK(pDevIns);
-
 #undef IOMMU_DTE_CACHE_SET_PF_RAISED
 }
…
 static int iommuAmdDteRead(PPDMDEVINS pDevIns, uint16_t uDevId, IOMMUOP enmOp, PDTE_T pDte)
 {
-    PCIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
-
-    IOMMU_LOCK(pDevIns);
+    PCIOMMU  pThis   = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
+    PIOMMUCC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PIOMMUCC);
+
+    IOMMU_LOCK(pDevIns, pThisCC);

     /* Figure out which device table segment is being accessed. */
…
     uint32_t const cbDevTabSeg = (pThis->aDevTabBaseAddrs[idxSeg].n.u9Size + 1) << X86_PAGE_4K_SHIFT;

-    IOMMU_UNLOCK(pDevIns);
+    IOMMU_UNLOCK(pDevIns, pThisCC);

     if (offDte + sizeof(DTE_T) <= cbDevTabSeg)
…
     {
         /* Update that addresses requires translation (cumulative permissions of DTE and I/O page tables). */
-        IOMMU_LOCK(pDevIns);
         iommuAmdDteCacheUpdate(pDevIns, uDevId, &Dte, IOMMU_DEV_F_PRESENT | IOMMU_DEV_F_ADDR_TRANSLATE);

         iommuAmdIotlbUpdate(pDevIns, Dte.n.u16DomainId, uIova & X86_PAGE_4K_BASE_MASK, cbPages,
                             GCPhysSpa & X86_PAGE_4K_BASE_MASK, WalkResultPrev.fPerm);
-        IOMMU_UNLOCK(pDevIns);
     }
 #endif
…
 #if defined(IN_RING3) && defined(IOMMU_WITH_IOTLBE_CACHE)
         /* Update that addresses permissions of DTE apply (but omit address translation). */
-        IOMMU_LOCK(pDevIns);
         iommuAmdDteCacheUpdate(pDevIns, uDevId, &Dte, IOMMU_DEV_F_PRESENT | IOMMU_DEV_F_IO_PERM);
-        IOMMU_UNLOCK(pDevIns);
 #endif
     }
…
 #if defined(IN_RING3) && defined(IOMMU_WITH_IOTLBE_CACHE)
         /* Update that addresses don't require translation (nor permission checks) but a DTE is present. */
-        IOMMU_LOCK(pDevIns);
         iommuAmdDteCacheUpdate(pDevIns, uDevId, &Dte, IOMMU_DEV_F_PRESENT);
-        IOMMU_UNLOCK(pDevIns);
 #endif
     }
…
 {
     PIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
-
-    IOMMU_LOCK(pDevIns);
+    IOMMU_LOCK_CACHE(pDevIns, pThis);

     /* Lookup the device from the level 1 cache. */
…
     }

-    IOMMU_UNLOCK(pDevIns);
+    IOMMU_UNLOCK_CACHE(pDevIns, pThis);
     return rc;
 }
…
         if (rc == VERR_IOMMU_ADDR_ACCESS_DENIED)
             return VERR_IOMMU_ADDR_ACCESS_DENIED;
-        Assert(rc == VERR_NOT_FOUND);
+        AssertMsg(rc == VERR_NOT_FOUND, ("Cache lokoup failed: %Rrc\n", rc));
         STAM_COUNTER_INC(&pThis->StatIotlbeCacheMiss);
         /** @todo r=ramshankar: WARNING! when implementing continuing of lookups because
…
 static int iommuAmdR3CmdProcess(PPDMDEVINS pDevIns, PCCMD_GENERIC_T pCmd, RTGCPHYS GCPhysCmd, PEVT_GENERIC_T pEvtError)
 {
-    IOMMU_ASSERT_NOT_LOCKED(pDevIns);
-
-    PIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
+    PIOMMU   pThis   = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
+    PIOMMUCC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PIOMMUCC);
+
     STAM_COUNTER_INC(&pThis->StatCmd);
…
             if (pCmdComWait->n.u1Interrupt)
             {
-                IOMMU_LOCK(pDevIns);
+                IOMMU_LOCK(pDevIns, pThisCC);
                 ASMAtomicOrU64(&pThis->Status.u64, IOMMU_STATUS_COMPLETION_WAIT_INTR);
                 IOMMU_CTRL_T const Ctrl = iommuAmdGetCtrl(pThis);
                 bool const fRaiseInt = Ctrl.n.u1CompWaitIntrEn;
-                IOMMU_UNLOCK(pDevIns);
+                IOMMU_UNLOCK(pDevIns, pThisCC);

                 if (fRaiseInt)
…
                 && !(pCmdInvDte->au64[1] & ~IOMMU_CMD_INV_DTE_QWORD_1_VALID_MASK))
             {
-                IOMMU_LOCK(pDevIns);
                 iommuAmdDteCacheUpdate(pDevIns, pCmdInvDte->n.u16DevId, NULL /* pDte */, 0 /* fFlags */);
-                IOMMU_UNLOCK(pDevIns);
                 return VINF_SUCCESS;
             }
…
             }

-            IOMMU_LOCK(pDevIns);
-
             /*
              * Validate invalidation size.
…
                 /* Remove the range of I/O virtual addresses requesting to be invalidated. */
                 size_t const cbAccess = RT_BIT_64(cShift);
-                iommuAmdIotlbRemoveRange(pThis, uDomainId, uIova, cbAccess);
+                iommuAmdIotlbRemoveRange(pDevIns, uDomainId, uIova, cbAccess);
             }
             else
…
                  * In such situations we must remove all ranges for the specified domain ID.
                  */
-                iommuAmdIotlbRemoveDomainId(pThis, uDomainId);
+                iommuAmdIotlbRemoveDomainId(pDevIns, uDomainId);
             }

-            IOMMU_UNLOCK(pDevIns);
             return VINF_SUCCESS;
         }
…
                 && !(pCmdInvAll->au64[1] & ~IOMMU_CMD_INV_IOMMU_ALL_QWORD_1_VALID_MASK))
             {
-                IOMMU_LOCK(pDevIns);
                 iommuAmdDteCacheRemoveAll(pDevIns);
                 iommuAmdIotlbRemoveAll(pDevIns);
-                IOMMU_UNLOCK(pDevIns);
                 return VINF_SUCCESS;
             }
…
 static DECLCALLBACK(int) iommuAmdR3CmdThread(PPDMDEVINS pDevIns, PPDMTHREAD pThread)
 {
-    PIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
+    PIOMMU   pThis   = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
+    PIOMMUCC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PIOMMUCC);

     if (pThread->enmState == PDMTHREADSTATE_INITIALIZING)
…
              * save on host memory a bit, we could (once PGM has the necessary APIs)
              * lock the page mappings page mappings and access them directly. */
-            IOMMU_LOCK(pDevIns);
+            IOMMU_LOCK(pDevIns, pThisCC);

             IOMMU_STATUS_T const Status = iommuAmdGetStatus(pThis);
…

                 /* Allow IOMMU to do other work while we process commands. */
-                IOMMU_UNLOCK(pDevIns);
+                IOMMU_UNLOCK(pDevIns, pThisCC);

                 /* Process the fetched commands. */
…
                         iommuAmdCmdHwErrorEventRaise(pDevIns, &EvtCmdHwErr);

-                        IOMMU_UNLOCK(pDevIns);
+                        IOMMU_UNLOCK(pDevIns, pThisCC);
                     }
                 }
                 else
-                    IOMMU_UNLOCK(pDevIns);
+                    IOMMU_UNLOCK(pDevIns, pThisCC);
             }
             else
-                IOMMU_UNLOCK(pDevIns);
+                IOMMU_UNLOCK(pDevIns, pThisCC);
         }
…
     }

-    IOMMU_LOCK(pDevIns);
+    PIOMMUCC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PIOMMUCC);
+    IOMMU_LOCK(pDevIns, pThisCC);

     VBOXSTRICTRC rcStrict = VERR_IOMMU_IPE_3;
…
     }

-    IOMMU_UNLOCK(pDevIns);
+    IOMMU_UNLOCK(pDevIns, pThisCC);

     Log3Func(("uAddress=%#x (cb=%u) with %#x. rc=%Rrc\n", uAddress, cb, u32Value, VBOXSTRICTRC_VAL(rcStrict)));
…
     {
         DTE_T Dte;
-        IOMMU_LOCK_NORET(pDevIns);
         rc = iommuAmdDteRead(pDevIns, uDevId, IOMMUOP_TRANSLATE_REQ, &Dte);
-        IOMMU_UNLOCK(pDevIns);
         if (RT_SUCCESS(rc))
…
         Args.pHlp      = pHlp;
         Args.uDomainId = uDomainId;
-        IOMMU_LOCK_NORET(pDevIns);
+
+        IOMMU_LOCK_CACHE_NORET(pDevIns, pThis);
         RTAvlU64DoWithAll(&pThis->TreeIotlbe, true /* fFromLeft */, iommuAmdR3IotlbEntryInfo, &Args);
-        IOMMU_UNLOCK(pDevIns);
+        IOMMU_UNLOCK_CACHE(pDevIns, pThis);
     }
     else
…
      */
     PIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
+    PIOMMUCC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PIOMMUCC);
     PPDMPCIDEV pPciDev = pDevIns->apPciDevs[0];
     PDMPCIDEV_ASSERT_VALID(pDevIns, pPciDev);

-    IOMMU_LOCK_NORET(pDevIns);
+    IOMMU_LOCK_NORET(pDevIns, pThisCC);

     LogFlowFunc(("\n"));
…
     PDMPciDevSetCommand(pPciDev, VBOX_PCI_COMMAND_MASTER);

-    IOMMU_UNLOCK(pDevIns);
+    IOMMU_UNLOCK(pDevIns, pThisCC);
+
+#ifdef IOMMU_WITH_IOTLBE_CACHE
+    iommuAmdDteCacheRemoveAll(pDevIns);
+    iommuAmdIotlbRemoveAll(pDevIns);
+#endif
 }
…
 {
     PDMDEV_CHECK_VERSIONS_RETURN_QUIET(pDevIns);
-    PIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
+    PIOMMU   pThis   = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
+    PIOMMUCC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PIOMMUCC);
     LogFlowFunc(("\n"));

-    IOMMU_LOCK_NORET(pDevIns);
+    IOMMU_LOCK_NORET(pDevIns, pThisCC);

     /* Close the command thread semaphore. */
…
     if (pThis->paIotlbes)
     {
-        iommuAmdIotlbRemoveAll(pDevIns);
         PDMDevHlpMMHeapFree(pDevIns, pThis->paIotlbes);
         pThis->paIotlbes = NULL;
     }
 #endif

-    IOMMU_UNLOCK(pDevIns);
+    IOMMU_UNLOCK(pDevIns, pThisCC);
     return VINF_SUCCESS;
 }
…

     /*
+     * We will use PDM's critical section (via helpers) for the IOMMU device.
+     */
+    rc = PDMDevHlpSetDeviceCritSect(pDevIns, PDMDevHlpCritSectGetNop(pDevIns));
+    AssertRCReturn(rc, rc);
+
+    /*
      * Initialize read-only PCI configuration space.
      */
…

 #ifdef IOMMU_WITH_IOTLBE_CACHE
+    /*
+     * Initialize the critsect of the cache.
+     */
+    rc = PDMDevHlpCritSectInit(pDevIns, &pThis->CritSectCache, RT_SRC_POS, "IOMMUCache-#%u", pDevIns->iInstance);
+    AssertLogRelRCReturn(rc, rc);
+
     /*
      * Allocate the level 1 cache (device ID to domain ID mapping).
…
     pThisCC->CTX_SUFF(pDevIns) = pDevIns;

+    /* We will use PDM's critical section (via helpers) for the IOMMU device. */
+    int rc = PDMDevHlpSetDeviceCritSect(pDevIns, PDMDevHlpCritSectGetNop(pDevIns));
+    AssertRCReturn(rc, rc);
+
     /* Set up the MMIO RZ handlers. */
-    int rc = PDMDevHlpMmioSetUpContext(pDevIns, pThis->hMmio, iommuAmdMmioWrite, iommuAmdMmioRead, NULL /* pvUser */);
+    rc = PDMDevHlpMmioSetUpContext(pDevIns, pThis->hMmio, iommuAmdMmioWrite, iommuAmdMmioRead, NULL /* pvUser */);
     AssertRCReturn(rc, rc);
…
     rc = PDMDevHlpIommuSetUpContext(pDevIns, &IommuReg, &pThisCC->CTX_SUFF(pIommuHlp));
     AssertRCReturn(rc, rc);
     AssertPtrReturn(pThisCC->CTX_SUFF(pIommuHlp), VERR_IOMMU_IPE_1);
+    AssertReturn(pThisCC->CTX_SUFF(pIommuHlp)->u32Version == CTX_SUFF(PDM_IOMMUHLP)_VERSION, VERR_VERSION_MISMATCH);
+    AssertReturn(pThisCC->CTX_SUFF(pIommuHlp)->u32TheEnd  == CTX_SUFF(PDM_IOMMUHLP)_VERSION, VERR_VERSION_MISMATCH);
+    AssertPtrReturn(pThisCC->CTX_SUFF(pIommuHlp)->pfnLock, VERR_INVALID_POINTER);
+    AssertPtrReturn(pThisCC->CTX_SUFF(pIommuHlp)->pfnUnlock, VERR_INVALID_POINTER);
     return VINF_SUCCESS;
 }
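To summarize the locking split this file now has: register and command-buffer paths take the PDM lock through the new pfnLock/pfnUnlock helpers (IOMMU_LOCK/IOMMU_UNLOCK), while DTE/IOTLB cache accesses are serialized by the device's own CritSectCache. The function below is a sketch assembled from the macros and members defined in the diff above; its name is made up and it assumes IOMMU_WITH_IOTLBE_CACHE is defined.

    /* Sketch only: cache accesses now nest under CritSectCache rather than the PDM lock. */
    static void iommuAmdExampleCacheTouch(PPDMDEVINS pDevIns)
    {
        PIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
        IOMMU_LOCK_CACHE_NORET(pDevIns, pThis);     /* enters pThis->CritSectCache */
        /* ... read or update paDevices[] / TreeIotlbe here ... */
        IOMMU_UNLOCK_CACHE(pDevIns, pThis);         /* leaves the cache critsect */
    }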
trunk/src/VBox/VMM/VMMR0/PDMR0DevHlp.cpp
(diff r87478 → r87691)

  */

+/** @interface_method_impl{PDMIOMMUHLPR0,pfnLock} */
+static DECLCALLBACK(int) pdmR0IommuHlp_Lock(PPDMDEVINS pDevIns, int rc)
+{
+    PDMDEV_ASSERT_DEVINS(pDevIns);
+    return pdmLockEx(pDevIns->Internal.s.pGVM, rc);
+}
+
+
+/** @interface_method_impl{PDMIOMMUHLPR0,pfnUnlock} */
+static DECLCALLBACK(void) pdmR0IommuHlp_Unlock(PPDMDEVINS pDevIns)
+{
+    PDMDEV_ASSERT_DEVINS(pDevIns);
+    pdmUnlock(pDevIns->Internal.s.pGVM);
+}
+
+
 /**
  * The Ring-0 IOMMU Helper Callbacks.
…
 {
     PDM_IOMMUHLPR0_VERSION,
+    pdmR0IommuHlp_Lock,
+    pdmR0IommuHlp_Unlock,
     PDM_IOMMUHLPR0_VERSION, /* the end */
 };
trunk/src/VBox/VMM/VMMR3/PDMDevMiscHlp.cpp
(diff r87477 → r87691)


+/** @name Ring-3 IOMMU Helpers
+ * @{
+ */
+
+/** @interface_method_impl{PDMIOMMUHLPR3,pfnLock} */
+static DECLCALLBACK(int) pdmR3IommuHlp_Lock(PPDMDEVINS pDevIns, int rc)
+{
+    PDMDEV_ASSERT_DEVINS(pDevIns);
+    LogFlowFunc(("caller='%s'/%d: rc=%Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, rc));
+    return pdmLockEx(pDevIns->Internal.s.pVMR3, rc);
+}
+
+
+/** @interface_method_impl{PDMIOMMUHLPR3,pfnUnlock} */
+static DECLCALLBACK(void) pdmR3IommuHlp_Unlock(PPDMDEVINS pDevIns)
+{
+    PDMDEV_ASSERT_DEVINS(pDevIns);
+    LogFlowFunc(("caller='%s'/%d:\n", pDevIns->pReg->szName, pDevIns->iInstance));
+    pdmUnlock(pDevIns->Internal.s.pVMR3);
+}
+
+
 /**
  * IOMMU Device Helpers.
…
 {
     PDM_IOMMUHLPR3_VERSION,
+    pdmR3IommuHlp_Lock,
+    pdmR3IommuHlp_Unlock,
     PDM_IOMMUHLPR3_VERSION /* the end */
 };
+
+/** @} */
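Because both helper tables gained members and had their version constants bumped (PDM_VERSION_MAKE 1,0 → 2,0), a device binding against them is expected to validate the table before use. The ring-3 assertions below restate the checks the changeset adds to iommuAmdRZConstruct; they are illustrative (the pThisCC member name is assumed) rather than copied from the tree.

    /* Illustrative ring-3 validation of the enlarged IOMMU helper table. */
    AssertReturn(pThisCC->pIommuHlpR3->u32Version == PDM_IOMMUHLPR3_VERSION, VERR_VERSION_MISMATCH);
    AssertReturn(pThisCC->pIommuHlpR3->u32TheEnd  == PDM_IOMMUHLPR3_VERSION, VERR_VERSION_MISMATCH);
    AssertPtrReturn(pThisCC->pIommuHlpR3->pfnLock,   VERR_INVALID_POINTER);
    AssertPtrReturn(pThisCC->pIommuHlpR3->pfnUnlock, VERR_INVALID_POINTER);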