Changeset 88669 in vbox for trunk/src/VBox
Timestamp: Apr 23, 2021 7:14:48 AM (4 years ago)
File: 1 edited
Legend: Unmodified | Added | Removed
trunk/src/VBox/Devices/Bus/DevIommuIntel.cpp
r88627 r88669 48 48 AssertReturn((a_cb) == 4 || (a_cb) == 8, VINF_IOM_MMIO_UNUSED_FF); \ 49 49 AssertReturn(!((a_off) & ((a_cb) - 1)), VINF_IOM_MMIO_UNUSED_FF); \ 50 } while (0) ;50 } while (0) 51 51 52 52 /** Checks whether the MMIO offset is valid. */ … … 63 63 } while (0) 64 64 65 /** Acquires the DMAR lock and is not expected to fail. */ 66 #define DMAR_LOCK(a_pDevIns, a_pThisCC) \ 67 do { \ 68 int const rcLock = (a_pThisCC)->CTX_SUFF(pIommuHlp)->pfnLock((a_pDevIns), VERR_IGNORED); \ 69 Assert(rcLock == VINF_SUCCESS); \ 70 } while (0) 71 65 72 /** Release the DMAR lock. */ 66 73 #define DMAR_UNLOCK(a_pDevIns, a_pThisCC) (a_pThisCC)->CTX_SUFF(pIommuHlp)->pfnUnlock(a_pDevIns) 67 68 /** Checks whether the calling thread is the owner of the DMAR lock. */69 #define DMAR_LOCK_IS_OWNER(a_pDevIns, a_pThisCC) (a_pThisCC)->CTX_SUFF(pIommuHlp)->pfnLockIsOwner(a_pDevIns)70 74 71 75 /** Asserts that the calling thread owns the DMAR lock. */ … … 1028 1032 * @returns Strict VBox status code. 1029 1033 * @param pDevIns The IOMMU device instance. 1030 * @param off 1031 * @param cb 1034 * @param offReg The MMIO register offset. 1035 * @param cbReg The size of the MMIO access (in bytes). 1032 1036 * @param uCcmdReg The value written to CCMD_REG. 1033 1037 */ 1034 static VBOXSTRICTRC dmarCcmdRegWrite(PPDMDEVINS pDevIns, uint16_t off , uint8_t cb, uint64_t uCcmdReg)1038 static VBOXSTRICTRC dmarCcmdRegWrite(PPDMDEVINS pDevIns, uint16_t offReg, uint8_t cbReg, uint64_t uCcmdReg) 1035 1039 { 1036 1040 /* At present, we only care about responding to high 32-bits writes, low 32-bits are data. */ 1037 if (off + cb> VTD_MMIO_OFF_CCMD_REG + 4)1041 if (offReg + cbReg > VTD_MMIO_OFF_CCMD_REG + 4) 1038 1042 { 1039 1043 /* Check if we need to invalidate the context-context. */ … … 1069 1073 * @returns Strict VBox status code. 1070 1074 * @param pDevIns The IOMMU device instance. 1071 * @param off 1075 * @param offReg The MMIO register offset. 
1072 1076 * @param uIqtReg The value written to IQT_REG. 1073 1077 */ 1074 static VBOXSTRICTRC dmarIqtRegWrite(PPDMDEVINS pDevIns, uint16_t off , uint64_t uIqtReg)1078 static VBOXSTRICTRC dmarIqtRegWrite(PPDMDEVINS pDevIns, uint16_t offReg, uint64_t uIqtReg) 1075 1079 { 1076 1080 /* We only care about the low 32-bits, high 32-bits are reserved. */ 1077 if (off == VTD_MMIO_OFF_IQT_REG) 1081 Assert(offReg == VTD_MMIO_OFF_IQT_REG); 1082 PDMAR pThis = PDMDEVINS_2_DATA(pDevIns, PDMAR); 1083 1084 uint32_t const offQt = VTD_IQT_REG_GET_QT(uIqtReg); 1085 uint64_t const uIqaReg = dmarRegRead64(pThis, VTD_MMIO_OFF_IQA_REG); 1086 uint8_t const fDw = RT_BF_GET(uIqaReg, VTD_BF_IQA_REG_DW); 1087 1088 /* If the descriptor width is 256-bits, the queue tail offset must be aligned accordingly. */ 1089 if ( fDw != VTD_IQA_REG_DW_256_BIT 1090 || !(offQt & RT_BIT(4))) 1091 dmarInvQueueThreadWakeUpIfNeeded(pDevIns); 1092 else 1078 1093 { 1079 PDMAR pThis = PDMDEVINS_2_DATA(pDevIns, PDMAR); 1080 1081 /* Verify if the queue tail offset is aligned according to the descriptor width. */ 1082 uint32_t const offQueueTail = VTD_IQT_REG_GET_QT(uIqtReg); 1083 uint64_t const uIqaReg = dmarRegRead64(pThis, VTD_MMIO_OFF_IQA_REG); 1084 uint8_t const fDw = RT_BF_GET(uIqaReg, VTD_BF_IQA_REG_DW); 1085 if ( fDw != VTD_IQA_REG_DW_256_BIT 1086 || !(offQueueTail & 0x1f)) 1087 dmarInvQueueThreadWakeUpIfNeeded(pDevIns); 1088 else 1089 dmarIqeFaultRecord(pDevIns, kDmarDiag_IqtReg_Qt_NotAligned, kQueueTailNotAligned); 1094 /* Hardware treats bit 4 as RsvdZ here, so clear it. */ 1095 dmarRegChange32(pThis, offReg, ~RT_BIT(4) /* fAndMask*/ , 0 /* fOrMask */); 1096 dmarIqeFaultRecord(pDevIns, kDmarDiag_IqtReg_Qt_NotAligned, kQueueTailNotAligned); 1090 1097 } 1091 1098 return VINF_SUCCESS; … … 1098 1105 * @returns Strict VBox status code. 1099 1106 * @param pDevIns The IOMMU device instance. 1100 * @param off 1107 * @param offReg The MMIO register offset. 1101 1108 * @param uIqaReg The value written to IQA_REG. 
1102 1109 */ 1103 static VBOXSTRICTRC dmarIqaRegWrite(PPDMDEVINS pDevIns, uint16_t off, uint64_t uIqaReg) 1104 { 1105 /** @todo Don't allow writing this when GSTS.QIES is set? */ 1106 1110 static VBOXSTRICTRC dmarIqaRegWrite(PPDMDEVINS pDevIns, uint16_t offReg, uint64_t uIqaReg) 1111 { 1107 1112 /* At present, we only care about the low 32-bits, high 32-bits are data. */ 1108 if (off == VTD_MMIO_OFF_IQA_REG) 1113 Assert(offReg == VTD_MMIO_OFF_IQA_REG); 1114 1115 /** @todo What happens if IQA_REG is written when dmarInvQueueCanProcessRequests 1116 * returns true? The Intel VT-d spec. doesn't state anywhere that it 1117 * cannot happen or that it's ignored when it does happen. */ 1118 1119 PDMAR pThis = PDMDEVINS_2_DATA(pDevIns, PDMAR); 1120 uint8_t const fDw = RT_BF_GET(uIqaReg, VTD_BF_IQA_REG_DW); 1121 if (fDw == VTD_IQA_REG_DW_256_BIT) 1109 1122 { 1110 PDMAR pThis = PDMDEVINS_2_DATA(pDevIns, PDMAR); 1111 uint8_t const fDw = RT_BF_GET(uIqaReg, VTD_BF_IQA_REG_DW); 1112 if (fDw == VTD_IQA_REG_DW_256_BIT) 1113 { 1114 bool const fSupports256BitDw = RT_BOOL(pThis->fExtCap & (VTD_BF_ECAP_REG_SMTS_MASK | VTD_BF_ECAP_REG_ADMS_MASK)); 1115 if (fSupports256BitDw) 1116 { /* likely */ } 1117 else 1118 dmarIqeFaultRecord(pDevIns, kDmarDiag_IqaReg_Dw_Invalid, kInvalidDescriptorWidth); 1119 } 1123 bool const fSupports256BitDw = (pThis->fExtCap & (VTD_BF_ECAP_REG_SMTS_MASK | VTD_BF_ECAP_REG_ADMS_MASK)); 1124 if (fSupports256BitDw) 1125 { /* likely */ } 1126 else 1127 dmarIqeFaultRecord(pDevIns, kDmarDiag_IqaReg_Dw_Invalid, kInvalidDescriptorWidth); 1120 1128 } 1121 1129 return VINF_SUCCESS; … … 1201 1209 if (DMAR_IS_MMIO_OFF_VALID(offLast)) 1202 1210 { 1211 PCDMARCC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PCDMARCC); 1212 DMAR_LOCK_RET(pDevIns, pThisCC, VINF_IOM_R3_MMIO_WRITE); 1213 1203 1214 uint64_t const uRegWritten = cb == 8 ? 
dmarRegWrite64(pThis, offReg, *(uint64_t *)pv) 1204 1215 : dmarRegWrite32(pThis, offReg, *(uint32_t *)pv); … … 1214 1225 1215 1226 case VTD_MMIO_OFF_IQT_REG: 1216 case VTD_MMIO_OFF_IQT_REG + 4:1227 /* VTD_MMIO_OFF_IQT_REG + 4: (RsvdZ) */ 1217 1228 { 1218 1229 rcStrict = dmarIqtRegWrite(pDevIns, offReg, uRegWritten); … … 1221 1232 1222 1233 case VTD_MMIO_OFF_IQA_REG: 1223 case VTD_MMIO_OFF_IQA_REG + 4:1234 /* VTD_MMIO_OFF_IQA_REG + 4: (Data) */ 1224 1235 { 1225 1236 rcStrict = dmarIqaRegWrite(pDevIns, offReg, uRegWritten); … … 1228 1239 } 1229 1240 1241 DMAR_UNLOCK(pDevIns, pThisCC); 1230 1242 LogFlowFunc(("offReg=%#x rc=%Rrc\n", offReg, VBOXSTRICTRC_VAL(rcStrict))); 1231 1243 return rcStrict; … … 1251 1263 if (DMAR_IS_MMIO_OFF_VALID(offLast)) 1252 1264 { 1265 PCDMARCC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PCDMARCC); 1266 DMAR_LOCK_RET(pDevIns, pThisCC, VINF_IOM_R3_MMIO_READ); 1267 1253 1268 if (cb == 8) 1254 1269 { … … 1262 1277 } 1263 1278 1279 DMAR_UNLOCK(pDevIns, pThisCC); 1264 1280 return VINF_SUCCESS; 1265 1281 } … … 1307 1323 */ 1308 1324 DMAR_LOCK_RET(pDevIns, pThisR3, VERR_IGNORED); 1325 /** @todo use dmarInvQueueCanProcessRequests instead? 
*/ 1309 1326 uint32_t const uGstsReg = dmarRegRead32(pThis, VTD_MMIO_OFF_GSTS_REG); 1310 1327 DMAR_UNLOCK(pDevIns, pThisR3); … … 1504 1521 LogFlowFunc(("\n")); 1505 1522 1523 PCDMARR3 pThisR3 = PDMDEVINS_2_DATA_CC(pDevIns, PCDMARR3); 1524 DMAR_LOCK(pDevIns, pThisR3); 1525 1506 1526 dmarR3RegsInit(pDevIns); 1527 1528 DMAR_UNLOCK(pDevIns, pThisR3); 1507 1529 } 1508 1530 … … 1513 1535 static DECLCALLBACK(int) iommuIntelR3Destruct(PPDMDEVINS pDevIns) 1514 1536 { 1515 PDMAR pThis = PDMDEVINS_2_DATA(pDevIns, PDMAR); 1537 PDMAR pThis = PDMDEVINS_2_DATA(pDevIns, PDMAR); 1538 PCDMARR3 pThisR3 = PDMDEVINS_2_DATA_CC(pDevIns, PCDMARR3); 1516 1539 LogFlowFunc(("\n")); 1540 1541 DMAR_LOCK_RET(pDevIns, pThisR3, VERR_IGNORED); 1517 1542 1518 1543 if (pThis->hEvtInvQueue != NIL_SUPSEMEVENT) … … 1522 1547 } 1523 1548 1549 DMAR_UNLOCK(pDevIns, pThisR3); 1524 1550 return VINF_SUCCESS; 1525 1551 }
Note: See TracChangeset for help on using the changeset viewer.