Changeset 88789 in vbox for trunk/src/VBox
- Timestamp: Apr 30, 2021 7:40:47 AM (4 years ago)
- File: 1 edited
Legend:
- Unmodified (context lines, no prefix)
- Added (lines prefixed with +)
- Removed (lines prefixed with -)
trunk/src/VBox/Devices/Bus/DevIommuIntel.cpp
--- trunk/src/VBox/Devices/Bus/DevIommuIntel.cpp (r88784)
+++ trunk/src/VBox/Devices/Bus/DevIommuIntel.cpp (r88789)
@@ -78,4 +78,11 @@
     do { \
         Assert((a_pThisCC)->CTX_SUFF(pIommuHlp)->pfnLockIsOwner(a_pDevIns)); \
+        RT_NOREF1(a_pThisCC); \
+    } while (0)
+
+/** Asserts that the calling thread does not own the DMAR lock. */
+#define DMAR_ASSERT_LOCK_IS_NOT_OWNER(a_pDevIns, a_pThisCC) \
+    do { \
+        Assert((a_pThisCC)->CTX_SUFF(pIommuHlp)->pfnLockIsOwner(a_pDevIns) == false); \
         RT_NOREF1(a_pThisCC); \
     } while (0)
@@ -148,9 +155,11 @@
     kDmarDiag_CcmdReg_Ttm_Invalid,
     kDmarDiag_IqaReg_Dsc_Fetch_Error,
-    kDmarDiag_IqaReg_Dw_Invalid,
+    kDmarDiag_IqaReg_Dw_128_Invalid,
+    kDmarDiag_IqaReg_Dw_256_Invalid,
     kDmarDiag_Iqei_Dsc_Type_Invalid,
     kDmarDiag_Iqei_Inv_Wait_Dsc_0_1_Rsvd,
     kDmarDiag_Iqei_Inv_Wait_Dsc_2_3_Rsvd,
-    kDmarDiag_Iqei_Inv_Wait_Dsc_Ttm,
+    kDmarDiag_Iqei_Inv_Wait_Dsc_Invalid,
+    kDmarDiag_Iqei_Ttm_Rsvd,
     kDmarDiag_IqtReg_Qt_Invalid,
     kDmarDiag_IqtReg_Qt_NotAligned,
@@ -175,9 +184,11 @@
     DMARDIAG_DESC(CcmdReg_Ttm_Invalid       ),
     DMARDIAG_DESC(IqaReg_Dsc_Fetch_Error    ),
-    DMARDIAG_DESC(IqaReg_Dw_Invalid         ),
+    DMARDIAG_DESC(IqaReg_Dw_128_Invalid     ),
+    DMARDIAG_DESC(IqaReg_Dw_256_Invalid     ),
     DMARDIAG_DESC(Iqei_Dsc_Type_Invalid     ),
     DMARDIAG_DESC(Iqei_Inv_Wait_Dsc_0_1_Rsvd),
     DMARDIAG_DESC(Iqei_Inv_Wait_Dsc_2_3_Rsvd),
-    DMARDIAG_DESC(Iqei_Inv_Wait_Dsc_Ttm     ),
+    DMARDIAG_DESC(Iqei_Inv_Wait_Dsc_Invalid ),
+    DMARDIAG_DESC(Iqei_Ttm_Rsvd             ),
     DMARDIAG_DESC(IqtReg_Qt_Invalid         ),
     DMARDIAG_DESC(IqtReg_Qt_NotAligned      )
@@ -211,5 +222,5 @@
     uint64_t uIrtaReg;
     /** Currently active RTADDR_REG. */
-    uint64_t uRtaReg;
+    uint64_t uRtaddrReg;
     /** @} */
 
@@ -221,7 +232,7 @@
     uint8_t abPadding[7];
     /** Copy of CAP_REG. */
-    uint64_t fCap;
+    uint64_t fCapReg;
     /** Copy of ECAP_REG. */
-    uint64_t fExtCap;
+    uint64_t fExtCapReg;
     /** @} */
 
@@ -900,17 +911,4 @@
 
 /**
- * Gets the table translation mode from the RTADDR_REG.
- *
- * @returns The table translation mode.
- * @param   pThis   The shared DMAR device state.
- */
-static uint8_t dmarRtAddrRegGetTtm(PCDMAR pThis)
-{
-    uint64_t const uRtAddrReg = dmarRegRead64(pThis, VTD_MMIO_OFF_RTADDR_REG);
-    return RT_BF_GET(uRtAddrReg, VTD_BF_RTADDR_REG_TTM);
-}
-
-
-/**
  * Checks if the invalidation-queue is empty.
  *
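Both lock assertion macros wrap their bodies in do { ... } while (0) so that each use expands to exactly one statement and composes safely with unbraced if/else. A minimal standalone sketch of the idiom, using an invented lock type rather than the real IPRT/PDM helpers:

    #include <assert.h>
    #include <stdbool.h>

    /* Hypothetical lock object, for illustration only. */
    typedef struct MYLOCK { bool fOwned; } MYLOCK;
    static bool myLockIsOwner(MYLOCK const *pLock) { return pLock->fOwned; }

    /* The do/while (0) wrapper makes a multi-statement macro act like a single
       statement, so "if (x) MY_ASSERT_LOCK_IS_OWNER(&Lock); else ..." parses
       as intended. */
    #define MY_ASSERT_LOCK_IS_OWNER(a_pLock) \
        do { \
            assert(myLockIsOwner(a_pLock)); \
            (void)(a_pLock); /* keeps release builds warning-free, like RT_NOREF1 */ \
        } while (0)

    #define MY_ASSERT_LOCK_IS_NOT_OWNER(a_pLock) \
        do { \
            assert(myLockIsOwner(a_pLock) == false); \
            (void)(a_pLock); \
        } while (0)

    int main(void)
    {
        MYLOCK Lock = { true };
        MY_ASSERT_LOCK_IS_OWNER(&Lock);
        Lock.fOwned = false;
        MY_ASSERT_LOCK_IS_NOT_OWNER(&Lock);
        return 0;
    }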
@@ -1004,6 +1002,5 @@
 static void dmarFaultEventRaiseInterrupt(PPDMDEVINS pDevIns)
 {
-    PDMAR    pThis   = PDMDEVINS_2_DATA(pDevIns, PDMAR);
-    PCDMARCC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PCDMARCC);
+    PDMAR pThis = PDMDEVINS_2_DATA(pDevIns, PDMAR);
 #ifdef RT_STRICT
     {
@@ -1027,5 +1024,6 @@
 
     /** @todo Assert Msi.Addr is in the MSR_IA32_APICBASE_ADDR range and ensure on
-     *        FEADD_REG write it can't be anything else. */
+     *        FEADD_REG write it can't be anything else? */
+    PCDMARCC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PCDMARCC);
     pThisCC->CTX_SUFF(pIommuHlp)->pfnSendMsi(pDevIns, &Msi, 0 /* uTagSrc */);
 
@@ -1051,7 +1049,5 @@
 static void dmarR3InvEventRaiseInterrupt(PPDMDEVINS pDevIns)
 {
-    PDMAR    pThis   = PDMDEVINS_2_DATA(pDevIns, PDMAR);
-    PCDMARCC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PCDMARCC);
-
+    PDMAR pThis = PDMDEVINS_2_DATA(pDevIns, PDMAR);
     uint32_t const uIcsReg = dmarRegReadRaw32(pThis, VTD_MMIO_OFF_ICS_REG);
     if (uIcsReg & VTD_BF_ICS_REG_IWC_MASK)
@@ -1067,4 +1063,5 @@
     Msi.Data.u32 = dmarRegReadRaw32(pThis, VTD_MMIO_OFF_IEDATA_REG);
 
+    PCDMARCC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PCDMARCC);
     pThisCC->CTX_SUFF(pIommuHlp)->pfnSendMsi(pDevIns, &Msi, 0 /* uTagSrc */);
 
@@ -1161,12 +1158,12 @@
 {
     PDMAR pThis = PDMDEVINS_2_DATA(pDevIns, PDMAR);
-    uint32_t const uGstsReg = dmarRegReadRaw32(pThis, VTD_MMIO_OFF_GSTS_REG);
-    uint32_t const fChanged = uGstsReg ^ uGcmdReg;
-    uint64_t const fExtCap  = pThis->fExtCap;
+    uint32_t const uGstsReg   = dmarRegReadRaw32(pThis, VTD_MMIO_OFF_GSTS_REG);
+    uint32_t const fChanged   = uGstsReg ^ uGcmdReg;
+    uint64_t const fExtCapReg = pThis->fExtCapReg;
 
     /*
      * Queued-invalidation.
      */
-    if (   (fExtCap & VTD_BF_ECAP_REG_QI_MASK)
+    if (   (fExtCapReg & VTD_BF_ECAP_REG_QI_MASK)
         && (fChanged & VTD_BF_GCMD_REG_QIE_MASK))
     {
@@ -1186,11 +1183,25 @@
 
     /*
-     * Set interrupt remapping table pointer.
+     * Set Interrupt Remapping Table Pointer (SIRTP).
      */
-    if (   (fExtCap & VTD_BF_ECAP_REG_IR_MASK)
+    if (   (fExtCapReg & VTD_BF_ECAP_REG_IR_MASK)
         && (uGcmdReg & VTD_BF_GCMD_REG_SIRTP_MASK))
     {
         pThis->uIrtaReg = dmarRegReadRaw64(pThis, VTD_MMIO_OFF_IRTA_REG);
         dmarRegChangeRaw32(pThis, VTD_MMIO_OFF_GSTS_REG, UINT32_MAX /* fAndMask */, VTD_BF_GSTS_REG_IRTPS_MASK /* fOrMask */);
+    }
+
+    /*
+     * Set Root Table Pointer (SRTP).
+     */
+    if (uGcmdReg & VTD_BF_GCMD_REG_SRTP_MASK)
+    {
+        /** @todo Perform global invalidation of all remapping translation caches. */
+#if 0
+        if (pThis->fCapReg & VTD_BF_CAP_REG_ESRTPS_MASK)
+        {
+        }
+#endif
+        pThis->uRtaddrReg = dmarRegReadRaw64(pThis, VTD_MMIO_OFF_RTADDR_REG);
     }
 
@@ -1228,5 +1239,5 @@
     {
         /* Verify table translation mode is legacy. */
-        uint8_t const fTtm = dmarRtAddrRegGetTtm(pThis);
+        uint8_t const fTtm = RT_BF_GET(pThis->uRtaddrReg, VTD_BF_RTADDR_REG_TTM);
         if (fTtm == VTD_TTM_LEGACY_MODE)
         {
@@ -1304,10 +1315,12 @@
     if (fDw == VTD_IQA_REG_DW_256_BIT)
     {
-        bool const fSupports256BitDw = (pThis->fExtCap & (VTD_BF_ECAP_REG_SMTS_MASK | VTD_BF_ECAP_REG_ADMS_MASK));
+        bool const fSupports256BitDw = (pThis->fExtCapReg & (VTD_BF_ECAP_REG_SMTS_MASK | VTD_BF_ECAP_REG_ADMS_MASK));
         if (fSupports256BitDw)
         { /* likely */ }
         else
-            dmarIqeFaultRecord(pDevIns, kDmarDiag_IqaReg_Dw_Invalid, kIqei_InvalidDescriptorWidth);
-    }
+            dmarIqeFaultRecord(pDevIns, kDmarDiag_IqaReg_Dw_256_Invalid, kIqei_InvalidDescriptorWidth);
+    }
+    /* else: 128-bit descriptor width is validated lazily, see explanation in dmarR3InvQueueProcessRequests. */
+
     return VINF_SUCCESS;
 }
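The new SRTP block is a latch: the value software last wrote to RTADDR_REG becomes the actively used root-table pointer (uRtaddrReg) only when the guest sets GCMD_REG.SRTP, which is also why the CCMD and invalidation-queue paths can now read pThis->uRtaddrReg instead of re-reading the raw register. A simplified sketch of the latch-on-command pattern; the names and the bit position here are invented for the example:

    #include <stdint.h>

    #define MY_GCMD_SRTP  (UINT32_C(1) << 30)   /* hypothetical "set root table pointer" bit */

    typedef struct MYDEVSTATE
    {
        uint64_t uRtaddrRaw;     /* what software last wrote to RTADDR_REG */
        uint64_t uRtaddrActive;  /* what the device actually translates with */
    } MYDEVSTATE;

    /* On a command-register write, latch the raw pointer only when SRTP is set,
       so in-flight operations keep seeing one stable root-table address. */
    static void myGcmdWrite(MYDEVSTATE *pThis, uint32_t uGcmd)
    {
        if (uGcmd & MY_GCMD_SRTP)
            pThis->uRtaddrActive = pThis->uRtaddrRaw;
    }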
@@ -1483,11 +1496,53 @@
  * @param   fDw         The descriptor width (VTD_IQA_REG_DW_128_BIT or
  *                      VTD_IQA_REG_DW_256_BIT).
- */
-static void dmarR3InvQueueProcessRequests(PPDMDEVINS pDevIns, void const *pvRequests, uint32_t cbRequests, uint8_t fDw)
-{
+ * @param   fTtm        The current table translation mode. Must not be
+ *                      VTD_TTM_RSVD.
+ */
+static void dmarR3InvQueueProcessRequests(PPDMDEVINS pDevIns, void const *pvRequests, uint32_t cbRequests, uint8_t fDw,
+                                          uint8_t fTtm)
+{
+#define DMAR_IQE_FAULT_RECORD_RET(a_enmDiag, a_enmIqei) \
+    do \
+    { \
+        DMAR_LOCK(pDevIns, pThisR3); \
+        dmarIqeFaultRecord(pDevIns, (a_enmDiag), (a_enmIqei)); \
+        DMAR_UNLOCK(pDevIns, pThisR3); \
+        return; \
+    } while (0)
+
     PCDMAR   pThis   = PDMDEVINS_2_DATA(pDevIns, PDMAR);
     PCDMARR3 pThisR3 = PDMDEVINS_2_DATA_CC(pDevIns, PCDMARR3);
-    DMAR_ASSERT_LOCK_IS_OWNER(pDevIns, pThisR3);
-
+
+    DMAR_ASSERT_LOCK_IS_NOT_OWNER(pDevIns, pThisR3);
+    Assert(fTtm != VTD_TTM_RSVD); /* Should've been handled by caller. */
+
+    /*
+     * The below check is redundant since we check both TTM and DW for each
+     * descriptor type we process. However, the error reported by hardware
+     * may differ, hence this is kept commented out but not removed from the
+     * code in case we need to change this in the future.
+     *
+     * In our implementation, we would report the descriptor type as invalid,
+     * while on real hardware it may report descriptor width as invalid.
+     * The Intel VT-d spec. is not clear which error takes precedence.
+     */
+#if 0
+    /*
+     * Verify that 128-bit descriptors are not used when operating in scalable mode.
+     * We don't check this while software writes IQA_REG but defer it until now because
+     * RTADDR_REG can be updated lazily (via GCMD_REG.SRTP). The 256-bit descriptor check
+     * -IS- performed when software writes IQA_REG since it only requires checking against
+     * immutable hardware features.
+     */
+    if (   fTtm != VTD_TTM_SCALABLE_MODE
+        || fDw != VTD_IQA_REG_DW_128_BIT)
+    { /* likely */ }
+    else
+        DMAR_IQE_FAULT_RECORD_RET(kDmarDiag_IqaReg_Dw_128_Invalid, kIqei_InvalidDescriptorWidth);
+#endif
+
+    /*
+     * Process requests in FIFO order.
+     */
     uint8_t const cbDsc = fDw == VTD_IQA_REG_DW_256_BIT ? 32 : 16;
     for (uint32_t offDsc = 0; offDsc < cbRequests; offDsc += cbDsc)
@@ -1497,7 +1552,4 @@
         uint64_t const uQword1  = puDscQwords[1];
         uint8_t const  fDscType = VTD_GENERIC_INV_DSC_GET_TYPE(uQword0);
-        uint8_t const  fTtm     = dmarRtAddrRegGetTtm(pThis);
-        Assert(fTtm != VTD_TTM_RSVD); /* Should be guaranteed when software updates GCMD_REG.SRTP. */
-
         switch (fDscType)
         {
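With fTtm now passed in by the caller, the loop no longer re-reads RTADDR_REG per descriptor; it just walks the raw buffer in steps of the configured descriptor size (16 bytes for 128-bit descriptors, 32 for 256-bit). A stripped-down sketch of that walk; the descriptor layout and the type-field position are simplified, not the exact VT-d encoding:

    #include <stdint.h>

    /* Walk a buffer of fixed-size invalidation descriptors and dispatch on a
       type field kept in the low bits of the first qword (simplified). */
    static void walkDescriptors(void const *pvRequests, uint32_t cbRequests, int f256Bit)
    {
        uint8_t const cbDsc = f256Bit ? 32 : 16;
        for (uint32_t offDsc = 0; offDsc < cbRequests; offDsc += cbDsc)
        {
            uint64_t const *puQwords = (uint64_t const *)((uintptr_t)pvRequests + offDsc);
            uint8_t const   fType    = (uint8_t)(puQwords[0] & 0xf);
            switch (fType)
            {
                case 0x5:   /* e.g. an invalidation-wait descriptor in this sketch */
                    /* ... validate and complete the wait ... */
                    break;
                default:    /* invalid type: record a fault and stop, as above */
                    return;
            }
        }
    }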
@@ -1509,16 +1561,13 @@
             case VTD_INV_WAIT_DSC_TYPE:
             {
-                /* Validate translation modes valid for this descriptor. */
+                /* Validate descriptor type. */
                 if (   fTtm == VTD_TTM_LEGACY_MODE
                     || fDw == VTD_IQA_REG_DW_256_BIT)
                 { /* likely */ }
                 else
-                {
-                    dmarIqeFaultRecord(pDevIns, kDmarDiag_Iqei_Inv_Wait_Dsc_Ttm, kIqei_InvalidDescriptorType);
-                    return;
-                }
+                    DMAR_IQE_FAULT_RECORD_RET(kDmarDiag_Iqei_Inv_Wait_Dsc_Invalid, kIqei_InvalidDescriptorType);
 
                 /* Validate reserved bits. */
-                uint64_t const fValidMask0 = !(pThis->fExtCap & VTD_BF_ECAP_REG_PDS_MASK)
+                uint64_t const fValidMask0 = !(pThis->fExtCapReg & VTD_BF_ECAP_REG_PDS_MASK)
                                            ? VTD_INV_WAIT_DSC_0_VALID_MASK & ~VTD_BF_0_INV_WAIT_DSC_PD_MASK
                                            : VTD_INV_WAIT_DSC_0_VALID_MASK;
@@ -1527,19 +1576,12 @@
                 { /* likely */ }
                 else
-                {
-                    dmarIqeFaultRecord(pDevIns, kDmarDiag_Iqei_Inv_Wait_Dsc_0_1_Rsvd, kIqei_RsvdFieldViolation);
-                    return;
-                }
+                    DMAR_IQE_FAULT_RECORD_RET(kDmarDiag_Iqei_Inv_Wait_Dsc_0_1_Rsvd, kIqei_RsvdFieldViolation);
+
                 if (fDw == VTD_IQA_REG_DW_256_BIT)
                 {
-                    uint64_t const uQword2 = puDscQwords[2];
-                    uint64_t const uQword3 = puDscQwords[3];
-                    if (   !uQword2
-                        && !uQword3)
+                    if (   !puDscQwords[2]
+                        && !puDscQwords[3])
                     { /* likely */ }
                     else
-                    {
-                        dmarIqeFaultRecord(pDevIns, kDmarDiag_Iqei_Inv_Wait_Dsc_2_3_Rsvd, kIqei_RsvdFieldViolation);
-                        return;
-                    }
+                        DMAR_IQE_FAULT_RECORD_RET(kDmarDiag_Iqei_Inv_Wait_Dsc_2_3_Rsvd, kIqei_RsvdFieldViolation);
                 }
 
@@ -1551,7 +1593,5 @@
                     uint32_t const uStatus      = RT_BF_GET(uQword0, VTD_BF_0_INV_WAIT_DSC_STDATA);
                     RTGCPHYS const GCPhysStatus = uQword1 & VTD_BF_1_INV_WAIT_DSC_STADDR_MASK;
-                    DMAR_UNLOCK(pDevIns, pThisR3);
                     int const rc = PDMDevHlpPhysWrite(pDevIns, GCPhysStatus, (void const *)&uStatus, sizeof(uStatus));
-                    DMAR_LOCK(pDevIns, pThisR3);
                     AssertRC(rc);
                 }
@@ -1560,5 +1600,9 @@
                 bool const fIf = RT_BF_GET(uQword0, VTD_BF_0_INV_WAIT_DSC_IF);
                 if (fIf)
+                {
+                    DMAR_LOCK(pDevIns, pThisR3);
                     dmarR3InvEventRaiseInterrupt(pDevIns);
+                    DMAR_UNLOCK(pDevIns, pThisR3);
+                }
                 break;
             }
@@ -1575,11 +1619,10 @@
                 /* Stop processing further requests. */
                 LogFunc(("Invalid descriptor type: %#x\n", fDscType));
-                dmarIqeFaultRecord(pDevIns, kDmarDiag_Iqei_Dsc_Type_Invalid, kIqei_InvalidDescriptorType);
-                return;
+                DMAR_IQE_FAULT_RECORD_RET(kDmarDiag_Iqei_Dsc_Type_Invalid, kIqei_InvalidDescriptorType);
             }
         }
     }
-}
-
+#undef DMAR_IQE_FAULT_RECORD_RET
+}
 
 
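An invalidation-wait descriptor offers two optional completion signals the device may deliver: a status write of a 32-bit value to a guest-physical address, and/or an invalidation-event interrupt. A compact device-side sketch of that protocol; the bit positions are approximations for illustration, and the callbacks stand in for PDMDevHlpPhysWrite and the interrupt helper used above:

    #include <stddef.h>
    #include <stdint.h>

    typedef int  (*PFNPHYSWRITE)(uint64_t GCPhys, void const *pv, size_t cb);
    typedef void (*PFNRAISEIRQ)(void);

    /* Complete an invalidation-wait: store the status data if requested,
       then raise the invalidation-event interrupt if requested. */
    static void invWaitComplete(uint64_t uQword0, uint64_t uQword1,
                                PFNPHYSWRITE pfnPhysWrite, PFNRAISEIRQ pfnRaiseIrq)
    {
        int const fInterrupt   = (int)((uQword0 >> 4) & 1);  /* IF bit (illustrative position) */
        int const fStatusWrite = (int)((uQword0 >> 5) & 1);  /* SW bit (illustrative position) */
        if (fStatusWrite)
        {
            uint32_t const uStatus    = (uint32_t)(uQword0 >> 32);  /* status data */
            uint64_t const GCPhysAddr = uQword1 & ~UINT64_C(3);     /* status address, low bits reserved */
            pfnPhysWrite(GCPhysAddr, &uStatus, sizeof(uStatus));
        }
        if (fInterrupt)
            pfnRaiseIrq();
    }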
@@ -1612,9 +1655,9 @@
     AssertPtrReturn(pvRequests, VERR_NO_MEMORY);
 
+    PDMAR    pThis   = PDMDEVINS_2_DATA(pDevIns, PDMAR);
+    PCDMARR3 pThisR3 = PDMDEVINS_2_DATA_CC(pDevIns, PCDMARR3);
+
     while (pThread->enmState == PDMTHREADSTATE_RUNNING)
     {
-        PDMAR    pThis   = PDMDEVINS_2_DATA(pDevIns, PDMAR);
-        PCDMARR3 pThisR3 = PDMDEVINS_2_DATA_CC(pDevIns, PCDMARR3);
-
         /*
          * Sleep until we are woken up.
@@ -1638,17 +1681,17 @@
         if (!fIsEmpty)
         {
-            /** @todo Handle RTADDR_REG MMIO write first, for handling kIqei_InvalidTtm. I
-             *        don't think it needs to be checked/handled here? */
-
             /*
-             * Get the current queue size.
+             * Get the current queue size, descriptor width, queue base address and the
+             * table translation mode while the lock is still held.
              */
-            uint64_t const uIqaReg     = dmarRegReadRaw64(pThis, VTD_MMIO_OFF_IQA_REG);
-            uint8_t const  cQueuePages = 1 << (uIqaReg & VTD_BF_IQA_REG_QS_MASK);
-            uint32_t const cbQueue     = cQueuePages << X86_PAGE_SHIFT;
-            uint8_t const  fDw         = RT_BF_GET(uIqaReg, VTD_BF_IQA_REG_DW);
+            uint64_t const uIqaReg        = dmarRegReadRaw64(pThis, VTD_MMIO_OFF_IQA_REG);
+            uint8_t const  cQueuePages    = 1 << (uIqaReg & VTD_BF_IQA_REG_QS_MASK);
+            uint32_t const cbQueue        = cQueuePages << X86_PAGE_SHIFT;
+            uint8_t const  fDw            = RT_BF_GET(uIqaReg, VTD_BF_IQA_REG_DW);
+            uint8_t const  fTtm           = RT_BF_GET(pThis->uRtaddrReg, VTD_BF_RTADDR_REG_TTM);
+            RTGCPHYS const GCPhysRequests = (uIqaReg & VTD_BF_IQA_REG_IQA_MASK) + offQueueHead;
 
             /* Paranoia. */
-            Assert(cbQueue <= cbMaxQs);
+            Assert(cbQueue <= cbMaxQs); NOREF(cbMaxQs);
             Assert(!(offQueueTail & ~VTD_IQT_REG_RW_MASK));
             Assert(!(offQueueHead & ~VTD_IQH_REG_RW_MASK));
@@ -1658,11 +1701,16 @@
 
             /*
-             * Read the requests in the queue from guest memory into our buffer.
+             * A table translation mode of "reserved" isn't valid for any descriptor type.
+             * However, RTADDR_REG can be modified in parallel to invalidation-queue processing,
+             * but if ESRTPS is supported, we will perform a global invalidation when software
+             * changes RTADDR_REG, or it's the responsibility of software to do it explicitly.
+             * So caching TTM while reading all descriptors should not be a problem.
+             *
+             * Also, validate the queue tail offset as it's mutable by software.
              */
-            if (offQueueTail < cbQueue)
+            if (   fTtm != VTD_TTM_RSVD
+                && offQueueTail < cbQueue)
             {
-                RTGCPHYS const GCPhysRequests = (uIqaReg & VTD_BF_IQA_REG_IQA_MASK) + offQueueHead;
-
-                /* Don't hold the lock while reading (potentially large amount of) requests. */
+                /* Don't hold the lock while reading (a potentially large amount of) requests. */
                 DMAR_UNLOCK(pDevIns, pThisR3);
 
@@ -1699,7 +1747,19 @@
                     dmarRegWriteRaw64(pThis, VTD_MMIO_OFF_IQH_REG, offQueueTail);
 
-                    /* Process all requests (in FIFO order). */
+                    /* Don't hold the lock while processing requests. */
+                    DMAR_UNLOCK(pDevIns, pThisR3);
+
+                    /* Process all requests. */
                     Assert(cbRequests <= cbQueue);
-                    dmarR3InvQueueProcessRequests(pDevIns, pvRequests, cbRequests, fDw);
+                    dmarR3InvQueueProcessRequests(pDevIns, pvRequests, cbRequests, fDw, fTtm);
+
+                    /*
+                     * We've processed all requests and the lock shouldn't be held at this point.
+                     * Instead of re-acquiring the lock just to release it again, we go back to
+                     * the thread loop using 'continue' here. It's a bit ugly but it certainly
+                     * helps with performance.
+                     */
+                    DMAR_ASSERT_LOCK_IS_NOT_OWNER(pDevIns, pThisR3);
+                    continue;
                 }
                 else
@@ -1707,5 +1767,13 @@
             }
             else
-                dmarIqeFaultRecord(pDevIns, kDmarDiag_IqtReg_Qt_Invalid, kIqei_InvalidTailPointer);
+            {
+                if (fTtm == VTD_TTM_RSVD)
+                    dmarIqeFaultRecord(pDevIns, kDmarDiag_Iqei_Ttm_Rsvd, kIqei_InvalidTtm);
+                else
+                {
+                    Assert(offQueueTail >= cbQueue);
+                    dmarIqeFaultRecord(pDevIns, kDmarDiag_IqtReg_Qt_Invalid, kIqei_InvalidTailPointer);
+                }
+            }
         }
     }
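The reworked worker loop snapshots everything it needs (queue base, queue size, descriptor width, TTM) in one pass while holding the device lock, then drops the lock for the guest-memory copy and the descriptor processing. The general snapshot-then-release pattern, sketched with POSIX threads and invented fields:

    #include <pthread.h>
    #include <stdint.h>

    typedef struct MYWORKER
    {
        pthread_mutex_t Lock;
        uint64_t        uQueueBase;  /* mutable by another thread ("software") */
        uint32_t        offTail;
    } MYWORKER;

    /* Take a consistent snapshot under the lock, then do the slow work without
       it, so register writers are never blocked behind a long processing run. */
    static void myWorkerProcess(MYWORKER *pW)
    {
        pthread_mutex_lock(&pW->Lock);
        uint64_t const uBase   = pW->uQueueBase;
        uint32_t const offTail = pW->offTail;
        pthread_mutex_unlock(&pW->Lock);

        /* ... long-running copy/processing using only uBase and offTail ... */
        (void)uBase; (void)offTail;
    }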
@@ -2052,26 +2120,27 @@
     uint8_t const  uSagaw = vtdCapRegGetSagaw(uMgaw);        /* Supported adjusted guest address width. */
     uint16_t const offFro = DMAR_MMIO_OFF_FRCD_LO_REG >> 4;  /* MMIO offset of FRCD registers. */
-
-    pThis->fCap = RT_BF_MAKE(VTD_BF_CAP_REG_ND,      fNd)
-                | RT_BF_MAKE(VTD_BF_CAP_REG_AFL,     0)    /* Advanced fault logging not supported. */
-                | RT_BF_MAKE(VTD_BF_CAP_REG_RWBF,    0)    /* Software need not flush write-buffers. */
-                | RT_BF_MAKE(VTD_BF_CAP_REG_PLMR,    0)    /* Protected Low-Memory Region not supported. */
-                | RT_BF_MAKE(VTD_BF_CAP_REG_PHMR,    0)    /* Protected High-Memory Region not supported. */
-                | RT_BF_MAKE(VTD_BF_CAP_REG_CM,      1)    /** @todo Figure out if required when we impl. caching. */
-                | RT_BF_MAKE(VTD_BF_CAP_REG_SAGAW,   fSlts & uSagaw)
-                | RT_BF_MAKE(VTD_BF_CAP_REG_MGAW,    uMgaw)
-                | RT_BF_MAKE(VTD_BF_CAP_REG_ZLR,     1)    /** @todo Figure out if/how to support zero-length reads. */
-                | RT_BF_MAKE(VTD_BF_CAP_REG_FRO,     offFro)
-                | RT_BF_MAKE(VTD_BF_CAP_REG_SLLPS,   fSlts & fSllps)
-                | RT_BF_MAKE(VTD_BF_CAP_REG_PSI,     fPsi)
-                | RT_BF_MAKE(VTD_BF_CAP_REG_NFR,     DMAR_FRCD_REG_COUNT - 1)
-                | RT_BF_MAKE(VTD_BF_CAP_REG_MAMV,    fPsi & fMamv)
-                | RT_BF_MAKE(VTD_BF_CAP_REG_DWD,     1)
-                | RT_BF_MAKE(VTD_BF_CAP_REG_DRD,     1)
-                | RT_BF_MAKE(VTD_BF_CAP_REG_FL1GP,   fFlts & fFl1gp)
-                | RT_BF_MAKE(VTD_BF_CAP_REG_PI,      0)    /* Posted Interrupts not supported. */
-                | RT_BF_MAKE(VTD_BF_CAP_REG_FL5LP,   fFlts & fFl5lp)
-                | RT_BF_MAKE(VTD_BF_CAP_REG_ESIRTPS, 0)    /* If we invalidate interrupt cache on SIRTP flow. */
-                | RT_BF_MAKE(VTD_BF_CAP_REG_ESRTPS,  0);   /* If we invalidate translation cache on SRTP flow. */
-    dmarRegWriteRaw64(pThis, VTD_MMIO_OFF_CAP_REG, pThis->fCap);
+    uint8_t const  fEsrtps = 1;                              /* Enhanced SRTPS (flush all caches on SRTP flow). */
+
+    pThis->fCapReg = RT_BF_MAKE(VTD_BF_CAP_REG_ND,      fNd)
+                   | RT_BF_MAKE(VTD_BF_CAP_REG_AFL,     0)    /* Advanced fault logging not supported. */
+                   | RT_BF_MAKE(VTD_BF_CAP_REG_RWBF,    0)    /* Software need not flush write-buffers. */
+                   | RT_BF_MAKE(VTD_BF_CAP_REG_PLMR,    0)    /* Protected Low-Memory Region not supported. */
+                   | RT_BF_MAKE(VTD_BF_CAP_REG_PHMR,    0)    /* Protected High-Memory Region not supported. */
+                   | RT_BF_MAKE(VTD_BF_CAP_REG_CM,      1)    /** @todo Figure out if required when we impl. caching. */
+                   | RT_BF_MAKE(VTD_BF_CAP_REG_SAGAW,   fSlts & uSagaw)
+                   | RT_BF_MAKE(VTD_BF_CAP_REG_MGAW,    uMgaw)
+                   | RT_BF_MAKE(VTD_BF_CAP_REG_ZLR,     1)    /** @todo Figure out if/how to support zero-length reads. */
+                   | RT_BF_MAKE(VTD_BF_CAP_REG_FRO,     offFro)
+                   | RT_BF_MAKE(VTD_BF_CAP_REG_SLLPS,   fSlts & fSllps)
+                   | RT_BF_MAKE(VTD_BF_CAP_REG_PSI,     fPsi)
+                   | RT_BF_MAKE(VTD_BF_CAP_REG_NFR,     DMAR_FRCD_REG_COUNT - 1)
+                   | RT_BF_MAKE(VTD_BF_CAP_REG_MAMV,    fPsi & fMamv)
+                   | RT_BF_MAKE(VTD_BF_CAP_REG_DWD,     1)
+                   | RT_BF_MAKE(VTD_BF_CAP_REG_DRD,     1)
+                   | RT_BF_MAKE(VTD_BF_CAP_REG_FL1GP,   fFlts & fFl1gp)
+                   | RT_BF_MAKE(VTD_BF_CAP_REG_PI,      0)    /* Posted Interrupts not supported. */
+                   | RT_BF_MAKE(VTD_BF_CAP_REG_FL5LP,   fFlts & fFl5lp)
+                   | RT_BF_MAKE(VTD_BF_CAP_REG_ESIRTPS, 0)    /* If we invalidate interrupt cache on SIRTP flow. */
+                   | RT_BF_MAKE(VTD_BF_CAP_REG_ESRTPS,  fEsrtps);
+    dmarRegWriteRaw64(pThis, VTD_MMIO_OFF_CAP_REG, pThis->fCapReg);
 }
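The register is assembled as a single OR-chain of RT_BF_MAKE(field, value) terms, each of which masks the field value and shifts it into position. The same composition spelled out with plain shifts and masks; the two-field layout below is made up for the example and is not the real CAP_REG layout:

    #include <stdint.h>

    /* Hypothetical register layout: ND at bits 2:0, MGAW at bits 21:16. */
    #define MYREG_ND_SHIFT    0
    #define MYREG_ND_MASK     UINT64_C(0x7)
    #define MYREG_MGAW_SHIFT  16
    #define MYREG_MGAW_MASK   UINT64_C(0x3f)

    /* What RT_BF_MAKE does, in essence: clamp the value to the field width,
       then shift it into place. */
    #define MY_BF_MAKE(a_uVal, a_Mask, a_Shift)  (((uint64_t)(a_uVal) & (a_Mask)) << (a_Shift))

    static uint64_t myBuildCapReg(uint8_t uNd, uint8_t uMgaw)
    {
        return MY_BF_MAKE(uNd,   MYREG_ND_MASK,   MYREG_ND_SHIFT)
             | MY_BF_MAKE(uMgaw, MYREG_MGAW_MASK, MYREG_MGAW_SHIFT);
    }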
@@ -2087,33 +2156,33 @@
     uint8_t const fAdms = 1;   /* Abort DMA mode support. */
 
-    pThis->fExtCap = RT_BF_MAKE(VTD_BF_ECAP_REG_C,      0)  /* Accesses don't snoop CPU cache. */
-                   | RT_BF_MAKE(VTD_BF_ECAP_REG_QI,     fQi)
-                   | RT_BF_MAKE(VTD_BF_ECAP_REG_DT,     0)  /* Device-TLBs not supported. */
-                   | RT_BF_MAKE(VTD_BF_ECAP_REG_IR,     fQi & fIr)
-                   | RT_BF_MAKE(VTD_BF_ECAP_REG_EIM,    fIr & fEim)
-                   | RT_BF_MAKE(VTD_BF_ECAP_REG_PT,     fPt)
-                   | RT_BF_MAKE(VTD_BF_ECAP_REG_SC,     0)  /* Snoop control not supported. */
-                   | RT_BF_MAKE(VTD_BF_ECAP_REG_IRO,    offIro)
-                   | RT_BF_MAKE(VTD_BF_ECAP_REG_MHMV,   fIr & fMhmv)
-                   | RT_BF_MAKE(VTD_BF_ECAP_REG_MTS,    0)  /* Memory type not supported. */
-                   | RT_BF_MAKE(VTD_BF_ECAP_REG_NEST,   fNest)
-                   | RT_BF_MAKE(VTD_BF_ECAP_REG_PRS,    0)  /* 0 as DT not supported. */
-                   | RT_BF_MAKE(VTD_BF_ECAP_REG_ERS,    0)  /* Execute request not supported. */
-                   | RT_BF_MAKE(VTD_BF_ECAP_REG_SRS,    fSmts & fSrs)
-                   | RT_BF_MAKE(VTD_BF_ECAP_REG_NWFS,   0)  /* 0 as DT not supported. */
-                   | RT_BF_MAKE(VTD_BF_ECAP_REG_EAFS,   0)  /** @todo figure out if EAFS is required? */
-                   | RT_BF_MAKE(VTD_BF_ECAP_REG_PSS,    0)  /* 0 as PASID not supported. */
-                   | RT_BF_MAKE(VTD_BF_ECAP_REG_PASID,  0)  /* PASID support. */
-                   | RT_BF_MAKE(VTD_BF_ECAP_REG_DIT,    0)  /* 0 as DT not supported. */
-                   | RT_BF_MAKE(VTD_BF_ECAP_REG_PDS,    0)  /* 0 as DT not supported. */
-                   | RT_BF_MAKE(VTD_BF_ECAP_REG_SMTS,   fSmts)
-                   | RT_BF_MAKE(VTD_BF_ECAP_REG_VCS,    0)  /* 0 as PASID not supported (commands seem PASID specific). */
-                   | RT_BF_MAKE(VTD_BF_ECAP_REG_SLADS,  0)  /* Second-level accessed/dirty not supported. */
-                   | RT_BF_MAKE(VTD_BF_ECAP_REG_SLTS,   fSlts)
-                   | RT_BF_MAKE(VTD_BF_ECAP_REG_FLTS,   fFlts)
-                   | RT_BF_MAKE(VTD_BF_ECAP_REG_SMPWCS, 0)  /* 0 as PASID not supported. */
-                   | RT_BF_MAKE(VTD_BF_ECAP_REG_RPS,    0)  /* We don't support RID_PASID field in SM context entry. */
-                   | RT_BF_MAKE(VTD_BF_ECAP_REG_ADMS,   fAdms)
-                   | RT_BF_MAKE(VTD_BF_ECAP_REG_RPRIVS, 0); /** @todo figure out if we should/can support this? */
-    dmarRegWriteRaw64(pThis, VTD_MMIO_OFF_ECAP_REG, pThis->fExtCap);
+    pThis->fExtCapReg = RT_BF_MAKE(VTD_BF_ECAP_REG_C,      0)  /* Accesses don't snoop CPU cache. */
+                      | RT_BF_MAKE(VTD_BF_ECAP_REG_QI,     fQi)
+                      | RT_BF_MAKE(VTD_BF_ECAP_REG_DT,     0)  /* Device-TLBs not supported. */
+                      | RT_BF_MAKE(VTD_BF_ECAP_REG_IR,     fQi & fIr)
+                      | RT_BF_MAKE(VTD_BF_ECAP_REG_EIM,    fIr & fEim)
+                      | RT_BF_MAKE(VTD_BF_ECAP_REG_PT,     fPt)
+                      | RT_BF_MAKE(VTD_BF_ECAP_REG_SC,     0)  /* Snoop control not supported. */
+                      | RT_BF_MAKE(VTD_BF_ECAP_REG_IRO,    offIro)
+                      | RT_BF_MAKE(VTD_BF_ECAP_REG_MHMV,   fIr & fMhmv)
+                      | RT_BF_MAKE(VTD_BF_ECAP_REG_MTS,    0)  /* Memory type not supported. */
+                      | RT_BF_MAKE(VTD_BF_ECAP_REG_NEST,   fNest)
+                      | RT_BF_MAKE(VTD_BF_ECAP_REG_PRS,    0)  /* 0 as DT not supported. */
+                      | RT_BF_MAKE(VTD_BF_ECAP_REG_ERS,    0)  /* Execute request not supported. */
+                      | RT_BF_MAKE(VTD_BF_ECAP_REG_SRS,    fSmts & fSrs)
+                      | RT_BF_MAKE(VTD_BF_ECAP_REG_NWFS,   0)  /* 0 as DT not supported. */
+                      | RT_BF_MAKE(VTD_BF_ECAP_REG_EAFS,   0)  /** @todo figure out if EAFS is required? */
+                      | RT_BF_MAKE(VTD_BF_ECAP_REG_PSS,    0)  /* 0 as PASID not supported. */
+                      | RT_BF_MAKE(VTD_BF_ECAP_REG_PASID,  0)  /* PASID support. */
+                      | RT_BF_MAKE(VTD_BF_ECAP_REG_DIT,    0)  /* 0 as DT not supported. */
+                      | RT_BF_MAKE(VTD_BF_ECAP_REG_PDS,    0)  /* 0 as DT not supported. */
+                      | RT_BF_MAKE(VTD_BF_ECAP_REG_SMTS,   fSmts)
+                      | RT_BF_MAKE(VTD_BF_ECAP_REG_VCS,    0)  /* 0 as PASID not supported (commands seem PASID specific). */
+                      | RT_BF_MAKE(VTD_BF_ECAP_REG_SLADS,  0)  /* Second-level accessed/dirty not supported. */
+                      | RT_BF_MAKE(VTD_BF_ECAP_REG_SLTS,   fSlts)
+                      | RT_BF_MAKE(VTD_BF_ECAP_REG_FLTS,   fFlts)
+                      | RT_BF_MAKE(VTD_BF_ECAP_REG_SMPWCS, 0)  /* 0 as PASID not supported. */
+                      | RT_BF_MAKE(VTD_BF_ECAP_REG_RPS,    0)  /* We don't support RID_PASID field in SM context entry. */
+                      | RT_BF_MAKE(VTD_BF_ECAP_REG_ADMS,   fAdms)
+                      | RT_BF_MAKE(VTD_BF_ECAP_REG_RPRIVS, 0); /** @todo figure out if we should/can support this? */
+    dmarRegWriteRaw64(pThis, VTD_MMIO_OFF_ECAP_REG, pThis->fExtCapReg);
 }
@@ -2135,6 +2204,6 @@
 
 #ifdef VBOX_STRICT
-    Assert(!RT_BF_GET(pThis->fExtCap, VTD_BF_ECAP_REG_PRS));    /* PECTL_REG - Reserved if we don't support PRS. */
-    Assert(!RT_BF_GET(pThis->fExtCap, VTD_BF_ECAP_REG_MTS));    /* MTRRCAP_REG - Reserved if we don't support MTS. */
+    Assert(!RT_BF_GET(pThis->fExtCapReg, VTD_BF_ECAP_REG_PRS)); /* PECTL_REG - Reserved if we don't support PRS. */
+    Assert(!RT_BF_GET(pThis->fExtCapReg, VTD_BF_ECAP_REG_MTS)); /* MTRRCAP_REG - Reserved if we don't support MTS. */
 #endif
 }
@@ -2320,11 +2389,11 @@
      */
     uint32_t const uVerReg         = pThis->uVerReg;
-    uint8_t const  cMaxGstAddrBits = RT_BF_GET(pThis->fCap, VTD_BF_CAP_REG_MGAW) + 1;
-    uint8_t const  cSupGstAddrBits = vtdCapRegGetSagawBits(RT_BF_GET(pThis->fCap, VTD_BF_CAP_REG_SAGAW));
-    uint16_t const offFrcd         = RT_BF_GET(pThis->fCap, VTD_BF_CAP_REG_FRO);
-    uint16_t const offIva          = RT_BF_GET(pThis->fExtCap, VTD_BF_ECAP_REG_IRO);
+    uint8_t const  cMaxGstAddrBits = RT_BF_GET(pThis->fCapReg, VTD_BF_CAP_REG_MGAW) + 1;
+    uint8_t const  cSupGstAddrBits = vtdCapRegGetSagawBits(RT_BF_GET(pThis->fCapReg, VTD_BF_CAP_REG_SAGAW));
+    uint16_t const offFrcd         = RT_BF_GET(pThis->fCapReg, VTD_BF_CAP_REG_FRO);
+    uint16_t const offIva          = RT_BF_GET(pThis->fExtCapReg, VTD_BF_ECAP_REG_IRO);
     LogRel(("%s: VER=%u.%u CAP=%#RX64 ECAP=%#RX64 (MGAW=%u bits, SAGAW=%u bits, FRO=%#x, IRO=%#x) mapped at %#RGp\n",
             DMAR_LOG_PFX, RT_BF_GET(uVerReg, VTD_BF_VER_REG_MAX), RT_BF_GET(uVerReg, VTD_BF_VER_REG_MIN),
-            pThis->fCap, pThis->fExtCap, cMaxGstAddrBits, cSupGstAddrBits, offFrcd, offIva, DMAR_MMIO_BASE_PHYSADDR));
+            pThis->fCapReg, pThis->fExtCapReg, cMaxGstAddrBits, cSupGstAddrBits, offFrcd, offIva, DMAR_MMIO_BASE_PHYSADDR));
 
     return VINF_SUCCESS;