VirtualBox

Changeset 88789 in vbox for trunk/src/VBox


Ignore:
Timestamp:
Apr 30, 2021 7:40:47 AM (4 years ago)
Author:
vboxsync
Message:

Intel IOMMU: bugref:9967 Queued Invalidation WIP.

File:
1 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/Devices/Bus/DevIommuIntel.cpp

    r88784 r88789  
    7878    do { \
    7979        Assert((a_pThisCC)->CTX_SUFF(pIommuHlp)->pfnLockIsOwner(a_pDevIns)); \
     80        RT_NOREF1(a_pThisCC); \
     81    } while (0)
     82
     83/** Asserts that the calling thread does not own the DMAR lock. */
     84#define DMAR_ASSERT_LOCK_IS_NOT_OWNER(a_pDevIns, a_pThisCC) \
     85    do { \
     86        Assert((a_pThisCC)->CTX_SUFF(pIommuHlp)->pfnLockIsOwner(a_pDevIns) == false); \
    8087        RT_NOREF1(a_pThisCC); \
    8188    } while (0)
     
    148155    kDmarDiag_CcmdReg_Ttm_Invalid,
    149156    kDmarDiag_IqaReg_Dsc_Fetch_Error,
    150     kDmarDiag_IqaReg_Dw_Invalid,
     157    kDmarDiag_IqaReg_Dw_128_Invalid,
     158    kDmarDiag_IqaReg_Dw_256_Invalid,
    151159    kDmarDiag_Iqei_Dsc_Type_Invalid,
    152160    kDmarDiag_Iqei_Inv_Wait_Dsc_0_1_Rsvd,
    153161    kDmarDiag_Iqei_Inv_Wait_Dsc_2_3_Rsvd,
    154     kDmarDiag_Iqei_Inv_Wait_Dsc_Ttm,
     162    kDmarDiag_Iqei_Inv_Wait_Dsc_Invalid,
     163    kDmarDiag_Iqei_Ttm_Rsvd,
    155164    kDmarDiag_IqtReg_Qt_Invalid,
    156165    kDmarDiag_IqtReg_Qt_NotAligned,
     
    175184    DMARDIAG_DESC(CcmdReg_Ttm_Invalid       ),
    176185    DMARDIAG_DESC(IqaReg_Dsc_Fetch_Error    ),
    177     DMARDIAG_DESC(IqaReg_Dw_Invalid         ),
     186    DMARDIAG_DESC(IqaReg_Dw_128_Invalid     ),
     187    DMARDIAG_DESC(IqaReg_Dw_256_Invalid     ),
    178188    DMARDIAG_DESC(Iqei_Dsc_Type_Invalid     ),
    179189    DMARDIAG_DESC(Iqei_Inv_Wait_Dsc_0_1_Rsvd),
    180190    DMARDIAG_DESC(Iqei_Inv_Wait_Dsc_2_3_Rsvd),
    181     DMARDIAG_DESC(Iqei_Inv_Wait_Dsc_Ttm     ),
     191    DMARDIAG_DESC(Iqei_Inv_Wait_Dsc_Invalid ),
     192    DMARDIAG_DESC(Iqei_Ttm_Rsvd             ),
    182193    DMARDIAG_DESC(IqtReg_Qt_Invalid         ),
    183194    DMARDIAG_DESC(IqtReg_Qt_NotAligned      )
     
    211222    uint64_t                    uIrtaReg;
    212223    /** Currently active RTADDR_REG. */
    213     uint64_t                    uRtaReg;
     224    uint64_t                    uRtaddrReg;
    214225    /** @} */
    215226
     
    221232    uint8_t                     abPadding[7];
    222233    /** Copy of CAP_REG. */
    223     uint64_t                    fCap;
     234    uint64_t                    fCapReg;
    224235    /** Copy of ECAP_REG. */
    225     uint64_t                    fExtCap;
     236    uint64_t                    fExtCapReg;
    226237    /** @} */
    227238
     
    900911
    901912/**
    902  * Gets the table translation mode from the RTADDR_REG.
    903  *
    904  * @returns The table translation mode.
    905  * @param   pThis   The shared DMAR device state.
    906  */
    907 static uint8_t dmarRtAddrRegGetTtm(PCDMAR pThis)
    908 {
    909     uint64_t const uRtAddrReg = dmarRegRead64(pThis, VTD_MMIO_OFF_RTADDR_REG);
    910     return RT_BF_GET(uRtAddrReg, VTD_BF_RTADDR_REG_TTM);
    911 }
    912 
    913 
    914 /**
    915913 * Checks if the invalidation-queue is empty.
    916914 *
     
    10041002static void dmarFaultEventRaiseInterrupt(PPDMDEVINS pDevIns)
    10051003{
    1006     PDMAR    pThis   = PDMDEVINS_2_DATA(pDevIns, PDMAR);
    1007     PCDMARCC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PCDMARCC);
     1004    PDMAR pThis = PDMDEVINS_2_DATA(pDevIns, PDMAR);
    10081005#ifdef RT_STRICT
    10091006    {
     
    10271024
    10281025        /** @todo Assert Msi.Addr is in the MSR_IA32_APICBASE_ADDR range and ensure on
    1029          *        FEADD_REG write it can't be anything else. */
      1026         *        FEADDR_REG write it can't be anything else? */
     1027        PCDMARCC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PCDMARCC);
    10301028        pThisCC->CTX_SUFF(pIommuHlp)->pfnSendMsi(pDevIns, &Msi, 0 /* uTagSrc */);
    10311029
     
    10511049static void dmarR3InvEventRaiseInterrupt(PPDMDEVINS pDevIns)
    10521050{
    1053     PDMAR    pThis   = PDMDEVINS_2_DATA(pDevIns, PDMAR);
    1054     PCDMARCC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PCDMARCC);
    1055 
     1051    PDMAR pThis = PDMDEVINS_2_DATA(pDevIns, PDMAR);
    10561052    uint32_t const uIcsReg = dmarRegReadRaw32(pThis, VTD_MMIO_OFF_ICS_REG);
    10571053    if (uIcsReg & VTD_BF_ICS_REG_IWC_MASK)
     
    10671063        Msi.Data.u32 = dmarRegReadRaw32(pThis, VTD_MMIO_OFF_IEDATA_REG);
    10681064
     1065        PCDMARCC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PCDMARCC);
    10691066        pThisCC->CTX_SUFF(pIommuHlp)->pfnSendMsi(pDevIns, &Msi, 0 /* uTagSrc */);
    10701067
     
    11611158{
    11621159    PDMAR pThis = PDMDEVINS_2_DATA(pDevIns, PDMAR);
    1163     uint32_t const uGstsReg = dmarRegReadRaw32(pThis, VTD_MMIO_OFF_GSTS_REG);
    1164     uint32_t const fChanged = uGstsReg ^ uGcmdReg;
    1165     uint64_t const fExtCap  = pThis->fExtCap;
     1160    uint32_t const uGstsReg   = dmarRegReadRaw32(pThis, VTD_MMIO_OFF_GSTS_REG);
     1161    uint32_t const fChanged   = uGstsReg ^ uGcmdReg;
     1162    uint64_t const fExtCapReg = pThis->fExtCapReg;
    11661163
    11671164    /*
    11681165     * Queued-invalidation.
    11691166     */
    1170     if (   (fExtCap & VTD_BF_ECAP_REG_QI_MASK)
     1167    if (   (fExtCapReg & VTD_BF_ECAP_REG_QI_MASK)
    11711168        && (fChanged & VTD_BF_GCMD_REG_QIE_MASK))
    11721169    {
     
    11861183
    11871184    /*
    1188      * Set interrupt remapping table pointer.
     1185     * Set Interrupt Remapping Table Pointer (SIRTP).
    11891186     */
    1190     if (   (fExtCap & VTD_BF_ECAP_REG_IR_MASK)
     1187    if (   (fExtCapReg & VTD_BF_ECAP_REG_IR_MASK)
    11911188        && (uGcmdReg & VTD_BF_GCMD_REG_SIRTP_MASK))
    11921189    {
    11931190        pThis->uIrtaReg = dmarRegReadRaw64(pThis, VTD_MMIO_OFF_IRTA_REG);
    11941191        dmarRegChangeRaw32(pThis, VTD_MMIO_OFF_GSTS_REG, UINT32_MAX /* fAndMask */, VTD_BF_GSTS_REG_IRTPS_MASK /* fOrMask */);
     1192    }
     1193
     1194    /*
     1195     * Set Root Table Pointer (SRTP).
     1196     */
     1197    if (uGcmdReg & VTD_BF_GCMD_REG_SRTP_MASK)
     1198    {
     1199        /** @todo Perform global invalidation of all remapping translation caches. */
     1200#if 0
     1201        if (pThis->fCapReg & VTD_BF_CAP_REG_ESRTPS_MASK)
     1202        {
     1203        }
     1204#endif
     1205        pThis->uRtaddrReg = dmarRegReadRaw64(pThis, VTD_MMIO_OFF_RTADDR_REG);
    11951206    }
    11961207
     
    12281239                {
    12291240                    /* Verify table translation mode is legacy. */
    1230                     uint8_t const fTtm = dmarRtAddrRegGetTtm(pThis);
     1241                    uint8_t const  fTtm = RT_BF_GET(pThis->uRtaddrReg, VTD_BF_RTADDR_REG_TTM);
    12311242                    if (fTtm == VTD_TTM_LEGACY_MODE)
    12321243                    {
     
    13041315    if (fDw == VTD_IQA_REG_DW_256_BIT)
    13051316    {
    1306         bool const fSupports256BitDw = (pThis->fExtCap & (VTD_BF_ECAP_REG_SMTS_MASK | VTD_BF_ECAP_REG_ADMS_MASK));
     1317        bool const fSupports256BitDw = (pThis->fExtCapReg & (VTD_BF_ECAP_REG_SMTS_MASK | VTD_BF_ECAP_REG_ADMS_MASK));
    13071318        if (fSupports256BitDw)
    13081319        { /* likely */ }
    13091320        else
    1310             dmarIqeFaultRecord(pDevIns, kDmarDiag_IqaReg_Dw_Invalid, kIqei_InvalidDescriptorWidth);
    1311     }
     1321            dmarIqeFaultRecord(pDevIns, kDmarDiag_IqaReg_Dw_256_Invalid, kIqei_InvalidDescriptorWidth);
     1322    }
     1323    /* else: 128-bit descriptor width is validated lazily, see explanation in dmarR3InvQueueProcessRequests. */
     1324
    13121325    return VINF_SUCCESS;
    13131326}
     
    14831496 * @param   fDw         The descriptor width (VTD_IQA_REG_DW_128_BIT or
    14841497 *                      VTD_IQA_REG_DW_256_BIT).
    1485  */
    1486 static void dmarR3InvQueueProcessRequests(PPDMDEVINS pDevIns, void const *pvRequests, uint32_t cbRequests, uint8_t fDw)
    1487 {
     1498 * @param   fTtm        The current table translation mode. Must not be
     1499 *                      VTD_TTM_RSVD.
     1500 */
     1501static void dmarR3InvQueueProcessRequests(PPDMDEVINS pDevIns, void const *pvRequests, uint32_t cbRequests, uint8_t fDw,
     1502                                          uint8_t fTtm)
     1503{
     1504#define DMAR_IQE_FAULT_RECORD_RET(a_enmDiag, a_enmIqei) \
     1505    do \
     1506    { \
     1507        DMAR_LOCK(pDevIns, pThisR3); \
     1508        dmarIqeFaultRecord(pDevIns, (a_enmDiag), (a_enmIqei)); \
     1509        DMAR_UNLOCK(pDevIns, pThisR3); \
     1510        return; \
     1511    } while (0)
     1512
    14881513    PCDMAR   pThis   = PDMDEVINS_2_DATA(pDevIns, PDMAR);
    14891514    PCDMARR3 pThisR3 = PDMDEVINS_2_DATA_CC(pDevIns, PCDMARR3);
    1490     DMAR_ASSERT_LOCK_IS_OWNER(pDevIns, pThisR3);
    1491 
     1515
     1516    DMAR_ASSERT_LOCK_IS_NOT_OWNER(pDevIns, pThisR3);
      1517    Assert(fTtm != VTD_TTM_RSVD);       /* Should've been handled by caller. */
     1518
     1519    /*
     1520     * The below check is redundant since we check both TTM and DW for each
     1521     * descriptor type we process. However, the error reported by hardware
      1522     * may differ; hence this is kept commented out but not removed from the code
      1523     * in case we need to change this in the future.
     1524     *
     1525     * In our implementation, we would report the descriptor type as invalid,
     1526     * while on real hardware it may report descriptor width as invalid.
      1527     * The Intel VT-d spec. is not clear which error takes precedence.
     1528     */
     1529#if 0
     1530    /*
     1531     * Verify that 128-bit descriptors are not used when operating in scalable mode.
     1532     * We don't check this while software writes IQA_REG but defer it until now because
     1533     * RTADDR_REG can be updated lazily (via GCMD_REG.SRTP). The 256-bit descriptor check
     1534     * -IS- performed when software writes IQA_REG since it only requires checking against
     1535     * immutable hardware features.
     1536     */
     1537    if (   fTtm != VTD_TTM_SCALABLE_MODE
     1538        || fDw != VTD_IQA_REG_DW_128_BIT)
     1539    { /* likely */ }
     1540    else
     1541        DMAR_IQE_FAULT_RECORD_RET(kDmarDiag_IqaReg_Dw_128_Invalid, kIqei_InvalidDescriptorWidth);
     1542#endif
     1543
     1544    /*
     1545     * Process requests in FIFO order.
     1546     */
    14921547    uint8_t const cbDsc = fDw == VTD_IQA_REG_DW_256_BIT ? 32 : 16;
    14931548    for (uint32_t offDsc = 0; offDsc < cbRequests; offDsc += cbDsc)
     
    14971552        uint64_t const  uQword1     = puDscQwords[1];
    14981553        uint8_t const   fDscType    = VTD_GENERIC_INV_DSC_GET_TYPE(uQword0);
    1499         uint8_t const   fTtm        = dmarRtAddrRegGetTtm(pThis);
    1500         Assert(fTtm != VTD_TTM_RSVD);       /* Should be guaranteed when software updates GCMD_REG.SRTP. */
    1501 
    15021554        switch (fDscType)
    15031555        {
     
    15091561            case VTD_INV_WAIT_DSC_TYPE:
    15101562            {
    1511                 /* Validate translation modes valid for this descriptor. */
     1563                /* Validate descriptor type. */
    15121564                if (   fTtm == VTD_TTM_LEGACY_MODE
    15131565                    || fDw == VTD_IQA_REG_DW_256_BIT)
    15141566                { /* likely */  }
    15151567                else
    1516                 {
    1517                     dmarIqeFaultRecord(pDevIns, kDmarDiag_Iqei_Inv_Wait_Dsc_Ttm, kIqei_InvalidDescriptorType);
    1518                     return;
    1519                 }
     1568                    DMAR_IQE_FAULT_RECORD_RET(kDmarDiag_Iqei_Inv_Wait_Dsc_Invalid, kIqei_InvalidDescriptorType);
    15201569
    15211570                /* Validate reserved bits. */
    1522                 uint64_t const fValidMask0 = !(pThis->fExtCap & VTD_BF_ECAP_REG_PDS_MASK)
     1571                uint64_t const fValidMask0 = !(pThis->fExtCapReg & VTD_BF_ECAP_REG_PDS_MASK)
    15231572                                           ? VTD_INV_WAIT_DSC_0_VALID_MASK & ~VTD_BF_0_INV_WAIT_DSC_PD_MASK
    15241573                                           : VTD_INV_WAIT_DSC_0_VALID_MASK;
     
    15271576                { /* likely */ }
    15281577                else
    1529                 {
    1530                     dmarIqeFaultRecord(pDevIns, kDmarDiag_Iqei_Inv_Wait_Dsc_0_1_Rsvd, kIqei_RsvdFieldViolation);
    1531                     return;
    1532                 }
     1578                    DMAR_IQE_FAULT_RECORD_RET(kDmarDiag_Iqei_Inv_Wait_Dsc_0_1_Rsvd, kIqei_RsvdFieldViolation);
     1579
    15331580                if (fDw == VTD_IQA_REG_DW_256_BIT)
    15341581                {
    1535                     uint64_t const uQword2 = puDscQwords[2];
    1536                     uint64_t const uQword3 = puDscQwords[3];
    1537                     if (   !uQword2
    1538                         && !uQword3)
     1582                    if (   !puDscQwords[2]
     1583                        && !puDscQwords[3])
    15391584                    { /* likely */ }
    15401585                    else
    1541                     {
    1542                         dmarIqeFaultRecord(pDevIns, kDmarDiag_Iqei_Inv_Wait_Dsc_2_3_Rsvd, kIqei_RsvdFieldViolation);
    1543                         return;
    1544                     }
     1586                        DMAR_IQE_FAULT_RECORD_RET(kDmarDiag_Iqei_Inv_Wait_Dsc_2_3_Rsvd, kIqei_RsvdFieldViolation);
    15451587                }
    15461588
     
    15511593                    uint32_t const uStatus      = RT_BF_GET(uQword0, VTD_BF_0_INV_WAIT_DSC_STDATA);
    15521594                    RTGCPHYS const GCPhysStatus = uQword1 & VTD_BF_1_INV_WAIT_DSC_STADDR_MASK;
    1553                     DMAR_UNLOCK(pDevIns, pThisR3);
    15541595                    int const rc = PDMDevHlpPhysWrite(pDevIns, GCPhysStatus, (void const*)&uStatus, sizeof(uStatus));
    1555                     DMAR_LOCK(pDevIns, pThisR3);
    15561596                    AssertRC(rc);
    15571597                }
     
    15601600                bool const fIf = RT_BF_GET(uQword0, VTD_BF_0_INV_WAIT_DSC_IF);
    15611601                if (fIf)
     1602                {
     1603                    DMAR_LOCK(pDevIns, pThisR3);
    15621604                    dmarR3InvEventRaiseInterrupt(pDevIns);
     1605                    DMAR_UNLOCK(pDevIns, pThisR3);
     1606                }
    15631607                break;
    15641608            }
     
    15751619                /* Stop processing further requests. */
    15761620                LogFunc(("Invalid descriptor type: %#x\n", fDscType));
    1577                 dmarIqeFaultRecord(pDevIns, kDmarDiag_Iqei_Dsc_Type_Invalid, kIqei_InvalidDescriptorType);
    1578                 return;
     1621                DMAR_IQE_FAULT_RECORD_RET(kDmarDiag_Iqei_Dsc_Type_Invalid, kIqei_InvalidDescriptorType);
    15791622            }
    15801623        }
    15811624    }
    1582 }
    1583 
     1625#undef DMAR_IQE_FAULT_RECORD_RET
     1626}
    15841627
    15851628
     
    16121655    AssertPtrReturn(pvRequests, VERR_NO_MEMORY);
    16131656
     1657    PDMAR    pThis   = PDMDEVINS_2_DATA(pDevIns, PDMAR);
     1658    PCDMARR3 pThisR3 = PDMDEVINS_2_DATA_CC(pDevIns, PCDMARR3);
     1659
    16141660    while (pThread->enmState == PDMTHREADSTATE_RUNNING)
    16151661    {
    1616         PDMAR    pThis   = PDMDEVINS_2_DATA(pDevIns, PDMAR);
    1617         PCDMARR3 pThisR3 = PDMDEVINS_2_DATA_CC(pDevIns, PCDMARR3);
    1618 
    16191662        /*
    16201663         * Sleep until we are woken up.
     
    16381681            if (!fIsEmpty)
    16391682            {
    1640                 /** @todo Handle RTADDR_REG MMIO write first, for handling kIqei_InvalidTtm. I
    1641                  *        don't think it needs to be checked/handled here? */
    1642 
    16431683                /*
    1644                  * Get the current queue size.
     1684                 * Get the current queue size, descriptor width, queue base address and the
     1685                 * table translation mode while the lock is still held.
    16451686                 */
    1646                 uint64_t const uIqaReg     = dmarRegReadRaw64(pThis, VTD_MMIO_OFF_IQA_REG);
    1647                 uint8_t const  cQueuePages = 1 << (uIqaReg & VTD_BF_IQA_REG_QS_MASK);
    1648                 uint32_t const cbQueue     = cQueuePages << X86_PAGE_SHIFT;
    1649                 uint8_t const  fDw         = RT_BF_GET(uIqaReg, VTD_BF_IQA_REG_DW);
     1687                uint64_t const uIqaReg        = dmarRegReadRaw64(pThis, VTD_MMIO_OFF_IQA_REG);
     1688                uint8_t const  cQueuePages    = 1 << (uIqaReg & VTD_BF_IQA_REG_QS_MASK);
     1689                uint32_t const cbQueue        = cQueuePages << X86_PAGE_SHIFT;
     1690                uint8_t const  fDw            = RT_BF_GET(uIqaReg, VTD_BF_IQA_REG_DW);
     1691                uint8_t const  fTtm           = RT_BF_GET(pThis->uRtaddrReg, VTD_BF_RTADDR_REG_TTM);
     1692                RTGCPHYS const GCPhysRequests = (uIqaReg & VTD_BF_IQA_REG_IQA_MASK) + offQueueHead;
    16501693
    16511694                /* Paranoia. */
    1652                 Assert(cbQueue <= cbMaxQs);
     1695                Assert(cbQueue <= cbMaxQs); NOREF(cbMaxQs);
    16531696                Assert(!(offQueueTail & ~VTD_IQT_REG_RW_MASK));
    16541697                Assert(!(offQueueHead & ~VTD_IQH_REG_RW_MASK));
     
    16581701
    16591702                /*
    1660                  * Read the requests in the queue from guest memory into our buffer.
     1703                 * A table translation mode of "reserved" isn't valid for any descriptor type.
     1704                 * However, RTADDR_REG can be modified in parallel to invalidation-queue processing,
      1705                 * but if ESRTPS is supported, we will perform a global invalidation when software
     1706                 * changes RTADDR_REG, or it's the responsibility of software to do it explicitly.
     1707                 * So caching TTM while reading all descriptors should not be a problem.
     1708                 *
     1709                 * Also, validate the queue tail offset as it's mutable by software.
    16611710                 */
    1662                 if (offQueueTail < cbQueue)
     1711                if (   fTtm != VTD_TTM_RSVD
     1712                    && offQueueTail < cbQueue)
    16631713                {
    1664                     RTGCPHYS const GCPhysRequests = (uIqaReg & VTD_BF_IQA_REG_IQA_MASK) + offQueueHead;
    1665 
    1666                     /* Don't hold the lock while reading (potentially large amount of) requests. */
     1714                    /* Don't hold the lock while reading (a potentially large amount of) requests */
    16671715                    DMAR_UNLOCK(pDevIns, pThisR3);
    16681716
     
    16991747                        dmarRegWriteRaw64(pThis, VTD_MMIO_OFF_IQH_REG, offQueueTail);
    17001748
    1701                         /* Process all requests (in FIFO order). */
     1749                        /* Don't hold the lock while processing requests. */
     1750                        DMAR_UNLOCK(pDevIns, pThisR3);
     1751
     1752                        /* Process all requests. */
    17021753                        Assert(cbRequests <= cbQueue);
    1703                         dmarR3InvQueueProcessRequests(pDevIns, pvRequests, cbRequests, fDw);
     1754                        dmarR3InvQueueProcessRequests(pDevIns, pvRequests, cbRequests, fDw, fTtm);
     1755
     1756                        /*
     1757                         * We've processed all requests and the lock shouldn't be held at this point.
      1758                         * Instead of re-acquiring the lock just to release it again, we go back to
      1759                         * the thread loop using 'continue' here. It's a bit ugly but it certainly
     1760                         * helps with performance.
     1761                         */
     1762                        DMAR_ASSERT_LOCK_IS_NOT_OWNER(pDevIns, pThisR3);
     1763                        continue;
    17041764                    }
    17051765                    else
     
    17071767                }
    17081768                else
    1709                     dmarIqeFaultRecord(pDevIns, kDmarDiag_IqtReg_Qt_Invalid, kIqei_InvalidTailPointer);
     1769                {
      1770                    if (fTtm == VTD_TTM_RSVD)
      1771                        dmarIqeFaultRecord(pDevIns, kDmarDiag_Iqei_Ttm_Rsvd, kIqei_InvalidTtm);
      1772                    else
      1773                    {
      1774                        Assert(offQueueTail >= cbQueue);
     1775                        dmarIqeFaultRecord(pDevIns, kDmarDiag_IqtReg_Qt_Invalid, kIqei_InvalidTailPointer);
     1776                    }
     1777                }
    17101778            }
    17111779        }
     
    20522120        uint8_t const uSagaw  = vtdCapRegGetSagaw(uMgaw);       /* Supported adjust guest address width. */
    20532121        uint16_t const offFro = DMAR_MMIO_OFF_FRCD_LO_REG >> 4; /* MMIO offset of FRCD registers. */
    2054 
    2055         pThis->fCap = RT_BF_MAKE(VTD_BF_CAP_REG_ND,      fNd)
    2056                     | RT_BF_MAKE(VTD_BF_CAP_REG_AFL,     0)     /* Advanced fault logging not supported. */
    2057                     | RT_BF_MAKE(VTD_BF_CAP_REG_RWBF,    0)     /* Software need not flush write-buffers. */
    2058                     | RT_BF_MAKE(VTD_BF_CAP_REG_PLMR,    0)     /* Protected Low-Memory Region not supported. */
    2059                     | RT_BF_MAKE(VTD_BF_CAP_REG_PHMR,    0)     /* Protected High-Memory Region not supported. */
    2060                     | RT_BF_MAKE(VTD_BF_CAP_REG_CM,      1)     /** @todo Figure out if required when we impl. caching. */
    2061                     | RT_BF_MAKE(VTD_BF_CAP_REG_SAGAW,   fSlts & uSagaw)
    2062                     | RT_BF_MAKE(VTD_BF_CAP_REG_MGAW,    uMgaw)
    2063                     | RT_BF_MAKE(VTD_BF_CAP_REG_ZLR,     1)     /** @todo Figure out if/how to support zero-length reads. */
    2064                     | RT_BF_MAKE(VTD_BF_CAP_REG_FRO,     offFro)
    2065                     | RT_BF_MAKE(VTD_BF_CAP_REG_SLLPS,   fSlts & fSllps)
    2066                     | RT_BF_MAKE(VTD_BF_CAP_REG_PSI,     fPsi)
    2067                     | RT_BF_MAKE(VTD_BF_CAP_REG_NFR,     DMAR_FRCD_REG_COUNT - 1)
    2068                     | RT_BF_MAKE(VTD_BF_CAP_REG_MAMV,    fPsi & fMamv)
    2069                     | RT_BF_MAKE(VTD_BF_CAP_REG_DWD,     1)
    2070                     | RT_BF_MAKE(VTD_BF_CAP_REG_DRD,     1)
    2071                     | RT_BF_MAKE(VTD_BF_CAP_REG_FL1GP,   fFlts & fFl1gp)
    2072                     | RT_BF_MAKE(VTD_BF_CAP_REG_PI,      0)     /* Posted Interrupts not supported. */
    2073                     | RT_BF_MAKE(VTD_BF_CAP_REG_FL5LP,   fFlts & fFl5lp)
    2074                     | RT_BF_MAKE(VTD_BF_CAP_REG_ESIRTPS, 0)     /* If we invalidate interrupt cache on SIRTP flow. */
    2075                     | RT_BF_MAKE(VTD_BF_CAP_REG_ESRTPS,  0);    /* If we invalidate translation cache on SRTP flow. */
    2076         dmarRegWriteRaw64(pThis, VTD_MMIO_OFF_CAP_REG, pThis->fCap);
     2122        uint8_t const fEsrtps = 1;                              /* Enhanced SRTPS (flush all caches on SRTP flow). */
     2123
     2124        pThis->fCapReg = RT_BF_MAKE(VTD_BF_CAP_REG_ND,      fNd)
     2125                       | RT_BF_MAKE(VTD_BF_CAP_REG_AFL,     0)     /* Advanced fault logging not supported. */
     2126                       | RT_BF_MAKE(VTD_BF_CAP_REG_RWBF,    0)     /* Software need not flush write-buffers. */
     2127                       | RT_BF_MAKE(VTD_BF_CAP_REG_PLMR,    0)     /* Protected Low-Memory Region not supported. */
     2128                       | RT_BF_MAKE(VTD_BF_CAP_REG_PHMR,    0)     /* Protected High-Memory Region not supported. */
     2129                       | RT_BF_MAKE(VTD_BF_CAP_REG_CM,      1)     /** @todo Figure out if required when we impl. caching. */
     2130                       | RT_BF_MAKE(VTD_BF_CAP_REG_SAGAW,   fSlts & uSagaw)
     2131                       | RT_BF_MAKE(VTD_BF_CAP_REG_MGAW,    uMgaw)
     2132                       | RT_BF_MAKE(VTD_BF_CAP_REG_ZLR,     1)     /** @todo Figure out if/how to support zero-length reads. */
     2133                       | RT_BF_MAKE(VTD_BF_CAP_REG_FRO,     offFro)
     2134                       | RT_BF_MAKE(VTD_BF_CAP_REG_SLLPS,   fSlts & fSllps)
     2135                       | RT_BF_MAKE(VTD_BF_CAP_REG_PSI,     fPsi)
     2136                       | RT_BF_MAKE(VTD_BF_CAP_REG_NFR,     DMAR_FRCD_REG_COUNT - 1)
     2137                       | RT_BF_MAKE(VTD_BF_CAP_REG_MAMV,    fPsi & fMamv)
     2138                       | RT_BF_MAKE(VTD_BF_CAP_REG_DWD,     1)
     2139                       | RT_BF_MAKE(VTD_BF_CAP_REG_DRD,     1)
     2140                       | RT_BF_MAKE(VTD_BF_CAP_REG_FL1GP,   fFlts & fFl1gp)
     2141                       | RT_BF_MAKE(VTD_BF_CAP_REG_PI,      0)     /* Posted Interrupts not supported. */
     2142                       | RT_BF_MAKE(VTD_BF_CAP_REG_FL5LP,   fFlts & fFl5lp)
     2143                       | RT_BF_MAKE(VTD_BF_CAP_REG_ESIRTPS, 0)     /* If we invalidate interrupt cache on SIRTP flow. */
     2144                       | RT_BF_MAKE(VTD_BF_CAP_REG_ESRTPS,  fEsrtps);
     2145        dmarRegWriteRaw64(pThis, VTD_MMIO_OFF_CAP_REG, pThis->fCapReg);
    20772146    }
    20782147
     
    20872156        uint8_t const  fAdms  = 1;                              /* Abort DMA mode support. */
    20882157
    2089         pThis->fExtCap = RT_BF_MAKE(VTD_BF_ECAP_REG_C,      0)  /* Accesses don't snoop CPU cache. */
    2090                        | RT_BF_MAKE(VTD_BF_ECAP_REG_QI,     fQi)
    2091                        | RT_BF_MAKE(VTD_BF_ECAP_REG_DT,     0)  /* Device-TLBs not supported. */
    2092                        | RT_BF_MAKE(VTD_BF_ECAP_REG_IR,     fQi & fIr)
    2093                        | RT_BF_MAKE(VTD_BF_ECAP_REG_EIM,    fIr & fEim)
    2094                        | RT_BF_MAKE(VTD_BF_ECAP_REG_PT,     fPt)
    2095                        | RT_BF_MAKE(VTD_BF_ECAP_REG_SC,     0)  /* Snoop control not supported. */
    2096                        | RT_BF_MAKE(VTD_BF_ECAP_REG_IRO,    offIro)
    2097                        | RT_BF_MAKE(VTD_BF_ECAP_REG_MHMV,   fIr & fMhmv)
    2098                        | RT_BF_MAKE(VTD_BF_ECAP_REG_MTS,    0)  /* Memory type not supported. */
    2099                        | RT_BF_MAKE(VTD_BF_ECAP_REG_NEST,   fNest)
    2100                        | RT_BF_MAKE(VTD_BF_ECAP_REG_PRS,    0)  /* 0 as DT not supported. */
    2101                        | RT_BF_MAKE(VTD_BF_ECAP_REG_ERS,    0)  /* Execute request not supported. */
    2102                        | RT_BF_MAKE(VTD_BF_ECAP_REG_SRS,    fSmts & fSrs)
    2103                        | RT_BF_MAKE(VTD_BF_ECAP_REG_NWFS,   0)  /* 0 as DT not supported. */
    2104                        | RT_BF_MAKE(VTD_BF_ECAP_REG_EAFS,   0)  /** @todo figure out if EAFS is required? */
    2105                        | RT_BF_MAKE(VTD_BF_ECAP_REG_PSS,    0)  /* 0 as PASID not supported. */
    2106                        | RT_BF_MAKE(VTD_BF_ECAP_REG_PASID,  0)  /* PASID support. */
    2107                        | RT_BF_MAKE(VTD_BF_ECAP_REG_DIT,    0)  /* 0 as DT not supported. */
    2108                        | RT_BF_MAKE(VTD_BF_ECAP_REG_PDS,    0)  /* 0 as DT not supported. */
    2109                        | RT_BF_MAKE(VTD_BF_ECAP_REG_SMTS,   fSmts)
    2110                        | RT_BF_MAKE(VTD_BF_ECAP_REG_VCS,    0)  /* 0 as PASID not supported (commands seem PASID specific). */
    2111                        | RT_BF_MAKE(VTD_BF_ECAP_REG_SLADS,  0)  /* Second-level accessed/dirty not supported. */
    2112                        | RT_BF_MAKE(VTD_BF_ECAP_REG_SLTS,   fSlts)
    2113                        | RT_BF_MAKE(VTD_BF_ECAP_REG_FLTS,   fFlts)
    2114                        | RT_BF_MAKE(VTD_BF_ECAP_REG_SMPWCS, 0)  /* 0 as PASID not supported. */
    2115                        | RT_BF_MAKE(VTD_BF_ECAP_REG_RPS,    0)  /* We don't support RID_PASID field in SM context entry. */
    2116                        | RT_BF_MAKE(VTD_BF_ECAP_REG_ADMS,   fAdms)
    2117                        | RT_BF_MAKE(VTD_BF_ECAP_REG_RPRIVS, 0); /** @todo figure out if we should/can support this? */
    2118         dmarRegWriteRaw64(pThis, VTD_MMIO_OFF_ECAP_REG, pThis->fExtCap);
     2158        pThis->fExtCapReg = RT_BF_MAKE(VTD_BF_ECAP_REG_C,      0)  /* Accesses don't snoop CPU cache. */
     2159                          | RT_BF_MAKE(VTD_BF_ECAP_REG_QI,     fQi)
     2160                          | RT_BF_MAKE(VTD_BF_ECAP_REG_DT,     0)  /* Device-TLBs not supported. */
     2161                          | RT_BF_MAKE(VTD_BF_ECAP_REG_IR,     fQi & fIr)
     2162                          | RT_BF_MAKE(VTD_BF_ECAP_REG_EIM,    fIr & fEim)
     2163                          | RT_BF_MAKE(VTD_BF_ECAP_REG_PT,     fPt)
     2164                          | RT_BF_MAKE(VTD_BF_ECAP_REG_SC,     0)  /* Snoop control not supported. */
     2165                          | RT_BF_MAKE(VTD_BF_ECAP_REG_IRO,    offIro)
     2166                          | RT_BF_MAKE(VTD_BF_ECAP_REG_MHMV,   fIr & fMhmv)
     2167                          | RT_BF_MAKE(VTD_BF_ECAP_REG_MTS,    0)  /* Memory type not supported. */
     2168                          | RT_BF_MAKE(VTD_BF_ECAP_REG_NEST,   fNest)
     2169                          | RT_BF_MAKE(VTD_BF_ECAP_REG_PRS,    0)  /* 0 as DT not supported. */
     2170                          | RT_BF_MAKE(VTD_BF_ECAP_REG_ERS,    0)  /* Execute request not supported. */
     2171                          | RT_BF_MAKE(VTD_BF_ECAP_REG_SRS,    fSmts & fSrs)
     2172                          | RT_BF_MAKE(VTD_BF_ECAP_REG_NWFS,   0)  /* 0 as DT not supported. */
     2173                          | RT_BF_MAKE(VTD_BF_ECAP_REG_EAFS,   0)  /** @todo figure out if EAFS is required? */
     2174                          | RT_BF_MAKE(VTD_BF_ECAP_REG_PSS,    0)  /* 0 as PASID not supported. */
     2175                          | RT_BF_MAKE(VTD_BF_ECAP_REG_PASID,  0)  /* PASID support. */
     2176                          | RT_BF_MAKE(VTD_BF_ECAP_REG_DIT,    0)  /* 0 as DT not supported. */
     2177                          | RT_BF_MAKE(VTD_BF_ECAP_REG_PDS,    0)  /* 0 as DT not supported. */
     2178                          | RT_BF_MAKE(VTD_BF_ECAP_REG_SMTS,   fSmts)
     2179                          | RT_BF_MAKE(VTD_BF_ECAP_REG_VCS,    0)  /* 0 as PASID not supported (commands seem PASID specific). */
     2180                          | RT_BF_MAKE(VTD_BF_ECAP_REG_SLADS,  0)  /* Second-level accessed/dirty not supported. */
     2181                          | RT_BF_MAKE(VTD_BF_ECAP_REG_SLTS,   fSlts)
     2182                          | RT_BF_MAKE(VTD_BF_ECAP_REG_FLTS,   fFlts)
     2183                          | RT_BF_MAKE(VTD_BF_ECAP_REG_SMPWCS, 0)  /* 0 as PASID not supported. */
     2184                          | RT_BF_MAKE(VTD_BF_ECAP_REG_RPS,    0)  /* We don't support RID_PASID field in SM context entry. */
     2185                          | RT_BF_MAKE(VTD_BF_ECAP_REG_ADMS,   fAdms)
     2186                          | RT_BF_MAKE(VTD_BF_ECAP_REG_RPRIVS, 0); /** @todo figure out if we should/can support this? */
     2187        dmarRegWriteRaw64(pThis, VTD_MMIO_OFF_ECAP_REG, pThis->fExtCapReg);
    21192188    }
    21202189
     
    21352204
    21362205#ifdef VBOX_STRICT
    2137     Assert(!RT_BF_GET(pThis->fExtCap, VTD_BF_ECAP_REG_PRS));    /* PECTL_REG - Reserved if don't support PRS. */
    2138     Assert(!RT_BF_GET(pThis->fExtCap, VTD_BF_ECAP_REG_MTS));    /* MTRRCAP_REG - Reserved if we don't support MTS. */
     2206    Assert(!RT_BF_GET(pThis->fExtCapReg, VTD_BF_ECAP_REG_PRS));    /* PECTL_REG - Reserved if don't support PRS. */
     2207    Assert(!RT_BF_GET(pThis->fExtCapReg, VTD_BF_ECAP_REG_MTS));    /* MTRRCAP_REG - Reserved if we don't support MTS. */
    21392208#endif
    21402209}
     
    23202389     */
    23212390    uint32_t const uVerReg         = pThis->uVerReg;
    2322     uint8_t const  cMaxGstAddrBits = RT_BF_GET(pThis->fCap, VTD_BF_CAP_REG_MGAW) + 1;
    2323     uint8_t const  cSupGstAddrBits = vtdCapRegGetSagawBits(RT_BF_GET(pThis->fCap, VTD_BF_CAP_REG_SAGAW));
    2324     uint16_t const offFrcd         = RT_BF_GET(pThis->fCap, VTD_BF_CAP_REG_FRO);
    2325     uint16_t const offIva          = RT_BF_GET(pThis->fExtCap, VTD_BF_ECAP_REG_IRO);
     2391    uint8_t const  cMaxGstAddrBits = RT_BF_GET(pThis->fCapReg, VTD_BF_CAP_REG_MGAW) + 1;
     2392    uint8_t const  cSupGstAddrBits = vtdCapRegGetSagawBits(RT_BF_GET(pThis->fCapReg, VTD_BF_CAP_REG_SAGAW));
     2393    uint16_t const offFrcd         = RT_BF_GET(pThis->fCapReg, VTD_BF_CAP_REG_FRO);
     2394    uint16_t const offIva          = RT_BF_GET(pThis->fExtCapReg, VTD_BF_ECAP_REG_IRO);
    23262395    LogRel(("%s: VER=%u.%u CAP=%#RX64 ECAP=%#RX64 (MGAW=%u bits, SAGAW=%u bits, FRO=%#x, IRO=%#x) mapped at %#RGp\n",
    23272396            DMAR_LOG_PFX, RT_BF_GET(uVerReg, VTD_BF_VER_REG_MAX), RT_BF_GET(uVerReg, VTD_BF_VER_REG_MIN),
    2328             pThis->fCap, pThis->fExtCap, cMaxGstAddrBits, cSupGstAddrBits, offFrcd, offIva, DMAR_MMIO_BASE_PHYSADDR));
     2397            pThis->fCapReg, pThis->fExtCapReg, cMaxGstAddrBits, cSupGstAddrBits, offFrcd, offIva, DMAR_MMIO_BASE_PHYSADDR));
    23292398
    23302399    return VINF_SUCCESS;
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette