VirtualBox

Changeset 88669 in vbox for trunk/src/VBox


Timestamp: Apr 23, 2021 7:14:48 AM (4 years ago)
Author:    vboxsync
Message:   Intel IOMMU: bugref:9967 WIP.
File:      1 edited

Legend: unchanged lines show both the r88627 and r88669 line numbers; lines marked "-" exist only in r88627 (removed), lines marked "+" only in r88669 (added).
  • trunk/src/VBox/Devices/Bus/DevIommuIntel.cpp

    r88627 r88669  
    48   48          AssertReturn((a_cb) == 4 || (a_cb) == 8, VINF_IOM_MMIO_UNUSED_FF); \
    49   49          AssertReturn(!((a_off) & ((a_cb) - 1)), VINF_IOM_MMIO_UNUSED_FF); \
-   50            } while (0);
+        50       } while (0)
    51   51  
    52   52    /** Checks whether the MMIO offset is valid. */
     
    63   63        } while (0)
    64   64  
+        65   /** Acquires the DMAR lock and is not expected to fail. */
+        66   #define DMAR_LOCK(a_pDevIns, a_pThisCC) \
+        67       do { \
+        68           int const rcLock = (a_pThisCC)->CTX_SUFF(pIommuHlp)->pfnLock((a_pDevIns), VERR_IGNORED); \
+        69           Assert(rcLock == VINF_SUCCESS); \
+        70       } while (0)
+        71  
    65   72    /** Release the DMAR lock. */
    66   73    #define DMAR_UNLOCK(a_pDevIns, a_pThisCC)           (a_pThisCC)->CTX_SUFF(pIommuHlp)->pfnUnlock(a_pDevIns)
-   67        
-   68        /** Checks whether the calling thread is the owner of the DMAR lock. */
-   69        #define DMAR_LOCK_IS_OWNER(a_pDevIns, a_pThisCC)    (a_pThisCC)->CTX_SUFF(pIommuHlp)->pfnLockIsOwner(a_pDevIns)
    70   74  
    71   75    /** Asserts that the calling thread owns the DMAR lock. */
     
  1028 1032   * @returns Strict VBox status code.
  1029 1033   * @param   pDevIns     The IOMMU device instance.
- 1030        * @param   off         The MMIO register offset.
- 1031        * @param   cb          The size of the MMIO access (in bytes).
+      1034   * @param   offReg      The MMIO register offset.
+      1035   * @param   cbReg       The size of the MMIO access (in bytes).
  1032 1036   * @param   uCcmdReg    The value written to CCMD_REG.
  1033 1037   */
- 1034       static VBOXSTRICTRC dmarCcmdRegWrite(PPDMDEVINS pDevIns, uint16_t off, uint8_t cb, uint64_t uCcmdReg)
+      1038  static VBOXSTRICTRC dmarCcmdRegWrite(PPDMDEVINS pDevIns, uint16_t offReg, uint8_t cbReg, uint64_t uCcmdReg)
  1035 1039  {
  1036 1040      /* At present, we only care about responding to high 32-bits writes, low 32-bits are data. */
- 1037           if (off + cb > VTD_MMIO_OFF_CCMD_REG + 4)
+      1041      if (offReg + cbReg > VTD_MMIO_OFF_CCMD_REG + 4)
  1038 1042      {
  1039 1043          /* Check if we need to invalidate the context-cache. */
     
  1069 1073   * @returns Strict VBox status code.
  1070 1074   * @param   pDevIns     The IOMMU device instance.
- 1071        * @param   off         The MMIO register offset.
+      1075   * @param   offReg      The MMIO register offset.
  1072 1076   * @param   uIqtReg     The value written to IQT_REG.
  1073 1077   */
- 1074       static VBOXSTRICTRC dmarIqtRegWrite(PPDMDEVINS pDevIns, uint16_t off, uint64_t uIqtReg)
+      1078  static VBOXSTRICTRC dmarIqtRegWrite(PPDMDEVINS pDevIns, uint16_t offReg, uint64_t uIqtReg)
  1075 1079  {
  1076 1080      /* We only care about the low 32-bits, high 32-bits are reserved. */
- 1077           if (off == VTD_MMIO_OFF_IQT_REG)
+      1081      Assert(offReg == VTD_MMIO_OFF_IQT_REG);
+      1082      PDMAR pThis = PDMDEVINS_2_DATA(pDevIns, PDMAR);
+      1083  
+      1084      uint32_t const offQt   = VTD_IQT_REG_GET_QT(uIqtReg);
+      1085      uint64_t const uIqaReg = dmarRegRead64(pThis, VTD_MMIO_OFF_IQA_REG);
+      1086      uint8_t const  fDw     = RT_BF_GET(uIqaReg, VTD_BF_IQA_REG_DW);
+      1087  
+      1088      /* If the descriptor width is 256-bits, the queue tail offset must be aligned accordingly. */
+      1089      if (   fDw != VTD_IQA_REG_DW_256_BIT
+      1090          || !(offQt & RT_BIT(4)))
+      1091          dmarInvQueueThreadWakeUpIfNeeded(pDevIns);
+      1092      else
  1078 1093      {
- 1079               PDMAR pThis = PDMDEVINS_2_DATA(pDevIns, PDMAR);
- 1080       
- 1081               /* Verify if the queue tail offset is aligned according to the descriptor width. */
- 1082               uint32_t const offQueueTail = VTD_IQT_REG_GET_QT(uIqtReg);
- 1083               uint64_t const uIqaReg      = dmarRegRead64(pThis, VTD_MMIO_OFF_IQA_REG);
- 1084               uint8_t const  fDw          = RT_BF_GET(uIqaReg, VTD_BF_IQA_REG_DW);
- 1085               if (   fDw != VTD_IQA_REG_DW_256_BIT
- 1086                   || !(offQueueTail & 0x1f))
- 1087                   dmarInvQueueThreadWakeUpIfNeeded(pDevIns);
- 1088               else
- 1089                   dmarIqeFaultRecord(pDevIns, kDmarDiag_IqtReg_Qt_NotAligned, kQueueTailNotAligned);
+      1094          /* Hardware treats bit 4 as RsvdZ here, so clear it. */
+      1095          dmarRegChange32(pThis, offReg, ~RT_BIT(4) /* fAndMask */, 0 /* fOrMask */);
+      1096          dmarIqeFaultRecord(pDevIns, kDmarDiag_IqtReg_Qt_NotAligned, kQueueTailNotAligned);
  1090 1097      }
  1091 1098      return VINF_SUCCESS;
     
  1098 1105   * @returns Strict VBox status code.
  1099 1106   * @param   pDevIns     The IOMMU device instance.
- 1100        * @param   off         The MMIO register offset.
+      1107   * @param   offReg      The MMIO register offset.
  1101 1108   * @param   uIqaReg     The value written to IQA_REG.
  1102 1109   */
- 1103       static VBOXSTRICTRC dmarIqaRegWrite(PPDMDEVINS pDevIns, uint16_t off, uint64_t uIqaReg)
- 1104       {
- 1105           /** @todo Don't allow writing this when GSTS.QIES is set? */
- 1106       
+      1110  static VBOXSTRICTRC dmarIqaRegWrite(PPDMDEVINS pDevIns, uint16_t offReg, uint64_t uIqaReg)
+      1111  {
  1107 1112      /* At present, we only care about the low 32-bits, high 32-bits are data. */
- 1108           if (off == VTD_MMIO_OFF_IQA_REG)
+      1113      Assert(offReg == VTD_MMIO_OFF_IQA_REG);
+      1114  
+      1115      /** @todo What happens if IQA_REG is written when dmarInvQueueCanProcessRequests
+      1116       *        returns true? The Intel VT-d spec. doesn't state anywhere that it
+      1117       *        cannot happen or that it's ignored when it does happen. */
+      1118  
+      1119      PDMAR pThis = PDMDEVINS_2_DATA(pDevIns, PDMAR);
+      1120      uint8_t const fDw = RT_BF_GET(uIqaReg, VTD_BF_IQA_REG_DW);
+      1121      if (fDw == VTD_IQA_REG_DW_256_BIT)
  1109 1122      {
- 1110               PDMAR pThis = PDMDEVINS_2_DATA(pDevIns, PDMAR);
- 1111               uint8_t const fDw = RT_BF_GET(uIqaReg, VTD_BF_IQA_REG_DW);
- 1112               if (fDw == VTD_IQA_REG_DW_256_BIT)
- 1113               {
- 1114                   bool const fSupports256BitDw = RT_BOOL(pThis->fExtCap & (VTD_BF_ECAP_REG_SMTS_MASK | VTD_BF_ECAP_REG_ADMS_MASK));
- 1115                   if (fSupports256BitDw)
- 1116                   { /* likely */ }
- 1117                   else
- 1118                       dmarIqeFaultRecord(pDevIns, kDmarDiag_IqaReg_Dw_Invalid, kInvalidDescriptorWidth);
- 1119               }
+      1123          bool const fSupports256BitDw = (pThis->fExtCap & (VTD_BF_ECAP_REG_SMTS_MASK | VTD_BF_ECAP_REG_ADMS_MASK));
+      1124          if (fSupports256BitDw)
+      1125          { /* likely */ }
+      1126          else
+      1127              dmarIqeFaultRecord(pDevIns, kDmarDiag_IqaReg_Dw_Invalid, kInvalidDescriptorWidth);
  1120 1128      }
  1121 1129      return VINF_SUCCESS;
     
  1201 1209      if (DMAR_IS_MMIO_OFF_VALID(offLast))
  1202 1210      {
+      1211          PCDMARCC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PCDMARCC);
+      1212          DMAR_LOCK_RET(pDevIns, pThisCC, VINF_IOM_R3_MMIO_WRITE);
+      1213  
  1203 1214          uint64_t const uRegWritten = cb == 8 ? dmarRegWrite64(pThis, offReg, *(uint64_t *)pv)
  1204 1215                                               : dmarRegWrite32(pThis, offReg, *(uint32_t *)pv);
     
  1214 1225  
  1215 1226              case VTD_MMIO_OFF_IQT_REG:
- 1216                   case VTD_MMIO_OFF_IQT_REG + 4:
+      1227              /*   VTD_MMIO_OFF_IQT_REG + 4: (RsvdZ) */
  1217 1228              {
  1218 1229                  rcStrict = dmarIqtRegWrite(pDevIns, offReg, uRegWritten);
     
  1221 1232  
  1222 1233              case VTD_MMIO_OFF_IQA_REG:
- 1223                   case VTD_MMIO_OFF_IQA_REG + 4:
+      1234              /*   VTD_MMIO_OFF_IQA_REG + 4: (Data) */
  1224 1235              {
  1225 1236                  rcStrict = dmarIqaRegWrite(pDevIns, offReg, uRegWritten);
     
  1228 1239          }
  1229 1240  
+      1241          DMAR_UNLOCK(pDevIns, pThisCC);
  1230 1242          LogFlowFunc(("offReg=%#x rc=%Rrc\n", offReg, VBOXSTRICTRC_VAL(rcStrict)));
  1231 1243          return rcStrict;
     
  1251 1263      if (DMAR_IS_MMIO_OFF_VALID(offLast))
  1252 1264      {
+      1265          PCDMARCC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PCDMARCC);
+      1266          DMAR_LOCK_RET(pDevIns, pThisCC, VINF_IOM_R3_MMIO_READ);
+      1267  
  1253 1268          if (cb == 8)
  1254 1269          {
     
  1262 1277          }
  1263 1278  
+      1279          DMAR_UNLOCK(pDevIns, pThisCC);
  1264 1280          return VINF_SUCCESS;
  1265 1281      }
     
  1307 1323           */
  1308 1324          DMAR_LOCK_RET(pDevIns, pThisR3, VERR_IGNORED);
+      1325          /** @todo use dmarInvQueueCanProcessRequests instead? */
  1309 1326          uint32_t const uGstsReg = dmarRegRead32(pThis, VTD_MMIO_OFF_GSTS_REG);
  1310 1327          DMAR_UNLOCK(pDevIns, pThisR3);
     
  1504 1521      LogFlowFunc(("\n"));
  1505 1522  
+      1523      PCDMARR3 pThisR3 = PDMDEVINS_2_DATA_CC(pDevIns, PCDMARR3);
+      1524      DMAR_LOCK(pDevIns, pThisR3);
+      1525  
  1506 1526      dmarR3RegsInit(pDevIns);
+      1527  
+      1528      DMAR_UNLOCK(pDevIns, pThisR3);
  1507 1529  }
  1508 1530  
     
  1513 1535  static DECLCALLBACK(int) iommuIntelR3Destruct(PPDMDEVINS pDevIns)
  1514 1536  {
- 1515           PDMAR pThis = PDMDEVINS_2_DATA(pDevIns, PDMAR);
+      1537      PDMAR    pThis   = PDMDEVINS_2_DATA(pDevIns, PDMAR);
+      1538      PCDMARR3 pThisR3 = PDMDEVINS_2_DATA_CC(pDevIns, PCDMARR3);
  1516 1539      LogFlowFunc(("\n"));
+      1540  
+      1541      DMAR_LOCK_RET(pDevIns, pThisR3, VERR_IGNORED);
  1517 1542  
  1518 1543      if (pThis->hEvtInvQueue != NIL_SUPSEMEVENT)
     
  1522 1547      }
  1523 1548  
+      1549      DMAR_UNLOCK(pDevIns, pThisR3);
  1524 1550      return VINF_SUCCESS;
  1525 1551  }
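
For readers following the invalidation-queue changes above: the rewritten dmarIqaRegWrite/dmarIqtRegWrite paths enforce two related rules when the guest selects the 256-bit invalidation-descriptor width through IQA_REG's DW field, namely that this width is only acceptable when the extended capabilities advertise SMTS or ADMS, and that bit 4 of the IQT_REG queue-tail offset is then reserved (RsvdZ), i.e. the tail must be 32-byte aligned. The standalone sketch below restates those two checks outside the device model; the constants, bit positions and helper names are illustrative placeholders, not VirtualBox or VT-d definitions.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Placeholder encodings; the real bit layouts live in the VT-d spec and DevIommuIntel.cpp. */
#define EXAMPLE_ECAP_SMTS   (UINT64_C(1) << 0)   /* "scalable-mode translation" capability */
#define EXAMPLE_ECAP_ADMS   (UINT64_C(1) << 1)   /* "abort DMA mode" capability            */
#define EXAMPLE_DW_128_BIT  0u                   /* IQA_REG.DW: 128-bit descriptors        */
#define EXAMPLE_DW_256_BIT  1u                   /* IQA_REG.DW: 256-bit descriptors        */

/* IQA_REG write: the 256-bit descriptor width is only valid when SMTS or ADMS is supported. */
static bool exampleIqaWidthValid(uint64_t fExtCap, unsigned fDw)
{
    if (fDw != EXAMPLE_DW_256_BIT)
        return true;
    return (fExtCap & (EXAMPLE_ECAP_SMTS | EXAMPLE_ECAP_ADMS)) != 0;
}

/* IQT_REG write: with 256-bit descriptors, bit 4 of the queue-tail offset is RsvdZ, so the
   tail must stay 32-byte aligned; a set bit 4 is reported as an invalidation queue error.  */
static bool exampleIqtTailAligned(uint32_t offQueueTail, unsigned fDw)
{
    if (fDw != EXAMPLE_DW_256_BIT)
        return true;
    return (offQueueTail & (1u << 4)) == 0;
}

int main(void)
{
    printf("width ok:  %d\n", exampleIqaWidthValid(EXAMPLE_ECAP_SMTS, EXAMPLE_DW_256_BIT)); /* 1 */
    printf("width bad: %d\n", exampleIqaWidthValid(0, EXAMPLE_DW_256_BIT));                 /* 0 */
    printf("tail ok:   %d\n", exampleIqtTailAligned(0x40, EXAMPLE_DW_256_BIT));             /* 1 */
    printf("tail bad:  %d\n", exampleIqtTailAligned(0x10, EXAMPLE_DW_256_BIT));             /* 0 */
    return 0;
}

As the changeset itself shows, a failed width or alignment check does not abort the MMIO write: the register value has already been recorded (with the reserved bit masked off in the IQT_REG case), and the condition is surfaced through dmarIqeFaultRecord instead.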