VirtualBox

Changeset 90028 in vbox for trunk/src


Ignore:
Timestamp:
Jul 5, 2021 2:25:35 PM (4 years ago)
Author:
vboxsync
svn:sync-xref-src-repo-rev:
145521
Message:

AMD IOMMU: bugref:9654 Fixed address translations resulting in non-contiguous physical regions.
Fixed IOMMU from trying to remap MSIs generated by itself (e.g., IOPF).
Fixed destroying the IOTLB cache properly on VM reset.
Fixed some missing conditions in I/O page walk (reserved bits checks).
Slightly faster IOVA skipped bits checking in I/O page walk.
Added a couple of extra statistics to track non-contiguous translations and usage of non-standard page sizes.

Location:
trunk/src/VBox
Files:
3 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/Devices/Bus/DevIommuAmd.cpp

    r89554 r90028  
    4848/** Enable the IOTLBE cache only in ring-3 for now, see @bugref{9654#c95}. */
    4949#ifdef IN_RING3
    50 # define IOMMU_WITH_IOTLBE_CACHE
     50//# define IOMMU_WITH_IOTLBE_CACHE      /* Disabled for now, see @bugref{9654#c107}. */
    5151#endif
    5252/** Enable the interrupt cache. */
     
    219219# define IOMMU_UNLOCK(a_pDevIns, a_pThisCC)         (a_pThisCC)->CTX_SUFF(pIommuHlp)->pfnUnlock((a_pDevIns))
    220220
     221/** Gets the maximum valid IOVA for the given I/O page-table level. */
     222#define IOMMU_GET_MAX_VALID_IOVA(a_Level)           ((X86_PAGE_4K_SIZE << ((a_Level) * 9)) - 1)
     223
    221224
    222225/*********************************************************************************************************************************
     
    353356    bool volatile               fCmdThreadSignaled;
    354357    /** Padding. */
    355     bool                        afPadding0[7];
     358    bool                        afPadding0[3];
     359    /** The IOMMU PCI address. */
     360    PCIBDF                      uPciAddress;
    356361
    357362#ifdef IOMMU_WITH_DTE_CACHE
     
    536541    STAMCOUNTER                 StatIntrCacheHit;          /**< Number of interrupt cache hits. */
    537542    STAMCOUNTER                 StatIntrCacheMiss;         /**< Number of interrupt cache misses. */
     543
     544    STAMCOUNTER                 StatNonStdPageSize;        /**< Number of non-standard page size translations. */
     545    STAMCOUNTER                 StatIopfs;                 /**< Number of I/O page faults. */
    538546    /** @} */
    539547#endif
     
    691699typedef DECLCALLBACKTYPE(int, FNIOPAGELOOKUP,(PPDMDEVINS pDevIns, uint64_t uIovaPage, uint8_t fPerm, PCIOMMUOPAUX pAux,
    692700                                              PIOPAGELOOKUP pPageLookup));
    693 typedef FNIOPAGELOOKUP      *PFNIOPAGELOOKUP;
     701typedef FNIOPAGELOOKUP *PFNIOPAGELOOKUP;
    694702
    695703
     
    803811static bool iommuAmdLookupIsAccessContig(PCIOPAGELOOKUP pPageLookupPrev, PCIOPAGELOOKUP pPageLookup)
    804812{
    805     Assert(pPageLookupPrev->fPerm  == pPageLookup->fPerm);
    806813    size_t const   cbPrev      = RT_BIT_64(pPageLookupPrev->cShift);
    807814    RTGCPHYS const GCPhysPrev  = pPageLookupPrev->GCPhysSpa;
    808815    RTGCPHYS const GCPhys      = pPageLookup->GCPhysSpa;
    809 #ifdef RT_STRICT
     816
    810817    /* Paranoia: Ensure offset bits are 0. */
    811     {
    812         uint64_t const fOffMaskPrev = X86_GET_PAGE_OFFSET_MASK(pPageLookupPrev->cShift);
    813         uint64_t const fOffMask     = X86_GET_PAGE_OFFSET_MASK(pPageLookup->cShift);
    814         Assert(!(GCPhysPrev & fOffMaskPrev));
    815         Assert(!(GCPhys     & fOffMask));
    816     }
    817 #endif
     818    Assert(!(GCPhysPrev & X86_GET_PAGE_OFFSET_MASK(pPageLookupPrev->cShift)));
     819    Assert(!(GCPhys     & X86_GET_PAGE_OFFSET_MASK(pPageLookup->cShift)));
     820
     821    /* Paranoia: Ensure permissions are identical. */
     822    Assert(pPageLookupPrev->fPerm  == pPageLookup->fPerm);
     823
    818824    return GCPhysPrev + cbPrev == GCPhys;
    819825}
    820826
    821827
     828#ifdef IOMMU_WITH_DTE_CACHE
    822829/**
    823830 * Gets the basic I/O device flags for the given device table entry.
     
    860867    return fFlags;
    861868}
     869#endif
    862870
    863871
     
    10811089    AssertPtr(pArgs->pIommuR3);
    10821090    AssertPtr(pArgs->pHlp);
    1083     //Assert(pArgs->pIommuCC->u32Magic == IOMMU_MAGIC);
     1091    //Assert(pArgs->pIommuR3->u32Magic == IOMMU_MAGIC);
    10841092
    10851093    uint16_t const idDomain = IOMMU_IOTLB_KEY_GET_DOMAIN_ID(pNode->Key);
     
    11371145
    11381146/**
     1147 * Destroys an IOTLB entry that's in the tree.
     1148 *
     1149 * @returns VINF_SUCCESS.
     1150 * @param   pNode       Pointer to an IOTLBE.
     1151 * @param   pvUser      Opaque data. Currently not used, will be NULL.
     1152 */
     1153static DECLCALLBACK(int) iommuAmdIotlbEntryDestroy(PAVLU64NODECORE pNode, void *pvUser)
     1154{
     1155    RT_NOREF(pvUser);
     1156    PIOTLBE pIotlbe = (PIOTLBE)pNode;
     1157    Assert(pIotlbe);
     1158    pIotlbe->NdLru.pNext = NULL;
     1159    pIotlbe->NdLru.pPrev = NULL;
     1160    RT_ZERO(pIotlbe->PageLookup);
     1161    pIotlbe->fEvictPending = false;
     1162    return VINF_SUCCESS;
     1163}
     1164
     1165
     1166/**
    11391167 * Inserts an IOTLB entry into the cache.
    11401168 *
     
    13061334    if (pThisR3->cCachedIotlbes > 0)
    13071335    {
    1308         size_t const cbIotlbes = sizeof(IOTLBE) * IOMMU_IOTLBE_MAX;
    1309         RT_BZERO(pThisR3->paIotlbes, cbIotlbes);
     1336        RTAvlU64Destroy(&pThisR3->TreeIotlbe, iommuAmdIotlbEntryDestroy, NULL /* pvParam */);
     1337        RTListInit(&pThisR3->LstLruIotlbe);
    13101338        pThisR3->idxUnusedIotlbe = 0;
    13111339        pThisR3->cCachedIotlbes  = 0;
    13121340        STAM_COUNTER_RESET(&pThis->StatIotlbeCached);
    1313         RTListInit(&pThisR3->LstLruIotlbe);
    13141341    }
    13151342
     
    14091436    size_t cPages = cbIova / X86_PAGE_4K_SIZE;
    14101437    cPages = RT_MIN(cPages, IOMMU_IOTLBE_MAX);
     1438
     1439    Assert((cbIova % X86_PAGE_4K_SIZE) == 0);
     1440    Assert(cPages > 0);
    14111441
    14121442    IOMMU_CACHE_LOCK(pDevIns, pThis);
     
    15561586    int rc = VINF_SUCCESS;
    15571587    PIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
     1588    Assert(idDevice != pThis->uPciAddress);
    15581589    IOMMU_CACHE_LOCK(pDevIns, pThis);
    15591590
     
    24522483    pThis->EvtLogHeadPtr.au32[0] = offBuf;
    24532484
    2454     LogFlowFunc(("Set EvtLogHeadPtr to %#RX32\n", offBuf));
     2485    Log4Func(("Set EvtLogHeadPtr to %#RX32\n", offBuf));
    24552486    return VINF_SUCCESS;
    24562487}
     
    24922523    pThis->EvtLogTailPtr.au32[0] = offBuf;
    24932524
    2494     LogFlowFunc(("Set EvtLogTailPtr to %#RX32\n", offBuf));
     2525    Log4Func(("Set EvtLogTailPtr to %#RX32\n", offBuf));
    24952526    return VINF_SUCCESS;
    24962527}
     
    33183349    AssertCompile(sizeof(EVT_GENERIC_T) == sizeof(EVT_IO_PAGE_FAULT_T));
    33193350    PCEVT_GENERIC_T pEvent = (PCEVT_GENERIC_T)pEvtIoPageFault;
     3351    PIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
     3352    STAM_COUNTER_INC(&pThis->StatIopfs);
    33203353
    33213354#ifdef IOMMU_WITH_DTE_CACHE
     
    36453678                                   IOMMUOP enmOp, PIOPAGELOOKUP pPageLookup)
    36463679{
     3680    PIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
    36473681    Assert(pDte->n.u1Valid);
    36483682    Assert(!(uIova & X86_PAGE_4K_OFFSET_MASK));
     
    36503684    /* The virtual address bits indexing table. */
    36513685    static uint8_t const  s_acIovaLevelShifts[] = { 0, 12, 21, 30, 39, 48, 57, 0 };
    3652     static uint64_t const s_auIovaLevelMasks[]  = { UINT64_C(0x0000000000000000),
    3653                                                     UINT64_C(0x00000000001ff000),
    3654                                                     UINT64_C(0x000000003fe00000),
    3655                                                     UINT64_C(0x0000007fc0000000),
    3656                                                     UINT64_C(0x0000ff8000000000),
    3657                                                     UINT64_C(0x01ff000000000000),
    3658                                                     UINT64_C(0xfe00000000000000),
    3659                                                     UINT64_C(0x0000000000000000) };
    3660     AssertCompile(RT_ELEMENTS(s_acIovaLevelShifts) == RT_ELEMENTS(s_auIovaLevelMasks));
    36613686    AssertCompile(RT_ELEMENTS(s_acIovaLevelShifts) > IOMMU_MAX_HOST_PT_LEVEL);
    36623687
    3663     /* Traverse the I/O page table starting with the page directory in the DTE. */
     3688    /*
     3689     * Traverse the I/O page table starting with the page directory in the DTE.
     3690     *
     3691     * The Valid (Present bit), Translation Valid and Mode (Next-Level bits) in
     3692     * the DTE have been validated already, see iommuAmdPreTranslateChecks.
     3693     */
    36643694    IOPTENTITY_T PtEntity;
    36653695    PtEntity.u64 = pDte->au64[0];
    36663696    for (;;)
    36673697    {
    3668         /* Figure out the system physical address of the page table at the current level. */
    36693698        uint8_t const uLevel = PtEntity.n.u3NextLevel;
    36703699
     
    36763705            uint64_t const offPte         = idxPte << 3;
    36773706            RTGCPHYS const GCPhysPtEntity = (PtEntity.u64 & IOMMU_PTENTITY_ADDR_MASK) + offPte;
    3678             int rc = PDMDevHlpPCIPhysRead(pDevIns, GCPhysPtEntity, &PtEntity.u64, sizeof(PtEntity));
     3707            int rc = PDMDevHlpPhysRead(pDevIns, GCPhysPtEntity, &PtEntity.u64, sizeof(PtEntity));
    36793708            if (RT_FAILURE(rc))
    36803709            {
     
    37013730        }
    37023731
     3732        /* Validate the encoding of the next level. */
     3733        uint8_t const uNextLevel = PtEntity.n.u3NextLevel;
     3734#if IOMMU_MAX_HOST_PT_LEVEL < 6
     3735        if (uNextLevel <= IOMMU_MAX_HOST_PT_LEVEL)
     3736        { /* likely */ }
     3737        else
     3738        {
     3739            LogFunc(("Next-level/paging-mode field of the paging entity invalid. uNextLevel=%#x -> IOPF\n", uNextLevel));
     3740            EVT_IO_PAGE_FAULT_T EvtIoPageFault;
     3741            iommuAmdIoPageFaultEventInit(idDevice, pDte->n.u16DomainId, uIova, true /* fPresent */, true /* fRsvdNotZero */,
     3742                                         false /* fPermDenied */, enmOp, &EvtIoPageFault);
     3743            iommuAmdIoPageFaultEventRaiseWithDte(pDevIns, pDte, NULL /* pIrte */, enmOp, &EvtIoPageFault,
     3744                                                 kIoPageFaultType_PteInvalidLvlEncoding);
     3745            return VERR_IOMMU_ADDR_TRANSLATION_FAILED;
     3746        }
     3747#endif
     3748
     3749        /* Check reserved bits. */
     3750        uint64_t const fRsvdMask = uNextLevel == 0 || uNextLevel == 7 ? IOMMU_PTE_RSVD_MASK : IOMMU_PDE_RSVD_MASK;
     3751        if (!(PtEntity.u64 & fRsvdMask))
     3752        { /* likely */ }
     3753        else
     3754        {
     3755            LogFunc(("Page table entity (%#RX64 level=%u) reserved bits set -> IOPF\n", PtEntity.u64, uNextLevel));
     3756            EVT_IO_PAGE_FAULT_T EvtIoPageFault;
     3757            iommuAmdIoPageFaultEventInit(idDevice, pDte->n.u16DomainId, uIova, true /* fPresent */, true /* fRsvdNotZero */,
     3758                                         false /* fPermDenied */, enmOp, &EvtIoPageFault);
     3759            iommuAmdIoPageFaultEventRaiseWithDte(pDevIns, pDte, NULL /* pIrte */, enmOp, &EvtIoPageFault,
     3760                                                 kIoPageFaultType_PteRsvdNotZero);
     3761            return VERR_IOMMU_ADDR_TRANSLATION_FAILED;
     3762        }
     3763
    37033764        /* Check permission bits. */
    3704         uint8_t const fPtePerm  = (PtEntity.u64 >> IOMMU_IO_PERM_SHIFT) & IOMMU_IO_PERM_MASK;
     3765        uint8_t const fPtePerm = (PtEntity.u64 >> IOMMU_IO_PERM_SHIFT) & IOMMU_IO_PERM_MASK;
    37053766        if ((fPerm & fPtePerm) == fPerm)
    37063767        { /* likely */ }
     
    37163777        }
    37173778
    3718         /* If this is a PTE, we're at the final level and we're done. */
    3719         uint8_t const uNextLevel = PtEntity.n.u3NextLevel;
     3779        /* If the next level is 0 or 7, this is the final level PTE. */
    37203780        if (uNextLevel == 0)
    37213781        {
    3722             /* The page size of the translation is the default (4K). */
    3723             pPageLookup->GCPhysSpa = PtEntity.u64 & IOMMU_PTENTITY_ADDR_MASK;
    3724             pPageLookup->cShift    = X86_PAGE_4K_SHIFT;
     3782            /* The page size of the translation is the default size for the level. */
     3783            uint8_t const  cShift    = s_acIovaLevelShifts[uLevel];
     3784            RTGCPHYS const GCPhysPte = PtEntity.u64 & IOMMU_PTENTITY_ADDR_MASK;
     3785            pPageLookup->GCPhysSpa = GCPhysPte & X86_GET_PAGE_BASE_MASK(cShift);
     3786            pPageLookup->cShift    = cShift;
    37253787            pPageLookup->fPerm     = fPtePerm;
    37263788            return VINF_SUCCESS;
     
    37293791        {
    37303792            /* The default page size of the translation is overridden. */
     3793            uint8_t        cShift    = X86_PAGE_4K_SHIFT;
    37313794            RTGCPHYS const GCPhysPte = PtEntity.u64 & IOMMU_PTENTITY_ADDR_MASK;
    3732             uint8_t        cShift    = X86_PAGE_4K_SHIFT;
    37333795            while (GCPhysPte & RT_BIT_64(cShift++))
    37343796                ;
    37353797
    37363798            /* The page size must be larger than the default size and lower than the default size of the higher level. */
    3737             Assert(uLevel < IOMMU_MAX_HOST_PT_LEVEL);   /* PTE at level 6 handled outside the loop, uLevel should be <= 5. */
    37383799            if (   cShift > s_acIovaLevelShifts[uLevel]
    37393800                && cShift < s_acIovaLevelShifts[uLevel + 1])
    37403801            {
    3741                 pPageLookup->GCPhysSpa = GCPhysPte;
     3802                pPageLookup->GCPhysSpa = GCPhysPte & X86_GET_PAGE_BASE_MASK(cShift);
    37423803                pPageLookup->cShift    = cShift;
    37433804                pPageLookup->fPerm     = fPtePerm;
     3805                STAM_COUNTER_INC(&pThis->StatNonStdPageSize);
    37443806                return VINF_SUCCESS;
    37453807            }
    37463808
    3747             LogFunc(("Page size invalid cShift=%#x -> IOPF\n", cShift));
     3809            LogFunc(("Page size invalid cShift=%u -> IOPF\n", cShift));
    37483810            EVT_IO_PAGE_FAULT_T EvtIoPageFault;
    37493811            iommuAmdIoPageFaultEventInit(idDevice, pDte->n.u16DomainId, uIova, true /* fPresent */, false /* fRsvdNotZero */,
     
    37533815            return VERR_IOMMU_ADDR_TRANSLATION_FAILED;
    37543816        }
    3755 
    3756         /* Validate the next level encoding of the PDE. */
    3757 #if IOMMU_MAX_HOST_PT_LEVEL < 6
    3758         if (uNextLevel <= IOMMU_MAX_HOST_PT_LEVEL)
    3759         { /* likely */ }
    3760         else
    3761         {
    3762             LogFunc(("Next level of PDE invalid uNextLevel=%#x -> IOPF\n", uNextLevel));
    3763             EVT_IO_PAGE_FAULT_T EvtIoPageFault;
    3764             iommuAmdIoPageFaultEventInit(idDevice, pDte->n.u16DomainId, uIova, true /* fPresent */, false /* fRsvdNotZero */,
    3765                                          false /* fPermDenied */, enmOp, &EvtIoPageFault);
    3766             iommuAmdIoPageFaultEventRaiseWithDte(pDevIns, pDte, NULL /* pIrte */, enmOp, &EvtIoPageFault,
    3767                                                  kIoPageFaultType_PteInvalidLvlEncoding);
    3768             return VERR_IOMMU_ADDR_TRANSLATION_FAILED;
    3769         }
    3770 #else
    3771         Assert(uNextLevel <= IOMMU_MAX_HOST_PT_LEVEL);
    3772 #endif
    37733817
    37743818        /* Validate level transition. */
     
    37863830        }
    37873831
    3788         /* Ensure IOVA bits of skipped levels are zero. */
    3789         Assert(uLevel > 0);
    3790         uint64_t uIovaSkipMask = 0;
    3791         for (unsigned idxLevel = uLevel - 1; idxLevel > uNextLevel; idxLevel--)
    3792             uIovaSkipMask |= s_auIovaLevelMasks[idxLevel];
    3793         if (!(uIova & uIovaSkipMask))
     3832        /* Ensure IOVA bits of skipped levels (if any) are zero. */
     3833        uint64_t const fIovaSkipMask = IOMMU_GET_MAX_VALID_IOVA(uLevel - 1) - IOMMU_GET_MAX_VALID_IOVA(uNextLevel);
     3834        if (!(uIova & fIovaSkipMask))
    37943835        { /* likely */ }
    37953836        else
    37963837        {
    3797             LogFunc(("IOVA of skipped levels are not zero %#RX64 (SkipMask=%#RX64) -> IOPF\n", uIova, uIovaSkipMask));
     3838            LogFunc(("IOVA of skipped levels are not zero. uIova=%#RX64 fSkipMask=%#RX64 -> IOPF\n", uIova, fIovaSkipMask));
    37983839            EVT_IO_PAGE_FAULT_T EvtIoPageFault;
    37993840            iommuAmdIoPageFaultEventInit(idDevice, pDte->n.u16DomainId, uIova, true /* fPresent */, false /* fRsvdNotZero */,
     
    38043845        }
    38053846
    3806         /* Continue with traversing the page directory at this level. */
     3847        /* Traverse to the next level. */
    38073848    }
    38083849}
     
    38863927                uint64_t const offMask = X86_GET_PAGE_OFFSET_MASK(PageLookup.cShift);
    38873928                uint64_t const offSpa  = uIova & offMask;
    3888                 Assert(!(PageLookup.GCPhysSpa & offMask));
     3929                AssertMsg(!(PageLookup.GCPhysSpa & offMask), ("GCPhysSpa=%#RX64 offMask=%#RX64\n",
     3930                                                              PageLookup.GCPhysSpa, offMask));
    38893931                GCPhysSpa = PageLookup.GCPhysSpa | offSpa;
    38903932            }
     
    39083950                cbRemaining -= (cbPage - offIova);  /* Calculate how much more we need to access. */
    39093951                uIovaPage   += cbPage;              /* Update address of the next access. */
    3910                 offIova      = 0;                   /* After first page, all pages are accessed from off 0. */
     3952                offIova      = 0;                   /* After first page, remaining pages are accessed from offset 0. */
    39113953            }
    39123954            else
     
    39253967    pAddrOut->fPerm = PageLookupPrev.fPerm;     /* Update the allowed permissions for this access. */
    39263968    if (pcbPages)
    3927         *pcbPages = cbPages;                    /* Update the size of the pages accessed. */
     3969        *pcbPages = cbPages;                    /* Update the size (in bytes) of the pages accessed. */
    39283970    return rc;
    39293971}
     
    47494791    PIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
    47504792
     4793    /* If this MSI was generated by the IOMMU itself, it's not subject to remapping, see @bugref{9654#c104}. */
     4794    if (idDevice == pThis->uPciAddress)
     4795        return VERR_IOMMU_CANNOT_CALL_SELF;
     4796
    47514797    /* Interrupts are forwarded with remapping when the IOMMU is disabled. */
    47524798    IOMMU_CTRL_T const Ctrl = iommuAmdGetCtrlUnlocked(pThis);
     
    51935239{
    51945240    RT_NOREF(pThread);
    5195     LogFlowFunc(("\n"));
     5241    Log4Func(("\n"));
    51965242    PCIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
    51975243    return PDMDevHlpSUPSemEventSignal(pDevIns, pThis->hEvtCmdThread);
     
    67876833{
    67886834    PDMDEV_CHECK_VERSIONS_RETURN(pDevIns);
    6789     RT_NOREF(pCfg);
    6790 
    6791     PIOMMU   pThis   = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
    6792     PIOMMUR3 pThisR3 = PDMDEVINS_2_DATA_CC(pDevIns, PIOMMUR3);
     6835
     6836    PIOMMU        pThis   = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
     6837    PIOMMUR3      pThisR3 = PDMDEVINS_2_DATA_CC(pDevIns, PIOMMUR3);
     6838    PCPDMDEVHLPR3 pHlp    = pDevIns->pHlpR3;
     6839
    67936840    pThis->u32Magic = IOMMU_MAGIC;
    67946841    pThisR3->pDevInsR3 = pDevIns;
    67956842
    67966843    LogFlowFunc(("iInstance=%d\n", iInstance));
     6844
     6845    /*
     6846     * Validate and read the configuration.
     6847     */
     6848    PDMDEV_VALIDATE_CONFIG_RETURN(pDevIns, "PCIAddress", "");
     6849    int rc = pHlp->pfnCFGMQueryU32Def(pCfg, "PCIAddress", &pThis->uPciAddress, NIL_PCIBDF);
     6850    if (RT_FAILURE(rc))
     6851        return PDMDEV_SET_ERROR(pDevIns, rc, N_("Configuration error: Failed to query 32-bit integer \"PCIAddress\""));
     6852    if (!PCIBDF_IS_VALID(pThis->uPciAddress))
     6853        return PDMDEV_SET_ERROR(pDevIns, rc, N_("Configuration error: Failed \"PCIAddress\" of the AMD IOMMU cannot be invalid"));
    67976854
    67986855    /*
     
    68066863    IommuReg.pfnMsiRemap      = iommuAmdMsiRemap;
    68076864    IommuReg.u32TheEnd        = PDM_IOMMUREGCC_VERSION;
    6808     int rc = PDMDevHlpIommuRegister(pDevIns, &IommuReg, &pThisR3->CTX_SUFF(pIommuHlp), &pThis->idxIommu);
     6865    rc = PDMDevHlpIommuRegister(pDevIns, &IommuReg, &pThisR3->CTX_SUFF(pIommuHlp), &pThis->idxIommu);
    68096866    if (RT_FAILURE(rc))
    68106867        return PDMDEV_SET_ERROR(pDevIns, rc, N_("Failed to register ourselves as an IOMMU device"));
     
    70257082    PDMDevHlpSTAMRegister(pDevIns, &pThis->StatIntrCacheHit, STAMTYPE_COUNTER, "Interrupt/CacheHit", STAMUNIT_OCCURENCES, "Number of cache hits.");
    70267083    PDMDevHlpSTAMRegister(pDevIns, &pThis->StatIntrCacheMiss, STAMTYPE_COUNTER, "Interrupt/CacheMiss", STAMUNIT_OCCURENCES, "Number of cache misses.");
     7084
     7085    PDMDevHlpSTAMRegister(pDevIns, &pThis->StatNonStdPageSize, STAMTYPE_COUNTER, "MemAccess/NonStdPageSize", STAMUNIT_OCCURENCES, "Number of non-standard page size translations.");
     7086    PDMDevHlpSTAMRegister(pDevIns, &pThis->StatIopfs, STAMTYPE_COUNTER, "MemAccess/IOPFs", STAMUNIT_OCCURENCES, "Number of I/O page faults.");
    70277087# endif
    70287088
  • trunk/src/VBox/Main/src-client/ConsoleImpl2.cpp

    r89953 r90028  
    16031603                InsertConfigNode(pInst,    "Config", &pCfg);
    16041604                hrc = pBusMgr->assignPCIDevice("iommu-amd", pInst);                         H();
     1605
     1606                /* The AMD IOMMU device needs to know which PCI slot it's in, see @bugref{9654#c104}. */
     1607                {
     1608                    PCIBusAddress Address;
     1609                    if (pBusMgr->findPCIAddress("iommu-amd", 0, Address))
     1610                    {
     1611                        uint32_t const u32IommuAddress = (Address.miDevice << 16) | Address.miFn;
     1612                        InsertConfigInteger(pCfg, "PCIAddress", u32IommuAddress);
     1613                    }
     1614                    else
     1615                        return VMR3SetError(pUVM, VERR_INVALID_PARAMETER, RT_SRC_POS,
     1616                                            N_("Failed to find PCI address of the assigned IOMMU device!"));
     1617                }
    16051618
    16061619                PCIBusAddress PCIAddr = PCIBusAddress((int32_t)uIoApicPciAddress);
  • trunk/src/VBox/VMM/VMMAll/PDMAllIommu.cpp

    r88638 r90028  
    133133            {
    134134                /** @todo Handle strict return codes from PGMPhysRead. */
    135                 rc = pDevIns->CTX_SUFF(pHlp)->pfnPhysRead(pDevIns, GCPhysOut, pvBuf, cbRead, fFlags);
     135                rc = pDevIns->CTX_SUFF(pHlp)->pfnPhysRead(pDevIns, GCPhysOut, pvBuf, cbContig, fFlags);
    136136                if (RT_SUCCESS(rc))
    137137                {
     
    201201            {
    202202                /** @todo Handle strict return codes from PGMPhysWrite. */
    203                 rc = pDevIns->CTX_SUFF(pHlp)->pfnPhysWrite(pDevIns, GCPhysOut, pvBuf, cbWrite, fFlags);
     203                rc = pDevIns->CTX_SUFF(pHlp)->pfnPhysWrite(pDevIns, GCPhysOut, pvBuf, cbContig, fFlags);
    204204                if (RT_SUCCESS(rc))
    205205                {
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette