VirtualBox

Timestamp:
Sep 26, 2022 5:43:43 PM
Author:
vboxsync
Message:

VMM/PGM: Nested VMX: bugref:10092 Nested EPT shadow page-pool handling.

File:
1 edited

  • trunk/src/VBox/VMM/VMMAll/PGMAll.cpp
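The change has two parts: CR3 page-mask selection now takes the second-level address translation (SLAT) mode into account rather than only the guest paging mode, and a new pgmShwGetNestedEPTPDPtr() helper allocates or looks up shadow EPT PDPT/PD pages from the page pool. As a rough orientation only, here is a minimal, self-contained sketch of the first idea; the DEMO_* names and mask values are invented for illustration and are not the VirtualBox definitions (the real code uses the PGMMODE/PGMSLAT enums and the X86_CR3_*_PAGE_MASK macros visible in the diff below):

    #include <stdint.h>

    /* Simplified stand-ins for the PGMMODE and PGMSLAT enums. */
    typedef enum { DEMO_MODE_32BIT, DEMO_MODE_PAE, DEMO_MODE_AMD64 } DEMOPAGINGMODE;
    typedef enum { DEMO_SLAT_DIRECT, DEMO_SLAT_EPT } DEMOSLATMODE;

    /* Illustrative mask values only; the real X86_CR3_*_PAGE_MASK macros differ. */
    #define DEMO_CR3_PAGE_MASK        UINT64_C(0x00000000fffff000)
    #define DEMO_CR3_PAE_PAGE_MASK    UINT64_C(0x00000000ffffffe0)
    #define DEMO_CR3_AMD64_PAGE_MASK  UINT64_C(0x000ffffffffff000)
    #define DEMO_CR3_EPT_PAGE_MASK    UINT64_C(0x000ffffffffff000)

    /* With nested EPT active, CR3 holds a nested-guest physical address, so the
       EPT mask applies regardless of which paging mode the guest itself uses. */
    static uint64_t demoGetCr3Mask(DEMOPAGINGMODE enmMode, DEMOSLATMODE enmSlatMode)
    {
        if (enmSlatMode == DEMO_SLAT_EPT)
            return DEMO_CR3_EPT_PAGE_MASK;
        switch (enmMode)
        {
            case DEMO_MODE_PAE:   return DEMO_CR3_PAE_PAGE_MASK;
            case DEMO_MODE_AMD64: return DEMO_CR3_AMD64_PAGE_MASK;
            default:              return DEMO_CR3_PAGE_MASK;
        }
    }

    /* Usage: mask a raw CR3 before treating it as a (nested-)guest physical address. */
    static uint64_t demoGetMaskedCr3(uint64_t uCr3, DEMOPAGINGMODE enmMode, DEMOSLATMODE enmSlatMode)
    {
        return uCr3 & demoGetCr3Mask(enmMode, enmSlatMode);
    }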

--- trunk/src/VBox/VMM/VMMAll/PGMAll.cpp (r96407)
+++ trunk/src/VBox/VMM/VMMAll/PGMAll.cpp (r96879)
@@ -66 +66 @@
 static int pgmGstSlatWalk(PVMCPUCC pVCpu, RTGCPHYS GCPhysNested, bool fIsLinearAddrValid, RTGCPTR GCPtrNested, PPGMPTWALK pWalk,
                           PPGMPTWALKGST pGstWalk);
-static int pgmGstSlatWalkPhys(PVMCPUCC pVCpu, PGMSLAT enmSlatMode, RTGCPHYS GCPhysNested, PPGMPTWALK pWalk, PPGMPTWALKGST pGstWalk);
+static int pgmGstSlatWalkPhys(PVMCPUCC pVCpu, PGMSLAT enmSlatMode, RTGCPHYS GCPhysNested, PPGMPTWALK pWalk,
+                              PPGMPTWALKGST pGstWalk);
 static int pgmGstSlatTranslateCr3(PVMCPUCC pVCpu, uint64_t uCr3, PRTGCPHYS pGCPhysCr3);
+static int pgmShwGetNestedEPTPDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPhysNested, PEPTPDPT *ppPdpt, PEPTPD *ppPD,
+                                   PPGMPTWALKGST pGstWalkAll);
 #endif
 static int pgmShwSyncLongModePDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, X86PGPAEUINT uGstPml4e, X86PGPAEUINT uGstPdpe, PX86PDPAE *ppPD);
     
@@ -74 +77 @@
 
 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
-/* Guest - EPT SLAT is identical for all guest paging mode. */
 # define PGM_SLAT_TYPE               PGM_SLAT_TYPE_EPT
-# define PGM_GST_TYPE                PGM_TYPE_EPT
-# include "PGMGstDefs.h"
+# include "PGMSlatDefs.h"
 # include "PGMAllGstSlatEpt.cpp.h"
-# undef PGM_GST_TYPE
+# undef PGM_SLAT_TYPE
 #endif
 
     
@@ -899 +900 @@
 #undef PGMMODEDATABTH_NULL_ENTRY
 };
+
+
+/**
+ * Gets the CR3 mask corresponding to the given paging mode.
+ *
+ * @returns The CR3 mask.
+ * @param   enmMode         The paging mode.
+ * @param   enmSlatMode     The second-level address translation mode.
+ */
+DECLINLINE(uint64_t) pgmGetCr3MaskForMode(PGMMODE enmMode, PGMSLAT enmSlatMode)
+{
+    /** @todo This work can be optimized either by storing the masks in
+     *        pVCpu->pgm.s.afGstCr3Masks[] for all PGMMODEs -or- just do this once and
+     *        store the result when entering guest mode since we currently use it only
+     *        for enmGuestMode. */
+    if (enmSlatMode == PGMSLAT_DIRECT)
+    {
+        Assert(enmMode != PGMMODE_EPT);
+        switch (enmMode)
+        {
+            case PGMMODE_PAE:
+            case PGMMODE_PAE_NX:
+                return X86_CR3_PAE_PAGE_MASK;
+            case PGMMODE_AMD64:
+            case PGMMODE_AMD64_NX:
+                return X86_CR3_AMD64_PAGE_MASK;
+            default:
+                return X86_CR3_PAGE_MASK;
+        }
+    }
+    else
+    {
+        Assert(enmSlatMode == PGMSLAT_EPT);
+        return X86_CR3_EPT_PAGE_MASK;
+    }
+}
+
+
+/**
+ * Gets the masked CR3 value according to the current guest paging mode.
+ *
+ * @returns The masked PGM CR3 value.
+ * @param   pVCpu   The cross context virtual CPU structure.
+ * @param   uCr3    The raw guest CR3 value.
+ */
+DECLINLINE(RTGCPHYS) pgmGetGuestMaskedCr3(PVMCPUCC pVCpu, uint64_t uCr3)
+{
+    uint64_t const fCr3Mask  = pgmGetCr3MaskForMode(pVCpu->pgm.s.enmGuestMode, pVCpu->pgm.s.enmGuestSlatMode);
+    RTGCPHYS       GCPhysCR3 = (RTGCPHYS)(uCr3 & fCr3Mask);
+    PGM_A20_APPLY_TO_VAR(pVCpu, GCPhysCR3);
+    return GCPhysCR3;
+}
 
 
     
@@ -1670 +1723 @@
 
 
+#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
+/**
+ * Syncs the SHADOW nested-guest page directory pointer for the specified address.
+ * Allocates backing pages in case the PDPT or PML4 entry is missing.
+ *
+ * @returns VBox status code.
+ * @param   pVCpu           The cross context virtual CPU structure.
+ * @param   GCPhysNested    The nested-guest physical address.
+ * @param   ppPdpt          Where to store the PDPT. Optional, can be NULL.
+ * @param   ppPD            Where to store the PD. Optional, can be NULL.
+ * @param   pGstWalkAll     The guest walk info.
+ */
+static int pgmShwGetNestedEPTPDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPhysNested, PEPTPDPT *ppPdpt, PEPTPD *ppPD,
+                                   PPGMPTWALKGST pGstWalkAll)
+{
+    PVMCC    pVM   = pVCpu->CTX_SUFF(pVM);
+    PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
+    int      rc;
+
+    PPGMPOOLPAGE pShwPage;
+    Assert(pVM->pgm.s.fNestedPaging);
+    Assert(pVCpu->pgm.s.enmGuestSlatMode == PGMSLAT_EPT);
+    PGM_LOCK_ASSERT_OWNER(pVM);
+
+    /*
+     * PML4 level.
+     */
+    {
+        PEPTPML4 pPml4 = (PEPTPML4)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
+        Assert(pPml4);
+
+        /* Allocate page directory pointer table if not present. */
+        {
+            uint64_t const fShwFlags = pGstWalkAll->u.Ept.Pml4e.u & pVCpu->pgm.s.fGstEptShadowedPml4eMask;
+            const unsigned iPml4e    = (GCPhysNested >> EPT_PML4_SHIFT) & EPT_PML4_MASK;
+            PEPTPML4E      pPml4e    = &pPml4->a[iPml4e];
+
+            if (!(pPml4e->u & (EPT_E_PG_MASK | EPT_PRESENT_MASK)))
+            {
+                RTGCPHYS const GCPhysPdpt = pGstWalkAll->u.Ept.Pml4e.u & EPT_PML4E_PG_MASK;
+                rc = pgmPoolAlloc(pVM, GCPhysPdpt, PGMPOOLKIND_EPT_PDPT_FOR_EPT_PDPT, PGMPOOLACCESS_DONTCARE,
+                                  PGM_A20_IS_ENABLED(pVCpu), pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPml4e, false /*fLockPage*/,
+                                  &pShwPage);
+                AssertRCReturn(rc, rc);
+
+                /* Hook up the new PDPT now. */
+                ASMAtomicWriteU64(&pPml4e->u, pShwPage->Core.Key | fShwFlags);
+            }
+            else
+            {
+                pShwPage = pgmPoolGetPage(pPool, pPml4e->u & EPT_PML4E_PG_MASK);
+                AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
+
+                pgmPoolCacheUsed(pPool, pShwPage);
+
+                /* Hook up the cached PDPT if needed (probably not given 512*512 PTs to sync). */
+                if (pPml4e->u != (pShwPage->Core.Key | fShwFlags))
+                    ASMAtomicWriteU64(&pPml4e->u, pShwPage->Core.Key | fShwFlags);
+            }
+            Assert(PGMPOOL_PAGE_IS_NESTED(pShwPage));
+            Log7Func(("GstPml4e=%RX64 ShwPml4e=%RX64 iPml4e=%u\n", pGstWalkAll->u.Ept.Pml4e.u, pPml4e->u, iPml4e));
+        }
+    }
+
+    /*
+     * PDPT level.
+     */
+    {
+        AssertReturn(!(pGstWalkAll->u.Ept.Pdpte.u & EPT_E_LEAF), VERR_NOT_SUPPORTED); /* shadowing 1GB pages not supported yet. */
+
+        PEPTPDPT pPdpt = (PEPTPDPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
+        if (ppPdpt)
+            *ppPdpt = pPdpt;
+
+        uint64_t const fShwFlags = pGstWalkAll->u.Ept.Pdpte.u & pVCpu->pgm.s.fGstEptShadowedPdpteMask;
+        const unsigned iPdPte    = (GCPhysNested >> EPT_PDPT_SHIFT) & EPT_PDPT_MASK;
+        PEPTPDPTE      pPdpte    = &pPdpt->a[iPdPte];
+
+        if (!(pPdpte->u & (EPT_E_PG_MASK | EPT_PRESENT_MASK)))
+        {
+            RTGCPHYS const GCPhysPd = pGstWalkAll->u.Ept.Pdpte.u & EPT_PDPTE_PG_MASK;
+            rc = pgmPoolAlloc(pVM, GCPhysPd, PGMPOOLKIND_EPT_PD_FOR_EPT_PD, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
+                              pShwPage->idx, iPdPte, false /*fLockPage*/, &pShwPage);
+            AssertRCReturn(rc, rc);
+
+            /* Hook up the new PD now. */
+            ASMAtomicWriteU64(&pPdpte->u, pShwPage->Core.Key | fShwFlags);
+        }
+        else
+        {
+            pShwPage = pgmPoolGetPage(pPool, pPdpte->u & EPT_PDPTE_PG_MASK);
+            AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
+
+            pgmPoolCacheUsed(pPool, pShwPage);
+
+            /* Hook up the cached PD if needed (probably not given there are 512 PTs we may need sync). */
+            if (pPdpte->u != (pShwPage->Core.Key | fShwFlags))
+                ASMAtomicWriteU64(&pPdpte->u, pShwPage->Core.Key | fShwFlags);
+        }
+        Assert(PGMPOOL_PAGE_IS_NESTED(pShwPage));
+        Log7Func(("GstPdpte=%RX64 ShwPdpte=%RX64 iPdPte=%u \n", pGstWalkAll->u.Ept.Pdpte.u, pPdpte->u, iPdPte));
+
+        *ppPD = (PEPTPD)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
+    }
+
+    return VINF_SUCCESS;
+}
+#endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
+
+
 #ifdef IN_RING0
 /**
     
@@ -1787 +1950 @@
     uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
     AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
-    AssertReturn(g_aPgmBothModeData[idxBth].pfnMapCR3, VERR_PGM_MODE_IPE);
+    AssertReturn(g_aPgmBothModeData[idxBth].pfnUnmapCR3, VERR_PGM_MODE_IPE);
     return g_aPgmBothModeData[idxBth].pfnUnmapCR3(pVCpu);
 }
     
@@ -2135 +2298 @@
     Assert(!pVCpu->pgm.s.CTX_SUFF(pGst32BitPd));
 
-    RTGCPHYS    GCPhysCR3 = pVCpu->pgm.s.GCPhysCR3 & X86_CR3_PAGE_MASK;
+    RTGCPHYS    GCPhysCR3 = pgmGetGuestMaskedCr3(pVCpu, pVCpu->pgm.s.GCPhysCR3);
     PPGMPAGE    pPage;
     int rc = pgmPhysGetPageEx(pVM, GCPhysCR3, &pPage);
     
@@ -2176 +2339 @@
     PGM_LOCK_VOID(pVM);
 
-    RTGCPHYS    GCPhysCR3 = pVCpu->pgm.s.GCPhysCR3 & X86_CR3_PAE_PAGE_MASK;
+    RTGCPHYS    GCPhysCR3 = pgmGetGuestMaskedCr3(pVCpu, pVCpu->pgm.s.GCPhysCR3);
     PPGMPAGE    pPage;
-    /** @todo Nested VMX: convert GCPhysCR3 from nested-guest physical to
-     *        guest-physical address here. */
     int rc = pgmPhysGetPageEx(pVM, GCPhysCR3, &pPage);
     if (RT_SUCCESS(rc))
     
@@ -2272 +2433 @@
     PGM_LOCK_VOID(pVM);
 
-    RTGCPHYS    GCPhysCR3 = pVCpu->pgm.s.GCPhysCR3 & X86_CR3_AMD64_PAGE_MASK;
+    RTGCPHYS    GCPhysCR3 = pgmGetGuestMaskedCr3(pVCpu, pVCpu->pgm.s.GCPhysCR3);
     PPGMPAGE    pPage;
     int rc = pgmPhysGetPageEx(pVM, GCPhysCR3, &pPage);
     
@@ -2369 +2530 @@
 
 
-/**
- * Gets the CR3 mask corresponding to the given paging mode.
- *
- * @returns The CR3 mask.
- * @param   enmMode     The paging mode.
- */
-DECLINLINE(uint64_t) pgmGetCr3MaskForMode(PGMMODE enmMode)
-{
-    /** @todo This work can be optimized either by storing the masks in
-     *        pVCpu->pgm.s.afGstCr3Masks[] for all PGMMODEs -or- just do this once and
-     *        store the result when entering guest mode since we currently use it only
-     *        for enmGuestMode. */
-    switch (enmMode)
-    {
-        case PGMMODE_PAE:
-        case PGMMODE_PAE_NX:
-            return X86_CR3_PAE_PAGE_MASK;
-        case PGMMODE_AMD64:
-        case PGMMODE_AMD64_NX:
-            return X86_CR3_AMD64_PAGE_MASK;
-        case PGMMODE_EPT:
-            return X86_CR3_EPT_PAGE_MASK;
-        default:
-            return X86_CR3_PAGE_MASK;
-    }
-}
-
-
-/**
- * Gets the masked CR3 value according to the current guest paging mode.
- *
- * @returns The masked PGM CR3 value.
- * @param   pVCpu   The cross context virtual CPU structure.
- * @param   uCr3    The raw guest CR3 value.
- */
-DECLINLINE(RTGCPHYS) pgmGetGuestMaskedCr3(PVMCPUCC pVCpu, uint64_t uCr3)
-{
-    uint64_t const fCr3Mask = pgmGetCr3MaskForMode(pVCpu->pgm.s.enmGuestMode);
-    RTGCPHYS      GCPhysCR3 = (RTGCPHYS)(uCr3 & fCr3Mask);
-    PGM_A20_APPLY_TO_VAR(pVCpu, GCPhysCR3);
-    return GCPhysCR3;
-}
-
-
 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
 /**
     
@@ -2429 +2546 @@
 static int pgmGstSlatTranslateCr3(PVMCPUCC pVCpu, uint64_t uCr3, PRTGCPHYS pGCPhysCr3)
 {
+# if 0
     if (uCr3 != pVCpu->pgm.s.GCPhysNstGstCR3)
+# endif
     {
         PGMPTWALK    Walk;
     
@@ -2449 +2568 @@
     }
 
+# if 0
     /*
      * If the nested-guest CR3 has not changed, then the previously
     
@@ -2455 +2575 @@
     *pGCPhysCr3 = pVCpu->pgm.s.GCPhysCR3;
     return VINF_SUCCESS;
+# endif
 }
 #endif
     
@@ -2495 +2616 @@
         && PGMMODE_WITH_PAGING(pVCpu->pgm.s.enmGuestMode))
     {
-        LogFlowFunc(("nested_cr3=%RX64 old=%RX64\n", GCPhysCR3, pVCpu->pgm.s.GCPhysNstGstCR3));
         RTGCPHYS GCPhysOut;
         int const rc = pgmGstSlatTranslateCr3(pVCpu, GCPhysCR3, &GCPhysOut);
     
@@ -2601 +2721 @@
     if (CPUMIsGuestVmxEptPagingEnabled(pVCpu))
     {
-        LogFlowFunc(("nested_cr3=%RX64 old_nested_cr3=%RX64\n", cr3, pVCpu->pgm.s.GCPhysNstGstCR3));
         RTGCPHYS GCPhysOut;
         int const rc = pgmGstSlatTranslateCr3(pVCpu, GCPhysCR3, &GCPhysOut);
     
@@ -2610 +2729 @@
             /* CR3 SLAT translation failed but we try to pretend it
                succeeded for the reasons mentioned in PGMHCChangeMode(). */
-            AssertMsgFailed(("SLAT failed for CR3 %#RX64 rc=%Rrc\n", cr3, rc));
+            Log(("SLAT failed for CR3 %#RX64 rc=%Rrc\n", cr3, rc));
             int const rc2 = pgmGstUnmapCr3(pVCpu);
             pVCpu->pgm.s.GCPhysCR3       = NIL_RTGCPHYS;
             pVCpu->pgm.s.GCPhysNstGstCR3 = NIL_RTGCPHYS;
+            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
             return rc2;
         }
     
@@ -2717 +2837 @@
                    succeeded for the reasons mentioned in PGMHCChangeMode(). */
                 AssertMsgFailed(("Failed to translate CR3 %#RX64. rc=%Rrc\n", cr3, rc2));
-                rc2 = pgmGstUnmapCr3(pVCpu);
                 pVCpu->pgm.s.GCPhysCR3       = NIL_RTGCPHYS;
                 pVCpu->pgm.s.GCPhysNstGstCR3 = NIL_RTGCPHYS;
     
@@ -2811 +2930 @@
          * PDPE entries. Here we assume the caller has already validated or doesn't require
          * validation of the PDPEs.
+         *
+         * In the case of nested EPT (i.e. for nested-guests), the PAE PDPEs have been
+         * validated by the VMX transition.
          *
          * [1] -- See AMD spec. 15.25.10 "Legacy PAE Mode".
     
@@ -2844 +2966 @@
     }
 
+    /*
+     * Update CPUM with the PAE PDPEs.
+     */
+    CPUMSetGuestPaePdpes(pVCpu, paPaePdpes);
     return VINF_SUCCESS;
 }
     
@@ -2868 +2994 @@
 
 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
-    if (CPUMIsGuestVmxEptPaePagingEnabled(pVCpu))
+    if (CPUMIsGuestVmxEptPagingEnabled(pVCpu))
     {
         RTGCPHYS GCPhysOut;
     
@@ -2908 +3034 @@
 
             /*
-             * Update CPUM.
-             * We do this prior to mapping the PDPEs to keep the order consistent
-             * with what's used in HM. In practice, it doesn't really matter.
-             */
-            CPUMSetGuestPaePdpes(pVCpu, &aPaePdpes[0]);
-
-            /*
-             * Map the PDPEs.
+             * Map the PDPEs and update CPUM.
              */
             rc = PGMGstMapPaePdpes(pVCpu, &aPaePdpes[0]);
     
@@ -3320 +3439 @@
 
     /*
+     * Determine SLAT mode -before- entering the new shadow mode!
+     */
+    pVCpu->pgm.s.enmGuestSlatMode = !CPUMIsGuestVmxEptPagingEnabled(pVCpu) ? PGMSLAT_DIRECT : PGMSLAT_EPT;
+
+    /*
      * Enter new shadow mode (if changed).
      */
     
@@ -3380 +3504 @@
      *   - Indicate that the CR3 is nested-guest physical address.
      */
-    if (CPUMIsGuestVmxEptPagingEnabled(pVCpu))
+    if (pVCpu->pgm.s.enmGuestSlatMode == PGMSLAT_EPT)
     {
         if (PGMMODE_WITH_PAGING(enmGuestMode))
     
@@ -3405 +3529 @@
                  * See Intel spec. 27.2.1 "EPT Overview".
                  */
-                AssertMsgFailed(("SLAT failed for CR3 %#RX64 rc=%Rrc\n", GCPhysCR3, rc));
+                Log(("SLAT failed for CR3 %#RX64 rc=%Rrc\n", GCPhysCR3, rc));
 
                 /* Trying to coax PGM to succeed for the time being... */
                 Assert(pVCpu->pgm.s.GCPhysCR3 == NIL_RTGCPHYS);
                 pVCpu->pgm.s.GCPhysNstGstCR3  = GCPhysCR3;
-                pVCpu->pgm.s.enmGuestSlatMode = PGMSLAT_EPT;
                 pVCpu->pgm.s.enmGuestMode     = enmGuestMode;
                 HMHCChangedPagingMode(pVM, pVCpu, pVCpu->pgm.s.enmShadowMode, pVCpu->pgm.s.enmGuestMode);
     
@@ -3416 +3539 @@
             }
             pVCpu->pgm.s.GCPhysNstGstCR3  = GCPhysCR3;
-            GCPhysCR3 = Walk.GCPhys;
-        }
-        pVCpu->pgm.s.enmGuestSlatMode = PGMSLAT_EPT;
+            GCPhysCR3 = Walk.GCPhys & X86_CR3_EPT_PAGE_MASK;
+        }
     }
     else
-    {
         Assert(pVCpu->pgm.s.GCPhysNstGstCR3 == NIL_RTGCPHYS);
-        pVCpu->pgm.s.enmGuestSlatMode = PGMSLAT_DIRECT;
-    }
 #endif
 
     
@@ -3953 +4072 @@
     PGM_LOCK_VOID(pVM);
     pVCpu->pgm.s.uEptPtr = uEptPtr;
+    pVCpu->pgm.s.pGstEptPml4R3 = 0;
+    pVCpu->pgm.s.pGstEptPml4R0 = 0;
     PGM_UNLOCK(pVM);
 }