VirtualBox

Changeset 91904 in vbox


Timestamp: Oct 20, 2021 4:54:47 PM (3 years ago)
Author: vboxsync
svn:sync-xref-src-repo-rev: 147709
Message: VMM: Nested VMX: bugref:10092 EPT guest paging mode boiler plate and some ifdef'd disabled extras.
Location: trunk
Files: 8 edited

  • trunk/Config.kmk

    r91775 → r91904

@@ -460,5 +460,5 @@
 endif
 # Enables guest EPT support for VMX nested hardware virtualization.
-VBOX_WITH_NESTED_HWVIRT_VMX_EPT = 1
+#VBOX_WITH_NESTED_HWVIRT_VMX_EPT = 1
 endif
 # Enable native NEM on windows.
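
The only functional change in this hunk is that the build option is now commented out, so the guest-EPT code added by the rest of this changeset is compiled out by default. Assuming the kBuild option is surfaced to the compiler as a preprocessor define of the same name (the usual pattern for VBOX_WITH_* options), the effect on the C side looks like this minimal sketch (not VBox code):

    /* Minimal sketch: a VBOX_WITH_* option exported by the build system as a
     * -D define gates whole code paths at compile time.  With the Config.kmk
     * assignment commented out, the #ifdef branch simply is not built. */
    #include <stdio.h>

    int main(void)
    {
    #ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
        puts("guest EPT paging support is compiled in");
    #else
        puts("guest EPT paging support is compiled out");
    #endif
        return 0;
    }
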
  • trunk/src/VBox/VMM/VMMAll/PGMAll.cpp

    r91854 → r91904

@@ -489,4 +489,19 @@
 #endif /* VBOX_WITH_64_BITS_GUESTS */

+#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
+/* Guest - EPT mode */
+# define PGM_GST_TYPE               PGM_TYPE_EPT
+# define PGM_GST_NAME(name)         PGM_GST_NAME_EPT(name)
+# define PGM_BTH_NAME(name)         PGM_BTH_NAME_EPT_EPT(name)
+# define BTH_PGMPOOLKIND_PT_FOR_PT  PGMPOOLKIND_EPT_PT_FOR_PHYS
+# include "PGMGstDefs.h"
+# include "PGMAllGst.h"
+# include "PGMAllBth.h"
+# undef BTH_PGMPOOLKIND_PT_FOR_PT
+# undef PGM_BTH_NAME
+# undef PGM_GST_TYPE
+# undef PGM_GST_NAME
+#endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
+
 #undef PGM_SHW_TYPE
 #undef PGM_SHW_NAME
@@ -614,4 +629,19 @@
 # endif
     },
+#endif
+#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
+    { UINT32_MAX, NULL, NULL, NULL, NULL }, /* PGM_TYPE_NESTED_32BIT */
+    { UINT32_MAX, NULL, NULL, NULL, NULL }, /* PGM_TYPE_NESTED_PAE   */
+    { UINT32_MAX, NULL, NULL, NULL, NULL }, /* PGM_TYPE_NESTED_AMD64 */
+    {
+        PGM_TYPE_EPT,
+        PGM_GST_NAME_EPT(GetPage),
+        PGM_GST_NAME_EPT(ModifyPage),
+        PGM_GST_NAME_EPT(Enter),
+        PGM_GST_NAME_EPT(Exit),
+# ifdef IN_RING3
+        PGM_GST_NAME_EPT(Relocate),
+# endif
+    }
 #endif
 };
@@ -863,5 +893,9 @@
     PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NONE, PGM_TYPE_NESTED_PAE   - illegal */
     PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NONE, PGM_TYPE_NESTED_AMD64 - illegal */
+#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
+    PGMMODEDATABTH_ENTRY(PGM_TYPE_EPT, PGM_TYPE_EPT, PGM_BTH_NAME_EPT_EPT),
+#else
     PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NONE, PGM_TYPE_EPT          - illegal */
+#endif
     PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NONE, PGM_TYPE_NONE         - illegal */

@@ -1932,8 +1966,14 @@
             return VERR_PGM_NOT_USED_IN_MODE;

+#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
+        case PGMMODE_EPT:
+            pWalk->enmType = PGMPTWALKGSTTYPE_EPT;
+            return PGM_GST_NAME_EPT(Walk)(pVCpu, GCPtr, &pWalk->u.Ept);
+#else
+        case PGMMODE_EPT:
+#endif
         case PGMMODE_NESTED_32BIT:
         case PGMMODE_NESTED_PAE:
         case PGMMODE_NESTED_AMD64:
-        case PGMMODE_EPT:
         default:
             AssertFailed();
@@ -2344,4 +2384,46 @@


+#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
+ /**
+ * Performs the lazy mapping of the guest PML4 table when using EPT paging.
+ *
+ * @returns VBox status code.
+ * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
+ * @param   ppEptPml4   Where to return the pointer to the mapping.  This will
+ *                      always be set.
+ */
+int pgmGstLazyMapEptPml4(PVMCPUCC pVCpu, PEPTPML4 *ppEptPml4)
+{
+    Assert(!pVCpu->pgm.s.CTX_SUFF(pGstEptPml4));
+    PVMCC       pVM = pVCpu->CTX_SUFF(pVM);
+    PGM_LOCK_VOID(pVM);
+
+    RTGCPHYS const GCPhysCR3 = pVCpu->pgm.s.GCPhysCR3 & X86_CR3_EPT_PAGE_MASK;
+    PPGMPAGE       pPage;
+    int rc = pgmPhysGetPageEx(pVM, GCPhysCR3, &pPage);
+    if (RT_SUCCESS(rc))
+    {
+        rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysCR3, (void **)ppEptPml4);
+        if (RT_SUCCESS(rc))
+        {
+# ifdef IN_RING3
+            pVCpu->pgm.s.pGstEptPml4R0 = NIL_RTR0PTR;
+            pVCpu->pgm.s.pGstEptPml4R3 = *ppEptPml4;
+# else
+            pVCpu->pgm.s.pGstEptPml4R3 = NIL_RTR3PTR;
+            pVCpu->pgm.s.pGstEptPml4R0 = *ppEptPml4;
+# endif
+            PGM_UNLOCK(pVM);
+            return VINF_SUCCESS;
+        }
+    }
+
+    PGM_UNLOCK(pVM);
+    *ppEptPml4 = NULL;
+    return rc;
+}
+#endif
+
+
 /**
  * Gets the current CR3 register value for the shadow memory context.
@@ -2393,4 +2475,9 @@
             GCPhysCR3 = (RTGCPHYS)(uCr3 & X86_CR3_AMD64_PAGE_MASK);
             break;
+#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
+        case PGMMODE_EPT:
+            GCPhysCR3 = (RTGCPHYS)(uCr3 & X86_CR3_EPT_PAGE_MASK);
+            break;
+#endif
         default:
             GCPhysCR3 = (RTGCPHYS)(uCr3 & X86_CR3_PAGE_MASK);
@@ -2819,4 +2906,9 @@
      */
     PGMMODE enmGuestMode;
+#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
+    if (CPUMIsGuestVmxEptPagingEnabled(pVCpu))
+        enmGuestMode = PGMMODE_EPT;
+    else
+#endif
     if (cr0 & X86_CR0_PG)
     {
@@ -3024,4 +3116,11 @@
             }
             break;
+
+#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
+        case PGMMODE_EPT:
+            /* Nested paging is a requirement for nested VT-x. */
+            Assert(enmHostMode == PGMMODE_EPT);
+            break;
+#endif

         default:
@@ -3079,4 +3178,11 @@
             }
         }
+#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
+        else
+        {
+            /* Nested paging is a requirement for nested VT-x. */
+            AssertLogRelMsgReturn(enmGuestMode != PGMMODE_EPT, ("enmHostMode=%d\n", pVM->pgm.s.enmHostMode), PGMMODE_INVALID);
+        }
+#endif
     }

@@ -3223,4 +3329,9 @@
         case PGMMODE_AMD64:
             GCPhysCR3 = CPUMGetGuestCR3(pVCpu) & X86_CR3_AMD64_PAGE_MASK;
+            break;
+#endif
+#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
+        case PGMMODE_EPT:
+            GCPhysCR3 = CPUMGetGuestCR3(pVCpu) & X86_CR3_EPT_PAGE_MASK;
             break;
 #endif
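
The new pgmGstLazyMapEptPml4 above follows PGM's usual lazy-mapping shape: take the PGM lock, translate the EPT root (GCPhysCR3 masked with X86_CR3_EPT_PAGE_MASK) to a host-context pointer once, cache it in the per-VCPU field for the current context (R3 or R0) while NIL'ing the other, and let later callers pick the cached pointer up through the PGMInline.h accessors. A stand-alone sketch of that caching pattern, with hypothetical names in place of the real VBox types and mapping calls:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-ins for the VBox types/calls used above. */
    typedef struct EPTPML4 { uint64_t a[512]; } EPTPML4;

    static EPTPML4 *g_pCachedPml4;      /* per-VCPU cache field in the real code */

    static EPTPML4 *mapGuestPhys(uint64_t GCPhys)   /* placeholder for the phys-to-pointer mapping */
    {
        static EPTPML4 s_Pml4;          /* pretend mapping target */
        (void)GCPhys;
        return &s_Pml4;
    }

    /* Lazy getter: map and cache on first use, hand out the cache afterwards. */
    static EPTPML4 *getEptPml4(uint64_t GCPhysEptRoot)
    {
        if (!g_pCachedPml4)
            g_pCachedPml4 = mapGuestPhys(GCPhysEptRoot & ~UINT64_C(0xfff));
        return g_pCachedPml4;
    }

    int main(void)
    {
        EPTPML4 *p1 = getEptPml4(UINT64_C(0x100000));
        EPTPML4 *p2 = getEptPml4(UINT64_C(0x100000));   /* second call hits the cache */
        printf("same mapping: %d\n", p1 == p2);
        return 0;
    }
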
  • trunk/src/VBox/VMM/VMMAll/PGMAllGst.h

    r91712 → r91904

@@ -23,4 +23,5 @@
 #if PGM_GST_TYPE == PGM_TYPE_32BIT \
  || PGM_GST_TYPE == PGM_TYPE_PAE \
+ || PGM_GST_TYPE == PGM_TYPE_EPT \
  || PGM_GST_TYPE == PGM_TYPE_AMD64
 DECLINLINE(int) PGM_GST_NAME(Walk)(PVMCPUCC pVCpu, RTGCPTR GCPtr, PGSTPTWALK pWalk);
@@ -73,4 +74,5 @@
 #if PGM_GST_TYPE == PGM_TYPE_32BIT \
  || PGM_GST_TYPE == PGM_TYPE_PAE \
+ || PGM_GST_TYPE == PGM_TYPE_EPT \
  || PGM_GST_TYPE == PGM_TYPE_AMD64

@@ -166,4 +168,25 @@
         if (RT_SUCCESS(rc)) { /* probable */ }
         else return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 8, rc);
+
+# elif PGM_GST_TYPE == PGM_TYPE_EPT
+        rc = pgmGstGetEptPML4PtrEx(pVCpu, &pWalk->pPml4);
+        if (RT_SUCCESS(rc)) { /* probable */ }
+        else return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 4, rc);
+
+        PEPTPML4E pPml4e;
+        pWalk->pPml4e = pPml4e = &pWalk->pPml4->a[(GCPtr >> EPT_PML4_SHIFT) & EPT_PML4_MASK];
+        EPTPML4E  Pml4e;
+        pWalk->Pml4e.u = Pml4e.u = pPml4e->u;
+
+        if (GST_IS_PGENTRY_PRESENT(pVCpu, Pml4e)) { /* probable */ }
+        else return PGM_GST_NAME(WalkReturnNotPresent)(pVCpu, pWalk, 4);
+
+        if (RT_LIKELY(GST_IS_PML4E_VALID(pVCpu, Pml4e))) { /* likely */ }
+        else return PGM_GST_NAME(WalkReturnRsvdError)(pVCpu, pWalk, 4);
+
+        /** @todo figure out what this effective stuff is about. */
+        pWalk->Core.fEffective = fEffective = ((uint32_t)Pml4e.u & (X86_PML4E_RW  | X86_PML4E_US | X86_PML4E_PWT | X86_PML4E_PCD | X86_PML4E_A))
+                                            | ((uint32_t)(Pml4e.u >> 63) ^ 1) /*NX */;
+# error "Implement me."
 # endif
     }
@@ -298,4 +321,4 @@
 }

-#endif /* 32BIT, PAE, AMD64 */
+#endif /* 32BIT, PAE, EPT, AMD64 */

@@ -331,4 +354,5 @@
 #elif PGM_GST_TYPE == PGM_TYPE_32BIT \
    || PGM_GST_TYPE == PGM_TYPE_PAE \
+   || PGM_GST_TYPE == PGM_TYPE_EPT \
    || PGM_GST_TYPE == PGM_TYPE_AMD64

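
The ifdef'd walk code above (still ending in # error "Implement me.") indexes the PML4 with (GCPtr >> EPT_PML4_SHIFT) & EPT_PML4_MASK and then applies the present and reserved-bit checks. For reference, a stand-alone sketch of the index arithmetic for a 4-level, 4 KiB-page walk (9 index bits per level, as in EPT and AMD64 long mode); the shift constants are written out here rather than taken from the VBox headers:

    #include <stdint.h>
    #include <stdio.h>

    /* 9 index bits per level, 12-bit page offset (EPT / long-mode style). */
    #define LVL_MASK 0x1ffu

    int main(void)
    {
        uint64_t const GCPtr = UINT64_C(0x0000123456789abc);    /* example address */
        printf("PML4=%u PDPT=%u PD=%u PT=%u offset=0x%x\n",
               (unsigned)((GCPtr >> 39) & LVL_MASK),
               (unsigned)((GCPtr >> 30) & LVL_MASK),
               (unsigned)((GCPtr >> 21) & LVL_MASK),
               (unsigned)((GCPtr >> 12) & LVL_MASK),
               (unsigned)(GCPtr & 0xfff));
        return 0;
    }
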
  • trunk/src/VBox/VMM/VMMR3/PGM.cpp

    r91854 → r91904

@@ -1559,4 +1559,5 @@
         pVM->pgm.s.HCPhysInvMmioPg |= UINT64_C(0x000f0000000000);
     }
+    Assert(pVM->cpum.ro.GuestFeatures.cMaxPhysAddrWidth == cMaxPhysAddrWidth);

     /** @todo query from CPUM. */
@@ -1598,4 +1599,25 @@
         pVCpu->pgm.s.fGstAmd64ShadowedPdpeMask  = X86_PDPE_P  | X86_PDPE_RW  | X86_PDPE_US  | X86_PDPE_A;
         pVCpu->pgm.s.fGstAmd64ShadowedPml4eMask = X86_PML4E_P | X86_PML4E_RW | X86_PML4E_US | X86_PML4E_A;
+
+#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
+        pVCpu->pgm.s.fGstEptMbzPteMask        = fMbzPageFrameMask | EPT_PTE_MBZ_MASK;
+        pVCpu->pgm.s.fGstEptMbzPdeMask        = fMbzPageFrameMask | EPT_PDE_MBZ_MASK;
+        pVCpu->pgm.s.fGstEptMbzBigPdeMask     = fMbzPageFrameMask | EPT_PDE2M_MBZ_MASK;
+        pVCpu->pgm.s.fGstEptMbzPdpeMask       = fMbzPageFrameMask | EPT_PDPTE_MBZ_MASK;
+        pVCpu->pgm.s.fGstEptMbzBigPdpeMask    = fMbzPageFrameMask | EPT_PDPTE1G_MBZ_MASK;
+        pVCpu->pgm.s.fGstEptMbzPml4eMask      = fMbzPageFrameMask | EPT_PML4E_MBZ_MASK;
+
+        /* If any of the features (in the assert below) are enabled, we might have to shadow the relevant bits. */
+        Assert(   !pVM->cpum.ro.GuestFeatures.fVmxModeBasedExecuteEpt
+               && !pVM->cpum.ro.GuestFeatures.fVmxSppEpt
+               && !pVM->cpum.ro.GuestFeatures.fVmxEptXcptVe);
+        pVCpu->pgm.s.fGstEptPresentMask         = EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE;
+        pVCpu->pgm.s.fGstEptShadowedPml4eMask   = EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE | EPT_E_ACCESSED;
+        pVCpu->pgm.s.fGstEptShadowedPdpeMask    = EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE | EPT_E_ACCESSED;
+        pVCpu->pgm.s.fGstEptShadowedBigPdpeMask = EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE | EPT_E_ACCESSED | EPT_E_DIRTY;
+        pVCpu->pgm.s.fGstEptShadowedPdeMask     = EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE | EPT_E_ACCESSED;
+        pVCpu->pgm.s.fGstEptShadowedBigPdeMask  = EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE | EPT_E_ACCESSED | EPT_E_DIRTY;
+        pVCpu->pgm.s.fGstEptShadowedPteMask     = EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE | EPT_E_ACCESSED | EPT_E_DIRTY;
+#endif
     }

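
The fGstEptMbz*Mask values computed above combine the per-level must-be-zero bits with the reserved physical-address bits, and fGstEptPresentMask is EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE. A hedged sketch of how such masks are typically consumed when validating an entry (hypothetical helper names; the 1/2/4 bit values for read/write/execute match the EPT entry format in the Intel SDM):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* EPT allowed-access bits (architectural values). */
    #define EPT_READ     UINT64_C(0x1)
    #define EPT_WRITE    UINT64_C(0x2)
    #define EPT_EXECUTE  UINT64_C(0x4)

    /* Valid if no must-be-zero bit is set; fMbzMask is precomputed per VM from
     * the supported physical address width plus per-level reserved bits. */
    static bool isEntryValid(uint64_t uEntry, uint64_t fMbzMask)
    {
        return (uEntry & fMbzMask) == 0;
    }

    /* An EPT entry maps something when at least one access right is granted. */
    static bool isEntryPresent(uint64_t uEntry)
    {
        return (uEntry & (EPT_READ | EPT_WRITE | EPT_EXECUTE)) != 0;
    }

    int main(void)
    {
        uint64_t const fMbzMask = UINT64_C(0xfff0000000000000);  /* example mask only */
        uint64_t const uPte     = UINT64_C(0x0000000012345007);  /* R+W+X, frame 0x12345000 */
        printf("valid=%d present=%d\n", isEntryValid(uPte, fMbzMask), isEntryPresent(uPte));
        return 0;
    }
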
  • trunk/src/VBox/VMM/VMMR3/PGMDbg.cpp

    r91854 → r91904

@@ -1650,5 +1650,5 @@
     int             rc;
     unsigned const  cch     = pState->cchAddress;
-    uint64_t const  cr3Mask = pState->fEpt ? X86_CR3_AMD64_PAGE_MASK
+    uint64_t const  cr3Mask = pState->fEpt ? X86_CR3_AMD64_PAGE_MASK    /** @todo this should be X86_CR3_EPT_PAGE_MASK */
                             : pState->fLme ? X86_CR3_AMD64_PAGE_MASK
                             : pState->fPae ? X86_CR3_PAE_PAGE_MASK
@@ -2329,5 +2329,5 @@
     int             rc;
     unsigned const  cch     = pState->cchAddress;
-    uint64_t const  cr3Mask = pState->fEpt ? X86_CR3_AMD64_PAGE_MASK
+    uint64_t const  cr3Mask = pState->fEpt ? X86_CR3_AMD64_PAGE_MASK    /** @todo this should be X86_CR3_EPT_PAGE_MASK */
                             : pState->fLme ? X86_CR3_AMD64_PAGE_MASK
                             : pState->fPae ? X86_CR3_PAE_PAGE_MASK
  • trunk/src/VBox/VMM/include/PGMGstDefs.h

    r91822 → r91904

@@ -60,4 +60,5 @@
 #undef GST_IS_BIG_PDPE_VALID
 #undef GST_IS_PML4E_VALID
+#undef GST_IS_PGENTRY_PRESENT
 #undef GST_IS_PSE_ACTIVE
 #undef GST_IS_NX_ACTIVE
@@ -236,4 +237,50 @@
 # define GST_IS_NX_ACTIVE(pVCpu)                (pgmGstIsNoExecuteActive(pVCpu))
 # define BTH_IS_NP_ACTIVE(pVM)                  (false)
+
+#elif PGM_GST_TYPE == PGM_TYPE_EPT
+# define GSTUINT                                uint64_t
+# define GST_ATOMIC_OR(a_pu, a_fFlags)          ASMAtomicOrU64((a_pu), (a_fFlags))
+# define GSTPT                                  EPTPT
+# define PGSTPT                                 PEPTPT
+# define GSTPTE                                 EPTPTE
+# define PGSTPTE                                PEPTPTE
+# define GSTPD                                  EPTPD
+# define PGSTPD                                 PEPTPD
+# define GSTPDE                                 EPTPDE
+# define PGSTPDE                                PEPTPDE
+# define GST_BIG_PAGE_SIZE                      X86_PAGE_2M_SIZE
+# define GST_BIG_PAGE_OFFSET_MASK               X86_PAGE_2M_OFFSET_MASK
+# define GST_PDE_PG_MASK                        EPT_PDE_PG_MASK
+# define GST_PDE_BIG_PG_MASK                    EPT_PDE2M_PG_MASK
+# define GST_PD_SHIFT                           EPT_PD_SHIFT
+# define GST_PD_MASK                            EPT_PD_MASK
+# define GSTPTWALK                              PGMPTWALKGSTEPT
+# define PGSTPTWALK                             PPGMPTWALKGSTEPT
+# define PCGSTPTWALK                            PCPGMPTWALKGSTEPT
+# define GST_PDPE_ENTRIES                       EPT_PG_ENTRIES
+# define GST_PDPT_SHIFT                         EPT_PDPT_SHIFT
+# define GST_PDPE_PG_MASK                       EPT_PDPTE_PG_MASK
+# define GST_PDPT_MASK                          EPT_PDPT_MASK
+# define GST_PTE_PG_MASK                        EPT_E_PG_MASK
+# define GST_CR3_PAGE_MASK                      X86_CR3_EPT_PAGE_MASK
+# define GST_PT_SHIFT                           EPT_PT_SHIFT
+# define GST_PT_MASK                            EPT_PT_MASK
+# define GST_GET_PTE_GCPHYS(Pte)                PGM_A20_APPLY(pVCpu, ((Pte).u & GST_PTE_PG_MASK))
+# define GST_GET_PDE_GCPHYS(Pde)                PGM_A20_APPLY(pVCpu, ((Pde).u & GST_PDE_PG_MASK))
+# define GST_GET_BIG_PDE_GCPHYS(pVM, Pde)       PGM_A20_APPLY(pVCpu, ((Pde).u & GST_PDE_BIG_PG_MASK))
+# define GST_GET_PTE_SHW_FLAGS(pVCpu, Pte)      ((Pte).u & (pVCpu)->pgm.s.fGst64ShadowedPteMask )                                // TODO
+# define GST_GET_PDE_SHW_FLAGS(pVCpu, Pde)      ((Pde).u & (pVCpu)->pgm.s.fGst64ShadowedPdeMask )                                // TODO
+# define GST_GET_BIG_PDE_SHW_FLAGS(pVCpu, Pde)  ( ((Pde).u & (pVCpu)->pgm.s.fGst64ShadowedBigPdeMask ) | PGM_PDFLAGS_BIG_PAGE)   // TODO
+# define GST_GET_BIG_PDE_SHW_FLAGS_4_PTE(pVCpu, Pde)  ((Pde).u & (pVCpu)->pgm.s.fGst64ShadowedBigPde4PteMask )                   // TODO
+# define GST_IS_PTE_VALID(pVCpu, Pte)           (!( (Pte).u   & (pVCpu)->pgm.s.fGstAmd64MbzPteMask ))                            // TODO
+# define GST_IS_PDE_VALID(pVCpu, Pde)           (!( (Pde).u   & (pVCpu)->pgm.s.fGstAmd64MbzPdeMask ))                            // TODO
+# define GST_IS_BIG_PDE_VALID(pVCpu, Pde)       (!( (Pde).u   & (pVCpu)->pgm.s.fGstAmd64MbzBigPdeMask ))                         // TODO
+# define GST_IS_PDPE_VALID(pVCpu, Pdpe)         (!( (Pdpe).u  & (pVCpu)->pgm.s.fGstAmd64MbzPdpeMask ))                           // TODO
+# define GST_IS_BIG_PDPE_VALID(pVCpu, Pdpe)     (!( (Pdpe).u  & (pVCpu)->pgm.s.fGstAmd64MbzBigPdpeMask ))                        // TODO
+# define GST_IS_PML4E_VALID(pVCpu, Pml4e)       (!( (Pml4e).u & (pVCpu)->pgm.s.fGstEptMbzPml4eMask ))
+# define GST_IS_PGENTRY_PRESENT(pVCpu, Entry)   (!( (Entry).u & (pVCpu)->pgm.s.fGstEptPresentMask ))
+# define GST_IS_PSE_ACTIVE(pVCpu)               (true)
+# define GST_IS_NX_ACTIVE(pVCpu)                (pgmGstIsNoExecuteActive(pVCpu))
+# define BTH_IS_NP_ACTIVE(pVM)                  (false)
 #endif

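
PGMGstDefs.h acts as a template parameter block: each guest mode defines the GST*/GST_* macros, PGMAllGst.h (and friends) is then included once per mode from PGMAll.cpp, and everything is #undef'd again. The new PGM_TYPE_EPT block is the EPT instantiation, with several masks still borrowed from the AMD64 ones and marked TODO. A one-file approximation of that instantiation idiom, using made-up names and a macro in place of the real #define/#include/#undef dance:

    #include <stdint.h>
    #include <stdio.h>

    /* Expand one "IsPresent" helper per paging mode, parameterized by the
     * present-bit mask -- the same idea PGMGstDefs.h + PGMAllGst.h implement
     * with per-mode #define blocks and repeated #include. */
    #define INSTANTIATE_IS_PRESENT(Mode, fPresentMask)              \
        static int Mode##_IsPresent(uint64_t uEntry)                \
        {                                                           \
            return (uEntry & (fPresentMask)) != 0;                  \
        }

    INSTANTIATE_IS_PRESENT(Amd64, UINT64_C(0x1))    /* P bit        */
    INSTANTIATE_IS_PRESENT(Ept,   UINT64_C(0x7))    /* any of R/W/X */

    int main(void)
    {
        printf("%d %d\n", Amd64_IsPresent(0x6), Ept_IsPresent(0x6));
        return 0;
    }
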
  • trunk/src/VBox/VMM/include/PGMInline.h

    r91854 → r91904

@@ -727,4 +727,61 @@


+#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
+/**
+ * Gets the pointer to a page map level-4 entry when the guest using EPT paging.
+ *
+ * @returns Pointer to the PML4 entry.
+ * @param   pVCpu       The cross context virtual CPU structure.
+ * @param   iPml4       The index.
+ * @remarks Only used by AssertCR3.
+ */
+DECLINLINE(PEPTPML4E) pgmGstGetEptPML4EPtr(PVMCPUCC pVCpu, unsigned int iPml4)
+{
+    PEPTPML4 pEptPml4 = pVCpu->pgm.s.CTX_SUFF(pGstEptPml4);
+    if (pEptPml4)
+    { /* likely */ }
+    else
+    {
+         int const rc = pgmGstLazyMapEptPml4(pVCpu, &pEptPml4);
+         AssertRCReturn(rc, NULL);
+    }
+    return &pEptPml4->a[iPml4];
+}
+
+
+/**
+ * Gets the page map level-4 pointer for the guest when the guest is using EPT
+ * paging.
+ *
+ * @returns VBox status code.
+ * @param   pVCpu       The cross context virtual CPU structure.
+ * @param   ppEptPml4   Where to return the mapping.  Always set.
+ */
+DECLINLINE(int) pgmGstGetEptPML4PtrEx(PVMCPUCC pVCpu, PEPTPML4 *ppEptPml4)
+{
+    *ppEptPml4 = pVCpu->pgm.s.CTX_SUFF(pGstEptPml4);
+    if (RT_UNLIKELY(!*ppEptPml4))
+        return pgmGstLazyMapEptPml4(pVCpu, ppEptPml4);
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Gets the page map level-4 pointer for the guest when the guest is using EPT
+ * paging.
+ *
+ * @returns Pointer to the EPT PML4 page.
+ * @param   pVCpu       The cross context virtual CPU structure.
+ */
+DECLINLINE(PEPTPML4) pgmGstGetEptPML4Ptr(PVMCPUCC pVCpu)
+{
+    PEPTPML4 pEptPml4;
+    int rc = pgmGstGetEptPML4PtrEx(pVCpu, &pEptPml4);
+    AssertMsg(RT_SUCCESS(rc) || rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc)); NOREF(rc);
+    return pEptPml4;
+}
+#endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
+
+
 /**
  * Gets the shadow page directory, 32-bit.
  • trunk/src/VBox/VMM/include/PGMInternal.h

    r91854 → r91904

@@ -219,5 +219,5 @@
      && (uShwType) < PGM_TYPE_NESTED_32BIT)

-/** Macro for checking for nested or EPT.
+/** Macro for checking for nested.
  * @param   uType   PGM_TYPE_*
  */
@@ -2404,4 +2404,35 @@
 typedef PGMPTWALKGSTAMD64 const *PCPGMPTWALKGSTAMD64;

+#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
+/**
+ * Guest page table walk for the EPT mode.
+ */
+typedef struct PGMPTWALKGSTEPT
+{
+    /** The common core. */
+    PGMPTWALKCORE   Core;
+
+    PEPTPML4        pPml4;
+    PEPTPML4E       pPml4e;
+    EPTPML4E        Pml4e;
+
+    PEPTPDPT        pPdpt;
+    PEPTPDPTE       pPdpte;
+    EPTPDPTE        Pdpte;
+
+    PEPTPD          pPd;
+    PEPTPDE         pPde;
+    EPTPDE          Pde;
+
+    PEPTPT          pPt;
+    PEPTPTE         pPte;
+    EPTPTE          Pte;
+} PGMPTWALKGSTEPT;
+/** Pointer to an EPT guest page table walk. */
+typedef PGMPTWALKGSTEPT *PPGMPTWALKGSTEPT;
+/** Pointer to a const EPT guest page table walk. */
+typedef PGMPTWALKGSTEPT const *PCPGMPTWALKGSTEPT;
+#endif
+
 /**
  * Guest page table walk for the PAE mode.
@@ -2463,4 +2494,6 @@
     /**  PGMPTWALKGST::u.Legacy is valid. */
     PGMPTWALKGSTTYPE_32BIT,
+    /** PGMPTWALKGST::u.Ept is valid. */
+    PGMPTWALKGSTTYPE_EPT,
     /** Customary 32-bit type hack. */
     PGMPTWALKGSTTYPE_32BIT_HACK = 0x7fff0000
@@ -2483,4 +2516,8 @@
          * convension). */
         PGMPTWALKGST32BIT   Legacy;
+#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
+        /** The page walker for EPT. */
+        PGMPTWALKGSTEPT     Ept;
+#endif
     } u;
     /** Indicates which part of the union is valid. */
@@ -2523,4 +2560,9 @@
 #define PGM_GST_NAME_RC_AMD64_STR(name)                 "pgmRCGstAMD64" #name
 #define PGM_GST_NAME_R0_AMD64_STR(name)                 "pgmR0GstAMD64" #name
+#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
+# define PGM_GST_NAME_EPT(name)                         PGM_CTX(pgm,GstEPT##name)
+# define PGM_GST_NAME_RC_EPT_STR(name)                  "pgmRCGstEPT" #name
+# define PGM_GST_NAME_R0_EPT_STR(name)                  "pgmR0GstEPT" #name
+#endif
 #define PGM_GST_DECL(type, name)                        PGM_CTX_DECL(type) PGM_GST_NAME(name)

@@ -2581,4 +2623,7 @@
 #define PGM_BTH_NAME_EPT_PAE(name)                      PGM_CTX(pgm,BthEPTPAE##name)
 #define PGM_BTH_NAME_EPT_AMD64(name)                    PGM_CTX(pgm,BthEPTAMD64##name)
+#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
+# define PGM_BTH_NAME_EPT_EPT(name)                     PGM_CTX(pgm,BthEPTEPT##name)
+#endif
 #define PGM_BTH_NAME_NONE_REAL(name)                    PGM_CTX(pgm,BthNoneReal##name)
 #define PGM_BTH_NAME_NONE_PROT(name)                    PGM_CTX(pgm,BthNoneProt##name)
@@ -2615,4 +2660,7 @@
 #define PGM_BTH_NAME_RC_EPT_PAE_STR(name)               "pgmRCBthEPTPAE" #name
 #define PGM_BTH_NAME_RC_EPT_AMD64_STR(name)             "pgmRCBthEPTAMD64" #name
+#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
+# define PGM_BTH_NAME_RC_EPT_EPT_STR(name)              "pgmRCBthEPTEPT" #name
+#endif

 #define PGM_BTH_NAME_R0_32BIT_REAL_STR(name)            "pgmR0Bth32BitReal" #name
@@ -2645,4 +2693,7 @@
 #define PGM_BTH_NAME_R0_EPT_PAE_STR(name)               "pgmR0BthEPTPAE" #name
 #define PGM_BTH_NAME_R0_EPT_AMD64_STR(name)             "pgmR0BthEPTAMD64" #name
+#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
+# define PGM_BTH_NAME_R0_EPT_EPT_STR(name)              "pgmR0BthEPTEPT" #name
+#endif

 #define PGM_BTH_DECL(type, name)        PGM_CTX_DECL(type) PGM_BTH_NAME(name)
@@ -2667,5 +2718,7 @@

 /** The length of g_aPgmGuestModeData. */
-#ifdef VBOX_WITH_64_BITS_GUESTS
+#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
+# define PGM_GUEST_MODE_DATA_ARRAY_SIZE     (PGM_TYPE_EPT + 1)
+#elif defined(VBOX_WITH_64_BITS_GUESTS)
 # define PGM_GUEST_MODE_DATA_ARRAY_SIZE     (PGM_TYPE_AMD64 + 1)
 #else
@@ -3446,4 +3499,38 @@
     /** @} */

+    /** @name EPT Guest Paging.
+     * @{ */
+    /** The guest's page directory pointer table, R3 pointer. */
+    R3PTRTYPE(PEPTPML4)             pGstEptPml4R3;
+    /** The guest's page directory pointer table, R0 pointer. */
+    R0PTRTYPE(PEPTPML4)             pGstEptPml4R0;
+    /** Mask containing the MBZ PTE bits. */
+    uint64_t                        fGstEptMbzPteMask;
+    /** Mask containing the MBZ PDE bits. */
+    uint64_t                        fGstEptMbzPdeMask;
+    /** Mask containing the MBZ big page PDE bits. */
+    uint64_t                        fGstEptMbzBigPdeMask;
+    /** Mask containing the MBZ PDPE bits. */
+    uint64_t                        fGstEptMbzPdpeMask;
+    /** Mask containing the MBZ big page PDPE bits. */
+    uint64_t                        fGstEptMbzBigPdpeMask;
+    /** Mask containing the MBZ PML4E bits. */
+    uint64_t                        fGstEptMbzPml4eMask;
+    /** Mask to determine whether an entry is present. */
+    uint64_t                        fGstEptPresentMask;
+    /** Mask containing the PML4E bits that we shadow. */
+    uint64_t                        fGstEptShadowedPml4eMask;
+    /** Mask containing the PDPE bits that we shadow. */
+    uint64_t                        fGstEptShadowedPdpeMask;
+    /** Mask containing the big page PDPE bits that we shadow. */
+    uint64_t                        fGstEptShadowedBigPdpeMask;
+    /** Mask containing the PDE bits that we shadow. */
+    uint64_t                        fGstEptShadowedPdeMask;
+    /** Mask containing the big page PDE bits that we shadow. */
+    uint64_t                        fGstEptShadowedBigPdeMask;
+    /** Mask containing the PTE bits that we shadow. */
+    uint64_t                        fGstEptShadowedPteMask;
+    /** @} */
+
     /** Pointer to the page of the current active CR3 - R3 Ptr. */
     R3PTRTYPE(PPGMPOOLPAGE)         pShwPageCR3R3;
@@ -3657,4 +3744,7 @@
 int             pgmGstLazyMapPaePD(PVMCPUCC pVCpu, uint32_t iPdpt, PX86PDPAE *ppPd);
 int             pgmGstLazyMapPml4(PVMCPUCC pVCpu, PX86PML4 *ppPml4);
+#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
+int             pgmGstLazyMapEptPml4(PVMCPUCC pVCpu, PEPTPML4 *ppPml4);
+#endif
 int             pgmGstPtWalk(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALKGST pWalk);
 int             pgmGstPtWalkNext(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALKGST pWalk);
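
With the new PGMPTWALKGSTEPT member, PGMPTWALKGST above is a tagged union: pgmGstPtWalk records PGMPTWALKGSTTYPE_EPT in enmType and fills u.Ept, and callers dispatch on the type field before touching the per-mode data. A cut-down sketch of the same shape with hypothetical members:

    #include <stdint.h>

    /* Hypothetical, reduced analogue of PGMPTWALKGST: one result struct per
     * paging mode, unified behind a discriminator saying which member is valid. */
    typedef struct WALKAMD64 { uint64_t GCPhys; uint64_t uPte; } WALKAMD64;
    typedef struct WALKEPT   { uint64_t GCPhys; uint64_t uPte; } WALKEPT;

    typedef enum WALKTYPE { WALKTYPE_INVALID = 0, WALKTYPE_AMD64, WALKTYPE_EPT } WALKTYPE;

    typedef struct WALKRESULT
    {
        WALKTYPE enmType;           /* which union member is valid */
        union
        {
            WALKAMD64 Amd64;
            WALKEPT   Ept;
        } u;
    } WALKRESULT;

    /* Consumer side: dispatch on the discriminator, as pgmGstPtWalk callers do
     * with the PGMPTWALKGSTTYPE_* values. */
    static uint64_t walkResultGCPhys(const WALKRESULT *pWalk)
    {
        switch (pWalk->enmType)
        {
            case WALKTYPE_AMD64: return pWalk->u.Amd64.GCPhys;
            case WALKTYPE_EPT:   return pWalk->u.Ept.GCPhys;
            default:             return UINT64_MAX;
        }
    }
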