VirtualBox

Changeset 108156 in vbox


Timestamp:
Feb 11, 2025 12:26:07 PM
Author:
vboxsync
svn:sync-xref-src-repo-rev:
167467
Message:

VMM/PGM: Try to keep the x86-only stuff more together. jiraref:VBP-1531

File:
1 edited

Legend:

  ' ' unmodified    '+' added    '-' removed
  • trunk/src/VBox/VMM/VMMAll/PGMAll.cpp

--- PGMAll.cpp (r108142)
+++ PGMAll.cpp (r108156)
@@ -1955 +1955 @@
 # endif /* !VBOX_WITH_ONLY_PGM_NEM_MODE */
 
+/**
+ * Maps the guest CR3.
+ *
+ * @returns VBox status code.
+ * @param   pVCpu           The cross context virtual CPU structure.
+ * @param   GCPhysCr3       The guest CR3 value.
+ * @param   pHCPtrGuestCr3  Where to store the mapped memory.
+ */
+DECLINLINE(int) pgmGstMapCr3(PVMCPUCC pVCpu, RTGCPHYS GCPhysCr3, PRTHCPTR pHCPtrGuestCr3)
+{
+    /** @todo this needs some reworking wrt. locking?  */
+    PVMCC pVM = pVCpu->CTX_SUFF(pVM);
+    PGM_LOCK_VOID(pVM);
+    PPGMPAGE pPageCr3 = pgmPhysGetPage(pVM, GCPhysCr3);
+    AssertReturnStmt(pPageCr3, PGM_UNLOCK(pVM), VERR_PGM_INVALID_CR3_ADDR);
+
+    RTHCPTR HCPtrGuestCr3;
+    int rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPageCr3, GCPhysCr3, (void **)&HCPtrGuestCr3);
+    PGM_UNLOCK(pVM);
+
+    *pHCPtrGuestCr3 = HCPtrGuestCr3;
+    return rc;
+}
+
+
+# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
+/**
+ * Unmaps the guest CR3.
+ *
+ * @returns VBox status code.
+ * @param   pVCpu   The cross context virtual CPU structure.
+ */
+DECLINLINE(int) pgmGstUnmapCr3(PVMCPUCC pVCpu)
+{
+    uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
+    AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
+    AssertReturn(g_aPgmBothModeData[idxBth].pfnUnmapCR3, VERR_PGM_MODE_IPE);
+    return g_aPgmBothModeData[idxBth].pfnUnmapCR3(pVCpu);
+}
+# endif
+
 #endif /* VBOX_VMM_TARGET_X86 */
 
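The pgmGstMapCr3 helper added above follows a common PGM shape: take the PGM lock, resolve the guest-physical page, map it, and release the lock on every path, including the assertion-failure path (that is what AssertReturnStmt's statement argument is for). Below is a minimal standalone sketch of that shape; all names (map_cr3, lookup_page, g_page_db, g_db_lock) are hypothetical stand-ins, not VirtualBox APIs.

    /* Sketch of the lock/lookup/map pattern; hypothetical names throughout. */
    #include <pthread.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    typedef struct phys_page { uint64_t gc_phys; void *host_mapping; } phys_page_t;

    static phys_page_t     g_page_db[16];   /* toy "physical page" database */
    static pthread_mutex_t g_db_lock = PTHREAD_MUTEX_INITIALIZER;

    static phys_page_t *lookup_page(uint64_t gc_phys)   /* cf. pgmPhysGetPage */
    {
        for (size_t i = 0; i < 16; i++)
            if (g_page_db[i].gc_phys == gc_phys)
                return &g_page_db[i];
        return NULL;
    }

    /* cf. pgmGstMapCr3: lock, validate the page, map it, and make sure the
     * lock is dropped on the failure path as well as the success path. */
    static int map_cr3(uint64_t gc_phys_cr3, void **ppv)
    {
        pthread_mutex_lock(&g_db_lock);
        phys_page_t *page = lookup_page(gc_phys_cr3);
        if (!page)
        {
            pthread_mutex_unlock(&g_db_lock);  /* unlock-on-failure, cf. AssertReturnStmt */
            return -1;                         /* cf. VERR_PGM_INVALID_CR3_ADDR */
        }
        *ppv = page->host_mapping;
        pthread_mutex_unlock(&g_db_lock);
        return 0;
    }

    int main(void)
    {
        g_page_db[0] = (phys_page_t){ 0x1000, malloc(4096) };
        void *pv = NULL;
        printf("map_cr3: %d, pv=%p\n", map_cr3(0x1000, &pv), pv);
        free(pv);
        return 0;
    }
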
@@ -1979 +2020 @@
 #elif defined(VBOX_VMM_TARGET_ARMV8)
     return pgmGstGetPageArmv8Hack(pVCpu, GCPtr, pWalk);
+
 #else
 # error "port me"
 #endif
 }
 
-#ifdef VBOX_VMM_TARGET_X86
-
+
+#ifdef VBOX_VMM_TARGET_X86 /** @todo Implement PGMGstQueryPageFast for ARMv8! */
 /**
  * Gets effective Guest OS page information.
@@ -2008 +2050 @@
     return g_aPgmGuestModeData[idx].pfnQueryPageFast(pVCpu, GCPtr, fFlags, pWalk);
 }
-
-
-/**
- * Maps the guest CR3.
- *
- * @returns VBox status code.
- * @param   pVCpu           The cross context virtual CPU structure.
- * @param   GCPhysCr3       The guest CR3 value.
- * @param   pHCPtrGuestCr3  Where to store the mapped memory.
- */
-DECLINLINE(int) pgmGstMapCr3(PVMCPUCC pVCpu, RTGCPHYS GCPhysCr3, PRTHCPTR pHCPtrGuestCr3)
-{
-    /** @todo this needs some reworking wrt. locking?  */
-    PVMCC pVM = pVCpu->CTX_SUFF(pVM);
-    PGM_LOCK_VOID(pVM);
-    PPGMPAGE pPageCr3 = pgmPhysGetPage(pVM, GCPhysCr3);
-    AssertReturnStmt(pPageCr3, PGM_UNLOCK(pVM), VERR_PGM_INVALID_CR3_ADDR);
-
-    RTHCPTR HCPtrGuestCr3;
-    int rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPageCr3, GCPhysCr3, (void **)&HCPtrGuestCr3);
-    PGM_UNLOCK(pVM);
-
-    *pHCPtrGuestCr3 = HCPtrGuestCr3;
-    return rc;
-}
-
-
-# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
-/**
- * Unmaps the guest CR3.
- *
- * @returns VBox status code.
- * @param   pVCpu   The cross context virtual CPU structure.
- */
-DECLINLINE(int) pgmGstUnmapCr3(PVMCPUCC pVCpu)
-{
-    uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
-    AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
-    AssertReturn(g_aPgmBothModeData[idxBth].pfnUnmapCR3, VERR_PGM_MODE_IPE);
-    return g_aPgmBothModeData[idxBth].pfnUnmapCR3(pVCpu);
-}
-# endif
-
 #endif /* VBOX_VMM_TARGET_X86 */
 
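Both pgmGstUnmapCr3 and PGMGstQueryPageFast in the hunks above dispatch through a mode-data table (g_aPgmBothModeData / g_aPgmGuestModeData): bounds-check the mode index, check the optional function pointer, then call through it. A self-contained sketch of that pattern follows; the table, entries, and names are made up for illustration.

    /* Sketch of indexed mode-data dispatch; hypothetical table and names. */
    #include <stddef.h>
    #include <stdio.h>

    #define RT_ELEMENTS(a) (sizeof(a) / sizeof((a)[0]))   /* same idea as IPRT's macro */

    typedef struct MODEDATA
    {
        const char *pszName;
        int       (*pfnQueryPage)(unsigned uCpu, unsigned long uAddr);  /* may be NULL */
    } MODEDATA;

    static int queryPage32Bit(unsigned uCpu, unsigned long uAddr)
    {
        printf("32-bit walk cpu=%u addr=%#lx\n", uCpu, uAddr);
        return 0;
    }

    static const MODEDATA g_aModeData[] =
    {
        { "none",   NULL },
        { "32-bit", queryPage32Bit },
    };

    static int queryPage(size_t idxMode, unsigned uCpu, unsigned long uAddr)
    {
        /* Guard the index and the optional member before dispatching,
         * mirroring the AssertReturn checks in the real code. */
        if (idxMode >= RT_ELEMENTS(g_aModeData))
            return -1;                                    /* cf. VERR_PGM_MODE_IPE */
        if (!g_aModeData[idxMode].pfnQueryPage)
            return -1;
        return g_aModeData[idxMode].pfnQueryPage(uCpu, uAddr);
    }

    int main(void)
    {
        return queryPage(1 /* 32-bit */, 0, 0x2000);
    }

The design choice this table makes: the paging mode is resolved once (when the guest switches modes) into an index, so the per-access hot path is a bounds check plus an indirect call instead of a switch over every mode.
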
@@ -2116 +2115 @@
 #endif
 }
-
-
-#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
-/**
- * Performs a guest second-level address translation (SLAT).
- *
- * @returns VBox status code.
- * @retval  VINF_SUCCESS on success.
- * @retval  VERR_PAGE_TABLE_NOT_PRESENT on failure.  Check pWalk for details.
- * @retval  VERR_PGM_NOT_USED_IN_MODE if paging isn't enabled. @a pWalk is
- *          not valid, except enmType is PGMPTWALKGSTTYPE_INVALID.
- *
- * @param   pVCpu               The cross context virtual CPU structure of the calling EMT.
- * @param   GCPhysNested        The nested-guest physical address being translated.
- * @param   fIsLinearAddrValid  Whether the linear address in @a GCPtrNested is the
- *                              cause for this translation.
- * @param   GCPtrNested         The nested-guest virtual address that initiated the
- *                              SLAT. If none, pass 0 (and not NIL_RTGCPTR).
- * @param   pWalk               Where to return the walk result. This is updated for
- *                              all error codes other than
- *                              VERR_PGM_NOT_USED_IN_MODE.
- * @param   pGstWalk            Where to store the second-level paging-mode specific
- *                              walk info.
- */
-static int pgmGstSlatWalk(PVMCPUCC pVCpu, RTGCPHYS GCPhysNested, bool fIsLinearAddrValid, RTGCPTR GCPtrNested,
-                          PPGMPTWALK pWalk, PPGMPTWALKGST pGstWalk)
-{
-    /* SLAT mode must be valid at this point as this should only be used -after- we have determined SLAT mode. */
-    Assert(   pVCpu->pgm.s.enmGuestSlatMode != PGMSLAT_DIRECT
-           && pVCpu->pgm.s.enmGuestSlatMode != PGMSLAT_INVALID);
-    AssertPtr(pWalk);
-    AssertPtr(pGstWalk);
-    switch (pVCpu->pgm.s.enmGuestSlatMode)
-    {
-        case PGMSLAT_EPT:
-            pGstWalk->enmType = PGMPTWALKGSTTYPE_EPT;
-            return PGM_GST_SLAT_NAME_EPT(Walk)(pVCpu, GCPhysNested, fIsLinearAddrValid, GCPtrNested, pWalk, &pGstWalk->u.Ept);
-
-        default:
-            AssertFailed();
-            pGstWalk->enmType = PGMPTWALKGSTTYPE_INVALID;
-            return VERR_PGM_NOT_USED_IN_MODE;
-    }
-}
-#endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
 
 
@@ -2613 +2567 @@
 
 # ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
+
+/**
+ * Performs a guest second-level address translation (SLAT).
+ *
+ * @returns VBox status code.
+ * @retval  VINF_SUCCESS on success.
+ * @retval  VERR_PAGE_TABLE_NOT_PRESENT on failure.  Check pWalk for details.
+ * @retval  VERR_PGM_NOT_USED_IN_MODE if paging isn't enabled. @a pWalk is
+ *          not valid, except enmType is PGMPTWALKGSTTYPE_INVALID.
+ *
+ * @param   pVCpu               The cross context virtual CPU structure of the calling EMT.
+ * @param   GCPhysNested        The nested-guest physical address being translated.
+ * @param   fIsLinearAddrValid  Whether the linear address in @a GCPtrNested is the
+ *                              cause for this translation.
+ * @param   GCPtrNested         The nested-guest virtual address that initiated the
+ *                              SLAT. If none, pass 0 (and not NIL_RTGCPTR).
+ * @param   pWalk               Where to return the walk result. This is updated for
+ *                              all error codes other than
+ *                              VERR_PGM_NOT_USED_IN_MODE.
+ * @param   pGstWalk            Where to store the second-level paging-mode specific
+ *                              walk info.
+ */
+static int pgmGstSlatWalk(PVMCPUCC pVCpu, RTGCPHYS GCPhysNested, bool fIsLinearAddrValid, RTGCPTR GCPtrNested,
+                          PPGMPTWALK pWalk, PPGMPTWALKGST pGstWalk)
+{
+    /* SLAT mode must be valid at this point as this should only be used -after- we have determined SLAT mode. */
+    Assert(   pVCpu->pgm.s.enmGuestSlatMode != PGMSLAT_DIRECT
+           && pVCpu->pgm.s.enmGuestSlatMode != PGMSLAT_INVALID);
+    AssertPtr(pWalk);
+    AssertPtr(pGstWalk);
+    switch (pVCpu->pgm.s.enmGuestSlatMode)
+    {
+        case PGMSLAT_EPT:
+            pGstWalk->enmType = PGMPTWALKGSTTYPE_EPT;
+            return PGM_GST_SLAT_NAME_EPT(Walk)(pVCpu, GCPhysNested, fIsLinearAddrValid, GCPtrNested, pWalk, &pGstWalk->u.Ept);
+
+        default:
+            AssertFailed();
+            pGstWalk->enmType = PGMPTWALKGSTTYPE_INVALID;
+            return VERR_PGM_NOT_USED_IN_MODE;
+    }
+}
+
+
 /**
  * Performs second-level address translation for the given CR3 and updates the
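pgmGstSlatWalk, moved in the hunk above, performs second-level address translation: a nested-guest physical address is translated once more through EPT-style tables before it refers to real memory. The frame-remapping arithmetic can be illustrated with a toy standalone program; the flat array below stands in for a real multi-level EPT hierarchy, and none of the names are VirtualBox structures.

    /* Toy illustration of SLAT frame remapping; nothing here is a VBox type. */
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT       12
    #define PAGE_OFFSET_MASK 0xfffu

    static uint64_t g_ept[8];  /* index: nested-guest frame -> host frame */

    static int slat_walk(uint64_t gc_phys_nested, uint64_t *phc_phys)
    {
        uint64_t idx = gc_phys_nested >> PAGE_SHIFT;      /* frame number */
        if (idx >= 8 || g_ept[idx] == UINT64_MAX)
            return -1;                                    /* cf. VERR_PAGE_TABLE_NOT_PRESENT */
        /* Swap the frame, keep the page offset. */
        *phc_phys = (g_ept[idx] << PAGE_SHIFT) | (gc_phys_nested & PAGE_OFFSET_MASK);
        return 0;
    }

    int main(void)
    {
        for (int i = 0; i < 8; i++)
            g_ept[i] = UINT64_MAX;                        /* all not-present */
        g_ept[2] = 5;                                     /* nested frame 2 -> host frame 5 */

        uint64_t hc = 0;
        if (slat_walk(0x2abc, &hc) == 0)
            printf("0x2abc -> %#llx\n", (unsigned long long)hc);  /* prints 0x5abc */
        return 0;
    }
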
@@ -2655 +2653 @@
     return VINF_SUCCESS;
 }
-# endif
-
+
+# endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
 
 /**
@@ -2990 +2988 @@
         PGM_INVL_VCPU_TLBS(pVCpu);
     return rcSync;
+}
+
+
+# ifdef VBOX_STRICT
+/**
+ * Asserts that everything related to the guest CR3 is correctly shadowed.
+ *
+ * This will call PGMAssertNoMappingConflicts() and PGMAssertHandlerAndFlagsInSync(),
+ * and assert the correctness of the guest CR3 mapping before asserting that the
+ * shadow page tables are in sync with the guest page tables.
+ *
+ * @returns Number of conflicts.
+ * @param   pVM     The cross context VM structure.
+ * @param   pVCpu   The cross context virtual CPU structure.
+ * @param   cr3     The current guest CR3 register value.
+ * @param   cr4     The current guest CR4 register value.
+ */
+VMMDECL(unsigned) PGMAssertCR3(PVMCC pVM, PVMCPUCC pVCpu, uint64_t cr3, uint64_t cr4)
+{
+    AssertReturn(pVM->enmTarget == VMTARGET_X86, 0);
+    STAM_PROFILE_START(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,SyncCR3), a);
+
+    uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
+    AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), -VERR_PGM_MODE_IPE);
+    AssertReturn(g_aPgmBothModeData[idxBth].pfnAssertCR3, -VERR_PGM_MODE_IPE);
+
+    PGM_LOCK_VOID(pVM);
+    unsigned cErrors = g_aPgmBothModeData[idxBth].pfnAssertCR3(pVCpu, cr3, cr4, 0, ~(RTGCPTR)0);
+    PGM_UNLOCK(pVM);
+
+    STAM_PROFILE_STOP(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,SyncCR3), a);
+    return cErrors;
+}
+# endif /* VBOX_STRICT */
+
+
+/**
+ * Called by CPUM or REM when CR0.WP changes to 1.
+ *
+ * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
+ * @thread  EMT
+ */
+VMMDECL(void) PGMCr0WpEnabled(PVMCPUCC pVCpu)
+{
+    /*
+     * Netware WP0+RO+US hack cleanup when WP0 -> WP1.
+     *
+     * Use the counter to judge whether there might be pool pages with active
+     * hacks in them.  If there are, we will be running the risk of messing up
+     * the guest by allowing it to write to read-only pages.  Thus, we have to
+     * clear the page pool ASAP if there is the slightest chance.
+     */
+    if (pVCpu->pgm.s.cNetwareWp0Hacks > 0)
+    {
+        Assert(pVCpu->CTX_SUFF(pVM)->cCpus == 1);
+
+        Log(("PGMCr0WpEnabled: %llu WP0 hacks active - clearing page pool\n", pVCpu->pgm.s.cNetwareWp0Hacks));
+        pVCpu->pgm.s.cNetwareWp0Hacks = 0;
+        pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_CLEAR_PGM_POOL;
+        VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
+    }
+}
+
+
+/**
+ * Updates PGM's copy of the guest's EPT pointer.
+ *
+ * @param   pVCpu       The cross context virtual CPU structure.
+ * @param   uEptPtr     The EPT pointer.
+ *
+ * @remarks This can be called as part of VM-entry so we might be in the midst of
+ *          switching to VMX non-root mode.
+ */
+VMM_INT_DECL(void) PGMSetGuestEptPtr(PVMCPUCC pVCpu, uint64_t uEptPtr)
+{
+    PVMCC pVM = pVCpu->CTX_SUFF(pVM);
+    PGM_LOCK_VOID(pVM);
+    pVCpu->pgm.s.uEptPtr = uEptPtr;
+    pVCpu->pgm.s.pGstEptPml4R3 = 0;
+    pVCpu->pgm.s.pGstEptPml4R0 = 0;
+    PGM_UNLOCK(pVM);
 }
 
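PGMAssertCR3 in the hunk above is compiled only in strict builds (VBOX_STRICT), so the expensive full walk of the shadow tables costs nothing in release builds. A minimal sketch of that gating pattern, using a made-up MY_STRICT macro and check_tables function rather than anything from VirtualBox:

    /* Sketch of strict-build-only verification; compile with -DMY_STRICT to enable. */
    #include <stdio.h>

    #ifdef MY_STRICT
    static unsigned check_tables(void)   /* cf. the pfnAssertCR3 walk */
    {
        /* ...walk every table entry and count mismatches... */
        return 0;                        /* number of conflicts found */
    }
    #endif

    int main(void)
    {
    #ifdef MY_STRICT
        unsigned cErrors = check_tables();
        if (cErrors)
            fprintf(stderr, "%u consistency errors\n", cErrors);
    #endif
        puts("done");
        return 0;
    }
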
@@ -3700 +3779 @@
 
 
-#ifdef VBOX_VMM_TARGET_X86
-/**
- * Called by CPUM or REM when CR0.WP changes to 1.
- *
- * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
- * @thread  EMT
- */
-VMMDECL(void) PGMCr0WpEnabled(PVMCPUCC pVCpu)
-{
-    /*
-     * Netware WP0+RO+US hack cleanup when WP0 -> WP1.
-     *
-     * Use the counter to judge whether there might be pool pages with active
-     * hacks in them.  If there are, we will be running the risk of messing up
-     * the guest by allowing it to write to read-only pages.  Thus, we have to
-     * clear the page pool ASAP if there is the slightest chance.
-     */
-    if (pVCpu->pgm.s.cNetwareWp0Hacks > 0)
-    {
-        Assert(pVCpu->CTX_SUFF(pVM)->cCpus == 1);
-
-        Log(("PGMCr0WpEnabled: %llu WP0 hacks active - clearing page pool\n", pVCpu->pgm.s.cNetwareWp0Hacks));
-        pVCpu->pgm.s.cNetwareWp0Hacks = 0;
-        pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_CLEAR_PGM_POOL;
-        VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
-    }
-}
-#endif /* VBOX_VMM_TARGET_X86 */
-
-
-/**
- * Gets the current guest paging mode.
- *
- * If you just need the CPU mode (real/protected/long), use CPUMGetGuestMode().
- *
- * @returns The current paging mode.
- * @param   pVCpu       The cross context virtual CPU structure.
- */
-VMMDECL(PGMMODE) PGMGetGuestMode(PVMCPU pVCpu)
-{
-    return pVCpu->pgm.s.enmGuestMode;
-}
-
-
-/**
- * Gets the current shadow paging mode.
- *
- * @returns The current paging mode.
- * @param   pVCpu       The cross context virtual CPU structure.
- */
-VMMDECL(PGMMODE) PGMGetShadowMode(PVMCPU pVCpu)
-{
-#ifndef VBOX_WITH_ONLY_PGM_NEM_MODE
-    return pVCpu->pgm.s.enmShadowMode;
-#else
-    RT_NOREF(pVCpu);
-    return PGMMODE_NONE;
-#endif
-}
-
-
-#ifdef VBOX_VMM_TARGET_X86
-/**
- * Gets the current host paging mode.
- *
- * @returns The current paging mode.
- * @param   pVM             The cross context VM structure.
- */
-VMMDECL(PGMMODE) PGMGetHostMode(PVM pVM)
-{
-    switch (pVM->pgm.s.enmHostMode)
-    {
-        case SUPPAGINGMODE_32_BIT:
-        case SUPPAGINGMODE_32_BIT_GLOBAL:
-            return PGMMODE_32_BIT;
-
-        case SUPPAGINGMODE_PAE:
-        case SUPPAGINGMODE_PAE_GLOBAL:
-            return PGMMODE_PAE;
-
-        case SUPPAGINGMODE_PAE_NX:
-        case SUPPAGINGMODE_PAE_GLOBAL_NX:
-            return PGMMODE_PAE_NX;
-
-        case SUPPAGINGMODE_AMD64:
-        case SUPPAGINGMODE_AMD64_GLOBAL:
-            return PGMMODE_AMD64;
-
-        case SUPPAGINGMODE_AMD64_NX:
-        case SUPPAGINGMODE_AMD64_GLOBAL_NX:
-            return PGMMODE_AMD64_NX;
-
-        default: AssertMsgFailed(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode)); break;
-    }
-
-    return PGMMODE_INVALID;
-}
-#endif /* VBOX_VMM_TARGET_X86 */
-
-
 /**
  * Get mode name.
@@ -3824 +3803 @@
         case PGMMODE_VMSA_V8_32:    return "VMSAv8-32";
         case PGMMODE_VMSA_V8_64:    return "VMSAv8-64";
-        default:                    return "unknown mode value";
-    }
-}
-
-
-#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
+
+        case PGMMODE_INVALID:
+        case PGMMODE_MAX:
+        case PGMMODE_32BIT_HACK:
+            break;
+    }
+    return "unknown mode value";
+}
+
+
+/**
+ * Gets the current guest paging mode.
+ *
+ * If you just need the CPU mode (real/protected/long), use CPUMGetGuestMode().
+ *
+ * @returns The current paging mode.
+ * @param   pVCpu       The cross context virtual CPU structure.
+ */
+VMMDECL(PGMMODE) PGMGetGuestMode(PVMCPU pVCpu)
+{
+    return pVCpu->pgm.s.enmGuestMode;
+}
+
+
+/**
+ * Gets the current shadow paging mode.
+ *
+ * @returns The current paging mode.
+ * @param   pVCpu       The cross context virtual CPU structure.
+ */
+VMMDECL(PGMMODE) PGMGetShadowMode(PVMCPU pVCpu)
+{
+#ifndef VBOX_WITH_ONLY_PGM_NEM_MODE
+    return pVCpu->pgm.s.enmShadowMode;
+#else
+    RT_NOREF(pVCpu);
+    return PGMMODE_NONE;
+#endif
+}
+
+#ifdef VBOX_VMM_TARGET_X86
+
+/**
+ * Gets the current host paging mode.
+ *
+ * @returns The current paging mode.
+ * @param   pVM             The cross context VM structure.
+ */
+VMMDECL(PGMMODE) PGMGetHostMode(PVM pVM)
+{
+    switch (pVM->pgm.s.enmHostMode)
+    {
+        case SUPPAGINGMODE_32_BIT:
+        case SUPPAGINGMODE_32_BIT_GLOBAL:
+            return PGMMODE_32_BIT;
+
+        case SUPPAGINGMODE_PAE:
+        case SUPPAGINGMODE_PAE_GLOBAL:
+            return PGMMODE_PAE;
+
+        case SUPPAGINGMODE_PAE_NX:
+        case SUPPAGINGMODE_PAE_GLOBAL_NX:
+            return PGMMODE_PAE_NX;
+
+        case SUPPAGINGMODE_AMD64:
+        case SUPPAGINGMODE_AMD64_GLOBAL:
+            return PGMMODE_AMD64;
+
+        case SUPPAGINGMODE_AMD64_NX:
+        case SUPPAGINGMODE_AMD64_GLOBAL_NX:
+            return PGMMODE_AMD64_NX;
+
+        default: AssertMsgFailed(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode)); break;
+    }
+
+    return PGMMODE_INVALID;
+}
+
+
+# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
 /**
  * Gets the SLAT mode name.
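Note the PGMModeName change at the top of the previous hunk: the old default: label is replaced by explicit PGMMODE_INVALID / PGMMODE_MAX / PGMMODE_32BIT_HACK cases, with the fallback return moved after the switch. With no default, compilers can warn (GCC/Clang -Wswitch) when a new PGMMODE enumerator is added but not handled. A small sketch of the idiom, with made-up enum and names:

    /* Exhaustive enum switch without default; hypothetical MODE enum. */
    #include <stdio.h>

    typedef enum MODE { MODE_INVALID = 0, MODE_REAL, MODE_PAGED, MODE_MAX } MODE;

    static const char *mode_name(MODE enmMode)
    {
        switch (enmMode)            /* no default: -Wswitch flags any missing case */
        {
            case MODE_REAL:     return "real";
            case MODE_PAGED:    return "paged";
            case MODE_INVALID:
            case MODE_MAX:      break;   /* fall out for bogus values */
        }
        return "unknown mode value";
    }

    int main(void)
    {
        printf("%s\n", mode_name(MODE_PAGED));
        return 0;
    }
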
@@ -3848 +3901 @@
     }
 }
-#endif  /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
-
-
-#ifdef VBOX_VMM_TARGET_X86
+# endif  /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
+
+
 /**
  * Notification from CPUM that the EFER.NXE bit has changed.
@@ -3908 +3960 @@
     }
 }
+
 #endif /* VBOX_VMM_TARGET_X86 */
-
 
 /**
@@ -4164 +4216 @@
 }
 
-#ifdef VBOX_VMM_TARGET_X86
-
-# ifdef VBOX_STRICT
-/**
- * Asserts that everything related to the guest CR3 is correctly shadowed.
- *
- * This will call PGMAssertNoMappingConflicts() and PGMAssertHandlerAndFlagsInSync(),
- * and assert the correctness of the guest CR3 mapping before asserting that the
- * shadow page tables are in sync with the guest page tables.
- *
- * @returns Number of conflicts.
- * @param   pVM     The cross context VM structure.
- * @param   pVCpu   The cross context virtual CPU structure.
- * @param   cr3     The current guest CR3 register value.
- * @param   cr4     The current guest CR4 register value.
- */
-VMMDECL(unsigned) PGMAssertCR3(PVMCC pVM, PVMCPUCC pVCpu, uint64_t cr3, uint64_t cr4)
-{
-    AssertReturn(pVM->enmTarget == VMTARGET_X86, 0);
-    STAM_PROFILE_START(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,SyncCR3), a);
-
-    uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
-    AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), -VERR_PGM_MODE_IPE);
-    AssertReturn(g_aPgmBothModeData[idxBth].pfnAssertCR3, -VERR_PGM_MODE_IPE);
-
-    PGM_LOCK_VOID(pVM);
-    unsigned cErrors = g_aPgmBothModeData[idxBth].pfnAssertCR3(pVCpu, cr3, cr4, 0, ~(RTGCPTR)0);
-    PGM_UNLOCK(pVM);
-
-    STAM_PROFILE_STOP(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,SyncCR3), a);
-    return cErrors;
-}
-# endif /* VBOX_STRICT */
-
-
-/**
- * Updates PGM's copy of the guest's EPT pointer.
- *
- * @param   pVCpu       The cross context virtual CPU structure.
- * @param   uEptPtr     The EPT pointer.
- *
- * @remarks This can be called as part of VM-entry so we might be in the midst of
- *          switching to VMX non-root mode.
- */
-VMM_INT_DECL(void) PGMSetGuestEptPtr(PVMCPUCC pVCpu, uint64_t uEptPtr)
-{
-    PVMCC pVM = pVCpu->CTX_SUFF(pVM);
-    PGM_LOCK_VOID(pVM);
-    pVCpu->pgm.s.uEptPtr = uEptPtr;
-    pVCpu->pgm.s.pGstEptPml4R3 = 0;
-    pVCpu->pgm.s.pGstEptPml4R0 = 0;
-    PGM_UNLOCK(pVM);
-}
-
-#endif /* VBOX_VMM_TARGET_X86 */
 #ifdef PGM_WITH_PAGE_ZEROING_DETECTION
 # ifndef VBOX_VMM_TARGET_X86