VirtualBox

Changeset 92583 in vbox


Ignore:
Timestamp:
Nov 24, 2021 9:13:14 AM (3 years ago)
Author:
vboxsync
svn:sync-xref-src-repo-rev:
148465
Message:

VMM: Nested VMX: bugref:10092 Renamed fPdpesMapped as it's rather misleading. More importantly, CR3 is mapped and, in the case of PAE paging, the PAE PDPTEs have been mapped.

Location:
trunk/src/VBox/VMM
Files:
17 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/VMM/VMMAll/IEMAll.cpp

    r92553 r92583  
    43564356        /* Inform PGM. */
    43574357        /** @todo Should we raise \#GP(0) here when PAE PDPEs are invalid? */
    4358         rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE), false /* fPdpesMapped */);
     4358        rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE), false /* fCr3Mapped */);
    43594359        AssertRCReturn(rc, rc);
    43604360        /* ignore informational status codes */
  • trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h

    r92568 r92583  
    58705870                !=  (uOldCrX & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE | X86_CR0_CD | X86_CR0_NW)) )
    58715871            {
    5872                 bool fPdpesMapped;
     5872                bool fCr3Mapped;
    58735873                if (    enmAccessCrX != IEMACCESSCRX_MOV_CRX
    58745874                    || !CPUMIsPaePagingEnabled(uNewCrX, pVCpu->cpum.GstCtx.cr4, NewEFER)
    58755875                    ||  CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
    5876                     fPdpesMapped = false;
     5876                    fCr3Mapped = false;
    58775877                else
    58785878                {
    58795879                    IEM_MAP_PAE_PDPES_AT_CR3_RET(pVCpu, iCrReg, pVCpu->cpum.GstCtx.cr3);
    5880                     fPdpesMapped = true;
     5880                    fCr3Mapped = true;
    58815881                }
    5882                 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, true /* global */, fPdpesMapped);
     5882                rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, true /* global */, fCr3Mapped);
    58835883                AssertRCReturn(rc, rc);
    58845884                /* ignore informational status codes */
     
    59815981            if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG)
    59825982            {
    5983                 bool fPdpesMapped;
     5983                bool fCr3Mapped;
    59845984                if (   !CPUMIsGuestInPAEModeEx(IEM_GET_CTX(pVCpu))
    59855985                    ||  CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
    5986                     fPdpesMapped = false;
     5986                    fCr3Mapped = false;
    59875987                else
    59885988                {
    59895989                    Assert(enmAccessCrX == IEMACCESSCRX_MOV_CRX);
    59905990                    IEM_MAP_PAE_PDPES_AT_CR3_RET(pVCpu, iCrReg, uNewCrX);
    5991                     fPdpesMapped = true;
     5991                    fCr3Mapped = true;
    59925992                }
    5993                 rc = PGMFlushTLB(pVCpu, uNewCrX, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE), fPdpesMapped);
     5993                rc = PGMFlushTLB(pVCpu, uNewCrX, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE), fCr3Mapped);
    59945994                AssertRCReturn(rc, rc);
    59955995                /* ignore informational status codes */
     
    60736073            if ((uNewCrX ^ uOldCrX) & (X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE | X86_CR4_PCIDE /* | X86_CR4_SMEP */))
    60746074            {
    6075                 bool fPdpesMapped;
     6075                bool fCr3Mapped;
    60766076                if (   !CPUMIsPaePagingEnabled(pVCpu->cpum.GstCtx.cr0, uNewCrX, pVCpu->cpum.GstCtx.msrEFER)
    60776077                    || CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
    6078                     fPdpesMapped = false;
     6078                    fCr3Mapped = false;
    60796079                else
    60806080                {
    60816081                    Assert(enmAccessCrX == IEMACCESSCRX_MOV_CRX);
    60826082                    IEM_MAP_PAE_PDPES_AT_CR3_RET(pVCpu, iCrReg, pVCpu->cpum.GstCtx.cr3);
    6083                     fPdpesMapped = true;
     6083                    fCr3Mapped = true;
    60846084                }
    6085                 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, true /* global */, fPdpesMapped);
     6085                rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, true /* global */, fCr3Mapped);
    60866086                AssertRCReturn(rc, rc);
    60876087                /* ignore informational status codes */
  • trunk/src/VBox/VMM/VMMAll/IEMAllCImplSvmInstr.cpp.h

    r92546 r92583  
    8282 *
    8383 * @returns Strict VBox status code.
    84  * @param   pVCpu           The cross context virtual CPU structure.
    85  * @param   fPdpesMapped    Whether the PAE PDPEs (and PDPT) have been mapped.
    86  */
    87 DECLINLINE(VBOXSTRICTRC) iemSvmWorldSwitch(PVMCPUCC pVCpu, bool fPdpesMapped)
     84 * @param   pVCpu       The cross context virtual CPU structure.
     85 * @param   fCr3Mapped  Whether CR3 (and in case of PAE paging, whether PDPEs
     86 *                      and PDPT) have been mapped.
     87 */
     88DECLINLINE(VBOXSTRICTRC) iemSvmWorldSwitch(PVMCPUCC pVCpu, bool fCr3Mapped)
    8889{
    8990    /*
     
    105106    if (rc == VINF_SUCCESS)
    106107    {
    107         rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, true /* fGlobal */, fPdpesMapped);
     108        rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, true /* fGlobal */, fCr3Mapped);
    108109        AssertRCReturn(rc, rc);
    109110    }
     
    723724         * Invalid PAE PDPEs here causes a #VMEXIT.
    724725         */
    725         bool fPdpesMapped;
     726        bool fCr3Mapped;
    726727        if (   !pVmcbCtrl->NestedPagingCtrl.n.u1NestedPaging
    727728            && CPUMIsPaePagingEnabled(pVmcbNstGst->u64CR0, pVmcbNstGst->u64CR4, uValidEfer))
     
    729730            rc = PGMGstMapPaePdpesAtCr3(pVCpu, pVmcbNstGst->u64CR3);
    730731            if (RT_SUCCESS(rc))
    731                 fPdpesMapped = true;
     732                fCr3Mapped = true;
    732733            else
    733734            {
     
    737738        }
    738739        else
    739             fPdpesMapped = false;
     740            fCr3Mapped = false;
    740741
    741742        /*
     
    777778         * Update PGM, IEM and others of a world-switch.
    778779         */
    779         VBOXSTRICTRC rcStrict = iemSvmWorldSwitch(pVCpu, fPdpesMapped);
     780        VBOXSTRICTRC rcStrict = iemSvmWorldSwitch(pVCpu, fCr3Mapped);
    780781        if (rcStrict == VINF_SUCCESS)
    781782        { /* likely */ }
  • trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h

    r92581 r92583  
    153153    } while (0)
    154154
    155 /** Marks a VM-exit failure with a diagnostic reason, logs and returns. */
    156 # define IEM_VMX_VMEXIT_FAILED_RET(a_pVCpu, a_uExitReason, a_pszFailure, a_VmxDiag) \
     155/** Marks a VM-exit failure with a diagnostic reason and logs. */
     156# define IEM_VMX_VMEXIT_FAILED(a_pVCpu, a_uExitReason, a_pszFailure, a_VmxDiag) \
    157157    do \
    158158    { \
     
    160160               HMGetVmxDiagDesc(a_VmxDiag), (a_pszFailure))); \
    161161        (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.enmDiag  = (a_VmxDiag); \
     162    } while (0)
     163
     164/** Marks a VM-exit failure with a diagnostic reason, logs and returns. */
     165# define IEM_VMX_VMEXIT_FAILED_RET(a_pVCpu, a_uExitReason, a_pszFailure, a_VmxDiag) \
     166    do \
     167    { \
     168        IEM_VMX_VMEXIT_FAILED(a_pVCpu, a_uExitReason, a_pszFailure, a_VmxDiag); \
    162169        return VERR_VMX_VMEXIT_FAILED; \
    163170    } while (0)
     
    12301237 * Performs the VMX transition to/from VMX non-root mode.
    12311238 *
    1232  * @param   pVCpu           The cross context virtual CPU structure.
    1233  * @param   fPdpesMapped    Whether the PAE PDPTEs (and PDPT) have been mapped.
     1239 * @param   pVCpu       The cross context virtual CPU structure.
     1240 * @param   fCr3Mapped  Whether CR3 (and in case of PAE paging, whether PDPTEs and
     1241 *                      PDPT) have been mapped.
    12341242*/
    1235 IEM_STATIC int iemVmxTransition(PVMCPUCC pVCpu, bool fPdpesMapped)
     1243IEM_STATIC int iemVmxTransition(PVMCPUCC pVCpu, bool fCr3Mapped)
    12361244{
    12371245    /*
     
    12531261    if (rc == VINF_SUCCESS)
    12541262    {
    1255         rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, true /* fGlobal */, fPdpesMapped);
     1263        rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, true /* fGlobal */, fCr3Mapped);
    12561264        AssertRCReturn(rc, rc);
    12571265    }
     
    18881896
    18891897/**
    1890  * Checks the host PAE PDPTEs assuming we are switching to a PAE mode host.
    1891  *
    1892  * @param   pVCpu           The cross context virtual CPU structure.
    1893  * @param   uExitReason     The VMX instruction name (for logging purposes).
    1894  *
    1895  * @remarks Caller must ensure the preconditions are met before calling this
    1896  *          function as failure here will trigger VMX aborts!
    1897  */
    1898 IEM_STATIC int iemVmxVmexitCheckHostPdptes(PVMCPUCC pVCpu, uint32_t uExitReason)
    1899 {
    1900     PCVMXVVMCS const   pVmcs      = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
    1901     const char * const pszFailure = "VMX-abort";
    1902     int const rc = PGMGstMapPaePdpesAtCr3(pVCpu, pVmcs->u64HostCr3.u);
    1903     if (RT_SUCCESS(rc))
    1904         return rc;
    1905     IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_HostPdpte);
    1906 }
    1907 
    1908 
    1909 /**
    19101898 * Loads the host MSRs from the VM-exit MSR-load area as part of VM-exit.
    19111899 *
     
    20192007     * See Intel spec. 26.5.4 "Checking and Loading Host Page-Directory-Pointer-Table Entries".
    20202008     */
    2021     bool fPdpesMapped;
     2009    bool fCr3Mapped;
    20222010    if (   (pVmcs->u64HostCr4.u & X86_CR4_PAE)
    20232011        && !fHostInLongMode
     
    20252013            || pVmcs->u64HostCr3.u != pVCpu->cpum.GstCtx.cr3))
    20262014    {
    2027         int const rc = iemVmxVmexitCheckHostPdptes(pVCpu, uExitReason);
    2028         if (RT_FAILURE(rc))
    2029         {
    2030             Log(("VM-exit attempting to load invalid PDPTEs -> VMX-Abort\n"));
     2015        int const rc = PGMGstMapPaePdpesAtCr3(pVCpu, pVmcs->u64HostCr3.u);
     2016        if (RT_SUCCESS(rc))
     2017        { /* likely*/ }
     2018        else
     2019        {
     2020            IEM_VMX_VMEXIT_FAILED(pVCpu, uExitReason, "VMX-abort", kVmxVDiag_Vmexit_HostPdpte);
    20312021            return iemVmxAbort(pVCpu, VMXBOART_HOST_PDPTE);
    20322022        }
    2033         fPdpesMapped = true;
     2023        fCr3Mapped = true;
    20342024    }
    20352025    else
    2036         fPdpesMapped = false;
     2026        fCr3Mapped = false;
    20372027
    20382028    iemVmxVmexitLoadHostControlRegsMsrs(pVCpu);
     
    20512041
    20522042    /* Perform the VMX transition (PGM updates). */
    2053     VBOXSTRICTRC rcStrict = iemVmxTransition(pVCpu, fPdpesMapped);
     2043    VBOXSTRICTRC rcStrict = iemVmxTransition(pVCpu, fCr3Mapped);
    20542044    if (rcStrict == VINF_SUCCESS)
    20552045    { /* likely */ }
     
    56725662 * @returns VBox status code.
    56735663 * @param   pVCpu           The cross context virtual CPU structure.
    5674  * @param   pfPdpesMapped   Where to store whether PAE PDPTEs (and PDPT) have been
    5675  *                          mapped as part of checking guest state.
    56765664 * @param   pszInstr        The VMX instruction name (for logging purposes).
    56775665 */
     
    74947482                            pVmcs->fVmcsState = VMX_V_VMCS_LAUNCH_STATE_LAUNCHED;
    74957483
    7496                         /* We would have mapped PAE PDPTEs when PAE paging is used without EPT. */
    7497                         bool const fPdpesMapped = !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_EPT)
    7498                                                && iemVmxVmcsIsGuestPaePagingEnabled(pVmcs);
     7484                        /* When EPT isn't used, we would have validated and mapped CR3 and PDPTEs when PAE paging is enabled. */
     7485                        bool const fCr3Mapped = !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_EPT)
     7486                                             && iemVmxVmcsIsGuestPaePagingEnabled(pVmcs);
    74997487
    75007488                        /* Perform the VMX transition (PGM updates). */
    7501                         VBOXSTRICTRC rcStrict = iemVmxTransition(pVCpu, fPdpesMapped);
     7489                        VBOXSTRICTRC rcStrict = iemVmxTransition(pVCpu, fCr3Mapped);
    75027490                        if (rcStrict == VINF_SUCCESS)
    75037491                        { /* likely */ }
     
    84988486                        /* Invalidate mappings for the linear address tagged with VPID. */
    84998487                        /** @todo PGM support for VPID? Currently just flush everything. */
    8500                         PGMFlushTLB(pVCpu, uCr3, true /* fGlobal */, false /* fPdpesMapped */);
     8488                        PGMFlushTLB(pVCpu, uCr3, true /* fGlobal */, false /* fCr3Mapped */);
    85018489                        iemVmxVmSucceed(pVCpu);
    85028490                    }
     
    85258513                    /* Invalidate all mappings with VPID. */
    85268514                    /** @todo PGM support for VPID? Currently just flush everything. */
    8527                     PGMFlushTLB(pVCpu, uCr3, true /* fGlobal */, false /* fPdpesMapped */);
     8515                    PGMFlushTLB(pVCpu, uCr3, true /* fGlobal */, false /* fCr3Mapped */);
    85288516                    iemVmxVmSucceed(pVCpu);
    85298517                }
     
    85428530                /* Invalidate all mappings with non-zero VPIDs. */
    85438531                /** @todo PGM support for VPID? Currently just flush everything. */
    8544                 PGMFlushTLB(pVCpu, uCr3, true /* fGlobal */, false /* fPdpesMapped */);
     8532                PGMFlushTLB(pVCpu, uCr3, true /* fGlobal */, false /* fCr3Mapped */);
    85458533                iemVmxVmSucceed(pVCpu);
    85468534                break;
     
    85538541                    /* Invalidate all mappings with VPID except global translations. */
    85548542                    /** @todo PGM support for VPID? Currently just flush everything. */
    8555                     PGMFlushTLB(pVCpu, uCr3, true /* fGlobal */, false /* fPdpesMapped */);
     8543                    PGMFlushTLB(pVCpu, uCr3, true /* fGlobal */, false /* fCr3Mapped */);
    85568544                    iemVmxVmSucceed(pVCpu);
    85578545                }
  • trunk/src/VBox/VMM/VMMAll/NEMAllNativeTemplate-win.cpp.h

    r92541 r92583  
    11171117    if (fUpdateCr3)
    11181118    {
    1119         int rc = PGMUpdateCR3(pVCpu, pVCpu->cpum.GstCtx.cr3, false /*fPdpesMapped*/);
     1119        int rc = PGMUpdateCR3(pVCpu, pVCpu->cpum.GstCtx.cr3, false /*fCr3Mapped*/);
    11201120        if (rc == VINF_SUCCESS)
    11211121        { /* likely */ }
  • trunk/src/VBox/VMM/VMMAll/PGMAll.cpp

    r92547 r92583  
    23812381 * @param   cr3             The new cr3.
    23822382 * @param   fGlobal         Indicates whether this is a global flush or not.
    2383  * @param   fPdpesMapped    Whether the PAE PDPEs (and PDPT) have been mapped.
    2384  */
    2385 VMMDECL(int) PGMFlushTLB(PVMCPUCC pVCpu, uint64_t cr3, bool fGlobal, bool fPdpesMapped)
     2383 * @param   fCr3Mapped      Whether CR3 (and in case of PAE paging, whether PDPEs
     2384 *                          and PDPT) has been mapped.
     2385 */
     2386VMMDECL(int) PGMFlushTLB(PVMCPUCC pVCpu, uint64_t cr3, bool fGlobal, bool fCr3Mapped)
    23862387{
    23872388    STAM_PROFILE_START(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,FlushTLB), a);
     
    24052406    RTGCPHYS       GCPhysCR3    = pgmGetGuestMaskedCr3(pVCpu, cr3);
    24062407#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
    2407     if (   !fPdpesMapped
     2408    if (   !fCr3Mapped
    24082409        && CPUMIsGuestVmxEptPagingEnabled(pVCpu))
    24092410    {
     
    24282429
    24292430        pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
    2430         rc = g_aPgmBothModeData[idxBth].pfnMapCR3(pVCpu, GCPhysCR3, fPdpesMapped);
     2431        rc = g_aPgmBothModeData[idxBth].pfnMapCR3(pVCpu, GCPhysCR3, fCr3Mapped);
    24312432        if (RT_LIKELY(rc == VINF_SUCCESS))
    24322433        { }
     
    24892490 * @param   pVCpu           The cross context virtual CPU structure.
    24902491 * @param   cr3             The new CR3.
    2491  * @param   fPdpesMapped    Whether the PAE PDPEs (and PDPT) have been mapped.
    2492  */
    2493 VMMDECL(int) PGMUpdateCR3(PVMCPUCC pVCpu, uint64_t cr3, bool fPdpesMapped)
     2492 * @param   fCr3Mapped      Whether CR3 (and in case of PAE paging, whether PDPEs
     2493 *                          and PDPT) has been mapped.
     2494 */
     2495VMMDECL(int) PGMUpdateCR3(PVMCPUCC pVCpu, uint64_t cr3, bool fCr3Mapped)
    24942496{
    24952497    VMCPU_ASSERT_EMT(pVCpu);
     
    25042506    RTGCPHYS GCPhysCR3 = pgmGetGuestMaskedCr3(pVCpu, cr3);
    25052507#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
    2506     if (   !fPdpesMapped
     2508    if (   !fCr3Mapped
    25072509        && CPUMIsGuestVmxEptPagingEnabled(pVCpu))
    25082510    {
     
    25272529
    25282530        pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
    2529         rc = g_aPgmBothModeData[idxBth].pfnMapCR3(pVCpu, GCPhysCR3, fPdpesMapped);
     2531        rc = g_aPgmBothModeData[idxBth].pfnMapCR3(pVCpu, GCPhysCR3, fCr3Mapped);
    25302532
    25312533        AssertRCSuccess(rc); /* Assumes VINF_PGM_SYNC_CR3 doesn't apply to nested paging. */ /** @todo this isn't true for the mac, but we need hw to test/fix this. */
     
    26102612            AssertReturn(g_aPgmBothModeData[idxBth].pfnMapCR3, VERR_PGM_MODE_IPE);
    26112613            pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
    2612             rc = g_aPgmBothModeData[idxBth].pfnMapCR3(pVCpu, GCPhysCR3, false /* fPdpesMapped */);
     2614            rc = g_aPgmBothModeData[idxBth].pfnMapCR3(pVCpu, GCPhysCR3, false /* fCr3Mapped */);
    26132615        }
    26142616
  • trunk/src/VBox/VMM/VMMAll/PGMAllBth.h

    r92561 r92583  
    5353PGM_BTH_DECL(unsigned, AssertCR3)(PVMCPUCC pVCpu, uint64_t cr3, uint64_t cr4, RTGCPTR GCPtr = 0, RTGCPTR cb = ~(RTGCPTR)0);
    5454#endif
    55 PGM_BTH_DECL(int, MapCR3)(PVMCPUCC pVCpu, RTGCPHYS GCPhysCR3, bool fPdpesMapped);
     55PGM_BTH_DECL(int, MapCR3)(PVMCPUCC pVCpu, RTGCPHYS GCPhysCR3, bool fCr3Mapped);
    5656PGM_BTH_DECL(int, UnmapCR3)(PVMCPUCC pVCpu);
    5757
     
    41314131 * @param   GCPhysCR3       The physical address in the CR3 register. (A20 mask
    41324132 *                          already applied.)
    4133  * @param   fPdpesMapped    Whether the PAE PDPEs (and PDPT) have been mapped.
     4133 * @param   fCr3Mapped      Whether CR3 (and in case of PAE paging, whether PDPEs
     4134 *                          and PDPT) has been mapped.
    41344135 */
    4135 PGM_BTH_DECL(int, MapCR3)(PVMCPUCC pVCpu, RTGCPHYS GCPhysCR3, bool fPdpesMapped)
     4136PGM_BTH_DECL(int, MapCR3)(PVMCPUCC pVCpu, RTGCPHYS GCPhysCR3, bool fCr3Mapped)
    41364137{
    41374138    PVMCC pVM = pVCpu->CTX_SUFF(pVM); NOREF(pVM);
     
    41474148
    41484149# if PGM_GST_TYPE == PGM_TYPE_PAE
    4149     if (!fPdpesMapped)
     4150    if (!fCr3Mapped)
    41504151# else
    4151     NOREF(fPdpesMapped);
     4152    NOREF(fCr3Mapped);
    41524153#endif
    41534154    {
     
    42044205    }
    42054206#else /* prot/real stub */
    4206     NOREF(fPdpesMapped);
     4207    NOREF(fCr3Mapped);
    42074208#endif
    42084209
  • trunk/src/VBox/VMM/VMMAll/PGMAllGst.h

    r92459 r92583  
    5252    AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
    5353    AssertReturn(g_aPgmBothModeData[idxBth].pfnMapCR3, VERR_PGM_MODE_IPE);
    54     return g_aPgmBothModeData[idxBth].pfnMapCR3(pVCpu, GCPhysCR3, false /* fPdpesMapped */);
     54    return g_aPgmBothModeData[idxBth].pfnMapCR3(pVCpu, GCPhysCR3, false /* fCr3Mapped */);
    5555}
    5656
  • trunk/src/VBox/VMM/VMMAll/VMXAllTemplate.cpp.h

    r92498 r92583  
    55855585    {
    55865586        Assert(!(ASMAtomicUoReadU64(&pCtx->fExtrn) & CPUMCTX_EXTRN_CR3));
    5587         PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu), false /* fPdpesMapped */);
     5587        PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu), false /* fCr3Mapped */);
    55885588        Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
    55895589    }
  • trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp

    r92495 r92583  
    29252925    {
    29262926        AssertMsg(pCtx->cr3 == pVmcbGuest->u64CR3, ("cr3=%#RX64 vmcb_cr3=%#RX64\n", pCtx->cr3, pVmcbGuest->u64CR3));
    2927         PGMUpdateCR3(pVCpu, pCtx->cr3, false /* fPdpesMapped */);
     2927        PGMUpdateCR3(pVCpu, pCtx->cr3, false /* fCr3Mapped */);
    29282928    }
    29292929}
  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp

    r92495 r92583  
    80588058    {
    80598059        Assert(!(ASMAtomicUoReadU64(&pCtx->fExtrn) & CPUMCTX_EXTRN_CR3));
    8060         PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu), false /* fPdpesMapped */);
     8060        PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu), false /* fCr3Mapped */);
    80618061        Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
    80628062    }
  • trunk/src/VBox/VMM/VMMR0/NEMR0Native-win.cpp

    r92541 r92583  
    27812781        {
    27822782            LogFlow(("nemR0WinImportState: -> PGMUpdateCR3!\n"));
    2783             rc = PGMUpdateCR3(pGVCpu, pCtx->cr3, false /*fPdpesMapped*/);
     2783            rc = PGMUpdateCR3(pGVCpu, pCtx->cr3, false /*fCr3Mapped*/);
    27842784            if (rc == VINF_SUCCESS)
    27852785            { /* likely */ }
  • trunk/src/VBox/VMM/VMMR3/EM.cpp

    r92520 r92583  
    14571457    {
    14581458        CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_EFER, rc);
    1459         int const rc2 = PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu), false /* fPdpesMapped */);
     1459        int const rc2 = PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu), false /* fCr3Mapped */);
    14601460        if (RT_FAILURE(rc2))
    14611461            return rc2;
  • trunk/src/VBox/VMM/VMMR3/NEMR3Native-darwin.cpp

    r92560 r92583  
    11251125    if (fUpdateCr3)
    11261126    {
    1127         int rc = PGMUpdateCR3(pVCpu, pVCpu->cpum.GstCtx.cr3, false /*fPdpesMapped*/);
     1127        int rc = PGMUpdateCR3(pVCpu, pVCpu->cpum.GstCtx.cr3, false /*fCr3Mapped*/);
    11281128        if (rc == VINF_SUCCESS)
    11291129        { /* likely */ }
  • trunk/src/VBox/VMM/VMMR3/NEMR3Native-linux.cpp

    r92579 r92583  
    15661566        if (fUpdateCr3)
    15671567        {
    1568             int rc = PGMUpdateCR3(pVCpu, pVCpu->cpum.GstCtx.cr3, false /*fPdpesMapped*/);
     1568            int rc = PGMUpdateCR3(pVCpu, pVCpu->cpum.GstCtx.cr3, false /*fCr3Mapped*/);
    15691569            if (rc == VINF_SUCCESS)
    15701570            { /* likely */ }
  • trunk/src/VBox/VMM/VMMR3/NEMR3Native-win.cpp

    r92468 r92583  
    18961896                {
    18971897                    LogFlow(("nemR3NativeRunGC: calling PGMFlushTLB...\n"));
    1898                     int rc = PGMFlushTLB(pVCpu, CPUMGetGuestCR3(pVCpu), true /*fGlobal*/, false /*fPdpesMapped*/);
     1898                    int rc = PGMFlushTLB(pVCpu, CPUMGetGuestCR3(pVCpu), true /*fGlobal*/, false /*fCr3Mapped*/);
    18991899                    AssertRCReturn(rc, rc);
    19001900                    if (rcStrict == VINF_NEM_FLUSH_TLB)
  • trunk/src/VBox/VMM/include/PGMInternal.h

    r92480 r92583  
    26922692    DECLCALLBACKMEMBER(int, pfnPrefetchPage,(PVMCPUCC pVCpu, RTGCPTR GCPtrPage));
    26932693    DECLCALLBACKMEMBER(int, pfnVerifyAccessSyncPage,(PVMCPUCC pVCpu, RTGCPTR GCPtrPage, unsigned fFlags, unsigned uError));
    2694     DECLCALLBACKMEMBER(int, pfnMapCR3,(PVMCPUCC pVCpu, RTGCPHYS GCPhysCR3, bool fPdpesMapped));
     2694    DECLCALLBACKMEMBER(int, pfnMapCR3,(PVMCPUCC pVCpu, RTGCPHYS GCPhysCR3, bool fCr3Mapped));
    26952695    DECLCALLBACKMEMBER(int, pfnUnmapCR3,(PVMCPUCC pVCpu));
    26962696    DECLCALLBACKMEMBER(int, pfnEnter,(PVMCPUCC pVCpu, RTGCPHYS GCPhysCR3));
Note: See TracChangeset for help on using the changeset viewer.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette