VirtualBox

Changeset 91580 in vbox for trunk/src/VBox


Timestamp:
Oct 6, 2021 7:22:04 AM
Author:
vboxsync
Message:

VMM: Nested VMX: bugref:10092 Made changes to PGM++ to handle invalid PAE PDPEs being loaded.

Location:
trunk/src/VBox/VMM
Files:
17 edited
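
For context: the change validates the four PAE page-directory-pointer-table entries (PDPEs) and maps them up front in the paths that load CR0/CR3/CR4 or perform SVM/VMX world switches, and a new fPdpesMapped flag tells PGMFlushTLB/PGMUpdateCR3 whether that work has already been done. Below is a minimal, self-contained sketch of the reserved-bit check; it mirrors the PGMGstArePaePdpesValid helper added in PGMAll.cpp further down, but the MBZ mask here is an illustrative assumption (VirtualBox computes the real mask per VM as pVCpu->pgm.s.fGstPaeMbzPdpeMask).

    /* Sketch only, not VirtualBox code: validity check for the four PAE PDPEs.
     * A PDPE is acceptable if it is not present, or if none of its
     * must-be-zero (MBZ) bits are set.  The MBZ mask below assumes a
     * 36-bit MAXPHYADDR for illustration. */
    #include <stdbool.h>
    #include <stdint.h>

    #define PDPE_P         UINT64_C(0x1)                                   /* present bit */
    #define PDPE_MBZ_MASK  (UINT64_C(0x1E6) | ~((UINT64_C(1) << 36) - 1))  /* bits 2:1, 8:5, 63:36 */

    static bool are_pae_pdpes_valid(const uint64_t pdpes[4])
    {
        for (unsigned i = 0; i < 4; i++)
            if ((pdpes[i] & PDPE_P) && (pdpes[i] & PDPE_MBZ_MASK))
                return false;
        return true;
    }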

  • trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp

    r91305 r91580  
    23252325
    23262326/**
    2327  * Sets the PAE PDPTEs for the guest.
     2327 * Sets the PAE PDPEs for the guest.
    23282328 *
    23292329 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
    2330  * @param   paPaePdpes  The PAE PDPTEs to set.
     2330 * @param   paPaePdpes  The PAE PDPEs to set.
    23312331 */
    23322332VMM_INT_DECL(void) CPUMSetGuestPaePdpes(PVMCPU pVCpu, PCX86PDPE paPaePdpes)
     
    23362336        pVCpu->cpum.s.Guest.aPaePdpes[i].u = paPaePdpes[i].u;
    23372337    pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_CR3;
     2338}
     2339
     2340
     2341/**
     2342 * Gets the PAE PDPTEs for the guest.
     2343 *
     2344 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
     2345 * @param   paPaePdpes  Where to store the PAE PDPEs.
     2346 */
     2347VMM_INT_DECL(void) CPUMGetGuestPaePdpes(PVMCPU pVCpu, PX86PDPE paPaePdpes)
     2348{
     2349    Assert(paPaePdpes);
     2350    CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR3);
     2351    for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->cpum.s.Guest.aPaePdpes); i++)
     2352        paPaePdpes[i].u = pVCpu->cpum.s.Guest.aPaePdpes[i].u;
    23382353}
    23392354
  • trunk/src/VBox/VMM/VMMAll/HMVMXAll.cpp

    r91360 r91580  
    205205    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestPatMsr              , "GuestPatMsr"               ),
    206206    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestPcide               , "GuestPcide"                ),
    207     VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestPdpteCr3ReadPhys    , "GuestPdpteCr3ReadPhys"     ),
    208     VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestPdpte0Rsvd          , "GuestPdpte0Rsvd"           ),
    209     VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestPdpte1Rsvd          , "GuestPdpte1Rsvd"           ),
    210     VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestPdpte2Rsvd          , "GuestPdpte2Rsvd"           ),
    211     VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestPdpte3Rsvd          , "GuestPdpte3Rsvd"           ),
     207    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestPdpte               , "GuestPdpteRsvd"            ),
    212208    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestPndDbgXcptBsNoTf    , "GuestPndDbgXcptBsNoTf"     ),
    213209    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestPndDbgXcptBsTf      , "GuestPndDbgXcptBsTf"       ),
     
    362358    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_VmxRoot                  , "VmxRoot"                   ),
    363359    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_Vpid                     , "Vpid"                      ),
    364     VMXV_DIAG_DESC(kVmxVDiag_Vmexit_HostPdpteCr3ReadPhys      , "HostPdpteCr3ReadPhys"      ),
    365     VMXV_DIAG_DESC(kVmxVDiag_Vmexit_HostPdpte0Rsvd            , "HostPdpte0Rsvd"            ),
    366     VMXV_DIAG_DESC(kVmxVDiag_Vmexit_HostPdpte1Rsvd            , "HostPdpte1Rsvd"            ),
    367     VMXV_DIAG_DESC(kVmxVDiag_Vmexit_HostPdpte2Rsvd            , "HostPdpte2Rsvd"            ),
    368     VMXV_DIAG_DESC(kVmxVDiag_Vmexit_HostPdpte3Rsvd            , "HostPdpte3Rsvd"            ),
     360    VMXV_DIAG_DESC(kVmxVDiag_Vmexit_HostPdpte                 , "HostPdpte"                 ),
    369361    VMXV_DIAG_DESC(kVmxVDiag_Vmexit_MsrLoad                   , "MsrLoad"                   ),
    370362    VMXV_DIAG_DESC(kVmxVDiag_Vmexit_MsrLoadCount              , "MsrLoadCount"              ),
  • trunk/src/VBox/VMM/VMMAll/IEMAll.cpp

    r91281 r91580  
    43554355
    43564356        /* Inform PGM. */
    4357         rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));
     4357        /** @todo Should we raise \#GP(0) here when PAE PDPEs are invalid? */
     4358        rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE), false /* fPdpesMapped */);
    43584359        AssertRCReturn(rc, rc);
    43594360        /* ignore informational status codes */
  • trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h

    r91297 r91580  
    39183918    if ((uNewCr0 & X86_CR0_PE) != (uOldCr0 & X86_CR0_PE))
    39193919    {
    3920         int rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, true /* global */);
     3920        int rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, true /* global */, false /* fPdpesMapped */);
    39213921        AssertRCReturn(rc, rc);
    39223922        /* ignore informational status codes */
     
    56985698
    56995699/**
     5700 * Helper for mapping CR3 and PAE PDPEs for 'mov CRx,GReg'.
     5701 */
     5702#define IEM_MAP_PAE_PDPES_AT_CR3_RET(a_pVCpu, a_iCrReg, a_uCr3) \
     5703    do \
     5704    { \
     5705        int const rcX = PGMGstMapPaePdpesAtCr3(a_pVCpu, a_uCr3); \
     5706        if (RT_SUCCESS(rcX)) \
     5707        { /* likely */ } \
     5708        else \
     5709        { \
     5710            if (rcX == VERR_PGM_PAE_PDPE_RSVD) \
     5711            { \
     5712                Log(("iemCImpl_load_Cr%#x: Trying to load invalid PAE PDPEs\n", a_iCrReg)); \
     5713                return iemRaiseGeneralProtectionFault0(a_pVCpu); \
     5714            } \
     5715            Log(("iemCImpl_load_Cr%#x: PGMGstReadPaePdpesAtCr3 failed %Rrc\n", a_iCrReg, rcX)); \
     5716            return rcX; \
     5717        } \
     5718    } while (0)
     5719
     5720
     5721/**
    57005722 * Used to implemented 'mov CRx,GReg' and 'lmsw r/m16'.
    57015723 *
     
    58075829            }
    58085830
    5809             /** @todo check reserved PDPTR bits as AMD states. */
    5810 
    58115831            /*
    58125832             * SVM nested-guest CR0 write intercepts.
     
    58325852
    58335853            /*
    5834              * Change CR0.
    5835              */
    5836             CPUMSetGuestCR0(pVCpu, uNewCrX);
    5837             Assert(pVCpu->cpum.GstCtx.cr0 == uNewCrX);
    5838 
    5839             /*
    58405854             * Change EFER.LMA if entering or leaving long mode.
    58415855             */
     5856            uint64_t NewEFER = pVCpu->cpum.GstCtx.msrEFER;
    58425857            if (   (uNewCrX & X86_CR0_PG) != (uOldCrX & X86_CR0_PG)
    58435858                && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LME) )
    58445859            {
    5845                 uint64_t NewEFER = pVCpu->cpum.GstCtx.msrEFER;
    58465860                if (uNewCrX & X86_CR0_PG)
    58475861                    NewEFER |= MSR_K6_EFER_LMA;
     
    58595873                !=  (uOldCrX & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE | X86_CR0_CD | X86_CR0_NW)) )
    58605874            {
    5861                 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, true /* global */);
     5875                bool fPdpesMapped;
     5876                if (    enmAccessCrX != IEMACCESSCRX_MOV_CRX
     5877                    || !CPUMIsPaePagingEnabled(uNewCrX, pVCpu->cpum.GstCtx.cr4, NewEFER)
     5878                    ||  CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
     5879                    fPdpesMapped = false;
     5880                else
     5881                {
     5882                    IEM_MAP_PAE_PDPES_AT_CR3_RET(pVCpu, iCrReg, pVCpu->cpum.GstCtx.cr3);
     5883                    fPdpesMapped = true;
     5884                }
     5885                rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, true /* global */, fPdpesMapped);
    58625886                AssertRCReturn(rc, rc);
    58635887                /* ignore informational status codes */
    58645888            }
     5889
     5890            /*
     5891             * Change CR0.
     5892             */
     5893            CPUMSetGuestCR0(pVCpu, uNewCrX);
     5894            Assert(pVCpu->cpum.GstCtx.cr0 == uNewCrX);
     5895
    58655896            rcStrict = PGMChangeMode(pVCpu, pVCpu->cpum.GstCtx.cr0, pVCpu->cpum.GstCtx.cr4, pVCpu->cpum.GstCtx.msrEFER);
    58665897            break;
     
    59355966            }
    59365967
    5937             /** @todo If we're in PAE mode we should check the PDPTRs for
    5938              *        invalid bits. */
     5968            /* Inform PGM. */
     5969            if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG)
     5970            {
     5971                bool fPdpesMapped;
     5972                if (   !CPUMIsGuestInPAEModeEx(IEM_GET_CTX(pVCpu))
     5973                    ||  CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
     5974                    fPdpesMapped = false;
     5975                else
     5976                {
     5977                    Assert(enmAccessCrX == IEMACCESSCRX_MOV_CRX);
     5978                    IEM_MAP_PAE_PDPES_AT_CR3_RET(pVCpu, iCrReg, uNewCrX);
     5979                    fPdpesMapped = true;
     5980                }
     5981                rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE), fPdpesMapped);
     5982                AssertRCReturn(rc, rc);
     5983                /* ignore informational status codes */
     5984            }
    59395985
    59405986            /* Make the change. */
     
    59425988            AssertRCSuccessReturn(rc, rc);
    59435989
    5944             /* Inform PGM. */
    5945             if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG)
    5946             {
    5947                 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));
    5948                 AssertRCReturn(rc, rc);
    5949                 /* ignore informational status codes */
    5950             }
    59515990            rcStrict = VINF_SUCCESS;
    59525991            break;
     
    60186057
    60196058            /*
     6059             * Notify PGM.
     6060             */
     6061            if ((uNewCrX ^ uOldCrX) & (X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE | X86_CR4_PCIDE /* | X86_CR4_SMEP */))
     6062            {
     6063                bool fPdpesMapped;
     6064                if (   !CPUMIsPaePagingEnabled(pVCpu->cpum.GstCtx.cr0, uNewCrX, pVCpu->cpum.GstCtx.msrEFER)
     6065                    || CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
     6066                    fPdpesMapped = false;
     6067                else
     6068                {
     6069                    Assert(enmAccessCrX == IEMACCESSCRX_MOV_CRX);
     6070                    IEM_MAP_PAE_PDPES_AT_CR3_RET(pVCpu, iCrReg, pVCpu->cpum.GstCtx.cr3);
     6071                    fPdpesMapped = true;
     6072                }
     6073                rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, true /* global */, fPdpesMapped);
     6074                AssertRCReturn(rc, rc);
     6075                /* ignore informational status codes */
     6076            }
     6077
     6078            /*
    60206079             * Change it.
    60216080             */
     
    60246083            Assert(pVCpu->cpum.GstCtx.cr4 == uNewCrX);
    60256084
    6026             /*
    6027              * Notify SELM and PGM.
    6028              */
    6029             /* SELM - VME may change things wrt to the TSS shadowing. */
    6030             if ((uNewCrX ^ uOldCrX) & X86_CR4_VME)
    6031                 Log(("iemCImpl_load_CrX: VME %d -> %d\n", RT_BOOL(uOldCrX & X86_CR4_VME), RT_BOOL(uNewCrX & X86_CR4_VME) ));
    6032 
    6033             /* PGM - flushing and mode. */
    6034             if ((uNewCrX ^ uOldCrX) & (X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE | X86_CR4_PCIDE /* | X86_CR4_SMEP */))
    6035             {
    6036                 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, true /* global */);
    6037                 AssertRCReturn(rc, rc);
    6038                 /* ignore informational status codes */
    6039             }
    60406085            rcStrict = PGMChangeMode(pVCpu, pVCpu->cpum.GstCtx.cr0, pVCpu->cpum.GstCtx.cr4, pVCpu->cpum.GstCtx.msrEFER);
    60416086            break;
     
    66926737
    66936738                /* Invalidate mappings for the linear address tagged with PCID except global translations. */
    6694                 PGMFlushTLB(pVCpu, uCr3, false /* fGlobal */);
     6739                PGMFlushTLB(pVCpu, uCr3, false /* fGlobal */, false /* fPdpesMapped */);
    66956740                break;
    66966741            }
     
    67056750                }
    67066751                /* Invalidate all mappings associated with PCID except global translations. */
    6707                 PGMFlushTLB(pVCpu, uCr3, false /* fGlobal */);
     6752                PGMFlushTLB(pVCpu, uCr3, false /* fGlobal */, false /* fPdpesMapped */);
    67086753                break;
    67096754            }
     
    67116756            case X86_INVPCID_TYPE_ALL_CONTEXT_INCL_GLOBAL:
    67126757            {
    6713                 PGMFlushTLB(pVCpu, uCr3, true /* fGlobal */);
     6758                PGMFlushTLB(pVCpu, uCr3, true /* fGlobal */, false /* fPdpesMapped */);
    67146759                break;
    67156760            }
     
    67176762            case X86_INVPCID_TYPE_ALL_CONTEXT_EXCL_GLOBAL:
    67186763            {
    6719                 PGMFlushTLB(pVCpu, uCr3, false /* fGlobal */);
     6764                PGMFlushTLB(pVCpu, uCr3, false /* fGlobal */, false /* fPdpesMapped */);
    67206765                break;
    67216766            }
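
In the mov CRx hunks above, the PDPEs are only pre-mapped when the write actually leaves the guest in PAE paging mode (and not under SVM nested hardware virtualization, where validation is deferred). A rough, self-contained sketch of the predicate that CPUMIsPaePagingEnabled is used for here, with assumed semantics of "paging on, CR4.PAE set, not in long mode":

    #include <stdbool.h>
    #include <stdint.h>

    #define CR0_PG   (UINT64_C(1) << 31)
    #define CR4_PAE  (UINT64_C(1) << 5)
    #define EFER_LMA (UINT64_C(1) << 10)

    /* PAE paging is in effect when paging is enabled, CR4.PAE is set and the
     * CPU is not in long mode (in long mode CR3 points to a PML4, not a PDPT). */
    static bool is_pae_paging(uint64_t cr0, uint64_t cr4, uint64_t efer)
    {
        return (cr0 & CR0_PG) && (cr4 & CR4_PAE) && !(efer & EFER_LMA);
    }
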
  • trunk/src/VBox/VMM/VMMAll/IEMAllCImplSvmInstr.cpp.h

    r91291 r91580  
    8282 *
    8383 * @returns Strict VBox status code.
    84  * @param   pVCpu       The cross context virtual CPU structure.
    85  */
    86 DECLINLINE(VBOXSTRICTRC) iemSvmWorldSwitch(PVMCPUCC pVCpu)
     84 * @param   pVCpu           The cross context virtual CPU structure.
     85 * @param   fPdpesMapped    Whether the PAE PDPEs (and PDPT) have been mapped.
     86 */
     87DECLINLINE(VBOXSTRICTRC) iemSvmWorldSwitch(PVMCPUCC pVCpu, bool fPdpesMapped)
    8788{
    8889    /*
     
    106107    if (rc == VINF_SUCCESS)
    107108    {
    108         rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, true);
     109        rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, true /* fGlobal */, fPdpesMapped);
    109110        AssertRCReturn(rc, rc);
    110111    }
     
    307308
    308309            /*
    309              * Reload the guest's "host state".
     310             * If we are switching to PAE mode host, validate the PDPEs first.
     311             * Any invalid PDPEs here causes a VCPU shutdown.
    310312             */
    311             CPUMSvmVmExitRestoreHostState(pVCpu, IEM_GET_CTX(pVCpu));
    312 
    313             /*
    314              * Update PGM, IEM and others of a world-switch.
    315              */
    316             rcStrict = iemSvmWorldSwitch(pVCpu);
    317             if (rcStrict == VINF_SUCCESS)
    318                 rcStrict = VINF_SVM_VMEXIT;
    319             else if (RT_SUCCESS(rcStrict))
     313            PCSVMHOSTSTATE pHostState = &pVCpu->cpum.GstCtx.hwvirt.svm.HostState;
     314            bool const fHostInPaeMode = CPUMIsPaePagingEnabled(pHostState->uCr0, pHostState->uCr4, pHostState->uEferMsr);
     315            if (fHostInPaeMode)
     316                rcStrict = PGMGstMapPaePdpesAtCr3(pVCpu, pHostState->uCr3);
     317            if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    320318            {
    321                 LogFlow(("iemSvmVmexit: Setting passup status from iemSvmWorldSwitch %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
    322                 iemSetPassUpStatus(pVCpu, rcStrict);
    323                 rcStrict = VINF_SVM_VMEXIT;
     319                /*
     320                 * Reload the host state.
     321                 */
     322                CPUMSvmVmExitRestoreHostState(pVCpu, IEM_GET_CTX(pVCpu));
     323
     324                /*
     325                 * Update PGM, IEM and others of a world-switch.
     326                 */
     327                rcStrict = iemSvmWorldSwitch(pVCpu, fHostInPaeMode);
     328                if (rcStrict == VINF_SUCCESS)
     329                    rcStrict = VINF_SVM_VMEXIT;
     330                else if (RT_SUCCESS(rcStrict))
     331                {
     332                    LogFlow(("iemSvmVmexit: Setting passup status from iemSvmWorldSwitch %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
     333                    iemSetPassUpStatus(pVCpu, rcStrict);
     334                    rcStrict = VINF_SVM_VMEXIT;
     335                }
     336                else
     337                    LogFlow(("iemSvmVmexit: iemSvmWorldSwitch unexpected failure. rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
    324338            }
    325339            else
    326                 LogFlow(("iemSvmVmexit: iemSvmWorldSwitch unexpected failure. rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
     340            {
     341                Log(("iemSvmVmexit: PAE PDPEs invalid while restoring host state. rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
     342                rcStrict = VINF_EM_TRIPLE_FAULT;
     343            }
    327344        }
    328345        else
    329346        {
    330347            AssertMsgFailed(("iemSvmVmexit: Mapping VMCB at %#RGp failed. rc=%Rrc\n", pVCpu->cpum.GstCtx.hwvirt.svm.GCPhysVmcb, VBOXSTRICTRC_VAL(rcStrict)));
    331             rcStrict = VERR_SVM_VMEXIT_FAILED;
     348            rcStrict = VINF_EM_TRIPLE_FAULT;
    332349        }
    333350    }
     
    705722
    706723        /*
     724         * Validate and map PAE PDPEs if the guest will be using PAE paging.
     725         * Invalid PAE PDPEs here causes a #VMEXIT.
     726         */
     727        bool fPdpesMapped;
     728        if (   !pVmcbCtrl->NestedPagingCtrl.n.u1NestedPaging
     729            && CPUMIsPaePagingEnabled(pVmcbNstGst->u64CR0, pVmcbNstGst->u64CR4, uValidEfer))
     730        {
     731            rc = PGMGstMapPaePdpesAtCr3(pVCpu, pVmcbNstGst->u64CR3);
     732            if (RT_SUCCESS(rc))
     733                fPdpesMapped = true;
     734            else
     735            {
     736                Log(("iemSvmVmrun: PAE PDPEs invalid -> #VMEXIT\n"));
     737                return iemSvmVmexit(pVCpu, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     738            }
     739        }
     740        else
     741            fPdpesMapped = false;
     742
     743        /*
    707744         * Copy the remaining guest state from the VMCB to the guest-CPU context.
    708745         */
     
    742779         * Update PGM, IEM and others of a world-switch.
    743780         */
    744         VBOXSTRICTRC rcStrict = iemSvmWorldSwitch(pVCpu);
     781        VBOXSTRICTRC rcStrict = iemSvmWorldSwitch(pVCpu, fPdpesMapped);
    745782        if (rcStrict == VINF_SUCCESS)
    746783        { /* likely */ }
  • trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h

    r91427 r91580  
    11041104
    11051105/**
    1106  * Gets the instruction diagnostic for guest CR3 referenced PDPTE reserved bits
    1107  * failure during VM-entry of a nested-guest.
    1108  *
    1109  * @param   iSegReg     The PDPTE entry index.
    1110  */
    1111 IEM_STATIC VMXVDIAG iemVmxGetDiagVmentryPdpteRsvd(unsigned iPdpte)
    1112 {
    1113     Assert(iPdpte < X86_PG_PAE_PDPE_ENTRIES);
    1114     switch (iPdpte)
    1115     {
    1116         case 0: return kVmxVDiag_Vmentry_GuestPdpte0Rsvd;
    1117         case 1: return kVmxVDiag_Vmentry_GuestPdpte1Rsvd;
    1118         case 2: return kVmxVDiag_Vmentry_GuestPdpte2Rsvd;
    1119         case 3: return kVmxVDiag_Vmentry_GuestPdpte3Rsvd;
    1120         IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_11);
    1121     }
    1122 }
    1123 
    1124 
    1125 /**
    1126  * Gets the instruction diagnostic for host CR3 referenced PDPTE reserved bits
    1127  * failure during VM-exit of a nested-guest.
    1128  *
    1129  * @param   iSegReg     The PDPTE entry index.
    1130  */
    1131 IEM_STATIC VMXVDIAG iemVmxGetDiagVmexitPdpteRsvd(unsigned iPdpte)
    1132 {
    1133     Assert(iPdpte < X86_PG_PAE_PDPE_ENTRIES);
    1134     switch (iPdpte)
    1135     {
    1136         case 0: return kVmxVDiag_Vmexit_HostPdpte0Rsvd;
    1137         case 1: return kVmxVDiag_Vmexit_HostPdpte1Rsvd;
    1138         case 2: return kVmxVDiag_Vmexit_HostPdpte2Rsvd;
    1139         case 3: return kVmxVDiag_Vmexit_HostPdpte3Rsvd;
    1140         IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_12);
    1141     }
    1142 }
    1143 
    1144 
    1145 /**
    11461106 * Saves the guest control registers, debug registers and some MSRs are part of
    11471107 * VM-exit.
     
    12541214
    12551215/**
    1256  * Perform a VMX transition updated PGM, IEM and CPUM.
    1257  *
    1258  * @param   pVCpu   The cross context virtual CPU structure.
    1259  */
    1260 IEM_STATIC int iemVmxWorldSwitch(PVMCPUCC pVCpu)
     1216 * Performs the VMX transition to/from VMX non-root mode.
     1217 *
     1218 * @param   pVCpu           The cross context virtual CPU structure.
     1219 * @param   fPdpesMapped    Whether the PAE PDPTEs (and PDPT) have been mapped.
     1220*/
     1221IEM_STATIC int iemVmxTransition(PVMCPUCC pVCpu, bool fPdpesMapped)
    12611222{
    12621223    /*
     
    12801241    if (rc == VINF_SUCCESS)
    12811242    {
    1282         rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, true);
     1243        rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, true /* fGlobal */, fPdpesMapped);
    12831244        AssertRCReturn(rc, rc);
    12841245    }
     
    19031864
    19041865/**
    1905  * Checks host PDPTes as part of VM-exit.
     1866 * Checks the host PAE PDPTEs assuming we are switching to a PAE mode host.
    19061867 *
    19071868 * @param   pVCpu           The cross context virtual CPU structure.
    1908  * @param   uExitReason     The VM-exit reason (for logging purposes).
     1869 * @param   uExitReason     The VMX instruction name (for logging purposes).
     1870 *
     1871 * @remarks Caller must ensure the preconditions are met before calling this
     1872 *          function as failure here will trigger VMX aborts!
    19091873 */
    19101874IEM_STATIC int iemVmxVmexitCheckHostPdptes(PVMCPUCC pVCpu, uint32_t uExitReason)
    19111875{
    1912     /*
    1913      * Check host PDPTEs.
    1914      * See Intel spec. 27.5.4 "Checking and Loading Host Page-Directory-Pointer-Table Entries".
    1915      */
    1916     PCVMXVVMCS const    pVmcs           = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
    1917     const char * const  pszFailure      = "VMX-abort";
    1918     bool const          fHostInLongMode = RT_BOOL(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE);
    1919 
    1920     if (   (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
    1921         && !fHostInLongMode)
    1922     {
    1923         uint64_t const uHostCr3 = pVCpu->cpum.GstCtx.cr3 & X86_CR3_PAE_PAGE_MASK;
    1924         X86PDPE aPdptes[X86_PG_PAE_PDPE_ENTRIES];
    1925         int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), (void *)&aPdptes[0], uHostCr3, sizeof(aPdptes));
    1926         if (RT_SUCCESS(rc))
    1927         {
    1928             uint8_t idxInvalid;
    1929             bool const fValid = CPUMArePaePdpesValid(&aPdptes[0], &idxInvalid);
    1930             if (fValid)
    1931             { /* likely */ }
    1932             else
    1933             {
    1934                 VMXVDIAG const enmDiag = iemVmxGetDiagVmexitPdpteRsvd(idxInvalid);
    1935                 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, enmDiag);
    1936             }
    1937         }
    1938         else
    1939             IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_HostPdpteCr3ReadPhys);
    1940     }
    1941 
    1942     NOREF(pszFailure);
    1943     NOREF(uExitReason);
    1944     return VINF_SUCCESS;
     1876    PCVMXVVMCS const   pVmcs      = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
     1877    const char * const pszFailure = "VMX-abort";
     1878    int const rc = PGMGstMapPaePdpesAtCr3(pVCpu, pVmcs->u64HostCr3.u);
     1879    if (RT_SUCCESS(rc))
     1880        return rc;
     1881    IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_HostPdpte);
    19451882}
    19461883
     
    19501887 *
    19511888 * @returns VBox status code.
    1952  * @param   pVCpu       The cross context virtual CPU structure.
    1953  * @param   pszInstr    The VMX instruction name (for logging purposes).
     1889 * @param   pVCpu           The cross context virtual CPU structure.
     1890 * @param   uExitReason     The VMX instruction name (for logging purposes).
    19541891 */
    19551892IEM_STATIC int iemVmxVmexitLoadHostAutoMsrs(PVMCPUCC pVCpu, uint32_t uExitReason)
     
    20541991    }
    20551992
     1993    /*
     1994     * Check host PAE PDPTEs prior to loading the host state.
     1995     * See Intel spec. 26.5.4 "Checking and Loading Host Page-Directory-Pointer-Table Entries".
     1996     */
     1997    bool fPdpesMapped;
     1998    if (   (pVmcs->u64HostCr4.u & X86_CR4_PAE)
     1999        && !fHostInLongMode
     2000        && (   !CPUMIsGuestInPAEModeEx(&pVCpu->cpum.GstCtx)
     2001            || pVmcs->u64HostCr3.u != pVCpu->cpum.GstCtx.cr3))
     2002    {
     2003        int const rc = iemVmxVmexitCheckHostPdptes(pVCpu, uExitReason);
     2004        if (RT_FAILURE(rc))
     2005        {
     2006            Log(("VM-exit attempting to load invalid PDPTEs -> VMX-Abort\n"));
      2007            return iemVmxAbort(pVCpu, VMXABORT_HOST_PDPTE);
     2008        }
     2009        fPdpesMapped = true;
     2010    }
     2011    else
     2012        fPdpesMapped = false;
     2013
    20562014    iemVmxVmexitLoadHostControlRegsMsrs(pVCpu);
    20572015    iemVmxVmexitLoadHostSegRegs(pVCpu);
     
    20692027
    20702028    /* Perform the VMX transition (PGM updates). */
    2071     VBOXSTRICTRC rcStrict = iemVmxWorldSwitch(pVCpu);
     2029    VBOXSTRICTRC rcStrict = iemVmxTransition(pVCpu, fPdpesMapped);
    20722030    if (rcStrict == VINF_SUCCESS)
    2073     {
    2074         /* Check host PDPTEs (only when we've fully switched page tables_. */
    2075         /** @todo r=ramshankar: I don't know if PGM does this for us already or not... */
    2076         int rc = iemVmxVmexitCheckHostPdptes(pVCpu, uExitReason);
    2077         if (RT_FAILURE(rc))
    2078         {
    2079             Log(("VM-exit failed while restoring host PDPTEs -> VMX-Abort\n"));
     2080             return iemVmxAbort(pVCpu, VMXABORT_HOST_PDPTE);
    2081         }
    2082     }
     2031    { /* likely */ }
    20832032    else if (RT_SUCCESS(rcStrict))
    20842033    {
    2085         Log3(("VM-exit: iemVmxWorldSwitch returns %Rrc (uExitReason=%u) -> Setting passup status\n", VBOXSTRICTRC_VAL(rcStrict),
     2034        Log3(("VM-exit: iemVmxTransition returns %Rrc (uExitReason=%u) -> Setting passup status\n", VBOXSTRICTRC_VAL(rcStrict),
    20862035              uExitReason));
    20872036        rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
     
    20892038    else
    20902039    {
    2091         Log3(("VM-exit: iemVmxWorldSwitch failed! rc=%Rrc (uExitReason=%u)\n", VBOXSTRICTRC_VAL(rcStrict), uExitReason));
     2040        Log3(("VM-exit: iemVmxTransition failed! rc=%Rrc (uExitReason=%u)\n", VBOXSTRICTRC_VAL(rcStrict), uExitReason));
    20922041        return VBOXSTRICTRC_VAL(rcStrict);
    20932042    }
     
    56435592 * Checks guest PDPTEs as part of VM-entry.
    56445593 *
    5645  * @param   pVCpu       The cross context virtual CPU structure.
    5646  * @param   pszInstr    The VMX instruction name (for logging purposes).
    5647  */
    5648 IEM_STATIC int iemVmxVmentryCheckGuestPdptes(PVMCPUCC pVCpu, const char *pszInstr)
     5594 * @param   pVCpu           The cross context virtual CPU structure.
     5595 * @param   pfPdpesMapped   Where to store whether PAE PDPTEs (and PDPT) have been
     5596 *                          mapped as part of checking guest state.
     5597 * @param   pszInstr        The VMX instruction name (for logging purposes).
     5598 */
     5599IEM_STATIC int iemVmxVmentryCheckGuestPdptes(PVMCPUCC pVCpu, bool *pfPdpesMapped, const char *pszInstr)
    56495600{
    56505601    /*
     
    56545605    PVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
    56555606    const char * const pszFailure = "VM-exit";
     5607    *pfPdpesMapped = false;
    56565608
    56575609    if (   !(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST)
     
    56595611        &&  (pVmcs->u64GuestCr0.u & X86_CR0_PG))
    56605612    {
    5661         /* Get the PDPTEs. */
    5662         X86PDPE aPdptes[X86_PG_PAE_PDPE_ENTRIES];
    56635613#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
    56645614        if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_EPT)
    56655615        {
    5666             aPdptes[0].u = pVmcs->u64GuestPdpte0.u;
    5667             aPdptes[1].u = pVmcs->u64GuestPdpte1.u;
    5668             aPdptes[2].u = pVmcs->u64GuestPdpte2.u;
    5669             aPdptes[3].u = pVmcs->u64GuestPdpte3.u;
    5670         }
    5671         else
    5672 #endif
    5673         {
    5674             uint64_t const uGuestCr3 = pVmcs->u64GuestCr3.u & X86_CR3_PAE_PAGE_MASK;
    5675             int const rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), (void *)&aPdptes[0], uGuestCr3, sizeof(aPdptes));
    5676             if (RT_FAILURE(rc))
     5616            /* Get PDPTEs from the VMCS. */
     5617            X86PDPE aPaePdptes[X86_PG_PAE_PDPE_ENTRIES];
     5618            aPaePdptes[0].u = pVmcs->u64GuestPdpte0.u;
     5619            aPaePdptes[1].u = pVmcs->u64GuestPdpte1.u;
     5620            aPaePdptes[2].u = pVmcs->u64GuestPdpte2.u;
     5621            aPaePdptes[3].u = pVmcs->u64GuestPdpte3.u;
     5622
     5623            /* Check validity of the PDPTEs. */
     5624            bool const fValid = PGMGstArePaePdpesValid(pVCpu, &aPaePdptes[0]);
     5625            if (fValid)
     5626            { /* likely */ }
     5627            else
    56775628            {
    56785629                iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_PDPTE);
    5679                 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPdpteCr3ReadPhys);
     5630                IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPdpte);
    56805631            }
    56815632        }
    5682 
    5683         /* Check validity of the PDPTEs. */
    5684         uint8_t idxInvalid;
    5685         bool const fValid = CPUMArePaePdpesValid(&aPdptes[0], &idxInvalid);
    5686         if (fValid)
    5687         { /* likely */ }
    5688         else
    5689         {
    5690             VMXVDIAG const enmDiag = iemVmxGetDiagVmentryPdpteRsvd(idxInvalid);
    5691             iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_PDPTE);
    5692             IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
     5633        else
     5634#endif
     5635        {
     5636            int const rc = PGMGstMapPaePdpesAtCr3(pVCpu, pVmcs->u64GuestCr3.u);
     5637            if (rc == VINF_SUCCESS)
     5638                *pfPdpesMapped = true;
     5639            else
     5640            {
     5641                iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_PDPTE);
     5642                IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPdpte);
     5643            }
    56935644        }
    56945645    }
     
    57045655 *
    57055656 * @returns VBox status code.
    5706  * @param   pVCpu       The cross context virtual CPU structure.
    5707  * @param   pszInstr    The VMX instruction name (for logging purposes).
    5708  */
    5709 IEM_STATIC int iemVmxVmentryCheckGuestState(PVMCPUCC pVCpu, const char *pszInstr)
     5657 * @param   pVCpu           The cross context virtual CPU structure.
     5658 * @param   pfPdpesMapped   Where to store whether PAE PDPTEs (and PDPT) have been
     5659 *                          mapped as part of checking guest state.
     5660 * @param   pszInstr        The VMX instruction name (for logging purposes).
     5661 */
     5662IEM_STATIC int iemVmxVmentryCheckGuestState(PVMCPUCC pVCpu, bool *pfPdpesMapped, const char *pszInstr)
    57105663{
    57115664    int rc = iemVmxVmentryCheckGuestControlRegsMsrs(pVCpu, pszInstr);
     
    57235676                    rc = iemVmxVmentryCheckGuestNonRegState(pVCpu, pszInstr);
    57245677                    if (RT_SUCCESS(rc))
    5725                         return iemVmxVmentryCheckGuestPdptes(pVCpu, pszInstr);
     5678                        return iemVmxVmentryCheckGuestPdptes(pVCpu, pfPdpesMapped, pszInstr);
    57265679                }
    57275680            }
     
    74197372            iemVmxVmentrySaveNmiBlockingFF(pVCpu);
    74207373
    7421             rc = iemVmxVmentryCheckGuestState(pVCpu, pszInstr);
     7374            bool fPdpesMapped;
     7375            rc = iemVmxVmentryCheckGuestState(pVCpu, &fPdpesMapped, pszInstr);
    74227376            if (RT_SUCCESS(rc))
    74237377            {
     
    74357389
    74367390                        /* Perform the VMX transition (PGM updates). */
    7437                         VBOXSTRICTRC rcStrict = iemVmxWorldSwitch(pVCpu);
     7391                        VBOXSTRICTRC rcStrict = iemVmxTransition(pVCpu, fPdpesMapped);
    74387392                        if (rcStrict == VINF_SUCCESS)
    74397393                        { /* likely */ }
    74407394                        else if (RT_SUCCESS(rcStrict))
    74417395                        {
    7442                             Log3(("%s: iemVmxWorldSwitch returns %Rrc -> Setting passup status\n", pszInstr,
     7396                            Log3(("%s: iemVmxTransition returns %Rrc -> Setting passup status\n", pszInstr,
    74437397                                  VBOXSTRICTRC_VAL(rcStrict)));
    74447398                            rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
     
    74467400                        else
    74477401                        {
    7448                             Log3(("%s: iemVmxWorldSwitch failed! rc=%Rrc\n", pszInstr, VBOXSTRICTRC_VAL(rcStrict)));
     7402                            Log3(("%s: iemVmxTransition failed! rc=%Rrc\n", pszInstr, VBOXSTRICTRC_VAL(rcStrict)));
    74497403                            return rcStrict;
    74507404                        }
     
    84378391                        /* Invalidate mappings for the linear address tagged with VPID. */
    84388392                        /** @todo PGM support for VPID? Currently just flush everything. */
    8439                         PGMFlushTLB(pVCpu, uCr3, true /* fGlobal */);
     8393                        PGMFlushTLB(pVCpu, uCr3, true /* fGlobal */, false /* fPdpesMapped */);
    84408394                        iemVmxVmSucceed(pVCpu);
    84418395                    }
     
    84648418                    /* Invalidate all mappings with VPID. */
    84658419                    /** @todo PGM support for VPID? Currently just flush everything. */
    8466                     PGMFlushTLB(pVCpu, uCr3, true /* fGlobal */);
     8420                    PGMFlushTLB(pVCpu, uCr3, true /* fGlobal */, false /* fPdpesMapped */);
    84678421                    iemVmxVmSucceed(pVCpu);
    84688422                }
     
    84818435                /* Invalidate all mappings with non-zero VPIDs. */
    84828436                /** @todo PGM support for VPID? Currently just flush everything. */
    8483                 PGMFlushTLB(pVCpu, uCr3, true /* fGlobal */);
     8437                PGMFlushTLB(pVCpu, uCr3, true /* fGlobal */, false /* fPdpesMapped */);
    84848438                iemVmxVmSucceed(pVCpu);
    84858439                break;
     
    84928446                    /* Invalidate all mappings with VPID except global translations. */
    84938447                    /** @todo PGM support for VPID? Currently just flush everything. */
    8494                     PGMFlushTLB(pVCpu, uCr3, true /* fGlobal */);
     8448                    PGMFlushTLB(pVCpu, uCr3, true /* fGlobal */, false /* fPdpesMapped */);
    84958449                    iemVmxVmSucceed(pVCpu);
    84968450                }
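
On the VM-exit side above, the host PAE PDPEs are validated and mapped only when the host will actually run with PAE paging and the currently mapped PDPT cannot simply be reused. A small sketch of that condition, mirroring the hunk around iemVmxVmexitCheckHostPdptes (parameter names are illustrative, not VirtualBox identifiers):

    #include <stdbool.h>
    #include <stdint.h>

    /* Load/validate host PDPEs on VM-exit only if the host uses PAE paging and
     * the guest's currently mapped PDPT (if any) does not already match host CR3. */
    static bool vmexit_must_load_host_pdpes(bool host_cr4_pae, bool host_long_mode,
                                            bool guest_in_pae_mode,
                                            uint64_t host_cr3, uint64_t guest_cr3)
    {
        return host_cr4_pae
            && !host_long_mode
            && (!guest_in_pae_mode || host_cr3 != guest_cr3);
    }
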
  • trunk/src/VBox/VMM/VMMAll/NEMAllNativeTemplate-win.cpp.h

    r91281 r91580  
    492492            return rc;
    493493        if (rc == VERR_NEM_FLUSH_TLB)
    494             return PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, true /*fGlobal*/);
     494        {
     495            rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, true /*fGlobal*/, false /*fPdpesMapped*/);
     496            return rc;
     497        }
    495498        AssertLogRelRCReturn(rc, rc);
    496499        return rc;
     
    11041107    if (fUpdateCr3)
    11051108    {
    1106         int rc = PGMUpdateCR3(pVCpu, pVCpu->cpum.GstCtx.cr3);
    1107         AssertMsgReturn(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc), RT_FAILURE_NP(rc) ? rc : VERR_NEM_IPE_2);
     1109        int rc = PGMUpdateCR3(pVCpu, pVCpu->cpum.GstCtx.cr3, false /*fPdpesMapped*/);
     1110        if (rc == VINF_SUCCESS)
     1111        { /* likely */ }
     1112        else
     1113            AssertMsgFailedReturn(("rc=%Rrc\n", rc), RT_FAILURE_NP(rc) ? rc : VERR_NEM_IPE_2);
    11081114    }
    11091115
  • trunk/src/VBox/VMM/VMMAll/PGMAll.cpp

    r91345 r91580  
    21872187
    21882188/**
     2189 * Checks whether the given PAE PDPEs are potentially valid for the guest.
     2190 *
     2191 * @returns @c true if the PDPE is valid, @c false otherwise.
     2192 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
     2193 * @param   paPaePdpes  The PAE PDPEs to validate.
     2194 *
     2195 * @remarks This function -only- checks the reserved bits in the PDPE entries.
     2196 */
     2197VMM_INT_DECL(bool) PGMGstArePaePdpesValid(PVMCPUCC pVCpu, PCX86PDPE paPaePdpes)
     2198{
     2199    Assert(paPaePdpes);
     2200    for (unsigned i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++)
     2201    {
     2202        X86PDPE const PaePdpe = paPaePdpes[i];
     2203        if (   !(PaePdpe.u & X86_PDPE_P)
     2204            || !(PaePdpe.u & pVCpu->pgm.s.fGstPaeMbzPdpeMask))
     2205        { /* likely */ }
     2206        else
     2207            return false;
     2208    }
     2209    return true;
     2210}
     2211
     2212
     2213/**
    21892214 * Performs the lazy mapping of the 32-bit guest PD.
    21902215 *
     
    23802405 * @param   pVCpu   The cross context virtual CPU structure.
    23812406 */
    2382 static void pgmGstUpdatePaePdpes(PVMCPU pVCpu)
     2407static void pgmGstFlushPaePdpes(PVMCPU pVCpu)
    23832408{
    23842409    for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->pgm.s.aGCPhysGstPaePDs); i++)
     
    23922417
    23932418/**
     2419 * Gets the PGM CR3 value masked according to the current guest mode.
     2420 *
     2421 * @returns The masked PGM CR3 value.
     2422 * @param   pVCpu       The cross context virtual CPU structure.
     2423 * @param   uCr3    The raw guest CR3 value.
     2424 */
     2425DECLINLINE(RTGCPHYS) pgmGetGuestMaskedCr3(PVMCPUCC pVCpu, uint64_t uCr3)
     2426{
     2427    RTGCPHYS GCPhysCR3;
     2428    switch (pVCpu->pgm.s.enmGuestMode)
     2429    {
     2430        case PGMMODE_PAE:
     2431        case PGMMODE_PAE_NX:
     2432            GCPhysCR3 = (RTGCPHYS)(uCr3 & X86_CR3_PAE_PAGE_MASK);
     2433            break;
     2434        case PGMMODE_AMD64:
     2435        case PGMMODE_AMD64_NX:
     2436            GCPhysCR3 = (RTGCPHYS)(uCr3 & X86_CR3_AMD64_PAGE_MASK);
     2437            break;
     2438        default:
     2439            GCPhysCR3 = (RTGCPHYS)(uCr3 & X86_CR3_PAGE_MASK);
     2440            break;
     2441    }
     2442    PGM_A20_APPLY_TO_VAR(pVCpu, GCPhysCR3);
     2443    return GCPhysCR3;
     2444}
     2445
     2446
     2447/**
    23942448 * Performs and schedules necessary updates following a CR3 load or reload.
    23952449 *
     
    23992453 * @retval  VINF_PGM_SYNC_CR3 if monitoring requires a CR3 sync. This can
    24002454 *          safely be ignored and overridden since the FF will be set too then.
    2401  * @param   pVCpu       The cross context virtual CPU structure.
    2402  * @param   cr3         The new cr3.
    2403  * @param   fGlobal     Indicates whether this is a global flush or not.
    2404  */
    2405 VMMDECL(int) PGMFlushTLB(PVMCPUCC pVCpu, uint64_t cr3, bool fGlobal)
     2455 * @param   pVCpu           The cross context virtual CPU structure.
     2456 * @param   cr3             The new cr3.
     2457 * @param   fGlobal         Indicates whether this is a global flush or not.
     2458 * @param   fPdpesMapped    Whether the PAE PDPEs (and PDPT) have been mapped.
     2459 */
     2460VMMDECL(int) PGMFlushTLB(PVMCPUCC pVCpu, uint64_t cr3, bool fGlobal, bool fPdpesMapped)
    24062461{
    24072462    STAM_PROFILE_START(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,FlushTLB), a);
     
    24232478     */
    24242479    int rc = VINF_SUCCESS;
    2425     RTGCPHYS GCPhysCR3;
    2426     switch (pVCpu->pgm.s.enmGuestMode)
    2427     {
    2428         case PGMMODE_PAE:
    2429         case PGMMODE_PAE_NX:
    2430             GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
    2431             break;
    2432         case PGMMODE_AMD64:
    2433         case PGMMODE_AMD64_NX:
    2434             GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_AMD64_PAGE_MASK);
    2435             break;
    2436         default:
    2437             GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
    2438             break;
    2439     }
    2440     PGM_A20_APPLY_TO_VAR(pVCpu, GCPhysCR3);
    2441 
    24422480    RTGCPHYS const GCPhysOldCR3 = pVCpu->pgm.s.GCPhysCR3;
     2481    RTGCPHYS const GCPhysCR3    = pgmGetGuestMaskedCr3(pVCpu, cr3);
    24432482    if (GCPhysOldCR3 != GCPhysCR3)
    24442483    {
     
    24482487
    24492488        pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
    2450         rc = g_aPgmBothModeData[idxBth].pfnMapCR3(pVCpu, GCPhysCR3);
     2489        rc = g_aPgmBothModeData[idxBth].pfnMapCR3(pVCpu, GCPhysCR3, fPdpesMapped);
    24512490        if (RT_LIKELY(rc == VINF_SUCCESS))
    24522491        {
     
    24942533
    24952534        /*
    2496          * Update PAE PDPTEs.
     2535         * Flush PAE PDPTEs.
    24972536         */
    24982537        if (PGMMODE_IS_PAE(pVCpu->pgm.s.enmGuestMode))
    2499             pgmGstUpdatePaePdpes(pVCpu);
     2538            pgmGstFlushPaePdpes(pVCpu);
    25002539    }
    25012540
     
    25202559 *          paging modes).  This can safely be ignored and overridden since the
    25212560 *          FF will be set too then.
    2522  * @param   pVCpu       The cross context virtual CPU structure.
    2523  * @param   cr3         The new cr3.
    2524  */
    2525 VMMDECL(int) PGMUpdateCR3(PVMCPUCC pVCpu, uint64_t cr3)
     2561 * @param   pVCpu           The cross context virtual CPU structure.
     2562 * @param   cr3             The new CR3.
     2563 * @param   fPdpesMapped    Whether the PAE PDPEs (and PDPT) have been mapped.
     2564 */
     2565VMMDECL(int) PGMUpdateCR3(PVMCPUCC pVCpu, uint64_t cr3, bool fPdpesMapped)
    25262566{
    25272567    VMCPU_ASSERT_EMT(pVCpu);
     
    25372577     */
    25382578    int rc = VINF_SUCCESS;
    2539     RTGCPHYS GCPhysCR3;
    2540     switch (pVCpu->pgm.s.enmGuestMode)
    2541     {
    2542         case PGMMODE_PAE:
    2543         case PGMMODE_PAE_NX:
    2544             GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
    2545             break;
    2546         case PGMMODE_AMD64:
    2547         case PGMMODE_AMD64_NX:
    2548             GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_AMD64_PAGE_MASK);
    2549             break;
    2550         default:
    2551             GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
    2552             break;
    2553     }
    2554     PGM_A20_APPLY_TO_VAR(pVCpu, GCPhysCR3);
    2555 
     2579    RTGCPHYS const GCPhysCR3 = pgmGetGuestMaskedCr3(pVCpu, cr3);
    25562580    if (pVCpu->pgm.s.GCPhysCR3 != GCPhysCR3)
    25572581    {
     
    25612585
    25622586        pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
    2563         rc = g_aPgmBothModeData[idxBth].pfnMapCR3(pVCpu, GCPhysCR3);
     2587        rc = g_aPgmBothModeData[idxBth].pfnMapCR3(pVCpu, GCPhysCR3, fPdpesMapped);
    25642588
    25652589        AssertRCSuccess(rc); /* Assumes VINF_PGM_SYNC_CR3 doesn't apply to nested paging. */ /** @todo this isn't true for the mac, but we need hw to test/fix this. */
    25662590    }
    25672591    /*
    2568      * Update PAE PDPTEs.
     2592     * Flush PAE PDPTEs.
    25692593     */
    25702594    else if (PGMMODE_IS_PAE(pVCpu->pgm.s.enmGuestMode))
    2571         pgmGstUpdatePaePdpes(pVCpu);
     2595        pgmGstFlushPaePdpes(pVCpu);
    25722596
    25732597    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
     
    26362660        pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MAP_CR3;
    26372661
    2638         RTGCPHYS GCPhysCR3Old = pVCpu->pgm.s.GCPhysCR3; NOREF(GCPhysCR3Old);
    2639         RTGCPHYS GCPhysCR3;
    2640         switch (pVCpu->pgm.s.enmGuestMode)
    2641         {
    2642             case PGMMODE_PAE:
    2643             case PGMMODE_PAE_NX:
    2644                 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
    2645                 break;
    2646             case PGMMODE_AMD64:
    2647             case PGMMODE_AMD64_NX:
    2648                 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_AMD64_PAGE_MASK);
    2649                 break;
    2650             default:
    2651                 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
    2652                 break;
    2653         }
    2654         PGM_A20_APPLY_TO_VAR(pVCpu, GCPhysCR3);
    2655 
     2662        RTGCPHYS const GCPhysCR3Old = pVCpu->pgm.s.GCPhysCR3; NOREF(GCPhysCR3Old);
     2663        RTGCPHYS const GCPhysCR3    = pgmGetGuestMaskedCr3(pVCpu, cr3);
    26562664        if (pVCpu->pgm.s.GCPhysCR3 != GCPhysCR3)
    26572665        {
     
    26602668            AssertReturn(g_aPgmBothModeData[idxBth].pfnMapCR3, VERR_PGM_MODE_IPE);
    26612669            pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
    2662             rc = g_aPgmBothModeData[idxBth].pfnMapCR3(pVCpu, GCPhysCR3);
     2670            rc = g_aPgmBothModeData[idxBth].pfnMapCR3(pVCpu, GCPhysCR3, false /* fPdpesMapped */);
    26632671        }
    26642672
     
    27242732        PGM_INVL_VCPU_TLBS(pVCpu);
    27252733    return rc;
     2734}
     2735
     2736
     2737/**
     2738 * Maps all the PAE PDPE entries.
     2739 *
     2740 * @returns VBox status code.
     2741 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
     2742 * @param   paPaePdpes  The new PAE PDPE values.
     2743 *
     2744 * @remarks This function may be invoked during the process of changing the guest
     2745 *          paging mode to PAE, hence the guest state (CR0, CR4 etc.) may not
     2746 *          reflect PAE paging just yet.
     2747 */
     2748VMM_INT_DECL(int) PGMGstMapPaePdpes(PVMCPUCC pVCpu, PCX86PDPE paPaePdpes)
     2749{
     2750    Assert(paPaePdpes);
     2751    for (unsigned i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++)
     2752    {
     2753        X86PDPE const PaePdpe = paPaePdpes[i];
     2754
     2755        /*
     2756         * In some cases (e.g. in SVM with nested paging) the validation of the PAE PDPEs
     2757         * are deferred.[1] Also, different situations require different handling of invalid
     2758         * PDPE entries. Here we assume the caller has already validated or doesn't require
     2759         * validation of the PDPEs.
     2760         *
     2761         * [1] -- See AMD spec. 15.25.10 "Legacy PAE Mode".
     2762         */
     2763        if ((PaePdpe.u & (pVCpu->pgm.s.fGstPaeMbzPdpeMask | X86_PDPE_P)) == X86_PDPE_P)
     2764        {
     2765            PVMCC   pVM = pVCpu->CTX_SUFF(pVM);
     2766            RTHCPTR HCPtr;
     2767            RTGCPHYS const GCPhys = PGM_A20_APPLY(pVCpu, PaePdpe.u & X86_PDPE_PG_MASK);
     2768
     2769            PGM_LOCK_VOID(pVM);
     2770            PPGMPAGE    pPage  = pgmPhysGetPage(pVM, GCPhys);
     2771            AssertReturnStmt(pPage, PGM_UNLOCK(pVM), VERR_PGM_INVALID_PDPE_ADDR);
     2772            int const rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhys, (void **)&HCPtr);
     2773            PGM_UNLOCK(pVM);
     2774            if (RT_SUCCESS(rc))
     2775            {
     2776#  ifdef IN_RING3
     2777                pVCpu->pgm.s.apGstPaePDsR3[i]    = (PX86PDPAE)HCPtr;
     2778                pVCpu->pgm.s.apGstPaePDsR0[i]    = NIL_RTR0PTR;
     2779#  else
     2780                pVCpu->pgm.s.apGstPaePDsR3[i]    = NIL_RTR3PTR;
     2781                pVCpu->pgm.s.apGstPaePDsR0[i]    = (PX86PDPAE)HCPtr;
     2782#  endif
     2783                pVCpu->pgm.s.aGCPhysGstPaePDs[i] = GCPhys;
     2784                continue;
     2785            }
     2786            AssertMsgFailed(("PGMPhysMapPaePdpes: rc2=%d GCPhys=%RGp i=%d\n", rc, GCPhys, i));
     2787        }
     2788        pVCpu->pgm.s.apGstPaePDsR3[i]    = 0;
     2789        pVCpu->pgm.s.apGstPaePDsR0[i]    = 0;
     2790        pVCpu->pgm.s.aGCPhysGstPaePDs[i] = NIL_RTGCPHYS;
     2791    }
     2792
     2793    return VINF_SUCCESS;
     2794}
     2795
     2796
     2797/**
     2798 * Validates and maps the PDPT and PAE PDPEs referenced by the given CR3.
     2799 *
     2800 * @returns VBox status code.
     2801 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
     2802 * @param   cr3     The guest CR3 value.
     2803 *
     2804 * @remarks This function may be invoked during the process of changing the guest
     2805 *          paging mode to PAE but the guest state (CR0, CR4 etc.) may not reflect
     2806 *          PAE paging just yet.
     2807 */
     2808VMM_INT_DECL(int) PGMGstMapPaePdpesAtCr3(PVMCPUCC pVCpu, uint64_t cr3)
     2809{
     2810    /*
     2811     * Read the page-directory-pointer table (PDPT) at CR3.
     2812     */
     2813    PVMCC pVM = pVCpu->CTX_SUFF(pVM);
     2814    RTGCPHYS GCPhysCR3 = (cr3 & X86_CR3_PAE_PAGE_MASK);
     2815    PGM_A20_APPLY_TO_VAR(pVCpu, GCPhysCR3);
     2816
     2817    PGM_LOCK_VOID(pVM);
     2818    PPGMPAGE pPageCR3  = pgmPhysGetPage(pVM, GCPhysCR3);
     2819    AssertReturnStmt(pPageCR3, PGM_UNLOCK(pVM), VERR_PGM_INVALID_CR3_ADDR);
     2820
     2821    X86PDPE aPaePdpes[X86_PG_PAE_PDPE_ENTRIES];
     2822    RTHCPTR HCPtrGuestCr3;
     2823    int rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPageCR3, GCPhysCR3, (void **)&HCPtrGuestCr3);
     2824    PGM_UNLOCK(pVM);
     2825    AssertRCReturn(rc, rc);
     2826    memcpy(&aPaePdpes[0], HCPtrGuestCr3, sizeof(aPaePdpes));
     2827
     2828    /*
     2829     * Validate the page-directory-pointer table entries (PDPE).
     2830     */
     2831    if (PGMGstArePaePdpesValid(pVCpu, &aPaePdpes[0]))
     2832    {
     2833        /*
     2834         * Map the PDPT.
     2835         * We deliberately don't update PGM's GCPhysCR3 here as it's expected
     2836         * that PGMFlushTLB will be called soon and only a change to CR3 then
     2837         * will cause the shadow page tables to be updated.
     2838         */
     2839#  ifdef IN_RING3
     2840        pVCpu->pgm.s.pGstPaePdptR3 = (PX86PDPT)HCPtrGuestCr3;
     2841        pVCpu->pgm.s.pGstPaePdptR0 = NIL_RTR0PTR;
     2842#  else
     2843        pVCpu->pgm.s.pGstPaePdptR3 = NIL_RTR3PTR;
     2844        pVCpu->pgm.s.pGstPaePdptR0 = (PX86PDPT)HCPtrGuestCr3;
     2845#  endif
     2846
     2847        /*
     2848         * Update CPUM.
     2849         * We do this prior to mapping the PDPEs to keep the order consistent
     2850         * with what's used in HM. In practice, it doesn't really matter.
     2851         */
     2852        CPUMSetGuestPaePdpes(pVCpu, &aPaePdpes[0]);
     2853
     2854        /*
     2855         * Map the PDPEs.
     2856         */
     2857        return PGMGstMapPaePdpes(pVCpu, &aPaePdpes[0]);
     2858    }
     2859    return VERR_PGM_PAE_PDPE_RSVD;
    27262860}
    27272861
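
The new pgmGetGuestMaskedCr3 helper above centralises the mode-dependent masking of CR3 that was previously repeated in PGMFlushTLB, PGMUpdateCR3 and a third CR3 path in this file. Architecturally, a PAE PDPT is 32-byte aligned (CR3 bits 31:5), while a long-mode PML4 and a 32-bit page directory are 4 KiB aligned. A stand-alone sketch follows; the constants are the architectural masks rather than the X86_CR3_*_PAGE_MASK macros, and the A20 masking is omitted:

    #include <stdint.h>

    enum paging_mode { MODE_32BIT, MODE_PAE, MODE_AMD64 };

    /* Mask a raw CR3 value down to the physical base of the top-level paging
     * structure for the given guest mode. */
    static uint64_t masked_cr3(enum paging_mode mode, uint64_t cr3)
    {
        switch (mode)
        {
            case MODE_PAE:   return cr3 & UINT64_C(0xffffffe0);          /* PDPT, 32-byte aligned */
            case MODE_AMD64: return cr3 & UINT64_C(0x000ffffffffff000);  /* PML4, 4 KiB aligned   */
            default:         return cr3 & UINT64_C(0xfffff000);          /* 32-bit PD, 4 KiB aligned */
        }
    }
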
  • trunk/src/VBox/VMM/VMMAll/PGMAllBth.h

    r91271 r91580  
    5353PGM_BTH_DECL(unsigned, AssertCR3)(PVMCPUCC pVCpu, uint64_t cr3, uint64_t cr4, RTGCPTR GCPtr = 0, RTGCPTR cb = ~(RTGCPTR)0);
    5454#endif
    55 PGM_BTH_DECL(int, MapCR3)(PVMCPUCC pVCpu, RTGCPHYS GCPhysCR3);
     55PGM_BTH_DECL(int, MapCR3)(PVMCPUCC pVCpu, RTGCPHYS GCPhysCR3, bool fPdpesMapped);
    5656PGM_BTH_DECL(int, UnmapCR3)(PVMCPUCC pVCpu);
    5757
     
    42954295 *
    42964296 * @param   pVCpu           The cross context virtual CPU structure.
    4297  * @param   GCPhysCR3       The physical address in the CR3 register.  (A20
    4298  *                          mask already applied.)
     4297 * @param   GCPhysCR3       The physical address in the CR3 register. (A20 mask
     4298 *                          already applied.)
     4299 * @param   fPdpesMapped    Whether the PAE PDPEs (and PDPT) have been mapped.
    42994300 */
    4300 PGM_BTH_DECL(int, MapCR3)(PVMCPUCC pVCpu, RTGCPHYS GCPhysCR3)
     4301PGM_BTH_DECL(int, MapCR3)(PVMCPUCC pVCpu, RTGCPHYS GCPhysCR3, bool fPdpesMapped)
    43014302{
    43024303    PVMCC pVM = pVCpu->CTX_SUFF(pVM); NOREF(pVM);
     4304    int rc = VINF_SUCCESS;
    43034305
    43044306    /* Update guest paging info. */
     
    43104312    PGM_A20_ASSERT_MASKED(pVCpu, GCPhysCR3);
    43114313
    4312     /*
    4313      * Map the page CR3 points at.
    4314      */
    4315     RTHCPTR     HCPtrGuestCR3;
    4316     PGM_LOCK_VOID(pVM);
    4317     PPGMPAGE    pPageCR3 = pgmPhysGetPage(pVM, GCPhysCR3);
    4318     AssertReturn(pPageCR3, VERR_PGM_INVALID_CR3_ADDR);
    4319     /** @todo this needs some reworking wrt. locking?  */
    4320     int rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPageCR3, GCPhysCR3 & GST_CR3_PAGE_MASK, (void **)&HCPtrGuestCR3); /** @todo r=bird: This GCPhysCR3 masking isn't necessary. */
    4321     PGM_UNLOCK(pVM);
    4322     if (RT_SUCCESS(rc))
    4323     {
     4314# if PGM_GST_TYPE == PGM_TYPE_PAE
     4315    if (!fPdpesMapped)
     4316# else
     4317    NOREF(fPdpesMapped);
     4318#endif
     4319    {
     4320        /*
     4321         * Map the page CR3 points at.
     4322         */
     4323        RTHCPTR     HCPtrGuestCR3;
     4324        PGM_LOCK_VOID(pVM);
     4325        PPGMPAGE    pPageCR3 = pgmPhysGetPage(pVM, GCPhysCR3);
     4326        AssertReturnStmt(pPageCR3, PGM_UNLOCK(pVM), VERR_PGM_INVALID_CR3_ADDR);
     4327        /** @todo this needs some reworking wrt. locking?  */
     4328        rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPageCR3, GCPhysCR3 & GST_CR3_PAGE_MASK, (void **)&HCPtrGuestCR3); /** @todo r=bird: This GCPhysCR3 masking isn't necessary. */
     4329        PGM_UNLOCK(pVM);
     4330        if (RT_SUCCESS(rc))
     4331        {
    43244332# if PGM_GST_TYPE == PGM_TYPE_32BIT
    43254333#  ifdef IN_RING3
    4326         pVCpu->pgm.s.pGst32BitPdR3 = (PX86PD)HCPtrGuestCR3;
    4327         pVCpu->pgm.s.pGst32BitPdR0 = NIL_RTR0PTR;
     4334            pVCpu->pgm.s.pGst32BitPdR3 = (PX86PD)HCPtrGuestCR3;
     4335            pVCpu->pgm.s.pGst32BitPdR0 = NIL_RTR0PTR;
    43284336#  else
    4329         pVCpu->pgm.s.pGst32BitPdR3 = NIL_RTR3PTR;
    4330         pVCpu->pgm.s.pGst32BitPdR0 = (PX86PD)HCPtrGuestCR3;
     4337            pVCpu->pgm.s.pGst32BitPdR3 = NIL_RTR3PTR;
     4338            pVCpu->pgm.s.pGst32BitPdR0 = (PX86PD)HCPtrGuestCR3;
    43314339#  endif
    43324340
    43334341# elif PGM_GST_TYPE == PGM_TYPE_PAE
    43344342#  ifdef IN_RING3
    4335         pVCpu->pgm.s.pGstPaePdptR3 = (PX86PDPT)HCPtrGuestCR3;
    4336         pVCpu->pgm.s.pGstPaePdptR0 = NIL_RTR0PTR;
     4343            pVCpu->pgm.s.pGstPaePdptR3 = (PX86PDPT)HCPtrGuestCR3;
     4344            pVCpu->pgm.s.pGstPaePdptR0 = NIL_RTR0PTR;
    43374345#  else
    4338         pVCpu->pgm.s.pGstPaePdptR3 = NIL_RTR3PTR;
    4339         pVCpu->pgm.s.pGstPaePdptR0 = (PX86PDPT)HCPtrGuestCR3;
    4340 #  endif
    4341 
    4342         /*
    4343          * Map the 4 PDs too.
    4344          */
    4345         X86PDPE aGstPaePdpes[X86_PG_PAE_PDPE_ENTRIES];
    4346         memcpy(&aGstPaePdpes, HCPtrGuestCR3, sizeof(aGstPaePdpes));
    4347         CPUMSetGuestPaePdpes(pVCpu, &aGstPaePdpes[0]);
    4348         for (unsigned i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++)
    4349         {
    4350             X86PDPE PaePdpe = aGstPaePdpes[i];
    4351             if (PaePdpe.u & X86_PDPE_P)
    4352             {
    4353                 RTHCPTR     HCPtr;
    4354                 RTGCPHYS    GCPhys = PGM_A20_APPLY(pVCpu, PaePdpe.u & X86_PDPE_PG_MASK);
    4355                 PGM_LOCK_VOID(pVM);
    4356                 PPGMPAGE    pPage  = pgmPhysGetPage(pVM, GCPhys);
    4357                 AssertReturn(pPage, VERR_PGM_INVALID_PDPE_ADDR);
    4358                 int rc2 = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhys, (void **)&HCPtr);
    4359                 PGM_UNLOCK(pVM);
    4360                 if (RT_SUCCESS(rc2))
    4361                 {
    4362 #  ifdef IN_RING3
    4363                     pVCpu->pgm.s.apGstPaePDsR3[i]     = (PX86PDPAE)HCPtr;
    4364                     pVCpu->pgm.s.apGstPaePDsR0[i]     = NIL_RTR0PTR;
    4365 #  else
    4366                     pVCpu->pgm.s.apGstPaePDsR3[i]     = NIL_RTR3PTR;
    4367                     pVCpu->pgm.s.apGstPaePDsR0[i]     = (PX86PDPAE)HCPtr;
    4368 #  endif
    4369                     pVCpu->pgm.s.aGCPhysGstPaePDs[i]  = GCPhys;
    4370                     continue;
    4371                 }
    4372                 AssertMsgFailed(("pgmR3Gst32BitMapCR3: rc2=%d GCPhys=%RGp i=%d\n", rc2, GCPhys, i));
    4373             }
    4374 
    4375             pVCpu->pgm.s.apGstPaePDsR3[i]     = 0;
    4376             pVCpu->pgm.s.apGstPaePDsR0[i]     = 0;
    4377             pVCpu->pgm.s.aGCPhysGstPaePDs[i]  = NIL_RTGCPHYS;
    4378         }
     4346            pVCpu->pgm.s.pGstPaePdptR3 = NIL_RTR3PTR;
     4347            pVCpu->pgm.s.pGstPaePdptR0 = (PX86PDPT)HCPtrGuestCR3;
     4348#  endif
     4349
     4350            /*
     4351             * Update CPUM and map the 4 PDs too.
     4352             */
     4353            X86PDPE aGstPaePdpes[X86_PG_PAE_PDPE_ENTRIES];
     4354            memcpy(&aGstPaePdpes, HCPtrGuestCR3, sizeof(aGstPaePdpes));
     4355            CPUMSetGuestPaePdpes(pVCpu, &aGstPaePdpes[0]);
     4356            PGMGstMapPaePdpes(pVCpu, &aGstPaePdpes[0]);
    43794357
    43804358# elif PGM_GST_TYPE == PGM_TYPE_AMD64
    43814359#  ifdef IN_RING3
    4382         pVCpu->pgm.s.pGstAmd64Pml4R3 = (PX86PML4)HCPtrGuestCR3;
    4383         pVCpu->pgm.s.pGstAmd64Pml4R0 = NIL_RTR0PTR;
     4360            pVCpu->pgm.s.pGstAmd64Pml4R3 = (PX86PML4)HCPtrGuestCR3;
     4361            pVCpu->pgm.s.pGstAmd64Pml4R0 = NIL_RTR0PTR;
    43844362#  else
    4385         pVCpu->pgm.s.pGstAmd64Pml4R3 = NIL_RTR3PTR;
    4386         pVCpu->pgm.s.pGstAmd64Pml4R0 = (PX86PML4)HCPtrGuestCR3;
    4387 #  endif
    4388 # endif
    4389     }
    4390     else
    4391         AssertMsgFailed(("rc=%Rrc GCPhysGuestPD=%RGp\n", rc, GCPhysCR3));
    4392 
     4363            pVCpu->pgm.s.pGstAmd64Pml4R3 = NIL_RTR3PTR;
     4364            pVCpu->pgm.s.pGstAmd64Pml4R0 = (PX86PML4)HCPtrGuestCR3;
     4365#  endif
     4366# endif
     4367        }
     4368        else
     4369            AssertMsgFailed(("rc=%Rrc GCPhysGuestPD=%RGp\n", rc, GCPhysCR3));
     4370    }
    43934371#else /* prot/real stub */
    4394     int rc = VINF_SUCCESS;
     4372    NOREF(fPdpesMapped);
    43954373#endif
    43964374
     
    44224400
    44234401    Assert(!(GCPhysCR3 >> (PAGE_SHIFT + 32)));
    4424     rc = pgmPoolAlloc(pVM, GCPhysCR3 & GST_CR3_PAGE_MASK, BTH_PGMPOOLKIND_ROOT, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
    4425                       NIL_PGMPOOL_IDX, UINT32_MAX, true /*fLockPage*/,
    4426                       &pNewShwPageCR3);
    4427     AssertFatalRC(rc);
    4428     rc = VINF_SUCCESS;
     4402    int const rc2 = pgmPoolAlloc(pVM, GCPhysCR3 & GST_CR3_PAGE_MASK, BTH_PGMPOOLKIND_ROOT, PGMPOOLACCESS_DONTCARE,
     4403                                 PGM_A20_IS_ENABLED(pVCpu), NIL_PGMPOOL_IDX, UINT32_MAX, true /*fLockPage*/, &pNewShwPageCR3);
     4404    AssertFatalRC(rc2);
    44294405
    44304406    pVCpu->pgm.s.CTX_SUFF(pShwPageCR3) = pNewShwPageCR3;
     
    44444420    Assert(VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL) || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
    44454421#   endif
    4446     rc = pgmMapActivateCR3(pVM, pNewShwPageCR3);
    4447     AssertRCReturn(rc, rc);
     4422    int const rc3 = pgmMapActivateCR3(pVM, pNewShwPageCR3);
     4423    AssertRCReturn(rc3, rc3);
    44484424#  endif
    44494425
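For orientation, the new fPdpesMapped parameter only matters in the PAE guest-mode build of MapCR3: when it is set, the PDPT read, the CPUM update and the per-directory mapping are skipped because the caller has already done them, while the 32-bit and AMD64 builds simply NOREF it. The snippet below is a minimal stand-alone model of that gating, not VirtualBox code; CPUSTATE, readPdptFromMemory and mapCr3Pae are illustrative stand-ins, and the idea that a nested VM-entry path would pass true after loading the guest PDPEs itself is an inference from this changeset's purpose rather than something shown in the hunks above.

    /*
     * Minimal model of the fPdpesMapped gating in the PAE MapCR3 path.
     * All types and helpers here are simplified stand-ins, not VirtualBox APIs.
     */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define PDPE_ENTRIES 4

    typedef struct CPUSTATE
    {
        uint64_t aPaePdpes[PDPE_ENTRIES];   /* cached guest PDPEs (stand-in for CPUM state) */
    } CPUSTATE;

    /* Stand-in for reading the PDPT page that guest CR3 points at. */
    static void readPdptFromMemory(const uint64_t *pPdptPage, uint64_t *paPdpes)
    {
        memcpy(paPdpes, pPdptPage, PDPE_ENTRIES * sizeof(uint64_t));
    }

    /*
     * Model of the PAE branch: when fPdpesMapped is true the caller has already
     * supplied and mapped the PDPEs (assumed here to be a nested VM-entry path),
     * so the PDPT read and per-directory mapping are skipped.
     */
    static void mapCr3Pae(CPUSTATE *pCpu, const uint64_t *pPdptPage, bool fPdpesMapped)
    {
        if (!fPdpesMapped)
        {
            readPdptFromMemory(pPdptPage, pCpu->aPaePdpes);
            /* ...map the four page directories the present PDPEs point at... */
        }
        /* ...shadow root allocation and activation continue as before... */
    }

    int main(void)
    {
        CPUSTATE Cpu;
        memset(&Cpu, 0, sizeof(Cpu));
        uint64_t aPdptPage[PDPE_ENTRIES] = { 0x1001, 0x2001, 0x3001, 0x4001 };

        mapCr3Pae(&Cpu, aPdptPage, false /* fPdpesMapped */);  /* ordinary CR3 load */
        mapCr3Pae(&Cpu, aPdptPage, true  /* fPdpesMapped */);  /* PDPEs supplied by the caller */
        printf("PDPE0=%#llx\n", (unsigned long long)Cpu.aPaePdpes[0]);
        return 0;
    }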
  • trunk/src/VBox/VMM/VMMAll/PGMAllGst.h

    r86487 r91580  
    5353    AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
    5454    AssertReturn(g_aPgmBothModeData[idxBth].pfnMapCR3, VERR_PGM_MODE_IPE);
    55     return g_aPgmBothModeData[idxBth].pfnMapCR3(pVCpu, GCPhysCR3);
     55    return g_aPgmBothModeData[idxBth].pfnMapCR3(pVCpu, GCPhysCR3, false /* fPdpesMapped */);
    5656}
    5757
  • trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp

    r91308 r91580  
    29242924    {
    29252925        AssertMsg(pCtx->cr3 == pVmcbGuest->u64CR3, ("cr3=%#RX64 vmcb_cr3=%#RX64\n", pCtx->cr3, pVmcbGuest->u64CR3));
    2926         PGMUpdateCR3(pVCpu, pCtx->cr3);
     2926        PGMUpdateCR3(pVCpu, pCtx->cr3, false /* fPdpesMapped */);
    29272927    }
    29282928}
     
    39943994    /* Could happen as a result of longjump. */
    39953995    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
    3996         PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
     3996        PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu), false /* fPdpesMapped */);
    39973997
    39983998    /* Update pending interrupts into the APIC's IRR. */
  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp

    r91358 r91580  
    80538053        {
    80548054            Assert(!(ASMAtomicUoReadU64(&pCtx->fExtrn) & CPUMCTX_EXTRN_CR3));
    8055             PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
    8056         }
    8057         Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
     8055            PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu), false /* fPdpesMapped */);
     8056            Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
     8057        }
    80588058    }
    80598059
     
    1090210902    {
    1090310903        Assert(!(ASMAtomicUoReadU64(&pVCpu->cpum.GstCtx.fExtrn) & CPUMCTX_EXTRN_CR3));
    10904         int rc2 = PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
     10904        int rc2 = PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu), false /* fPdpesMapped */);
    1090510905        AssertMsgReturn(rc2 == VINF_SUCCESS || rc2 == VINF_PGM_SYNC_CR3,
    1090610906                        ("%Rrc\n", rc2), RT_FAILURE_NP(rc2) ? rc2 : VERR_IPE_UNEXPECTED_INFO_STATUS);
  • trunk/src/VBox/VMM/VMMR0/NEMR0Native-win.cpp

    r91323 r91580  
    21902190        {
    21912191            LogFlow(("nemR0WinImportState: -> PGMUpdateCR3!\n"));
    2192             rc = PGMUpdateCR3(pGVCpu, pCtx->cr3);
    2193             AssertMsgReturn(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc), RT_FAILURE_NP(rc) ? rc : VERR_NEM_IPE_2);
     2192            rc = PGMUpdateCR3(pGVCpu, pCtx->cr3, false /*fPdpesMapped*/);
     2193            if (rc == VINF_SUCCESS)
     2194            { /* likely */ }
     2195            else
     2196                AssertMsgFailedReturn(("rc=%Rrc\n", rc), RT_FAILURE_NP(rc) ? rc : VERR_NEM_IPE_2);
    21942197        }
    21952198        else
  • trunk/src/VBox/VMM/VMMR3/EM.cpp

    r91271 r91580  
    14521452    {
    14531453        CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_EFER, rc);
    1454         int rc2 = PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
     1454        int const rc2 = PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu), false /* fPdpesMapped */);
    14551455        if (RT_FAILURE(rc2))
    14561456            return rc2;
  • trunk/src/VBox/VMM/VMMR3/NEMR3Native-win.cpp

    r88654 r91580  
    17121712                {
    17131713                    LogFlow(("nemR3NativeRunGC: calling PGMFlushTLB...\n"));
    1714                     int rc = PGMFlushTLB(pVCpu, CPUMGetGuestCR3(pVCpu), true);
     1714                    int rc = PGMFlushTLB(pVCpu, CPUMGetGuestCR3(pVCpu), true /*fGlobal*/, false /*fPdpesMapped*/);
    17151715                    AssertRCReturn(rc, rc);
    17161716                    if (rcStrict == VINF_NEM_FLUSH_TLB)
  • trunk/src/VBox/VMM/VMMR3/PGM.cpp

    r91271 r91580  
    23432343    uintptr_t idxBth = pVCpu->pgm.s.idxBothModeData;
    23442344    if (   idxBth < RT_ELEMENTS(g_aPgmBothModeData)
    2345         && g_aPgmBothModeData[idxBth].pfnMapCR3)
     2345        && g_aPgmBothModeData[idxBth].pfnUnmapCR3)
    23462346    {
    23472347        rc = g_aPgmBothModeData[idxBth].pfnUnmapCR3(pVCpu);
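Beyond the fPdpesMapped plumbing, this PGM.cpp hunk fixes a guard/call mismatch: the lookup previously tested pfnMapCR3 before invoking pfnUnmapCR3, so the guard now checks the callback that is actually called. The fragment below is a tiny self-contained model of the corrected pattern; MODEDATA, modeUnmapCr3 and unmapCr3 are made-up names, not the real PGM mode tables.

    /* Simplified model of guarding on the callback that is actually invoked. */
    #include <stddef.h>
    #include <stdio.h>

    typedef struct MODEDATA
    {
        int (*pfnMapCR3)(void);
        int (*pfnUnmapCR3)(void);
    } MODEDATA;

    static int modeUnmapCr3(void) { return 0; }

    /* A mode entry may provide one callback and not the other. */
    static const MODEDATA g_aModeData[] =
    {
        { NULL, modeUnmapCr3 },
    };

    static int unmapCr3(size_t idxMode)
    {
        if (   idxMode < sizeof(g_aModeData) / sizeof(g_aModeData[0])
            && g_aModeData[idxMode].pfnUnmapCR3)    /* check the pointer being called */
            return g_aModeData[idxMode].pfnUnmapCR3();
        return -1;
    }

    int main(void)
    {
        printf("unmapCr3(0) -> %d\n", unmapCr3(0));
        return 0;
    }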
  • trunk/src/VBox/VMM/include/PGMInternal.h

    r91450 r91580  
    27982798    DECLCALLBACKMEMBER(int, pfnPrefetchPage,(PVMCPUCC pVCpu, RTGCPTR GCPtrPage));
    27992799    DECLCALLBACKMEMBER(int, pfnVerifyAccessSyncPage,(PVMCPUCC pVCpu, RTGCPTR GCPtrPage, unsigned fFlags, unsigned uError));
    2800     DECLCALLBACKMEMBER(int, pfnMapCR3,(PVMCPUCC pVCpu, RTGCPHYS GCPhysCR3));
     2800    DECLCALLBACKMEMBER(int, pfnMapCR3,(PVMCPUCC pVCpu, RTGCPHYS GCPhysCR3, bool fPdpesMapped));
    28012801    DECLCALLBACKMEMBER(int, pfnUnmapCR3,(PVMCPUCC pVCpu));
    28022802    DECLCALLBACKMEMBER(int, pfnEnter,(PVMCPUCC pVCpu, RTGCPHYS GCPhysCR3));
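Because pfnMapCR3 is a member of the both-mode data table, widening its signature with bool fPdpesMapped forces every mode-specific implementation and every dispatch site (such as the PGMAllGst.h call above, which passes false) to change together, and the compiler can flag any caller that is missed. A hedged sketch of that pattern with placeholder names (MODEDATA, myMapCR3 and mapGuestCr3 are illustrative, not the real PGM structures):

    /* Simplified model of adding a parameter to a mode-table callback. */
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef struct MODEDATA
    {
        /* The extra bool parameter mirrors the fPdpesMapped addition. */
        int (*pfnMapCR3)(uint64_t GCPhysCR3, bool fPdpesMapped);
    } MODEDATA;

    static int myMapCR3(uint64_t GCPhysCR3, bool fPdpesMapped)
    {
        printf("map CR3 %#llx, fPdpesMapped=%d\n",
               (unsigned long long)GCPhysCR3, fPdpesMapped);
        return 0;
    }

    static const MODEDATA g_aModeData[] = { { myMapCR3 } };

    /* Legacy dispatch path: the PDPEs have not been mapped by the caller. */
    static int mapGuestCr3(size_t idxMode, uint64_t GCPhysCR3)
    {
        if (   idxMode < sizeof(g_aModeData) / sizeof(g_aModeData[0])
            && g_aModeData[idxMode].pfnMapCR3)
            return g_aModeData[idxMode].pfnMapCR3(GCPhysCR3, false /* fPdpesMapped */);
        return -1;
    }

    int main(void)
    {
        return mapGuestCr3(0, 0x1000);
    }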