VirtualBox

Changeset 92566 in vbox


Ignore:
Timestamp:
Nov 23, 2021 3:29:41 PM (3 years ago)
Author:
vboxsync
svn:sync-xref-src-repo-rev:
148448
Message:

VMM/IEM: Nested VMX: bugref:10092 We need to declare we have entered/exited VMX non-root mode prior to loading guest state since PGM (and others in future) might need this information while loading guest state.

File:
1 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h

    r92546 r92566  
    676676
    677677/**
     678 * Determines whether the guest is using PAE paging given the VMCS.
     679 *
     680 * @returns @c true if PAE paging mode is used, @c false otherwise.
     681 * @param   pVmcs       Pointer to the virtual VMCS.
     682 */
     683DECL_FORCE_INLINE(bool) iemVmxVmcsIsGuestPaePagingEnabled(PCVMXVVMCS pVmcs)
     684{
     685    return (   !(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST)
     686            &&  (pVmcs->u64GuestCr4.u & X86_CR4_PAE)
     687            &&  (pVmcs->u64GuestCr0.u & X86_CR0_PG));
     688}
     689
     690
     691/**
    678692 * Sets the Exit qualification VMCS field.
    679693 *
     
    25832597    VMCPU_FF_CLEAR_MASK(pVCpu, VMCPU_FF_VMX_ALL_MASK);
    25842598
     2599    /*
     2600     * We're no longer in nested-guest execution mode.
     2601     *
     2602     * It is important to do this prior to loading the host state because
     2603     * PGM looks at fInVmxNonRootMode to determine if it needs to perform
     2604     * second-level address translation while switching to host CR3.
     2605     */
     2606    pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxNonRootMode = false;
     2607
    25852608    /* Restore the host (outer guest) state. */
    25862609    VBOXSTRICTRC rcStrict = iemVmxVmexitLoadHostState(pVCpu, uExitReason);
     
    25922615    else
    25932616        Log3(("vmexit: Loading host-state failed. uExitReason=%u rc=%Rrc\n", uExitReason, VBOXSTRICTRC_VAL(rcStrict)));
    2594 
    2595     /* We're no longer in nested-guest execution mode. */
    2596     pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxNonRootMode = false;
    25972617
    25982618    /* Notify HM that the current VMCS fields have been modified. */
     
    56035623 *
    56045624 * @param   pVCpu           The cross context virtual CPU structure.
    5605  * @param   pfPdpesMapped   Where to store whether PAE PDPTEs (and PDPT) have been
    5606  *                          mapped as part of checking guest state.
    56075625 * @param   pszInstr        The VMX instruction name (for logging purposes).
    56085626 */
    5609 IEM_STATIC int iemVmxVmentryCheckGuestPdptes(PVMCPUCC pVCpu, bool *pfPdpesMapped, const char *pszInstr)
     5627IEM_STATIC int iemVmxVmentryCheckGuestPdptes(PVMCPUCC pVCpu, const char *pszInstr)
    56105628{
    56115629    /*
     
    56155633    PVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
    56165634    const char * const pszFailure = "VM-exit";
    5617     *pfPdpesMapped = false;
    5618 
    5619     if (   !(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST)
    5620         &&  (pVmcs->u64GuestCr4.u & X86_CR4_PAE)
    5621         &&  (pVmcs->u64GuestCr0.u & X86_CR0_PG))
    5622     {
     5635
    56235636#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
    5624         if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_EPT)
    5625         {
    5626             /* Get PDPTEs from the VMCS. */
    5627             X86PDPE aPaePdptes[X86_PG_PAE_PDPE_ENTRIES];
    5628             aPaePdptes[0].u = pVmcs->u64GuestPdpte0.u;
    5629             aPaePdptes[1].u = pVmcs->u64GuestPdpte1.u;
    5630             aPaePdptes[2].u = pVmcs->u64GuestPdpte2.u;
    5631             aPaePdptes[3].u = pVmcs->u64GuestPdpte3.u;
    5632 
    5633             /* Check validity of the PDPTEs. */
    5634             bool const fValid = PGMGstArePaePdpesValid(pVCpu, &aPaePdptes[0]);
    5635             if (fValid)
    5636             { /* likely */ }
    5637             else
    5638             {
    5639                 iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_PDPTE);
    5640                 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPdpte);
    5641             }
    5642         }
    5643         else
     5637    /*
     5638     * When EPT is used, we only validate the PAE PDPTEs provided in the VMCS.
     5639     * Otherwise, we load any PAE PDPTEs referenced by CR3 at a later point.
     5640     */
     5641    if (   iemVmxVmcsIsGuestPaePagingEnabled(pVmcs)
     5642        && (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_EPT))
     5643    {
     5644        /* Get PDPTEs from the VMCS. */
     5645        X86PDPE aPaePdptes[X86_PG_PAE_PDPE_ENTRIES];
     5646        aPaePdptes[0].u = pVmcs->u64GuestPdpte0.u;
     5647        aPaePdptes[1].u = pVmcs->u64GuestPdpte1.u;
     5648        aPaePdptes[2].u = pVmcs->u64GuestPdpte2.u;
     5649        aPaePdptes[3].u = pVmcs->u64GuestPdpte3.u;
     5650
     5651        /* Check validity of the PDPTEs. */
     5652        bool const fValid = PGMGstArePaePdpesValid(pVCpu, &aPaePdptes[0]);
     5653        if (fValid)
     5654        { /* likely */ }
     5655        else
     5656        {
     5657            iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_PDPTE);
     5658            IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPdpte);
     5659        }
     5660    }
    56445661#endif
    5645         {
    5646             int const rc = PGMGstMapPaePdpesAtCr3(pVCpu, pVmcs->u64GuestCr3.u);
    5647             if (RT_SUCCESS(rc))
    5648                 *pfPdpesMapped = true;
    5649             else
    5650             {
    5651                 iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_PDPTE);
    5652                 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPdpte);
    5653             }
    5654         }
    5655     }
    56565662
    56575663    NOREF(pszInstr);
     
    56705676 * @param   pszInstr        The VMX instruction name (for logging purposes).
    56715677 */
    5672 IEM_STATIC int iemVmxVmentryCheckGuestState(PVMCPUCC pVCpu, bool *pfPdpesMapped, const char *pszInstr)
     5678IEM_STATIC int iemVmxVmentryCheckGuestState(PVMCPUCC pVCpu, const char *pszInstr)
    56735679{
    56745680    int rc = iemVmxVmentryCheckGuestControlRegsMsrs(pVCpu, pszInstr);
     
    56865692                    rc = iemVmxVmentryCheckGuestNonRegState(pVCpu, pszInstr);
    56875693                    if (RT_SUCCESS(rc))
    5688                         return iemVmxVmentryCheckGuestPdptes(pVCpu, pfPdpesMapped, pszInstr);
     5694                        return iemVmxVmentryCheckGuestPdptes(pVCpu, pszInstr);
    56895695                }
    56905696            }
     
    66296635 * @returns VBox status code.
    66306636 * @param   pVCpu   The cross context virtual CPU structure.
     6637 * @param   pszInstr    The VMX instruction name (for logging purposes).
    66316638 *
    66326639 * @remarks This must be called only after loading the nested-guest register state
    66336640 *          (especially nested-guest RIP).
    66346641 */
    6635 IEM_STATIC void iemVmxVmentryLoadGuestNonRegState(PVMCPUCC pVCpu)
     6642IEM_STATIC int iemVmxVmentryLoadGuestNonRegState(PVMCPUCC pVCpu, const char *pszInstr)
    66366643{
    66376644    /*
     
    66396646     * See Intel spec. 26.6 "Special Features of VM Entry"
    66406647     */
     6648    const char *const pszFailure  = "VM-exit";
    66416649    PCVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
    66426650
     
    66726680
    66736681    /*
    6674      * Load the PAE PDPTEs from the VMCS when using EPT with PAE paging.
    6675      */
    6676     if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_EPT)
    6677     {
    6678         if (   !(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST)
    6679             &&  (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
    6680             &&  (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))
    6681         {
     6682     * Load the guest's PAE PDPTEs.
     6683     */
     6684    if (iemVmxVmcsIsGuestPaePagingEnabled(pVmcs))
     6685    {
     6686        if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_EPT)
     6687        {
     6688            /*
     6689             * With EPT, we've already validated these while checking the guest state.
     6690             * Just load them directly from the VMCS here.
     6691             */
    66826692            X86PDPE aPaePdptes[X86_PG_PAE_PDPE_ENTRIES];
    66836693            aPaePdptes[0].u = pVmcs->u64GuestPdpte0.u;
     
    66896699                pVCpu->cpum.GstCtx.aPaePdpes[i].u = aPaePdptes[i].u;
    66906700        }
    6691 
    6692         /*
    6693          * Set PGM's copy of the EPT pointer.
    6694          * The EPTP has already been validated while checking guest state.
    6695          */
     6701        else
     6702        {
     6703            /*
     6704             * Without EPT, we must load the PAE PDPTEs referenced by CR3.
     6705             * This involves loading (and mapping) CR3 and validating them now.
     6706             */
     6707            int const rc = PGMGstMapPaePdpesAtCr3(pVCpu, pVmcs->u64GuestCr3.u);
     6708            if (RT_SUCCESS(rc))
     6709            { /* likely */ }
     6710            else
     6711            {
     6712                iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_PDPTE);
     6713                IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPdpte);
     6714            }
     6715        }
     6716    }
     6717
     6718    /*
     6719     * Set PGM's copy of the EPT pointer.
     6720     * The EPTP has already been validated while checking guest state.
     6721     */
     6722    if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_EPT)
    66966723        PGMSetGuestEptPtr(pVCpu, pVmcs->u64EptPtr.u);
    6697     }
    66986724
    66996725    /* VPID is irrelevant. We don't support VPID yet. */
     
    67016727    /* Clear address-range monitoring. */
    67026728    EMMonitorWaitClear(pVCpu);
     6729
     6730    return VINF_SUCCESS;
    67036731}
    67046732
     
    69206948
    69216949    /* Load guest non-register state (such as interrupt shadows, NMI blocking etc). */
    6922     iemVmxVmentryLoadGuestNonRegState(pVCpu);
     6950    int rc = iemVmxVmentryLoadGuestNonRegState(pVCpu, pszInstr);
     6951    if (rc == VINF_SUCCESS)
     6952    { /* likely */ }
     6953    else
     6954        return rc;
    69236955
    69246956    /* Load VMX related structures and state referenced by the VMCS. */
    6925     int rc = iemVmxVmentryLoadGuestVmcsRefState(pVCpu, pszInstr);
     6957    rc = iemVmxVmentryLoadGuestVmcsRefState(pVCpu, pszInstr);
    69266958    if (rc == VINF_SUCCESS)
    69276959    { /* likely */ }
     
    74337465            iemVmxVmentrySaveNmiBlockingFF(pVCpu);
    74347466
    7435             bool fPdpesMapped;
    7436             rc = iemVmxVmentryCheckGuestState(pVCpu, &fPdpesMapped, pszInstr);
     7467            rc = iemVmxVmentryCheckGuestState(pVCpu, pszInstr);
    74377468            if (RT_SUCCESS(rc))
    74387469            {
     7470                /*
     7471                 * We've now entered nested-guest execution.
     7472                 *
     7473                 * It is important to do this prior to loading the guest state because
     7474                 * as part of loading the guest state, PGM (and perhaps other components
     7475                 * in the future) relies on detecting whether VMX non-root mode has been
     7476                 * entered.
     7477                 */
     7478                pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxNonRootMode = true;
     7479
    74397480                rc = iemVmxVmentryLoadGuestState(pVCpu, pszInstr);
    74407481                if (RT_SUCCESS(rc))
     
    74487489                        if (uInstrId == VMXINSTRID_VMLAUNCH)
    74497490                            pVmcs->fVmcsState = VMX_V_VMCS_LAUNCH_STATE_LAUNCHED;
     7491
     7492                        /* We would have mapped PAE PDPTEs when PAE paging is used without EPT. */
     7493                        bool const fPdpesMapped = !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_EPT)
     7494                                               && iemVmxVmcsIsGuestPaePagingEnabled(pVmcs);
    74507495
    74517496                        /* Perform the VMX transition (PGM updates). */
     
    74677512                        /* Paranoia. */
    74687513                        Assert(rcStrict == VINF_SUCCESS);
    7469 
    7470                         /* We've now entered nested-guest execution. */
    7471                         pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxNonRootMode = true;
    74727514
    74737515                        /*
Note: See TracChangeset for help on using the changeset viewer.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette