VirtualBox

Changeset 97037 in vbox


Ignore:
Timestamp:
Oct 7, 2022 8:05:52 AM (2 years ago)
Author:
vboxsync
svn:sync-xref-src-repo-rev:
153983
Message:

VMM/IEM: Nested VMX: bugref:10092 Fix for PAE nested-guests.

File:
1 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp

    r97020 r97037  
    696 696  * @returns @c true if PAE paging mode is used, @c false otherwise.
    697 697  * @param   pVmcs       Pointer to the virtual VMCS.
        698  *
        699  * @warning Only use this prior to switching the guest-CPU state with the
        700  *          nested-guest CPU state!
    698 701  */
    699 702 DECL_FORCE_INLINE(bool) iemVmxVmcsIsGuestPaePagingEnabled(PCVMXVVMCS pVmcs)
     
    1507 1510
    1508 1511    /*
    1509      * PAE PDPTEs.
    1510      *
    1511      * If EPT is enabled and PAE paging was used at the time of the VM-exit,
    1512      * the PDPTEs are saved from the VMCS. Otherwise they're undefined but
    1513      * we zero them for consistency.
    1514      */
    1515     if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_EPT)
    1516     {
    1517         if (   !(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST)
    1518             &&  (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
    1519             &&  (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))
    1520         {
    1521             pVmcs->u64GuestPdpte0.u = pVCpu->cpum.GstCtx.aPaePdpes[0].u;
    1522             pVmcs->u64GuestPdpte1.u = pVCpu->cpum.GstCtx.aPaePdpes[1].u;
    1523             pVmcs->u64GuestPdpte2.u = pVCpu->cpum.GstCtx.aPaePdpes[2].u;
    1524             pVmcs->u64GuestPdpte3.u = pVCpu->cpum.GstCtx.aPaePdpes[3].u;
    1525         }
    1526         else
    1527         {
    1528             pVmcs->u64GuestPdpte0.u = 0;
    1529             pVmcs->u64GuestPdpte1.u = 0;
    1530             pVmcs->u64GuestPdpte2.u = 0;
    1531             pVmcs->u64GuestPdpte3.u = 0;
    1532         }
    1533 
    1534         /* Clear PGM's copy of the EPT pointer for added safety. */
    1535         PGMSetGuestEptPtr(pVCpu, 0 /* uEptPtr */);
    1536     }
    1537     else
    1538     {
     1512     * Save the guest PAE PDPTEs.
     1513     */
     1514    if (   !CPUMIsGuestInPAEModeEx(&pVCpu->cpum.GstCtx)
     1515        || !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_EPT))
     1516    {
     1517        /*
     1518         * Without EPT or when the nested-guest is not using PAE paging, the values saved
     1519         * in the VMCS during VM-exit are undefined. We zero them here for consistency.
     1520         */
    1539 1521        pVmcs->u64GuestPdpte0.u = 0;
    1540 1522        pVmcs->u64GuestPdpte1.u = 0;
     
    1542 1524        pVmcs->u64GuestPdpte3.u = 0;
    1543 1525    }
     1526    else
     1527    {
     1528        /*
     1529         * With EPT and when the nested-guest is using PAE paging, we update the PDPTEs from
     1530         * the nested-guest CPU context. Both IEM (Mov CRx) and hardware-assisted execution
     1531         * of the nested-guest is expected to have updated them.
     1532         */
     1533        pVmcs->u64GuestPdpte0.u = pVCpu->cpum.GstCtx.aPaePdpes[0].u;
     1534        pVmcs->u64GuestPdpte1.u = pVCpu->cpum.GstCtx.aPaePdpes[1].u;
     1535        pVmcs->u64GuestPdpte2.u = pVCpu->cpum.GstCtx.aPaePdpes[2].u;
     1536        pVmcs->u64GuestPdpte3.u = pVCpu->cpum.GstCtx.aPaePdpes[3].u;
     1537    }
     1538
     1539    /* Clear PGM's copy of the EPT pointer for added safety. */
     1540    if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_EPT)
     1541        PGMSetGuestEptPtr(pVCpu, 0 /* uEptPtr */);
    1544 1542 }
    1545 1543
     
    5978 5976
    5979 5977    /*
    5980          * When EPT is used, we only validate the PAE PDPTEs provided in the VMCS.
         5978     * When EPT is used, we need to validate the PAE PDPTEs provided in the VMCS.
    5981 5979     * Otherwise, we load any PAE PDPTEs referenced by CR3 at a later point.
    5982 5980     */
     
    5992 5990
    5993 5991        /* Check validity of the PDPTEs. */
    5994             bool const fValid = PGMGstArePaePdpesValid(pVCpu, &aPaePdptes[0]);
    5995             if (fValid)
         5992        if (PGMGstArePaePdpesValid(pVCpu, &aPaePdptes[0]))
    5996 5993        { /* likely */ }
    5997 5994        else
     
    7037 7034     * Load the guest's PAE PDPTEs.
    7038 7035     */
    7039     if (iemVmxVmcsIsGuestPaePagingEnabled(pVmcs))
    7040     {
    7041         if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_EPT)
    7042         {
    7043             /*
    7044              * With EPT, we've already validated these while checking the guest state.
    7045              * Just load them directly from the VMCS here.
    7046              */
    7047             X86PDPE aPaePdptes[X86_PG_PAE_PDPE_ENTRIES];
    7048             aPaePdptes[0].u = pVmcs->u64GuestPdpte0.u;
    7049             aPaePdptes[1].u = pVmcs->u64GuestPdpte1.u;
    7050             aPaePdptes[2].u = pVmcs->u64GuestPdpte2.u;
    7051             aPaePdptes[3].u = pVmcs->u64GuestPdpte3.u;
    7052             AssertCompile(RT_ELEMENTS(aPaePdptes) == RT_ELEMENTS(pVCpu->cpum.GstCtx.aPaePdpes));
    7053             for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->cpum.GstCtx.aPaePdpes); i++)
    7054                 pVCpu->cpum.GstCtx.aPaePdpes[i].u = aPaePdptes[i].u;
    7055         }
     7036    if (!iemVmxVmcsIsGuestPaePagingEnabled(pVmcs))
     7037    {
     7038        /*
     7039         * When PAE paging is not used we clear the PAE PDPTEs for safety
     7040         * in case we might be switching from a PAE host to a non-PAE guest.
     7041         */
     7042        pVCpu->cpum.GstCtx.aPaePdpes[0].u = 0;
     7043        pVCpu->cpum.GstCtx.aPaePdpes[1].u = 0;
     7044        pVCpu->cpum.GstCtx.aPaePdpes[2].u = 0;
     7045        pVCpu->cpum.GstCtx.aPaePdpes[3].u = 0;
     7046    }
     7047    else if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_EPT)
     7048    {
     7049        /*
     7050         * With EPT and the nested-guest using PAE paging, we've already validated the PAE PDPTEs
     7051         * while checking the guest state. We can load them into the nested-guest CPU state now.
     7052         * They'll later be used while mapping CR3 and the PAE PDPTEs.
     7053         */
     7054        pVCpu->cpum.GstCtx.aPaePdpes[0].u = pVmcs->u64GuestPdpte0.u;
     7055        pVCpu->cpum.GstCtx.aPaePdpes[1].u = pVmcs->u64GuestPdpte1.u;
     7056        pVCpu->cpum.GstCtx.aPaePdpes[2].u = pVmcs->u64GuestPdpte2.u;
     7057        pVCpu->cpum.GstCtx.aPaePdpes[3].u = pVmcs->u64GuestPdpte3.u;
     7058    }
     7059    else
     7060    {
     7061        /*
     7062         * Without EPT and the nested-guest using PAE paging, we must load the PAE PDPTEs
     7063         * referenced by CR3. This involves loading (and mapping) CR3 and validating them now.
     7064         */
     7065        int const rc = PGMGstMapPaePdpesAtCr3(pVCpu, pVmcs->u64GuestCr3.u);
     7066        if (RT_SUCCESS(rc))
     7067        { /* likely */ }
    7056 7068        else
    7057 7069        {
    7058             /*
    7059              * Without EPT, we must load the PAE PDPTEs referenced by CR3.
    7060              * This involves loading (and mapping) CR3 and validating them now.
    7061              */
    7062             int const rc = PGMGstMapPaePdpesAtCr3(pVCpu, pVmcs->u64GuestCr3.u);
    7063             if (RT_SUCCESS(rc))
    7064             { /* likely */ }
    7065             else
    7066             {
    7067                 iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_PDPTE);
    7068                 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPdpte);
    7069             }
     7070            iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_PDPTE);
     7071            IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPdpte);
    7070 7072        }
    7071 7073    }
Note: See TracChangeset for help on using the changeset viewer.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette