VirtualBox

Timestamp:
Oct 6, 2021 7:22:04 AM (3 years ago)
Author:
vboxsync
Message:

VMM: Nested VMX: bugref:10092 Made changes to PGM++ to handle invalid PAE PDPEs being loaded.
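For context, the validity rule that the CPUM/PGM helpers used in this changeset (CPUMArePaePdpesValid, PGMGstArePaePdpesValid) enforce is the architectural one: with PAE paging, a PDPTE whose Present bit is set must have all reserved bits clear (bits 2:1, bits 8:5 and bits 63:MAXPHYADDR). The snippet below is a minimal standalone sketch of that check, not the VirtualBox implementation; the helper name, the mask macros and the assumed 52-bit physical-address width are illustrative only.

    #include <stdbool.h>
    #include <stdint.h>

    /* Illustrative masks only; real code derives MAXPHYADDR from CPUID. */
    #define PAE_PDPTE_P         UINT64_C(0x0000000000000001)    /* bit 0: present */
    #define PAE_PDPTE_RSVD_MASK (  UINT64_C(0x0000000000000006) /* bits 2:1   */ \
                                 | UINT64_C(0x00000000000001e0) /* bits 8:5   */ \
                                 | ~((UINT64_C(1) << 52) - 1))  /* bits 63:52 */

    /* Returns true if all four PAE PDPTEs may be loaded, i.e. every present
       entry has its reserved bits clear (cf. CPUMArePaePdpesValid). */
    static bool ExamplePaePdpesValid(const uint64_t aPdptes[4])
    {
        for (unsigned i = 0; i < 4; i++)
            if (   (aPdptes[i] & PAE_PDPTE_P)
                && (aPdptes[i] & PAE_PDPTE_RSVD_MASK))
                return false;
        return true;
    }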

File:
1 edited

Legend:

Unmodified: both the old (r91427) and the new (r91580) line numbers are shown
Added: only the new (r91580) line number is shown
Removed: only the old (r91427) line number is shown
  • trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h

    r91427 r91580  
    11041104
    11051105/**
    1106  * Gets the instruction diagnostic for guest CR3 referenced PDPTE reserved bits
    1107  * failure during VM-entry of a nested-guest.
    1108  *
    1109  * @param   iPdpte      The PDPTE entry index.
    1110  */
    1111 IEM_STATIC VMXVDIAG iemVmxGetDiagVmentryPdpteRsvd(unsigned iPdpte)
    1112 {
    1113     Assert(iPdpte < X86_PG_PAE_PDPE_ENTRIES);
    1114     switch (iPdpte)
    1115     {
    1116         case 0: return kVmxVDiag_Vmentry_GuestPdpte0Rsvd;
    1117         case 1: return kVmxVDiag_Vmentry_GuestPdpte1Rsvd;
    1118         case 2: return kVmxVDiag_Vmentry_GuestPdpte2Rsvd;
    1119         case 3: return kVmxVDiag_Vmentry_GuestPdpte3Rsvd;
    1120         IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_11);
    1121     }
    1122 }
    1123 
    1124 
    1125 /**
    1126  * Gets the instruction diagnostic for host CR3 referenced PDPTE reserved bits
    1127  * failure during VM-exit of a nested-guest.
    1128  *
    1129  * @param   iPdpte      The PDPTE entry index.
    1130  */
    1131 IEM_STATIC VMXVDIAG iemVmxGetDiagVmexitPdpteRsvd(unsigned iPdpte)
    1132 {
    1133     Assert(iPdpte < X86_PG_PAE_PDPE_ENTRIES);
    1134     switch (iPdpte)
    1135     {
    1136         case 0: return kVmxVDiag_Vmexit_HostPdpte0Rsvd;
    1137         case 1: return kVmxVDiag_Vmexit_HostPdpte1Rsvd;
    1138         case 2: return kVmxVDiag_Vmexit_HostPdpte2Rsvd;
    1139         case 3: return kVmxVDiag_Vmexit_HostPdpte3Rsvd;
    1140         IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_12);
    1141     }
    1142 }
    1143 
    1144 
    1145 /**
    11461106 * Saves the guest control registers, debug registers and some MSRs as part of
    11471107 * VM-exit.
     
    12541214
    12551215/**
    1256  * Performs a VMX transition, updating PGM, IEM and CPUM.
    1257  *
    1258  * @param   pVCpu   The cross context virtual CPU structure.
    1259  */
    1260 IEM_STATIC int iemVmxWorldSwitch(PVMCPUCC pVCpu)
     1216 * Performs the VMX transition to/from VMX non-root mode.
     1217 *
     1218 * @param   pVCpu           The cross context virtual CPU structure.
     1219 * @param   fPdpesMapped    Whether the PAE PDPTEs (and PDPT) have been mapped.
     1220*/
     1221IEM_STATIC int iemVmxTransition(PVMCPUCC pVCpu, bool fPdpesMapped)
    12611222{
    12621223    /*
     
    12801241    if (rc == VINF_SUCCESS)
    12811242    {
    1282         rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, true);
     1243        rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, true /* fGlobal */, fPdpesMapped);
    12831244        AssertRCReturn(rc, rc);
    12841245    }
     
    19031864
    19041865/**
    1905  * Checks host PDPTEs as part of VM-exit.
     1866 * Checks the host PAE PDPTEs assuming we are switching to a PAE mode host.
    19061867 *
    19071868 * @param   pVCpu           The cross context virtual CPU structure.
    1908  * @param   uExitReason     The VM-exit reason (for logging purposes).
     1869 * @param   uExitReason     The VMX instruction name (for logging purposes).
     1870 *
     1871 * @remarks Caller must ensure the preconditions are met before calling this
     1872 *          function as failure here will trigger VMX aborts!
    19091873 */
    19101874IEM_STATIC int iemVmxVmexitCheckHostPdptes(PVMCPUCC pVCpu, uint32_t uExitReason)
    19111875{
    1912     /*
    1913      * Check host PDPTEs.
    1914      * See Intel spec. 27.5.4 "Checking and Loading Host Page-Directory-Pointer-Table Entries".
    1915      */
    1916     PCVMXVVMCS const    pVmcs           = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
    1917     const char * const  pszFailure      = "VMX-abort";
    1918     bool const          fHostInLongMode = RT_BOOL(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE);
    1919 
    1920     if (   (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
    1921         && !fHostInLongMode)
    1922     {
    1923         uint64_t const uHostCr3 = pVCpu->cpum.GstCtx.cr3 & X86_CR3_PAE_PAGE_MASK;
    1924         X86PDPE aPdptes[X86_PG_PAE_PDPE_ENTRIES];
    1925         int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), (void *)&aPdptes[0], uHostCr3, sizeof(aPdptes));
    1926         if (RT_SUCCESS(rc))
    1927         {
    1928             uint8_t idxInvalid;
    1929             bool const fValid = CPUMArePaePdpesValid(&aPdptes[0], &idxInvalid);
    1930             if (fValid)
    1931             { /* likely */ }
    1932             else
    1933             {
    1934                 VMXVDIAG const enmDiag = iemVmxGetDiagVmexitPdpteRsvd(idxInvalid);
    1935                 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, enmDiag);
    1936             }
    1937         }
    1938         else
    1939             IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_HostPdpteCr3ReadPhys);
    1940     }
    1941 
    1942     NOREF(pszFailure);
    1943     NOREF(uExitReason);
    1944     return VINF_SUCCESS;
     1876    PCVMXVVMCS const   pVmcs      = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
     1877    const char * const pszFailure = "VMX-abort";
     1878    int const rc = PGMGstMapPaePdpesAtCr3(pVCpu, pVmcs->u64HostCr3.u);
     1879    if (RT_SUCCESS(rc))
     1880        return rc;
     1881    IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_HostPdpte);
    19451882}
    19461883
     
    19501887 *
    19511888 * @returns VBox status code.
    1952  * @param   pVCpu       The cross context virtual CPU structure.
    1953  * @param   pszInstr    The VMX instruction name (for logging purposes).
     1889 * @param   pVCpu           The cross context virtual CPU structure.
     1890 * @param   uExitReason     The VMX instruction name (for logging purposes).
    19541891 */
    19551892IEM_STATIC int iemVmxVmexitLoadHostAutoMsrs(PVMCPUCC pVCpu, uint32_t uExitReason)
     
    20541991    }
    20551992
     1993    /*
     1994     * Check host PAE PDPTEs prior to loading the host state.
     1995     * See Intel spec. 26.5.4 "Checking and Loading Host Page-Directory-Pointer-Table Entries".
     1996     */
     1997    bool fPdpesMapped;
     1998    if (   (pVmcs->u64HostCr4.u & X86_CR4_PAE)
     1999        && !fHostInLongMode
     2000        && (   !CPUMIsGuestInPAEModeEx(&pVCpu->cpum.GstCtx)
     2001            || pVmcs->u64HostCr3.u != pVCpu->cpum.GstCtx.cr3))
     2002    {
     2003        int const rc = iemVmxVmexitCheckHostPdptes(pVCpu, uExitReason);
     2004        if (RT_FAILURE(rc))
     2005        {
     2006            Log(("VM-exit attempting to load invalid PDPTEs -> VMX-Abort\n"));
     2007            return iemVmxAbort(pVCpu, VMXABORT_HOST_PDPTE);
     2008        }
     2009        fPdpesMapped = true;
     2010    }
     2011    else
     2012        fPdpesMapped = false;
     2013
    20562014    iemVmxVmexitLoadHostControlRegsMsrs(pVCpu);
    20572015    iemVmxVmexitLoadHostSegRegs(pVCpu);
     
    20692027
    20702028    /* Perform the VMX transition (PGM updates). */
    2071     VBOXSTRICTRC rcStrict = iemVmxWorldSwitch(pVCpu);
     2029    VBOXSTRICTRC rcStrict = iemVmxTransition(pVCpu, fPdpesMapped);
    20722030    if (rcStrict == VINF_SUCCESS)
    2073     {
    2074         /* Check host PDPTEs (only when we've fully switched page tables). */
    2075         /** @todo r=ramshankar: I don't know if PGM does this for us already or not... */
    2076         int rc = iemVmxVmexitCheckHostPdptes(pVCpu, uExitReason);
    2077         if (RT_FAILURE(rc))
    2078         {
    2079             Log(("VM-exit failed while restoring host PDPTEs -> VMX-Abort\n"));
    2080             return iemVmxAbort(pVCpu, VMXABORT_HOST_PDPTE);
    2081         }
    2082     }
     2031    { /* likely */ }
    20832032    else if (RT_SUCCESS(rcStrict))
    20842033    {
    2085         Log3(("VM-exit: iemVmxWorldSwitch returns %Rrc (uExitReason=%u) -> Setting passup status\n", VBOXSTRICTRC_VAL(rcStrict),
     2034        Log3(("VM-exit: iemVmxTransition returns %Rrc (uExitReason=%u) -> Setting passup status\n", VBOXSTRICTRC_VAL(rcStrict),
    20862035              uExitReason));
    20872036        rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
     
    20892038    else
    20902039    {
    2091         Log3(("VM-exit: iemVmxWorldSwitch failed! rc=%Rrc (uExitReason=%u)\n", VBOXSTRICTRC_VAL(rcStrict), uExitReason));
     2040        Log3(("VM-exit: iemVmxTransition failed! rc=%Rrc (uExitReason=%u)\n", VBOXSTRICTRC_VAL(rcStrict), uExitReason));
    20922041        return VBOXSTRICTRC_VAL(rcStrict);
    20932042    }
     
    56435592 * Checks guest PDPTEs as part of VM-entry.
    56445593 *
    5645  * @param   pVCpu       The cross context virtual CPU structure.
    5646  * @param   pszInstr    The VMX instruction name (for logging purposes).
    5647  */
    5648 IEM_STATIC int iemVmxVmentryCheckGuestPdptes(PVMCPUCC pVCpu, const char *pszInstr)
     5594 * @param   pVCpu           The cross context virtual CPU structure.
     5595 * @param   pfPdpesMapped   Where to store whether PAE PDPTEs (and PDPT) have been
     5596 *                          mapped as part of checking guest state.
     5597 * @param   pszInstr        The VMX instruction name (for logging purposes).
     5598 */
     5599IEM_STATIC int iemVmxVmentryCheckGuestPdptes(PVMCPUCC pVCpu, bool *pfPdpesMapped, const char *pszInstr)
    56495600{
    56505601    /*
     
    56545605    PVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
    56555606    const char * const pszFailure = "VM-exit";
     5607    *pfPdpesMapped = false;
    56565608
    56575609    if (   !(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST)
     
    56595611        &&  (pVmcs->u64GuestCr0.u & X86_CR0_PG))
    56605612    {
    5661         /* Get the PDPTEs. */
    5662         X86PDPE aPdptes[X86_PG_PAE_PDPE_ENTRIES];
    56635613#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
    56645614        if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_EPT)
    56655615        {
    5666             aPdptes[0].u = pVmcs->u64GuestPdpte0.u;
    5667             aPdptes[1].u = pVmcs->u64GuestPdpte1.u;
    5668             aPdptes[2].u = pVmcs->u64GuestPdpte2.u;
    5669             aPdptes[3].u = pVmcs->u64GuestPdpte3.u;
    5670         }
    5671         else
    5672 #endif
    5673         {
    5674             uint64_t const uGuestCr3 = pVmcs->u64GuestCr3.u & X86_CR3_PAE_PAGE_MASK;
    5675             int const rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), (void *)&aPdptes[0], uGuestCr3, sizeof(aPdptes));
    5676             if (RT_FAILURE(rc))
     5616            /* Get PDPTEs from the VMCS. */
     5617            X86PDPE aPaePdptes[X86_PG_PAE_PDPE_ENTRIES];
     5618            aPaePdptes[0].u = pVmcs->u64GuestPdpte0.u;
     5619            aPaePdptes[1].u = pVmcs->u64GuestPdpte1.u;
     5620            aPaePdptes[2].u = pVmcs->u64GuestPdpte2.u;
     5621            aPaePdptes[3].u = pVmcs->u64GuestPdpte3.u;
     5622
     5623            /* Check validity of the PDPTEs. */
     5624            bool const fValid = PGMGstArePaePdpesValid(pVCpu, &aPaePdptes[0]);
     5625            if (fValid)
     5626            { /* likely */ }
     5627            else
    56775628            {
    56785629                iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_PDPTE);
    5679                 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPdpteCr3ReadPhys);
     5630                IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPdpte);
    56805631            }
    56815632        }
    5682 
    5683         /* Check validity of the PDPTEs. */
    5684         uint8_t idxInvalid;
    5685         bool const fValid = CPUMArePaePdpesValid(&aPdptes[0], &idxInvalid);
    5686         if (fValid)
    5687         { /* likely */ }
    5688         else
    5689         {
    5690             VMXVDIAG const enmDiag = iemVmxGetDiagVmentryPdpteRsvd(idxInvalid);
    5691             iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_PDPTE);
    5692             IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
     5633        else
     5634#endif
     5635        {
     5636            int const rc = PGMGstMapPaePdpesAtCr3(pVCpu, pVmcs->u64GuestCr3.u);
     5637            if (rc == VINF_SUCCESS)
     5638                *pfPdpesMapped = true;
     5639            else
     5640            {
     5641                iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_PDPTE);
     5642                IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPdpte);
     5643            }
    56935644        }
    56945645    }
     
    57045655 *
    57055656 * @returns VBox status code.
    5706  * @param   pVCpu       The cross context virtual CPU structure.
    5707  * @param   pszInstr    The VMX instruction name (for logging purposes).
    5708  */
    5709 IEM_STATIC int iemVmxVmentryCheckGuestState(PVMCPUCC pVCpu, const char *pszInstr)
     5657 * @param   pVCpu           The cross context virtual CPU structure.
     5658 * @param   pfPdpesMapped   Where to store whether PAE PDPTEs (and PDPT) have been
     5659 *                          mapped as part of checking guest state.
     5660 * @param   pszInstr        The VMX instruction name (for logging purposes).
     5661 */
     5662IEM_STATIC int iemVmxVmentryCheckGuestState(PVMCPUCC pVCpu, bool *pfPdpesMapped, const char *pszInstr)
    57105663{
    57115664    int rc = iemVmxVmentryCheckGuestControlRegsMsrs(pVCpu, pszInstr);
     
    57235676                    rc = iemVmxVmentryCheckGuestNonRegState(pVCpu, pszInstr);
    57245677                    if (RT_SUCCESS(rc))
    5725                         return iemVmxVmentryCheckGuestPdptes(pVCpu, pszInstr);
     5678                        return iemVmxVmentryCheckGuestPdptes(pVCpu, pfPdpesMapped, pszInstr);
    57265679                }
    57275680            }
     
    74197372            iemVmxVmentrySaveNmiBlockingFF(pVCpu);
    74207373
    7421             rc = iemVmxVmentryCheckGuestState(pVCpu, pszInstr);
     7374            bool fPdpesMapped;
     7375            rc = iemVmxVmentryCheckGuestState(pVCpu, &fPdpesMapped, pszInstr);
    74227376            if (RT_SUCCESS(rc))
    74237377            {
     
    74357389
    74367390                        /* Perform the VMX transition (PGM updates). */
    7437                         VBOXSTRICTRC rcStrict = iemVmxWorldSwitch(pVCpu);
     7391                        VBOXSTRICTRC rcStrict = iemVmxTransition(pVCpu, fPdpesMapped);
    74387392                        if (rcStrict == VINF_SUCCESS)
    74397393                        { /* likely */ }
    74407394                        else if (RT_SUCCESS(rcStrict))
    74417395                        {
    7442                             Log3(("%s: iemVmxWorldSwitch returns %Rrc -> Setting passup status\n", pszInstr,
     7396                            Log3(("%s: iemVmxTransition returns %Rrc -> Setting passup status\n", pszInstr,
    74437397                                  VBOXSTRICTRC_VAL(rcStrict)));
    74447398                            rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
     
    74467400                        else
    74477401                        {
    7448                             Log3(("%s: iemVmxWorldSwitch failed! rc=%Rrc\n", pszInstr, VBOXSTRICTRC_VAL(rcStrict)));
     7402                            Log3(("%s: iemVmxTransition failed! rc=%Rrc\n", pszInstr, VBOXSTRICTRC_VAL(rcStrict)));
    74497403                            return rcStrict;
    74507404                        }
     
    84378391                        /* Invalidate mappings for the linear address tagged with VPID. */
    84388392                        /** @todo PGM support for VPID? Currently just flush everything. */
    8439                         PGMFlushTLB(pVCpu, uCr3, true /* fGlobal */);
     8393                        PGMFlushTLB(pVCpu, uCr3, true /* fGlobal */, false /* fPdpesMapped */);
    84408394                        iemVmxVmSucceed(pVCpu);
    84418395                    }
     
    84648418                    /* Invalidate all mappings with VPID. */
    84658419                    /** @todo PGM support for VPID? Currently just flush everything. */
    8466                     PGMFlushTLB(pVCpu, uCr3, true /* fGlobal */);
     8420                    PGMFlushTLB(pVCpu, uCr3, true /* fGlobal */, false /* fPdpesMapped */);
    84678421                    iemVmxVmSucceed(pVCpu);
    84688422                }
     
    84818435                /* Invalidate all mappings with non-zero VPIDs. */
    84828436                /** @todo PGM support for VPID? Currently just flush everything. */
    8483                 PGMFlushTLB(pVCpu, uCr3, true /* fGlobal */);
     8437                PGMFlushTLB(pVCpu, uCr3, true /* fGlobal */, false /* fPdpesMapped */);
    84848438                iemVmxVmSucceed(pVCpu);
    84858439                break;
     
    84928446                    /* Invalidate all mappings with VPID except global translations. */
    84938447                    /** @todo PGM support for VPID? Currently just flush everything. */
    8494                     PGMFlushTLB(pVCpu, uCr3, true /* fGlobal */);
     8448                    PGMFlushTLB(pVCpu, uCr3, true /* fGlobal */, false /* fPdpesMapped */);
    84958449                    iemVmxVmSucceed(pVCpu);
    84968450                }
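Taken together, the changeset threads a single fPdpesMapped flag from the VM-entry guest-state checks (and the VM-exit host-state checks) through the renamed iemVmxTransition into the extended PGMFlushTLB signature, so that PGM knows whether the PAE PDPTEs (and the PDPT) have already been mapped. The VM-exit side only performs the host PDPTE check when the host will actually run in PAE mode and the PDPTEs are not already current; the small standalone helper below restates that condition from the hunk at new lines 1993-2012, with the helper name, parameter list and explicit CR4.PAE mask being illustrative assumptions rather than VirtualBox code.

    #include <stdbool.h>
    #include <stdint.h>

    /* Illustrative restatement of the VM-exit condition added in this changeset:
       the host PAE PDPTEs need checking/mapping only when the host uses PAE
       paging (CR4.PAE set, not in long mode) and the guest was not already in
       PAE mode with the same CR3. */
    static bool ExampleNeedsHostPaePdpteCheck(uint64_t uHostCr4, bool fHostInLongMode,
                                              bool fGuestInPaeMode, uint64_t uHostCr3,
                                              uint64_t uGuestCr3)
    {
        return (uHostCr4 & UINT64_C(0x20) /* CR4.PAE (bit 5) */)
            && !fHostInLongMode
            && (!fGuestInPaeMode || uHostCr3 != uGuestCr3);
    }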