VirtualBox

Changeset 74133 in vbox for trunk/src


Timestamp: Sep 7, 2018 6:17:07 AM
Author: vboxsync
svn:sync-xref-src-repo-rev: 124901
Message: VMM/IEM: Nested VMX: bugref:9180 vmlaunch/vmresume bits.

Location: trunk/src/VBox/VMM/VMMAll
Files: 2 edited
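
Most of this change adds the VM-entry guest-state checks from Intel spec 26.3.1.1, several of which validate CR0/CR4 against the VMX fixed-bit MSRs (IA32_VMX_CR0_FIXED0/1, IA32_VMX_CR4_FIXED0/1). As a minimal standalone sketch of the rule those checks encode (illustrative names, not the VBox API): every bit set in FIXED0 must be set in the register, and every bit clear in FIXED1 must be clear.

    #include <stdbool.h>
    #include <stdint.h>

    /* Sketch of the VMX fixed-bits rule (Intel SDM Vol. 3, App. A.7/A.8).
       Hypothetical helper for illustration only. */
    static bool IsCrValidForVmx(uint64_t uCr, uint64_t uFixed0, uint64_t uFixed1)
    {
        if (~uCr & uFixed0)     /* A must-be-one bit is clear. */
            return false;
        if (uCr & ~uFixed1)     /* A must-be-zero bit is set.  */
            return false;
        return true;
    }

With unrestricted-guest support enabled, CR0.PE and CR0.PG are exempt from the must-be-one requirement, which is why the new code masks them out of the fixed-0 value before comparing.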

  • trunk/src/VBox/VMM/VMMAll/HMVMXAll.cpp

    r74114 r74133  
    142142    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_EntryCtlsAllowed1        , "EntryCtlsAllowed1"       ),
    143143    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_EntryCtlsDisallowed0     , "EntryCtlsDisallowed0"    ),
    144     VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_HostAddrSpace            , "HostAddrSpace"           ),
     144    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_EntryInstrLen            , "EntryInstrLen"           ),
     145    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_EntryInstrLenZero        , "EntryInstrLenZero"       ),
     146    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_EntryIntInfoErrCodePe    , "EntryIntInfoErrCodePe"   ),
     147    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_EntryIntInfoErrCodeVec   , "EntryIntInfoErrCodeVec"  ),
     148    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_EntryIntInfoTypeVecRsvd  , "EntryIntInfoTypeVecRsvd" ),
     149    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_EntryXcptErrCodeRsvd     , "EntryXcptErrCodeRsvd"    ),
     150    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_ExitCtlsAllowed1         , "ExitCtlsAllowed1"        ),
     151    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_ExitCtlsDisallowed0      , "ExitCtlsDisallowed0"     ),
    145152    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_HostCr0Fixed0            , "HostCr0Fixed0"           ),
    146153    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_HostCr0Fixed1            , "HostCr0Fixed1"           ),
     
    152159    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_HostCsTr                 , "HostCsTr"                ),
    153160    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_HostEferMsr              , "HostEferMsr"             ),
     161    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_HostEferMsrRsvd          , "HostEferMsrRsvd"         ),
    154162    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_HostGuestLongMode        , "HostGuestLongMode"       ),
    155163    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_HostGuestLongModeNoCpu   , "HostGuestLongModeNoCpu"  ),
     
    162170    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_HostSs                   , "HostSs"                  ),
    163171    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_HostSysenterEspEip       , "HostSysenterEspEip"      ),
    164     VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_EntryInstrLen            , "EntryInstrLen"           ),
    165     VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_EntryInstrLenZero        , "EntryInstrLenZero"       ),
    166     VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_EntryIntInfoErrCodePe    , "EntryIntInfoErrCodePe"   ),
    167     VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_EntryIntInfoErrCodeVec   , "EntryIntInfoErrCodeVec"  ),
    168     VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_EntryIntInfoTypeVecRsvd  , "EntryIntInfoTypeVecRsvd" ),
    169     VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_EntryXcptErrCodeRsvd     , "EntryXcptErrCodeRsvd"    ),
    170     VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_ExitCtlsAllowed1         , "ExitCtlsAllowed1"        ),
    171     VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_ExitCtlsDisallowed0      , "ExitCtlsDisallowed0"     ),
     172    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestCr0Fixed0           , "GuestCr0Fixed0"          ),
     173    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestCr0Fixed1           , "GuestCr0Fixed1"          ),
     174    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestCr0PgPe             , "GuestCr0PgPe"            ),
     175    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestCr3                 , "GuestCr3"                ),
     176    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestCr4Fixed0           , "GuestCr4Fixed0"          ),
     177    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestCr4Fixed1           , "GuestCr4Fixed1"          ),
     178    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestDebugCtl            , "GuestDebugCtl"           ),
     179    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestDr7                 , "GuestDr7"                ),
     180    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestEferMsr             , "GuestEferMsr"            ),
     181    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestEferMsrRsvd         , "GuestEferMsrRsvd"        ),
     182    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestPae                 , "GuestPae"                ),
     183    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestPatMsr              , "GuestPatMsr"             ),
     184    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestPcide               , "GuestPcide"              ),
     185    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestSysenterEspEip      , "GuestSysenterEspEip"     ),
    172186    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_LongModeCS               , "LongModeCS"              ),
    173187    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_NmiWindowExit            , "NmiWindowExit"           ),
     
    591605        {
    592606            /* We ignore the PE & PG bits here on purpose; we emulate real and protected mode without paging. */
    593             uCr0Mask &= ~(X86_CR0_PG|X86_CR0_PE);
     607            uCr0Mask &= ~(X86_CR0_PG | X86_CR0_PE);
    594608        }
    595609        else
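
The reshuffled VMX_INSTR_DIAG_DESC entries above keep the descriptor table in sync with the kVmxVInstrDiag_* enum so each diagnostic value maps to a readable name. A minimal sketch of that enum-to-string table idiom (hypothetical names, not the actual HMVMXAll.cpp definitions):

    /* Hypothetical sketch of the enum-to-string descriptor idiom; the real
       macro and table live in HMVMXAll.cpp. Keeping entries in enum order
       lets a diagnostic value index its description directly. */
    typedef enum { kDiag_GuestCr0Fixed0, kDiag_GuestCr0Fixed1, kDiag_End } DIAG;
    #define DIAG_DESC(a_Enum, a_Desc)   a_Desc
    static const char * const g_apszDiagDesc[] =
    {
        DIAG_DESC(kDiag_GuestCr0Fixed0, "GuestCr0Fixed0"),
        DIAG_DESC(kDiag_GuestCr0Fixed1, "GuestCr0Fixed1"),
    };

    static const char *GetDiagDesc(DIAG enmDiag)
    {
        return (unsigned)enmDiag < kDiag_End ? g_apszDiagDesc[enmDiag] : "Unknown";
    }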
  • trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h

    r74130 r74133  
    360360    } while (0)
    361361# endif /* !IEM_WITH_CODE_TLB */
    362 
    363 /** The maximum physical address width in bits. */
    364 #define IEM_VMX_MAX_PHYSADDR_WIDTH(a_pVCpu)         (IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->cVmxMaxPhysAddrWidth)
    365362
    366363/** Whether a shadow VMCS is present for the given VCPU. */
     
    15381535
    15391536    /* VMCS physical-address width limits. */
    1540     if (GCPhysVmcs >> IEM_VMX_MAX_PHYSADDR_WIDTH(pVCpu))
     1537    if (GCPhysVmcs >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
    15411538    {
    15421539        Log(("vmclear: VMCS pointer extends beyond physical-address width -> VMFail()\n"));
     
    16971694
    16981695    /* VMCS physical-address width limits. */
    1699     if (GCPhysVmcs >> IEM_VMX_MAX_PHYSADDR_WIDTH(pVCpu))
     1696    if (GCPhysVmcs >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
    17001697    {
    17011698        Log(("vmptrld: VMCS pointer extends beyond physical-address width -> VMFail()\n"));
     
    18641861
    18651862        /* VMXON physical-address width limits. */
    1866         if (GCPhysVmxon >> IEM_VMX_MAX_PHYSADDR_WIDTH(pVCpu))
     1863        if (GCPhysVmxon >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
    18671864        {
    18681865            Log(("vmxon: VMXON region pointer extends beyond physical-address width -> VMFailInvalid\n"));
     
    19591956
    19601957/**
    1961  * Checks host state as part of VM-entry.
     1958 * Checks guest-state as part of VM-entry.
     1959 *
     1960 * @returns VBox status code.
     1961 * @param   pVCpu           The cross context virtual CPU structure.
     1962 * @param   pszInstr        The VMX instruction name (for logging purposes).
     1963 */
     1964IEM_STATIC int iemVmxVmentryCheckGuestState(PVMCPU pVCpu, const char *pszInstr)
     1965{
     1966    PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
     1967
     1968    /*
     1969     * Guest Control Registers, Debug Registers, and MSRs.
     1970     * See Intel spec. 26.3.1.1 "Checks on Guest Control Registers, Debug Registers, and MSRs".
     1971     */
     1972    bool const fUnrestrictedGuest = pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST;
     1973    /* CR0 reserved bits. */
     1974    {
     1975        /* CR0 MB1 bits. */
     1976        uint64_t u64Cr0Fixed0 = CPUMGetGuestIa32VmxCr0Fixed0(pVCpu);
     1977        Assert(u64Cr0Fixed0 & (X86_CR0_NW | X86_CR0_CD));
     1978        if (fUnrestrictedGuest)
     1979            u64Cr0Fixed0 &= ~(X86_CR0_PE | X86_CR0_PG);
     1980        if (~pVmcs->u64GuestCr0.u & u64Cr0Fixed0)
     1981        {
     1982            Log(("%s: Invalid guest CR0 %#RX64 (fixed0) -> VM-exit\n", pszInstr, pVmcs->u64GuestCr0.u));
     1983            pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_GuestCr0Fixed0;
     1984            return VERR_VMX_VMENTRY_FAILED;
     1985        }
     1986
     1987        /* CR0 MBZ bits. */
     1988        uint64_t const u64Cr0Fixed1 = CPUMGetGuestIa32VmxCr0Fixed1(pVCpu);
     1989        if (pVmcs->u64GuestCr0.u & ~u64Cr0Fixed1)
     1990        {
     1991            Log(("%s: Invalid guest CR0 %#RX64 (fixed1) -> VM-exit\n", pszInstr, pVmcs->u64GuestCr0.u));
     1992            pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_GuestCr0Fixed1;
     1993            return VERR_VMX_VMENTRY_FAILED;
     1994        }
     1995
     1996        /* Without unrestricted-guest support, VT-x does not support unpaged protected mode. */
     1997        if (   !fUnrestrictedGuest
     1998            &&  (pVmcs->u64GuestCr0.u & X86_CR0_PG)
     1999            && !(pVmcs->u64GuestCr0.u & X86_CR0_PE))
     2000        {
     2001            Log(("%s: Invalid guest CR0.PG and CR0.PE combination %#RX64 -> VM-exit\n", pszInstr, pVmcs->u64GuestCr0.u));
     2002            pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_GuestCr0PgPe;
     2003            return VERR_VMX_VMENTRY_FAILED;
     2004        }
     2005    }
     2006
     2007    /* CR4 reserved bits. */
     2008    {
     2009        /* CR4 MB1 bits. */
     2010        uint64_t const u64Cr4Fixed0 = CPUMGetGuestIa32VmxCr4Fixed0(pVCpu);
     2011        if (~pVmcs->u64GuestCr4.u & u64Cr4Fixed0)
     2012        {
     2013            Log(("%s: Invalid guest CR4 %#RX64 (fixed0) -> VM-exit\n", pszInstr, pVmcs->u64GuestCr4.u));
     2014            pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_GuestCr4Fixed0;
     2015            return VERR_VMX_VMENTRY_FAILED;
     2016        }
     2017
     2018        /* CR4 MBZ bits. */
     2019        uint64_t const u64Cr4Fixed1 = CPUMGetGuestIa32VmxCr4Fixed1(pVCpu);
     2020        if (pVmcs->u64GuestCr4.u & ~u64Cr4Fixed1)
     2021        {
     2022            Log(("%s: Invalid guest CR4 %#RX64 (fixed1) -> VM-exit\n", pszInstr, pVmcs->u64GuestCr4.u));
     2023            pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_GuestCr4Fixed1;
     2024            return VERR_VMX_VMENTRY_FAILED;
     2025        }
     2026    }
     2027
     2028    /* DEBUGCTL MSR. */
     2029    if (   (pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
     2030        && (pVmcs->u64GuestDebugCtlMsr.u & ~MSR_IA32_DEBUGCTL_VALID_MASK_INTEL))
     2031    {
     2032        Log(("%s: DEBUGCTL MSR (%#RX64) reserved bits set -> VM-exit\n", pszInstr, pVmcs->u64GuestDebugCtlMsr.u));
     2033        pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_GuestDebugCtl;
     2034        return VERR_VMX_VMENTRY_FAILED;
     2035    }
     2036
     2037    /* 64-bit CPU checks. */
     2038    if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
     2039    {
     2040        bool const fGstInLongMode = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
     2041        if (fGstInLongMode)
     2042        {
     2043            /* PAE must be set. */
     2044            if (   (pVmcs->u64GuestCr0.u & X86_CR0_PG)
     2045                && (pVmcs->u64GuestCr4.u & X86_CR4_PAE))
     2046            { /* likely */ }
     2047            else
     2048            {
     2049                Log(("%s: Guest PAE not set when guest is in long mode\n", pszInstr));
     2050                pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_GuestPae;
     2051                return VERR_VMX_VMENTRY_FAILED;
     2052            }
     2053        }
     2054        else
     2055        {
     2056            /* PCIDE should not be set. */
     2057            if (!(pVmcs->u64GuestCr4.u & X86_CR4_PCIDE))
     2058            { /* likely */ }
     2059            else
     2060            {
     2061                Log(("%s: Guest PCIDE set when guest is not in long mode\n", pszInstr));
     2062                pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_GuestPcide;
     2063                return VERR_VMX_VMENTRY_FAILED;
     2064            }
     2065        }
     2066
     2067        /* CR3. */
     2068        if (pVmcs->u64GuestCr3.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cMaxPhysAddrWidth)
     2069        {
     2070            Log(("%s: Guest CR3 (%#RX64) invalid\n", pszInstr, pVmcs->u64GuestCr3.u));
     2071            pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_GuestCr3;
     2072            return VERR_VMX_VMENTRY_FAILED;
     2073        }
     2074
     2075        /* DR7. */
     2076        if (   (pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
     2077            && (pVmcs->u64GuestDr7.u & X86_DR7_MBZ_MASK))
     2078        {
     2079            Log(("%s: Guest DR7 (%#RX64) invalid\n", pszInstr, pVmcs->u64GuestDr7.u));
     2080            pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_GuestDr7;
     2081            return VERR_VMX_VMENTRY_FAILED;
     2082        }
     2083
     2084        /* SYSENTER ESP and SYSENTER EIP. */
     2085        if (   X86_IS_CANONICAL(pVmcs->u64GuestSysenterEsp.u)
     2086            && X86_IS_CANONICAL(pVmcs->u64GuestSysenterEip.u))
     2087        { /* likely */ }
     2088        else
     2089        {
     2090            Log(("%s: Guest Sysenter ESP (%#RX64) / EIP (%#RX64) not canonical -> VM-exit\n", pszInstr,
     2091                 pVmcs->u64GuestSysenterEsp.u, pVmcs->u64GuestSysenterEip.u));
     2092            pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_GuestSysenterEspEip;
     2093            return VERR_VMX_VMENTRY_FAILED;
     2094        }
     2095    }
     2096
     2097    Assert(!(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PERF_MSR));  /* We don't support loading IA32_PERF_GLOBAL_CTRL MSR yet. */
     2098
     2099    /* PAT MSR. */
     2100    if (   (pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PAT_MSR)
     2101        && !CPUMIsPatMsrValid(pVmcs->u64GuestPatMsr.u))
     2102    {
     2103        Log(("%s: Guest PAT MSR (%#RX64) invalid\n", pszInstr, pVmcs->u64GuestPatMsr.u));
     2104        pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_GuestPatMsr;
     2105        return VERR_VMX_VMENTRY_FAILED;
     2106    }
     2107
     2108    /* EFER MSR. */
     2109    uint64_t const uValidEferMask = CPUMGetGuestEferMsrValidMask(pVCpu->CTX_SUFF(pVM));
     2110    if (   (pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_EFER_MSR)
     2111        && (pVmcs->u64GuestEferMsr.u & ~uValidEferMask))
     2112    {
     2113        Log(("%s: Guest EFER MSR (%#RX64) reserved bits set\n", pszInstr, pVmcs->u64GuestEferMsr.u));
     2114        pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_GuestEferMsrRsvd;
     2115        return VERR_VMX_VMENTRY_FAILED;
     2116    }
     2117    bool const fGstInLongMode = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
     2118    bool const fGstLma        = RT_BOOL(pVmcs->u64GuestEferMsr.u & MSR_K6_EFER_BIT_LMA);
     2119    bool const fGstLme        = RT_BOOL(pVmcs->u64GuestEferMsr.u & MSR_K6_EFER_BIT_LME);
     2120    if (   fGstInLongMode == fGstLma
     2121        && (   !(pVmcs->u64GuestCr0.u & X86_CR0_PG)
     2122            || fGstLma == fGstLme))
     2123    { /* likely */ }
     2124    else
     2125    {
     2126        Log(("%s: Guest EFER MSR (%#RX64) invalid\n", pszInstr, pVmcs->u64GuestEferMsr.u));
     2127        pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_GuestEferMsr;
     2128        return VERR_VMX_VMENTRY_FAILED;
     2129    }
     2130
     2131    Assert(!(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_BNDCFGS_MSR));   /* We don't support loading IA32_BNDCFGS MSR yet. */
     2132
     2133    return VINF_SUCCESS;
     2134}
     2135
     2136
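The guest EFER check added above encodes the SDM consistency rule: the "IA-32e mode guest" VM-entry control must equal EFER.LMA, and when CR0.PG is set, EFER.LMA must also equal EFER.LME (the host-state variant further below requires LMA == LME == "host address-space size" unconditionally). A minimal standalone sketch of the guest rule (illustrative names, not the VBox API):

    #include <stdbool.h>
    #include <stdint.h>

    /* Sketch of the VM-entry guest EFER consistency rule (Intel SDM 26.3.1.1).
       Bit positions: EFER.LME = bit 8, EFER.LMA = bit 10, CR0.PG = bit 31. */
    static bool IsGuestEferConsistent(bool fIa32eModeGuest, uint64_t uEfer, uint64_t uCr0)
    {
        bool const fLma = (uEfer >> 10) & 1;
        bool const fLme = (uEfer >>  8) & 1;
        bool const fPg  = (uCr0  >> 31) & 1;
        return fIa32eModeGuest == fLma && (!fPg || fLma == fLme);
    }
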
     2137/**
     2138 * Checks host-state as part of VM-entry.
    19622139 *
    19632140 * @returns VBox status code.
     
    19792156        if (~pVmcs->u64HostCr0.u & u64Cr0Fixed0)
    19802157        {
    1981             Log(("%s: Invalid host CR0 %#RX32 (fixed0) -> VMFail\n", pszInstr, pVmcs->u64HostCr0));
     2158            Log(("%s: Invalid host CR0 %#RX64 (fixed0) -> VMFail\n", pszInstr, pVmcs->u64HostCr0.u));
    19822159            pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_HostCr0Fixed0;
    19832160            return VERR_VMX_VMENTRY_FAILED;
     
    19882165        if (pVmcs->u64HostCr0.u & ~u64Cr0Fixed1)
    19892166        {
    1990             Log(("%s: Invalid host CR0 %#RX32 (fixed1) -> VMFail\n", pszInstr, pVmcs->u64HostCr0));
     2167            Log(("%s: Invalid host CR0 %#RX64 (fixed1) -> VMFail\n", pszInstr, pVmcs->u64HostCr0.u));
    19912168            pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_HostCr0Fixed1;
    19922169            return VERR_VMX_VMENTRY_FAILED;
     
    20002177        if (~pVmcs->u64HostCr4.u & u64Cr4Fixed0)
    20012178        {
    2002             Log(("%s: Invalid host CR4 %#RX64 (fixed0) -> VMFail\n", pszInstr, pVmcs->u64HostCr4));
     2179            Log(("%s: Invalid host CR4 %#RX64 (fixed0) -> VMFail\n", pszInstr, pVmcs->u64HostCr4.u));
    20032180            pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_HostCr4Fixed0;
    20042181            return VERR_VMX_VMENTRY_FAILED;
     
    20092186        if (pVmcs->u64HostCr4.u & ~u64Cr4Fixed1)
    20102187        {
    2011             Log(("%s: Invalid host CR4 %#RX64 (fixed1) -> VMFail\n", pszInstr, pVmcs->u64HostCr4));
     2188            Log(("%s: Invalid host CR4 %#RX64 (fixed1) -> VMFail\n", pszInstr, pVmcs->u64HostCr4.u));
    20122189            pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_HostCr4Fixed1;
    20132190            return VERR_VMX_VMENTRY_FAILED;
     
    20182195    {
    20192196        /* CR3 reserved bits. */
    2020         if (pVmcs->u64HostCr3.u >> IEM_VMX_MAX_PHYSADDR_WIDTH(pVCpu))
    2021         {
    2022             Log(("%s: Invalid host CR3 %#RX64 -> VMFail\n", pszInstr, pVmcs->u64HostCr3));
     2197        if (pVmcs->u64HostCr3.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cMaxPhysAddrWidth)
     2198        {
     2199            Log(("%s: Invalid host CR3 %#RX64 -> VMFail\n", pszInstr, pVmcs->u64HostCr3.u));
    20232200            pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_HostCr3;
    20242201            return VERR_VMX_VMENTRY_FAILED;
     
    20522229    uint64_t const uValidEferMask = CPUMGetGuestEferMsrValidMask(pVCpu->CTX_SUFF(pVM));
    20532230    if (   (pVmcs->u32ExitCtls & VMX_EXIT_CTLS_LOAD_EFER_MSR)
    2054         && (pVmcs->u64GuestEferMsr.u & ~uValidEferMask))
     2231        && (pVmcs->u64HostEferMsr.u & ~uValidEferMask))
    20552232    {
    20562233        Log(("%s: Host EFER MSR (%#RX64) reserved bits set\n", pszInstr, pVmcs->u64HostEferMsr.u));
    2057         pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_HostEferMsr;
     2234        pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_HostEferMsrRsvd;
    20582235        return VERR_VMX_VMENTRY_FAILED;
    20592236    }
    2060     bool const fVirtHostInLongMode    = RT_BOOL(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE);
    2061     bool const fNstGstLongModeActive  = RT_BOOL(pVmcs->u64GuestEferMsr.u & MSR_K6_EFER_BIT_LMA);
    2062     bool const fNstGstLongModeEnabled = RT_BOOL(pVmcs->u64GuestEferMsr.u & MSR_K6_EFER_BIT_LME);
    2063     if (   fVirtHostInLongMode == fNstGstLongModeActive
    2064         && fVirtHostInLongMode == fNstGstLongModeEnabled)
     2237    bool const fHostInLongMode = RT_BOOL(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE);
     2238    bool const fHostLma        = RT_BOOL(pVmcs->u64HostEferMsr.u & MSR_K6_EFER_BIT_LMA);
     2239    bool const fHostLme        = RT_BOOL(pVmcs->u64HostEferMsr.u & MSR_K6_EFER_BIT_LME);
     2240    if (   fHostInLongMode == fHostLma
     2241        && fHostInLongMode == fHostLme)
    20652242    { /* likely */ }
    20662243    else
    20672244    {
    20682245        Log(("%s: Host EFER MSR (%#RX64) LMA, LME, host addr-space size mismatch\n", pszInstr, pVmcs->u64HostEferMsr.u));
    2069         pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_HostAddrSpace;
     2246        pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_HostEferMsr;
    20702247        return VERR_VMX_VMENTRY_FAILED;
    20712248    }
     
    21032280
    21042281    /* SS cannot be 0 if 32-bit host. */
    2105     if (   fVirtHostInLongMode
     2282    if (   fHostInLongMode
    21062283        || pVmcs->HostSs)
    21072284    { /* likely */ }
     
    21342311     * See Intel spec. 26.2.4 "Checks Related to Address-Space Size".
    21352312     */
     2313    bool const fGstInLongMode = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
    21362314    if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
    21372315    {
    2138         bool const fGstInLongMode    = CPUMIsGuestInLongMode(pVCpu);
    2139         bool const fNstGstInLongMode = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
     2316        bool const fCpuInLongMode = CPUMIsGuestInLongMode(pVCpu);
    21402317
    21412318        /* Logical processor in IA-32e mode. */
    2142         if (fGstInLongMode)
    2143         {
    2144             if (fVirtHostInLongMode)
     2319        if (fCpuInLongMode)
     2320        {
     2321            if (fHostInLongMode)
    21452322            {
    21462323                /* PAE must be set. */
     
    21742351        {
    21752352            /* Logical processor is outside IA-32e mode. */
    2176             if (   !fNstGstInLongMode
    2177                 && !fVirtHostInLongMode)
     2353            if (   !fGstInLongMode
     2354                && !fHostInLongMode)
    21782355            {
    21792356                /* PCIDE should not be set. */
     
    22082385    {
    22092386        /* Host address-space size for 32-bit CPUs. */
    2210         bool const fNstGstInLongMode = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
    2211         if (   !fNstGstInLongMode
    2212             && !fVirtHostInLongMode)
     2387        if (   !fGstInLongMode
     2388            && !fHostInLongMode)
    22132389        { /* likely */ }
    22142390        else
     
    23402516    {
    23412517        if (   (pVmcs->u64AddrEntryMsrLoad.u & VMX_AUTOMSR_OFFSET_MASK)
    2342             || (pVmcs->u64AddrEntryMsrLoad.u >> IEM_VMX_MAX_PHYSADDR_WIDTH(pVCpu))
     2518            || (pVmcs->u64AddrEntryMsrLoad.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
    23432519            || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrEntryMsrLoad.u))
    23442520        {
     
    23982574    {
    23992575        if (   (pVmcs->u64AddrExitMsrStore.u & VMX_AUTOMSR_OFFSET_MASK)
    2400             || (pVmcs->u64AddrExitMsrStore.u >> IEM_VMX_MAX_PHYSADDR_WIDTH(pVCpu))
     2576            || (pVmcs->u64AddrExitMsrStore.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
    24012577            || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrExitMsrStore.u))
    24022578        {
     
    24112587    {
    24122588        if (   (pVmcs->u64AddrExitMsrLoad.u & VMX_AUTOMSR_OFFSET_MASK)
    2413             || (pVmcs->u64AddrExitMsrLoad.u >> IEM_VMX_MAX_PHYSADDR_WIDTH(pVCpu))
     2589            || (pVmcs->u64AddrExitMsrLoad.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
    24142590            || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrExitMsrLoad.u))
    24152591        {
     
    25162692    {
    25172693        if (   (pVmcs->u64AddrIoBitmapA.u & X86_PAGE_4K_OFFSET_MASK)
    2518             || (pVmcs->u64AddrIoBitmapA.u >> IEM_VMX_MAX_PHYSADDR_WIDTH(pVCpu))
     2694            || (pVmcs->u64AddrIoBitmapA.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
    25192695            || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrIoBitmapA.u))
    25202696        {
     
    25252701
    25262702        if (   (pVmcs->u64AddrIoBitmapB.u & X86_PAGE_4K_OFFSET_MASK)
    2527             || (pVmcs->u64AddrIoBitmapB.u >> IEM_VMX_MAX_PHYSADDR_WIDTH(pVCpu))
     2703            || (pVmcs->u64AddrIoBitmapB.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
    25282704            || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrIoBitmapB.u))
    25292705        {
     
    25382714    {
    25392715        if (   (pVmcs->u64AddrMsrBitmap.u & X86_PAGE_4K_OFFSET_MASK)
    2540             || (pVmcs->u64AddrMsrBitmap.u >> IEM_VMX_MAX_PHYSADDR_WIDTH(pVCpu))
     2716            || (pVmcs->u64AddrMsrBitmap.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
    25412717            || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrMsrBitmap.u))
    25422718        {
     
    25532729        RTGCPHYS GCPhysVirtApic = pVmcs->u64AddrVirtApic.u;
    25542730        if (   (GCPhysVirtApic & X86_PAGE_4K_OFFSET_MASK)
    2555             || (GCPhysVirtApic >> IEM_VMX_MAX_PHYSADDR_WIDTH(pVCpu))
     2731            || (GCPhysVirtApic >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
    25562732            || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVirtApic))
    25572733        {
     
    26482824        RTGCPHYS GCPhysApicAccess = pVmcs->u64AddrApicAccess.u;
    26492825        if (   (GCPhysApicAccess & X86_PAGE_4K_OFFSET_MASK)
    2650             || (GCPhysApicAccess >> IEM_VMX_MAX_PHYSADDR_WIDTH(pVCpu))
     2826            || (GCPhysApicAccess >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
    26512827            || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysApicAccess))
    26522828        {
     
    26972873        RTGCPHYS GCPhysVmreadBitmap = pVmcs->u64AddrVmreadBitmap.u;
    26982874        if (   ( GCPhysVmreadBitmap & X86_PAGE_4K_OFFSET_MASK)
    2699             || ( GCPhysVmreadBitmap >> IEM_VMX_MAX_PHYSADDR_WIDTH(pVCpu))
     2875            || ( GCPhysVmreadBitmap >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
    27002876            || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmreadBitmap))
    27012877        {
     
    27082884        RTGCPHYS GCPhysVmwriteBitmap = pVmcs->u64AddrVmreadBitmap.u;
    27092885        if (   ( GCPhysVmwriteBitmap & X86_PAGE_4K_OFFSET_MASK)
    2710             || ( GCPhysVmwriteBitmap >> IEM_VMX_MAX_PHYSADDR_WIDTH(pVCpu))
     2886            || ( GCPhysVmwriteBitmap >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
    27112887            || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmwriteBitmap))
    27122888        {
     
    28903066    }
    28913067
     3068    /*
     3069     * Check guest-state fields.
     3070     */
     3071    rc = iemVmxVmentryCheckGuestState(pVCpu, pszInstr);
     3072    if (rc == VINF_SUCCESS)
     3073    { /* likely */ }
     3074    else
     3075    {
     3076        /* VMExit. */
     3077        return VINF_SUCCESS;
     3078    }
     3079
     3080
    28923081    pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_Success;
    28933082    iemVmxVmSucceed(pVCpu);
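
Throughout IEMAllCImplVmxInstr.cpp.h the IEM_VMX_MAX_PHYSADDR_WIDTH wrapper is dropped in favour of reading cVmxMaxPhysAddrWidth from the guest CPU features directly. The check idiom itself is unchanged: shifting a guest-physical address right by the supported width yields non-zero exactly when the address has bits set at or above that width. A standalone sketch (illustrative names):

    #include <stdbool.h>
    #include <stdint.h>

    /* Sketch of the physical-address-width check applied to the VMCS, VMXON
       and bitmap pointers. Assumes cMaxPhysAddrWidth < 64 (real CPUs report
       at most 52), since shifting a 64-bit value by 64 is undefined. */
    static bool IsBelowPhysAddrWidth(uint64_t GCPhys, unsigned cMaxPhysAddrWidth)
    {
        return (GCPhys >> cMaxPhysAddrWidth) == 0;
    }

For example, with a 36-bit width, GCPhys = 0x1000000000 (bit 36 set) shifts to 1 and fails the check, while 0xFFFFFFFFF passes.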