Timestamp:
    Sep 7, 2018 6:17:07 AM (7 years ago)
svn:sync-xref-src-repo-rev:
    124901
Location:
    trunk/src/VBox/VMM/VMMAll
Files:
    2 edited
trunk/src/VBox/VMM/VMMAll/HMVMXAll.cpp
--- trunk/src/VBox/VMM/VMMAll/HMVMXAll.cpp (r74114)
+++ trunk/src/VBox/VMM/VMMAll/HMVMXAll.cpp (r74133)
@@ -142,5 +142,12 @@
     VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_EntryCtlsAllowed1      , "EntryCtlsAllowed1"      ),
     VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_EntryCtlsDisallowed0   , "EntryCtlsDisallowed0"   ),
-    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_HostAddrSpace          , "HostAddrSpace"          ),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_EntryInstrLen          , "EntryInstrLen"          ),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_EntryInstrLenZero      , "EntryInstrLenZero"      ),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_EntryIntInfoErrCodePe  , "EntryIntInfoErrCodePe"  ),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_EntryIntInfoErrCodeVec , "EntryIntInfoErrCodeVec" ),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_EntryIntInfoTypeVecRsvd, "EntryIntInfoTypeVecRsvd"),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_EntryXcptErrCodeRsvd   , "EntryXcptErrCodeRsvd"   ),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_ExitCtlsAllowed1       , "ExitCtlsAllowed1"       ),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_ExitCtlsDisallowed0    , "ExitCtlsDisallowed0"    ),
     VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_HostCr0Fixed0          , "HostCr0Fixed0"          ),
     VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_HostCr0Fixed1          , "HostCr0Fixed1"          ),
@@ -152,4 +159,5 @@
     VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_HostCsTr               , "HostCsTr"               ),
     VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_HostEferMsr            , "HostEferMsr"            ),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_HostEferMsrRsvd        , "HostEferMsrRsvd"        ),
     VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_HostGuestLongMode      , "HostGuestLongMode"      ),
     VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_HostGuestLongModeNoCpu , "HostGuestLongModeNoCpu" ),
@@ -162,12 +170,18 @@
     VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_HostSs                 , "HostSs"                 ),
     VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_HostSysenterEspEip     , "HostSysenterEspEip"     ),
-    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_EntryInstrLen          , "EntryInstrLen"          ),
-    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_EntryInstrLenZero      , "EntryInstrLenZero"      ),
-    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_EntryIntInfoErrCodePe  , "EntryIntInfoErrCodePe"  ),
-    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_EntryIntInfoErrCodeVec , "EntryIntInfoErrCodeVec" ),
-    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_EntryIntInfoTypeVecRsvd, "EntryIntInfoTypeVecRsvd"),
-    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_EntryXcptErrCodeRsvd   , "EntryXcptErrCodeRsvd"   ),
-    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_ExitCtlsAllowed1       , "ExitCtlsAllowed1"       ),
-    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_ExitCtlsDisallowed0    , "ExitCtlsDisallowed0"    ),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestCr0Fixed0         , "GuestCr0Fixed0"         ),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestCr0Fixed1         , "GuestCr0Fixed1"         ),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestCr0PgPe           , "GuestCr0PgPe"           ),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestCr3               , "GuestCr3"               ),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestCr4Fixed0         , "GuestCr4Fixed0"         ),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestCr4Fixed1         , "GuestCr4Fixed1"         ),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestDebugCtl          , "GuestDebugCtl"          ),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestDr7               , "GuestDr7"               ),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestEferMsr           , "GuestEferMsr"           ),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestEferMsrRsvd       , "GuestEferMsrRsvd"       ),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestPae               , "GuestPae"               ),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestPatMsr            , "GuestPatMsr"            ),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestPcide             , "GuestPcide"             ),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestSysenterEspEip    , "GuestSysenterEspEip"    ),
     VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_LongModeCS             , "LongModeCS"             ),
     VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_NmiWindowExit          , "NmiWindowExit"          ),
@@ -591,5 +605,5 @@
     {
         /* We ignore the PE & PG bits here on purpose; we emulate real and protected mode without paging. */
-        uCr0Mask &= ~(X86_CR0_PG |X86_CR0_PE);
+        uCr0Mask &= ~(X86_CR0_PG | X86_CR0_PE);
    }
    else
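The table above pairs each kVmxVInstrDiag_* enum value with a human-readable name for logging; this changeset moves the Entry*/Exit* rows and inserts the new Guest* rows so the table keeps matching the enum order. Below is a minimal, self-contained sketch of that descriptor-table pattern; the names and lookup helper are hypothetical, not VirtualBox code:

/* Sketch of an enum-to-string diagnostic table (hypothetical names). */
#include <stdio.h>

typedef enum DEMODIAG
{
    kDemoDiag_EntryInstrLen = 0,
    kDemoDiag_GuestCr0Fixed0,
    kDemoDiag_HostEferMsrRsvd,
    kDemoDiag_End
} DEMODIAG;

/* Pair an enum value with its string, mirroring VMX_INSTR_DIAG_DESC. */
#define DEMO_DIAG_DESC(a_Enum, a_szName)    { a_Enum, a_szName }

static const struct { DEMODIAG enmDiag; const char *pszName; } g_aDiagDesc[] =
{
    DEMO_DIAG_DESC(kDemoDiag_EntryInstrLen,   "EntryInstrLen"),
    DEMO_DIAG_DESC(kDemoDiag_GuestCr0Fixed0,  "GuestCr0Fixed0"),
    DEMO_DIAG_DESC(kDemoDiag_HostEferMsrRsvd, "HostEferMsrRsvd"),
};

static const char *demoDiagToString(DEMODIAG enmDiag)
{
    /* Entries are kept in enum order, so the enum value indexes the table
       directly; keeping the rows sorted alongside the enum is presumably why
       the changeset reorders them. */
    if ((unsigned)enmDiag < sizeof(g_aDiagDesc) / sizeof(g_aDiagDesc[0]))
        return g_aDiagDesc[enmDiag].pszName;
    return "Unknown";
}

int main(void)
{
    printf("%s\n", demoDiagToString(kDemoDiag_GuestCr0Fixed0));
    return 0;
}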
trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h
--- trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h (r74130)
+++ trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h (r74133)
@@ -360,7 +360,4 @@
 } while (0)
 # endif /* !IEM_WITH_CODE_TLB */
-
-/** The maximum physical address width in bits. */
-#define IEM_VMX_MAX_PHYSADDR_WIDTH(a_pVCpu)     (IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->cVmxMaxPhysAddrWidth)
 
 /** Whether a shadow VMCS is present for the given VCPU. */
@@ -1538,5 +1535,5 @@
 
     /* VMCS physical-address width limits. */
-    if (GCPhysVmcs >> IEM_VMX_MAX_PHYSADDR_WIDTH(pVCpu))
+    if (GCPhysVmcs >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
     {
         Log(("vmclear: VMCS pointer extends beyond physical-address width -> VMFail()\n"));
@@ -1697,5 +1694,5 @@
 
     /* VMCS physical-address width limits. */
-    if (GCPhysVmcs >> IEM_VMX_MAX_PHYSADDR_WIDTH(pVCpu))
+    if (GCPhysVmcs >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
     {
         Log(("vmptrld: VMCS pointer extends beyond physical-address width -> VMFail()\n"));
@@ -1864,5 +1861,5 @@
 
     /* VMXON physical-address width limits. */
-    if (GCPhysVmxon >> IEM_VMX_MAX_PHYSADDR_WIDTH(pVCpu))
+    if (GCPhysVmxon >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
     {
         Log(("vmxon: VMXON region pointer extends beyond physical-address width -> VMFailInvalid\n"));
@@ -1959,5 +1956,185 @@
 
 /**
- * Checks host state as part of VM-entry.
+ * Checks guest-state as part of VM-entry.
+ *
+ * @returns VBox status code.
+ * @param   pVCpu       The cross context virtual CPU structure.
+ * @param   pszInstr    The VMX instruction name (for logging purposes).
+ */
+IEM_STATIC int iemVmxVmentryCheckGuestState(PVMCPU pVCpu, const char *pszInstr)
+{
+    PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
+
+    /*
+     * Guest Control Registers, Debug Registers, and MSRs.
+     * See Intel spec. 26.3.1.1 "Checks on Guest Control Registers, Debug Registers, and MSRs".
+     */
+    bool const fUnrestrictedGuest = pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST;
+    /* CR0 reserved bits. */
+    {
+        /* CR0 MB1 bits. */
+        uint64_t u64Cr0Fixed0 = CPUMGetGuestIa32VmxCr0Fixed0(pVCpu);
+        Assert(u64Cr0Fixed0 & (X86_CR0_NW | X86_CR0_CD));
+        if (fUnrestrictedGuest)
+            u64Cr0Fixed0 &= ~(X86_CR0_PE | X86_CR0_PG);
+        if (~pVmcs->u64GuestCr0.u & u64Cr0Fixed0)
+        {
+            Log(("%s: Invalid guest CR0 %#RX32 (fixed0) -> VM-exit\n", pszInstr, pVmcs->u64GuestCr0.u));
+            pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_GuestCr0Fixed0;
+            return VERR_VMX_VMENTRY_FAILED;
+        }
+
+        /* CR0 MBZ bits. */
+        uint64_t const u64Cr0Fixed1 = CPUMGetGuestIa32VmxCr0Fixed1(pVCpu);
+        if (pVmcs->u64GuestCr0.u & ~u64Cr0Fixed1)
+        {
+            Log(("%s: Invalid guest CR0 %#RX64 (fixed1) -> VM-exit\n", pszInstr, pVmcs->u64GuestCr0.u));
+            pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_GuestCr0Fixed1;
+            return VERR_VMX_VMENTRY_FAILED;
+        }
+
+        /* Without unrestricted guest support, VT-x supports does not support unpaged protected mode. */
+        if (   !fUnrestrictedGuest
+            && (pVmcs->u64GuestCr0.u & X86_CR0_PG)
+            && !(pVmcs->u64GuestCr0.u & X86_CR0_PE))
+        {
+            Log(("%s: Invalid guest CR0.PG and CR0.PE combination %#RX64 -> VM-exit\n", pszInstr, pVmcs->u64GuestCr0.u));
+            pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_GuestCr0PgPe;
+            return VERR_VMX_VMENTRY_FAILED;
+        }
+    }
+
+    /* CR4 reserved bits. */
+    {
+        /* CR4 MB1 bits. */
+        uint64_t const u64Cr4Fixed0 = CPUMGetGuestIa32VmxCr4Fixed0(pVCpu);
+        if (~pVmcs->u64GuestCr4.u & u64Cr4Fixed0)
+        {
+            Log(("%s: Invalid host CR4 %#RX64 (fixed0) -> VM-exit\n", pszInstr, pVmcs->u64GuestCr4.u));
+            pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_GuestCr4Fixed0;
+            return VERR_VMX_VMENTRY_FAILED;
+        }
+
+        /* CR4 MBZ bits. */
+        uint64_t const u64Cr4Fixed1 = CPUMGetGuestIa32VmxCr4Fixed1(pVCpu);
+        if (pVmcs->u64GuestCr4.u & ~u64Cr4Fixed1)
+        {
+            Log(("%s: Invalid host CR4 %#RX64 (fixed1) -> VM-exit\n", pszInstr, pVmcs->u64GuestCr4.u));
+            pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_GuestCr4Fixed1;
+            return VERR_VMX_VMENTRY_FAILED;
+        }
+    }
+
+    /* DEBUGCTL MSR. */
+    if (   (pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
+        && (pVmcs->u64GuestDebugCtlMsr.u & ~MSR_IA32_DEBUGCTL_VALID_MASK_INTEL))
+    {
+        Log(("%s: DEBUGCTL MSR (%#RX64) reserved bits set -> VM-exit\n", pszInstr, pVmcs->u64GuestDebugCtlMsr.u));
+        pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_GuestDebugCtl;
+        return VERR_VMX_VMENTRY_FAILED;
+    }
+
+    /* 64-bit CPU checks. */
+    if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
+    {
+        bool const fGstInLongMode = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
+        if (fGstInLongMode)
+        {
+            /* PAE must be set. */
+            if (   (pVmcs->u64GuestCr0.u & X86_CR0_PG)
+                && (pVmcs->u64GuestCr0.u & X86_CR4_PAE))
+            { /* likely */ }
+            else
+            {
+                Log(("%s: Guest PAE not set when guest is in long mode\n", pszInstr));
+                pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_GuestPae;
+                return VERR_VMX_VMENTRY_FAILED;
+            }
+        }
+        else
+        {
+            /* PCIDE should not be set. */
+            if (!(pVmcs->u64GuestCr4.u & X86_CR4_PCIDE))
+            { /* likely */ }
+            else
+            {
+                Log(("%s: Guest PCIDE set when guest is not in long mode\n", pszInstr));
+                pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_GuestPcide;
+                return VERR_VMX_VMENTRY_FAILED;
+            }
+        }
+
+        /* CR3. */
+        if (pVmcs->u64GuestCr3.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cMaxPhysAddrWidth)
+        {
+            Log(("%s: Guest CR3 (%#RX64) invalid\n", pszInstr, pVmcs->u64GuestCr3.u));
+            pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_GuestCr3;
+            return VERR_VMX_VMENTRY_FAILED;
+        }
+
+        /* DR7. */
+        if (   (pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
+            && (pVmcs->u64GuestDr7.u & X86_DR7_MBZ_MASK))
+        {
+            Log(("%s: Guest DR7 (%#RX64) invalid", pszInstr, pVmcs->u64GuestDr7.u));
+            pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_GuestDr7;
+            return VERR_VMX_VMENTRY_FAILED;
+        }
+
+        /* SYSENTER ESP and SYSENTER EIP. */
+        if (   X86_IS_CANONICAL(pVmcs->u64GuestSysenterEsp.u)
+            && X86_IS_CANONICAL(pVmcs->u64GuestSysenterEip.u))
+        { /* likely */ }
+        else
+        {
+            Log(("%s: Guest Sysenter ESP (%#RX64) / EIP (%#RX64) not canonical -> VMFail\n", pszInstr,
+                 pVmcs->u64GuestSysenterEsp.u, pVmcs->u64GuestSysenterEip.u));
+            pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_GuestSysenterEspEip;
+            return VERR_VMX_VMENTRY_FAILED;
+        }
+    }
+
+    Assert(!(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PERF_MSR));    /* We don't support loading IA32_PERF_GLOBAL_CTRL MSR yet. */
+
+    /* PAT MSR. */
+    if (   (pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PAT_MSR)
+        && !CPUMIsPatMsrValid(pVmcs->u64GuestPatMsr.u))
+    {
+        Log(("%s: Guest PAT MSR (%#RX64) invalid\n", pszInstr, pVmcs->u64GuestPatMsr.u));
+        pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_GuestPatMsr;
+        return VERR_VMX_VMENTRY_FAILED;
+    }
+
+    /* EFER MSR. */
+    uint64_t const uValidEferMask = CPUMGetGuestEferMsrValidMask(pVCpu->CTX_SUFF(pVM));
+    if (   (pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_EFER_MSR)
+        && (pVmcs->u64GuestEferMsr.u & ~uValidEferMask))
+    {
+        Log(("%s: Guest EFER MSR (%#RX64) reserved bits set\n", pszInstr, pVmcs->u64GuestEferMsr.u));
+        pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_GuestEferMsrRsvd;
+        return VERR_VMX_VMENTRY_FAILED;
+    }
+    bool const fGstInLongMode = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
+    bool const fGstLma        = RT_BOOL(pVmcs->u64HostEferMsr.u & MSR_K6_EFER_BIT_LMA);
+    bool const fGstLme        = RT_BOOL(pVmcs->u64HostEferMsr.u & MSR_K6_EFER_BIT_LME);
+    if (   fGstInLongMode == fGstLma
+        && (   !(pVmcs->u64GuestCr0.u & X86_CR0_PG)
+            || fGstLma == fGstLme))
+    { /* likely */ }
+    else
+    {
+        Log(("%s: Guest EFER MSR (%#RX64) invalid\n", pszInstr, pVmcs->u64GuestEferMsr.u));
+        pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_GuestEferMsr;
+        return VERR_VMX_VMENTRY_FAILED;
+    }
+
+    Assert(!(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_BNDCFGS_MSR)); /* We don't support loading IA32_BNDCFGS MSR yet. */
+
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Checks host-state as part of VM-entry.
  *
  * @returns VBox status code.
@@ -1979,5 +2156,5 @@
     if (~pVmcs->u64HostCr0.u & u64Cr0Fixed0)
     {
-        Log(("%s: Invalid host CR0 %#RX32 (fixed0) -> VMFail\n", pszInstr, pVmcs->u64HostCr0));
+        Log(("%s: Invalid host CR0 %#RX64 (fixed0) -> VMFail\n", pszInstr, pVmcs->u64HostCr0.u));
         pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_HostCr0Fixed0;
         return VERR_VMX_VMENTRY_FAILED;
@@ -1988,5 +2165,5 @@
     if (pVmcs->u64HostCr0.u & ~u64Cr0Fixed1)
     {
-        Log(("%s: Invalid host CR0 %#RX32 (fixed1) -> VMFail\n", pszInstr, pVmcs->u64HostCr0));
+        Log(("%s: Invalid host CR0 %#RX64 (fixed1) -> VMFail\n", pszInstr, pVmcs->u64HostCr0.u));
         pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_HostCr0Fixed1;
         return VERR_VMX_VMENTRY_FAILED;
@@ -2000,5 +2177,5 @@
     if (~pVmcs->u64HostCr4.u & u64Cr4Fixed0)
     {
-        Log(("%s: Invalid host CR4 %#RX64 (fixed0) -> VMFail\n", pszInstr, pVmcs->u64HostCr4));
+        Log(("%s: Invalid host CR4 %#RX64 (fixed0) -> VMFail\n", pszInstr, pVmcs->u64HostCr4.u));
         pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_HostCr4Fixed0;
         return VERR_VMX_VMENTRY_FAILED;
@@ -2009,5 +2186,5 @@
     if (pVmcs->u64HostCr4.u & ~u64Cr4Fixed1)
     {
-        Log(("%s: Invalid host CR4 %#RX64 (fixed1) -> VMFail\n", pszInstr, pVmcs->u64HostCr4));
+        Log(("%s: Invalid host CR4 %#RX64 (fixed1) -> VMFail\n", pszInstr, pVmcs->u64HostCr4.u));
         pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_HostCr4Fixed1;
         return VERR_VMX_VMENTRY_FAILED;
@@ -2018,7 +2195,7 @@
     {
         /* CR3 reserved bits. */
-        if (pVmcs->u64HostCr3.u >> IEM_VMX_MAX_PHYSADDR_WIDTH(pVCpu))
+        if (pVmcs->u64HostCr3.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cMaxPhysAddrWidth)
         {
-            Log(("%s: Invalid host CR3 %#RX64 -> VMFail\n", pszInstr, pVmcs->u64HostCr3));
+            Log(("%s: Invalid host CR3 %#RX64 -> VMFail\n", pszInstr, pVmcs->u64HostCr3.u));
             pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_HostCr3;
             return VERR_VMX_VMENTRY_FAILED;
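The CR0/CR4 checks in the new iemVmxVmentryCheckGuestState and in the host-state checks follow the standard VMX fixed-bits convention: every bit set in the FIXED0 MSR must be 1 in the register, and every bit clear in the FIXED1 MSR must be 0. A standalone illustration of the two tests, using made-up sample constants (not VirtualBox code):

/* Sketch of the VMX fixed0/fixed1 control-register test pattern. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool isCrValid(uint64_t uCr, uint64_t uFixed0, uint64_t uFixed1)
{
    if (~uCr & uFixed0)     /* A must-be-one bit is clear. */
        return false;
    if (uCr & ~uFixed1)     /* A must-be-zero bit is set. */
        return false;
    return true;
}

int main(void)
{
    /* Sample values only; real CPUs report these via the IA32_VMX_CR0_FIXED0/1 MSRs. */
    uint64_t const uFixed0 = UINT64_C(0x80000021);  /* PG, NE, PE required. */
    uint64_t const uFixed1 = UINT64_C(0xFFFFFFFF);  /* Only the low 32 bits allowed. */
    printf("%d\n", isCrValid(UINT64_C(0x80000021), uFixed0, uFixed1)); /* 1: valid */
    printf("%d\n", isCrValid(UINT64_C(0x00000001), uFixed0, uFixed1)); /* 0: PG clear */
    return 0;
}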
@@ -2052,20 +2229,20 @@
     uint64_t const uValidEferMask = CPUMGetGuestEferMsrValidMask(pVCpu->CTX_SUFF(pVM));
     if (   (pVmcs->u32ExitCtls & VMX_EXIT_CTLS_LOAD_EFER_MSR)
-        && (pVmcs->u64GuestEferMsr.u & ~uValidEferMask))
+        && (pVmcs->u64HostEferMsr.u & ~uValidEferMask))
     {
         Log(("%s: Host EFER MSR (%#RX64) reserved bits set\n", pszInstr, pVmcs->u64HostEferMsr.u));
-        pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_HostEferMsr;
+        pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_HostEferMsrRsvd;
         return VERR_VMX_VMENTRY_FAILED;
     }
-    bool const fVirtHostInLongMode    = RT_BOOL(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE);
-    bool const fNstGstLongModeActive  = RT_BOOL(pVmcs->u64GuestEferMsr.u & MSR_K6_EFER_BIT_LMA);
-    bool const fNstGstLongModeEnabled = RT_BOOL(pVmcs->u64GuestEferMsr.u & MSR_K6_EFER_BIT_LME);
-    if (   fVirtHostInLongMode == fNstGstLongModeActive
-        && fVirtHostInLongMode == fNstGstLongModeEnabled)
+    bool const fHostInLongMode = RT_BOOL(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE);
+    bool const fHostLma        = RT_BOOL(pVmcs->u64HostEferMsr.u & MSR_K6_EFER_BIT_LMA);
+    bool const fHostLme        = RT_BOOL(pVmcs->u64HostEferMsr.u & MSR_K6_EFER_BIT_LME);
+    if (   fHostInLongMode == fHostLma
+        && fHostInLongMode == fHostLme)
     { /* likely */ }
     else
     {
         Log(("%s: Host EFER MSR (%#RX64) LMA, LME, host addr-space size mismatch\n", pszInstr, pVmcs->u64HostEferMsr.u));
-        pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_HostAddrSpace;
+        pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_HostEferMsr;
         return VERR_VMX_VMENTRY_FAILED;
     }
@@ -2103,5 +2280,5 @@
 
     /* SS cannot be 0 if 32-bit host. */
-    if (   fVirtHostInLongMode
+    if (   fHostInLongMode
         || pVmcs->HostSs)
     { /* likely */ }
@@ -2134,13 +2311,13 @@
      * See Intel spec. 26.2.4 "Checks Related to Address-Space Size".
      */
+    bool const fGstInLongMode = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
     if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
     {
-        bool const fGstInLongMode    = CPUMIsGuestInLongMode(pVCpu);
-        bool const fNstGstInLongMode = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
+        bool const fCpuInLongMode = CPUMIsGuestInLongMode(pVCpu);
 
         /* Logical processor in IA-32e mode. */
-        if (fGstInLongMode)
-        {
-            if (fVirtHostInLongMode)
+        if (fCpuInLongMode)
+        {
+            if (fHostInLongMode)
             {
                 /* PAE must be set. */
@@ -2174,6 +2351,6 @@
         {
             /* Logical processor is outside IA-32e mode. */
-            if (   !fNstGstInLongMode
-                && !fVirtHostInLongMode)
+            if (   !fGstInLongMode
+                && !fHostInLongMode)
             {
                 /* PCIDE should not be set. */
@@ -2208,7 +2385,6 @@
     {
         /* Host address-space size for 32-bit CPUs. */
-        bool const fNstGstInLongMode = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
-        if (   !fNstGstInLongMode
-            && !fVirtHostInLongMode)
+        if (   !fGstInLongMode
+            && !fHostInLongMode)
         { /* likely */ }
         else
@@ -2340,5 +2516,5 @@
     {
         if (   (pVmcs->u64AddrEntryMsrLoad.u & VMX_AUTOMSR_OFFSET_MASK)
-            || (pVmcs->u64AddrEntryMsrLoad.u >> IEM_VMX_MAX_PHYSADDR_WIDTH(pVCpu))
+            || (pVmcs->u64AddrEntryMsrLoad.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
             || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrEntryMsrLoad.u))
         {
@@ -2398,5 +2574,5 @@
     {
         if (   (pVmcs->u64AddrExitMsrStore.u & VMX_AUTOMSR_OFFSET_MASK)
-            || (pVmcs->u64AddrExitMsrStore.u >> IEM_VMX_MAX_PHYSADDR_WIDTH(pVCpu))
+            || (pVmcs->u64AddrExitMsrStore.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
             || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrExitMsrStore.u))
         {
@@ -2411,5 +2587,5 @@
     {
         if (   (pVmcs->u64AddrExitMsrLoad.u & VMX_AUTOMSR_OFFSET_MASK)
-            || (pVmcs->u64AddrExitMsrLoad.u >> IEM_VMX_MAX_PHYSADDR_WIDTH(pVCpu))
+            || (pVmcs->u64AddrExitMsrLoad.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
             || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrExitMsrLoad.u))
         {
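All of these address checks use the same idiom that replaced the deleted IEM_VMX_MAX_PHYSADDR_WIDTH macro: shifting a guest-physical address right by the CPU's physical-address width yields a nonzero value exactly when the address has bits set above that width. A small self-contained sketch with a sample width (not VirtualBox code):

/* Sketch of the physical-address-width test used throughout this changeset. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool exceedsPhysAddrWidth(uint64_t GCPhys, uint8_t cMaxPhysAddrWidth)
{
    return (GCPhys >> cMaxPhysAddrWidth) != 0;
}

int main(void)
{
    uint8_t const cWidth = 36; /* e.g. a CPU with 36-bit physical addresses */
    printf("%d\n", exceedsPhysAddrWidth(UINT64_C(0x0000000FFFFFF000), cWidth)); /* 0: fits */
    printf("%d\n", exceedsPhysAddrWidth(UINT64_C(0x0000001000000000), cWidth)); /* 1: bit 36 set */
    return 0;
}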
@@ -2516,5 +2692,5 @@
     {
         if (   (pVmcs->u64AddrIoBitmapA.u & X86_PAGE_4K_OFFSET_MASK)
-            || (pVmcs->u64AddrIoBitmapA.u >> IEM_VMX_MAX_PHYSADDR_WIDTH(pVCpu))
+            || (pVmcs->u64AddrIoBitmapA.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
             || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrIoBitmapA.u))
         {
@@ -2525,5 +2701,5 @@
 
         if (   (pVmcs->u64AddrIoBitmapB.u & X86_PAGE_4K_OFFSET_MASK)
-            || (pVmcs->u64AddrIoBitmapB.u >> IEM_VMX_MAX_PHYSADDR_WIDTH(pVCpu))
+            || (pVmcs->u64AddrIoBitmapB.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
             || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrIoBitmapB.u))
         {
@@ -2538,5 +2714,5 @@
     {
         if (   (pVmcs->u64AddrMsrBitmap.u & X86_PAGE_4K_OFFSET_MASK)
-            || (pVmcs->u64AddrMsrBitmap.u >> IEM_VMX_MAX_PHYSADDR_WIDTH(pVCpu))
+            || (pVmcs->u64AddrMsrBitmap.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
             || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrMsrBitmap.u))
         {
@@ -2553,5 +2729,5 @@
         RTGCPHYS GCPhysVirtApic = pVmcs->u64AddrVirtApic.u;
         if (   (GCPhysVirtApic & X86_PAGE_4K_OFFSET_MASK)
-            || (GCPhysVirtApic >> IEM_VMX_MAX_PHYSADDR_WIDTH(pVCpu))
+            || (GCPhysVirtApic >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
             || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVirtApic))
         {
@@ -2648,5 +2824,5 @@
         RTGCPHYS GCPhysApicAccess = pVmcs->u64AddrApicAccess.u;
         if (   (GCPhysApicAccess & X86_PAGE_4K_OFFSET_MASK)
-            || (GCPhysApicAccess >> IEM_VMX_MAX_PHYSADDR_WIDTH(pVCpu))
+            || (GCPhysApicAccess >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
             || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysApicAccess))
         {
@@ -2697,5 +2873,5 @@
         RTGCPHYS GCPhysVmreadBitmap = pVmcs->u64AddrVmreadBitmap.u;
         if (   ( GCPhysVmreadBitmap & X86_PAGE_4K_OFFSET_MASK)
-            || ( GCPhysVmreadBitmap >> IEM_VMX_MAX_PHYSADDR_WIDTH(pVCpu))
+            || ( GCPhysVmreadBitmap >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
             || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmreadBitmap))
         {
@@ -2708,5 +2884,5 @@
         RTGCPHYS GCPhysVmwriteBitmap = pVmcs->u64AddrVmreadBitmap.u;
         if (   ( GCPhysVmwriteBitmap & X86_PAGE_4K_OFFSET_MASK)
-            || ( GCPhysVmwriteBitmap >> IEM_VMX_MAX_PHYSADDR_WIDTH(pVCpu))
+            || ( GCPhysVmwriteBitmap >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
             || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmwriteBitmap))
         {
@@ -2890,4 +3066,17 @@
     }
 
+    /*
+     * Check guest-state fields.
+     */
+    rc = iemVmxVmentryCheckGuestState(pVCpu, pszInstr);
+    if (rc == VINF_SUCCESS)
+    { /* likely */ }
+    else
+    {
+        /* VMExit. */
+        return VINF_SUCCESS;
+    }
+
+
     pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_Success;
     iemVmxVmSucceed(pVCpu);
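The renamed fHostInLongMode/fHostLma/fHostLme flags implement the consistency rule for the host EFER value checked above: the "host address-space size" VM-exit control must agree with both EFER.LMA and EFER.LME in the EFER that VM-exit will load. A toy model of that rule using the architectural EFER bit positions (not VirtualBox code):

/* Sketch of the host EFER LMA/LME/address-space-size consistency check. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EFER_LME    (UINT64_C(1) << 8)   /* Long Mode Enable */
#define EFER_LMA    (UINT64_C(1) << 10)  /* Long Mode Active */

static bool isHostEferConsistent(bool fHostInLongMode, uint64_t uHostEfer)
{
    bool const fLma = (uHostEfer & EFER_LMA) != 0;
    bool const fLme = (uHostEfer & EFER_LME) != 0;
    return fHostInLongMode == fLma && fHostInLongMode == fLme;
}

int main(void)
{
    printf("%d\n", isHostEferConsistent(true,  EFER_LMA | EFER_LME)); /* 1: consistent */
    printf("%d\n", isHostEferConsistent(true,  EFER_LME));            /* 0: LMA clear */
    printf("%d\n", isHostEferConsistent(false, 0));                   /* 1: consistent */
    return 0;
}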