Changeset 74151 in vbox for trunk/src/VBox/VMM/VMMAll
- Timestamp: Sep 8, 2018 5:42:01 AM
- Location: trunk/src/VBox/VMM/VMMAll
- Files: 2 edited
trunk/src/VBox/VMM/VMMAll/HMVMXAll.cpp
(diff r74133 → r74151)

 *********************************************************************************************************************************/
 #define VMX_INSTR_DIAG_DESC(a_Def, a_Desc)    #a_Def " - " #a_Desc
-static const char * const g_apszVmxInstrDiagDesc[kVmxVInstrDiag_Last] =
+static const char * const g_apszVmxInstrDiagDesc[] =
 {
     /* Internal processing errors. */
…
     VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_PtrAlign,        "PtrAlign"),
     VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_PtrMap,          "PtrMap"),
-    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_PtrPhysRead,     "PtrPhysRead"),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_PtrReadPhys,     "PtrReadPhys"),
     VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_PtrWidth,        "PtrWidth"),
     VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_RealOrV86Mode,   "RealOrV86Mode"),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_ShadowVmcs,      "ShadowVmcs"),
     VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_Success,         "Success"),
-    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_ShadowVmcs,      "ShadowVmcs"),
     VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_VmxAlreadyRoot,  "VmxAlreadyRoot"),
     VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_Vmxe,            "Vmxe"),
…
     VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_ExitCtlsAllowed1,    "ExitCtlsAllowed1"),
     VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_ExitCtlsDisallowed0, "ExitCtlsDisallowed0"),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestCr0Fixed0,          "GuestCr0Fixed0"),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestCr0Fixed1,          "GuestCr0Fixed1"),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestCr0PgPe,            "GuestCr0PgPe"),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestCr3,                "GuestCr3"),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestCr4Fixed0,          "GuestCr4Fixed0"),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestCr4Fixed1,          "GuestCr4Fixed1"),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestDebugCtl,           "GuestDebugCtl"),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestDr7,                "GuestDr7"),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestEferMsr,            "GuestEferMsr"),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestEferMsrRsvd,        "GuestEferMsrRsvd"),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestPae,                "GuestPae"),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestPatMsr,             "GuestPatMsr"),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestPcide,              "GuestPcide"),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestSegAttrCsDefBig,    "GuestSegAttrCsDefBig"),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestSegAttrCsDplEqSs,   "GuestSegAttrCsDplEqSs"),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestSegAttrCsDplLtSs,   "GuestSegAttrCsDplLtSs"),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestSegAttrCsDplZero,   "GuestSegAttrCsDplZero"),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestSegAttrCsType,      "GuestSegAttrCsType"),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestSegAttrCsTypeRead,  "GuestSegAttrCsTypeRead"),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestSegAttrDescTypeCs,  "GuestSegAttrDescTypeCs"),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestSegAttrDescTypeDs,  "GuestSegAttrDescTypeDs"),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestSegAttrDescTypeEs,  "GuestSegAttrDescTypeEs"),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestSegAttrDescTypeFs,  "GuestSegAttrDescTypeFs"),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestSegAttrDescTypeGs,  "GuestSegAttrDescTypeGs"),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestSegAttrDescTypeSs,  "GuestSegAttrDescTypeSs"),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestSegAttrDplRplCs,    "GuestSegAttrDplRplCs"),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestSegAttrDplRplDs,    "GuestSegAttrDplRplDs"),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestSegAttrDplRplEs,    "GuestSegAttrDplRplEs"),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestSegAttrDplRplFs,    "GuestSegAttrDplRplFs"),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestSegAttrDplRplGs,    "GuestSegAttrDplRplGs"),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestSegAttrDplRplSs,    "GuestSegAttrDplRplSs"),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestSegAttrGranCs,      "GuestSegAttrGranCs"),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestSegAttrGranDs,      "GuestSegAttrGranDs"),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestSegAttrGranEs,      "GuestSegAttrGranEs"),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestSegAttrGranFs,      "GuestSegAttrGranFs"),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestSegAttrGranGs,      "GuestSegAttrGranGs"),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestSegAttrGranSs,      "GuestSegAttrGranSs"),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestSegAttrLdtrDescType, "GuestSegAttrLdtrDescType"),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestSegAttrLdtrGran,    "GuestSegAttrLdtrGran"),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestSegAttrLdtrPresent, "GuestSegAttrLdtrPresent"),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestSegAttrLdtrRsvd,    "GuestSegAttrLdtrRsvd"),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestSegAttrLdtrType,    "GuestSegAttrLdtrType"),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestSegAttrPresentCs,   "GuestSegAttrPresentCs"),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestSegAttrPresentDs,   "GuestSegAttrPresentDs"),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestSegAttrPresentEs,   "GuestSegAttrPresentEs"),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestSegAttrPresentFs,   "GuestSegAttrPresentFs"),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestSegAttrPresentGs,   "GuestSegAttrPresentGs"),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestSegAttrPresentSs,   "GuestSegAttrPresentSs"),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestSegAttrRsvdCs,      "GuestSegAttrRsvdCs"),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestSegAttrRsvdDs,      "GuestSegAttrRsvdDs"),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestSegAttrRsvdEs,      "GuestSegAttrRsvdEs"),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestSegAttrRsvdFs,      "GuestSegAttrRsvdFs"),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestSegAttrRsvdGs,      "GuestSegAttrRsvdGs"),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestSegAttrRsvdSs,      "GuestSegAttrRsvdSs"),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestSegAttrSsDplEqRpl,  "GuestSegAttrSsDplEqRpl"),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestSegAttrSsDplZero,   "GuestSegAttrSsDplZero"),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestSegAttrSsType,      "GuestSegAttrSsType"),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestSegAttrTrDescType,  "GuestSegAttrTrDescType"),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestSegAttrTrGran,      "GuestSegAttrTrGran"),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestSegAttrTrPresent,   "GuestSegAttrTrPresent"),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestSegAttrTrRsvd,      "GuestSegAttrTrRsvd"),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestSegAttrTrType,      "GuestSegAttrTrType"),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestSegAttrTrUnusable,  "GuestSegAttrTrUnusable"),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestSegAttrTypeAccCs,   "GuestSegAttrTypeAccCs"),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestSegAttrTypeAccDs,   "GuestSegAttrTypeAccDs"),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestSegAttrTypeAccEs,   "GuestSegAttrTypeAccEs"),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestSegAttrTypeAccFs,   "GuestSegAttrTypeAccFs"),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestSegAttrTypeAccGs,   "GuestSegAttrTypeAccGs"),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestSegAttrTypeAccSs,   "GuestSegAttrTypeAccSs"),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestSegAttrV86Cs,       "GuestSegAttrV86Cs"),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestSegAttrV86Ds,       "GuestSegAttrV86Ds"),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestSegAttrV86Es,       "GuestSegAttrV86Es"),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestSegAttrV86Fs,       "GuestSegAttrV86Fs"),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestSegAttrV86Gs,       "GuestSegAttrV86Gs"),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestSegAttrV86Ss,       "GuestSegAttrV86Ss"),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestSegBaseCs,          "GuestSegBaseCs"),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestSegBaseDs,          "GuestSegBaseDs"),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestSegBaseEs,          "GuestSegBaseEs"),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestSegBaseFs,          "GuestSegBaseFs"),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestSegBaseGs,          "GuestSegBaseGs"),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestSegBaseLdtr,        "GuestSegBaseLdtr"),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestSegBaseSs,          "GuestSegBaseSs"),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestSegBaseTr,          "GuestSegBaseTr"),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestSegBaseV86Cs,       "GuestSegBaseV86Cs"),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestSegBaseV86Ds,       "GuestSegBaseV86Ds"),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestSegBaseV86Es,       "GuestSegBaseV86Es"),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestSegBaseV86Fs,       "GuestSegBaseV86Fs"),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestSegBaseV86Gs,       "GuestSegBaseV86Gs"),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestSegBaseV86Ss,       "GuestSegBaseV86Ss"),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestSegLimitV86Cs,      "GuestSegLimitV86Cs"),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestSegLimitV86Ds,      "GuestSegLimitV86Ds"),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestSegLimitV86Es,      "GuestSegLimitV86Es"),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestSegLimitV86Fs,      "GuestSegLimitV86Fs"),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestSegLimitV86Gs,      "GuestSegLimitV86Gs"),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestSegLimitV86Ss,      "GuestSegLimitV86Ss"),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestSegSelCsSsRpl,      "GuestSegSelCsSsRpl"),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestSegSelLdtr,         "GuestSegSelLdtr"),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestSegSelTr,           "GuestSegSelTr"),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestSysenterEspEip,     "GuestSysenterEspEip"),
     VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_HostCr0Fixed0,           "HostCr0Fixed0"),
     VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_HostCr0Fixed1,           "HostCr0Fixed1"),
…
     VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_HostSs,                  "HostSs"),
     VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_HostSysenterEspEip,      "HostSysenterEspEip"),
-    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestCr0Fixed0,          "GuestCr0Fixed0"),
-    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestCr0Fixed1,          "GuestCr0Fixed1"),
-    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestCr0PgPe,            "GuestCr0PgPe"),
-    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestCr3,                "GuestCr3"),
-    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestCr4Fixed0,          "GuestCr4Fixed0"),
-    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestCr4Fixed1,          "GuestCr4Fixed1"),
-    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestDebugCtl,           "GuestDebugCtl"),
-    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestDr7,                "GuestDr7"),
-    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestEferMsr,            "GuestEferMsr"),
-    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestEferMsrRsvd,        "GuestEferMsrRsvd"),
-    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestPae,                "GuestPae"),
-    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestPatMsr,             "GuestPatMsr"),
-    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestPcide,              "GuestPcide"),
-    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_GuestSysenterEspEip,     "GuestSysenterEspEip"),
     VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_LongModeCS,              "LongModeCS"),
     VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_NmiWindowExit,           "NmiWindowExit"),
…
     VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_VmcsClear,               "VmcsClear"),
     VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_VmcsLaunch,              "VmcsLaunch"),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_VmreadBitmapPtrReadPhys,  "VmreadBitmapPtrReadPhys"),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_VmwriteBitmapPtrReadPhys, "VmwriteBitmapPtrReadPhys"),
     VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_VmxRoot,                 "VmxRoot"),
     VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_Vpid,                    "Vpid")
-    /* kVmxVInstrDiag_Last */
+    /* kVmxVInstrDiag_End */
 };
+AssertCompile(RT_ELEMENTS(g_apszVmxInstrDiagDesc) == kVmxVInstrDiag_End);
 #undef VMX_INSTR_DIAG_DESC
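Note: this table feeds HMVmxGetInstrDiagDesc(), which the new IEM_VMX_VMENTRY_FAILED_RET macro in the second file uses for logging. A minimal sketch of what such a lookup can look like (hypothetical code, not part of the changeset) shows why the added AssertCompile matters — it pins the array length to kVmxVInstrDiag_End, so the enum and the description table cannot drift apart silently:

    /* Hypothetical sketch; the actual implementation may differ. */
    const char *HMVmxGetInstrDiagDesc(VMXVINSTRDIAG enmInstrDiag)
    {
        /* The AssertCompile above guarantees every value below
           kVmxVInstrDiag_End has a description string. */
        if ((unsigned)enmInstrDiag < (unsigned)kVmxVInstrDiag_End)
            return g_apszVmxInstrDiagDesc[enmInstrDiag];
        return "Unknown/invalid";
    }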
trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h
(diff r74134 → r74151)

 #define IEM_VMX_HAS_SHADOW_VMCS(a_pVCpu)    RT_BOOL(IEM_VMX_GET_SHADOW_VMCS(a_pVCpu) != NIL_RTGCPHYS)

+/** Gets the guest-physical address of the shadows VMCS for the given VCPU. */
 #define IEM_VMX_GET_SHADOW_VMCS(a_pVCpu)    ((a_pVCpu)->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs)->u64VmcsLinkPtr.u)
…
         return iemRaiseUndefinedOpcode(a_pVCpu); \
     } \
+} while (0)
+
+/** Marks a VM-entry failure with a diagnostic reason, logs and returns. */
+#define IEM_VMX_VMENTRY_FAILED_RET(a_pVCpu, a_pszInstr, a_pszFailure, a_InsDiag) \
+    do \
+    { \
+        Log(("%s: VM-entry failed! enmInstrDiag=%u (%s) -> %s\n", (a_pszInstr), (a_InsDiag), \
+             HMVmxGetInstrDiagDesc(a_InsDiag), (a_pszFailure))); \
+        (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = (a_InsDiag); \
+        return VERR_VMX_VMENTRY_FAILED; \
 } while (0)
…
+/**
+ * Gets the instruction diagnostic for segment base checks during VM-entry of a
+ * nested-guest.
+ *
+ * @param   iSegReg     The segment index (X86_SREG_XXX).
+ */
+IEM_STATIC VMXVINSTRDIAG iemVmxVmentryGetInstrDiagSegBase(unsigned iSegReg)
+{
+    switch (iSegReg)
+    {
+        case X86_SREG_CS: return kVmxVInstrDiag_Vmentry_GuestSegBaseCs;
+        case X86_SREG_DS: return kVmxVInstrDiag_Vmentry_GuestSegBaseDs;
+        case X86_SREG_ES: return kVmxVInstrDiag_Vmentry_GuestSegBaseEs;
+        case X86_SREG_FS: return kVmxVInstrDiag_Vmentry_GuestSegBaseFs;
+        case X86_SREG_GS: return kVmxVInstrDiag_Vmentry_GuestSegBaseGs;
+        default:          return kVmxVInstrDiag_Vmentry_GuestSegBaseSs;
+    }
+}
+
+
+/**
+ * Gets the instruction diagnostic for segment base checks during VM-entry of a
+ * nested-guest that is in Virtual-8086 mode.
+ *
+ * @param   iSegReg     The segment index (X86_SREG_XXX).
+ */
+IEM_STATIC VMXVINSTRDIAG iemVmxVmentryGetInstrDiagSegBaseV86(unsigned iSegReg)
+{
+    switch (iSegReg)
+    {
+        case X86_SREG_CS: return kVmxVInstrDiag_Vmentry_GuestSegBaseV86Cs;
+        case X86_SREG_DS: return kVmxVInstrDiag_Vmentry_GuestSegBaseV86Ds;
+        case X86_SREG_ES: return kVmxVInstrDiag_Vmentry_GuestSegBaseV86Es;
+        case X86_SREG_FS: return kVmxVInstrDiag_Vmentry_GuestSegBaseV86Fs;
+        case X86_SREG_GS: return kVmxVInstrDiag_Vmentry_GuestSegBaseV86Gs;
+        default:
+            Assert(iSegReg == X86_SREG_SS);
+            return kVmxVInstrDiag_Vmentry_GuestSegBaseV86Ss;
+    }
+}
+
+
+/**
+ * Gets the instruction diagnostic for segment limit checks during VM-entry of a
+ * nested-guest that is in Virtual-8086 mode.
+ *
+ * @param   iSegReg     The segment index (X86_SREG_XXX).
+ */
+IEM_STATIC VMXVINSTRDIAG iemVmxVmentryGetInstrDiagSegLimitV86(unsigned iSegReg)
+{
+    switch (iSegReg)
+    {
+        case X86_SREG_CS: return kVmxVInstrDiag_Vmentry_GuestSegLimitV86Cs;
+        case X86_SREG_DS: return kVmxVInstrDiag_Vmentry_GuestSegLimitV86Ds;
+        case X86_SREG_ES: return kVmxVInstrDiag_Vmentry_GuestSegLimitV86Es;
+        case X86_SREG_FS: return kVmxVInstrDiag_Vmentry_GuestSegLimitV86Fs;
+        case X86_SREG_GS: return kVmxVInstrDiag_Vmentry_GuestSegLimitV86Gs;
+        default:
+            Assert(iSegReg == X86_SREG_SS);
+            return kVmxVInstrDiag_Vmentry_GuestSegLimitV86Ss;
+    }
+}
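Note: the macro replaces the old four-line log/set/return pattern at every check site. Combined with the per-segment helpers, each call site collapses to the shape below — this exact pattern appears in the rewritten segment-register checks later in this change:

    if (X86_IS_CANONICAL(SelReg.u64Base))
    { /* likely */ }
    else
    {
        VMXVINSTRDIAG const enmDiag = iemVmxVmentryGetInstrDiagSegBase(iSegReg);
        IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
    }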
+/**
+ * Gets the instruction diagnostic for segment attribute checks during VM-entry of a
+ * nested-guest that is in Virtual-8086 mode.
+ *
+ * @param   iSegReg     The segment index (X86_SREG_XXX).
+ */
+IEM_STATIC VMXVINSTRDIAG iemVmxVmentryGetInstrDiagSegAttrV86(unsigned iSegReg)
+{
+    switch (iSegReg)
+    {
+        case X86_SREG_CS: return kVmxVInstrDiag_Vmentry_GuestSegAttrV86Cs;
+        case X86_SREG_DS: return kVmxVInstrDiag_Vmentry_GuestSegAttrV86Ds;
+        case X86_SREG_ES: return kVmxVInstrDiag_Vmentry_GuestSegAttrV86Es;
+        case X86_SREG_FS: return kVmxVInstrDiag_Vmentry_GuestSegAttrV86Fs;
+        case X86_SREG_GS: return kVmxVInstrDiag_Vmentry_GuestSegAttrV86Gs;
+        default:
+            Assert(iSegReg == X86_SREG_SS);
+            return kVmxVInstrDiag_Vmentry_GuestSegAttrV86Ss;
+    }
+}
+
+
+/**
+ * Gets the instruction diagnostic for segment attributes reserved bits failure
+ * during VM-entry of a nested-guest.
+ *
+ * @param   iSegReg     The segment index (X86_SREG_XXX).
+ */
+IEM_STATIC VMXVINSTRDIAG iemVmxVmentryGetInstrDiagSegAttrRsvd(unsigned iSegReg)
+{
+    switch (iSegReg)
+    {
+        case X86_SREG_CS: return kVmxVInstrDiag_Vmentry_GuestSegAttrRsvdCs;
+        case X86_SREG_DS: return kVmxVInstrDiag_Vmentry_GuestSegAttrRsvdDs;
+        case X86_SREG_ES: return kVmxVInstrDiag_Vmentry_GuestSegAttrRsvdEs;
+        case X86_SREG_FS: return kVmxVInstrDiag_Vmentry_GuestSegAttrRsvdFs;
+        case X86_SREG_GS: return kVmxVInstrDiag_Vmentry_GuestSegAttrRsvdGs;
+        default:
+            Assert(iSegReg == X86_SREG_SS);
+            return kVmxVInstrDiag_Vmentry_GuestSegAttrRsvdSs;
+    }
+}
+
+
+/**
+ * Gets the instruction diagnostic for segment attributes descriptor-type
+ * (code/segment or system) failure during VM-entry of a nested-guest.
+ *
+ * @param   iSegReg     The segment index (X86_SREG_XXX).
+ */
+IEM_STATIC VMXVINSTRDIAG iemVmxVmentryGetInstrDiagSegAttrDescType(unsigned iSegReg)
+{
+    switch (iSegReg)
+    {
+        case X86_SREG_CS: return kVmxVInstrDiag_Vmentry_GuestSegAttrDescTypeCs;
+        case X86_SREG_DS: return kVmxVInstrDiag_Vmentry_GuestSegAttrDescTypeDs;
+        case X86_SREG_ES: return kVmxVInstrDiag_Vmentry_GuestSegAttrDescTypeEs;
+        case X86_SREG_FS: return kVmxVInstrDiag_Vmentry_GuestSegAttrDescTypeFs;
+        case X86_SREG_GS: return kVmxVInstrDiag_Vmentry_GuestSegAttrDescTypeGs;
+        default:
+            Assert(iSegReg == X86_SREG_SS);
+            return kVmxVInstrDiag_Vmentry_GuestSegAttrDescTypeSs;
+    }
+}
+
+
+/**
+ * Gets the instruction diagnostic for segment attributes descriptor-type
+ * (code/segment or system) failure during VM-entry of a nested-guest.
+ *
+ * @param   iSegReg     The segment index (X86_SREG_XXX).
+ */
+IEM_STATIC VMXVINSTRDIAG iemVmxVmentryGetInstrDiagSegAttrPresent(unsigned iSegReg)
+{
+    switch (iSegReg)
+    {
+        case X86_SREG_CS: return kVmxVInstrDiag_Vmentry_GuestSegAttrPresentCs;
+        case X86_SREG_DS: return kVmxVInstrDiag_Vmentry_GuestSegAttrPresentDs;
+        case X86_SREG_ES: return kVmxVInstrDiag_Vmentry_GuestSegAttrPresentEs;
+        case X86_SREG_FS: return kVmxVInstrDiag_Vmentry_GuestSegAttrPresentFs;
+        case X86_SREG_GS: return kVmxVInstrDiag_Vmentry_GuestSegAttrPresentGs;
+        default:
+            Assert(iSegReg == X86_SREG_SS);
+            return kVmxVInstrDiag_Vmentry_GuestSegAttrPresentSs;
+    }
+}
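Note: these switch helpers are deliberately mechanical — one diagnostic enum per (check, segment) pair keeps every failure distinguishable in the logs. An equivalent table-driven formulation (hypothetical, not what the changeset does, and using C99-style designated initializers) would trade each switch for an array per check kind:

    /* Hypothetical alternative, not part of this changeset. */
    static const VMXVINSTRDIAG g_aenmSegBaseDiag[X86_SREG_COUNT] =
    {
        [X86_SREG_ES] = kVmxVInstrDiag_Vmentry_GuestSegBaseEs,
        [X86_SREG_CS] = kVmxVInstrDiag_Vmentry_GuestSegBaseCs,
        [X86_SREG_SS] = kVmxVInstrDiag_Vmentry_GuestSegBaseSs,
        [X86_SREG_DS] = kVmxVInstrDiag_Vmentry_GuestSegBaseDs,
        [X86_SREG_FS] = kVmxVInstrDiag_Vmentry_GuestSegBaseFs,
        [X86_SREG_GS] = kVmxVInstrDiag_Vmentry_GuestSegBaseGs,
    };
    /* Usage: IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, g_aenmSegBaseDiag[iSegReg]); */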
+/**
+ * Gets the instruction diagnostic for segment attribute granularity failure during
+ * VM-entry of a nested-guest.
+ *
+ * @param   iSegReg     The segment index (X86_SREG_XXX).
+ */
+IEM_STATIC VMXVINSTRDIAG iemVmxVmentryGetInstrDiagSegAttrGran(unsigned iSegReg)
+{
+    switch (iSegReg)
+    {
+        case X86_SREG_CS: return kVmxVInstrDiag_Vmentry_GuestSegAttrGranCs;
+        case X86_SREG_DS: return kVmxVInstrDiag_Vmentry_GuestSegAttrGranDs;
+        case X86_SREG_ES: return kVmxVInstrDiag_Vmentry_GuestSegAttrGranEs;
+        case X86_SREG_FS: return kVmxVInstrDiag_Vmentry_GuestSegAttrGranFs;
+        case X86_SREG_GS: return kVmxVInstrDiag_Vmentry_GuestSegAttrGranGs;
+        default:
+            Assert(iSegReg == X86_SREG_SS);
+            return kVmxVInstrDiag_Vmentry_GuestSegAttrGranSs;
+    }
+}
+
+/**
+ * Gets the instruction diagnostic for segment attribute DPL/RPL failure during
+ * VM-entry of a nested-guest.
+ *
+ * @param   iSegReg     The segment index (X86_SREG_XXX).
+ */
+IEM_STATIC VMXVINSTRDIAG iemVmxVmentryGetInstrDiagSegAttrDplRpl(unsigned iSegReg)
+{
+    switch (iSegReg)
+    {
+        case X86_SREG_CS: return kVmxVInstrDiag_Vmentry_GuestSegAttrDplRplCs;
+        case X86_SREG_DS: return kVmxVInstrDiag_Vmentry_GuestSegAttrDplRplDs;
+        case X86_SREG_ES: return kVmxVInstrDiag_Vmentry_GuestSegAttrDplRplEs;
+        case X86_SREG_FS: return kVmxVInstrDiag_Vmentry_GuestSegAttrDplRplFs;
+        case X86_SREG_GS: return kVmxVInstrDiag_Vmentry_GuestSegAttrDplRplGs;
+        default:
+            Assert(iSegReg == X86_SREG_SS);
+            return kVmxVInstrDiag_Vmentry_GuestSegAttrDplRplSs;
+    }
+}
+
+
+/**
+ * Gets the instruction diagnostic for segment attribute type accessed failure
+ * during VM-entry of a nested-guest.
+ *
+ * @param   iSegReg     The segment index (X86_SREG_XXX).
+ */
+IEM_STATIC VMXVINSTRDIAG iemVmxVmentryGetInstrDiagSegAttrTypeAcc(unsigned iSegReg)
+{
+    switch (iSegReg)
+    {
+        case X86_SREG_CS: return kVmxVInstrDiag_Vmentry_GuestSegAttrTypeAccCs;
+        case X86_SREG_DS: return kVmxVInstrDiag_Vmentry_GuestSegAttrTypeAccDs;
+        case X86_SREG_ES: return kVmxVInstrDiag_Vmentry_GuestSegAttrTypeAccEs;
+        case X86_SREG_FS: return kVmxVInstrDiag_Vmentry_GuestSegAttrTypeAccFs;
+        case X86_SREG_GS: return kVmxVInstrDiag_Vmentry_GuestSegAttrTypeAccGs;
+        default:
+            Assert(iSegReg == X86_SREG_SS);
+            return kVmxVInstrDiag_Vmentry_GuestSegAttrTypeAccSs;
+    }
+}
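Note: the granularity diagnostics above report violations of the Intel SDM rule checked later in iemVmxVmentryCheckGuestSegRegs (see below): if any bit in limit[11:0] is 0 the G bit must be 0, and if any bit in limit[31:20] is 1 the G bit must be 1. Restated as a self-contained predicate (this is the SDM rule itself; the code's likely-path expression is written differently):

    /* Intel SDM segment limit/granularity consistency rule (sketch). */
    static bool segLimitMatchesGranularity(uint32_t u32Limit, uint8_t uGranularity)
    {
        if ((u32Limit & UINT32_C(0x00000fff)) != UINT32_C(0x00000fff) && uGranularity != 0)
            return false;   /* some low bit is 0  => G must be 0 */
        if ((u32Limit & UINT32_C(0xfff00000)) != 0 && uGranularity != 1)
            return false;   /* some high bit is 1 => G must be 1 */
        return true;
    }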
 /**
- * Checks guest-state as part of VM-entry.
- *
- * @returns VBox status code.
+ * Checks guest control registers, debug registers and MSRs as part of VM-entry.
+ *
  * @param   pVCpu       The cross context virtual CPU structure.
  * @param   pszInstr    The VMX instruction name (for logging purposes).
  */
-IEM_STATIC int iemVmxVmentryCheckGuestState(PVMCPU pVCpu, const char *pszInstr)
-{
-    PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
-
+IEM_STATIC int iemVmxVmentryCheckGuestControlRegsMsrs(PVMCPU pVCpu, const char *pszInstr)
+{
     /*
      * Guest Control Registers, Debug Registers, and MSRs.
      * See Intel spec. 26.3.1.1 "Checks on Guest Control Registers, Debug Registers, and MSRs".
      */
-    bool const fUnrestrictedGuest = pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST;
+    PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
+    bool const fUnrestrictedGuest = RT_BOOL(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST);
+    const char *const pszFailure = "VM-exit";
+
     /* CR0 reserved bits. */
     {
…
         u64Cr0Fixed0 &= ~(X86_CR0_PE | X86_CR0_PG);
         if (~pVmcs->u64GuestCr0.u & u64Cr0Fixed0)
-        {
-            Log(("%s: Invalid guest CR0 %#RX32 (fixed0) -> VM-exit\n", pszInstr, pVmcs->u64GuestCr0.u));
-            pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_GuestCr0Fixed0;
-            return VERR_VMX_VMENTRY_FAILED;
-        }
+            IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_GuestCr0Fixed0);

         /* CR0 MBZ bits. */
         uint64_t const u64Cr0Fixed1 = CPUMGetGuestIa32VmxCr0Fixed1(pVCpu);
         if (pVmcs->u64GuestCr0.u & ~u64Cr0Fixed1)
-        {
-            Log(("%s: Invalid guest CR0 %#RX64 (fixed1) -> VM-exit\n", pszInstr, pVmcs->u64GuestCr0.u));
-            pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_GuestCr0Fixed1;
-            return VERR_VMX_VMENTRY_FAILED;
-        }
+            IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_GuestCr0Fixed1);

         /* Without unrestricted guest support, VT-x supports does not support unpaged protected mode. */
         if (   !fUnrestrictedGuest
             && (pVmcs->u64GuestCr0.u & X86_CR0_PG)
             && !(pVmcs->u64GuestCr0.u & X86_CR0_PE))
-        {
-            Log(("%s: Invalid guest CR0.PG and CR0.PE combination %#RX64 -> VM-exit\n", pszInstr, pVmcs->u64GuestCr0.u));
-            pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_GuestCr0PgPe;
-            return VERR_VMX_VMENTRY_FAILED;
-        }
+            IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_GuestCr0PgPe);
     }

…
         uint64_t const u64Cr4Fixed0 = CPUMGetGuestIa32VmxCr4Fixed0(pVCpu);
         if (~pVmcs->u64GuestCr4.u & u64Cr4Fixed0)
-        {
-            Log(("%s: Invalid host CR4 %#RX64 (fixed0) -> VM-exit\n", pszInstr, pVmcs->u64GuestCr4.u));
-            pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_GuestCr4Fixed0;
-            return VERR_VMX_VMENTRY_FAILED;
-        }
+            IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_GuestCr4Fixed0);

         /* CR4 MBZ bits. */
         uint64_t const u64Cr4Fixed1 = CPUMGetGuestIa32VmxCr4Fixed1(pVCpu);
         if (pVmcs->u64GuestCr4.u & ~u64Cr4Fixed1)
-        {
-            Log(("%s: Invalid host CR4 %#RX64 (fixed1) -> VM-exit\n", pszInstr, pVmcs->u64GuestCr4.u));
-            pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_GuestCr4Fixed1;
-            return VERR_VMX_VMENTRY_FAILED;
-        }
+            IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_GuestCr4Fixed1);
     }

…
     /* DEBUGCTL MSR. */
     if (   (pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
         && (pVmcs->u64GuestDebugCtlMsr.u & ~MSR_IA32_DEBUGCTL_VALID_MASK_INTEL))
-    {
-        Log(("%s: DEBUGCTL MSR (%#RX64) reserved bits set -> VM-exit\n", pszInstr, pVmcs->u64GuestDebugCtlMsr.u));
-        pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_GuestDebugCtl;
-        return VERR_VMX_VMENTRY_FAILED;
-    }
+        IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_GuestDebugCtl);

     /* 64-bit CPU checks. */
+    bool const fGstInLongMode = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
     if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
     {
-        bool const fGstInLongMode = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
         if (fGstInLongMode)
         {
…
             { /* likely */ }
             else
-            {
-                Log(("%s: Guest PAE not set when guest is in long mode\n", pszInstr));
-                pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_GuestPae;
-                return VERR_VMX_VMENTRY_FAILED;
-            }
+                IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_GuestPae);
         }
         else
         {
…
             { /* likely */ }
             else
-            {
-                Log(("%s: Guest PCIDE set when guest is not in long mode\n", pszInstr));
-                pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_GuestPcide;
-                return VERR_VMX_VMENTRY_FAILED;
-            }
+                IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_GuestPcide);
         }

         /* CR3. */
-        if (pVmcs->u64GuestCr3.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cMaxPhysAddrWidth)
-        {
-            Log(("%s: Guest CR3 (%#RX64) invalid\n", pszInstr, pVmcs->u64GuestCr3.u));
-            pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_GuestCr3;
-            return VERR_VMX_VMENTRY_FAILED;
-        }
+        if (!(pVmcs->u64GuestCr3.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cMaxPhysAddrWidth))
+        { /* likely */ }
+        else
+            IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_GuestCr3);

         /* DR7. */
         if (   (pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
             && (pVmcs->u64GuestDr7.u & X86_DR7_MBZ_MASK))
-        {
-            Log(("%s: Guest DR7 (%#RX64) invalid", pszInstr, pVmcs->u64GuestDr7.u));
-            pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_GuestDr7;
-            return VERR_VMX_VMENTRY_FAILED;
-        }
+            IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_GuestDr7);

         /* SYSENTER ESP and SYSENTER EIP. */
…
         { /* likely */ }
         else
-        {
-            Log(("%s: Guest Sysenter ESP (%#RX64) / EIP (%#RX64) not canonical -> VMFail\n", pszInstr,
-                 pVmcs->u64GuestSysenterEsp.u, pVmcs->u64GuestSysenterEip.u));
-            pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_GuestSysenterEspEip;
-            return VERR_VMX_VMENTRY_FAILED;
-        }
+            IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_GuestSysenterEspEip);
     }

…
     /* PAT MSR. */
     if (   (pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PAT_MSR)
         && !CPUMIsPatMsrValid(pVmcs->u64GuestPatMsr.u))
-    {
-        Log(("%s: Guest PAT MSR (%#RX64) invalid\n", pszInstr, pVmcs->u64GuestPatMsr.u));
-        pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_GuestPatMsr;
-        return VERR_VMX_VMENTRY_FAILED;
-    }
+        IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_GuestPatMsr);

     /* EFER MSR. */
…
     if (   (pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_EFER_MSR)
         && (pVmcs->u64GuestEferMsr.u & ~uValidEferMask))
-    {
-        Log(("%s: Guest EFER MSR (%#RX64) reserved bits set\n", pszInstr, pVmcs->u64GuestEferMsr.u));
-        pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_GuestEferMsrRsvd;
-        return VERR_VMX_VMENTRY_FAILED;
-    }
-    bool const fGstInLongMode = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
+        IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_GuestEferMsrRsvd);
+
     bool const fGstLma = RT_BOOL(pVmcs->u64HostEferMsr.u & MSR_K6_EFER_BIT_LMA);
     bool const fGstLme = RT_BOOL(pVmcs->u64HostEferMsr.u & MSR_K6_EFER_BIT_LME);
…
     { /* likely */ }
     else
-    {
-        Log(("%s: Guest EFER MSR (%#RX64) invalid\n", pszInstr, pVmcs->u64GuestEferMsr.u));
-        pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_GuestEferMsr;
-        return VERR_VMX_VMENTRY_FAILED;
-    }
+        IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_GuestEferMsr);

     Assert(!(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_BNDCFGS_MSR)); /* We don't support loading IA32_BNDCFGS MSR yet. */

+    NOREF(pszInstr);
+    NOREF(pszFailure);
     return VINF_SUCCESS;
 }
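Note: one detail worth flagging in the guest EFER consistency check above — fGstLma and fGstLme are derived from u64HostEferMsr even though the check validates the guest EFER value; presumably u64GuestEferMsr (the field used by the reserved-bits check just above) was intended:

    /* Presumed intent (an editorial assumption, not part of the changeset): */
    bool const fGstLma = RT_BOOL(pVmcs->u64GuestEferMsr.u & MSR_K6_EFER_BIT_LMA);
    bool const fGstLme = RT_BOOL(pVmcs->u64GuestEferMsr.u & MSR_K6_EFER_BIT_LME);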
*/ … … 2192 2373 if ( (pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_EFER_MSR) 2193 2374 && (pVmcs->u64GuestEferMsr.u & ~uValidEferMask)) 2194 { 2195 Log(("%s: Guest EFER MSR (%#RX64) reserved bits set\n", pszInstr, pVmcs->u64GuestEferMsr.u)); 2196 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_GuestEferMsrRsvd; 2197 return VERR_VMX_VMENTRY_FAILED; 2198 } 2199 bool const fGstInLongMode = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST); 2375 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_GuestEferMsrRsvd); 2376 2200 2377 bool const fGstLma = RT_BOOL(pVmcs->u64HostEferMsr.u & MSR_K6_EFER_BIT_LMA); 2201 2378 bool const fGstLme = RT_BOOL(pVmcs->u64HostEferMsr.u & MSR_K6_EFER_BIT_LME); … … 2205 2382 { /* likely */ } 2206 2383 else 2207 { 2208 Log(("%s: Guest EFER MSR (%#RX64) invalid\n", pszInstr, pVmcs->u64GuestEferMsr.u)); 2209 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_GuestEferMsr; 2210 return VERR_VMX_VMENTRY_FAILED; 2211 } 2384 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_GuestEferMsr); 2212 2385 2213 2386 Assert(!(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_BNDCFGS_MSR)); /* We don't support loading IA32_BNDCFGS MSR yet. */ 2214 2387 2388 NOREF(pszInstr); 2389 NOREF(pszFailure); 2215 2390 return VINF_SUCCESS; 2216 2391 } … … 2218 2393 2219 2394 /** 2220 * Checks host-state as part of VM-entry. 2395 * Checks guest segment registers, LDTR and TR as part of VM-entry. 2396 * 2397 * @param pVCpu The cross context virtual CPU structure. 2398 * @param pszInstr The VMX instruction name (for logging purposes). 2399 */ 2400 IEM_STATIC int iemVmxVmentryCheckGuestSegRegs(PVMCPU pVCpu, const char *pszInstr) 2401 { 2402 /* 2403 * Segment registers. 2404 * See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". 2405 */ 2406 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs); 2407 bool const fGstInV86Mode = RT_BOOL(pVmcs->u64GuestRFlags.u & X86_EFL_VM); 2408 bool const fUnrestrictedGuest = RT_BOOL(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST); 2409 bool const fGstInLongMode = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST); 2410 const char *const pszFailure = "VM-exit"; 2411 2412 /* Selectors. */ 2413 if ( !fGstInV86Mode 2414 && !fUnrestrictedGuest 2415 && (pVmcs->GuestSs & X86_SEL_RPL) != (pVmcs->GuestCs & X86_SEL_RPL)) 2416 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_GuestSegSelCsSsRpl); 2417 2418 for (unsigned iSegReg = 0; iSegReg < X86_SREG_COUNT; iSegReg++) 2419 { 2420 CPUMSELREG SelReg; 2421 int rc = iemVmxVmcsGetGuestSegReg(pVmcs, iSegReg, &SelReg); 2422 if (RT_LIKELY(rc == VINF_SUCCESS)) 2423 { /* likely */ } 2424 else 2425 return rc; 2426 2427 /* 2428 * Virtual-8086 mode checks. 2429 */ 2430 if (fGstInV86Mode) 2431 { 2432 /* Base address. */ 2433 if (SelReg.u64Base == (uint64_t)SelReg.Sel << 4) 2434 { /* likely */ } 2435 else 2436 { 2437 VMXVINSTRDIAG const enmDiag = iemVmxVmentryGetInstrDiagSegBaseV86(iSegReg); 2438 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag); 2439 } 2440 2441 /* Limit. */ 2442 if (SelReg.u32Limit == 0xffff) 2443 { /* likely */ } 2444 else 2445 { 2446 VMXVINSTRDIAG const enmDiag = iemVmxVmentryGetInstrDiagSegLimitV86(iSegReg); 2447 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag); 2448 } 2449 2450 /* Attribute. 
*/ 2451 if (SelReg.Attr.u == 0xf3) 2452 { /* likely */ } 2453 else 2454 { 2455 VMXVINSTRDIAG const enmDiag = iemVmxVmentryGetInstrDiagSegAttrV86(iSegReg); 2456 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag); 2457 } 2458 2459 /* We're done; move to checking the next segment. */ 2460 continue; 2461 } 2462 2463 /* Checks done by 64-bit CPUs. */ 2464 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode) 2465 { 2466 /* Base address. */ 2467 if ( iSegReg == X86_SREG_FS 2468 || iSegReg == X86_SREG_GS) 2469 { 2470 if (X86_IS_CANONICAL(SelReg.u64Base)) 2471 { /* likely */ } 2472 else 2473 { 2474 VMXVINSTRDIAG const enmDiag = iemVmxVmentryGetInstrDiagSegBase(iSegReg); 2475 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag); 2476 } 2477 } 2478 else if (iSegReg == X86_SREG_CS) 2479 { 2480 if (!RT_HI_U32(SelReg.u64Base)) 2481 { /* likely */ } 2482 else 2483 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_GuestSegBaseCs); 2484 } 2485 else 2486 { 2487 if ( SelReg.Attr.n.u1Unusable 2488 || !RT_HI_U32(SelReg.u64Base)) 2489 { /* likely */ } 2490 else 2491 { 2492 VMXVINSTRDIAG const enmDiag = iemVmxVmentryGetInstrDiagSegBase(iSegReg); 2493 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag); 2494 } 2495 } 2496 } 2497 2498 /* 2499 * Checks outside Virtual-8086 mode. 2500 */ 2501 uint8_t const uSegType = SelReg.Attr.n.u4Type; 2502 uint8_t const fCodeDataSeg = SelReg.Attr.n.u1DescType; 2503 uint8_t const fUsable = !SelReg.Attr.n.u1Unusable; 2504 uint8_t const uDpl = SelReg.Attr.n.u2Dpl; 2505 uint8_t const fPresent = SelReg.Attr.n.u1Present; 2506 uint8_t const uGranularity = SelReg.Attr.n.u1Granularity; 2507 uint8_t const uDefBig = SelReg.Attr.n.u1DefBig; 2508 uint8_t const fSegLong = SelReg.Attr.n.u1Long; 2509 2510 /* Code or usable segment. */ 2511 if ( iSegReg == X86_SREG_CS 2512 || fUsable) 2513 { 2514 /* Reserved bits (bits 31:17 and bits 11:8). */ 2515 if (!(SelReg.Attr.u & 0xfffe0f00)) 2516 { /* likely */ } 2517 else 2518 { 2519 VMXVINSTRDIAG const enmDiag = iemVmxVmentryGetInstrDiagSegAttrRsvd(iSegReg); 2520 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag); 2521 } 2522 2523 /* Descriptor type. */ 2524 if (fCodeDataSeg) 2525 { /* likely */ } 2526 else 2527 { 2528 VMXVINSTRDIAG const enmDiag = iemVmxVmentryGetInstrDiagSegAttrDescType(iSegReg); 2529 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag); 2530 } 2531 2532 /* Present. */ 2533 if (fPresent) 2534 { /* likely */ } 2535 else 2536 { 2537 VMXVINSTRDIAG const enmDiag = iemVmxVmentryGetInstrDiagSegAttrPresent(iSegReg); 2538 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag); 2539 } 2540 2541 /* Granularity. */ 2542 if ( ( (SelReg.u32Limit & 0xfff) != 0xfff 2543 && uGranularity == 0) 2544 || ( (SelReg.u32Limit & 0xfff00000) 2545 && uGranularity == 1)) 2546 { /* likely */ } 2547 else 2548 { 2549 VMXVINSTRDIAG const enmDiag = iemVmxVmentryGetInstrDiagSegAttrGran(iSegReg); 2550 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag); 2551 } 2552 } 2553 2554 if (iSegReg == X86_SREG_CS) 2555 { 2556 /* Segment Type and DPL. 
+        if (iSegReg == X86_SREG_CS)
+        {
+            /* Segment Type and DPL. */
+            if (   uSegType == (X86_SEL_TYPE_RW | X86_SEL_TYPE_ACCESSED)
+                && fUnrestrictedGuest)
+            {
+                if (uDpl == 0)
+                { /* likely */ }
+                else
+                    IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_GuestSegAttrCsDplZero);
+            }
+            else if (   uSegType == (X86_SEL_TYPE_CODE | X86_SEL_TYPE_ACCESSED)
+                     || uSegType == (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ | X86_SEL_TYPE_ACCESSED))
+            {
+                X86DESCATTR SsAttr; SsAttr.u = pVmcs->u32GuestSsAttr;
+                if (uDpl == SsAttr.n.u2Dpl)
+                { /* likely */ }
+                else
+                    IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_GuestSegAttrCsDplEqSs);
+            }
+            else if (   uSegType == (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF | X86_SEL_TYPE_ACCESSED)
+                     || uSegType == (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF | X86_SEL_TYPE_READ | X86_SEL_TYPE_ACCESSED))
+            {
+                X86DESCATTR SsAttr; SsAttr.u = pVmcs->u32GuestSsAttr;
+                if (uDpl <= SsAttr.n.u2Dpl)
+                { /* likely */ }
+                else
+                    IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_GuestSegAttrCsDplLtSs);
+            }
+            else
+                IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_GuestSegAttrCsType);
+
+            /* Def/Big. */
+            if (   fGstInLongMode
+                && fSegLong)
+            {
+                if (uDefBig == 0)
+                { /* likely */ }
+                else
+                    IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_GuestSegAttrCsDefBig);
+            }
+        }
+        else if (iSegReg == X86_SREG_SS)
+        {
+            /* Segment Type. */
+            if (   !fUsable
+                || uSegType == (X86_SEL_TYPE_RW | X86_SEL_TYPE_ACCESSED)
+                || uSegType == (X86_SEL_TYPE_DOWN | X86_SEL_TYPE_RW | X86_SEL_TYPE_ACCESSED))
+            { /* likely */ }
+            else
+                IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_GuestSegAttrSsType);
+
+            /* DPL. */
+            if (fUnrestrictedGuest)
+            {
+                if (uDpl == (SelReg.Sel & X86_SEL_RPL))
+                { /* likely */ }
+                else
+                    IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_GuestSegAttrSsDplEqRpl);
+            }
+            X86DESCATTR CsAttr; CsAttr.u = pVmcs->u32GuestCsAttr;
+            if (   CsAttr.n.u4Type == (X86_SEL_TYPE_RW | X86_SEL_TYPE_ACCESSED)
+                || !(pVmcs->u64GuestCr0.u & X86_CR0_PE))
+            {
+                if (uDpl == 0)
+                { /* likely */ }
+                else
+                    IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_GuestSegAttrSsDplZero);
+            }
+        }
+        else
+        {
+            /* DS, ES, FS, GS. */
+            if (fUsable)
+            {
+                /* Segment type. */
+                if (uSegType & X86_SEL_TYPE_ACCESSED)
+                { /* likely */ }
+                else
+                {
+                    VMXVINSTRDIAG const enmDiag = iemVmxVmentryGetInstrDiagSegAttrTypeAcc(iSegReg);
+                    IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
+                }
+
+                if (   !(uSegType & X86_SEL_TYPE_CODE)
+                    || (uSegType & X86_SEL_TYPE_READ))
+                { /* likely */ }
+                else
+                    IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_GuestSegAttrCsTypeRead);
+
+                /* DPL. */
+                if (   !fUnrestrictedGuest
+                    && uSegType <= (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ | X86_SEL_TYPE_ACCESSED))
+                {
+                    if (uDpl >= (SelReg.Sel & X86_SEL_RPL))
+                    { /* likely */ }
+                    else
+                    {
+                        VMXVINSTRDIAG const enmDiag = iemVmxVmentryGetInstrDiagSegAttrDplRpl(iSegReg);
+                        IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
+                    }
+                }
+            }
+        }
+    }
+
+    /*
+     * LDTR.
+     */
+    {
+        CPUMSELREG Ldtr;
+        Ldtr.Sel      = pVmcs->GuestLdtr;
+        Ldtr.u32Limit = pVmcs->u32GuestLdtrLimit;
+        Ldtr.u64Base  = pVmcs->u64GuestLdtrBase.u;
+        Ldtr.Attr.u   = pVmcs->u32GuestLdtrLimit;
+
+        if (!Ldtr.Attr.n.u1Unusable)
+        {
+            /* Selector. */
+            if (!(Ldtr.Sel & X86_SEL_LDT))
+            { /* likely */ }
+            else
+                IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_GuestSegSelLdtr);
+
+            /* Base. */
+            if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
+            {
+                if (X86_IS_CANONICAL(Ldtr.u64Base))
+                { /* likely */ }
+                else
+                    IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_GuestSegBaseLdtr);
+            }
+
+            /* Attributes. */
+            /* Reserved bits (bits 31:17 and bits 11:8). */
+            if (!(Ldtr.Attr.u & 0xfffe0f00))
+            { /* likely */ }
+            else
+                IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_GuestSegAttrLdtrRsvd);
+
+            if (Ldtr.Attr.n.u4Type == X86_SEL_TYPE_SYS_LDT)
+            { /* likely */ }
+            else
+                IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_GuestSegAttrLdtrType);
+
+            if (!Ldtr.Attr.n.u1DescType)
+            { /* likely */ }
+            else
+                IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_GuestSegAttrLdtrDescType);
+
+            if (Ldtr.Attr.n.u1Present)
+            { /* likely */ }
+            else
+                IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_GuestSegAttrLdtrPresent);
+
+            if (   (   (Ldtr.u32Limit & 0xfff) != 0xfff
+                    && Ldtr.Attr.n.u1Granularity == 0)
+                || (   (Ldtr.u32Limit & 0xfff00000)
+                    && Ldtr.Attr.n.u1Granularity == 1))
+            { /* likely */ }
+            else
+                IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_GuestSegAttrLdtrGran);
+        }
+    }
+
+    /*
+     * TR.
+     */
+    {
+        CPUMSELREG Tr;
+        Tr.Sel      = pVmcs->GuestTr;
+        Tr.u32Limit = pVmcs->u32GuestTrLimit;
+        Tr.u64Base  = pVmcs->u64GuestTrBase.u;
+        Tr.Attr.u   = pVmcs->u32GuestTrLimit;
+
+        /* Selector. */
+        if (!(Tr.Sel & X86_SEL_LDT))
+        { /* likely */ }
+        else
+            IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_GuestSegSelTr);
+
+        /* Base. */
+        if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
+        {
+            if (X86_IS_CANONICAL(Tr.u64Base))
+            { /* likely */ }
+            else
+                IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_GuestSegBaseTr);
+        }
+
+        /* Attributes. */
+        /* Reserved bits (bits 31:17 and bits 11:8). */
+        if (!(Tr.Attr.u & 0xfffe0f00))
+        { /* likely */ }
+        else
+            IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_GuestSegAttrTrRsvd);
+
+        if (!Tr.Attr.n.u1Unusable)
+        { /* likely */ }
+        else
+            IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_GuestSegAttrTrUnusable);
+
+        if (   Tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_BUSY
+            || (   !fGstInLongMode
+                && Tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_286_TSS_BUSY))
+        { /* likely */ }
+        else
+            IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_GuestSegAttrTrType);
+
+        if (!Tr.Attr.n.u1DescType)
+        { /* likely */ }
+        else
+            IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_GuestSegAttrTrDescType);
+
+        if (Tr.Attr.n.u1Present)
+        { /* likely */ }
+        else
+            IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_GuestSegAttrTrPresent);
+
+        if (   (   (Tr.u32Limit & 0xfff) != 0xfff
+                && Tr.Attr.n.u1Granularity == 0)
+            || (   (Tr.u32Limit & 0xfff00000)
+                && Tr.Attr.n.u1Granularity == 1))
+        { /* likely */ }
+        else
+            IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_GuestSegAttrTrGran);
+    }
+
+    NOREF(pszInstr);
+    NOREF(pszFailure);
+    return VINF_SUCCESS;
+}
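Note: a second detail visible in the LDTR and TR blocks above — both seed Attr.u from the limit fields (u32GuestLdtrLimit, u32GuestTrLimit). Presumably the attribute fields were intended, by analogy with the u32GuestCsAttr/u32GuestSsAttr fields used earlier; the exact field names below are assumed:

    /* Presumed intent (field names assumed, not part of the changeset): */
    Ldtr.Attr.u = pVmcs->u32GuestLdtrAttr;
    Tr.Attr.u   = pVmcs->u32GuestTrAttr;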
+/**
+ * Checks guest-state as part of VM-entry.
+ *
+ * @returns VBox status code.
+ * @param   pVCpu       The cross context virtual CPU structure.
+ * @param   pszInstr    The VMX instruction name (for logging purposes).
+ */
+IEM_STATIC int iemVmxVmentryCheckGuestState(PVMCPU pVCpu, const char *pszInstr)
+{
+    int rc = iemVmxVmentryCheckGuestControlRegsMsrs(pVCpu, pszInstr);
+    if (rc == VINF_SUCCESS)
+    { /* likely */ }
+    else
+        return rc;
+
+    rc = iemVmxVmentryCheckGuestSegRegs(pVCpu, pszInstr);
+    if (rc == VINF_SUCCESS)
+    { /* likely */ }
+    else
+        return rc;
+
+    return VINF_SUCCESS;
+}
+
+
 /**
  * Checks host-state as part of VM-entry.
  *
  * @returns VBox status code.
  * @param   pVCpu       The cross context virtual CPU structure.
  * @param   pszInstr    The VMX instruction name (for logging purposes).
  */
 IEM_STATIC int iemVmxVmentryCheckHostState(PVMCPU pVCpu, const char *pszInstr)
 {
-    PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
-
     /*
      * Host Control Registers and MSRs.
      * See Intel spec. 26.2.2 "Checks on Host Control Registers and MSRs".
      */
+    PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
+    const char * const pszFailure = "VMFail";
+
     /* CR0 reserved bits. */
     {
…
         uint64_t const u64Cr0Fixed0 = CPUMGetGuestIa32VmxCr0Fixed0(pVCpu);
         if (~pVmcs->u64HostCr0.u & u64Cr0Fixed0)
-        {
-            Log(("%s: Invalid host CR0 %#RX64 (fixed0) -> VMFail\n", pszInstr, pVmcs->u64HostCr0.u));
-            pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_HostCr0Fixed0;
-            return VERR_VMX_VMENTRY_FAILED;
-        }
+            IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_HostCr0Fixed0);

         /* CR0 MBZ bits. */
         uint64_t const u64Cr0Fixed1 = CPUMGetGuestIa32VmxCr0Fixed1(pVCpu);
         if (pVmcs->u64HostCr0.u & ~u64Cr0Fixed1)
-        {
-            Log(("%s: Invalid host CR0 %#RX64 (fixed1) -> VMFail\n", pszInstr, pVmcs->u64HostCr0.u));
-            pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_HostCr0Fixed1;
-            return VERR_VMX_VMENTRY_FAILED;
-        }
+            IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_HostCr0Fixed1);
     }

…
         uint64_t const u64Cr4Fixed0 = CPUMGetGuestIa32VmxCr4Fixed0(pVCpu);
         if (~pVmcs->u64HostCr4.u & u64Cr4Fixed0)
-        {
-            Log(("%s: Invalid host CR4 %#RX64 (fixed0) -> VMFail\n", pszInstr, pVmcs->u64HostCr4.u));
-            pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_HostCr4Fixed0;
-            return VERR_VMX_VMENTRY_FAILED;
-        }
+            IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_HostCr4Fixed0);

         /* CR4 MBZ bits. */
         uint64_t const u64Cr4Fixed1 = CPUMGetGuestIa32VmxCr4Fixed1(pVCpu);
         if (pVmcs->u64HostCr4.u & ~u64Cr4Fixed1)
-        {
-            Log(("%s: Invalid host CR4 %#RX64 (fixed1) -> VMFail\n", pszInstr, pVmcs->u64HostCr4.u));
-            pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_HostCr4Fixed1;
-            return VERR_VMX_VMENTRY_FAILED;
-        }
+            IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_HostCr4Fixed1);
     }

…
     {
         /* CR3 reserved bits. */
-        if (pVmcs->u64HostCr3.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cMaxPhysAddrWidth)
-        {
-            Log(("%s: Invalid host CR3 %#RX64 -> VMFail\n", pszInstr, pVmcs->u64HostCr3.u));
-            pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_HostCr3;
-            return VERR_VMX_VMENTRY_FAILED;
-        }
+        if (!(pVmcs->u64HostCr3.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cMaxPhysAddrWidth))
+        { /* likely */ }
+        else
+            IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_HostCr3);

         /* SYSENTER ESP and SYSENTER EIP. */
…
         { /* likely */ }
         else
-        {
-            Log(("%s: Host Sysenter ESP (%#RX64) / EIP (%#RX64) not canonical -> VMFail\n", pszInstr,
-                 pVmcs->u64HostSysenterEsp.u, pVmcs->u64HostSysenterEip.u));
-            pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_HostSysenterEspEip;
-            return VERR_VMX_VMENTRY_FAILED;
-        }
+            IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_HostSysenterEspEip);
     }

…
     /* PAT MSR. */
-    if (   (pVmcs->u32ExitCtls & VMX_EXIT_CTLS_LOAD_PAT_MSR)
-        && !CPUMIsPatMsrValid(pVmcs->u64HostPatMsr.u))
-    {
-        Log(("%s: Host PAT MSR (%#RX64) invalid\n", pszInstr, pVmcs->u64HostPatMsr.u));
-        pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_HostPatMsr;
-        return VERR_VMX_VMENTRY_FAILED;
-    }
+    if (   !(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_LOAD_PAT_MSR)
+        || CPUMIsPatMsrValid(pVmcs->u64HostPatMsr.u))
+    { /* likely */ }
+    else
+        IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_HostPatMsr);

     /* EFER MSR. */
     uint64_t const uValidEferMask = CPUMGetGuestEferMsrValidMask(pVCpu->CTX_SUFF(pVM));
-    if (   (pVmcs->u32ExitCtls & VMX_EXIT_CTLS_LOAD_EFER_MSR)
-        && (pVmcs->u64HostEferMsr.u & ~uValidEferMask))
-    {
-        Log(("%s: Host EFER MSR (%#RX64) reserved bits set\n", pszInstr, pVmcs->u64HostEferMsr.u));
-        pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_HostEferMsrRsvd;
-        return VERR_VMX_VMENTRY_FAILED;
-    }
+    if (   !(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_LOAD_EFER_MSR)
+        || !(pVmcs->u64HostEferMsr.u & ~uValidEferMask))
+    { /* likely */ }
+    else
+        IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_HostEferMsrRsvd);
+
     bool const fHostInLongMode = RT_BOOL(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE);
     bool const fHostLma = RT_BOOL(pVmcs->u64HostEferMsr.u & MSR_K6_EFER_BIT_LMA);
…
     { /* likely */ }
     else
-    {
-        Log(("%s: Host EFER MSR (%#RX64) LMA, LME, host addr-space size mismatch\n", pszInstr, pVmcs->u64HostEferMsr.u));
-        pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_HostEferMsr;
-        return VERR_VMX_VMENTRY_FAILED;
-    }
+        IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_HostEferMsr);

     /*
…
     { /* likely */ }
     else
-    {
-        Log(("%s: One or more host selector registers invalid\n", pszInstr));
-        pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_HostSel;
-        return VERR_VMX_VMENTRY_FAILED;
-    }
+        IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_HostSel);

     /* CS and TR selectors cannot be 0. */
…
     { /* likely */ }
     else
-    {
-        Log(("%s: Host CS/TR selector is invalid\n", pszInstr));
-        pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_HostCsTr;
-        return VERR_VMX_VMENTRY_FAILED;
-    }
+        IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_HostCsTr);

     /* SS cannot be 0 if 32-bit host. */
…
     { /* likely */ }
     else
-    {
-        Log(("%s: Host SS selector invalid for 32-bit host\n", pszInstr));
-        pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_HostSs;
-        return VERR_VMX_VMENTRY_FAILED;
-    }
+        IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_HostSs);

     if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
…
         { /* likely */ }
         else
-        {
-            Log(("%s: Host segment register (FS/GS/GDTR/IDTR/TR) base address is not canonical\n", pszInstr));
-            pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_HostSegBase;
-            return VERR_VMX_VMENTRY_FAILED;
-        }
+            IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_HostSegBase);
     }

…
             { /* likely */ }
             else
-            {
-                Log(("%s: Host CR4.PAE not set when logical CPU is in long mode\n", pszInstr));
-                pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_HostCr4Pae;
-                return VERR_VMX_VMENTRY_FAILED;
-            }
+                IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_HostCr4Pae);

             /* RIP must be canonical. */
…
             { /* likely */ }
             else
-            {
-                Log(("%s: Host RIP must be canonical when logical CPU in long mode\n", pszInstr));
-                pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_HostRip;
-                return VERR_VMX_VMENTRY_FAILED;
-            }
+                IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_HostRip);
         }
         else
-        {
-            Log(("%s: Host must be in long mode when logical CPU in long mode\n", pszInstr));
-            pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_HostLongMode;
-            return VERR_VMX_VMENTRY_FAILED;
-        }
+            IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_HostLongMode);
     }
     else
…
             { /* likely */ }
             else
-            {
-                Log(("%s: Host CR4.PCIDE must be clear when logical CPU is not in long mode\n", pszInstr));
-                pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_HostCr4Pcide;
-                return VERR_VMX_VMENTRY_FAILED;
-            }
+                IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_HostCr4Pcide);

             /* The high 32-bits of RIP MBZ. */
…
             { /* likely */ }
             else
-            {
-                Log(("%s: Host RIP high 32-bits must be clear when logical CPU is not in long mode\n", pszInstr));
-                pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_HostRipRsvd;
-                return VERR_VMX_VMENTRY_FAILED;
-            }
+                IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_HostRipRsvd);
         }
         else
-        {
-            Log(("%s: Host/guest cannot be in long mode when logical CPU is not in long mode\n", pszInstr));
-            pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_HostGuestLongMode;
-            return VERR_VMX_VMENTRY_FAILED;
-        }
+            IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_HostGuestLongMode);
     }
…
         { /* likely */ }
         else
-        {
-            Log(("%s: Host/guest cannot be in long mode on 32-bit CPUs\n", pszInstr));
-            pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_HostGuestLongModeNoCpu;
-            return VERR_VMX_VMENTRY_FAILED;
-        }
+            IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_HostGuestLongModeNoCpu);
     }

     NOREF(pszInstr);
+    NOREF(pszFailure);
     return VINF_SUCCESS;
 }
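Note: the CR0/CR4 fixed-bit checks above (both guest and host variants) all use one idiom — FIXED0 lists bits that must be 1, FIXED1 lists bits that may be 1. A worked sketch with hypothetical values:

    /* Hypothetical values, for illustration only. */
    uint64_t const uCr0       = UINT64_C(0x80000031);  /* PG | NE | ET | PE */
    uint64_t const uCr0Fixed0 = UINT64_C(0x80000021);  /* PG, NE, PE must be 1 */
    uint64_t const uCr0Fixed1 = UINT64_C(0xffffffff);  /* any of these may be 1 */
    bool const fFixed0Ok = !(~uCr0 & uCr0Fixed0);      /* no required bit is clear */
    bool const fFixed1Ok = !(uCr0 & ~uCr0Fixed1);      /* no disallowed bit is set */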
*/ … … 2417 2961 { /* likely */ } 2418 2962 else 2419 { 2420 Log(("%s: Host RIP must be canonicalwhen logical CPU in long mode\n", pszInstr)); 2421 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_HostRip; 2422 return VERR_VMX_VMENTRY_FAILED; 2423 } 2963 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_HostRip); 2424 2964 } 2425 2965 else 2426 { 2427 Log(("%s: Host must be in long mode when logical CPU in long mode\n", pszInstr)); 2428 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_HostLongMode; 2429 return VERR_VMX_VMENTRY_FAILED; 2430 } 2966 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_HostLongMode); 2431 2967 } 2432 2968 else … … 2440 2976 { /* likely */ } 2441 2977 else 2442 { 2443 Log(("%s: Host CR4.PCIDE must be clear when logical CPU is not in long mode\n", pszInstr)); 2444 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_HostCr4Pcide; 2445 return VERR_VMX_VMENTRY_FAILED; 2446 } 2978 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_HostCr4Pcide); 2447 2979 2448 2980 /* The high 32-bits of RIP MBZ. */ … … 2450 2982 { /* likely */ } 2451 2983 else 2452 { 2453 Log(("%s: Host RIP high 32-bits must be clear when logical CPU is not in long mode\n", pszInstr)); 2454 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_HostRipRsvd; 2455 return VERR_VMX_VMENTRY_FAILED; 2456 } 2984 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_HostRipRsvd); 2457 2985 } 2458 2986 else 2459 { 2460 Log(("%s: Host/guest cannot be in long mode when logical CPU is not in long mode\n", pszInstr)); 2461 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_HostGuestLongMode; 2462 return VERR_VMX_VMENTRY_FAILED; 2463 } 2987 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_HostGuestLongMode); 2464 2988 } 2465 2989 } … … 2471 2995 { /* likely */ } 2472 2996 else 2473 { 2474 Log(("%s: Host/guest cannot be in long mode on 32-bit CPUs\n", pszInstr)); 2475 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_HostGuestLongModeNoCpu; 2476 return VERR_VMX_VMENTRY_FAILED; 2477 } 2997 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_HostGuestLongModeNoCpu); 2478 2998 } 2479 2999 2480 3000 NOREF(pszInstr); 3001 NOREF(pszFailure); 2481 3002 return VINF_SUCCESS; 2482 3003 } … … 2494 3015 { 2495 3016 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs); 3017 const char * const pszFailure = "VMFail"; 2496 3018 2497 3019 /* VM-entry controls. 
*/ … … 2499 3021 EntryCtls.u = CPUMGetGuestIa32VmxEntryCtls(pVCpu); 2500 3022 if (~pVmcs->u32EntryCtls & EntryCtls.n.disallowed0) 2501 { 2502 Log(("%s: Invalid EntryCtls %#RX32 (disallowed0) -> VMFail\n", pszInstr, pVmcs->u32EntryCtls)); 2503 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_EntryCtlsDisallowed0; 2504 return VERR_VMX_VMENTRY_FAILED; 2505 } 3023 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_EntryCtlsDisallowed0); 3024 2506 3025 if (pVmcs->u32EntryCtls & ~EntryCtls.n.allowed1) 2507 { 2508 Log(("%s: Invalid EntryCtls %#RX32 (allowed1) -> VMFail\n", pszInstr, pVmcs->u32EntryCtls)); 2509 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_EntryCtlsAllowed1; 2510 return VERR_VMX_VMENTRY_FAILED; 2511 } 3026 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_EntryCtlsAllowed1); 2512 3027 2513 3028 /* Event injection. */ … … 2524 3039 { /* likely */ } 2525 3040 else 2526 { 2527 Log(("%s: VM-entry interruption info (%#RX32) invalid (rsvd/type/vector) -> VMFail\n", pszInstr, uIntInfo)); 2528 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_EntryIntInfoTypeVecRsvd; 2529 return VERR_VMX_VMENTRY_FAILED; 2530 } 3041 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_EntryIntInfoTypeVecRsvd); 2531 3042 2532 3043 /* Exception error code. */ … … 2538 3049 { /* likely */ } 2539 3050 else 2540 { 2541 Log(("%s: VM-entry interruption (%#RX32) invalid error-code (paging-mode) -> VMFail\n", pszInstr, uIntInfo)); 2542 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_EntryIntInfoErrCodePe; 2543 return VERR_VMX_VMENTRY_FAILED; 2544 } 3051 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_EntryIntInfoErrCodePe); 2545 3052 2546 3053 /* Exceptions that provide an error code. */ … … 2555 3062 { /* likely */ } 2556 3063 else 2557 { 2558 Log(("%s: VM-entry interruption (%#RX32) invalid error-code (vector) -> VMFail\n", pszInstr, uIntInfo)); 2559 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_EntryIntInfoErrCodeVec; 2560 return VERR_VMX_VMENTRY_FAILED; 2561 } 3064 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_EntryIntInfoErrCodeVec); 2562 3065 2563 3066 /* Exception error-code reserved bits. */ 2564 if (pVmcs->u32EntryXcptErrCode & ~VMX_ENTRY_INT_XCPT_ERR_CODE_VALID_MASK) 2565 { 2566 Log(("%s: VM-entry exception error-code (%#RX32) invalid -> VMFail\n", pszInstr, uIntInfo)); 2567 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_EntryXcptErrCodeRsvd; 2568 return VERR_VMX_VMENTRY_FAILED; 2569 } 3067 if (!(pVmcs->u32EntryXcptErrCode & ~VMX_ENTRY_INT_XCPT_ERR_CODE_VALID_MASK)) 3068 { /* likely */ } 3069 else 3070 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_EntryXcptErrCodeRsvd); 2570 3071 2571 3072 /* Injecting a software interrupt, software exception or privileged software exception. */ … … 2575 3076 { 2576 3077 /* Instruction length must be in the range 0-15. */ 2577 if (pVmcs->u32EntryInstrLen > VMX_ENTRY_INSTR_LEN_MAX) 2578 { 2579 Log(("%s: VM-entry instruction length (%#RX32) invalid -> VMFail\n", pszInstr, pVmcs->u32EntryInstrLen)); 2580 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_EntryInstrLen; 2581 return VERR_VMX_VMENTRY_FAILED; 2582 } 2583 2584 /* Zero instruction length is allowed only when the CPU supports it explicitly. 
*/ 3078 if (pVmcs->u32EntryInstrLen <= VMX_ENTRY_INSTR_LEN_MAX) 3079 { /* likely */ } 3080 else 3081 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_EntryInstrLen); 3082 3083 /* Instruction length of 0 is allowed only when its CPU feature is present. */ 2585 3084 if ( pVmcs->u32EntryInstrLen == 0 2586 3085 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxEntryInjectSoftInt) 2587 { 2588 Log(("%s: VM-entry instruction length zero invalid (swint/xcpt/priv) -> VMFail\n", pszInstr)); 2589 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_EntryInstrLenZero; 2590 return VERR_VMX_VMENTRY_FAILED; 2591 } 3086 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_EntryInstrLenZero); 2592 3087 } 2593 3088 } … … 2600 3095 || (pVmcs->u64AddrEntryMsrLoad.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth) 2601 3096 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrEntryMsrLoad.u)) 2602 { 2603 Log(("%s: VM-entry MSR-load area address %#RX64 invalid -> VMFail\n", pszInstr, pVmcs->u64AddrEntryMsrLoad.u)); 2604 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_AddrEntryMsrLoad; 2605 return VERR_VMX_VMENTRY_FAILED; 2606 } 3097 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_AddrEntryMsrLoad); 2607 3098 } 2608 3099 … … 2611 3102 2612 3103 NOREF(pszInstr); 3104 NOREF(pszFailure); 2613 3105 return VINF_SUCCESS; 2614 3106 } … … 2626 3118 { 2627 3119 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs); 3120 const char * const pszFailure = "VMFail"; 2628 3121 2629 3122 /* VM-exit controls. */ … … 2631 3124 ExitCtls.u = CPUMGetGuestIa32VmxExitCtls(pVCpu); 2632 3125 if (~pVmcs->u32ExitCtls & ExitCtls.n.disallowed0) 2633 { 2634 Log(("%s: Invalid ExitCtls %#RX32 (disallowed0) -> VMFail\n", pszInstr, pVmcs->u32ExitCtls)); 2635 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_ExitCtlsDisallowed0; 2636 return VERR_VMX_VMENTRY_FAILED; 2637 } 3126 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_ExitCtlsDisallowed0); 3127 2638 3128 if (pVmcs->u32ExitCtls & ~ExitCtls.n.allowed1) 2639 { 2640 Log(("%s: Invalid ExitCtls %#RX32 (allowed1) -> VMFail\n", pszInstr, pVmcs->u32ExitCtls)); 2641 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_ExitCtlsAllowed1; 2642 return VERR_VMX_VMENTRY_FAILED; 2643 } 3129 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_ExitCtlsAllowed1); 2644 3130 2645 3131 /* Save preemption timer without activating it. */ 2646 3132 if ( !(pVmcs->u32PinCtls & VMX_PIN_CTLS_PREEMPT_TIMER) 2647 3133 && (pVmcs->u32ProcCtls & VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER)) 2648 { 2649 Log(("%s: Save Preemption timer without Activate Preempt timer -> VMFail\n", pszInstr)); 2650 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_SavePreemptTimer; 2651 return VERR_VMX_VMENTRY_FAILED; 2652 } 3134 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_SavePreemptTimer); 2653 3135 2654 3136 /* VM-exit MSR-store count and VM-exit MSR-store area address. 
*/ … … 2658 3140 || (pVmcs->u64AddrExitMsrStore.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth) 2659 3141 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrExitMsrStore.u)) 2660 { 2661 Log(("%s: VM-exit MSR-store area address %#RX64 invalid -> VMFail\n", pszInstr, pVmcs->u64AddrExitMsrStore.u)); 2662 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_AddrExitMsrStore; 2663 return VERR_VMX_VMENTRY_FAILED; 2664 } 3142 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_AddrExitMsrStore); 2665 3143 } 2666 3144 … … 2671 3149 || (pVmcs->u64AddrExitMsrLoad.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth) 2672 3150 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrExitMsrLoad.u)) 2673 { 2674 Log(("%s: VM-exit MSR-store area address %#RX64 invalid -> VMFail\n", pszInstr, pVmcs->u64AddrExitMsrLoad.u)); 2675 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_AddrExitMsrLoad; 2676 return VERR_VMX_VMENTRY_FAILED; 2677 } 3151 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_AddrExitMsrLoad); 2678 3152 } 2679 3153 2680 3154 NOREF(pszInstr); 3155 NOREF(pszFailure); 2681 3156 return VINF_SUCCESS; 2682 3157 } … … 2697 3172 { 2698 3173 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs); 3174 const char * const pszFailure = "VMFail"; 3175 2699 3176 /* Pin-based VM-execution controls. */ 2700 3177 { … … 2702 3179 PinCtls.u = CPUMGetGuestIa32VmxPinbasedCtls(pVCpu); 2703 3180 if (~pVmcs->u32PinCtls & PinCtls.n.disallowed0) 2704 { 2705 Log(("%s: Invalid PinCtls %#RX32 (disallowed0) -> VMFail\n", pszInstr, pVmcs->u32PinCtls)); 2706 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_PinCtlsDisallowed0; 2707 return VERR_VMX_VMENTRY_FAILED; 2708 } 3181 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_PinCtlsDisallowed0); 3182 2709 3183 if (pVmcs->u32PinCtls & ~PinCtls.n.allowed1) 2710 { 2711 Log(("%s: Invalid PinCtls %#RX32 (allowed1) -> VMFail\n", pszInstr, pVmcs->u32PinCtls)); 2712 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_PinCtlsAllowed1; 2713 return VERR_VMX_VMENTRY_FAILED; 2714 } 3184 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_PinCtlsAllowed1); 2715 3185 } 2716 3186 … … 2720 3190 ProcCtls.u = CPUMGetGuestIa32VmxProcbasedCtls(pVCpu); 2721 3191 if (~pVmcs->u32ProcCtls & ProcCtls.n.disallowed0) 2722 { 2723 Log(("%s: Invalid ProcCtls %#RX32 (disallowed0) -> VMFail\n", pszInstr, pVmcs->u32ProcCtls)); 2724 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_ProcCtlsDisallowed0; 2725 return VERR_VMX_VMENTRY_FAILED; 2726 } 3192 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_ProcCtlsDisallowed0); 3193 2727 3194 if (pVmcs->u32ProcCtls & ~ProcCtls.n.allowed1) 2728 { 2729 Log(("%s: Invalid ProcCtls %#RX32 (allowed1) -> VMFail\n", pszInstr, pVmcs->u32ProcCtls)); 2730 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_ProcCtlsAllowed1; 2731 return VERR_VMX_VMENTRY_FAILED; 2732 } 3195 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_ProcCtlsAllowed1); 2733 3196 } 2734 3197 … … 2737 3200 { 2738 3201 VMXCTLSMSR ProcCtls2; 2739 ProcCtls2.u = CPUMGetGuestIa32VmxProcbasedCtls (pVCpu);3202 ProcCtls2.u = CPUMGetGuestIa32VmxProcbasedCtls2(pVCpu); 2740 3203 if (~pVmcs->u32ProcCtls2 & ProcCtls2.n.disallowed0) 2741 { 2742 Log(("%s: Invalid ProcCtls2 %#RX32 (disallowed0) -> VMFail\n", 
pszInstr, pVmcs->u32ProcCtls2)); 2743 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_ProcCtls2Disallowed0; 2744 return VERR_VMX_VMENTRY_FAILED; 2745 } 3204 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_ProcCtls2Disallowed0); 3205 2746 3206 if (pVmcs->u32ProcCtls2 & ~ProcCtls2.n.allowed1) 2747 { 2748 Log(("%s: Invalid ProcCtls2 %#RX32 (allowed1) -> VMFail\n", pszInstr, pVmcs->u32ProcCtls2)); 2749 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_ProcCtls2Allowed1; 2750 return VERR_VMX_VMENTRY_FAILED; 2751 } 3207 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_ProcCtls2Allowed1); 2752 3208 } 2753 3209 else 2754 { 2755 /* 2756 * The guest is always capable of corrupting the VMCS by writing to the VMCS is guest 2757 * memory directly rather than follow the rules. So we don't make any assumptions that 2758 * u32ProcCtls2 will be 0 if no secondary-processor based VM-execution control support 2759 * is reported to the guest. 2760 */ 2761 pVmcs->u32ProcCtls2 = 0; 2762 } 3210 Assert(!pVmcs->u32ProcCtls2); 2763 3211 2764 3212 /* CR3-target count. */ 2765 if (pVmcs->u32Cr3TargetCount > VMX_V_CR3_TARGET_COUNT) 2766 { 2767 Log(("%s: CR3-target count exceeded %#x -> VMFail\n", pszInstr, pVmcs->u32Cr3TargetCount)); 2768 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_Cr3TargetCount; 2769 return VERR_VMX_VMENTRY_FAILED; 2770 } 3213 if (pVmcs->u32Cr3TargetCount <= VMX_V_CR3_TARGET_COUNT) 3214 { /* likely */ } 3215 else 3216 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_Cr3TargetCount); 2771 3217 2772 3218 /* IO bitmaps physical addresses. */ … … 2776 3222 || (pVmcs->u64AddrIoBitmapA.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth) 2777 3223 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrIoBitmapA.u)) 2778 { 2779 Log(("%s: I/O Bitmap A physaddr invalid %#RX64 -> VMFail\n", pszInstr, pVmcs->u64AddrIoBitmapA.u)); 2780 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_AddrIoBitmapA; 2781 return VERR_VMX_VMENTRY_FAILED; 2782 } 3224 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_AddrIoBitmapA); 2783 3225 2784 3226 if ( (pVmcs->u64AddrIoBitmapB.u & X86_PAGE_4K_OFFSET_MASK) 2785 3227 || (pVmcs->u64AddrIoBitmapB.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth) 2786 3228 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrIoBitmapB.u)) 2787 { 2788 Log(("%s: I/O Bitmap B physaddr invalid %#RX64 -> VMFail\n", pszInstr, pVmcs->u64AddrIoBitmapB.u)); 2789 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_AddrIoBitmapB; 2790 return VERR_VMX_VMENTRY_FAILED; 2791 } 3229 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_AddrIoBitmapB); 2792 3230 } 2793 3231 … … 2798 3236 || (pVmcs->u64AddrMsrBitmap.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth) 2799 3237 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrMsrBitmap.u)) 2800 { 2801 Log(("%s: MSR Bitmap physaddr invalid %#RX64 -> VMFail\n", pszInstr, pVmcs->u64AddrMsrBitmap.u)); 2802 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_AddrMsrBitmap; 2803 return VERR_VMX_VMENTRY_FAILED; 2804 } 3238 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_AddrMsrBitmap); 2805 3239 } 2806 3240 … … 2813 3247 || (GCPhysVirtApic >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth) 2814 3248 || 
!PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVirtApic)) 2815 { 2816 Log(("%s: Virtual-APIC page physaddr invalid %#RX64 -> VMFail\n", pszInstr, GCPhysVirtApic)); 2817 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_AddrVirtApicPage; 2818 return VERR_VMX_VMENTRY_FAILED; 2819 } 3249 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_AddrVirtApicPage); 2820 3250 2821 3251 /* Read the Virtual-APIC page. */ … … 2824 3254 GCPhysVirtApic, VMX_V_VIRT_APIC_PAGES); 2825 3255 if (RT_FAILURE(rc)) 2826 { 2827 Log(("%s: Failed to read Virtual-APIC page at %#RGp, rc=%Rrc\n", pszInstr, GCPhysVirtApic, rc)); 2828 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_VirtApicPagePtrReadPhys; 2829 return rc; 2830 } 3256 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_VirtApicPagePtrReadPhys); 2831 3257 2832 3258 /* TPR threshold without virtual-interrupt delivery. */ 2833 3259 if ( !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY) 2834 3260 && (pVmcs->u32TprThreshold & VMX_TPR_THRESHOLD_MASK)) 2835 { 2836 Log(("%s: TPR-threshold (%#RX32) invalid -> VMFail\n", pszInstr, pVmcs->u32TprThreshold)); 2837 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_TprThreshold; 2838 return VERR_VMX_VMENTRY_FAILED; 2839 } 3261 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_TprThreshold); 2840 3262 2841 3263 /* TPR threshold and VTPR. */ … … 2845 3267 && !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY) 2846 3268 && RT_BF_GET(pVmcs->u32TprThreshold, VMX_BF_TPR_THRESHOLD_TPR) > ((u8VTpr >> 4) & UINT32_C(0xf)) /* Bits 4:7 */) 2847 { 2848 Log(("%s: TPR-threshold (%#x) exceeds VTPR (%#x) -> VMFail\n", pszInstr, 2849 (pVmcs->u32TprThreshold & VMX_TPR_THRESHOLD_MASK), u8VTpr)); 2850 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_TprThresholdVTpr; 2851 return VERR_VMX_VMENTRY_FAILED; 2852 } 3269 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_TprThresholdVTpr); 2853 3270 } 2854 3271 else … … 2861 3278 { 2862 3279 if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_X2APIC_MODE) 2863 { 2864 Log(("%s: Virtualize x2APIC access without TPR shadowing -> VMFail\n", pszInstr)); 2865 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_VirtX2ApicTprShadow; 2866 return VERR_VMX_VMENTRY_FAILED; 2867 } 2868 else if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_APIC_REG_VIRT) 2869 { 2870 Log(("%s: APIC-register virtualization without TPR shadowing -> VMFail\n", pszInstr)); 2871 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_ApicRegVirt; 2872 return VERR_VMX_VMENTRY_FAILED; 2873 } 2874 else 2875 { 2876 Assert(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY); 2877 Log(("%s: Virtual-interrupt delivery without TPR shadowing -> VMFail\n", pszInstr)); 2878 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_VirtIntDelivery; 2879 return VERR_VMX_VMENTRY_FAILED; 2880 } 3280 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_VirtX2ApicTprShadow); 3281 if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_APIC_REG_VIRT) 3282 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_ApicRegVirt); 3283 Assert(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY); 3284 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_VirtIntDelivery); 2881 3285 } 2882 3286 } … … 2885 3289 if ( !(pVmcs->u32PinCtls & VMX_PIN_CTLS_NMI_EXIT) 2886 3290 && 
(pVmcs->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)) 2887 { 2888 Log(("%s: Virtual-NMIs invalid without NMI-exiting -> VMFail\n", pszInstr)); 2889 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_VirtNmi; 2890 return VERR_VMX_VMENTRY_FAILED; 2891 } 3291 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_VirtNmi); 2892 3292 2893 3293 /* Virtual-NMIs and NMI-window exiting. */ 2894 3294 if ( !(pVmcs->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI) 2895 3295 && (pVmcs->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT)) 2896 { 2897 Log(("%s: NMI-window exiting invalid without virtual-NMIs -> VMFail\n", pszInstr)); 2898 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_NmiWindowExit; 2899 return VERR_VMX_VMENTRY_FAILED; 2900 } 3296 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_NmiWindowExit); 2901 3297 2902 3298 /* Virtualize APIC accesses. */ … … 2908 3304 || (GCPhysApicAccess >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth) 2909 3305 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysApicAccess)) 2910 { 2911 Log(("%s: APIC-access address invalid %#RX64 -> VMFail\n", pszInstr, GCPhysApicAccess)); 2912 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_AddrApicAccess; 2913 return VERR_VMX_VMENTRY_FAILED; 2914 } 3306 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_AddrApicAccess); 2915 3307 } 2916 3308 … … 2918 3310 if ( (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_X2APIC_MODE) 2919 3311 && (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS)) 2920 { 2921 Log(("%s: Virtualize-APIC access when virtualize-x2APIC mode is enabled -> VMFail", pszInstr)); 2922 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_VirtX2ApicVirtApic; 2923 return VERR_VMX_VMENTRY_FAILED; 2924 } 3312 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_VirtX2ApicVirtApic); 2925 3313 2926 3314 /* Virtual-interrupt delivery requires external interrupt exiting. */ 2927 3315 if ( (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY) 2928 3316 && !(pVmcs->u32PinCtls & VMX_PIN_CTLS_EXT_INT_EXIT)) 2929 { 2930 Log(("%s: Virtual-interrupt delivery without external interrupt exiting -> VMFail\n", pszInstr)); 2931 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_VirtX2ApicVirtApic; 2932 return VERR_VMX_VMENTRY_FAILED; 2933 } 3317 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_VirtX2ApicVirtApic); 2934 3318 2935 3319 /* VPID. */ 2936 if ( (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VPID) 2937 && pVmcs->u16Vpid == 0) 2938 { 2939 Log(("%s: VPID invalid -> VMFail\n", pszInstr)); 2940 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_Vpid; 2941 return VERR_VMX_VMENTRY_FAILED; 2942 } 3320 if ( !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VPID) 3321 || pVmcs->u16Vpid != 0) 3322 { /* likely */ } 3323 else 3324 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_Vpid); 2943 3325 2944 3326 Assert(!(pVmcs->u32PinCtls & VMX_PIN_CTLS_POSTED_INT)); /* We don't support posted interrupts yet. 
*/ … … 2957 3339 || ( GCPhysVmreadBitmap >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth) 2958 3340 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmreadBitmap)) 2959 { 2960 Log(("%s: VMREAD-bitmap address invalid %#RX64 -> VMFail\n", pszInstr, GCPhysVmreadBitmap)); 2961 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_AddrVmreadBitmap; 2962 return VERR_VMX_VMENTRY_FAILED; 2963 } 3341 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_AddrVmreadBitmap); 2964 3342 2965 3343 /* VMWRITE-bitmap physical address. */ … … 2968 3346 || ( GCPhysVmwriteBitmap >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth) 2969 3347 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmwriteBitmap)) 2970 { 2971 Log(("%s: VMWRITE-bitmap address invalid %#RX64 -> VMFail\n", pszInstr, GCPhysVmwriteBitmap)); 2972 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_AddrVmwriteBitmap; 2973 return VERR_VMX_VMENTRY_FAILED; 2974 } 3348 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_AddrVmwriteBitmap); 2975 3349 2976 3350 /* Read the VMREAD-bitmap. */ … … 2979 3353 GCPhysVmreadBitmap, VMX_V_VMREAD_VMWRITE_BITMAP_SIZE); 2980 3354 if (RT_FAILURE(rc)) 2981 { 2982 Log(("%s: Failed to read VMREAD-bitmap at %#RGp, rc=%Rrc\n", pszInstr, GCPhysVmreadBitmap, rc)); 2983 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_VmreadBitmapPtrReadPhys; 2984 return rc; 2985 } 3355 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_VmreadBitmapPtrReadPhys); 2986 3356 2987 3357 /* Read the VMWRITE-bitmap. */ … … 2990 3360 GCPhysVmwriteBitmap, VMX_V_VMREAD_VMWRITE_BITMAP_SIZE); 2991 3361 if (RT_FAILURE(rc)) 2992 { 2993 Log(("%s: Failed to read VMWRITE-bitmap at %#RGp, rc=%Rrc\n", pszInstr, GCPhysVmwriteBitmap, rc)); 2994 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmentry_VmwriteBitmapPtrReadPhys; 2995 return rc; 2996 } 3362 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVInstrDiag_Vmentry_VmwriteBitmapPtrReadPhys); 2997 3363 } 2998 3364 2999 3365 NOREF(pszInstr); 3366 NOREF(pszFailure); 3000 3367 return VINF_SUCCESS; 3001 3368 }
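The hunks above replace every open-coded failure block (a Log statement, an enmInstrDiag assignment and a return) with the new IEM_VMX_VMENTRY_FAILED_RET macro. The macro's definition is not part of the hunks shown here; the following is only a sketch of what it plausibly expands to, reconstructed from the removed failure paths (the stringized diagnostic in the log line is an assumption, not the committed definition):

    /* Sketch only: reconstructed from the removed failure blocks above; the
       committed definition lives elsewhere in IEMAllCImplVmxInstr.cpp.impl. */
    #define IEM_VMX_VMENTRY_FAILED_RET(a_pVCpu, a_pszInstr, a_pszFailure, a_VmxDiag) \
        do \
        { \
            /* Log the instruction, the diagnostic and the failure kind ("VMFail"). */ \
            Log(("%s: %s -> %s\n", (a_pszInstr), #a_VmxDiag, (a_pszFailure))); \
            /* Record the diagnostic for the caller, as the removed blocks did. */ \
            (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = (a_VmxDiag); \
            return VERR_VMX_VMENTRY_FAILED; \
        } while (0)

One behavioural change is visible in the hunks themselves: the PGMPhysSimpleReadGCPhys failure paths used to return the raw rc, whereas the macro-based code now returns VERR_VMX_VMENTRY_FAILED for those cases as well.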
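The control-field checks all follow one pattern derived from the VMX capability MSRs: n.disallowed0 holds the bits that must be 1 (the MSR's low dword, its allowed 0-settings) and n.allowed1 the bits that may be 1 (the high dword), so ~u32Ctls & disallowed0 flags a required bit that is clear, and u32Ctls & ~allowed1 flags a forbidden bit that is set. A self-contained illustration of the arithmetic, using invented values rather than real capability-MSR contents:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Invented capability values, for the example only. */
        uint32_t const fDisallowed0 = UINT32_C(0x00000016); /* bits 1, 2 and 4 must be 1 */
        uint32_t const fAllowed1    = UINT32_C(0x000000d6); /* bits 1, 2, 4, 6 and 7 may be 1 */
        uint32_t const uGuestCtls   = UINT32_C(0x00008014); /* guest set bits 2, 4 and 15 */

        if (~uGuestCtls & fDisallowed0)  /* yields 0x2: required bit 1 is clear */
            printf("VMFail (disallowed0): %#x\n", ~uGuestCtls & fDisallowed0);
        if (uGuestCtls & ~fAllowed1)     /* yields 0x8000: forbidden bit 15 is set */
            printf("VMFail (allowed1): %#x\n", uGuestCtls & ~fAllowed1);
        return 0;
    }

Either expression being non-zero is what makes the emulated VM-entry fail with the corresponding kVmxVInstrDiag_Vmentry_*Disallowed0 or *Allowed1 diagnostic.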