Changeset 73606 in vbox for trunk/src/VBox/VMM
- Timestamp: Aug 10, 2018 7:38:56 AM
- Location: trunk/src/VBox/VMM
- Files: 1 added, 20 edited
trunk/src/VBox/VMM/Makefile.kmk
(r73348 → r73606)
- VMMAll/HMVMXAll.cpp is added right after VMMAll/HMSVMAll.cpp in all three source lists where the HM files appear (around lines 177, 575 and 722 of the makefile).
- The raw-mode context define filter now also strips the VMX nested-hardware-virtualization define:
  VMMRC_DEFS := $(filter-out VBOX_WITH_NESTED_HWVIRT_SVM VBOX_WITH_NESTED_HWVIRT_VMX,$(VMMRC_DEFS))
trunk/src/VBox/VMM/VMMAll/CPUMAllMsrs.cpp
(r73431 → r73606)
- Added CPUMGetGuestIa32SmmMonitorCtl(PVMCPU pVCpu), which returns the MSR_IA32_SMM_MONITOR_CTL value for IEM and for cpumMsrRd_Ia32SmmMonitorCtl. Dual-monitor treatment of SMIs and SMM is not supported, so it currently returns 0 (with an SMM todo); cpumMsrRd_Ia32SmmMonitorCtl now delegates to it.
- Added CPUMGetGuestIa32VmxBasic(PVMCPU pVCpu): the IA32_VMX_BASIC value previously built inline in cpumMsrRd_Ia32VmxBasic is now composed here. When the guest features include VMX it is assembled with RT_BF_MAKE from VMX_BF_BASIC_VMCS_ID (VMX_V_VMCS_REVISION_ID), VMX_BF_BASIC_VMCS_SIZE (VMX_V_VMCS_SIZE), VMX_BF_BASIC_PHYSADDR_WIDTH (VMX_V_VMCS_PHYSADDR_4G_LIMIT), VMX_BF_BASIC_DUAL_MON (0), VMX_BF_BASIC_VMCS_MEM_TYPE (VMX_BASIC_MEM_TYPE_WB), VMX_BF_BASIC_VMCS_INS_OUTS (fVmxInsOutInfo) and VMX_BF_BASIC_TRUE_CTLS (0); otherwise 0 is returned. cpumMsrRd_Ia32VmxBasic now delegates to it (the field layout is decoded in the sketch below).
- cpumMsrRd_Gim / cpumMsrWr_Gim: the guard becomes #if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX), and the nested-guest test uses CPUMIsGuestInSvmNestedHwVirtMode() || CPUMIsGuestInVmxNonRootMode() instead of CPUMIsGuestInNestedHwVirtMode(), so #GP(0) is raised like a physical CPU would when the nested hypervisor has not intercepted these MSRs.
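Note on the IA32_VMX_BASIC composition above: the RT_BF_MAKE fields follow the architectural layout of that MSR. The following standalone C sketch decodes such a value back into its fields; it is illustrative only, decode_vmx_basic() and its variable names are not VirtualBox code, and the bit positions come from the Intel SDM rather than from this changeset.

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative decode of an IA32_VMX_BASIC value as composed by
       CPUMGetGuestIa32VmxBasic().  Field positions follow the architectural
       MSR layout; the function and variable names here are made up for the
       example and are not part of the changeset. */
    static void decode_vmx_basic(uint64_t uBasic)
    {
        uint32_t uVmcsRevisionId = (uint32_t)(uBasic & 0x7fffffff);      /* bits 30:0  */
        uint32_t cbVmcsRegion    = (uint32_t)((uBasic >> 32) & 0x1fff);  /* bits 44:32 */
        int      f32BitPhysAddr  = (int)((uBasic >> 48) & 1);            /* bit 48     */
        int      fDualMonitor    = (int)((uBasic >> 49) & 1);            /* bit 49     */
        uint32_t uVmcsMemType    = (uint32_t)((uBasic >> 50) & 0xf);     /* bits 53:50 */
        int      fInsOutsInfo    = (int)((uBasic >> 54) & 1);            /* bit 54     */
        int      fTrueCtls       = (int)((uBasic >> 55) & 1);            /* bit 55     */

        printf("VMCS revision %#x, region %u bytes, 32-bit-physaddr=%d, dual-mon=%d, "
               "mem-type=%u, ins/outs-info=%d, true-ctls=%d\n",
               uVmcsRevisionId, cbVmcsRegion, f32BitPhysAddr, fDualMonitor,
               uVmcsMemType, fInsOutsInfo, fTrueCtls);
    }

Exposing the value through the new CPUMGetGuestIa32VmxBasic() getter rather than only the MSR-read callback is what lets IEM reuse it for the VMXON physical-address-width check further down in this changeset.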
trunk/src/VBox/VMM/VMMAll/HMAll.cpp
(r73395 → r73606)
- Removed HMAmdIsSubjectToErratum170(), the check for AMD erratum 170 "In SVM mode, incorrect code bytes may be fetched after a world-switch"; it moves to HMSVMAll.cpp as HMSvmIsSubjectToErratum170() (see that file below).
- Removed the HMNstGstVmxVmExit() placeholder (VMX nested-guest VM-exit handler stub).
- Removed HMVmxGetHostMsrs() and HMVmxGetHostMsr(), the accessors for the VMX host MSRs that HM reads during ring-0 initialization (feature control, VMX basic, pin-/proc-/exit-/entry-based controls and their "true" variants, misc, CR0/CR4 fixed bits, VMCS enum, VMFUNC and EPT/VPID capabilities).
trunk/src/VBox/VMM/VMMAll/HMSVMAll.cpp
(r73287 → r73606)
- Dropped the <VBox/vmm/hm.h> and <VBox/vmm/hm_svm.h> includes.
- Added HMSvmIsSubjectToErratum170(uint32_t *pu32Family, uint32_t *pu32Model, uint32_t *pu32Stepping), moved here from HMAll.cpp into the !IN_RC section. It checks whether the current AMD CPU is subject to erratum 170 "In SVM mode, incorrect code bytes may be fetched after a world-switch", which requires a forced TLB flush on every world switch (AMD "Revision Guide for AMD NPT Family 0Fh Processors"). Family, model and stepping are derived from CPUID leaf 1 (see the sketch below) and optionally returned through the pointer arguments; the erratum is reported for family 0xf parts other than the fixed BH-G1/2 and DH-G1/2 models (0x68/0x6b/0x7f at stepping >= 1 and 0x6f/0x6c/0x7c at stepping >= 2, covering the listed Athlon X2, Athlon 64, Sempron and Turion 64 parts).
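The family/model/stepping arithmetic used by HMSvmIsSubjectToErratum170() is the standard CPUID leaf 1 EAX decoding, with the extended family/model fields only applied when the base family is 0xf. A minimal standalone restatement, using GCC/Clang's __get_cpuid() instead of VirtualBox's ASMCpuId(); the program and its variable names are illustrative, not part of the changeset:

    #include <stdint.h>
    #include <stdio.h>
    #include <cpuid.h>   /* GCC/Clang CPUID helper */

    int main(void)
    {
        unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;
        if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
            return 1;

        /* CPUID leaf 1, EAX: [3:0] stepping, [7:4] base model, [11:8] base family,
           [19:16] extended model, [27:20] extended family.  Extended fields only
           apply when the base family is 0xf, as in the changeset's decoding. */
        uint32_t baseFamily = (eax >> 8) & 0xf;
        uint32_t family     = baseFamily + (baseFamily == 0xf ? ((eax >> 20) & 0x7f) : 0);
        uint32_t model      = ((eax >> 4) & 0xf)
                            | ((baseFamily == 0xf ? (eax >> 16) & 0xf : 0) << 4);
        uint32_t stepping   = eax & 0xf;

        /* Erratum 170 applies to family 0xf parts except the fixed models listed
           in the AMD revision guide (0x68/0x6b/0x7f stepping >= 1, 0x6f/0x6c/0x7c
           stepping >= 2). */
        int fErratum = family == 0xf
                    && !((model == 0x68 || model == 0x6b || model == 0x7f) && stepping >= 1)
                    && !((model == 0x6f || model == 0x6c || model == 0x7c) && stepping >= 2);

        printf("family=%#x model=%#x stepping=%#x erratum170=%d\n", family, model, stepping, fErratum);
        return 0;
    }

On a fixed BH-G1/2 or DH-G1/2 part this prints erratum170=0, matching the model/stepping table quoted in the function's comment.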
trunk/src/VBox/VMM/VMMAll/IEMAll.cpp
(r73555 → r73606)
- IEM_VMX_INSTR_COMMON_CHECKS() now takes (a_pVCpu, a_szInstr, a_InsDiagPrefix) instead of (a_pVCpu, a_Instr): the log strings use a_szInstr, and before raising #UD the macro records a diagnostic in cpum.GstCtx.hwvirt.vmx.enmInstrDiag (a_InsDiagPrefix##_Vmxe when CR4.VMXE is clear, ##_RealOrV86Mode in real/v8086 mode, ##_LongModeCS in long mode without a 64-bit code segment).
- New IEM_IS_VMX_ROOT_MODE() and IEM_IS_VMX_NON_ROOT_MODE() macros wrapping CPUMIsGuestInVmxRootMode()/CPUMIsGuestInVmxNonRootMode(), with false stubs when VMX nested hardware-virtualization is not compiled in.
- Forward declaration of iemVmxVmxon() under VBOX_WITH_NESTED_HWVIRT_VMX.
- pVCpu->iem.s.offModRm is now initialized (poisoned to 127 in the debug path, cleared to 0 in the two execution-init paths), and iemOpcodeGetNextRm() records offBuf instead of the stale offOpcode in the code-TLB path; the IEM_OPCODE_GET_NEXT_RM() parameter is renamed a_pu8 to a_pbRm.
- iemRaiseXcptOrInt(): the CPU-hang (endless exception loop) detection now checks CPUMIsGuestInSvmNestedHwVirtMode() and CPUMIsGuestInVmxNonRootMode() separately instead of CPUMIsGuestInNestedHwVirtMode() before returning VERR_EM_GUEST_CPU_HANG.
- Added a todo in the 64-bit canonical check about raising #SS(0) when the segment is SS (AMD spec. 4.12.2 "Data Limit Checks in 64-bit Mode").
- New IEMOP_HLP_VMX_INSTR() decode helper: raises #UD in real/v8086 mode or in long mode without a 64-bit code segment, applicable to all VMX instructions except VMCALL (see the sketch after this file's summary).
- Removed IEMExecDecodedInvpcid(), the HM/EM interface for emulating INVPCID.
- Added IEMExecDecodedVmxoff(PVMCPU, uint8_t cbInstr) and IEMExecDecodedVmxon(PVMCPU, uint8_t cbInstr, RTGCPHYS GCPtrVmxon, uint32_t uExitInstrInfo, RTGCPTR GCPtrDisp) under VBOX_WITH_NESTED_HWVIRT_VMX as the HM/EM interfaces for emulating VMXOFF and VMXON.
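Both IEM_VMX_INSTR_COMMON_CHECKS() and the new IEMOP_HLP_VMX_INSTR() encode the same architectural #UD preconditions for VMX instructions other than VMCALL. A compact restatement of that predicate; the CPUMODE struct and vmx_instr_raises_ud() are stand-ins invented for this sketch, not IEM APIs:

    #include <stdbool.h>

    /* Architectural #UD preconditions enforced by the updated
       IEM_VMX_INSTR_COMMON_CHECKS / IEMOP_HLP_VMX_INSTR macros.  The mode
       flags are stand-ins for the real CPU state queries. */
    typedef struct { bool cr4_vmxe, real_or_v86, long_mode, cs_64bit; } CPUMODE;

    static bool vmx_instr_raises_ud(const CPUMODE *m)
    {
        if (!m->cr4_vmxe)                  /* CR4.VMXE must be set (checked on the VMXON path) */
            return true;
        if (m->real_or_v86)                /* #UD in real and virtual-8086 mode */
            return true;
        if (m->long_mode && !m->cs_64bit)  /* #UD in compatibility mode (long mode, non-64-bit CS) */
            return true;
        return false;                      /* instruction proceeds to its own checks */
    }

IEM_VMX_INSTR_COMMON_CHECKS() additionally records which condition fired in cpum.GstCtx.hwvirt.vmx.enmInstrDiag before raising the fault, which is what the new a_InsDiagPrefix parameter is for.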
trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h
(r73435 → r73606)
- MOV to CR0: when the guest is in VMX root operation, the write must keep the CR0 fixed-0 bits set (VMX_V_CR0_FIXED0, or VMX_V_CR0_FIXED0_UX when the unrestricted-guest feature is exposed); clearing any of them raises #GP(0). See the sketch below.
- MOV to CR4: the analogous check against VMX_V_CR4_FIXED0 is added after the SVM write-CR4 intercept handling.
- iemCImpl_invpcid changes from IEM_CIMPL_DEF_2(uInvpcidType, GCPtrInvpcidDesc) to IEM_CIMPL_DEF_3(iEffSeg, GCPtrInvpcidDesc, uInvpcidType): the effective segment is now passed in explicitly and used for the descriptor fetch instead of pVCpu->iem.s.iEffSeg.
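The rule behind both new checks is that, while in VMX operation, every control-register bit reported as 1 in the corresponding fixed-0 value must stay 1. A small sketch of that rule in isolation; cr_write_allowed() and the EXAMPLE_* masks are assumptions made for this example, not the tree's VMX_V_CR0_FIXED0 / VMX_V_CR4_FIXED0 definitions:

    #include <stdbool.h>
    #include <stdint.h>

    /* In VMX operation, any CR0/CR4 bit that is 1 in the corresponding FIXED0
       mask must remain 1; clearing one is a #GP(0). */
    static bool cr_write_allowed(uint64_t uNewCr, uint64_t uFixed0)
    {
        return (uNewCr & uFixed0) == uFixed0;   /* all mandatory-1 bits still set? */
    }

    /* With unrestricted-guest support PE (bit 0) and PG (bit 31) need not stay
       set in CR0, which is why the changeset picks a smaller fixed-0 mask when
       fVmxUnrestrictedGuest is reported.  Example masks (assumed values): */
    #define EXAMPLE_CR0_FIXED0     UINT64_C(0x80000021)   /* PG | NE | PE */
    #define EXAMPLE_CR0_FIXED0_UX  UINT64_C(0x00000020)   /* NE only      */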
trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h
(r72469 → r73606)
- iemCImpl_vmcall: the intercept todo is now tagged "NSTVMX:".
- New VBOX_WITH_NESTED_HWVIRT_VMX section:
  - iemVmxVmSucceed(), iemVmxVmFailInvalid(), iemVmxVmFailValid() and iemVmxVmFail() helpers implementing the VMX instruction success/failure RFLAGS convention (writing the VM-instruction error field of the current VMCS is still a todo); the convention is restated in the sketch below.
  - iemVmxVmxon() worker implementing VMXON. Outside VMX operation it checks, in order: CPL 0, the A20 gate being enabled, the CR0/CR4 fixed-0 bits (VMX_V_CR0_FIXED0 or VMX_V_CR0_FIXED0_UX with unrestricted guest, VMX_V_CR4_FIXED0), and the IA32_FEATURE_CONTROL lock and VMXON-enable bits; it then fetches the VMXON region pointer from the memory operand and checks page alignment, that the region is normal RAM (a VirtualBox implementation restriction, not an architectural one), the physical-address width reported by CPUMGetGuestIa32VmxBasic(), and the VMCS revision ID (a shadow VMCS is disallowed). On success it records GCPhysVmxon, sets fInVmxRootMode, sets the kVmxVInstrDiag_Vmxon_Success diagnostic, does VMsucceed and advances RIP (todos remain for the current-VMCS pointer, address-range monitoring and Intel PT). Failures either raise #GP(0) or do VMFailInvalid, each recording a kVmxVInstrDiag_Vmxon_* diagnostic. When already in VMX root mode it checks CPL and then fails with VMXINSTRERR_VMXON_IN_VMXROOTMODE; the non-root (nested) intercept is still a todo. In VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM builds the non-ring-3 path returns VINF_EM_RAW_EMULATE_INSTR, and the ring-3 success path switches the EM execution policy to IEM-only.
  - iemCImpl_vmxon: builds a VMXEXITINSTRINFO value (parsing ModR/M, SIB and displacement is still a todo, so only the address size and segment register are filled in) and calls iemVmxVmxon().
  - iemCImpl_vmxoff: runs IEM_VMX_INSTR_COMMON_CHECKS(), raises #UD when not in VMX root operation, checks CPL 0, fails with VMXINSTRERR_VMXOFF_DUAL_MON when the dual-monitor treatment of SMIs/SMM is active per CPUMGetGuestIa32SmmMonitorCtl(), and otherwise clears fInVmxRootMode, does VMsucceed and advances RIP (todos remain for unblocking INIT/SMI, re-enabling A20M and clearing address-range monitoring). The EM execution policy is restored in ring-3 IEM-only builds.
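The VMsucceed/VMfailInvalid/VMfailValid helpers introduced above implement the architectural RFLAGS convention by which VMX instructions report their outcome. A freestanding restatement; the EFL_* constants use the standard x86 flag positions, and the function names are illustrative rather than the IEM helpers themselves:

    #include <stdint.h>

    /* x86 RFLAGS bits involved in the VMX success/failure convention. */
    #define EFL_CF  0x0001u
    #define EFL_PF  0x0004u
    #define EFL_AF  0x0010u
    #define EFL_ZF  0x0040u
    #define EFL_SF  0x0080u
    #define EFL_OF  0x0800u
    #define EFL_VMX_MASK (EFL_CF | EFL_PF | EFL_AF | EFL_ZF | EFL_SF | EFL_OF)

    /* VMsucceed: clear CF, PF, AF, ZF, SF and OF. */
    static uint32_t vmx_succeed(uint32_t eflags)
    { return eflags & ~EFL_VMX_MASK; }

    /* VMfailInvalid (no current VMCS): clear the others, set CF. */
    static uint32_t vmx_fail_invalid(uint32_t eflags)
    { return (eflags & ~EFL_VMX_MASK) | EFL_CF; }

    /* VMfailValid (current VMCS present): clear the others, set ZF; the
       instruction error code additionally goes into the VM-instruction error
       VMCS field, still a todo in this changeset. */
    static uint32_t vmx_fail_valid(uint32_t eflags)
    { return (eflags & ~EFL_VMX_MASK) | EFL_ZF; }

iemVmxVmFail() picks between the two failure forms based on whether a current VMCS is mapped, mirroring the pVmcs check in the changeset.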
trunk/src/VBox/VMM/VMMAll/IEMAllInstructionsThree0f38.cpp.h
(r70612 → r73606)
- The INVPCID decoder now passes the effective segment to the C implementation: both the 64-bit and 32-bit operand-size paths grow from IEM_MC_BEGIN(2, 0) to IEM_MC_BEGIN(3, 0), add an iEffSeg argument assigned from pVCpu->iem.s.iEffSeg, move uInvpcidType to argument 2, and call IEM_MC_CALL_CIMPL_3(iemCImpl_invpcid, iEffSeg, GCPtrInvpcidDesc, uInvpcidType) instead of the old two-argument call.
trunk/src/VBox/VMM/VMMAll/IEMAllInstructionsTwoByte0f.cpp.h
(r72522 → r73606)
- iemOp_Grp7_vmxoff (0x0f 0x01 /0): when VBOX_WITH_NESTED_HWVIRT_VMX is defined it now decodes the "vmxoff" mnemonic and defers to iemCImpl_vmxoff via IEM_MC_DEFER_TO_CIMPL_0(); without it the opcode keeps raising #UD.
- iemOp_Grp9_vmxon_Mq (0xf3 0x0f 0xc7 !11/6): implemented under VBOX_WITH_NESTED_HWVIRT_VMX with IEMOP_HLP_VMX_INSTR(), effective-address calculation and a call to iemCImpl_vmxon; it remains a #UD stub otherwise.
- iemOp_Grp9 fetches the ModR/M byte with IEM_OPCODE_GET_NEXT_RM() instead of IEM_OPCODE_GET_NEXT_U8(), so the new offModRm bookkeeping in IEMAll.cpp records where the byte sits (the mod/reg/rm split is recalled in the sketch below).
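For reference, the Group 9 routing above keys off the ModR/M byte's fields; only the memory form (mod != 3) with reg == 6 under an F3 prefix is VMXON m64. A tiny sketch of the field split; the helper names are made up for the example and are not IEM functions:

    #include <stdint.h>

    /* x86 ModR/M byte: [7:6] mod, [5:3] reg (the /digit in opcode maps), [2:0] rm. */
    static inline unsigned modrm_mod(uint8_t bRm) { return (bRm >> 6) & 0x3; }
    static inline unsigned modrm_reg(uint8_t bRm) { return (bRm >> 3) & 0x7; }
    static inline unsigned modrm_rm (uint8_t bRm) { return bRm & 0x7; }

    /* Group 9 (0x0f 0xc7): mod != 3 with reg == 6 and an F3 prefix selects
       VMXON m64, which is why the decoder routes !11/6 to iemOp_Grp9_vmxon_Mq. */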
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
(r73293 → r73606)
- Added the <VBox/vmm/em.h> include.
- The erratum 170 check is called through its new name: HMSvmIsSubjectToErratum170() instead of HMAmdIsSubjectToErratum170().
trunk/src/VBox/VMM/VMMR0/HMSVMR0.h
(r72967 → r73606)
- Trimmed the header's includes: <VBox/vmm/em.h>, <VBox/vmm/stam.h>, <VBox/dis.h> and <VBox/vmm/pgm.h> are dropped; <VBox/cdefs.h>, <VBox/types.h>, <VBox/vmm/hm.h> and <VBox/vmm/hm_svm.h> remain.
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
(r73437 → r73606)
- Added the <VBox/vmm/em.h> include.
- VMXTRANSIENT: members re-aligned, uExitQualification renamed to uExitQual, and the hand-rolled ExitInstrInfo union replaced by the shared VMXEXITINSTRINFO type.
- Under VBOX_WITH_NESTED_HWVIRT_VMX, new VM-exit handlers hmR0VmxExitVmclear/Vmlaunch/Vmptrld/Vmptrst/Vmread/Vmresume/Vmwrite/Vmxoff/Vmxon are declared and wired into the dispatch table for VMX_EXIT_VMCLEAR through VMX_EXIT_VMXON; without the define those exits keep raising #UD via hmR0VmxExitSetPendingXcptUD.
- hmR0VmxReadExitQualificationVmcs() renamed to hmR0VmxReadExitQualVmcs(); all call sites and the uExitQualification uses in the DTrace/exit-history code are updated to uExitQual.
- hmR0VmxSetPendingXcptDF/UD/DB/OF now build the VM-entry interruption-information with RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_*) fields, and new hmR0VmxSetPendingXcptGP()/hmR0VmxSetPendingXcptSS() helpers inject #GP/#SS with an error code.
- New hmR0VmxDecodeMemOperand() (under VBOX_WITH_NESTED_HWVIRT_VMX): decodes the memory operand of an instruction-caused VM-exit from the VM-exit instruction-information field and the displacement. It validates the instruction info, computes base + index*scale + displacement plus the segment base and masks by the address size (the arithmetic is sketched after this summary), requires a canonical address in long mode, and otherwise performs a watered-down iemMemApplySegment(): segment present/usable, read/write permission and normal or expand-down limit checks, setting #GP(0)/#SS(0) as pending events and returning VINF_HM_PENDING_XCPT on failure.
- New hmR0VmxCheckExitDueToVmxInstr(): the common checks for VM-exits caused by guest VMX instructions, namely #UD in real/v86 mode or in long mode without a 64-bit code segment, CR4.VMXE required for VMXON, #UD for the other VMX instructions when not in VMX root operation, a todo for reflecting the VM-exit to the guest hypervisor in non-root mode, and #GP(0) for CPL > 0 (the intercept takes precedence over the CPL check).
- In the pre-run-guest code, with VBOX_WITH_NESTED_HWVIRT_VMX_ONLY_IN_IEM defined, execution is rescheduled to IEM (VINF_EM_RESCHEDULE_REM); the log string still refers to hmR0SvmPreRunGuest.
- VMX_IDT_VECTORING_INFO_VALID() renamed to VMX_IDT_VECTORING_INFO_IS_VALID().
- The hunk ends at the start of the "VM-exit handlers" doxygen group.
11133 * @{ 11134 */ 10835 11135 /* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */ 10836 11136 /* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */ 10837 11137 /* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */ 10838 10839 /** @name VM-exit handlers.10840 * @{10841 */10842 11138 10843 11139 /** … … 10961 11257 rc |= hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient); 10962 11258 AssertRCReturn(rc, rc); 10963 hmR0VmxSetPendingEvent(pVCpu, VMX_ VMCS_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(uExitIntInfo),11259 hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(uExitIntInfo), 10964 11260 pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode, 10965 11261 0 /* GCPtrFaultAddress */); … … 11270 11566 Assert(!pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging || pVCpu->hm.s.fUsingDebugLoop); 11271 11567 11272 int rc = hmR0VmxReadExitQual ificationVmcs(pVCpu, pVmxTransient);11568 int rc = hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient); 11273 11569 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient); 11274 11570 rc |= hmR0VmxImportGuestState(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS); 11275 11571 AssertRCReturn(rc, rc); 11276 11572 11277 VBOXSTRICTRC rcStrict = IEMExecDecodedInvlpg(pVCpu, pVmxTransient->cbInstr, pVmxTransient->uExitQual ification);11573 VBOXSTRICTRC rcStrict = IEMExecDecodedInvlpg(pVCpu, pVmxTransient->cbInstr, pVmxTransient->uExitQual); 11278 11574 11279 11575 if (rcStrict == VINF_SUCCESS || rcStrict == VINF_PGM_SYNC_CR3) … … 11285 11581 } 11286 11582 else 11287 AssertMsgFailed(("Unexpected IEMExecDecodedInvlpg(%#RX64) sttus: %Rrc\n", 11288 pVmxTransient->uExitQualification,VBOXSTRICTRC_VAL(rcStrict)));11583 AssertMsgFailed(("Unexpected IEMExecDecodedInvlpg(%#RX64) sttus: %Rrc\n", pVmxTransient->uExitQual, 11584 VBOXSTRICTRC_VAL(rcStrict))); 11289 11585 return rcStrict; 11290 11586 } … … 11888 12184 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitMovCRx, y2); 11889 12185 11890 int rc = hmR0VmxReadExitQual ificationVmcs(pVCpu, pVmxTransient);12186 int rc = hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient); 11891 12187 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient); 11892 12188 rc |= hmR0VmxImportGuestState(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK); … … 11895 12191 VBOXSTRICTRC rcStrict; 11896 12192 PVM pVM = pVCpu->CTX_SUFF(pVM); 11897 RTGCUINTPTR const uExitQual ification = pVmxTransient->uExitQualification;11898 uint32_t const uAccessType = VMX_EXIT_QUAL_CRX_ACCESS(uExitQualification);12193 RTGCUINTPTR const uExitQual = pVmxTransient->uExitQual; 12194 uint32_t const uAccessType = VMX_EXIT_QUAL_CRX_ACCESS(uExitQual); 11899 12195 switch (uAccessType) 11900 12196 { … … 11902 12198 { 11903 12199 uint32_t const uOldCr0 = pVCpu->cpum.GstCtx.cr0; 11904 rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, pVmxTransient->cbInstr, 11905 VMX_EXIT_QUAL_CRX_REGISTER(uExitQualification), 11906 VMX_EXIT_QUAL_CRX_GENREG(uExitQualification)); 12200 rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, pVmxTransient->cbInstr, VMX_EXIT_QUAL_CRX_REGISTER(uExitQual), 12201 VMX_EXIT_QUAL_CRX_GENREG(uExitQual)); 11907 12202 AssertMsg( rcStrict == VINF_SUCCESS 11908 12203 || rcStrict == VINF_IEM_RAISED_XCPT 11909 12204 || rcStrict == VINF_PGM_SYNC_CR3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict))); 11910 12205 11911 switch (VMX_EXIT_QUAL_CRX_REGISTER(uExitQual ification))12206 switch (VMX_EXIT_QUAL_CRX_REGISTER(uExitQual)) 
11912 12207 { 11913 12208 case 0: … … 11916 12211 HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0); 11917 12212 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR0Write); 11918 Log4 (("CRXCR0 write rcStrict=%Rrc CR0=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr0));12213 Log4Func(("CR0 write rcStrict=%Rrc CR0=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr0)); 11919 12214 11920 12215 /* … … 11935 12230 { 11936 12231 /** @todo check selectors rather than returning all the time. */ 11937 Log4 (("CRx CR0 write:back to real mode -> VINF_EM_RESCHEDULE_REM\n"));12232 Log4Func(("CR0 write, back to real mode -> VINF_EM_RESCHEDULE_REM\n")); 11938 12233 rcStrict = VINF_EM_RESCHEDULE_REM; 11939 12234 } … … 11956 12251 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, 11957 12252 HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR3); 11958 Log4 (("CRXCR3 write rcStrict=%Rrc CR3=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr3));12253 Log4Func(("CR3 write rcStrict=%Rrc CR3=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr3)); 11959 12254 break; 11960 12255 } … … 11965 12260 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, 11966 12261 HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR4); 11967 Log4 (("CRXCR4 write rc=%Rrc CR4=%#RX64 fLoadSaveGuestXcr0=%u\n", VBOXSTRICTRC_VAL(rcStrict),11968 pVCpu->cpum.GstCtx.cr4, pVCpu->hm.s.fLoadSaveGuestXcr0));12262 Log4Func(("CR4 write rc=%Rrc CR4=%#RX64 fLoadSaveGuestXcr0=%u\n", VBOXSTRICTRC_VAL(rcStrict), 12263 pVCpu->cpum.GstCtx.cr4, pVCpu->hm.s.fLoadSaveGuestXcr0)); 11969 12264 break; 11970 12265 } … … 11979 12274 } 11980 12275 default: 11981 AssertMsgFailed(("Invalid CRx register %#x\n", VMX_EXIT_QUAL_CRX_REGISTER(uExitQual ification)));12276 AssertMsgFailed(("Invalid CRx register %#x\n", VMX_EXIT_QUAL_CRX_REGISTER(uExitQual))); 11982 12277 break; 11983 12278 } … … 11990 12285 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx) 11991 12286 || pVCpu->hm.s.fUsingDebugLoop 11992 || VMX_EXIT_QUAL_CRX_REGISTER(uExitQual ification) != 3);12287 || VMX_EXIT_QUAL_CRX_REGISTER(uExitQual) != 3); 11993 12288 /* CR8 reads only cause a VM-exit when the TPR shadow feature isn't enabled. 
*/ 11994 Assert( VMX_EXIT_QUAL_CRX_REGISTER(uExitQual ification) != 812289 Assert( VMX_EXIT_QUAL_CRX_REGISTER(uExitQual) != 8 11995 12290 || !(pVCpu->hm.s.vmx.u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)); 11996 12291 11997 rcStrict = IEMExecDecodedMovCRxRead(pVCpu, pVmxTransient->cbInstr, 11998 VMX_EXIT_QUAL_CRX_GENREG(uExitQualification), 11999 VMX_EXIT_QUAL_CRX_REGISTER(uExitQualification)); 12292 rcStrict = IEMExecDecodedMovCRxRead(pVCpu, pVmxTransient->cbInstr, VMX_EXIT_QUAL_CRX_GENREG(uExitQual), 12293 VMX_EXIT_QUAL_CRX_REGISTER(uExitQual)); 12000 12294 AssertMsg( rcStrict == VINF_SUCCESS 12001 12295 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict))); 12002 12296 #ifdef VBOX_WITH_STATISTICS 12003 switch (VMX_EXIT_QUAL_CRX_REGISTER(uExitQual ification))12297 switch (VMX_EXIT_QUAL_CRX_REGISTER(uExitQual)) 12004 12298 { 12005 12299 case 0: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR0Read); break; … … 12010 12304 } 12011 12305 #endif 12012 Log4 (("CRX CR%d Read access rcStrict=%Rrc\n", VMX_EXIT_QUAL_CRX_REGISTER(uExitQualification),12306 Log4Func(("CR%d Read access rcStrict=%Rrc\n", VMX_EXIT_QUAL_CRX_REGISTER(uExitQual), 12013 12307 VBOXSTRICTRC_VAL(rcStrict))); 12014 if (VMX_EXIT_QUAL_CRX_GENREG(uExitQual ification) == X86_GREG_xSP)12308 if (VMX_EXIT_QUAL_CRX_GENREG(uExitQual) == X86_GREG_xSP) 12015 12309 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_RSP); 12016 12310 else … … 12027 12321 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0); 12028 12322 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitClts); 12029 Log4 (("CRXCLTS rcStrict=%d\n", VBOXSTRICTRC_VAL(rcStrict)));12323 Log4Func(("CLTS rcStrict=%d\n", VBOXSTRICTRC_VAL(rcStrict))); 12030 12324 break; 12031 12325 } … … 12034 12328 { 12035 12329 /* Note! LMSW cannot clear CR0.PE, so no fRealOnV86Active kludge needed here. */ 12036 rcStrict = IEMExecDecodedLmsw(pVCpu, pVmxTransient->cbInstr, 12037 VMX_EXIT_QUAL_CRX_LMSW_DATA(uExitQualification)); 12330 rcStrict = IEMExecDecodedLmsw(pVCpu, pVmxTransient->cbInstr, VMX_EXIT_QUAL_CRX_LMSW_DATA(uExitQual)); 12038 12331 AssertMsg( rcStrict == VINF_SUCCESS 12039 12332 || rcStrict == VINF_IEM_RAISED_XCPT … … 12042 12335 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0); 12043 12336 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitLmsw); 12044 Log4 (("CRXLMSW rcStrict=%d\n", VBOXSTRICTRC_VAL(rcStrict)));12337 Log4Func(("LMSW rcStrict=%d\n", VBOXSTRICTRC_VAL(rcStrict))); 12045 12338 break; 12046 12339 } … … 12075 12368 12076 12369 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 12077 int rc = hmR0VmxReadExitQual ificationVmcs(pVCpu, pVmxTransient);12370 int rc = hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient); 12078 12371 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient); 12079 12372 rc |= hmR0VmxImportGuestState(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_EFER); … … 12082 12375 12083 12376 /* Refer Intel spec. 27-5. "Exit Qualifications for I/O Instructions" for the format. 
*/ 12084 uint32_t uIOPort = VMX_EXIT_QUAL_IO_PORT(pVmxTransient->uExitQualification); 12085 uint8_t uIOWidth = VMX_EXIT_QUAL_IO_WIDTH(pVmxTransient->uExitQualification); 12086 bool fIOWrite = ( VMX_EXIT_QUAL_IO_DIRECTION(pVmxTransient->uExitQualification) 12087 == VMX_EXIT_QUAL_IO_DIRECTION_OUT); 12088 bool fIOString = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQualification); 12377 uint32_t uIOPort = VMX_EXIT_QUAL_IO_PORT(pVmxTransient->uExitQual); 12378 uint8_t uIOWidth = VMX_EXIT_QUAL_IO_WIDTH(pVmxTransient->uExitQual); 12379 bool fIOWrite = (VMX_EXIT_QUAL_IO_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_IO_DIRECTION_OUT); 12380 bool fIOString = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQual); 12089 12381 bool fGstStepping = RT_BOOL(pCtx->eflags.Bits.u1TF); 12090 12382 bool fDbgStepping = pVCpu->hm.s.fSingleInstruction; … … 12124 12416 * interpreting the instruction. 12125 12417 */ 12126 Log4 (("CS:RIP=%04x:%08RX64 %#06x/%u %c str\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));12418 Log4Func(("CS:RIP=%04x:%08RX64 %#06x/%u %c str\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r')); 12127 12419 AssertReturn(pCtx->dx == uIOPort, VERR_VMX_IPE_2); 12128 12420 bool const fInsOutsInfo = RT_BF_GET(pVM->hm.s.vmx.Msrs.u64Basic, VMX_BF_BASIC_VMCS_INS_OUTS); … … 12134 12426 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2); 12135 12427 IEMMODE const enmAddrMode = (IEMMODE)pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize; 12136 bool const fRep = VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual ification);12428 bool const fRep = VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual); 12137 12429 if (fIOWrite) 12138 12430 rcStrict = IEMExecStringIoWrite(pVCpu, cbValue, enmAddrMode, fRep, cbInstr, … … 12160 12452 * IN/OUT - I/O instruction. 12161 12453 */ 12162 Log4 (("CS:RIP=%04x:%08RX64 %#06x/%u %c\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));12454 Log4Func(("CS:RIP=%04x:%08RX64 %#06x/%u %c\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r')); 12163 12455 uint32_t const uAndVal = s_aIOOpAnd[uIOWidth]; 12164 Assert(!VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual ification));12456 Assert(!VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual)); 12165 12457 if (fIOWrite) 12166 12458 { … … 12296 12588 Log4(("IOExit/%u: %04x:%08RX64: %s%s%s %#x LB %u -> EMHistoryExec\n", 12297 12589 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, 12298 VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual ification) ? "REP " : "",12590 VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual) ? "REP " : "", 12299 12591 fIOWrite ? "OUT" : "IN", fIOString ? "S" : "", uIOPort, uIOWidth)); 12300 12592 … … 12319 12611 12320 12612 /* Check if this task-switch occurred while delivery an event through the guest IDT. 
*/ 12321 int rc = hmR0VmxReadExitQual ificationVmcs(pVCpu, pVmxTransient);12613 int rc = hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient); 12322 12614 AssertRCReturn(rc, rc); 12323 if (VMX_EXIT_QUAL_TASK_SWITCH_TYPE(pVmxTransient->uExitQual ification) == VMX_EXIT_QUAL_TASK_SWITCH_TYPE_IDT)12615 if (VMX_EXIT_QUAL_TASK_SWITCH_TYPE(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_TASK_SWITCH_TYPE_IDT) 12324 12616 { 12325 12617 rc = hmR0VmxReadIdtVectoringInfoVmcs(pVmxTransient); 12326 12618 AssertRCReturn(rc, rc); 12327 if (VMX_IDT_VECTORING_INFO_ VALID(pVmxTransient->uIdtVectoringInfo))12619 if (VMX_IDT_VECTORING_INFO_IS_VALID(pVmxTransient->uIdtVectoringInfo)) 12328 12620 { 12329 12621 uint32_t uErrCode; … … 12350 12642 0 /* cbInstr */, uErrCode, GCPtrFaultAddress); 12351 12643 12352 Log4 (("Pending event on TaskSwitchuIntType=%#x uVector=%#x\n", uIntType, uVector));12644 Log4Func(("Pending event. uIntType=%#x uVector=%#x\n", uIntType, uVector)); 12353 12645 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitTaskSwitch); 12354 12646 return VINF_EM_RAW_INJECT_TRPM_EVENT; … … 12406 12698 /* IOMMIOPhysHandler() below may call into IEM, save the necessary state. */ 12407 12699 int rc = hmR0VmxImportGuestState(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK); 12408 rc |= hmR0VmxReadExitQual ificationVmcs(pVCpu, pVmxTransient);12700 rc |= hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient); 12409 12701 AssertRCReturn(rc, rc); 12410 12702 12411 12703 /* See Intel spec. 27-6 "Exit Qualifications for APIC-access VM-exits from Linear Accesses & Guest-Phyiscal Addresses" */ 12412 uint32_t uAccessType = VMX_EXIT_QUAL_APIC_ACCESS_TYPE(pVmxTransient->uExitQual ification);12704 uint32_t uAccessType = VMX_EXIT_QUAL_APIC_ACCESS_TYPE(pVmxTransient->uExitQual); 12413 12705 VBOXSTRICTRC rcStrict2; 12414 12706 switch (uAccessType) … … 12418 12710 { 12419 12711 AssertMsg( !(pVCpu->hm.s.vmx.u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW) 12420 || VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual ification) != XAPIC_OFF_TPR,12712 || VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual) != XAPIC_OFF_TPR, 12421 12713 ("hmR0VmxExitApicAccess: can't access TPR offset while using TPR shadowing.\n")); 12422 12714 12423 12715 RTGCPHYS GCPhys = pVCpu->hm.s.vmx.u64MsrApicBase; /* Always up-to-date, u64MsrApicBase is not part of the VMCS. 
*/ 12424 12716 GCPhys &= PAGE_BASE_GC_MASK; 12425 GCPhys += VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual ification);12717 GCPhys += VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual); 12426 12718 PVM pVM = pVCpu->CTX_SUFF(pVM); 12427 12719 Log4Func(("Linear access uAccessType=%#x GCPhys=%#RGp Off=%#x\n", uAccessType, GCPhys, 12428 VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual ification)));12720 VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual))); 12429 12721 12430 12722 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx; … … 12494 12786 12495 12787 #ifdef VBOX_WITH_STATISTICS 12496 rc = hmR0VmxReadExitQual ificationVmcs(pVCpu, pVmxTransient);12788 rc = hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient); 12497 12789 AssertRCReturn(rc, rc); 12498 if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual ification) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)12790 if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE) 12499 12791 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxWrite); 12500 12792 else … … 12510 12802 */ 12511 12803 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 12512 int rc = hmR0VmxReadExitQual ificationVmcs(pVCpu, pVmxTransient);12804 int rc = hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient); 12513 12805 rc |= hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_DR7); 12514 12806 AssertRCReturn(rc, rc); … … 12516 12808 12517 12809 PVM pVM = pVCpu->CTX_SUFF(pVM); 12518 if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual ification) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)12810 if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE) 12519 12811 { 12520 12812 rc = EMInterpretDRxWrite(pVM, pVCpu, CPUMCTX2CORE(pCtx), 12521 VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual ification),12522 VMX_EXIT_QUAL_DRX_GENREG(pVmxTransient->uExitQual ification));12813 VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual), 12814 VMX_EXIT_QUAL_DRX_GENREG(pVmxTransient->uExitQual)); 12523 12815 if (RT_SUCCESS(rc)) 12524 12816 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_DR7); … … 12528 12820 { 12529 12821 rc = EMInterpretDRxRead(pVM, pVCpu, CPUMCTX2CORE(pCtx), 12530 VMX_EXIT_QUAL_DRX_GENREG(pVmxTransient->uExitQual ification),12531 VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual ification));12822 VMX_EXIT_QUAL_DRX_GENREG(pVmxTransient->uExitQual), 12823 VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual)); 12532 12824 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxRead); 12533 12825 } … … 12596 12888 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 12597 12889 rcStrict = PGMR0Trap0eHandlerNPMisconfig(pVM, pVCpu, PGMMODE_EPT, CPUMCTX2CORE(pCtx), GCPhys, UINT32_MAX); 12598 Log4 (("EPT misconfig at %#RGp RIP=%#RX64 rc=%Rrc\n", GCPhys, pCtx->rip, VBOXSTRICTRC_VAL(rcStrict)));12890 Log4Func(("At %#RGp RIP=%#RX64 rc=%Rrc\n", GCPhys, pCtx->rip, VBOXSTRICTRC_VAL(rcStrict))); 12599 12891 if ( rcStrict == VINF_SUCCESS 12600 12892 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT … … 12655 12947 RTGCPHYS GCPhys; 12656 12948 int rc = VMXReadVmcs64(VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &GCPhys); 12657 rc |= hmR0VmxReadExitQual ificationVmcs(pVCpu, pVmxTransient);12949 rc |= hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient); 12658 12950 rc |= hmR0VmxImportGuestState(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK); 12659 12951 AssertRCReturn(rc, rc); 12660 12952 12661 12953 /* Intel spec. Table 27-7 "Exit Qualifications for EPT violations". 
*/ 12662 AssertMsg(((pVmxTransient->uExitQual ification >> 7) & 3) != 2, ("%#RX64", pVmxTransient->uExitQualification));12954 AssertMsg(((pVmxTransient->uExitQual >> 7) & 3) != 2, ("%#RX64", pVmxTransient->uExitQual)); 12663 12955 12664 12956 RTGCUINT uErrorCode = 0; 12665 if (pVmxTransient->uExitQual ification& VMX_EXIT_QUAL_EPT_INSTR_FETCH)12957 if (pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_INSTR_FETCH) 12666 12958 uErrorCode |= X86_TRAP_PF_ID; 12667 if (pVmxTransient->uExitQual ification& VMX_EXIT_QUAL_EPT_DATA_WRITE)12959 if (pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_DATA_WRITE) 12668 12960 uErrorCode |= X86_TRAP_PF_RW; 12669 if (pVmxTransient->uExitQual ification& VMX_EXIT_QUAL_EPT_ENTRY_PRESENT)12961 if (pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ENTRY_PRESENT) 12670 12962 uErrorCode |= X86_TRAP_PF_P; 12671 12963 … … 12677 12969 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 12678 12970 12679 Log4Func(("EPT violation %#x at %#RX64 ErrorCode %#x CS:RIP=%04x:%08RX64\n", pVmxTransient->uExitQual ification, GCPhys,12680 uErrorCode,pCtx->cs.Sel, pCtx->rip));12971 Log4Func(("EPT violation %#x at %#RX64 ErrorCode %#x CS:RIP=%04x:%08RX64\n", pVmxTransient->uExitQual, GCPhys, uErrorCode, 12972 pCtx->cs.Sel, pCtx->rip)); 12681 12973 12682 12974 VBOXSTRICTRC rcStrict2 = PGMR0Trap0eHandlerNestedPaging(pVM, pVCpu, PGMMODE_EPT, uErrorCode, CPUMCTX2CORE(pCtx), GCPhys); … … 12700 12992 /** @} */ 12701 12993 12994 /** @name VM-exit exception handlers. 12995 * @{ 12996 */ 12702 12997 /* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */ 12703 12998 /* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= VM-exit exception handlers =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */ 12704 12999 /* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */ 12705 12706 /** @name VM-exit exception handlers.12707 * @{12708 */12709 13000 12710 13001 /** … … 12732 13023 } 12733 13024 12734 hmR0VmxSetPendingEvent(pVCpu, VMX_ VMCS_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),12735 pVmxTransient-> cbInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);13025 hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), pVmxTransient->cbInstr, 13026 pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */); 12736 13027 return rc; 12737 13028 } … … 12758 13049 AssertRCReturn(rc, rc); 12759 13050 12760 hmR0VmxSetPendingEvent(pVCpu, VMX_ VMCS_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),12761 pVmxTransient-> cbInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);13051 hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), pVmxTransient->cbInstr, 13052 pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */); 12762 13053 } 12763 13054 … … 12782 13073 Assert(ASMAtomicUoReadU32(&pVmxTransient->fVmcsFieldsRead) & HMVMX_READ_EXIT_INTERRUPTION_INFO); 12783 13074 12784 hmR0VmxSetPendingEvent(pVCpu, VMX_ VMCS_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),12785 pVmxTransient-> cbInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);13075 hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), pVmxTransient->cbInstr, 13076 pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */); 12786 13077 return VINF_SUCCESS; 12787 13078 } … … 12800 13091 * for processing. 
12801 13092 */ 12802 int rc = hmR0VmxReadExitQual ificationVmcs(pVCpu, pVmxTransient);13093 int rc = hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient); 12803 13094 12804 13095 /* Refer Intel spec. Table 27-1. "Exit Qualifications for debug exceptions" for the format. */ 12805 13096 uint64_t uDR6 = X86_DR6_INIT_VAL; 12806 uDR6 |= ( pVmxTransient->uExitQualification 12807 & (X86_DR6_B0 | X86_DR6_B1 | X86_DR6_B2 | X86_DR6_B3 | X86_DR6_BD | X86_DR6_BS)); 13097 uDR6 |= (pVmxTransient->uExitQual & (X86_DR6_B0 | X86_DR6_B1 | X86_DR6_B2 | X86_DR6_B3 | X86_DR6_BD | X86_DR6_BS)); 12808 13098 12809 13099 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx; … … 12856 13146 rc |= hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient); 12857 13147 AssertRCReturn(rc, rc); 12858 hmR0VmxSetPendingEvent(pVCpu, VMX_ VMCS_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),12859 pVmxTransient-> cbInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);13148 hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), pVmxTransient->cbInstr, 13149 pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */); 12860 13150 return VINF_SUCCESS; 12861 13151 } … … 12899 13189 Log4Func(("Gst: CS:RIP %04x:%08RX64 ErrorCode=%#x CR0=%#RX64 CPL=%u TR=%#04x\n", pCtx->cs.Sel, pCtx->rip, 12900 13190 pVmxTransient->uExitIntErrorCode, pCtx->cr0, CPUMGetGuestCPL(pVCpu), pCtx->tr.Sel)); 12901 hmR0VmxSetPendingEvent(pVCpu, VMX_ VMCS_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),12902 pVmxTransient-> cbInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);13191 hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), pVmxTransient->cbInstr, 13192 pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */); 12903 13193 return rc; 12904 13194 } … … 13166 13456 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE)) 13167 13457 { 13168 Log4 (("hmR0VmxExitXcptGP: mode changed -> VINF_EM_RESCHEDULE\n"));13458 Log4Func(("Mode changed -> VINF_EM_RESCHEDULE\n")); 13169 13459 /** @todo Exit fRealOnV86Active here w/o dropping back to ring-3. 
*/ 13170 13460 rc = VINF_EM_RESCHEDULE; … … 13217 13507 #endif 13218 13508 13219 hmR0VmxSetPendingEvent(pVCpu, VMX_ VMCS_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),13220 pVmxTransient-> cbInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);13509 hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), pVmxTransient->cbInstr, 13510 pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */); 13221 13511 return VINF_SUCCESS; 13222 13512 } … … 13230 13520 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient); 13231 13521 PVM pVM = pVCpu->CTX_SUFF(pVM); 13232 int rc = hmR0VmxReadExitQual ificationVmcs(pVCpu, pVmxTransient);13522 int rc = hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient); 13233 13523 rc |= hmR0VmxReadExitIntInfoVmcs(pVmxTransient); 13234 13524 rc |= hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient); … … 13245 13535 if (RT_LIKELY(!pVmxTransient->fVectoringDoublePF)) 13246 13536 { 13247 hmR0VmxSetPendingEvent(pVCpu, VMX_ VMCS_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),13248 0 /* cbInstr */, pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQualification);13537 hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), 0 /* cbInstr */, 13538 pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual); 13249 13539 } 13250 13540 else … … 13270 13560 AssertRCReturn(rc, rc); 13271 13561 13272 Log4Func(("#PF: cr2=%#RX64 cs:rip=%#04x:%#RX64 uErrCode %#RX32 cr3=%#RX64\n", pVmxTransient->uExitQualification, 13273 pCtx->cs.Sel, pCtx->rip, pVmxTransient->uExitIntErrorCode, pCtx->cr3)); 13274 13275 TRPMAssertXcptPF(pVCpu, pVmxTransient->uExitQualification, (RTGCUINT)pVmxTransient->uExitIntErrorCode); 13276 rc = PGMTrap0eHandler(pVCpu, pVmxTransient->uExitIntErrorCode, CPUMCTX2CORE(pCtx), 13277 (RTGCPTR)pVmxTransient->uExitQualification); 13562 Log4Func(("#PF: cr2=%#RX64 cs:rip=%#04x:%#RX64 uErrCode %#RX32 cr3=%#RX64\n", pVmxTransient->uExitQual, pCtx->cs.Sel, 13563 pCtx->rip, pVmxTransient->uExitIntErrorCode, pCtx->cr3)); 13564 13565 TRPMAssertXcptPF(pVCpu, pVmxTransient->uExitQual, (RTGCUINT)pVmxTransient->uExitIntErrorCode); 13566 rc = PGMTrap0eHandler(pVCpu, pVmxTransient->uExitIntErrorCode, CPUMCTX2CORE(pCtx), (RTGCPTR)pVmxTransient->uExitQual); 13278 13567 13279 13568 Log4Func(("#PF: rc=%Rrc\n", rc)); … … 13298 13587 TRPMResetTrap(pVCpu); 13299 13588 pVCpu->hm.s.Event.fPending = false; /* In case it's a contributory #PF. */ 13300 hmR0VmxSetPendingEvent(pVCpu, VMX_ VMCS_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),13301 0 /* cbInstr */, uGstErrorCode, pVmxTransient->uExitQualification);13589 hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), 0 /* cbInstr */, 13590 uGstErrorCode, pVmxTransient->uExitQual); 13302 13591 } 13303 13592 else … … 13321 13610 /** @} */ 13322 13611 13612 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX 13613 13614 /** @name Nested-guest VM-exit handlers. 13615 * @{ 13616 */ 13617 /* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */ 13618 /* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= Nested-guest VM-exit handlers =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */ 13619 /* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */ 13620 13621 /** 13622 * VM-exit handler for VMCLEAR (VMX_EXIT_VMCLEAR). Unconditional VM-exit. 
13623 */ 13624 HMVMX_EXIT_DECL hmR0VmxExitVmclear(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) 13625 { 13626 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient); 13627 13628 /** @todo NSTVMX: Vmclear. */ 13629 hmR0VmxSetPendingXcptUD(pVCpu); 13630 return VINF_SUCCESS; 13631 } 13632 13633 13634 /** 13635 * VM-exit handler for VMLAUNCH (VMX_EXIT_VMLAUNCH). Unconditional VM-exit. 13636 */ 13637 HMVMX_EXIT_DECL hmR0VmxExitVmlaunch(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) 13638 { 13639 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient); 13640 13641 /** @todo NSTVMX: Vmlaunch. */ 13642 hmR0VmxSetPendingXcptUD(pVCpu); 13643 return VINF_SUCCESS; 13644 } 13645 13646 13647 /** 13648 * VM-exit handler for VMPTRLD (VMX_EXIT_VMPTRLD). Unconditional VM-exit. 13649 */ 13650 HMVMX_EXIT_DECL hmR0VmxExitVmptrld(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) 13651 { 13652 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient); 13653 13654 /** @todo NSTVMX: Vmptrld. */ 13655 hmR0VmxSetPendingXcptUD(pVCpu); 13656 return VINF_SUCCESS; 13657 } 13658 13659 13660 /** 13661 * VM-exit handler for VMPTRST (VMX_EXIT_VMPTRST). Unconditional VM-exit. 13662 */ 13663 HMVMX_EXIT_DECL hmR0VmxExitVmptrst(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) 13664 { 13665 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient); 13666 13667 /** @todo NSTVMX: Vmptrst. */ 13668 hmR0VmxSetPendingXcptUD(pVCpu); 13669 return VINF_SUCCESS; 13670 } 13671 13672 13673 /** 13674 * VM-exit handler for VMREAD (VMX_EXIT_VMREAD). Unconditional VM-exit. 13675 */ 13676 HMVMX_EXIT_DECL hmR0VmxExitVmread(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) 13677 { 13678 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient); 13679 13680 /** @todo NSTVMX: Vmread. */ 13681 hmR0VmxSetPendingXcptUD(pVCpu); 13682 return VINF_SUCCESS; 13683 } 13684 13685 13686 /** 13687 * VM-exit handler for VMRESUME (VMX_EXIT_VMRESUME). Unconditional VM-exit. 13688 */ 13689 HMVMX_EXIT_DECL hmR0VmxExitVmresume(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) 13690 { 13691 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient); 13692 13693 /** @todo NSTVMX: Vmresume. */ 13694 hmR0VmxSetPendingXcptUD(pVCpu); 13695 return VINF_SUCCESS; 13696 } 13697 13698 13699 /** 13700 * VM-exit handler for VMWRITE (VMX_EXIT_VMWRITE). Unconditional VM-exit. 13701 */ 13702 HMVMX_EXIT_DECL hmR0VmxExitVmwrite(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) 13703 { 13704 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient); 13705 13706 /** @todo NSTVMX: Vmwrite. */ 13707 hmR0VmxSetPendingXcptUD(pVCpu); 13708 return VINF_SUCCESS; 13709 } 13710 13711 13712 /** 13713 * VM-exit handler for VMXOFF (VMX_EXIT_VMXOFF). Unconditional VM-exit. 13714 */ 13715 HMVMX_EXIT_DECL hmR0VmxExitVmxoff(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) 13716 { 13717 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient); 13718 13719 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient); 13720 rc |= hmR0VmxImportGuestState(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK); 13721 AssertRCReturn(rc, rc); 13722 13723 VBOXSTRICTRC rcStrict = IEMExecDecodedVmxoff(pVCpu, pVmxTransient->cbInstr); 13724 if (RT_LIKELY(rcStrict == VINF_SUCCESS)) 13725 { 13726 /* VMXOFF on success changes the internal hwvirt state but not anything that's visible to the guest. 
*/ 13727 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_HWVIRT); 13728 } 13729 else if (rcStrict == VINF_IEM_RAISED_XCPT) 13730 { 13731 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK); 13732 rcStrict = VINF_SUCCESS; 13733 } 13734 return rcStrict; 13735 } 13736 13737 13738 /** 13739 * VM-exit handler for VMXON (VMX_EXIT_VMXON). Unconditional VM-exit. 13740 */ 13741 HMVMX_EXIT_DECL hmR0VmxExitVmxon(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) 13742 { 13743 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient); 13744 13745 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient); 13746 rc |= hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_SREG_MASK | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK); 13747 rc |= hmR0VmxReadExitInstrInfoVmcs(pVmxTransient); 13748 rc |= hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient); 13749 AssertRCReturn(rc, rc); 13750 13751 VBOXSTRICTRC rcStrict = hmR0VmxCheckExitDueToVmxInstr(pVCpu, pVmxTransient); 13752 if (rcStrict == VINF_SUCCESS) 13753 { /* likely */ } 13754 else if (rcStrict == VINF_HM_PENDING_XCPT) 13755 { 13756 Log4Func(("Privilege checks failed, raising xcpt %#x!\n", VMX_ENTRY_INT_INFO_VECTOR(pVCpu->hm.s.Event.u64IntInfo))); 13757 return VINF_SUCCESS; 13758 } 13759 else 13760 { 13761 Log4Func(("hmR0VmxCheckExitDueToVmxInstr failed. rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict))); 13762 return rcStrict; 13763 } 13764 13765 RTGCPTR GCPtrVmxon; 13766 PCVMXEXITINSTRINFO pExitInstrInfo = &pVmxTransient->ExitInstrInfo; 13767 RTGCPTR const GCPtrDisp = pVmxTransient->uExitQual; 13768 rcStrict = hmR0VmxDecodeMemOperand(pVCpu, pExitInstrInfo, GCPtrDisp, false /*fIsWrite*/, &GCPtrVmxon); 13769 if (rcStrict == VINF_SUCCESS) 13770 { /* likely */ } 13771 else if (rcStrict == VINF_HM_PENDING_XCPT) 13772 { 13773 Log4Func(("Memory operand decoding failed, raising xcpt %#x\n", VMX_ENTRY_INT_INFO_VECTOR(pVCpu->hm.s.Event.u64IntInfo))); 13774 return VINF_SUCCESS; 13775 } 13776 else 13777 { 13778 Log4Func(("hmR0VmxCheckExitDueToVmxInstr failed. rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict))); 13779 return rcStrict; 13780 } 13781 13782 rcStrict = IEMExecDecodedVmxon(pVCpu, pVmxTransient->cbInstr, GCPtrVmxon, pExitInstrInfo->u, GCPtrDisp); 13783 if (RT_LIKELY(rcStrict == VINF_SUCCESS)) 13784 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT); 13785 else if (rcStrict == VINF_IEM_RAISED_XCPT) 13786 { 13787 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK); 13788 rcStrict = VINF_SUCCESS; 13789 } 13790 return rcStrict; 13791 } 13792 13793 /** @} */ 13794 #endif /* VBOX_WITH_NESTED_HWVIRT_VMX */ 13795 -
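The hmR0VmxDecodeMemOperand() function added in this file rebuilds the guest-linear address of a VMX instruction's memory operand from the VM-exit instruction-information field: the displacement taken from the exit qualification, plus the base register and the scaled index register when their fields are marked valid, plus the segment base outside 64-bit mode (or for FS/GS), with the result truncated to the instruction's address size and then checked for canonical form or segment limits. The following standalone sketch shows only that arithmetic; the structure, field names and flat GPR array are illustrative stand-ins, not the VirtualBox types.

    #include <stdint.h>
    #include <stdio.h>
    #include <stdbool.h>

    /* Illustrative only: the decoded pieces of the exit instruction-information field. */
    typedef struct DECODEDMEMOP
    {
        uint8_t  uAddrSize;    /* 0 = 16-bit, 1 = 32-bit, 2 = 64-bit address size.      */
        bool     fBaseValid;   /* Base register field is valid.                         */
        uint8_t  iBaseReg;     /* Base GPR index.                                       */
        bool     fIdxValid;    /* Index register field is valid.                        */
        uint8_t  iIdxReg;      /* Index GPR index.                                      */
        uint8_t  uScale;       /* Scale as a power of two (0..3).                       */
        uint64_t uSegBase;     /* Segment base when it participates.                    */
        bool     fAddSegBase;  /* Whether the segment base is added (non-64-bit, FS/GS).*/
    } DECODEDMEMOP;

    /* Effective address = displacement + base + (index << scale), optionally + segment
       base, truncated to the address size -- mirroring the computation shown above. */
    static uint64_t DecodeEffAddr(const DECODEDMEMOP *pOp, const uint64_t *pauGprs, int64_t iDisp)
    {
        static const uint64_t s_auMasks[] = { UINT16_MAX, UINT32_MAX, UINT64_MAX };
        uint64_t uAddr = (uint64_t)iDisp;
        if (pOp->fBaseValid)
            uAddr += pauGprs[pOp->iBaseReg];
        if (pOp->fIdxValid)
            uAddr += pauGprs[pOp->iIdxReg] << pOp->uScale;
        if (pOp->fAddSegBase)
            uAddr += pOp->uSegBase;
        return uAddr & s_auMasks[pOp->uAddrSize];
    }

    int main(void)
    {
        uint64_t auGprs[16] = { 0 };
        auGprs[3] = 0x1000;  /* base  */
        auGprs[6] = 0x20;    /* index */
        DECODEDMEMOP Op = { 2 /*64-bit*/, true, 3, true, 6, 2 /*scale x4*/, 0, false };
        printf("effective address: %#llx\n", (unsigned long long)DecodeEffAddr(&Op, auGprs, 8));
        return 0;
    }

Compiled as-is (C99), the example prints 0x1088 for a base of 0x1000, an index of 0x20 scaled by 4 and a displacement of 8; the canonical-address and segment-limit checks that follow in the real function are deliberately left out of the sketch.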
trunk/src/VBox/VMM/VMMR3/CPUM.cpp
r73389 r73606 2775 2775 PCPUMCTX pCtx = &pVCpu->cpum.s.Guest; 2776 2776 static const char *const s_aHwvirtModes[] = { "No/inactive", "SVM", "VMX", "Common" }; 2777 uint8_t const idxHwvirtState = CPUMIsGuestInSvmNestedHwVirtMode(pCtx) ? CPUMHWVIRTDUMP_SVM 2778 : CPUMIsGuestInVmxNestedHwVirtMode(pCtx) ? CPUMHWVIRTDUMP_VMX : CPUMHWVIRTDUMP_NONE; 2777 bool const fSvm = pVM->cpum.ro.GuestFeatures.fSvm; 2778 bool const fVmx = pVM->cpum.ro.GuestFeatures.fVmx; 2779 uint8_t const idxHwvirtState = fSvm ? CPUMHWVIRTDUMP_SVM : (fVmx ? CPUMHWVIRTDUMP_VMX : CPUMHWVIRTDUMP_NONE); 2779 2780 AssertCompile(CPUMHWVIRTDUMP_LAST <= RT_ELEMENTS(s_aHwvirtModes)); 2780 2781 Assert(idxHwvirtState < RT_ELEMENTS(s_aHwvirtModes)); … … 2788 2789 2789 2790 if (fDumpState & CPUMHWVIRTDUMP_COMMON) 2790 { 2791 pHlp->pfnPrintf(pHlp, "fGif = %RTbool\n", pCtx->hwvirt.fGif); 2792 pHlp->pfnPrintf(pHlp, "fLocalForcedActions = %#RX32\n", pCtx->hwvirt.fLocalForcedActions); 2793 } 2791 pHlp->pfnPrintf(pHlp, "fLocalForcedActions = %#RX32\n", pCtx->hwvirt.fLocalForcedActions); 2792 2794 2793 pHlp->pfnPrintf(pHlp, "%s hwvirt state%s\n", pcszHwvirtMode, (fDumpState & (CPUMHWVIRTDUMP_SVM | CPUMHWVIRTDUMP_VMX)) ? 2795 2794 ":" : ""); 2796 2795 if (fDumpState & CPUMHWVIRTDUMP_SVM) 2797 2796 { 2797 pHlp->pfnPrintf(pHlp, " fGif = %RTbool\n", pCtx->hwvirt.fGif); 2798 2798 2799 char szEFlags[80]; 2799 2800 cpumR3InfoFormatFlags(&szEFlags[0], pCtx->hwvirt.svm.HostState.rflags.u); 2800 2801 2801 pHlp->pfnPrintf(pHlp, " uMsrHSavePa = %#RX64\n", pCtx->hwvirt.svm.uMsrHSavePa); 2802 2802 pHlp->pfnPrintf(pHlp, " GCPhysVmcb = %#RGp\n", pCtx->hwvirt.svm.GCPhysVmcb); … … 2839 2839 } 2840 2840 2841 /** @todo Intel. */2842 #if 02843 2841 if (fDumpState & CPUMHWVIRTDUMP_VMX) 2844 2842 { 2843 pHlp->pfnPrintf(pHlp, " fInVmxRootMode = %RTbool\n", pCtx->hwvirt.vmx.fInVmxRootMode); 2844 pHlp->pfnPrintf(pHlp, " fInVmxNonRootMode = %RTbool\n", pCtx->hwvirt.vmx.fInVmxNonRootMode); 2845 pHlp->pfnPrintf(pHlp, " GCPhysVmxon = %#RGp\n", pCtx->hwvirt.vmx.GCPhysVmxon); 2846 pHlp->pfnPrintf(pHlp, " GCPhysVmcs = %#RGp\n", pCtx->hwvirt.vmx.GCPhysVmcs); 2847 pHlp->pfnPrintf(pHlp, " enmInstrDiag = %u (%s)\n", pCtx->hwvirt.vmx.enmInstrDiag, 2848 HMVmxGetInstrDiagDesc(pCtx->hwvirt.vmx.enmInstrDiag)); 2849 /** @todo NSTVMX: Dump remaining/new fields. */ 2845 2850 } 2846 #endif2847 2851 2848 2852 #undef CPUMHWVIRTDUMP_NONE -
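The hardware-virtualization state dump above (driven through CPUM's DBGF info helpers) now selects the SVM or VMX section from the features exposed to the guest (fSvm/fVmx) rather than from whether a nested-guest is currently active, so the new VMX fields (VMXON/VMCS pointers, root and non-root mode flags, the last instruction diagnostic) are printed as soon as the guest can use VMX, not only while a nested-guest is running; the GIF line correspondingly moved into the SVM-only block. A trivial sketch of that selection, using stand-in names rather than the CPUM types:

    #include <stdio.h>
    #include <stdbool.h>

    enum { HWVIRTDUMP_NONE, HWVIRTDUMP_SVM, HWVIRTDUMP_VMX };

    /* Pick the hardware-virtualization section to dump from the *exposed* guest
       features, not from the currently active nested-guest mode; this way the VMX
       fields show up as soon as the guest enters VMX root operation. */
    static int PickHwvirtDump(bool fGuestSvm, bool fGuestVmx)
    {
        return fGuestSvm ? HWVIRTDUMP_SVM : fGuestVmx ? HWVIRTDUMP_VMX : HWVIRTDUMP_NONE;
    }

    int main(void)
    {
        static const char * const s_apszModes[] = { "No/inactive", "SVM", "VMX" };
        printf("%s\n", s_apszModes[PickHwvirtDump(false /*SVM*/, true /*VMX*/)]); /* "VMX" */
        return 0;
    }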
trunk/src/VBox/VMM/VMMR3/CPUMR3CpuId.cpp
r73389 r73606 3937 3937 AssertLogRelRCReturn(rc, rc); 3938 3938 3939 #if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX) 3940 /** @cfgm{/CPUM/NestedHWVirt, bool, false} 3941 * Whether to expose the hardware virtualization (VMX/SVM) feature to the guest. 3942 * The default is false, and when enabled requires nested paging and AMD-V or 3943 * unrestricted guest mode. 3944 */ 3945 rc = CFGMR3QueryBoolDef(pCpumCfg, "NestedHWVirt", &pConfig->fNestedHWVirt, false); 3946 AssertLogRelRCReturn(rc, rc); 3947 if ( pConfig->fNestedHWVirt 3948 && !fNestedPagingAndFullGuestExec) 3949 return VMSetError(pVM, VERR_CPUM_INVALID_HWVIRT_CONFIG, RT_SRC_POS, 3950 "Cannot enable nested VT-x/AMD-V without nested-paging and unresricted guest execution!\n"); 3951 3952 /** @todo Think about enabling this later with NEM/KVM. */ 3953 if ( pConfig->fNestedHWVirt 3954 && VM_IS_NEM_ENABLED(pVM)) 3955 { 3956 LogRel(("CPUM: WARNING! Can't turn on nested VT-x/AMD-V when NEM is used!\n")); 3957 pConfig->fNestedHWVirt = false; 3958 } 3939 bool fQueryNestedHwvirt = false; 3940 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM 3941 fQueryNestedHwvirt |= RT_BOOL(pVM->cpum.s.HostFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD); 3959 3942 #endif 3943 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX 3944 fQueryNestedHwvirt |= RT_BOOL( pVM->cpum.s.HostFeatures.enmCpuVendor == CPUMCPUVENDOR_INTEL 3945 || pVM->cpum.s.HostFeatures.enmCpuVendor == CPUMCPUVENDOR_VIA); 3946 #endif 3947 if (fQueryNestedHwvirt) 3948 { 3949 /** @cfgm{/CPUM/NestedHWVirt, bool, false} 3950 * Whether to expose the hardware virtualization (VMX/SVM) feature to the guest. 3951 * The default is false, and when enabled requires nested paging and AMD-V or 3952 * unrestricted guest mode. 3953 */ 3954 rc = CFGMR3QueryBoolDef(pCpumCfg, "NestedHWVirt", &pConfig->fNestedHWVirt, false); 3955 AssertLogRelRCReturn(rc, rc); 3956 if ( pConfig->fNestedHWVirt 3957 && !fNestedPagingAndFullGuestExec) 3958 return VMSetError(pVM, VERR_CPUM_INVALID_HWVIRT_CONFIG, RT_SRC_POS, 3959 "Cannot enable nested VT-x/AMD-V without nested-paging and unresricted guest execution!\n"); 3960 3961 /** @todo Think about enabling this later with NEM/KVM. */ 3962 if ( pConfig->fNestedHWVirt 3963 && VM_IS_NEM_ENABLED(pVM)) 3964 { 3965 LogRel(("CPUM: WARNING! Can't turn on nested VT-x/AMD-V when NEM is used!\n")); 3966 pConfig->fNestedHWVirt = false; 3967 } 3968 } 3960 3969 3961 3970 /* -
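With this CPUMR3CpuId.cpp change the /CPUM/NestedHWVirt key is only queried when the build contains nested-hardware-virtualization support matching the host vendor (SVM on AMD hosts, VMX on Intel or VIA hosts); as before it defaults to false, refuses to enable without nested paging plus unrestricted execution, and is warned about and force-disabled when NEM is the execution engine. A compact sketch of that gating, with stand-in inputs for everything the real code pulls from the host feature set and the CFGM tree:

    #include <stdio.h>
    #include <stdbool.h>

    /* Stand-in inputs; the real code reads these from the host features, the CFGM
       tree (/CPUM/NestedHWVirt, default false) and the VM configuration. */
    typedef struct NESTEDHWVIRTCFG
    {
        bool fHostIsAmd;                     /* Host CPU vendor is AMD.                 */
        bool fHostIsIntelOrVia;              /* Host CPU vendor is Intel or VIA.        */
        bool fBuildHasSvm;                   /* Built with VBOX_WITH_NESTED_HWVIRT_SVM. */
        bool fBuildHasVmx;                   /* Built with VBOX_WITH_NESTED_HWVIRT_VMX. */
        bool fCfgNestedHWVirt;               /* The /CPUM/NestedHWVirt value.           */
        bool fNestedPagingAndFullGuestExec;  /* Nested paging + unrestricted execution. */
        bool fNemActive;                     /* NEM is the execution engine.            */
    } NESTEDHWVIRTCFG;

    /* 1 = exposed, 0 = off, -1 = fatal config error (the VERR_CPUM_INVALID_HWVIRT_CONFIG case). */
    static int EvalNestedHwVirt(const NESTEDHWVIRTCFG *pCfg)
    {
        bool const fQuery = (pCfg->fBuildHasSvm && pCfg->fHostIsAmd)
                         || (pCfg->fBuildHasVmx && pCfg->fHostIsIntelOrVia);
        if (!fQuery || !pCfg->fCfgNestedHWVirt)
            return 0;                        /* key not queried or left at its default */
        if (!pCfg->fNestedPagingAndFullGuestExec)
            return -1;                       /* refused outright, as in the hunk above */
        if (pCfg->fNemActive)
            return 0;                        /* warned and turned off under NEM        */
        return 1;
    }

    int main(void)
    {
        NESTEDHWVIRTCFG Cfg = { false, true, false, true, true, true, false };
        printf("nested hw-virt: %d\n", EvalNestedHwVirt(&Cfg));  /* prints 1 */
        return 0;
    }

For completeness: CFGM keys like this one are normally reachable from the command line through the VBoxInternal extradata namespace (something along the lines of VBoxManage setextradata <vm> VBoxInternal/CPUM/NestedHWVirt 1), but treat that exact spelling as an assumption; it is not part of this changeset.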
trunk/src/VBox/VMM/VMMR3/EM.cpp
r73097 r73606 1815 1815 } 1816 1816 1817 if (CPUMIsGuestInVmxN estedHwVirtMode(&pVCpu->cpum.GstCtx))1817 if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx)) 1818 1818 { /** @todo Nested VMX. */ } 1819 1819 … … 2147 2147 Assert(!HMR3IsEventPending(pVCpu)); 2148 2148 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM 2149 if (CPUMIsGuestIn NestedHwVirtMode(&pVCpu->cpum.GstCtx))2149 if (CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.GstCtx)) 2150 2150 { 2151 2151 bool fResched, fInject; -
trunk/src/VBox/VMM/VMMR3/HM.cpp
r73389 r73606 42 42 #include <VBox/vmm/stam.h> 43 43 #include <VBox/vmm/mm.h> 44 #include <VBox/vmm/em.h> 44 45 #include <VBox/vmm/pdmapi.h> 45 46 #include <VBox/vmm/pgm.h> … … 77 78 #define EXIT_REASON(def, val, str) #def " - " #val " - " str 78 79 #define EXIT_REASON_NIL() NULL 79 /** Exit reason descriptions for VT-x, used to describe statistics. */ 80 static const char * const g_apszVTxExitReasons[MAX_EXITREASON_STAT] = 80 /** Exit reason descriptions for VT-x, used to describe statistics and exit 81 * history. */ 82 static const char * const g_apszVmxExitReasons[MAX_EXITREASON_STAT] = 81 83 { 82 84 EXIT_REASON(VMX_EXIT_XCPT_OR_NMI , 0, "Exception or non-maskable interrupt (NMI)."), … … 149 151 #define MAX_EXITREASON_VTX 64 150 152 151 /** A partial list of Exitreason descriptions for AMD-V, used to describe152 * statistics .153 /** A partial list of \#EXIT reason descriptions for AMD-V, used to describe 154 * statistics and exit history. 153 155 * 154 156 * @note AMD-V have annoyingly large gaps (e.g. \#NPF VMEXIT comes at 1024), 155 157 * this array doesn't contain the entire set of exit reasons, we 156 158 * handle them via hmSvmGetSpecialExitReasonDesc(). */ 157 static const char * const g_apsz AmdVExitReasons[MAX_EXITREASON_STAT] =159 static const char * const g_apszSvmExitReasons[MAX_EXITREASON_STAT] = 158 160 { 159 161 EXIT_REASON(SVM_EXIT_READ_CR0 , 0, "Read CR0."), … … 310 312 /** 311 313 * Gets the SVM exit reason if it's one of the reasons not present in the @c 312 * g_apsz AmdVExitReasons array.314 * g_apszSvmExitReasons array. 313 315 * 314 316 * @returns The exit reason or NULL if unknown. … … 1061 1063 #undef HM_REG_COUNTER 1062 1064 1063 const char *const *papszDesc = ASMIsIntelCpu() || ASMIsViaCentaurCpu() ? &g_apszV TxExitReasons[0]1064 : &g_apsz AmdVExitReasons[0];1065 const char *const *papszDesc = ASMIsIntelCpu() || ASMIsViaCentaurCpu() ? 
&g_apszVmxExitReasons[0] 1066 : &g_apszSvmExitReasons[0]; 1065 1067 1066 1068 /* … … 1938 1940 uint32_t u32Model; 1939 1941 uint32_t u32Stepping; 1940 if (HM AmdIsSubjectToErratum170(&u32Family, &u32Model, &u32Stepping))1942 if (HMSvmIsSubjectToErratum170(&u32Family, &u32Model, &u32Stepping)) 1941 1943 LogRel(("HM: AMD Cpu with erratum 170 family %#x model %#x stepping %#x\n", u32Family, u32Model, u32Stepping)); 1942 1944 LogRel(("HM: Max resume loops = %u\n", pVM->hm.s.cMaxResumeLoops)); … … 2948 2950 2949 2951 #ifdef VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM 2950 if (CPUMIsGuestInNestedHwVirtMode(pCtx)) 2952 if ( CPUMIsGuestInSvmNestedHwVirtMode(pCtx) 2953 || CPUMIsGuestVmxEnabled(pCtx)) 2951 2954 { 2952 2955 Log(("HMR3CanExecuteGuest: In nested-guest mode - returning false")); … … 3151 3154 && CPUMIsGuestInRealModeEx(pCtx) 3152 3155 && !PDMVmmDevHeapIsEnabled(pVM)) 3153 {3154 3156 return true; 3155 }3156 3157 3157 3158 return false; … … 3429 3430 LogRel(("HM: CPU[%u] Exit reason %#x\n", i, pVCpu->hm.s.vmx.LastError.u32ExitReason)); 3430 3431 3431 if ( pVM->aCpus[i].hm.s.vmx.LastError.u32InstrError == VMX _ERROR_VMLAUCH_NON_CLEAR_VMCS3432 || pVM->aCpus[i].hm.s.vmx.LastError.u32InstrError == VMX _ERROR_VMRESUME_NON_LAUNCHED_VMCS)3432 if ( pVM->aCpus[i].hm.s.vmx.LastError.u32InstrError == VMXINSTRERR_VMLAUNCH_NON_CLEAR_VMCS 3433 || pVM->aCpus[i].hm.s.vmx.LastError.u32InstrError == VMXINSTRERR_VMRESUME_NON_LAUNCHED_VMCS) 3433 3434 { 3434 3435 LogRel(("HM: CPU[%u] Entered Host Cpu %u\n", i, pVCpu->hm.s.vmx.LastError.idEnteredCpu)); 3435 3436 LogRel(("HM: CPU[%u] Current Host Cpu %u\n", i, pVCpu->hm.s.vmx.LastError.idCurrentCpu)); 3436 3437 } 3437 else if (pVM->aCpus[i].hm.s.vmx.LastError.u32InstrError == VMX _ERROR_VMENTRY_INVALID_CONTROL_FIELDS)3438 else if (pVM->aCpus[i].hm.s.vmx.LastError.u32InstrError == VMXINSTRERR_VMENTRY_INVALID_CTL) 3438 3439 { 3439 3440 LogRel(("HM: CPU[%u] PinCtls %#RX32\n", i, pVCpu->hm.s.vmx.u32PinCtls)); … … 3756 3757 VMMR3DECL(const char *) HMR3GetVmxExitName(uint32_t uExit) 3757 3758 { 3758 if (uExit < RT_ELEMENTS(g_apszV TxExitReasons))3759 return g_apszV TxExitReasons[uExit];3759 if (uExit < RT_ELEMENTS(g_apszVmxExitReasons)) 3760 return g_apszVmxExitReasons[uExit]; 3760 3761 return NULL; 3761 3762 } … … 3770 3771 VMMR3DECL(const char *) HMR3GetSvmExitName(uint32_t uExit) 3771 3772 { 3772 if (uExit < RT_ELEMENTS(g_apsz AmdVExitReasons))3773 return g_apsz AmdVExitReasons[uExit];3773 if (uExit < RT_ELEMENTS(g_apszSvmExitReasons)) 3774 return g_apszSvmExitReasons[uExit]; 3774 3775 return hmSvmGetSpecialExitReasonDesc(uExit); 3775 3776 } -
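The HM.cpp hunk renames the exit-reason name tables to g_apszVmxExitReasons and g_apszSvmExitReasons (they now also describe the exit history, not just the statistics) while keeping the lookup contract: HMR3GetVmxExitName() returns NULL for codes without a description, and HMR3GetSvmExitName() falls back to hmSvmGetSpecialExitReasonDesc() for the high SVM exit numbers. Callers therefore still need a NULL guard; the self-contained sketch below imitates that pattern with a made-up miniature table, not the real one.

    #include <stdio.h>
    #include <stdint.h>

    /* Tiny stand-in for the statistics/exit-history name tables; the real arrays
       cover all architectural exit reasons and use NULL for reserved entries. */
    static const char * const s_apszExitNames[] =
    {
        "Exception or NMI", "External interrupt", "Triple fault",
        NULL /* gap in this miniature table */, "INIT signal"
    };

    /* Same contract as HMR3GetVmxExitName(): NULL when there is no description. */
    static const char *GetExitName(uint32_t uExit)
    {
        return uExit < sizeof(s_apszExitNames) / sizeof(s_apszExitNames[0]) ? s_apszExitNames[uExit] : NULL;
    }

    int main(void)
    {
        for (uint32_t uExit = 0; uExit < 8; uExit++)
        {
            const char *pszName = GetExitName(uExit);
            printf("exit %2u: %s\n", uExit, pszName ? pszName : "<unknown>"); /* always guard against NULL */
        }
        return 0;
    }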
trunk/src/VBox/VMM/VMMR3/PGMPhys.cpp
r73097 r73606 4549 4549 if (pVCpu->pgm.s.fA20Enabled != fEnable) 4550 4550 { 4551 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX 4552 PCCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu); 4553 if ( CPUMIsGuestInVmxRootMode(pCtx) 4554 && !fEnable) 4555 { 4556 Log(("Cannot enter A20M mode while in VMX root mode\n")); 4557 return; 4558 } 4559 #endif 4551 4560 pVCpu->pgm.s.fA20Enabled = fEnable; 4552 4561 pVCpu->pgm.s.GCPhysA20Mask = ~((RTGCPHYS)!fEnable << 20); -
trunk/src/VBox/VMM/include/HMInternal.h
r73389 r73606 21 21 #include <VBox/cdefs.h> 22 22 #include <VBox/types.h> 23 #include <VBox/vmm/em.h>24 23 #include <VBox/vmm/stam.h> 25 24 #include <VBox/dis.h> -
trunk/src/VBox/VMM/testcase/tstIEMCheckMc.cpp
r73555 r73606 127 127 #define IEMOP_HLP_DONE_VEX_DECODING_L0_AND_NO_VVVV() do { } while (0) 128 128 #define IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES() do { } while (0) 129 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX 130 # define IEMOP_HLP_VMX_INSTR() do { } while (0) 131 #endif 129 132 130 133 -
trunk/src/VBox/VMM/testcase/tstVMStruct.h
r73250 r73606 146 146 GEN_CHECK_OFF(CPUMCTX, hwvirt.svm.pvIoBitmapR3); 147 147 GEN_CHECK_OFF(CPUMCTX, hwvirt.svm.HCPhysVmcb); 148 GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.GCPhysVmxon); 149 GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.GCPhysVmcs); 150 GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.enmInstrDiag); 151 GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.fInVmxRootMode); 152 GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.fInVmxNonRootMode); 153 GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.pVmcsR0); 154 GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.pVmcsR3); 148 155 GEN_CHECK_OFF(CPUMCTX, hwvirt.fLocalForcedActions); 149 156 GEN_CHECK_OFF(CPUMCTX, hwvirt.fGif);
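Finally, the new GEN_CHECK_OFF() entries in tstVMStruct.h pull the fresh CPUMCTX hwvirt.vmx members into the structure-layout checks, which is how the testcases catch the different contexts (ring-3, ring-0, raw-mode) disagreeing about member offsets. Reduced to standard C11 for illustration, the idea looks roughly like the snippet below; the miniature structure and the literal offsets in it are hypothetical, only the offsetof/static_assert pattern is the point.

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Hypothetical reduced structure standing in for the VMX portion of CPUMCTX.hwvirt. */
    typedef struct VMXSTATE
    {
        uint64_t GCPhysVmxon;        /* Physical address handed to VMXON.  */
        uint64_t GCPhysVmcs;         /* Current VMCS physical address.     */
        uint32_t enmInstrDiag;       /* Last VMX instruction diagnostic.   */
        uint8_t  fInVmxRootMode;     /* In VMX root operation.             */
        uint8_t  fInVmxNonRootMode;  /* In VMX non-root operation.         */
    } VMXSTATE;

    /* A GEN_CHECK_OFF-style guard: every context sharing the structure must agree
       on these offsets, otherwise state handed across contexts is misread. */
    static_assert(offsetof(VMXSTATE, GCPhysVmcs)   ==  8, "GCPhysVmcs moved");
    static_assert(offsetof(VMXSTATE, enmInstrDiag) == 16, "enmInstrDiag moved");

    int main(void) { return 0; }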