Changeset 76464 in vbox for trunk/src/VBox/VMM
- Timestamp: Dec 25, 2018 4:36:48 AM
- Location: trunk/src/VBox/VMM
- Files: 12 edited

trunk/src/VBox/VMM/VMMAll/HMVMXAll.cpp (r76397 → r76464)

- Removed HMVmxGetHostMsrs() and HMVmxGetHostMsr(), the accessors that handed out the VMX host MSRs HM had read during ring-0 initialization. HMVmxGetHostMsrs() copied the whole VMXMSRS struct when pVM->hm.s.vmx.fSupported was set and returned VERR_VMX_NO_VMX otherwise; HMVmxGetHostMsr() mapped a single MSR id (MSR_IA32_FEATURE_CONTROL, MSR_IA32_VMX_BASIC, the pin-based/processor-based/secondary/exit/entry control MSRs and their TRUE variants, MSR_IA32_VMX_MISC, the CR0/CR4 fixed-0/fixed-1 MSRs, MSR_IA32_VMX_VMCS_ENUM, MSR_IA32_VMX_VMFUNC and MSR_IA32_VMX_EPT_VPID_CAP) onto the matching VMXMSRS field, failing with VERR_NOT_FOUND for anything else. Both warned against being called before HM initialization was fully complete; callers now obtain the host hardware-virtualization MSRs from the support driver instead (see the HMR0.cpp and CPUM.cpp changes below, and the sketch that follows).
- The neighbouring function documented as "Gets the descriptive name of a VMX instruction/VM-exit diagnostic code" is untouched context.
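
For callers that previously went through HMVmxGetHostMsr(), the replacement pattern visible in this changeset is to fetch the whole SUPHWVIRTMSRS block from the support driver and read the typed union member directly. A minimal ring-3 sketch using only the SUP calls that appear in these diffs; the helper name and the trimmed error handling are mine, not VirtualBox's:

    #include <VBox/err.h>
    #include <VBox/sup.h>   /* SUPR3QueryVTCaps, SUPR3GetHwvirtMsrs, SUPHWVIRTMSRS */

    /* Hypothetical helper: returns the host's IA32_VMX_BASIC value, or 0 when
       VT-x is unavailable.  Mirrors cpumR3GetHostHwvirtMsrs() further down,
       minus the logging and the AMD-V branch. */
    static uint64_t getHostVmxBasicMsr(void)
    {
        uint32_t fCaps = 0;
        if (   RT_SUCCESS(SUPR3QueryVTCaps(&fCaps))
            && (fCaps & SUPVTCAPS_VT_X))
        {
            SUPHWVIRTMSRS HwvirtMsrs;
            if (RT_SUCCESS(SUPR3GetHwvirtMsrs(&HwvirtMsrs, false /* fForceRequery */)))
                return HwvirtMsrs.u.vmx.u64Basic;  /* typed access, no switch over MSR ids */
        }
        return 0;
    }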

trunk/src/VBox/VMM/VMMR0/HMR0.cpp (r76397 → r76464)

- Includes: <VBox/vmm/hm_vmx.h> is no longer included directly; <VBox/vmm/hm_svm.h> moved up to precede <VBox/vmm/hmvmxinline.h>.
- The pfnEnableCpu callback member and the hmR0DummyEnableCpu() stub now take a typed PCSUPHWVIRTMSRS pHwvirtMsrs parameter instead of an opaque void *pvArg.
- The g_HmR0 per-vendor state was folded into a new hwvirt struct: a union u holds the VT-x data (u64HostCr4, u64HostEfer, u64HostSmmMonitorCtl, ulLastInstrError, cPreemptTimerShift, fUsePreemptTimer, fUsingSUPR0EnableVTx, fCalledSUPR0EnableVTx, fSupported, plus explicit padding) and the AMD-V data (u32Rev, u32Features, fSupported, plus padding); outside the union sit uMaxAsid (now documented as "Maximum allowed ASID/VPID (inclusive)") and a SUPHWVIRTMSRS Msrs block that replaces both the old VMXMSRS Msrs member and the SVM-only u64MsrHwcr field. A condensed sketch of this union shape follows this list.
- Removed hmR0InitIntelReadVmxMsrs(), which read the VMX feature MSRs directly via ASMRdMsr() (feature control, basic, the pin/proc/exit/entry control MSRs, the TRUE variants when the basic MSR advertises them, misc, the CR0/CR4 fixed MSRs, VMCS enumeration and, when the secondary controls allow, PROCBASED_CTLS2, EPT_VPID_CAP and VMFUNC), assuming the MSRs are consistent across host CPUs. The early ASMRdMsr(MSR_IA32_FEATURE_CONTROL) read, kept for error reporting if VT-x init fails, now stores into g_HmR0.hwvirt.Msrs.u.vmx.u64FeatCtrl, and the rest are fetched with SUPR0GetHwvirtMsrs(&g_HmR0.hwvirt.Msrs, SUPVTCAPS_VT_X, false /* fForce */). The basic MSR is cached in a local for the dual-monitor (MSR_IA32_SMM_MONITOR_CTL) check and the VMXON revision id, and the pin-based controls and misc MSR are read through the new location when setting up the VMX-preemption timer.
- The AMD-V path stores the CPUID 0x8000000a results into g_HmR0.hwvirt.u.svm / g_HmR0.hwvirt.uMaxAsid and replaces the direct ASMRdMsr(MSR_K8_HWCR) read with SUPR0GetHwvirtMsrs(&g_HmR0.hwvirt.Msrs, SUPVTCAPS_AMD_V, false /* fForce */).
- hmR0EnableCpu() was simplified: both vendors now pass &g_HmR0.hwvirt.Msrs to pfnEnableCpu, where previously the VT-x paths passed &g_HmR0.vmx.Msrs and the AMD-V path passed NULL.
- HMR0PreInitVM() was removed and its work merged into HMR0InitVM(): the copying of globals into the VM structure (preemption-timer use and shift, host CR4/EFER/SMM monitor control and the VMX MSRs for VT-x, or the SVM revision, feature bits and HWCR MSR, plus rcInit and uMaxAsid) and the per-VCPU sanity asserts now run there, behind the existing fSuspended guard. The merged code no longer copies the fSupported flags from the globals into pVM->hm.s; it relies on them being set already and only asserts that VMX and SVM are not both flagged.
- The MSR_IA32_SMM_MONITOR_CTL comment is now labelled a "Nested KVM workaround", and all remaining g_HmR0.vmx.* / g_HmR0.svm.* / g_HmR0.uMaxAsid references (suspend/resume and power callbacks, MP-event registration, HMR0EnterCpu, the VT-x disable/resume helpers, global init/term) were mechanically updated to the new g_HmR0.hwvirt.* locations.
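
The new layout leans on the fact that a host CPU only ever runs one of VT-x or AMD-V, so the per-vendor state can share storage while the vendor-neutral state stays outside the union. A condensed sketch of that shape with a couple of representative fields; the field names follow the diff, the accessor is hypothetical:

    #include <stdbool.h>
    #include <stdint.h>

    struct HwvirtGlobalsSketch
    {
        union
        {
            /* Only one member is ever live, selected by the detected CPU vendor. */
            struct { uint64_t u64HostCr4; bool fSupported; } vmx;
            struct { uint32_t u32Rev;     bool fSupported; } svm;
        } u;
        uint32_t uMaxAsid;  /* vendor-neutral, hence outside the union */
    };

    static bool isHwvirtSupported(struct HwvirtGlobalsSketch const *pHwvirt, bool fIntel)
    {
        return fIntel ? pHwvirt->u.vmx.fSupported : pHwvirt->u.svm.fSupported;
    }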

trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp (r76402 → r76464)

- SVMR0EnableCpu(): the trailing void *pvArg parameter (documented "Unused on AMD-V") became PCSUPHWVIRTMSRS pHwvirtMsrs ("Pointer to the hardware-virtualization MSRs (currently unused)"), and the separate NOREF(pvArg); and NOREF(fEnabledByHost); statements were collapsed into a single RT_NOREF2(fEnabledByHost, pHwvirtMsrs);.
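
With both vendors' enable routines taking the same PCSUPHWVIRTMSRS parameter, the common caller in HMR0.cpp no longer needs a per-vendor branch for the last argument (compare the hmR0EnableCpu() hunk above). A self-contained sketch of that convention with stand-in types; everything here is illustrative, not the real VMM signatures:

    #include <stdio.h>

    typedef struct { unsigned long long u64Basic;   } VMXMSRSSKETCH;
    typedef struct { unsigned long long u64MsrHwcr; } SVMMSRSSKETCH;
    typedef struct { union { VMXMSRSSKETCH vmx; SVMMSRSSKETCH svm; } u; } HWVIRTMSRSSKETCH;

    /* One callback type for both back-ends, mirroring pfnEnableCpu. */
    typedef int (*PFNENABLECPUSKETCH)(HWVIRTMSRSSKETCH const *pHwvirtMsrs);

    static int vmxEnableCpu(HWVIRTMSRSSKETCH const *pMsrs)
    {
        /* VT-x consumes the block (e.g. the EPT/VPID capability check in HMVMXR0.cpp). */
        printf("VMX basic MSR: %#llx\n", pMsrs->u.vmx.u64Basic);
        return 0;
    }

    static int svmEnableCpu(HWVIRTMSRSSKETCH const *pMsrs)
    {
        (void)pMsrs;  /* AMD-V ignores it for now, like RT_NOREF2() in SVMR0EnableCpu(). */
        return 0;
    }

    int main(void)
    {
        HWVIRTMSRSSKETCH Msrs = { .u.vmx = { 0x1ULL } };
        PFNENABLECPUSKETCH pfnEnableCpu = vmxEnableCpu; /* picked by vendor detection */
        return pfnEnableCpu(&Msrs);  /* the caller passes one typed argument either way */
    }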

trunk/src/VBox/VMM/VMMR0/HMSVMR0.h (r73606 → r76464)

- The SVMR0EnableCpu() prototype was updated to match: the final parameter changed from void *pvArg to PCSUPHWVIRTMSRS pHwvirtMsrs.

trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp (r76397 → r76464)

- The memory-operand-decoding failure macro gained a NOREF(uXcptTmp); so the local consumed only by Log4Func() is referenced in all build configurations.
- VMXR0EnableCpu(): the void *pvMsrs parameter ("Opaque pointer to VMXMSRS struct") became PCSUPHWVIRTMSRS pHwvirtMsrs ("Pointer to the hardware-virtualization MSRs"), with the Assert updated to match, and the cast PVMXMSRS pMsrs = (PVMXMSRS)pvMsrs; was replaced by the typed access PCVMXMSRS pMsrs = &pHwvirtMsrs->u.vmx;. As before, pMsrs is used to test MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_ALL_CONTEXTS in u64EptVpidCaps before the all-contexts INVEPT flush.
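
The one consumer shown here tests the all-contexts INVEPT capability before the initial EPT flush. A minimal sketch of that bit test; the bit position is my reading of the Intel SDM's IA32_VMX_EPT_VPID_CAP layout, and only the constant's VBox name appears in the hunk above:

    #include <stdbool.h>
    #include <stdint.h>

    /* All-context INVEPT support, per the SDM's IA32_VMX_EPT_VPID_CAP layout
       (assumed bit 26 here; MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_ALL_CONTEXTS in VBox). */
    #define EPT_VPID_CAP_INVEPT_ALL_CONTEXTS  (UINT64_C(1) << 26)

    static bool canInveptAllContexts(uint64_t u64EptVpidCaps)
    {
        return (u64EptVpidCaps & EPT_VPID_CAP_INVEPT_ALL_CONTEXTS) != 0;
    }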

trunk/src/VBox/VMM/VMMR0/HMVMXR0.h (r72967 → r76464)

- The VMXR0EnableCpu() prototype was updated to match: the final parameter changed from void *pvMsrs to PCSUPHWVIRTMSRS pHwvirtMsrs.

trunk/src/VBox/VMM/VMMR0/VMMR0.cpp (r76290 → r76464)

- Removed the VMMR0_DO_HM_PRE_INIT case ("Pre-initialize hardware-assisted mode per-VM data") from the ring-0 operation dispatcher; it called the now-deleted HMR0PreInitVM(pVM) followed by VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING). The "Switch to RC to execute Hypervisor function" case that follows is unchanged.
trunk/src/VBox/VMM/VMMR3/CPUM.cpp
r76310 r76464 788 788 for (VMCPUID i = 0; i < pVM->cCpus; i++) 789 789 pVM->aCpus[i].cpum.s.fUseFlags |= CPUM_USE_FFXSR_LEAKY; 790 Log(("CPUM R3Init: host CPU has leaky fxsave/fxrstor behaviour\n"));790 Log(("CPUM: Host CPU has leaky fxsave/fxrstor behaviour\n")); 791 791 } 792 792 } … … 902 902 903 903 /** 904 * Initializes (or re-initializes)per-VCPU SVM hardware virtualization state.904 * Resets per-VCPU SVM hardware virtualization state. 905 905 * 906 906 * @param pVCpu The cross context virtual CPU structure. 907 907 */ 908 DECLINLINE(void) cpumR3 InitSvmHwVirtState(PVMCPU pVCpu)908 DECLINLINE(void) cpumR3ResetSvmHwVirtState(PVMCPU pVCpu) 909 909 { 910 910 PCPUMCTX pCtx = &pVCpu->cpum.s.Guest; … … 1114 1114 1115 1115 /** 1116 * Initializes (or re-initializes)per-VCPU VMX hardware virtualization state.1116 * Resets per-VCPU VMX hardware virtualization state. 1117 1117 * 1118 1118 * @param pVCpu The cross context virtual CPU structure. 1119 1119 */ 1120 DECLINLINE(void) cpumR3 InitVmxHwVirtState(PVMCPU pVCpu)1120 DECLINLINE(void) cpumR3ResetVmxHwVirtState(PVMCPU pVCpu) 1121 1121 { 1122 1122 PCPUMCTX pCtx = &pVCpu->cpum.s.Guest; … … 1157 1157 pHlp->pfnPrintf(pHlp, " Mnemonic - Description = guest (host)\n"); 1158 1158 VMXFEATDUMP("VMX - Virtual-Machine Extensions ", fVmx); 1159 if (!pGuestFeatures->fVmx)1160 return;1161 1159 /* Basic. */ 1162 1160 VMXFEATDUMP("InsOutInfo - INS/OUTS instruction info. ", fVmxInsOutInfo); … … 1250 1248 || pVM->bMainExecutionEngine == VM_EXEC_ENGINE_NATIVE_API) 1251 1249 return true; 1250 #else 1251 NOREF(pVM); 1252 1252 #endif 1253 1253 return false; … … 1256 1256 1257 1257 /** 1258 * Initializes the guest VMX MSRs from guest-CPU features. 1259 * 1260 * @param pVM The cross context VM structure. 1261 */ 1262 static void cpumR3InitGuestVmxMsrs(PVM pVM) 1263 { 1264 PVMCPU pVCpu0 = &pVM->aCpus[0]; 1265 PCCPUMFEATURES pFeatures = &pVM->cpum.s.GuestFeatures; 1266 PVMXMSRS pVmxMsrs = &pVCpu0->cpum.s.Guest.hwvirt.vmx.Msrs; 1267 1268 Assert(pFeatures->fVmx); 1269 RT_ZERO(*pVmxMsrs); 1270 1271 /* Feature control. */ 1272 pVmxMsrs->u64FeatCtrl = MSR_IA32_FEATURE_CONTROL_LOCK | MSR_IA32_FEATURE_CONTROL_VMXON; 1273 1274 /* Basic information. */ 1275 { 1276 uint64_t const u64Basic = RT_BF_MAKE(VMX_BF_BASIC_VMCS_ID, VMX_V_VMCS_REVISION_ID ) 1277 | RT_BF_MAKE(VMX_BF_BASIC_VMCS_SIZE, VMX_V_VMCS_SIZE ) 1278 | RT_BF_MAKE(VMX_BF_BASIC_PHYSADDR_WIDTH, !pFeatures->fLongMode ) 1279 | RT_BF_MAKE(VMX_BF_BASIC_DUAL_MON, 0 ) 1280 | RT_BF_MAKE(VMX_BF_BASIC_VMCS_MEM_TYPE, VMX_BASIC_MEM_TYPE_WB ) 1281 | RT_BF_MAKE(VMX_BF_BASIC_VMCS_INS_OUTS, pFeatures->fVmxInsOutInfo) 1282 | RT_BF_MAKE(VMX_BF_BASIC_TRUE_CTLS, 0 ); 1283 pVmxMsrs->u64Basic = u64Basic; 1284 } 1285 1286 /* Pin-based VM-execution controls. */ 1287 { 1288 uint32_t const fFeatures = (pFeatures->fVmxExtIntExit << VMX_BF_PIN_CTLS_EXT_INT_EXIT_SHIFT ) 1289 | (pFeatures->fVmxNmiExit << VMX_BF_PIN_CTLS_NMI_EXIT_SHIFT ) 1290 | (pFeatures->fVmxVirtNmi << VMX_BF_PIN_CTLS_VIRT_NMI_SHIFT ) 1291 | (pFeatures->fVmxPreemptTimer << VMX_BF_PIN_CTLS_PREEMPT_TIMER_SHIFT) 1292 | (pFeatures->fVmxPostedInt << VMX_BF_PIN_CTLS_POSTED_INT_SHIFT ); 1293 uint32_t const fAllowed0 = VMX_PIN_CTLS_DEFAULT1; 1294 uint32_t const fAllowed1 = fFeatures | VMX_PIN_CTLS_DEFAULT1; 1295 AssertMsg((fAllowed0 & fAllowed1) == fAllowed0, ("fAllowed0=%#RX32 fAllowed1=%#RX32 fFeatures=%#RX32\n", 1296 fAllowed0, fAllowed1, fFeatures)); 1297 pVmxMsrs->PinCtls.u = RT_MAKE_U64(fAllowed0, fAllowed1); 1298 } 1299 1300 /* Processor-based VM-execution controls. 
*/ 1301 { 1302 uint32_t const fFeatures = (pFeatures->fVmxIntWindowExit << VMX_BF_PROC_CTLS_INT_WINDOW_EXIT_SHIFT ) 1303 | (pFeatures->fVmxTscOffsetting << VMX_BF_PROC_CTLS_USE_TSC_OFFSETTING_SHIFT) 1304 | (pFeatures->fVmxHltExit << VMX_BF_PROC_CTLS_HLT_EXIT_SHIFT ) 1305 | (pFeatures->fVmxInvlpgExit << VMX_BF_PROC_CTLS_INVLPG_EXIT_SHIFT ) 1306 | (pFeatures->fVmxMwaitExit << VMX_BF_PROC_CTLS_MWAIT_EXIT_SHIFT ) 1307 | (pFeatures->fVmxRdpmcExit << VMX_BF_PROC_CTLS_RDPMC_EXIT_SHIFT ) 1308 | (pFeatures->fVmxRdtscExit << VMX_BF_PROC_CTLS_RDTSC_EXIT_SHIFT ) 1309 | (pFeatures->fVmxCr3LoadExit << VMX_BF_PROC_CTLS_CR3_LOAD_EXIT_SHIFT ) 1310 | (pFeatures->fVmxCr3StoreExit << VMX_BF_PROC_CTLS_CR3_STORE_EXIT_SHIFT ) 1311 | (pFeatures->fVmxCr8LoadExit << VMX_BF_PROC_CTLS_CR8_LOAD_EXIT_SHIFT ) 1312 | (pFeatures->fVmxCr8StoreExit << VMX_BF_PROC_CTLS_CR8_STORE_EXIT_SHIFT ) 1313 | (pFeatures->fVmxUseTprShadow << VMX_BF_PROC_CTLS_USE_TPR_SHADOW_SHIFT ) 1314 | (pFeatures->fVmxNmiWindowExit << VMX_BF_PROC_CTLS_NMI_WINDOW_EXIT_SHIFT ) 1315 | (pFeatures->fVmxMovDRxExit << VMX_BF_PROC_CTLS_MOV_DR_EXIT_SHIFT ) 1316 | (pFeatures->fVmxUncondIoExit << VMX_BF_PROC_CTLS_UNCOND_IO_EXIT_SHIFT ) 1317 | (pFeatures->fVmxUseIoBitmaps << VMX_BF_PROC_CTLS_USE_IO_BITMAPS_SHIFT ) 1318 | (pFeatures->fVmxMonitorTrapFlag << VMX_BF_PROC_CTLS_MONITOR_TRAP_FLAG_SHIFT ) 1319 | (pFeatures->fVmxUseMsrBitmaps << VMX_BF_PROC_CTLS_USE_MSR_BITMAPS_SHIFT ) 1320 | (pFeatures->fVmxMonitorExit << VMX_BF_PROC_CTLS_MONITOR_EXIT_SHIFT ) 1321 | (pFeatures->fVmxPauseExit << VMX_BF_PROC_CTLS_PAUSE_EXIT_SHIFT ) 1322 | (pFeatures->fVmxSecondaryExecCtls << VMX_BF_PROC_CTLS_USE_SECONDARY_CTLS_SHIFT); 1323 uint32_t const fAllowed0 = VMX_PROC_CTLS_DEFAULT1; 1324 uint32_t const fAllowed1 = fFeatures | VMX_PROC_CTLS_DEFAULT1; 1325 AssertMsg((fAllowed0 & fAllowed1) == fAllowed0, ("fAllowed0=%#RX32 fAllowed1=%#RX32 fFeatures=%#RX32\n", fAllowed0, 1326 fAllowed1, fFeatures)); 1327 pVmxMsrs->ProcCtls.u = RT_MAKE_U64(fAllowed0, fAllowed1); 1328 } 1329 1330 /* Secondary processor-based VM-execution controls. 
*/ 1331 if (pFeatures->fVmxSecondaryExecCtls) 1332 { 1333 uint32_t const fFeatures = (pFeatures->fVmxVirtApicAccess << VMX_BF_PROC_CTLS2_VIRT_APIC_ACCESS_SHIFT ) 1334 | (pFeatures->fVmxEpt << VMX_BF_PROC_CTLS2_EPT_SHIFT ) 1335 | (pFeatures->fVmxDescTableExit << VMX_BF_PROC_CTLS2_DESC_TABLE_EXIT_SHIFT ) 1336 | (pFeatures->fVmxRdtscp << VMX_BF_PROC_CTLS2_RDTSCP_SHIFT ) 1337 | (pFeatures->fVmxVirtX2ApicMode << VMX_BF_PROC_CTLS2_VIRT_X2APIC_MODE_SHIFT ) 1338 | (pFeatures->fVmxVpid << VMX_BF_PROC_CTLS2_VPID_SHIFT ) 1339 | (pFeatures->fVmxWbinvdExit << VMX_BF_PROC_CTLS2_WBINVD_EXIT_SHIFT ) 1340 | (pFeatures->fVmxUnrestrictedGuest << VMX_BF_PROC_CTLS2_UNRESTRICTED_GUEST_SHIFT) 1341 | (pFeatures->fVmxApicRegVirt << VMX_BF_PROC_CTLS2_APIC_REG_VIRT_SHIFT ) 1342 | (pFeatures->fVmxVirtIntDelivery << VMX_BF_PROC_CTLS2_VIRT_INT_DELIVERY_SHIFT ) 1343 | (pFeatures->fVmxPauseLoopExit << VMX_BF_PROC_CTLS2_PAUSE_LOOP_EXIT_SHIFT ) 1344 | (pFeatures->fVmxRdrandExit << VMX_BF_PROC_CTLS2_RDRAND_EXIT_SHIFT ) 1345 | (pFeatures->fVmxInvpcid << VMX_BF_PROC_CTLS2_INVPCID_SHIFT ) 1346 | (pFeatures->fVmxVmFunc << VMX_BF_PROC_CTLS2_VMFUNC_SHIFT ) 1347 | (pFeatures->fVmxVmcsShadowing << VMX_BF_PROC_CTLS2_VMCS_SHADOWING_SHIFT ) 1348 | (pFeatures->fVmxRdseedExit << VMX_BF_PROC_CTLS2_RDSEED_EXIT_SHIFT ) 1349 | (pFeatures->fVmxPml << VMX_BF_PROC_CTLS2_PML_SHIFT ) 1350 | (pFeatures->fVmxEptXcptVe << VMX_BF_PROC_CTLS2_EPT_VE_SHIFT ) 1351 | (pFeatures->fVmxXsavesXrstors << VMX_BF_PROC_CTLS2_XSAVES_XRSTORS_SHIFT ) 1352 | (pFeatures->fVmxUseTscScaling << VMX_BF_PROC_CTLS2_TSC_SCALING_SHIFT ); 1353 uint32_t const fAllowed0 = 0; 1354 uint32_t const fAllowed1 = fFeatures; 1355 pVmxMsrs->ProcCtls2.u = RT_MAKE_U64(fAllowed0, fAllowed1); 1356 } 1357 1358 /* VM-exit controls. */ 1359 { 1360 uint32_t const fFeatures = (pFeatures->fVmxExitSaveDebugCtls << VMX_BF_EXIT_CTLS_SAVE_DEBUG_SHIFT ) 1361 | (pFeatures->fVmxHostAddrSpaceSize << VMX_BF_EXIT_CTLS_HOST_ADDR_SPACE_SIZE_SHIFT) 1362 | (pFeatures->fVmxExitAckExtInt << VMX_BF_EXIT_CTLS_ACK_EXT_INT_SHIFT ) 1363 | (pFeatures->fVmxExitSavePatMsr << VMX_BF_EXIT_CTLS_SAVE_PAT_MSR_SHIFT ) 1364 | (pFeatures->fVmxExitLoadPatMsr << VMX_BF_EXIT_CTLS_LOAD_PAT_MSR_SHIFT ) 1365 | (pFeatures->fVmxExitSaveEferMsr << VMX_BF_EXIT_CTLS_SAVE_EFER_MSR_SHIFT ) 1366 | (pFeatures->fVmxExitLoadEferMsr << VMX_BF_EXIT_CTLS_LOAD_EFER_MSR_SHIFT ) 1367 | (pFeatures->fVmxSavePreemptTimer << VMX_BF_EXIT_CTLS_SAVE_PREEMPT_TIMER_SHIFT ); 1368 /* Set the default1 class bits. See Intel spec. A.4 "VM-exit Controls". */ 1369 uint32_t const fAllowed0 = VMX_EXIT_CTLS_DEFAULT1; 1370 uint32_t const fAllowed1 = fFeatures | VMX_EXIT_CTLS_DEFAULT1; 1371 AssertMsg((fAllowed0 & fAllowed1) == fAllowed0, ("fAllowed0=%#RX32 fAllowed1=%#RX32 fFeatures=%#RX32\n", fAllowed0, 1372 fAllowed1, fFeatures)); 1373 pVmxMsrs->ExitCtls.u = RT_MAKE_U64(fAllowed0, fAllowed1); 1374 } 1375 1376 /* VM-entry controls. 
*/ 1377 { 1378 uint32_t const fFeatures = (pFeatures->fVmxEntryLoadDebugCtls << VMX_BF_ENTRY_CTLS_LOAD_DEBUG_SHIFT ) 1379 | (pFeatures->fVmxIa32eModeGuest << VMX_BF_ENTRY_CTLS_IA32E_MODE_GUEST_SHIFT) 1380 | (pFeatures->fVmxEntryLoadEferMsr << VMX_BF_ENTRY_CTLS_LOAD_EFER_MSR_SHIFT ) 1381 | (pFeatures->fVmxEntryLoadPatMsr << VMX_BF_ENTRY_CTLS_LOAD_PAT_MSR_SHIFT ); 1382 uint32_t const fAllowed0 = VMX_ENTRY_CTLS_DEFAULT1; 1383 uint32_t const fAllowed1 = fFeatures | VMX_ENTRY_CTLS_DEFAULT1; 1384 AssertMsg((fAllowed0 & fAllowed1) == fAllowed0, ("fAllowed0=%#RX32 fAllowed0=%#RX32 fFeatures=%#RX32\n", fAllowed0, 1385 fAllowed1, fFeatures)); 1386 pVmxMsrs->EntryCtls.u = RT_MAKE_U64(fAllowed0, fAllowed1); 1387 } 1388 1389 /* Miscellaneous data. */ 1390 { 1391 uint64_t uHostMsr = 0; 1392 if (cpumR3IsHwAssistVmxNstGstExecAllowed(pVM)) 1393 HMVmxGetHostMsr(pVM, MSR_IA32_VMX_MISC, &uHostMsr); 1394 uint8_t const cMaxMsrs = RT_MIN(RT_BF_GET(uHostMsr, VMX_BF_MISC_MAX_MSRS), VMX_V_AUTOMSR_COUNT_MAX); 1395 uint8_t const fActivityState = RT_BF_GET(uHostMsr, VMX_BF_MISC_ACTIVITY_STATES) & VMX_V_GUEST_ACTIVITY_STATE_MASK; 1396 pVmxMsrs->u64Misc = RT_BF_MAKE(VMX_BF_MISC_PREEMPT_TIMER_TSC, VMX_V_PREEMPT_TIMER_SHIFT ) 1397 | RT_BF_MAKE(VMX_BF_MISC_EXIT_SAVE_EFER_LMA, pFeatures->fVmxExitSaveEferLma ) 1398 | RT_BF_MAKE(VMX_BF_MISC_ACTIVITY_STATES, fActivityState ) 1399 | RT_BF_MAKE(VMX_BF_MISC_INTEL_PT, pFeatures->fVmxIntelPt ) 1400 | RT_BF_MAKE(VMX_BF_MISC_SMM_READ_SMBASE_MSR, 0 ) 1401 | RT_BF_MAKE(VMX_BF_MISC_CR3_TARGET, VMX_V_CR3_TARGET_COUNT ) 1402 | RT_BF_MAKE(VMX_BF_MISC_MAX_MSRS, cMaxMsrs ) 1403 | RT_BF_MAKE(VMX_BF_MISC_VMXOFF_BLOCK_SMI, 0 ) 1404 | RT_BF_MAKE(VMX_BF_MISC_VMWRITE_ALL, pFeatures->fVmxVmwriteAll ) 1405 | RT_BF_MAKE(VMX_BF_MISC_ENTRY_INJECT_SOFT_INT, pFeatures->fVmxEntryInjectSoftInt) 1406 | RT_BF_MAKE(VMX_BF_MISC_MSEG_ID, VMX_V_MSEG_REV_ID ); 1407 } 1408 1409 /* CR0 Fixed-0. */ 1410 pVmxMsrs->u64Cr0Fixed0 = pFeatures->fVmxUnrestrictedGuest ? VMX_V_CR0_FIXED0_UX: VMX_V_CR0_FIXED0; 1411 1412 /* CR0 Fixed-1. */ 1413 { 1414 uint64_t uHostMsr = 0; 1415 if (cpumR3IsHwAssistVmxNstGstExecAllowed(pVM)) 1416 HMVmxGetHostMsr(pVM, MSR_IA32_VMX_CR0_FIXED1, &uHostMsr); 1417 pVmxMsrs->u64Cr0Fixed1 = uHostMsr | VMX_V_CR0_FIXED0; /* Make sure the CR0 MB1 bits are not clear. */ 1418 } 1419 1420 /* CR4 Fixed-0. */ 1421 pVmxMsrs->u64Cr4Fixed0 = VMX_V_CR4_FIXED0; 1422 1423 /* CR4 Fixed-1. */ 1424 { 1425 uint64_t uHostMsr = 0; 1426 if (cpumR3IsHwAssistVmxNstGstExecAllowed(pVM)) 1427 HMVmxGetHostMsr(pVM, MSR_IA32_VMX_CR4_FIXED1, &uHostMsr); 1428 pVmxMsrs->u64Cr4Fixed1 = uHostMsr | VMX_V_CR4_FIXED0; /* Make sure the CR4 MB1 bits are not clear. */ 1429 } 1430 1431 /* VMCS Enumeration. */ 1432 pVmxMsrs->u64VmcsEnum = VMX_V_VMCS_MAX_INDEX << VMX_BF_VMCS_ENUM_HIGHEST_IDX_SHIFT; 1433 1434 /* VM Functions. */ 1435 if (pFeatures->fVmxVmFunc) 1436 pVmxMsrs->u64VmFunc = RT_BF_MAKE(VMX_BF_VMFUNC_EPTP_SWITCHING, 1); 1258 * Initializes the VMX guest MSRs from guest CPU features based on the host MSRs. 1259 * 1260 * @param pVM The cross context VM structure. 1261 * @param pHostVmxMsrs The host VMX MSRs. Pass NULL when fully emulating VMX 1262 * and no hardware-assisted nested-guest execution is 1263 * possible for this VM. 1264 * @param pGuestFeatures The guest features to use (only VMX features are 1265 * accessed). 1266 * @param pGuestVmxMsrs Where to store the initialized guest VMX MSRs. 1267 * 1268 * @remarks This function ASSUMES the VMX guest-features are already exploded! 
1269 */ 1270 static void cpumR3InitVmxGuestMsrs(PVM pVM, PCVMXMSRS pHostVmxMsrs, PCCPUMFEATURES pGuestFeatures, PVMXMSRS pGuestVmxMsrs) 1271 { 1272 Assert(!cpumR3IsHwAssistVmxNstGstExecAllowed(pVM) || pHostVmxMsrs); 1273 Assert(pGuestFeatures->fVmx); 1437 1274 1438 1275 /* … … 1445 1282 */ 1446 1283 1447 /* 1448 * Copy the MSRs values initialized in VCPU 0 to all other VCPUs. 1449 */ 1450 for (VMCPUID idCpu = 1; idCpu < pVM->cCpus; idCpu++) 1451 { 1452 PVMCPU pVCpu = &pVM->aCpus[idCpu]; 1453 Assert(pVCpu); 1454 memcpy(&pVCpu->cpum.s.Guest.hwvirt.vmx.Msrs, pVmxMsrs, sizeof(*pVmxMsrs)); 1455 } 1456 } 1457 1458 1459 /** 1460 * Explode VMX features from the provided MSRs. 1461 * 1462 * @param pVmxMsrs Pointer to the VMX MSRs. 1463 * @param pFeatures Pointer to the features struct. to populate. 1464 */ 1465 static void cpumR3ExplodeVmxFeatures(PCVMXMSRS pVmxMsrs, PCPUMFEATURES pFeatures) 1466 { 1467 Assert(pVmxMsrs); 1468 Assert(pFeatures); 1469 Assert(pFeatures->fVmx); 1284 /* Feature control. */ 1285 pGuestVmxMsrs->u64FeatCtrl = MSR_IA32_FEATURE_CONTROL_LOCK | MSR_IA32_FEATURE_CONTROL_VMXON; 1470 1286 1471 1287 /* Basic information. */ 1472 1288 { 1473 uint64_t const u64Basic = pVmxMsrs->u64Basic; 1474 pFeatures->fVmxInsOutInfo = RT_BF_GET(u64Basic, VMX_BF_BASIC_VMCS_INS_OUTS); 1289 uint64_t const u64Basic = RT_BF_MAKE(VMX_BF_BASIC_VMCS_ID, VMX_V_VMCS_REVISION_ID ) 1290 | RT_BF_MAKE(VMX_BF_BASIC_VMCS_SIZE, VMX_V_VMCS_SIZE ) 1291 | RT_BF_MAKE(VMX_BF_BASIC_PHYSADDR_WIDTH, !pGuestFeatures->fLongMode ) 1292 | RT_BF_MAKE(VMX_BF_BASIC_DUAL_MON, 0 ) 1293 | RT_BF_MAKE(VMX_BF_BASIC_VMCS_MEM_TYPE, VMX_BASIC_MEM_TYPE_WB ) 1294 | RT_BF_MAKE(VMX_BF_BASIC_VMCS_INS_OUTS, pGuestFeatures->fVmxInsOutInfo) 1295 | RT_BF_MAKE(VMX_BF_BASIC_TRUE_CTLS, 0 ); 1296 pGuestVmxMsrs->u64Basic = u64Basic; 1475 1297 } 1476 1298 1477 1299 /* Pin-based VM-execution controls. */ 1478 1300 { 1479 uint32_t const fPinCtls = pVmxMsrs->PinCtls.n.allowed1; 1480 pFeatures->fVmxExtIntExit = RT_BOOL(fPinCtls & VMX_PIN_CTLS_EXT_INT_EXIT); 1481 pFeatures->fVmxNmiExit = RT_BOOL(fPinCtls & VMX_PIN_CTLS_NMI_EXIT); 1482 pFeatures->fVmxVirtNmi = RT_BOOL(fPinCtls & VMX_PIN_CTLS_VIRT_NMI); 1483 pFeatures->fVmxPreemptTimer = RT_BOOL(fPinCtls & VMX_PIN_CTLS_PREEMPT_TIMER); 1484 pFeatures->fVmxPostedInt = RT_BOOL(fPinCtls & VMX_PIN_CTLS_POSTED_INT); 1301 uint32_t const fFeatures = (pGuestFeatures->fVmxExtIntExit << VMX_BF_PIN_CTLS_EXT_INT_EXIT_SHIFT ) 1302 | (pGuestFeatures->fVmxNmiExit << VMX_BF_PIN_CTLS_NMI_EXIT_SHIFT ) 1303 | (pGuestFeatures->fVmxVirtNmi << VMX_BF_PIN_CTLS_VIRT_NMI_SHIFT ) 1304 | (pGuestFeatures->fVmxPreemptTimer << VMX_BF_PIN_CTLS_PREEMPT_TIMER_SHIFT) 1305 | (pGuestFeatures->fVmxPostedInt << VMX_BF_PIN_CTLS_POSTED_INT_SHIFT ); 1306 uint32_t const fAllowed0 = VMX_PIN_CTLS_DEFAULT1; 1307 uint32_t const fAllowed1 = fFeatures | VMX_PIN_CTLS_DEFAULT1; 1308 AssertMsg((fAllowed0 & fAllowed1) == fAllowed0, ("fAllowed0=%#RX32 fAllowed1=%#RX32 fFeatures=%#RX32\n", 1309 fAllowed0, fAllowed1, fFeatures)); 1310 pGuestVmxMsrs->PinCtls.u = RT_MAKE_U64(fAllowed0, fAllowed1); 1485 1311 } 1486 1312 1487 1313 /* Processor-based VM-execution controls. 
*/ 1488 1314 { 1489 uint32_t const fProcCtls = pVmxMsrs->ProcCtls.n.allowed1; 1490 pFeatures->fVmxIntWindowExit = RT_BOOL(fProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT); 1491 pFeatures->fVmxTscOffsetting = RT_BOOL(fProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING); 1492 pFeatures->fVmxHltExit = RT_BOOL(fProcCtls & VMX_PROC_CTLS_HLT_EXIT); 1493 pFeatures->fVmxInvlpgExit = RT_BOOL(fProcCtls & VMX_PROC_CTLS_INVLPG_EXIT); 1494 pFeatures->fVmxMwaitExit = RT_BOOL(fProcCtls & VMX_PROC_CTLS_MWAIT_EXIT); 1495 pFeatures->fVmxRdpmcExit = RT_BOOL(fProcCtls & VMX_PROC_CTLS_RDPMC_EXIT); 1496 pFeatures->fVmxRdtscExit = RT_BOOL(fProcCtls & VMX_PROC_CTLS_RDTSC_EXIT); 1497 pFeatures->fVmxCr3LoadExit = RT_BOOL(fProcCtls & VMX_PROC_CTLS_CR3_LOAD_EXIT); 1498 pFeatures->fVmxCr3StoreExit = RT_BOOL(fProcCtls & VMX_PROC_CTLS_CR3_STORE_EXIT); 1499 pFeatures->fVmxCr8LoadExit = RT_BOOL(fProcCtls & VMX_PROC_CTLS_CR8_LOAD_EXIT); 1500 pFeatures->fVmxCr8StoreExit = RT_BOOL(fProcCtls & VMX_PROC_CTLS_CR8_STORE_EXIT); 1501 pFeatures->fVmxUseTprShadow = RT_BOOL(fProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW); 1502 pFeatures->fVmxNmiWindowExit = RT_BOOL(fProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT); 1503 pFeatures->fVmxMovDRxExit = RT_BOOL(fProcCtls & VMX_PROC_CTLS_MOV_DR_EXIT); 1504 pFeatures->fVmxUncondIoExit = RT_BOOL(fProcCtls & VMX_PROC_CTLS_UNCOND_IO_EXIT); 1505 pFeatures->fVmxUseIoBitmaps = RT_BOOL(fProcCtls & VMX_PROC_CTLS_USE_IO_BITMAPS); 1506 pFeatures->fVmxMonitorTrapFlag = RT_BOOL(fProcCtls & VMX_PROC_CTLS_MONITOR_TRAP_FLAG); 1507 pFeatures->fVmxUseMsrBitmaps = RT_BOOL(fProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS); 1508 pFeatures->fVmxMonitorExit = RT_BOOL(fProcCtls & VMX_PROC_CTLS_MONITOR_EXIT); 1509 pFeatures->fVmxPauseExit = RT_BOOL(fProcCtls & VMX_PROC_CTLS_PAUSE_EXIT); 1510 pFeatures->fVmxSecondaryExecCtls = RT_BOOL(fProcCtls & VMX_PROC_CTLS_USE_SECONDARY_CTLS); 1315 uint32_t const fFeatures = (pGuestFeatures->fVmxIntWindowExit << VMX_BF_PROC_CTLS_INT_WINDOW_EXIT_SHIFT ) 1316 | (pGuestFeatures->fVmxTscOffsetting << VMX_BF_PROC_CTLS_USE_TSC_OFFSETTING_SHIFT) 1317 | (pGuestFeatures->fVmxHltExit << VMX_BF_PROC_CTLS_HLT_EXIT_SHIFT ) 1318 | (pGuestFeatures->fVmxInvlpgExit << VMX_BF_PROC_CTLS_INVLPG_EXIT_SHIFT ) 1319 | (pGuestFeatures->fVmxMwaitExit << VMX_BF_PROC_CTLS_MWAIT_EXIT_SHIFT ) 1320 | (pGuestFeatures->fVmxRdpmcExit << VMX_BF_PROC_CTLS_RDPMC_EXIT_SHIFT ) 1321 | (pGuestFeatures->fVmxRdtscExit << VMX_BF_PROC_CTLS_RDTSC_EXIT_SHIFT ) 1322 | (pGuestFeatures->fVmxCr3LoadExit << VMX_BF_PROC_CTLS_CR3_LOAD_EXIT_SHIFT ) 1323 | (pGuestFeatures->fVmxCr3StoreExit << VMX_BF_PROC_CTLS_CR3_STORE_EXIT_SHIFT ) 1324 | (pGuestFeatures->fVmxCr8LoadExit << VMX_BF_PROC_CTLS_CR8_LOAD_EXIT_SHIFT ) 1325 | (pGuestFeatures->fVmxCr8StoreExit << VMX_BF_PROC_CTLS_CR8_STORE_EXIT_SHIFT ) 1326 | (pGuestFeatures->fVmxUseTprShadow << VMX_BF_PROC_CTLS_USE_TPR_SHADOW_SHIFT ) 1327 | (pGuestFeatures->fVmxNmiWindowExit << VMX_BF_PROC_CTLS_NMI_WINDOW_EXIT_SHIFT ) 1328 | (pGuestFeatures->fVmxMovDRxExit << VMX_BF_PROC_CTLS_MOV_DR_EXIT_SHIFT ) 1329 | (pGuestFeatures->fVmxUncondIoExit << VMX_BF_PROC_CTLS_UNCOND_IO_EXIT_SHIFT ) 1330 | (pGuestFeatures->fVmxUseIoBitmaps << VMX_BF_PROC_CTLS_USE_IO_BITMAPS_SHIFT ) 1331 | (pGuestFeatures->fVmxMonitorTrapFlag << VMX_BF_PROC_CTLS_MONITOR_TRAP_FLAG_SHIFT ) 1332 | (pGuestFeatures->fVmxUseMsrBitmaps << VMX_BF_PROC_CTLS_USE_MSR_BITMAPS_SHIFT ) 1333 | (pGuestFeatures->fVmxMonitorExit << VMX_BF_PROC_CTLS_MONITOR_EXIT_SHIFT ) 1334 | (pGuestFeatures->fVmxPauseExit << VMX_BF_PROC_CTLS_PAUSE_EXIT_SHIFT ) 1335 | 
(pGuestFeatures->fVmxSecondaryExecCtls << VMX_BF_PROC_CTLS_USE_SECONDARY_CTLS_SHIFT); 1336 uint32_t const fAllowed0 = VMX_PROC_CTLS_DEFAULT1; 1337 uint32_t const fAllowed1 = fFeatures | VMX_PROC_CTLS_DEFAULT1; 1338 AssertMsg((fAllowed0 & fAllowed1) == fAllowed0, ("fAllowed0=%#RX32 fAllowed1=%#RX32 fFeatures=%#RX32\n", fAllowed0, 1339 fAllowed1, fFeatures)); 1340 pGuestVmxMsrs->ProcCtls.u = RT_MAKE_U64(fAllowed0, fAllowed1); 1511 1341 } 1512 1342 1513 1343 /* Secondary processor-based VM-execution controls. */ 1514 { 1515 uint32_t const fProcCtls2 = pFeatures->fVmxSecondaryExecCtls ? pVmxMsrs->ProcCtls2.n.allowed1 : 0; 1516 pFeatures->fVmxVirtApicAccess = RT_BOOL(fProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS); 1517 pFeatures->fVmxEpt = RT_BOOL(fProcCtls2 & VMX_PROC_CTLS2_EPT); 1518 pFeatures->fVmxDescTableExit = RT_BOOL(fProcCtls2 & VMX_PROC_CTLS2_DESC_TABLE_EXIT); 1519 pFeatures->fVmxRdtscp = RT_BOOL(fProcCtls2 & VMX_PROC_CTLS2_RDTSCP); 1520 pFeatures->fVmxVirtX2ApicMode = RT_BOOL(fProcCtls2 & VMX_PROC_CTLS2_VIRT_X2APIC_MODE); 1521 pFeatures->fVmxVpid = RT_BOOL(fProcCtls2 & VMX_PROC_CTLS2_VPID); 1522 pFeatures->fVmxWbinvdExit = RT_BOOL(fProcCtls2 & VMX_PROC_CTLS2_WBINVD_EXIT); 1523 pFeatures->fVmxUnrestrictedGuest = RT_BOOL(fProcCtls2 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST); 1524 pFeatures->fVmxApicRegVirt = RT_BOOL(fProcCtls2 & VMX_PROC_CTLS2_APIC_REG_VIRT); 1525 pFeatures->fVmxVirtIntDelivery = RT_BOOL(fProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY); 1526 pFeatures->fVmxPauseLoopExit = RT_BOOL(fProcCtls2 & VMX_PROC_CTLS2_PAUSE_LOOP_EXIT); 1527 pFeatures->fVmxRdrandExit = RT_BOOL(fProcCtls2 & VMX_PROC_CTLS2_RDRAND_EXIT); 1528 pFeatures->fVmxInvpcid = RT_BOOL(fProcCtls2 & VMX_PROC_CTLS2_INVPCID); 1529 pFeatures->fVmxVmFunc = RT_BOOL(fProcCtls2 & VMX_PROC_CTLS2_VMFUNC); 1530 pFeatures->fVmxVmcsShadowing = RT_BOOL(fProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING); 1531 pFeatures->fVmxRdseedExit = RT_BOOL(fProcCtls2 & VMX_PROC_CTLS2_RDSEED_EXIT); 1532 pFeatures->fVmxPml = RT_BOOL(fProcCtls2 & VMX_PROC_CTLS2_PML); 1533 pFeatures->fVmxEptXcptVe = RT_BOOL(fProcCtls2 & VMX_PROC_CTLS2_EPT_VE); 1534 pFeatures->fVmxXsavesXrstors = RT_BOOL(fProcCtls2 & VMX_PROC_CTLS2_XSAVES_XRSTORS); 1535 pFeatures->fVmxUseTscScaling = RT_BOOL(fProcCtls2 & VMX_PROC_CTLS2_TSC_SCALING); 1344 if (pGuestFeatures->fVmxSecondaryExecCtls) 1345 { 1346 uint32_t const fFeatures = (pGuestFeatures->fVmxVirtApicAccess << VMX_BF_PROC_CTLS2_VIRT_APIC_ACCESS_SHIFT ) 1347 | (pGuestFeatures->fVmxEpt << VMX_BF_PROC_CTLS2_EPT_SHIFT ) 1348 | (pGuestFeatures->fVmxDescTableExit << VMX_BF_PROC_CTLS2_DESC_TABLE_EXIT_SHIFT ) 1349 | (pGuestFeatures->fVmxRdtscp << VMX_BF_PROC_CTLS2_RDTSCP_SHIFT ) 1350 | (pGuestFeatures->fVmxVirtX2ApicMode << VMX_BF_PROC_CTLS2_VIRT_X2APIC_MODE_SHIFT ) 1351 | (pGuestFeatures->fVmxVpid << VMX_BF_PROC_CTLS2_VPID_SHIFT ) 1352 | (pGuestFeatures->fVmxWbinvdExit << VMX_BF_PROC_CTLS2_WBINVD_EXIT_SHIFT ) 1353 | (pGuestFeatures->fVmxUnrestrictedGuest << VMX_BF_PROC_CTLS2_UNRESTRICTED_GUEST_SHIFT) 1354 | (pGuestFeatures->fVmxApicRegVirt << VMX_BF_PROC_CTLS2_APIC_REG_VIRT_SHIFT ) 1355 | (pGuestFeatures->fVmxVirtIntDelivery << VMX_BF_PROC_CTLS2_VIRT_INT_DELIVERY_SHIFT ) 1356 | (pGuestFeatures->fVmxPauseLoopExit << VMX_BF_PROC_CTLS2_PAUSE_LOOP_EXIT_SHIFT ) 1357 | (pGuestFeatures->fVmxRdrandExit << VMX_BF_PROC_CTLS2_RDRAND_EXIT_SHIFT ) 1358 | (pGuestFeatures->fVmxInvpcid << VMX_BF_PROC_CTLS2_INVPCID_SHIFT ) 1359 | (pGuestFeatures->fVmxVmFunc << VMX_BF_PROC_CTLS2_VMFUNC_SHIFT ) 1360 | (pGuestFeatures->fVmxVmcsShadowing << 
VMX_BF_PROC_CTLS2_VMCS_SHADOWING_SHIFT ) 1361 | (pGuestFeatures->fVmxRdseedExit << VMX_BF_PROC_CTLS2_RDSEED_EXIT_SHIFT ) 1362 | (pGuestFeatures->fVmxPml << VMX_BF_PROC_CTLS2_PML_SHIFT ) 1363 | (pGuestFeatures->fVmxEptXcptVe << VMX_BF_PROC_CTLS2_EPT_VE_SHIFT ) 1364 | (pGuestFeatures->fVmxXsavesXrstors << VMX_BF_PROC_CTLS2_XSAVES_XRSTORS_SHIFT ) 1365 | (pGuestFeatures->fVmxUseTscScaling << VMX_BF_PROC_CTLS2_TSC_SCALING_SHIFT ); 1366 uint32_t const fAllowed0 = 0; 1367 uint32_t const fAllowed1 = fFeatures; 1368 pGuestVmxMsrs->ProcCtls2.u = RT_MAKE_U64(fAllowed0, fAllowed1); 1536 1369 } 1537 1370 1538 1371 /* VM-exit controls. */ 1539 1372 { 1540 uint32_t const fExitCtls = pVmxMsrs->ExitCtls.n.allowed1; 1541 pFeatures->fVmxExitSaveDebugCtls = RT_BOOL(fExitCtls & VMX_EXIT_CTLS_SAVE_DEBUG); 1542 pFeatures->fVmxHostAddrSpaceSize = RT_BOOL(fExitCtls & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE); 1543 pFeatures->fVmxExitAckExtInt = RT_BOOL(fExitCtls & VMX_EXIT_CTLS_ACK_EXT_INT); 1544 pFeatures->fVmxExitSavePatMsr = RT_BOOL(fExitCtls & VMX_EXIT_CTLS_SAVE_PAT_MSR); 1545 pFeatures->fVmxExitLoadPatMsr = RT_BOOL(fExitCtls & VMX_EXIT_CTLS_LOAD_PAT_MSR); 1546 pFeatures->fVmxExitSaveEferMsr = RT_BOOL(fExitCtls & VMX_EXIT_CTLS_SAVE_EFER_MSR); 1547 pFeatures->fVmxExitLoadEferMsr = RT_BOOL(fExitCtls & VMX_EXIT_CTLS_LOAD_EFER_MSR); 1548 pFeatures->fVmxSavePreemptTimer = RT_BOOL(fExitCtls & VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER); 1373 uint32_t const fFeatures = (pGuestFeatures->fVmxExitSaveDebugCtls << VMX_BF_EXIT_CTLS_SAVE_DEBUG_SHIFT ) 1374 | (pGuestFeatures->fVmxHostAddrSpaceSize << VMX_BF_EXIT_CTLS_HOST_ADDR_SPACE_SIZE_SHIFT) 1375 | (pGuestFeatures->fVmxExitAckExtInt << VMX_BF_EXIT_CTLS_ACK_EXT_INT_SHIFT ) 1376 | (pGuestFeatures->fVmxExitSavePatMsr << VMX_BF_EXIT_CTLS_SAVE_PAT_MSR_SHIFT ) 1377 | (pGuestFeatures->fVmxExitLoadPatMsr << VMX_BF_EXIT_CTLS_LOAD_PAT_MSR_SHIFT ) 1378 | (pGuestFeatures->fVmxExitSaveEferMsr << VMX_BF_EXIT_CTLS_SAVE_EFER_MSR_SHIFT ) 1379 | (pGuestFeatures->fVmxExitLoadEferMsr << VMX_BF_EXIT_CTLS_LOAD_EFER_MSR_SHIFT ) 1380 | (pGuestFeatures->fVmxSavePreemptTimer << VMX_BF_EXIT_CTLS_SAVE_PREEMPT_TIMER_SHIFT ); 1381 /* Set the default1 class bits. See Intel spec. A.4 "VM-exit Controls". */ 1382 uint32_t const fAllowed0 = VMX_EXIT_CTLS_DEFAULT1; 1383 uint32_t const fAllowed1 = fFeatures | VMX_EXIT_CTLS_DEFAULT1; 1384 AssertMsg((fAllowed0 & fAllowed1) == fAllowed0, ("fAllowed0=%#RX32 fAllowed1=%#RX32 fFeatures=%#RX32\n", fAllowed0, 1385 fAllowed1, fFeatures)); 1386 pGuestVmxMsrs->ExitCtls.u = RT_MAKE_U64(fAllowed0, fAllowed1); 1549 1387 } 1550 1388 1551 1389 /* VM-entry controls. 
*/ 1552 1390 { 1553 uint32_t const fEntryCtls = pVmxMsrs->EntryCtls.n.allowed1; 1554 pFeatures->fVmxEntryLoadDebugCtls = RT_BOOL(fEntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG); 1555 pFeatures->fVmxIa32eModeGuest = RT_BOOL(fEntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST); 1556 pFeatures->fVmxEntryLoadEferMsr = RT_BOOL(fEntryCtls & VMX_ENTRY_CTLS_LOAD_EFER_MSR); 1557 pFeatures->fVmxEntryLoadPatMsr = RT_BOOL(fEntryCtls & VMX_ENTRY_CTLS_LOAD_PAT_MSR); 1391 uint32_t const fFeatures = (pGuestFeatures->fVmxEntryLoadDebugCtls << VMX_BF_ENTRY_CTLS_LOAD_DEBUG_SHIFT ) 1392 | (pGuestFeatures->fVmxIa32eModeGuest << VMX_BF_ENTRY_CTLS_IA32E_MODE_GUEST_SHIFT) 1393 | (pGuestFeatures->fVmxEntryLoadEferMsr << VMX_BF_ENTRY_CTLS_LOAD_EFER_MSR_SHIFT ) 1394 | (pGuestFeatures->fVmxEntryLoadPatMsr << VMX_BF_ENTRY_CTLS_LOAD_PAT_MSR_SHIFT ); 1395 uint32_t const fAllowed0 = VMX_ENTRY_CTLS_DEFAULT1; 1396 uint32_t const fAllowed1 = fFeatures | VMX_ENTRY_CTLS_DEFAULT1; 1397 AssertMsg((fAllowed0 & fAllowed1) == fAllowed0, ("fAllowed0=%#RX32 fAllowed0=%#RX32 fFeatures=%#RX32\n", fAllowed0, 1398 fAllowed1, fFeatures)); 1399 pGuestVmxMsrs->EntryCtls.u = RT_MAKE_U64(fAllowed0, fAllowed1); 1558 1400 } 1559 1401 1560 1402 /* Miscellaneous data. */ 1561 1403 { 1562 uint32_t const fMiscData = pVmxMsrs->u64Misc; 1563 pFeatures->fVmxExitSaveEferLma = RT_BOOL(fMiscData & VMX_MISC_EXIT_SAVE_EFER_LMA); 1564 pFeatures->fVmxIntelPt = RT_BOOL(fMiscData & VMX_MISC_INTEL_PT); 1565 pFeatures->fVmxVmwriteAll = RT_BOOL(fMiscData & VMX_MISC_VMWRITE_ALL); 1566 pFeatures->fVmxEntryInjectSoftInt = RT_BOOL(fMiscData & VMX_MISC_ENTRY_INJECT_SOFT_INT); 1567 } 1404 uint64_t const uHostMsr = cpumR3IsHwAssistVmxNstGstExecAllowed(pVM) ? pHostVmxMsrs->u64Misc : 0; 1405 1406 uint8_t const cMaxMsrs = RT_MIN(RT_BF_GET(uHostMsr, VMX_BF_MISC_MAX_MSRS), VMX_V_AUTOMSR_COUNT_MAX); 1407 uint8_t const fActivityState = RT_BF_GET(uHostMsr, VMX_BF_MISC_ACTIVITY_STATES) & VMX_V_GUEST_ACTIVITY_STATE_MASK; 1408 pGuestVmxMsrs->u64Misc = RT_BF_MAKE(VMX_BF_MISC_PREEMPT_TIMER_TSC, VMX_V_PREEMPT_TIMER_SHIFT ) 1409 | RT_BF_MAKE(VMX_BF_MISC_EXIT_SAVE_EFER_LMA, pGuestFeatures->fVmxExitSaveEferLma ) 1410 | RT_BF_MAKE(VMX_BF_MISC_ACTIVITY_STATES, fActivityState ) 1411 | RT_BF_MAKE(VMX_BF_MISC_INTEL_PT, pGuestFeatures->fVmxIntelPt ) 1412 | RT_BF_MAKE(VMX_BF_MISC_SMM_READ_SMBASE_MSR, 0 ) 1413 | RT_BF_MAKE(VMX_BF_MISC_CR3_TARGET, VMX_V_CR3_TARGET_COUNT ) 1414 | RT_BF_MAKE(VMX_BF_MISC_MAX_MSRS, cMaxMsrs ) 1415 | RT_BF_MAKE(VMX_BF_MISC_VMXOFF_BLOCK_SMI, 0 ) 1416 | RT_BF_MAKE(VMX_BF_MISC_VMWRITE_ALL, pGuestFeatures->fVmxVmwriteAll ) 1417 | RT_BF_MAKE(VMX_BF_MISC_ENTRY_INJECT_SOFT_INT, pGuestFeatures->fVmxEntryInjectSoftInt) 1418 | RT_BF_MAKE(VMX_BF_MISC_MSEG_ID, VMX_V_MSEG_REV_ID ); 1419 } 1420 1421 /* CR0 Fixed-0. */ 1422 pGuestVmxMsrs->u64Cr0Fixed0 = pGuestFeatures->fVmxUnrestrictedGuest ? VMX_V_CR0_FIXED0_UX: VMX_V_CR0_FIXED0; 1423 1424 /* CR0 Fixed-1. */ 1425 { 1426 uint64_t const uHostMsr = cpumR3IsHwAssistVmxNstGstExecAllowed(pVM) ? pHostVmxMsrs->u64Cr0Fixed1 : 0; 1427 pGuestVmxMsrs->u64Cr0Fixed1 = uHostMsr | VMX_V_CR0_FIXED0; /* Make sure the CR0 MB1 bits are not clear. */ 1428 } 1429 1430 /* CR4 Fixed-0. */ 1431 pGuestVmxMsrs->u64Cr4Fixed0 = VMX_V_CR4_FIXED0; 1432 1433 /* CR4 Fixed-1. */ 1434 { 1435 uint64_t const uHostMsr = cpumR3IsHwAssistVmxNstGstExecAllowed(pVM) ? pHostVmxMsrs->u64Cr4Fixed1 : 0; 1436 pGuestVmxMsrs->u64Cr4Fixed1 = uHostMsr | VMX_V_CR4_FIXED0; /* Make sure the CR4 MB1 bits are not clear. */ 1437 } 1438 1439 /* VMCS Enumeration. 
+    /* VMCS Enumeration. */
+    pGuestVmxMsrs->u64VmcsEnum = VMX_V_VMCS_MAX_INDEX << VMX_BF_VMCS_ENUM_HIGHEST_IDX_SHIFT;
+
+    /* VM Functions. */
+    if (pGuestFeatures->fVmxVmFunc)
+        pGuestVmxMsrs->u64VmFunc = RT_BF_MAKE(VMX_BF_VMFUNC_EPTP_SWITCHING, 1);
 }

@@ -1659 +1536 @@
 #endif

-/**
- * Initializes VMX host and guest features.
- *
- * @param   pVM     The cross context VM structure.
- *
- * @remarks This must be called only after HM has fully initialized since it calls
- *          into HM to retrieve VMX and related MSRs.
- */
-static void cpumR3InitVmxCpuFeatures(PVM pVM)
-{
-    /*
-     * Init. host features.
-     */
-    PCPUMFEATURES pHostFeat = &pVM->cpum.s.HostFeatures;
-    VMXMSRS VmxMsrs;
-    if (cpumR3IsHwAssistVmxNstGstExecAllowed(pVM))
-    {
-        /** @todo NSTVMX: When NEM support for nested-VMX is there, we'll need to fetch
-         *        the MSRs from NEM or do the support driver IOCTL route, see patch in
-         *        @bugref{9180}. */
-        if (HMIsEnabled(pVM))
-        {
-            int rc = HMVmxGetHostMsrs(pVM, &VmxMsrs);
-            if (RT_SUCCESS(rc))
-                cpumR3ExplodeVmxFeatures(&VmxMsrs, pHostFeat);
-        }
-        else
-            AssertMsgFailed(("NEM support for nested-VMX is not implemented yet\n"));
-    }
+
+/**
+ * Initializes VMX guest features and MSRs.
+ *
+ * @param   pVM             The cross context VM structure.
+ * @param   pHostVmxMsrs    The host VMX MSRs. Pass NULL when fully emulating VMX
+ *                          and no hardware-assisted nested-guest execution is
+ *                          possible for this VM.
+ * @param   pGuestVmxMsrs   Where to store the initialized guest VMX MSRs.
+ */
+void cpumR3InitVmxGuestFeaturesAndMsrs(PVM pVM, PCVMXMSRS pHostVmxMsrs, PVMXMSRS pGuestVmxMsrs)
+{
+    Assert(pVM);
+    Assert(pGuestVmxMsrs);

     /*
      * Initialize the set of VMX features we emulate.
-     * Note! Some bits might be reported as 1 always if they fall under the default1 class bits
-     * (e.g. fVmxEntryLoadDebugCtls), see @bugref{9180#c5}.
+     *
+     * Note! Some bits might be reported as 1 always if they fall under the
+     * default1 class bits (e.g. fVmxEntryLoadDebugCtls), see @bugref{9180#c5}.
      */
     CPUMFEATURES EmuFeat;

@@ -1767 +1630 @@
      * by the hardware, hence we merge our emulated features with the host features below.
      */
-    PCCPUMFEATURES pBaseFeat  = cpumR3IsHwAssistVmxNstGstExecAllowed(pVM) ? pHostFeat : &EmuFeat;
-    PCPUMFEATURES  pGuestFeat = &pVM->cpum.s.GuestFeatures;
-    pGuestFeat->fVmx = (pBaseFeat->fVmx & EmuFeat.fVmx);
+    PCCPUMFEATURES pBaseFeat  = cpumR3IsHwAssistVmxNstGstExecAllowed(pVM) ? &pVM->cpum.s.HostFeatures : &EmuFeat;
+    PCPUMFEATURES  pGuestFeat = &pVM->cpum.s.GuestFeatures;
+    Assert(pBaseFeat->fVmx);
     pGuestFeat->fVmxInsOutInfo = (pBaseFeat->fVmxInsOutInfo & EmuFeat.fVmxInsOutInfo);
     pGuestFeat->fVmxExtIntExit = (pBaseFeat->fVmxExtIntExit & EmuFeat.fVmxExtIntExit);

@@ -1860 +1723 @@
     /*
-     * Finally initialize the VMX guest MSRs after merging the guest features.
+     * Finally initialize the VMX guest MSRs.
      */
-    cpumR3InitGuestVmxMsrs(pVM);
+    cpumR3InitVmxGuestMsrs(pVM, pHostVmxMsrs, pGuestFeat, pGuestVmxMsrs);
 }
+
+
+static int cpumR3GetHostHwvirtMsrs(PCPUMMSRS pMsrs)
+{
+    Assert(pMsrs);
+
+    uint32_t fCaps = 0;
+    int rc = SUPR3QueryVTCaps(&fCaps);
+    if (RT_SUCCESS(rc))
+    {
+        if (fCaps & (SUPVTCAPS_VT_X | SUPVTCAPS_AMD_V))
+        {
+            SUPHWVIRTMSRS HwvirtMsrs;
+            rc = SUPR3GetHwvirtMsrs(&HwvirtMsrs, false /* fForceRequery */);
+            if (RT_SUCCESS(rc))
+            {
+                if (fCaps & SUPVTCAPS_VT_X)
+                    pMsrs->hwvirt.vmx = HwvirtMsrs.u.vmx;
+                else
+                    pMsrs->hwvirt.svm = HwvirtMsrs.u.svm;
+                return VINF_SUCCESS;
+            }
+
+            LogRel(("CPUM: Query hardware-virtualization MSRs failed. rc=%Rrc\n", rc));
+            return rc;
+        }
+        else
+        {
+            LogRel(("CPUM: Querying hardware-virtualization capability succeeded but did not find VT-x or AMD-V\n"));
+            return VERR_INTERNAL_ERROR_5;
+        }
+    }
+    else
+        LogRel(("CPUM: No hardware-virtualization capability detected\n"));
+
+    return VINF_SUCCESS;
+}

@@ -1916 +1816 @@
     if (!ASMHasCpuId())
     {
-        Log(("The CPU doesn't support CPUID!\n"));
+        LogRel(("The CPU doesn't support CPUID!\n"));
         return VERR_UNSUPPORTED_CPU;
     }

     pVM->cpum.s.fHostMxCsrMask = CPUMR3DeterminHostMxCsrMask();
+
+    CPUMMSRS HostMsrs;
+    RT_ZERO(HostMsrs);
+    int rc = cpumR3GetHostHwvirtMsrs(&HostMsrs);
+    AssertLogRelRCReturn(rc, rc);

     PCPUMCPUIDLEAF paLeaves;
     uint32_t       cLeaves;
-    int rc = CPUMR3CpuIdCollectLeaves(&paLeaves, &cLeaves);
+    rc = CPUMR3CpuIdCollectLeaves(&paLeaves, &cLeaves);
     AssertLogRelRCReturn(rc, rc);

-    rc = cpumR3CpuIdExplodeFeatures(paLeaves, cLeaves, &pVM->cpum.s.HostFeatures);
+    rc = cpumR3CpuIdExplodeFeatures(paLeaves, cLeaves, &HostMsrs, &pVM->cpum.s.HostFeatures);
     RTMemFree(paLeaves);
     AssertLogRelRCReturn(rc, rc);

@@ -2042 +1947 @@
      * Initialize the Guest CPUID and MSR states.
      */
-    rc = cpumR3InitCpuIdAndMsrs(pVM);
+    rc = cpumR3InitCpuIdAndMsrs(pVM, &HostMsrs);
     if (RT_FAILURE(rc))
         return rc;

     /*
-     * Allocate memory required by the guest hardware virtualization state.
+     * Allocate memory required by the guest hardware-virtualization structures.
+     * This must be done after initializing CPUID/MSR features as we access
+     * the VMX/SVM guest features below.
      */
     if (pVM->cpum.s.GuestFeatures.fVmx)

@@ -2057 +1964 @@
     if (RT_FAILURE(rc))
         return rc;
-
-    /*
-     * Initialize guest hardware virtualization state.
-     */
-    CPUMHWVIRT const enmHwvirt = pVM->aCpus[0].cpum.s.Guest.hwvirt.enmHwvirt;
-    if (enmHwvirt == CPUMHWVIRT_VMX)
-    {
-        for (VMCPUID i = 0; i < pVM->cCpus; i++)
-            cpumR3InitVmxHwVirtState(&pVM->aCpus[i]);
-
-        /* Initialize VMX features. */
-        cpumR3InitVmxCpuFeatures(pVM);
-        DBGFR3Info(pVM->pUVM, "cpumvmxfeat", "default", DBGFR3InfoLogRelHlp());
-    }
-    else if (enmHwvirt == CPUMHWVIRT_SVM)
-    {
-        for (VMCPUID i = 0; i < pVM->cCpus; i++)
-            cpumR3InitSvmHwVirtState(&pVM->aCpus[i]);
-    }
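The new cpumR3GetHostHwvirtMsrs() helper replaces the earlier HMVmxGetHostMsrs() detour through HM. For illustration, a minimal ring-3 sketch of the same support-driver path, assuming the SUPLib interface introduced alongside this changeset (error handling abbreviated):

    #include <VBox/sup.h>
    #include <iprt/errcore.h>
    #include <stdio.h>

    /* Sketch: query hardware-virtualization capabilities and fetch the raw
       VT-x/AMD-V MSRs through the support driver, mirroring the helper above. */
    static int queryHwvirtMsrs(void)
    {
        uint32_t fCaps = 0;
        int rc = SUPR3QueryVTCaps(&fCaps);
        if (RT_FAILURE(rc))
            return rc;                      /* no hardware virtualization */

        SUPHWVIRTMSRS HwvirtMsrs;
        rc = SUPR3GetHwvirtMsrs(&HwvirtMsrs, false /* fForceRequery */);
        if (RT_SUCCESS(rc) && (fCaps & SUPVTCAPS_VT_X))
            printf("IA32_VMX_BASIC=%#llx\n", (unsigned long long)HwvirtMsrs.u.vmx.u64Basic);
        return rc;
    }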
@@ -2314 +2202 @@
     Assert(!pVM->cpum.s.GuestFeatures.fVmx || !pVM->cpum.s.GuestFeatures.fSvm); /* Paranoia. */
     if (pVM->cpum.s.GuestFeatures.fVmx)
-        cpumR3InitVmxHwVirtState(pVCpu);
+        cpumR3ResetVmxHwVirtState(pVCpu);
     else if (pVM->cpum.s.GuestFeatures.fSvm)
-        cpumR3InitSvmHwVirtState(pVCpu);
+        cpumR3ResetSvmHwVirtState(pVCpu);
 }

@@ -2670 +2558 @@
         }
     }
+    /** @todo NSTVMX: Load VMX state. */
 }
 else

@@ -2773 +2662 @@
      */
     if (uVersion >= CPUM_SAVED_STATE_VERSION_VER3_2)
-        return cpumR3LoadCpuId(pVM, pSSM, uVersion);
+    {
+        CPUMMSRS GuestMsrs;
+        RT_ZERO(GuestMsrs);
+        if (pVM->cpum.s.GuestFeatures.fVmx)
+            GuestMsrs.hwvirt.vmx = pVM->aCpus[0].cpum.s.Guest.hwvirt.vmx.Msrs;
+        return cpumR3LoadCpuId(pVM, pSSM, uVersion, &GuestMsrs);
+    }
     return cpumR3LoadCpuIdPre32(pVM, pSSM, uVersion);
 }

@@ -4032 +3927 @@
     RTLogRelSetBuffering(fOldBuffered);
     LogRel(("******************** End of CPUID dump **********************\n"));
-}
-
+
+    /*
+     * Log VT-x extended features.
+     *
+     * SVM features are currently all covered under CPUID so there is nothing
+     * to do here for SVM.
+     */
+    if (pVM->cpum.s.HostFeatures.fVmx)
+    {
+        LogRel(("*********************** VT-x features ***********************\n"));
+        DBGFR3Info(pVM->pUVM, "cpumvmxfeat", "default", DBGFR3InfoLogRelHlp());
+        LogRel(("\n"));
+        LogRel(("******************* End of VT-x features ********************\n"));
+    }
+}
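The VT-x feature table logged above reuses the registered "cpumvmxfeat" DBGF info handler, so the same dump should also be available on demand, e.g. via "info cpumvmxfeat" in the VM debugger console. A minimal programmatic sketch for ring-3 code holding the user-mode VM handle:

    #include <VBox/vmm/dbgf.h>

    /* Sketch: emit the VT-x feature table to the release log on demand by
       invoking the "cpumvmxfeat" info handler, as the new logging code does. */
    static void logVmxFeatures(PUVM pUVM)
    {
        DBGFR3Info(pUVM, "cpumvmxfeat", "default", DBGFR3InfoLogRelHlp());
    }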
-
trunk/src/VBox/VMM/VMMR3/CPUMR3CpuId.cpp
r74163 r76464

@@ -1679 +1679 @@

-int cpumR3CpuIdExplodeFeatures(PCCPUMCPUIDLEAF paLeaves, uint32_t cLeaves, PCPUMFEATURES pFeatures)
-{
+static void cpumR3ExplodeVmxFeatures(PCVMXMSRS pVmxMsrs, PCPUMFEATURES pFeatures)
+{
+    Assert(pVmxMsrs);
+    Assert(pFeatures);
+    Assert(pFeatures->fVmx);
+
+    /* Basic information. */
+    {
+        uint64_t const u64Basic = pVmxMsrs->u64Basic;
+        pFeatures->fVmxInsOutInfo = RT_BF_GET(u64Basic, VMX_BF_BASIC_VMCS_INS_OUTS);
+    }
+
+    /* Pin-based VM-execution controls. */
+    {
+        uint32_t const fPinCtls = pVmxMsrs->PinCtls.n.allowed1;
+        pFeatures->fVmxExtIntExit   = RT_BOOL(fPinCtls & VMX_PIN_CTLS_EXT_INT_EXIT);
+        pFeatures->fVmxNmiExit      = RT_BOOL(fPinCtls & VMX_PIN_CTLS_NMI_EXIT);
+        pFeatures->fVmxVirtNmi      = RT_BOOL(fPinCtls & VMX_PIN_CTLS_VIRT_NMI);
+        pFeatures->fVmxPreemptTimer = RT_BOOL(fPinCtls & VMX_PIN_CTLS_PREEMPT_TIMER);
+        pFeatures->fVmxPostedInt    = RT_BOOL(fPinCtls & VMX_PIN_CTLS_POSTED_INT);
+    }
+
+    /* Processor-based VM-execution controls. */
+    {
+        uint32_t const fProcCtls = pVmxMsrs->ProcCtls.n.allowed1;
+        pFeatures->fVmxIntWindowExit     = RT_BOOL(fProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT);
+        pFeatures->fVmxTscOffsetting     = RT_BOOL(fProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING);
+        pFeatures->fVmxHltExit           = RT_BOOL(fProcCtls & VMX_PROC_CTLS_HLT_EXIT);
+        pFeatures->fVmxInvlpgExit        = RT_BOOL(fProcCtls & VMX_PROC_CTLS_INVLPG_EXIT);
+        pFeatures->fVmxMwaitExit         = RT_BOOL(fProcCtls & VMX_PROC_CTLS_MWAIT_EXIT);
+        pFeatures->fVmxRdpmcExit         = RT_BOOL(fProcCtls & VMX_PROC_CTLS_RDPMC_EXIT);
+        pFeatures->fVmxRdtscExit         = RT_BOOL(fProcCtls & VMX_PROC_CTLS_RDTSC_EXIT);
+        pFeatures->fVmxCr3LoadExit       = RT_BOOL(fProcCtls & VMX_PROC_CTLS_CR3_LOAD_EXIT);
+        pFeatures->fVmxCr3StoreExit      = RT_BOOL(fProcCtls & VMX_PROC_CTLS_CR3_STORE_EXIT);
+        pFeatures->fVmxCr8LoadExit       = RT_BOOL(fProcCtls & VMX_PROC_CTLS_CR8_LOAD_EXIT);
+        pFeatures->fVmxCr8StoreExit      = RT_BOOL(fProcCtls & VMX_PROC_CTLS_CR8_STORE_EXIT);
+        pFeatures->fVmxUseTprShadow      = RT_BOOL(fProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
+        pFeatures->fVmxNmiWindowExit     = RT_BOOL(fProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT);
+        pFeatures->fVmxMovDRxExit        = RT_BOOL(fProcCtls & VMX_PROC_CTLS_MOV_DR_EXIT);
+        pFeatures->fVmxUncondIoExit      = RT_BOOL(fProcCtls & VMX_PROC_CTLS_UNCOND_IO_EXIT);
+        pFeatures->fVmxUseIoBitmaps      = RT_BOOL(fProcCtls & VMX_PROC_CTLS_USE_IO_BITMAPS);
+        pFeatures->fVmxMonitorTrapFlag   = RT_BOOL(fProcCtls & VMX_PROC_CTLS_MONITOR_TRAP_FLAG);
+        pFeatures->fVmxUseMsrBitmaps     = RT_BOOL(fProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS);
+        pFeatures->fVmxMonitorExit       = RT_BOOL(fProcCtls & VMX_PROC_CTLS_MONITOR_EXIT);
+        pFeatures->fVmxPauseExit         = RT_BOOL(fProcCtls & VMX_PROC_CTLS_PAUSE_EXIT);
+        pFeatures->fVmxSecondaryExecCtls = RT_BOOL(fProcCtls & VMX_PROC_CTLS_USE_SECONDARY_CTLS);
+    }
+
+    /* Secondary processor-based VM-execution controls. */
+    {
+        uint32_t const fProcCtls2 = pFeatures->fVmxSecondaryExecCtls ? pVmxMsrs->ProcCtls2.n.allowed1 : 0;
+        pFeatures->fVmxVirtApicAccess    = RT_BOOL(fProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS);
+        pFeatures->fVmxEpt               = RT_BOOL(fProcCtls2 & VMX_PROC_CTLS2_EPT);
+        pFeatures->fVmxDescTableExit     = RT_BOOL(fProcCtls2 & VMX_PROC_CTLS2_DESC_TABLE_EXIT);
+        pFeatures->fVmxRdtscp            = RT_BOOL(fProcCtls2 & VMX_PROC_CTLS2_RDTSCP);
+        pFeatures->fVmxVirtX2ApicMode    = RT_BOOL(fProcCtls2 & VMX_PROC_CTLS2_VIRT_X2APIC_MODE);
+        pFeatures->fVmxVpid              = RT_BOOL(fProcCtls2 & VMX_PROC_CTLS2_VPID);
+        pFeatures->fVmxWbinvdExit        = RT_BOOL(fProcCtls2 & VMX_PROC_CTLS2_WBINVD_EXIT);
+        pFeatures->fVmxUnrestrictedGuest = RT_BOOL(fProcCtls2 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST);
+        pFeatures->fVmxApicRegVirt       = RT_BOOL(fProcCtls2 & VMX_PROC_CTLS2_APIC_REG_VIRT);
+        pFeatures->fVmxVirtIntDelivery   = RT_BOOL(fProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY);
+        pFeatures->fVmxPauseLoopExit     = RT_BOOL(fProcCtls2 & VMX_PROC_CTLS2_PAUSE_LOOP_EXIT);
+        pFeatures->fVmxRdrandExit        = RT_BOOL(fProcCtls2 & VMX_PROC_CTLS2_RDRAND_EXIT);
+        pFeatures->fVmxInvpcid           = RT_BOOL(fProcCtls2 & VMX_PROC_CTLS2_INVPCID);
+        pFeatures->fVmxVmFunc            = RT_BOOL(fProcCtls2 & VMX_PROC_CTLS2_VMFUNC);
+        pFeatures->fVmxVmcsShadowing     = RT_BOOL(fProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING);
+        pFeatures->fVmxRdseedExit        = RT_BOOL(fProcCtls2 & VMX_PROC_CTLS2_RDSEED_EXIT);
+        pFeatures->fVmxPml               = RT_BOOL(fProcCtls2 & VMX_PROC_CTLS2_PML);
+        pFeatures->fVmxEptXcptVe         = RT_BOOL(fProcCtls2 & VMX_PROC_CTLS2_EPT_VE);
+        pFeatures->fVmxXsavesXrstors     = RT_BOOL(fProcCtls2 & VMX_PROC_CTLS2_XSAVES_XRSTORS);
+        pFeatures->fVmxUseTscScaling     = RT_BOOL(fProcCtls2 & VMX_PROC_CTLS2_TSC_SCALING);
+    }
+
+    /* VM-exit controls. */
+    {
+        uint32_t const fExitCtls = pVmxMsrs->ExitCtls.n.allowed1;
+        pFeatures->fVmxExitSaveDebugCtls = RT_BOOL(fExitCtls & VMX_EXIT_CTLS_SAVE_DEBUG);
+        pFeatures->fVmxHostAddrSpaceSize = RT_BOOL(fExitCtls & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE);
+        pFeatures->fVmxExitAckExtInt     = RT_BOOL(fExitCtls & VMX_EXIT_CTLS_ACK_EXT_INT);
+        pFeatures->fVmxExitSavePatMsr    = RT_BOOL(fExitCtls & VMX_EXIT_CTLS_SAVE_PAT_MSR);
+        pFeatures->fVmxExitLoadPatMsr    = RT_BOOL(fExitCtls & VMX_EXIT_CTLS_LOAD_PAT_MSR);
+        pFeatures->fVmxExitSaveEferMsr   = RT_BOOL(fExitCtls & VMX_EXIT_CTLS_SAVE_EFER_MSR);
+        pFeatures->fVmxExitLoadEferMsr   = RT_BOOL(fExitCtls & VMX_EXIT_CTLS_LOAD_EFER_MSR);
+        pFeatures->fVmxSavePreemptTimer  = RT_BOOL(fExitCtls & VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER);
+    }
+
+    /* VM-entry controls. */
+    {
+        uint32_t const fEntryCtls = pVmxMsrs->EntryCtls.n.allowed1;
+        pFeatures->fVmxEntryLoadDebugCtls = RT_BOOL(fEntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG);
+        pFeatures->fVmxIa32eModeGuest     = RT_BOOL(fEntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
+        pFeatures->fVmxEntryLoadEferMsr   = RT_BOOL(fEntryCtls & VMX_ENTRY_CTLS_LOAD_EFER_MSR);
+        pFeatures->fVmxEntryLoadPatMsr    = RT_BOOL(fEntryCtls & VMX_ENTRY_CTLS_LOAD_PAT_MSR);
+    }
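The miscellaneous-data MSR decoded next packs several narrow fields into one 64-bit value. For orientation, a standalone sketch extracting two of them with plain shifts and masks (bit positions per Intel SDM Vol. 3, Appendix A.6; helper names are hypothetical):

    #include <stdint.h>

    /* Sketch: decode two IA32_VMX_MISC fields without the RT_BF_* helpers. */
    static uint32_t vmxMiscPreemptTimerRate(uint64_t uMiscMsr)
    {
        return (uint32_t)(uMiscMsr & 0x1f);   /* bits 4:0: TSC-to-timer rate shift */
    }

    static uint32_t vmxMiscMaxAutoMsrs(uint64_t uMiscMsr)
    {
        /* Bits 27:25 hold N; the recommended MSR-list maximum is 512 * (N + 1). */
        return 512 * (((uint32_t)(uMiscMsr >> 25) & 0x7) + 1);
    }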
+
+    /* Miscellaneous data. */
+    {
+        uint32_t const fMiscData = pVmxMsrs->u64Misc;
+        pFeatures->fVmxExitSaveEferLma    = RT_BOOL(fMiscData & VMX_MISC_EXIT_SAVE_EFER_LMA);
+        pFeatures->fVmxIntelPt            = RT_BOOL(fMiscData & VMX_MISC_INTEL_PT);
+        pFeatures->fVmxVmwriteAll         = RT_BOOL(fMiscData & VMX_MISC_VMWRITE_ALL);
+        pFeatures->fVmxEntryInjectSoftInt = RT_BOOL(fMiscData & VMX_MISC_ENTRY_INJECT_SOFT_INT);
+    }
+}
+
+
+int cpumR3CpuIdExplodeFeatures(PCCPUMCPUIDLEAF paLeaves, uint32_t cLeaves, PCCPUMMSRS pMsrs, PCPUMFEATURES pFeatures)
+{
+    Assert(pMsrs);
     RT_ZERO(*pFeatures);
     if (cLeaves >= 2)

@@ -1747 +1854 @@
     pFeatures->fPcid = RT_BOOL(pStd1Leaf->uEcx & X86_CPUID_FEATURE_ECX_PCID);
     pFeatures->fVmx  = RT_BOOL(pStd1Leaf->uEcx & X86_CPUID_FEATURE_ECX_VMX);
-    /* VMX sub-features will be initialized in cpumR3InitVmxCpuFeatures(). */
+    if (pFeatures->fVmx)
+        cpumR3ExplodeVmxFeatures(&pMsrs->hwvirt.vmx, pFeatures);

     /* Structured extended features. */

@@ -2186 +2294 @@
  * @param   paLeaves    The leaves.  These will be copied (but not freed).
  * @param   cLeaves     The number of leaves.
+ * @param   pMsrs       The MSRs.
  */
-static int cpumR3CpuIdInstallAndExplodeLeaves(PVM pVM, PCPUM pCpum, PCPUMCPUIDLEAF paLeaves, uint32_t cLeaves)
+static int cpumR3CpuIdInstallAndExplodeLeaves(PVM pVM, PCPUM pCpum, PCPUMCPUIDLEAF paLeaves, uint32_t cLeaves, PCCPUMMSRS pMsrs)
 {
     cpumR3CpuIdAssertOrder(paLeaves, cLeaves);

@@ -2235 +2344 @@
      * Explode the guest CPU features.
      */
-    rc = cpumR3CpuIdExplodeFeatures(pCpum->GuestInfo.paCpuIdLeavesR3, pCpum->GuestInfo.cCpuIdLeaves, &pCpum->GuestFeatures);
+    rc = cpumR3CpuIdExplodeFeatures(pCpum->GuestInfo.paCpuIdLeavesR3, pCpum->GuestInfo.cCpuIdLeaves, pMsrs,
+                                    &pCpum->GuestFeatures);
     AssertLogRelRCReturn(rc, rc);

@@ -4219 +4329 @@
  * @returns VBox status code.
  * @param   pVM         The cross context VM structure.
+ * @param   pHostMsrs   Pointer to the host MSRs.
  */
-int cpumR3InitCpuIdAndMsrs(PVM pVM)
-{
+int cpumR3InitCpuIdAndMsrs(PVM pVM, PCCPUMMSRS pHostMsrs)
+{
+    Assert(pHostMsrs);
+
     PCPUM     pCpum    = &pVM->cpum.s;
     PCFGMNODE pCpumCfg = CFGMR3GetChild(CFGMR3GetRoot(pVM), "CPUM");

@@ -4281 +4394 @@
                            "Please use IMachine::setCPUIDLeaf() instead.");

+    CPUMMSRS GuestMsrs;
+    RT_ZERO(GuestMsrs);
+
     /*
      * Pre-explode the CPUID info.
      */
     if (RT_SUCCESS(rc))
-        rc = cpumR3CpuIdExplodeFeatures(pCpum->GuestInfo.paCpuIdLeavesR3, pCpum->GuestInfo.cCpuIdLeaves, &pCpum->GuestFeatures);
+    {
+        rc = cpumR3CpuIdExplodeFeatures(pCpum->GuestInfo.paCpuIdLeavesR3, pCpum->GuestInfo.cCpuIdLeaves, &GuestMsrs,
+                                        &pCpum->GuestFeatures);
+    }

@@ -4325 +4444 @@
     void *pvFree = pCpum->GuestInfo.paCpuIdLeavesR3;
     int rc1 = cpumR3CpuIdInstallAndExplodeLeaves(pVM, pCpum, pCpum->GuestInfo.paCpuIdLeavesR3,
-                                                 pCpum->GuestInfo.cCpuIdLeaves);
+                                                 pCpum->GuestInfo.cCpuIdLeaves, &GuestMsrs);
     RTMemFree(pvFree);

@@ -4339 +4458 @@
     pCpum->GuestInfo.paMsrRangesRC = MMHyperR3ToRC(pVM, pCpum->GuestInfo.paMsrRangesR3);

+    /*
+     * Finally, initialize guest VMX MSRs.
+     *
+     * This needs to be done -after- exploding guest features and sanitizing CPUID leaves
+     * as constructing VMX capabilities MSRs relies on CPU feature bits such as long mode,
+     * unrestricted execution and possibly more in the future.
+     */
+    if (pVM->cpum.s.GuestFeatures.fVmx)
+    {
+        Assert(Config.fNestedHWVirt);
+        cpumR3InitVmxGuestFeaturesAndMsrs(pVM, &pHostMsrs->hwvirt.vmx, &GuestMsrs.hwvirt.vmx);
+
+        /* Copy MSRs to all VCPUs. */
+        PCVMXMSRS pVmxMsrs = &GuestMsrs.hwvirt.vmx;
+        for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
+        {
+            PVMCPU pVCpu = &pVM->aCpus[idCpu];
+            memcpy(&pVCpu->cpum.s.Guest.hwvirt.vmx.Msrs, pVmxMsrs, sizeof(*pVmxMsrs));
+        }
+    }

@@ -5116 +5255 @@
 * @param   paLeaves    Guest CPUID leaves loaded from the state.
 * @param   cLeaves     The number of leaves in @a paLeaves.
+* @param   pMsrs       The guest MSRs.
 */
-int cpumR3LoadCpuIdInner(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, PCPUMCPUIDLEAF paLeaves, uint32_t cLeaves)
+int cpumR3LoadCpuIdInner(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, PCPUMCPUIDLEAF paLeaves, uint32_t cLeaves, PCCPUMMSRS pMsrs)
 {
     AssertMsgReturn(uVersion >= CPUM_SAVED_STATE_VERSION_VER3_2, ("%u\n", uVersion), VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION);

@@ -5859 +5999 @@
     pVM->cpum.s.GuestInfo.paCpuIdLeavesRC = NIL_RTRCPTR;
     pVM->cpum.s.GuestInfo.DefCpuId        = GuestDefCpuId;
-    rc = cpumR3CpuIdInstallAndExplodeLeaves(pVM, &pVM->cpum.s, paLeaves, cLeaves);
+    rc = cpumR3CpuIdInstallAndExplodeLeaves(pVM, &pVM->cpum.s, paLeaves, cLeaves, pMsrs);
     AssertLogRelRCReturn(rc, rc);

@@ -5873 +6013 @@
 * @param   pSSM        The saved state handle.
 * @param   uVersion    The format version.
+* @param   pMsrs       The guest MSRs.
 */
-int cpumR3LoadCpuId(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion)
+int cpumR3LoadCpuId(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, PCCPUMMSRS pMsrs)
 {
     AssertMsgReturn(uVersion >= CPUM_SAVED_STATE_VERSION_VER3_2, ("%u\n", uVersion), VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION);

@@ -5888 +6029 @@
     if (RT_SUCCESS(rc))
     {
-        rc = cpumR3LoadCpuIdInner(pVM, pSSM, uVersion, paLeaves, cLeaves);
+        rc = cpumR3LoadCpuIdInner(pVM, pSSM, uVersion, paLeaves, cLeaves, pMsrs);
         RTMemFree(paLeaves);
     }
-
trunk/src/VBox/VMM/VMMR3/HM.cpp
r76290 r76464

@@ -703 +703 @@
     if (fCaps & SUPVTCAPS_AMD_V)
     {
-        rc = SUPR3CallVMMR0Ex(pVM->pVMR0, 0 /*idCpu*/, VMMR0_DO_HM_PRE_INIT, 0, NULL);
-        AssertRCReturn(rc, rc);
-        Assert(pVM->hm.s.svm.fSupported);
-
+        pVM->hm.s.svm.fSupported = true;
         LogRel(("HM: HMR3Init: AMD-V%s\n", fCaps & SUPVTCAPS_NESTED_PAGING ? " w/ nested paging" : ""));
         VM_SET_MAIN_EXECUTION_ENGINE(pVM, VM_EXEC_ENGINE_HW_VIRT);

@@ -716 +713 @@
     if (RT_SUCCESS(rc))
     {
-        rc = SUPR3CallVMMR0Ex(pVM->pVMR0, 0 /*idCpu*/, VMMR0_DO_HM_PRE_INIT, 0, NULL);
-        AssertRCReturn(rc, rc);
-        Assert(pVM->hm.s.vmx.fSupported);
-
+        pVM->hm.s.vmx.fSupported = true;
         LogRel(("HM: HMR3Init: VT-x%s%s%s\n",
                 fCaps & SUPVTCAPS_NESTED_PAGING ? " w/ nested paging" : "",
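With the ring-0 pre-init round trip gone, support detection in HMR3Init() reduces to testing the SUPR3QueryVTCaps() flags; the hardware MSRs themselves are read in ring-0 later, during HMR0Init(). A condensed sketch of the resulting logic, assuming HM.cpp's internal include context (HMInternal.h and friends):

    #include <VBox/sup.h>
    #include <iprt/errcore.h>

    /* Sketch: set the fSupported flags straight from the support driver's
       capability flags instead of bouncing through VMMR0_DO_HM_PRE_INIT. */
    static void hmDetectSupportSketch(PVM pVM)
    {
        uint32_t fCaps = 0;
        if (RT_SUCCESS(SUPR3QueryVTCaps(&fCaps)))
        {
            if (fCaps & SUPVTCAPS_AMD_V)
                pVM->hm.s.svm.fSupported = true;
            else if (fCaps & SUPVTCAPS_VT_X)
                pVM->hm.s.vmx.fSupported = true;
        }
    }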
-
trunk/src/VBox/VMM/include/CPUMInternal.h
r75611 r76464

@@ -530 +530 @@
 # ifdef IN_RING3
 int  cpumR3DbgInit(PVM pVM);
-int  cpumR3CpuIdExplodeFeatures(PCCPUMCPUIDLEAF paLeaves, uint32_t cLeaves, PCPUMFEATURES pFeatures);
-int  cpumR3InitCpuIdAndMsrs(PVM pVM);
+int  cpumR3CpuIdExplodeFeatures(PCCPUMCPUIDLEAF paLeaves, uint32_t cLeaves, PCCPUMMSRS pMsrs, PCPUMFEATURES pFeatures);
+int  cpumR3InitCpuIdAndMsrs(PVM pVM, PCCPUMMSRS pHostMsrs);
+void cpumR3InitVmxGuestFeaturesAndMsrs(PVM pVM, PCVMXMSRS pHostVmxMsrs, PVMXMSRS pGuestVmxMsrs);
 void cpumR3SaveCpuId(PVM pVM, PSSMHANDLE pSSM);
-int  cpumR3LoadCpuId(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion);
+int  cpumR3LoadCpuId(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, PCCPUMMSRS pGuestMsrs);
 int  cpumR3LoadCpuIdPre32(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion);
 DECLCALLBACK(void) cpumR3CpuIdInfo(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
-
trunk/src/VBox/VMM/include/CPUMInternal.mac
r76148 r76464

@@ -253 +253 @@
     alignb 8
     .Guest.hwvirt.svm.HCPhysVmcb            RTHCPHYS_RES 1
-    .Guest.hwvirt.svm.u64Padding0           resb  120
+    .Guest.hwvirt.svm.u64Padding0           resq  19
     .Guest.hwvirt.enmHwvirt                 resd  1
     .Guest.hwvirt.fGif                      resb  1

@@ -543 +543 @@
     alignb 8
     .Hyper.hwvirt.svm.HCPhysVmcb            RTHCPHYS_RES 1
-    .Hyper.hwvirt.svm.u64Padding0           resb  120
+    .Hyper.hwvirt.svm.u64Padding0           resq  19
     .Hyper.hwvirt.enmHwvirt                 resd  1
     .Hyper.hwvirt.fGif                      resb  1
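The padding change is a size fix rather than a cosmetic one: resb 120 reserved 120 bytes, while resq 19 reserves 19 quadwords, i.e. 152 bytes, presumably tracking the grown hwvirt union on the C side now that the guest VMX MSRs live in CPUMCTX. A compile-time sketch of the arithmetic:

    #include <iprt/assert.h>
    #include <stdint.h>

    /* Sketch: the .mac reservation must match the C-side layout byte for
       byte; 'resq 19' reserves 19 * 8 = 152 bytes, not the 120 bytes the
       old 'resb 120' reserved. */
    AssertCompile(19 * sizeof(uint64_t) == 152);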