Changeset 87532 in vbox for trunk/src/VBox/VMM
- Timestamp: Feb 2, 2021 11:56:56 AM (4 years ago)
- svn:sync-xref-src-repo-rev: 142558
- Location: trunk/src/VBox/VMM
- Files: 2 edited
Legend:
- Unmodified
- Added
- Removed
trunk/src/VBox/VMM/VMMR0/HMR0.cpp
r87522 r87532 118 118 /** The shift mask employed by the VMX-Preemption timer. */ 119 119 uint8_t cPreemptTimerShift; 120 /** Padding. */121 uint8_t abPadding[3];122 120 /** Whether we're using the preemption timer or not. */ 123 121 bool fUsePreemptTimer; … … 127 125 * module termination. */ 128 126 bool fCalledSUPR0EnableVTx; 129 /** Set to by us to indicate VMX is supported by the CPU. */130 bool fSupported;131 127 } vmx; 132 128 … … 138 134 /** SVM feature bits from cpuid 0x8000000a */ 139 135 uint32_t u32Features; 140 /** Padding. */141 bool afPadding[3];142 /** Set by us to indicate SVM is supported by the CPU. */143 bool fSupported;144 136 } svm; 145 137 } u; … … 167 159 } g_HmR0; 168 160 161 162 /** Set if VT-x (VMX) is supported by the CPU. */ 163 bool g_fHmVmxSupported = false; 164 /** Set if AMD-V is supported by the CPU. */ 165 bool g_fHmSvmSupported = false; 169 166 /** Maximum allowed ASID/VPID (inclusive). 170 167 * @todo r=bird: This is exclusive for VT-x according to source code comment. … … 316 313 if (RT_SUCCESS(rc)) 317 314 { 318 g_ HmR0.hwvirt.u.vmx.fSupported = true;315 g_fHmVmxSupported = true; 319 316 rc = SUPR0EnableVTx(false /* fEnable */); 320 317 AssertLogRelRC(rc); … … 386 383 if (RT_SUCCESS(rc)) 387 384 { 388 g_ HmR0.hwvirt.u.vmx.fSupported = true;385 g_fHmVmxSupported = true; 389 386 VMXDisable(); 390 387 } … … 403 400 */ 404 401 g_HmR0.rcInit = VERR_VMX_IN_VMX_ROOT_MODE; 405 Assert(g_ HmR0.hwvirt.u.vmx.fSupported == false);402 Assert(g_fHmVmxSupported == false); 406 403 } 407 404 … … 416 413 } 417 414 418 if (g_ HmR0.hwvirt.u.vmx.fSupported)415 if (g_fHmVmxSupported) 419 416 { 420 417 rc = VMXR0GlobalInit(); … … 510 507 { 511 508 SUPR0GetHwvirtMsrs(&g_HmR0.hwvirt.Msrs, SUPVTCAPS_AMD_V, false /* fForce */); 512 g_ HmR0.hwvirt.u.svm.fSupported = true;509 g_fHmSvmSupported = true; 513 510 } 514 511 else … … 562 559 /* Default is global VT-x/AMD-V init. 
*/ 563 560 g_HmR0.fGlobalInit = true; 561 562 g_fHmVmxSupported = false; 563 g_fHmSvmSupported = false; 564 g_uHmMaxAsid = 0; 564 565 565 566 /* … … 623 624 { 624 625 int rc; 625 if ( g_ HmR0.hwvirt.u.vmx.fSupported626 if ( g_fHmVmxSupported 626 627 && g_HmR0.hwvirt.u.vmx.fUsingSUPR0EnableVTx) 627 628 { … … 647 648 else 648 649 { 649 Assert(!g_ HmR0.hwvirt.u.vmx.fSupported || !g_HmR0.hwvirt.u.vmx.fUsingSUPR0EnableVTx);650 Assert(!g_fHmVmxSupported || !g_HmR0.hwvirt.u.vmx.fUsingSUPR0EnableVTx); 650 651 651 652 /* Doesn't really matter if this fails. */ … … 694 695 * should move into their respective modules. */ 695 696 /* Finally, call global VT-x/AMD-V termination. */ 696 if (g_ HmR0.hwvirt.u.vmx.fSupported)697 if (g_fHmVmxSupported) 697 698 VMXR0GlobalTerm(); 698 else if (g_ HmR0.hwvirt.u.svm.fSupported)699 else if (g_fHmSvmSupported) 699 700 SVMR0GlobalTerm(); 700 701 … … 765 766 766 767 int rc; 767 if ( g_ HmR0.hwvirt.u.vmx.fSupported768 if ( g_fHmVmxSupported 768 769 && g_HmR0.hwvirt.u.vmx.fUsingSUPR0EnableVTx) 769 770 rc = g_HmR0.pfnEnableCpu(pHostCpu, pVM, NULL /* pvCpuPage */, NIL_RTHCPHYS, true, &g_HmR0.hwvirt.Msrs); … … 838 839 839 840 int rc; 840 if ( g_ HmR0.hwvirt.u.vmx.fSupported841 if ( g_fHmVmxSupported 841 842 && g_HmR0.hwvirt.u.vmx.fUsingSUPR0EnableVTx) 842 843 { … … 942 943 PHMPHYSCPU pHostCpu = &g_HmR0.aCpuInfo[idCpu]; 943 944 944 Assert(!g_ HmR0.hwvirt.u.vmx.fSupported || !g_HmR0.hwvirt.u.vmx.fUsingSUPR0EnableVTx);945 Assert(!g_fHmVmxSupported || !g_HmR0.hwvirt.u.vmx.fUsingSUPR0EnableVTx); 945 946 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD)); 946 947 Assert(idCpu == (RTCPUID)RTMpCpuIdToSetIndex(idCpu)); /** @todo fix idCpu == index assumption (rainy day) */ … … 1011 1012 { 1012 1013 NOREF(pvData); 1013 Assert(!g_ HmR0.hwvirt.u.vmx.fSupported || !g_HmR0.hwvirt.u.vmx.fUsingSUPR0EnableVTx);1014 Assert(!g_fHmVmxSupported || !g_HmR0.hwvirt.u.vmx.fUsingSUPR0EnableVTx); 1014 1015 1015 1016 /* … … 1052 1053 { 1053 1054 NOREF(pvUser); 1054 Assert(!g_ 
HmR0.hwvirt.u.vmx.fSupported || !g_HmR0.hwvirt.u.vmx.fUsingSUPR0EnableVTx);1055 Assert(!g_fHmVmxSupported || !g_HmR0.hwvirt.u.vmx.fUsingSUPR0EnableVTx); 1055 1056 1056 1057 #ifdef LOG_ENABLED … … 1084 1085 /* Reinit the CPUs from scratch as the suspend state might have 1085 1086 messed with the MSRs. (lousy BIOSes as usual) */ 1086 if (g_ HmR0.hwvirt.u.vmx.fSupported)1087 if (g_fHmVmxSupported) 1087 1088 rc = RTMpOnAll(hmR0InitIntelCpu, &FirstRc, NULL); 1088 1089 else … … 1313 1314 if (!g_HmR0.fGlobalInit) 1314 1315 { 1315 Assert(!g_ HmR0.hwvirt.u.vmx.fSupported || !g_HmR0.hwvirt.u.vmx.fUsingSUPR0EnableVTx);1316 Assert(!g_fHmVmxSupported || !g_HmR0.hwvirt.u.vmx.fUsingSUPR0EnableVTx); 1316 1317 rc = hmR0EnableCpu(pVM, idCpu); 1317 1318 if (RT_FAILURE(rc)) … … 1328 1329 if (!g_HmR0.fGlobalInit) 1329 1330 { 1330 Assert(!g_ HmR0.hwvirt.u.vmx.fSupported || !g_HmR0.hwvirt.u.vmx.fUsingSUPR0EnableVTx);1331 Assert(!g_fHmVmxSupported || !g_HmR0.hwvirt.u.vmx.fUsingSUPR0EnableVTx); 1331 1332 int rc2 = hmR0DisableCpu(idCpu); 1332 1333 AssertRC(rc2); … … 1381 1382 1382 1383 /* Reload host-state (back from ring-3/migrated CPUs) and shared guest/host bits. 
*/ 1383 if (g_ HmR0.hwvirt.u.vmx.fSupported)1384 if (g_fHmVmxSupported) 1384 1385 pVCpu->hm.s.fCtxChanged |= HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE; 1385 1386 else … … 1410 1411 if (RT_SUCCESS(rc)) 1411 1412 { 1412 if (g_HmR0.hwvirt.u.vmx.fSupported) 1413 { 1414 Assert((pVCpu->hm.s.fCtxChanged & (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE)) 1415 == (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE)); 1416 } 1413 if (g_fHmVmxSupported) 1414 Assert( (pVCpu->hm.s.fCtxChanged & (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE)) 1415 == (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE)); 1417 1416 else 1418 { 1419 Assert((pVCpu->hm.s.fCtxChanged & (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_SVM_HOST_GUEST_SHARED_STATE)) 1420 == (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_SVM_HOST_GUEST_SHARED_STATE)); 1421 } 1417 Assert( (pVCpu->hm.s.fCtxChanged & (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_SVM_HOST_GUEST_SHARED_STATE)) 1418 == (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_SVM_HOST_GUEST_SHARED_STATE)); 1422 1419 1423 1420 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE -
trunk/src/VBox/VMM/include/HMInternal.h
r87531 r87532 548 548 uint32_t u32Alignment1; 549 549 550 /** Host-physical address for a failing VMXON instruction . */550 /** Host-physical address for a failing VMXON instruction (diagnostics). */ 551 551 RTHCPHYS HCPhysVmxEnableError; 552 552 … … 578 578 uint8_t u8Alignment0[2]; 579 579 580 /* HWCR MSR (for diagnostics)*/580 /** HWCR MSR (for diagnostics). */ 581 581 uint64_t u64MsrHwcr; 582 582 … … 1443 1443 1444 1444 #ifdef IN_RING0 1445 extern bool g_fHmVmxSupported; 1446 extern bool g_fHmSvmSupported; 1445 1447 extern uint32_t g_uHmMaxAsid; 1446 1448
Note: See TracChangeset for help on using the changeset viewer.