Changeset 81005 in vbox for trunk/src/VBox/HostDrivers/Support
Timestamp: Sep 25, 2019, 10:20:59 AM
File: 1 edited (trunk/src/VBox/HostDrivers/Support/SUPDrv.cpp)
trunk/src/VBox/HostDrivers/Support/SUPDrv.cpp
--- trunk/src/VBox/HostDrivers/Support/SUPDrv.cpp (r81004)
+++ trunk/src/VBox/HostDrivers/Support/SUPDrv.cpp (r81005)
@@ -496,8 +496,4 @@
 #endif /* RT_OS_DARWIN || RT_OS_SOLARIS || RT_OS_FREEBSD */
 
-/** Hardware-virtualization MSRs. */
-static SUPHWVIRTMSRS        g_HwvirtMsrs;
-/** Whether the hardware-virtualization MSRs are cached. */
-static bool                 g_fHwvirtMsrsCached;
 
 
@@ -4604,29 +4600,12 @@
     RTThreadPreemptDisable(&PreemptState);
 
-    /** @todo Disabled caching for now until proper locking is implemented. */
-#if 0
-    /*
-     * Querying MSRs from hardware can be expensive (exponentially more so
-     * in a nested-virtualization scenario if they happen to cause VM-exits).
-     *
-     * So, if the caller does not force re-querying of MSRs and we have them
-     * already cached, simply copy the cached MSRs and we're done.
-     */
-    if (   !fForce
-        && ASMAtomicReadBool(&g_fHwvirtMsrsCached))
-    {
-        memcpy(pMsrs, &g_HwvirtMsrs, sizeof(*pMsrs));
-        RTThreadPreemptRestore(&PreemptState);
-        return VINF_SUCCESS;
-    }
-#else
-    RT_NOREF(fForce);
-#endif
-
-    /*
-     * Query the MSRs from hardware, since it's either the first call since
-     * driver load or the caller has forced re-querying of the MSRs.
-     */
-    RT_ZERO(*pMsrs);
+    /*
+     * Query the MSRs from the hardware.
+     */
+    /** @todo Cache MSR values so future accesses can avoid querying the hardware as
+     *        it may be expensive (esp. in nested virtualization scenarios). Do this
+     *        with proper locking and race safety. */
+    SUPHWVIRTMSRS Msrs;
+    RT_ZERO(Msrs);
 
     /* If the caller claims VT-x/AMD-V is supported, don't need to recheck it. */
@@ -4639,44 +4618,40 @@
     if (fCaps & SUPVTCAPS_VT_X)
     {
-        g_HwvirtMsrs.u.vmx.u64FeatCtrl      = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);
-        g_HwvirtMsrs.u.vmx.u64Basic         = ASMRdMsr(MSR_IA32_VMX_BASIC);
-        g_HwvirtMsrs.u.vmx.u64PinCtls       = ASMRdMsr(MSR_IA32_VMX_PINBASED_CTLS);
-        g_HwvirtMsrs.u.vmx.u64ProcCtls      = ASMRdMsr(MSR_IA32_VMX_PROCBASED_CTLS);
-        g_HwvirtMsrs.u.vmx.u64ExitCtls      = ASMRdMsr(MSR_IA32_VMX_EXIT_CTLS);
-        g_HwvirtMsrs.u.vmx.u64EntryCtls     = ASMRdMsr(MSR_IA32_VMX_ENTRY_CTLS);
-        g_HwvirtMsrs.u.vmx.u64Misc          = ASMRdMsr(MSR_IA32_VMX_MISC);
-        g_HwvirtMsrs.u.vmx.u64Cr0Fixed0     = ASMRdMsr(MSR_IA32_VMX_CR0_FIXED0);
-        g_HwvirtMsrs.u.vmx.u64Cr0Fixed1     = ASMRdMsr(MSR_IA32_VMX_CR0_FIXED1);
-        g_HwvirtMsrs.u.vmx.u64Cr4Fixed0     = ASMRdMsr(MSR_IA32_VMX_CR4_FIXED0);
-        g_HwvirtMsrs.u.vmx.u64Cr4Fixed1     = ASMRdMsr(MSR_IA32_VMX_CR4_FIXED1);
-        g_HwvirtMsrs.u.vmx.u64VmcsEnum      = ASMRdMsr(MSR_IA32_VMX_VMCS_ENUM);
-
-        if (RT_BF_GET(g_HwvirtMsrs.u.vmx.u64Basic, VMX_BF_BASIC_TRUE_CTLS))
-        {
-            g_HwvirtMsrs.u.vmx.u64TruePinCtls   = ASMRdMsr(MSR_IA32_VMX_TRUE_PINBASED_CTLS);
-            g_HwvirtMsrs.u.vmx.u64TrueProcCtls  = ASMRdMsr(MSR_IA32_VMX_TRUE_PROCBASED_CTLS);
-            g_HwvirtMsrs.u.vmx.u64TrueEntryCtls = ASMRdMsr(MSR_IA32_VMX_TRUE_ENTRY_CTLS);
-            g_HwvirtMsrs.u.vmx.u64TrueExitCtls  = ASMRdMsr(MSR_IA32_VMX_TRUE_EXIT_CTLS);
-        }
-
-        uint32_t const fProcCtlsAllowed1 = RT_HI_U32(g_HwvirtMsrs.u.vmx.u64ProcCtls);
+        Msrs.u.vmx.u64FeatCtrl      = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);
+        Msrs.u.vmx.u64Basic         = ASMRdMsr(MSR_IA32_VMX_BASIC);
+        Msrs.u.vmx.u64PinCtls       = ASMRdMsr(MSR_IA32_VMX_PINBASED_CTLS);
+        Msrs.u.vmx.u64ProcCtls      = ASMRdMsr(MSR_IA32_VMX_PROCBASED_CTLS);
+        Msrs.u.vmx.u64ExitCtls      = ASMRdMsr(MSR_IA32_VMX_EXIT_CTLS);
+        Msrs.u.vmx.u64EntryCtls     = ASMRdMsr(MSR_IA32_VMX_ENTRY_CTLS);
+        Msrs.u.vmx.u64Misc          = ASMRdMsr(MSR_IA32_VMX_MISC);
+        Msrs.u.vmx.u64Cr0Fixed0     = ASMRdMsr(MSR_IA32_VMX_CR0_FIXED0);
+        Msrs.u.vmx.u64Cr0Fixed1     = ASMRdMsr(MSR_IA32_VMX_CR0_FIXED1);
+        Msrs.u.vmx.u64Cr4Fixed0     = ASMRdMsr(MSR_IA32_VMX_CR4_FIXED0);
+        Msrs.u.vmx.u64Cr4Fixed1     = ASMRdMsr(MSR_IA32_VMX_CR4_FIXED1);
+        Msrs.u.vmx.u64VmcsEnum      = ASMRdMsr(MSR_IA32_VMX_VMCS_ENUM);
+
+        if (RT_BF_GET(Msrs.u.vmx.u64Basic, VMX_BF_BASIC_TRUE_CTLS))
+        {
+            Msrs.u.vmx.u64TruePinCtls   = ASMRdMsr(MSR_IA32_VMX_TRUE_PINBASED_CTLS);
+            Msrs.u.vmx.u64TrueProcCtls  = ASMRdMsr(MSR_IA32_VMX_TRUE_PROCBASED_CTLS);
+            Msrs.u.vmx.u64TrueEntryCtls = ASMRdMsr(MSR_IA32_VMX_TRUE_ENTRY_CTLS);
+            Msrs.u.vmx.u64TrueExitCtls  = ASMRdMsr(MSR_IA32_VMX_TRUE_EXIT_CTLS);
+        }
+
+        uint32_t const fProcCtlsAllowed1 = RT_HI_U32(Msrs.u.vmx.u64ProcCtls);
         if (fProcCtlsAllowed1 & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
         {
-            g_HwvirtMsrs.u.vmx.u64ProcCtls2 = ASMRdMsr(MSR_IA32_VMX_PROCBASED_CTLS2);
-
-            uint32_t const fProcCtls2Allowed1 = RT_HI_U32(g_HwvirtMsrs.u.vmx.u64ProcCtls2);
+            Msrs.u.vmx.u64ProcCtls2 = ASMRdMsr(MSR_IA32_VMX_PROCBASED_CTLS2);
+
+            uint32_t const fProcCtls2Allowed1 = RT_HI_U32(Msrs.u.vmx.u64ProcCtls2);
             if (fProcCtls2Allowed1 & (VMX_PROC_CTLS2_EPT | VMX_PROC_CTLS2_VPID))
-                g_HwvirtMsrs.u.vmx.u64EptVpidCaps = ASMRdMsr(MSR_IA32_VMX_EPT_VPID_CAP);
+                Msrs.u.vmx.u64EptVpidCaps = ASMRdMsr(MSR_IA32_VMX_EPT_VPID_CAP);
 
             if (fProcCtls2Allowed1 & VMX_PROC_CTLS2_VMFUNC)
-                g_HwvirtMsrs.u.vmx.u64VmFunc = ASMRdMsr(MSR_IA32_VMX_VMFUNC);
-        }
-        ASMAtomicWriteBool(&g_fHwvirtMsrsCached, true);
+                Msrs.u.vmx.u64VmFunc = ASMRdMsr(MSR_IA32_VMX_VMFUNC);
+        }
     }
     else if (fCaps & SUPVTCAPS_AMD_V)
-    {
-        g_HwvirtMsrs.u.svm.u64MsrHwcr = ASMRdMsr(MSR_K8_HWCR);
-        ASMAtomicWriteBool(&g_fHwvirtMsrsCached, true);
-    }
+        Msrs.u.svm.u64MsrHwcr = ASMRdMsr(MSR_K8_HWCR);
     else
     {
@@ -4687,7 +4662,7 @@
 
     /*
-     * We have successfully populated the cache, copy the MSRs to the caller.
+     * Copy the MSRs out.
      */
-    memcpy(pMsrs, &g_HwvirtMsrs, sizeof(*pMsrs));
+    memcpy(pMsrs, &Msrs, sizeof(*pMsrs));
 }
 
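The whole sequence runs with preemption disabled (RTThreadPreemptDisable, later restored) because RDMSR reads the register of whichever CPU the thread happens to be executing on; keeping the thread on one CPU for the duration makes the MSR snapshot internally consistent. ASMRdMsr wraps the privileged RDMSR instruction, which takes the MSR index in ECX and returns the 64-bit value in EDX:EAX. A minimal sketch of such a wrapper, using GCC/Clang inline assembly; this is ring-0-only code (RDMSR raises #GP outside kernel mode), and only the architectural index 0x480 for IA32_VMX_BASIC is shown:

    #include <stdint.h>

    #define MSR_IA32_VMX_BASIC  UINT32_C(0x480)  /* architectural MSR index */

    /* Read a 64-bit model-specific register on the current CPU.
     * Same idea as VBox's ASMRdMsr; must run at CPL 0. */
    static inline uint64_t RdMsr(uint32_t idMsr)
    {
        uint32_t uLow, uHigh;
        __asm__ __volatile__("rdmsr" : "=a" (uLow), "=d" (uHigh) : "c" (idMsr));
        return ((uint64_t)uHigh << 32) | uLow;
    }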
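The RT_HI_U32 tests mirror the layout of the VMX capability control MSRs described in the Intel SDM: bits 31:0 hold the allowed-0 settings (a control must be 1 if its bit is set there) and bits 63:32 the allowed-1 settings (a control may be 1 only if its bit is set there). That is why the code checks the high dword for VMX_PROC_CTLS_USE_SECONDARY_CTLS (bit 31, "activate secondary controls") before reading MSR_IA32_VMX_PROCBASED_CTLS2, and why the TRUE_* capability MSRs are read only when bit 55 of IA32_VMX_BASIC, the field VMX_BF_BASIC_TRUE_CTLS extracts, reports them present. A self-contained sketch of the decoding; the sample MSR values below are made up for illustration:

    #include <stdint.h>
    #include <stdio.h>

    typedef struct VmxCtlsCaps
    {
        uint32_t fMustBeOne;   /* allowed-0: controls that have to be set */
        uint32_t fMayBeOne;    /* allowed-1: controls permitted to be set */
    } VmxCtlsCaps;

    /* Split a VMX capability control MSR into its two halves. */
    static VmxCtlsCaps DecodeVmxCtlsMsr(uint64_t uMsr)
    {
        VmxCtlsCaps Caps;
        Caps.fMustBeOne = (uint32_t)uMsr;          /* low dword           */
        Caps.fMayBeOne  = (uint32_t)(uMsr >> 32);  /* high dword, RT_HI_U32 */
        return Caps;
    }

    int main(void)
    {
        /* Hypothetical IA32_VMX_PROCBASED_CTLS value, for illustration only. */
        uint64_t const    uProcCtls = UINT64_C(0xfff9fffe0401e172);
        VmxCtlsCaps const Caps      = DecodeVmxCtlsMsr(uProcCtls);

        /* "Activate secondary controls" is bit 31 of the proc-based controls. */
        uint32_t const fUseSecondaryCtls = UINT32_C(1) << 31;
        printf("secondary controls available: %d\n",
               (Caps.fMayBeOne & fUseSecondaryCtls) != 0);

        /* The TRUE_* MSRs exist only when IA32_VMX_BASIC bit 55 is set. */
        uint64_t const uBasic = UINT64_C(0x00da040000000001);
        printf("true ctls MSRs present: %d\n", (int)((uBasic >> 55) & 1));
        return 0;
    }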
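The new @todo defers caching until it can be done "with proper locking and race safety". The hazard in the disabled #if 0 path is visible in the diff itself: the fast path memcpy'd the global g_HwvirtMsrs without holding any lock, while every query, including a forced re-query, rewrote that same global in place, so a concurrent reader could copy a half-updated structure. A minimal user-mode sketch of one safe shape, double-checked publication of a write-once cache; every name here (HwvirtMsrs, query_msrs_from_hardware, get_hwvirt_msrs) is a hypothetical stand-in, not a VirtualBox API:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <string.h>

    typedef struct HwvirtMsrs { uint64_t u64Basic, u64ProcCtls; } HwvirtMsrs;

    static HwvirtMsrs      g_Cache;                 /* written once, under g_Lock */
    static atomic_bool     g_fCached = false;       /* release/acquire publication flag */
    static pthread_mutex_t g_Lock    = PTHREAD_MUTEX_INITIALIZER;

    /* Stand-in for the expensive RDMSR sequence; values are made up. */
    static void query_msrs_from_hardware(HwvirtMsrs *p)
    {
        p->u64Basic    = UINT64_C(0x00da040000000001);
        p->u64ProcCtls = UINT64_C(0xfff9fffe0401e172);
    }

    int get_hwvirt_msrs(HwvirtMsrs *pOut, bool fForce)
    {
        /* Fast path: lock-free once published.  Safe only because the
         * published cache is never rewritten, so the copy cannot tear. */
        if (!fForce && atomic_load_explicit(&g_fCached, memory_order_acquire))
        {
            memcpy(pOut, &g_Cache, sizeof(*pOut));
            return 0;
        }

        /* Slow path: query into a local buffer; re-querying is idempotent. */
        HwvirtMsrs Fresh;
        query_msrs_from_hardware(&Fresh);

        pthread_mutex_lock(&g_Lock);
        if (!atomic_load_explicit(&g_fCached, memory_order_relaxed))
        {
            g_Cache = Fresh;                        /* first publisher wins */
            atomic_store_explicit(&g_fCached, true, memory_order_release);
        }
        pthread_mutex_unlock(&g_Lock);

        memcpy(pOut, &Fresh, sizeof(*pOut));        /* forced callers get fresh values */
        return 0;
    }

Making the published cache write-once is what keeps the lock-free fast path safe: forced callers read the hardware into a local buffer and return that, so they never tear a concurrent reader's copy of the global.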