VirtualBox

Changeset 57040 in vbox for trunk/src/VBox/HostDrivers


Timestamp: Jul 21, 2015 11:57:25 AM (10 years ago)
Author: vboxsync
Message:

SUPDrv: Work around a KVM bug when querying the VT-x capability while running inside a KVM guest. Also cleaned up some redundant checks (made them into assertions).
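
For context, the workaround relies on the guest recognising KVM by the hypervisor vendor signature that CPUID leaf 0x40000000 returns, which is exactly what the new code path below checks. The following is a minimal, self-contained sketch of that detection using the IPRT helpers already used in this file; the function name isRunningInsideKvm() is made up for illustration and the include paths are assumed, not taken from the changeset.

/* Minimal sketch (not part of the changeset): detect a KVM host from inside
 * the guest the same way the new code path does. Assumes IPRT headers. */
#include <iprt/types.h>
#include <iprt/asm-amd64-x86.h>   /* ASMCpuId */
#include <iprt/x86.h>             /* X86_CPUID_FEATURE_ECX_HVP */

static bool isRunningInsideKvm(void)  /* hypothetical helper, for illustration only */
{
    uint32_t uEax, uEbx, uEcx, uEdx;

    /* CPUID leaf 1, ECX bit 31 ("hypervisor present") must be set before the
       0x40000000 leaf range can be expected to exist. */
    ASMCpuId(1, &uEax, &uEbx, &uEcx, &uEdx);
    if (!(uEcx & X86_CPUID_FEATURE_ECX_HVP))
        return false;

    /* Leaf 0x40000000 returns the hypervisor vendor string in EBX:ECX:EDX;
       KVM reports "KVMKVMKVM\0\0\0". */
    ASMCpuId(0x40000000, &uEax, &uEbx, &uEcx, &uEdx);
    return uEbx == UINT32_C(0x4B4D564B)  /* 'KVMK' */
        && uEcx == UINT32_C(0x564B4D56)  /* 'VMKV' */
        && uEdx == UINT32_C(0x0000004D); /* 'M\0\0\0' */
}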

File: 1 edited

  • trunk/src/VBox/HostDrivers/Support/SUPDrv.cpp

--- trunk/src/VBox/HostDrivers/Support/SUPDrv.cpp (r56774)
+++ trunk/src/VBox/HostDrivers/Support/SUPDrv.cpp (r57040)
@@ -3834,43 +3834,58 @@
          * accurately. See @bugref{6873}.
          *
-         * The reason we are being paranoid here and (re)checking is that we don't assume all callers
-         * of this function to check it like SUPR0QueryVTCaps() currently does. If we get something
-         * wrong here, we can throw a #GP and panic the box. This isn't a performance critical path.
+         * We need to check for VMX-in-SMX hardware support here, before writing the MSR as
+         * otherwise we risk #GP faulting on CPUs that do not support it. Callers do not check
+         * for it.
          */
         uint32_t fFeaturesECX, uDummy;
-        uint32_t uMaxId, uVendorEBX, uVendorECX, uVendorEDX;
+        uint32_t uVendorEBX, uVendorECX, uVendorEDX;
+#ifdef VBOX_STRICT
+        /* Callers should have verified these at some point. */
+        uint32_t uMaxId;
         ASMCpuId(0, &uMaxId, &uVendorEBX, &uVendorECX, &uVendorEDX);
+        Assert(ASMIsValidStdRange(uMaxId));
+        Assert(   ASMIsIntelCpuEx(     uVendorEBX, uVendorECX, uVendorEDX)
+               || ASMIsViaCentaurCpuEx(uVendorEBX, uVendorECX, uVendorEDX));
+#endif
         ASMCpuId(1, &uDummy, &uDummy, &fFeaturesECX, &uDummy);
-        if (   ASMIsValidStdRange(uMaxId)
-            && (   ASMIsIntelCpuEx(     uVendorEBX, uVendorECX, uVendorEDX)
-                || ASMIsViaCentaurCpuEx(uVendorEBX, uVendorECX, uVendorEDX)))
-        {
-            bool fSmxVmxHwSupport = false;
-            if (   (fFeaturesECX & X86_CPUID_FEATURE_ECX_VMX)
-                && (fFeaturesECX & X86_CPUID_FEATURE_ECX_SMX))
-                fSmxVmxHwSupport = true;
-
-            u64FeatMsr |= MSR_IA32_FEATURE_CONTROL_LOCK
-                        | MSR_IA32_FEATURE_CONTROL_VMXON;
-            if (fSmxVmxHwSupport)
-                u64FeatMsr |= MSR_IA32_FEATURE_CONTROL_SMX_VMXON;
-
-            /* Commit. */
-            ASMWrMsr(MSR_IA32_FEATURE_CONTROL, u64FeatMsr);
-
-            /* Verify. */
-            u64FeatMsr     = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);
-            fMsrLocked     = RT_BOOL(u64FeatMsr & MSR_IA32_FEATURE_CONTROL_LOCK);
-            fSmxVmxAllowed = fMsrLocked && RT_BOOL(u64FeatMsr & MSR_IA32_FEATURE_CONTROL_SMX_VMXON);
-            fVmxAllowed    = fMsrLocked && RT_BOOL(u64FeatMsr & MSR_IA32_FEATURE_CONTROL_VMXON);
-            if (   fVmxAllowed
-                && (   !fSmxVmxHwSupport
-                    || fSmxVmxAllowed))
-                rc = VINF_SUCCESS;
-            else
-                rc = VERR_VMX_MSR_LOCKING_FAILED;
+        bool fSmxVmxHwSupport = false;
+        if (   (fFeaturesECX & X86_CPUID_FEATURE_ECX_VMX)
+            && (fFeaturesECX & X86_CPUID_FEATURE_ECX_SMX))
+            fSmxVmxHwSupport = true;
+
+        u64FeatMsr |= MSR_IA32_FEATURE_CONTROL_LOCK
+                    | MSR_IA32_FEATURE_CONTROL_VMXON;
+        if (fSmxVmxHwSupport)
+            u64FeatMsr |= MSR_IA32_FEATURE_CONTROL_SMX_VMXON;
+
+        /*
+         * Commit.
+         */
+        ASMWrMsr(MSR_IA32_FEATURE_CONTROL, u64FeatMsr);
+
+        /*
+         * Verify.
+         */
+        u64FeatMsr = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);
+        /* Workaround for what is really a KVM bug. See @bugref{6208} comment #48. */
+        if (fFeaturesECX & X86_CPUID_FEATURE_ECX_HVP)
+        {
+            uint32_t uEax, uEbx, uEcx, uEdx;
+            ASMCpuId(0x40000000, &uEax, &uEbx, &uEcx, &uEdx);
+            if (   uEbx == 0x4B4D564B    /* 'KVMK' */
+                && uEcx == 0x564B4D56    /* 'VMKV' */
+                && uEdx == 0x0000004D)   /* 'M\0\0\0' */
+                fMsrLocked = true;
+        }
         else
-            rc = VERR_VMX_IPE_5;
+            fMsrLocked = RT_BOOL(u64FeatMsr & MSR_IA32_FEATURE_CONTROL_LOCK);
+        fSmxVmxAllowed = fMsrLocked && RT_BOOL(u64FeatMsr & MSR_IA32_FEATURE_CONTROL_SMX_VMXON);
+        fVmxAllowed    = fMsrLocked && RT_BOOL(u64FeatMsr & MSR_IA32_FEATURE_CONTROL_VMXON);
+        if (   fVmxAllowed
+            && (   !fSmxVmxHwSupport
+                || fSmxVmxAllowed))
+            rc = VINF_SUCCESS;
+        else
+            rc = VERR_VMX_MSR_LOCKING_FAILED;
     }
 
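
A note on the verify step that the rewrite keeps: whether VMX ends up usable depends on the lock bit plus the VMXON enable bit and, when the CPU also reports SMX, the SMX-VMXON enable bit of IA32_FEATURE_CONTROL. The sketch below restates that decision on its own, with the architectural bit values written out; the macro and function names are illustrative stand-ins, not the VBox MSR_IA32_FEATURE_CONTROL_* definitions.

#include <stdbool.h>
#include <stdint.h>

/* Architectural IA32_FEATURE_CONTROL bits (names are illustrative). */
#define FEAT_CTL_LOCK        UINT64_C(0x1)  /* bit 0: MSR is locked */
#define FEAT_CTL_SMX_VMXON   UINT64_C(0x2)  /* bit 1: VMXON allowed inside SMX */
#define FEAT_CTL_VMXON       UINT64_C(0x4)  /* bit 2: VMXON allowed outside SMX */

/* Mirrors the post-write check in the changeset: VMX is usable only if the
 * MSR is locked with VMXON enabled and, when the CPU supports both VMX and
 * SMX, the SMX variant is enabled as well. */
static bool vmxUsableAfterLocking(uint64_t uFeatCtl, bool fCpuHasVmxAndSmx)
{
    bool const fLocked        = (uFeatCtl & FEAT_CTL_LOCK) != 0;
    bool const fVmxAllowed    = fLocked && (uFeatCtl & FEAT_CTL_VMXON) != 0;
    bool const fSmxVmxAllowed = fLocked && (uFeatCtl & FEAT_CTL_SMX_VMXON) != 0;
    return fVmxAllowed && (!fCpuHasVmxAndSmx || fSmxVmxAllowed);
}

The KVM special case exists because, on affected hosts, the re-read MSR value apparently cannot be trusted to reflect the lock bit, so the code treats it as locked once the KVM signature has been detected.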