VirtualBox

Changeset 76232 in vbox for trunk/src


Ignore:
Timestamp:
Dec 14, 2018 12:54:14 PM (6 years ago)
Author:
vboxsync
svn:sync-xref-src-repo-rev:
127501
Message:

VMM/HMR0: Moved the reading of the VMX MSRs into a separate function.

File:
1 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/VMM/VMMR0/HMR0.cpp

    r76227 r76232  
    304304 * @returns true if subject to it, false if not.
    305305 */
    306 static bool hmR0InitIntelIsSubjectToVmxPreemptionTimerErratum(void)
     306static bool hmR0InitIntelIsSubjectToVmxPreemptTimerErratum(void)
    307307{
    308308    uint32_t u = ASMCpuId_EAX(1);
     
    329329
    330330/**
     331 * Reads all the VMX feature MSRs.
     332 *
     333 * @param   pVmxMsrs    Where to read the VMX MSRs into.
     334 * @remarks The caller is expected to have verified if this is an Intel CPU and that
     335 *          VMX is present (i.e. SUPR0GetVTSupport() must have returned
     336 *          SUPVTCAPS_VT_X).
     337 */
     338static void hmR0InitIntelReadVmxMsrs(PVMXMSRS pVmxMsrs)
     339{
     340    Assert(pVmxMsrs);
     341    RT_ZERO(*pVmxMsrs);
     342
     343    /*
     344     * Note! We assume here that all MSRs are consistent across host CPUs
     345     * and don't bother with preventing CPU migration.
     346     */
     347
            /* MSRs that are always present when VMX is supported (see function
               header: caller has already verified SUPVTCAPS_VT_X). */
     348    pVmxMsrs->u64FeatCtrl  = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);
     349    pVmxMsrs->u64Basic     = ASMRdMsr(MSR_IA32_VMX_BASIC);
     350    pVmxMsrs->PinCtls.u    = ASMRdMsr(MSR_IA32_VMX_PINBASED_CTLS);
     351    pVmxMsrs->ProcCtls.u   = ASMRdMsr(MSR_IA32_VMX_PROCBASED_CTLS);
     352    pVmxMsrs->ExitCtls.u   = ASMRdMsr(MSR_IA32_VMX_EXIT_CTLS);
     353    pVmxMsrs->EntryCtls.u  = ASMRdMsr(MSR_IA32_VMX_ENTRY_CTLS);
     354    pVmxMsrs->u64Misc      = ASMRdMsr(MSR_IA32_VMX_MISC);
     355    pVmxMsrs->u64Cr0Fixed0 = ASMRdMsr(MSR_IA32_VMX_CR0_FIXED0);
     356    pVmxMsrs->u64Cr0Fixed1 = ASMRdMsr(MSR_IA32_VMX_CR0_FIXED1);
     357    pVmxMsrs->u64Cr4Fixed0 = ASMRdMsr(MSR_IA32_VMX_CR4_FIXED0);
     358    pVmxMsrs->u64Cr4Fixed1 = ASMRdMsr(MSR_IA32_VMX_CR4_FIXED1);
     359    pVmxMsrs->u64VmcsEnum  = ASMRdMsr(MSR_IA32_VMX_VMCS_ENUM);
     360
            /* The "true" capability MSRs only exist when the basic VMX MSR
               advertises them; reading them otherwise would #GP. */
     361    if (RT_BF_GET(pVmxMsrs->u64Basic, VMX_BF_BASIC_TRUE_CTLS))
     362    {
     363        pVmxMsrs->TruePinCtls.u   = ASMRdMsr(MSR_IA32_VMX_TRUE_PINBASED_CTLS);
     364        pVmxMsrs->TrueProcCtls.u  = ASMRdMsr(MSR_IA32_VMX_TRUE_PROCBASED_CTLS);
     365        pVmxMsrs->TrueEntryCtls.u = ASMRdMsr(MSR_IA32_VMX_TRUE_ENTRY_CTLS);
     366        pVmxMsrs->TrueExitCtls.u  = ASMRdMsr(MSR_IA32_VMX_TRUE_EXIT_CTLS);
     367    }
     368
            /* Secondary processor-based controls -- and the EPT/VPID and VMFUNC
               capability MSRs that depend on them -- are likewise optional. */
     369    if (pVmxMsrs->ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
     370    {
     371        pVmxMsrs->ProcCtls2.u = ASMRdMsr(MSR_IA32_VMX_PROCBASED_CTLS2);
     372        if (pVmxMsrs->ProcCtls2.n.allowed1 & (VMX_PROC_CTLS2_EPT | VMX_PROC_CTLS2_VPID))
     373            pVmxMsrs->u64EptVpidCaps = ASMRdMsr(MSR_IA32_VMX_EPT_VPID_CAP);
     374
     375        if (pVmxMsrs->ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_VMFUNC)
     376            pVmxMsrs->u64VmFunc = ASMRdMsr(MSR_IA32_VMX_VMFUNC);
     377    }
     378}
     379
     380
     381/**
    331382 * Intel specific initialization code.
    332383 *
     
    365416    if (RT_SUCCESS(g_HmR0.rcInit))
    366417    {
    367         /* Reread in case it was changed by SUPR0GetVmxUsability(). */
    368         g_HmR0.vmx.Msrs.u64FeatCtrl     = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);
     418        /* Read CR4 and EFER for logging/diagnostic purposes. */
     419        g_HmR0.vmx.u64HostCr4  = ASMGetCR4();
     420        g_HmR0.vmx.u64HostEfer = ASMRdMsr(MSR_K6_EFER);
     421
     422        /* Read all the VMX MSRs for determining which VMX features we can use later. */
     423        hmR0InitIntelReadVmxMsrs(&g_HmR0.vmx.Msrs);
    369424
    370425        /*
    371          * Read all relevant registers and MSRs.
     426         * KVM workaround: Intel SDM section 34.15.5 describes that MSR_IA32_SMM_MONITOR_CTL
     427         * depends on bit 49 of MSR_IA32_VMX_BASIC while table 35-2 says that this MSR is
     428         * available if either VMX or SMX is supported.
    372429         */
    373         g_HmR0.vmx.u64HostCr4           = ASMGetCR4();
    374         g_HmR0.vmx.u64HostEfer          = ASMRdMsr(MSR_K6_EFER);
    375         g_HmR0.vmx.Msrs.u64Basic        = ASMRdMsr(MSR_IA32_VMX_BASIC);
    376         /* KVM workaround: Intel SDM section 34.15.5 describes that MSR_IA32_SMM_MONITOR_CTL
    377          * depends on bit 49 of MSR_IA32_VMX_BASIC while table 35-2 says that this MSR is
    378          * available if either VMX or SMX is supported. */
    379430        if (RT_BF_GET(g_HmR0.vmx.Msrs.u64Basic, VMX_BF_BASIC_DUAL_MON))
    380431            g_HmR0.vmx.u64HostSmmMonitorCtl = ASMRdMsr(MSR_IA32_SMM_MONITOR_CTL);
    381         g_HmR0.vmx.Msrs.PinCtls.u       = ASMRdMsr(MSR_IA32_VMX_PINBASED_CTLS);
    382         g_HmR0.vmx.Msrs.ProcCtls.u      = ASMRdMsr(MSR_IA32_VMX_PROCBASED_CTLS);
    383         g_HmR0.vmx.Msrs.ExitCtls.u      = ASMRdMsr(MSR_IA32_VMX_EXIT_CTLS);
    384         g_HmR0.vmx.Msrs.EntryCtls.u     = ASMRdMsr(MSR_IA32_VMX_ENTRY_CTLS);
    385         g_HmR0.vmx.Msrs.u64Misc         = ASMRdMsr(MSR_IA32_VMX_MISC);
    386         g_HmR0.vmx.Msrs.u64Cr0Fixed0    = ASMRdMsr(MSR_IA32_VMX_CR0_FIXED0);
    387         g_HmR0.vmx.Msrs.u64Cr0Fixed1    = ASMRdMsr(MSR_IA32_VMX_CR0_FIXED1);
    388         g_HmR0.vmx.Msrs.u64Cr4Fixed0    = ASMRdMsr(MSR_IA32_VMX_CR4_FIXED0);
    389         g_HmR0.vmx.Msrs.u64Cr4Fixed1    = ASMRdMsr(MSR_IA32_VMX_CR4_FIXED1);
    390         g_HmR0.vmx.Msrs.u64VmcsEnum     = ASMRdMsr(MSR_IA32_VMX_VMCS_ENUM);
    391         if (RT_BF_GET(g_HmR0.vmx.Msrs.u64Basic, VMX_BF_BASIC_TRUE_CTLS))
    392         {
    393             g_HmR0.vmx.Msrs.TruePinCtls.u   = ASMRdMsr(MSR_IA32_VMX_TRUE_PINBASED_CTLS);
    394             g_HmR0.vmx.Msrs.TrueProcCtls.u  = ASMRdMsr(MSR_IA32_VMX_TRUE_PROCBASED_CTLS);
    395             g_HmR0.vmx.Msrs.TrueEntryCtls.u = ASMRdMsr(MSR_IA32_VMX_TRUE_ENTRY_CTLS);
    396             g_HmR0.vmx.Msrs.TrueExitCtls.u  = ASMRdMsr(MSR_IA32_VMX_TRUE_EXIT_CTLS);
    397         }
    398 
    399         /* VPID 16 bits ASID. */
     432
     433        /* Initialize VPID - 16 bits ASID. */
    400434        g_HmR0.uMaxAsid = 0x10000; /* exclusive */
    401435
    402         if (g_HmR0.vmx.Msrs.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
    403         {
    404             g_HmR0.vmx.Msrs.ProcCtls2.u = ASMRdMsr(MSR_IA32_VMX_PROCBASED_CTLS2);
    405             if (g_HmR0.vmx.Msrs.ProcCtls2.n.allowed1 & (VMX_PROC_CTLS2_EPT | VMX_PROC_CTLS2_VPID))
    406                 g_HmR0.vmx.Msrs.u64EptVpidCaps = ASMRdMsr(MSR_IA32_VMX_EPT_VPID_CAP);
    407 
    408             if (g_HmR0.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_VMFUNC)
    409                 g_HmR0.vmx.Msrs.u64VmFunc = ASMRdMsr(MSR_IA32_VMX_VMFUNC);
    410         }
    411 
     436        /*
     437         * If the host OS has not enabled VT-x for us, try enter VMX root mode
     438         * to really verify if VT-x is usable.
     439         */
    412440        if (!g_HmR0.vmx.fUsingSUPR0EnableVTx)
    413441        {
    414             /*
    415              * Enter root mode
    416              */
     442            /* Allocate a temporary VMXON region. */
    417443            RTR0MEMOBJ hScatchMemObj;
    418444            rc = RTR0MemObjAllocCont(&hScatchMemObj, PAGE_SIZE, false /* fExecutable */);
     
    422448                return rc;
    423449            }
    424 
    425450            void      *pvScatchPage      = RTR0MemObjAddress(hScatchMemObj);
    426451            RTHCPHYS   HCPhysScratchPage = RTR0MemObjGetPagePhysAddr(hScatchMemObj, 0);
    427452            ASMMemZeroPage(pvScatchPage);
    428453
    429             /* Set revision dword at the beginning of the structure. */
     454            /* Set revision dword at the beginning of the VMXON structure. */
    430455            *(uint32_t *)pvScatchPage = RT_BF_GET(g_HmR0.vmx.Msrs.u64Basic, VMX_BF_BASIC_VMCS_ID);
    431456
    432             /* Make sure we don't get rescheduled to another cpu during this probe. */
     457            /* Make sure we don't get rescheduled to another CPU during this probe. */
    433458            RTCCUINTREG const fEFlags = ASMIntDisableFlags();
    434459
    435             /*
    436              * Check CR4.VMXE.
    437              */
     460            /* Check CR4.VMXE. */
    438461            g_HmR0.vmx.u64HostCr4 = ASMGetCR4();
    439462            if (!(g_HmR0.vmx.u64HostCr4 & X86_CR4_VMXE))
     
    503526            /*
    504527             * Check for the VMX-Preemption Timer and adjust for the "VMX-Preemption
    505              * Timer Does Not Count Down at the Rate Specified" erratum.
     528             * Timer Does Not Count Down at the Rate Specified" CPU erratum.
    506529             */
    507530            if (g_HmR0.vmx.Msrs.PinCtls.n.allowed1 & VMX_PIN_CTLS_PREEMPT_TIMER)
     
    509532                g_HmR0.vmx.fUsePreemptTimer   = true;
    510533                g_HmR0.vmx.cPreemptTimerShift = RT_BF_GET(g_HmR0.vmx.Msrs.u64Misc, VMX_BF_MISC_PREEMPT_TIMER_TSC);
    511                 if (hmR0InitIntelIsSubjectToVmxPreemptionTimerErratum())
     534                if (hmR0InitIntelIsSubjectToVmxPreemptTimerErratum())
    512535                    g_HmR0.vmx.cPreemptTimerShift = 0; /* This is about right most of the time here. */
    513536            }
Note: See TracChangeset for help on using the changeset viewer.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette