VirtualBox

Changeset 76464 in vbox for trunk/src/VBox/VMM


Timestamp:
Dec 25, 2018 4:36:48 AM
Author:
vboxsync
Message:

VMM: Nested VMX: bugref:9180 Fix MSR initialization issue for exploding VMX features.

Location:
trunk/src/VBox/VMM
Files:
12 edited
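
In short: before this change, ring-0 HM read the VMX feature MSRs itself (hmR0InitIntelReadVmxMsrs) and other code fetched them back through HMVmxGetHostMsr(s), which was fragile with respect to initialization order. Now the support driver reads the MSRs once into a single SUPHWVIRTMSRS structure and HM merely keeps a copy. A trimmed sketch of the shapes involved -- the real VMXMSRS/SVMMSRS/SUPHWVIRTMSRS definitions live in the VirtualBox headers and carry many more fields; the stand-ins below are illustrative only:

    #include <stdint.h>
    #include <stdbool.h>

    /* Illustrative stand-ins for the real VirtualBox types. */
    typedef struct VMXMSRS { uint64_t u64FeatCtrl; uint64_t u64Basic; } VMXMSRS;
    typedef struct SVMMSRS { uint64_t u64MsrHwcr; } SVMMSRS;

    /* One structure carries either flavour of hardware-virtualization MSRs;
       only the half matching the detected CPU vendor is valid. */
    typedef struct SUPHWVIRTMSRS
    {
        union
        {
            VMXMSRS vmx;    /* valid when VT-x was detected */
            SVMMSRS svm;    /* valid when AMD-V was detected */
        } u;
    } SUPHWVIRTMSRS;

    /* Mirrors the reshuffled g_HmR0.hwvirt in the HMR0.cpp hunks below (trimmed). */
    static struct
    {
        struct
        {
            union
            {
                struct { bool fSupported; } vmx;
                struct { bool fSupported; } svm;
            } u;
            uint32_t      uMaxAsid;
            SUPHWVIRTMSRS Msrs;    /* copied once from the support driver */
        } hwvirt;
    } g_HmR0;

With this layout a single SUPR0GetHwvirtMsrs(&g_HmR0.hwvirt.Msrs, ...) call replaces the per-vendor RDMSR sequences, and HMR0InitVM() fills the per-VM view with plain assignments such as pVM->hm.s.vmx.Msrs = g_HmR0.hwvirt.Msrs.u.vmx.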

  • trunk/src/VBox/VMM/VMMAll/HMVMXAll.cpp

    r76397 r76464  
    365365
    366366/**
    367  * Gets a copy of the VMX host MSRs that were read by HM during ring-0
    368  * initialization.
    369  *
    370  * @return VBox status code.
    371  * @param   pVM        The cross context VM structure.
    372  * @param   pVmxMsrs   Where to store the VMXMSRS struct (only valid when
    373  *                     VINF_SUCCESS is returned).
    374  *
    375  * @remarks Caller needs to take care not to call this function too early. Call
    376  *          after HM initialization is fully complete.
    377  */
    378 VMM_INT_DECL(int) HMVmxGetHostMsrs(PVM pVM, PVMXMSRS pVmxMsrs)
    379 {
    380     AssertPtrReturn(pVM,      VERR_INVALID_PARAMETER);
    381     AssertPtrReturn(pVmxMsrs, VERR_INVALID_PARAMETER);
    382     if (pVM->hm.s.vmx.fSupported)
    383     {
    384         *pVmxMsrs = pVM->hm.s.vmx.Msrs;
    385         return VINF_SUCCESS;
    386     }
    387     return VERR_VMX_NO_VMX;
    388 }
    389 
    390 
    391 /**
    392  * Gets the specified VMX host MSR that was read by HM during ring-0
    393  * initialization.
    394  *
    395  * @return VBox status code.
    396  * @param   pVM        The cross context VM structure.
    397  * @param   idMsr      The MSR.
    398  * @param   puValue    Where to store the MSR value (only updated when VINF_SUCCESS
    399  *                     is returned).
    400  *
    401  * @remarks Caller needs to take care not to call this function too early. Call
    402  *          after HM initialization is fully complete.
    403  */
    404 VMM_INT_DECL(int) HMVmxGetHostMsr(PVM pVM, uint32_t idMsr, uint64_t *puValue)
    405 {
    406     AssertPtrReturn(pVM,     VERR_INVALID_PARAMETER);
    407     AssertPtrReturn(puValue, VERR_INVALID_PARAMETER);
    408 
    409     if (pVM->hm.s.vmx.fSupported)
    410     {
    411         PCVMXMSRS pVmxMsrs = &pVM->hm.s.vmx.Msrs;
    412         switch (idMsr)
    413         {
    414             case MSR_IA32_FEATURE_CONTROL:         *puValue =  pVmxMsrs->u64FeatCtrl;      break;
    415             case MSR_IA32_VMX_BASIC:               *puValue =  pVmxMsrs->u64Basic;         break;
    416             case MSR_IA32_VMX_PINBASED_CTLS:       *puValue =  pVmxMsrs->PinCtls.u;        break;
    417             case MSR_IA32_VMX_PROCBASED_CTLS:      *puValue =  pVmxMsrs->ProcCtls.u;       break;
    418             case MSR_IA32_VMX_PROCBASED_CTLS2:     *puValue =  pVmxMsrs->ProcCtls2.u;      break;
    419             case MSR_IA32_VMX_EXIT_CTLS:           *puValue =  pVmxMsrs->ExitCtls.u;       break;
    420             case MSR_IA32_VMX_ENTRY_CTLS:          *puValue =  pVmxMsrs->EntryCtls.u;      break;
    421             case MSR_IA32_VMX_TRUE_PINBASED_CTLS:  *puValue =  pVmxMsrs->TruePinCtls.u;    break;
    422             case MSR_IA32_VMX_TRUE_PROCBASED_CTLS: *puValue =  pVmxMsrs->TrueProcCtls.u;   break;
    423             case MSR_IA32_VMX_TRUE_ENTRY_CTLS:     *puValue =  pVmxMsrs->TrueEntryCtls.u;  break;
    424             case MSR_IA32_VMX_TRUE_EXIT_CTLS:      *puValue =  pVmxMsrs->TrueExitCtls.u;   break;
    425             case MSR_IA32_VMX_MISC:                *puValue =  pVmxMsrs->u64Misc;          break;
    426             case MSR_IA32_VMX_CR0_FIXED0:          *puValue =  pVmxMsrs->u64Cr0Fixed0;     break;
    427             case MSR_IA32_VMX_CR0_FIXED1:          *puValue =  pVmxMsrs->u64Cr0Fixed1;     break;
    428             case MSR_IA32_VMX_CR4_FIXED0:          *puValue =  pVmxMsrs->u64Cr4Fixed0;     break;
    429             case MSR_IA32_VMX_CR4_FIXED1:          *puValue =  pVmxMsrs->u64Cr4Fixed1;     break;
    430             case MSR_IA32_VMX_VMCS_ENUM:           *puValue =  pVmxMsrs->u64VmcsEnum;      break;
    431             case MSR_IA32_VMX_VMFUNC:              *puValue =  pVmxMsrs->u64VmFunc;        break;
    432             case MSR_IA32_VMX_EPT_VPID_CAP:        *puValue =  pVmxMsrs->u64EptVpidCaps;   break;
    433             default:
    434             {
    435                 AssertMsgFailed(("Invalid MSR %#x\n", idMsr));
    436                 return VERR_NOT_FOUND;
    437             }
    438         }
    439         return VINF_SUCCESS;
    440     }
    441     return VERR_VMX_NO_VMX;
    442 }
    443 
    444 
    445 /**
    446367 * Gets the descriptive name of a VMX instruction/VM-exit diagnostic code.
    447368 *
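
    Both accessors above are dropped: ring-3 consumers no longer pull host MSRs out of HM one at a time, because CPUM now receives the entire host VMXMSRS as a parameter (see cpumR3InitVmxGuestMsrs in the CPUM.cpp hunks below). A before/after sketch of a call site; the "after" half is paraphrased from the new signature rather than copied verbatim:

        /* Before (removed above): query a single MSR through HM, which only
           works after HM ring-0 initialization has fully completed. */
        uint64_t uHostMsr = 0;
        if (cpumR3IsHwAssistVmxNstGstExecAllowed(pVM))
            HMVmxGetHostMsr(pVM, MSR_IA32_VMX_MISC, &uHostMsr);

        /* After (sketch): the host MSRs arrive as a PCVMXMSRS parameter and are
           read directly; the pointer is NULL when VMX is fully emulated. */
        uint64_t uHostMiscMsr = pHostVmxMsrs ? pHostVmxMsrs->u64Misc : 0;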
  • trunk/src/VBox/VMM/VMMR0/HMR0.cpp

    r76397 r76464  
    2626#include "HMInternal.h"
    2727#include <VBox/vmm/vm.h>
    28 #include <VBox/vmm/hm_vmx.h>
     28#include <VBox/vmm/hm_svm.h>
    2929#include <VBox/vmm/hmvmxinline.h>
    30 #include <VBox/vmm/hm_svm.h>
    3130#include <VBox/err.h>
    3231#include <VBox/log.h>
     
    9392    DECLR0CALLBACKMEMBER(VBOXSTRICTRC, pfnRunGuestCode, (PVMCPU pVCpu));
    9493    DECLR0CALLBACKMEMBER(int,  pfnEnableCpu, (PHMGLOBALCPUINFO pHostCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage,
    95                                               bool fEnabledByHost, void *pvArg));
     94                                              bool fEnabledByHost, PCSUPHWVIRTMSRS pHwvirtMsrs));
    9695    DECLR0CALLBACKMEMBER(int,  pfnDisableCpu, (PHMGLOBALCPUINFO pHostCpu, void *pvCpuPage, RTHCPHYS HCPhysCpuPage));
    9796    DECLR0CALLBACKMEMBER(int,  pfnInitVM, (PVM pVM));
     
    10099    /** @} */
    101100
    102     /** Maximum ASID allowed. */
    103     uint32_t                        uMaxAsid;
    104 
    105     /** VT-x data. */
     101    /** Hardware-virtualization data. */
    106102    struct
    107103    {
    108         /** Set by us to indicate VMX is supported by the CPU. */
    109         bool                        fSupported;
    110         /** Whether we're using SUPR0EnableVTx or not. */
    111         bool                        fUsingSUPR0EnableVTx;
    112         /** Whether we're using the preemption timer or not. */
    113         bool                        fUsePreemptTimer;
    114         /** The shift mask employed by the VMX-Preemption timer. */
    115         uint8_t                     cPreemptTimerShift;
    116 
    117         /** Host CR4 value (set by ring-0 VMX init) */
    118         uint64_t                    u64HostCr4;
    119         /** Host EFER value (set by ring-0 VMX init) */
    120         uint64_t                    u64HostEfer;
    121         /** Host SMM monitor control (used for logging/diagnostics) */
    122         uint64_t                    u64HostSmmMonitorCtl;
    123 
    124         /** VMX MSR values. */
    125         VMXMSRS                     Msrs;
    126 
    127         /** Last instruction error. */
    128         uint32_t                    ulLastInstrError;
    129 
    130         /** Set if we've called SUPR0EnableVTx(true) and should disable it during
    131          * module termination. */
    132         bool                        fCalledSUPR0EnableVTx;
    133     } vmx;
    134 
    135     /** AMD-V information. */
    136     struct
    137     {
    138         /* HWCR MSR (for diagnostics) */
    139         uint64_t                    u64MsrHwcr;
    140 
    141         /** SVM revision. */
    142         uint32_t                    u32Rev;
    143 
    144         /** SVM feature bits from cpuid 0x8000000a */
    145         uint32_t                    u32Features;
    146 
    147         /** Set by us to indicate SVM is supported by the CPU. */
    148         bool                        fSupported;
    149     } svm;
     104        union
     105        {
     106            /** VT-x data. */
     107            struct
     108            {
     109                /** Host CR4 value (set by ring-0 VMX init) */
     110                uint64_t                    u64HostCr4;
     111                /** Host EFER value (set by ring-0 VMX init) */
     112                uint64_t                    u64HostEfer;
     113                /** Host SMM monitor control (used for logging/diagnostics) */
     114                uint64_t                    u64HostSmmMonitorCtl;
     115                /** Last instruction error. */
     116                uint32_t                    ulLastInstrError;
     117                /** The shift mask employed by the VMX-Preemption timer. */
     118                uint8_t                     cPreemptTimerShift;
     119                /** Padding. */
     120                uint8_t                     abPadding[3];
     121                /** Whether we're using the preemption timer or not. */
     122                bool                        fUsePreemptTimer;
     123                /** Whether we're using SUPR0EnableVTx or not. */
     124                bool                        fUsingSUPR0EnableVTx;
     125                /** Set if we've called SUPR0EnableVTx(true) and should disable it during
     126                 * module termination. */
     127                bool                        fCalledSUPR0EnableVTx;
     128                /** Set to by us to indicate VMX is supported by the CPU. */
     129                bool                        fSupported;
     130            } vmx;
     131
     132            /** AMD-V data. */
     133            struct
     134            {
     135                /** SVM revision. */
     136                uint32_t                    u32Rev;
     137                /** SVM feature bits from cpuid 0x8000000a */
     138                uint32_t                    u32Features;
     139                /** Padding. */
     140                bool                        afPadding[3];
     141                /** Set by us to indicate SVM is supported by the CPU. */
     142                bool                        fSupported;
     143            } svm;
     144        } u;
     145        /** Maximum allowed ASID/VPID (inclusive). */
     146        uint32_t                    uMaxAsid;
     147        /** MSRs. */
     148        SUPHWVIRTMSRS               Msrs;
     149    } hwvirt;
    150150
    151151    /** Last recorded error code during HM ring-0 init. */
     
    239239
    240240static DECLCALLBACK(int) hmR0DummyEnableCpu(PHMGLOBALCPUINFO pHostCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage,
    241                                             bool fEnabledBySystem, void *pvArg)
    242 {
    243     RT_NOREF6(pHostCpu, pVM, pvCpuPage, HCPhysCpuPage, fEnabledBySystem, pvArg);
     241                                            bool fEnabledBySystem, PCSUPHWVIRTMSRS pHwvirtMsrs)
     242{
     243    RT_NOREF6(pHostCpu, pVM, pvCpuPage, HCPhysCpuPage, fEnabledBySystem, pHwvirtMsrs);
    244244    return VINF_SUCCESS;
    245245}
     
    329329
    330330/**
    331  * Reads all the VMX feature MSRs.
    332  *
    333  * @param   pVmxMsrs    Where to read the VMX MSRs into.
    334  * @remarks The caller is expected to have verified if this is an Intel CPU and that
    335  *          VMX is present (i.e. SUPR0GetVTSupport() must have returned
    336  *          SUPVTCAPS_VT_X).
    337  */
    338 static void hmR0InitIntelReadVmxMsrs(PVMXMSRS pVmxMsrs)
    339 {
    340     Assert(pVmxMsrs);
    341     RT_ZERO(*pVmxMsrs);
    342 
    343     /*
    344      * Note! We assume here that all MSRs are consistent across host CPUs
    345      * and don't bother with preventing CPU migration.
    346      */
    347 
    348     pVmxMsrs->u64FeatCtrl  = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);
    349     pVmxMsrs->u64Basic     = ASMRdMsr(MSR_IA32_VMX_BASIC);
    350     pVmxMsrs->PinCtls.u    = ASMRdMsr(MSR_IA32_VMX_PINBASED_CTLS);
    351     pVmxMsrs->ProcCtls.u   = ASMRdMsr(MSR_IA32_VMX_PROCBASED_CTLS);
    352     pVmxMsrs->ExitCtls.u   = ASMRdMsr(MSR_IA32_VMX_EXIT_CTLS);
    353     pVmxMsrs->EntryCtls.u  = ASMRdMsr(MSR_IA32_VMX_ENTRY_CTLS);
    354     pVmxMsrs->u64Misc      = ASMRdMsr(MSR_IA32_VMX_MISC);
    355     pVmxMsrs->u64Cr0Fixed0 = ASMRdMsr(MSR_IA32_VMX_CR0_FIXED0);
    356     pVmxMsrs->u64Cr0Fixed1 = ASMRdMsr(MSR_IA32_VMX_CR0_FIXED1);
    357     pVmxMsrs->u64Cr4Fixed0 = ASMRdMsr(MSR_IA32_VMX_CR4_FIXED0);
    358     pVmxMsrs->u64Cr4Fixed1 = ASMRdMsr(MSR_IA32_VMX_CR4_FIXED1);
    359     pVmxMsrs->u64VmcsEnum  = ASMRdMsr(MSR_IA32_VMX_VMCS_ENUM);
    360 
    361     if (RT_BF_GET(pVmxMsrs->u64Basic, VMX_BF_BASIC_TRUE_CTLS))
    362     {
    363         pVmxMsrs->TruePinCtls.u   = ASMRdMsr(MSR_IA32_VMX_TRUE_PINBASED_CTLS);
    364         pVmxMsrs->TrueProcCtls.u  = ASMRdMsr(MSR_IA32_VMX_TRUE_PROCBASED_CTLS);
    365         pVmxMsrs->TrueEntryCtls.u = ASMRdMsr(MSR_IA32_VMX_TRUE_ENTRY_CTLS);
    366         pVmxMsrs->TrueExitCtls.u  = ASMRdMsr(MSR_IA32_VMX_TRUE_EXIT_CTLS);
    367     }
    368 
    369     if (pVmxMsrs->ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
    370     {
    371         pVmxMsrs->ProcCtls2.u = ASMRdMsr(MSR_IA32_VMX_PROCBASED_CTLS2);
    372         if (pVmxMsrs->ProcCtls2.n.allowed1 & (VMX_PROC_CTLS2_EPT | VMX_PROC_CTLS2_VPID))
    373             pVmxMsrs->u64EptVpidCaps = ASMRdMsr(MSR_IA32_VMX_EPT_VPID_CAP);
    374 
    375         if (pVmxMsrs->ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_VMFUNC)
    376             pVmxMsrs->u64VmFunc = ASMRdMsr(MSR_IA32_VMX_VMFUNC);
    377     }
    378 }
    379 
    380 
    381 /**
    382331 * Intel specific initialization code.
    383332 *
     
    387336{
    388337    /* Read this MSR now as it may be useful for error reporting when initializing VT-x fails. */
    389     g_HmR0.vmx.Msrs.u64FeatCtrl = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);
     338    g_HmR0.hwvirt.Msrs.u.vmx.u64FeatCtrl = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);
    390339
    391340    /*
     
    394343     */
    395344    int rc = g_HmR0.rcInit = SUPR0EnableVTx(true /* fEnable */);
    396     g_HmR0.vmx.fUsingSUPR0EnableVTx = rc != VERR_NOT_SUPPORTED;
    397     if (g_HmR0.vmx.fUsingSUPR0EnableVTx)
     345    g_HmR0.hwvirt.u.vmx.fUsingSUPR0EnableVTx = rc != VERR_NOT_SUPPORTED;
     346    if (g_HmR0.hwvirt.u.vmx.fUsingSUPR0EnableVTx)
    398347    {
    399348        AssertLogRelMsg(rc == VINF_SUCCESS || rc == VERR_VMX_IN_VMX_ROOT_MODE || rc == VERR_VMX_NO_VMX, ("%Rrc\n", rc));
    400349        if (RT_SUCCESS(rc))
    401350        {
    402             g_HmR0.vmx.fSupported = true;
     351            g_HmR0.hwvirt.u.vmx.fSupported = true;
    403352            rc = SUPR0EnableVTx(false /* fEnable */);
    404353            AssertLogRelRC(rc);
     
    417366    {
    418367        /* Read CR4 and EFER for logging/diagnostic purposes. */
    419         g_HmR0.vmx.u64HostCr4  = ASMGetCR4();
    420         g_HmR0.vmx.u64HostEfer = ASMRdMsr(MSR_K6_EFER);
    421 
    422         /* Read all the VMX MSRs for determining which VMX features we can use later. */
    423         hmR0InitIntelReadVmxMsrs(&g_HmR0.vmx.Msrs);
     368        g_HmR0.hwvirt.u.vmx.u64HostCr4  = ASMGetCR4();
     369        g_HmR0.hwvirt.u.vmx.u64HostEfer = ASMRdMsr(MSR_K6_EFER);
     370
     371        /* Get VMX MSRs for determining VMX features we can ultimately use. */
     372        SUPR0GetHwvirtMsrs(&g_HmR0.hwvirt.Msrs, SUPVTCAPS_VT_X, false /* fForce */);
    424373
    425374        /*
    426          * KVM workaround: Intel SDM section 34.15.5 describes that MSR_IA32_SMM_MONITOR_CTL
    427          * depends on bit 49 of MSR_IA32_VMX_BASIC while table 35-2 says that this MSR is
    428          * available if either VMX or SMX is supported.
     375         * Nested KVM workaround: Intel SDM section 34.15.5 describes that
     376         * MSR_IA32_SMM_MONITOR_CTL depends on bit 49 of MSR_IA32_VMX_BASIC while
     377         * table 35-2 says that this MSR is available if either VMX or SMX is supported.
    429378         */
    430         if (RT_BF_GET(g_HmR0.vmx.Msrs.u64Basic, VMX_BF_BASIC_DUAL_MON))
    431             g_HmR0.vmx.u64HostSmmMonitorCtl = ASMRdMsr(MSR_IA32_SMM_MONITOR_CTL);
     379        uint64_t const uVmxBasicMsr = g_HmR0.hwvirt.Msrs.u.vmx.u64Basic;
     380        if (RT_BF_GET(uVmxBasicMsr, VMX_BF_BASIC_DUAL_MON))
     381            g_HmR0.hwvirt.u.vmx.u64HostSmmMonitorCtl = ASMRdMsr(MSR_IA32_SMM_MONITOR_CTL);
    432382
    433383        /* Initialize VPID - 16 bits ASID. */
    434         g_HmR0.uMaxAsid = 0x10000; /* exclusive */
     384        g_HmR0.hwvirt.uMaxAsid = 0x10000; /* exclusive */
    435385
    436386        /*
     
    438388         * to really verify if VT-x is usable.
    439389         */
    440         if (!g_HmR0.vmx.fUsingSUPR0EnableVTx)
     390        if (!g_HmR0.hwvirt.u.vmx.fUsingSUPR0EnableVTx)
    441391        {
    442392            /* Allocate a temporary VMXON region. */
     
    453403
    454404            /* Set revision dword at the beginning of the VMXON structure. */
    455             *(uint32_t *)pvScatchPage = RT_BF_GET(g_HmR0.vmx.Msrs.u64Basic, VMX_BF_BASIC_VMCS_ID);
     405            *(uint32_t *)pvScatchPage = RT_BF_GET(uVmxBasicMsr, VMX_BF_BASIC_VMCS_ID);
    456406
    457407            /* Make sure we don't get rescheduled to another CPU during this probe. */
     
    459409
    460410            /* Check CR4.VMXE. */
    461             g_HmR0.vmx.u64HostCr4 = ASMGetCR4();
    462             if (!(g_HmR0.vmx.u64HostCr4 & X86_CR4_VMXE))
     411            g_HmR0.hwvirt.u.vmx.u64HostCr4 = ASMGetCR4();
     412            if (!(g_HmR0.hwvirt.u.vmx.u64HostCr4 & X86_CR4_VMXE))
    463413            {
    464414                /* In theory this bit could be cleared behind our back. Which would cause #UD
    465415                   faults when we try to execute the VMX instructions... */
    466                 ASMSetCR4(g_HmR0.vmx.u64HostCr4 | X86_CR4_VMXE);
     416                ASMSetCR4(g_HmR0.hwvirt.u.vmx.u64HostCr4 | X86_CR4_VMXE);
    467417            }
    468418
     
    475425            if (RT_SUCCESS(rc))
    476426            {
    477                 g_HmR0.vmx.fSupported = true;
     427                g_HmR0.hwvirt.u.vmx.fSupported = true;
    478428                VMXDisable();
    479429            }
     
    492442                 */
    493443                g_HmR0.rcInit = VERR_VMX_IN_VMX_ROOT_MODE;
    494                 Assert(g_HmR0.vmx.fSupported == false);
     444                Assert(g_HmR0.hwvirt.u.vmx.fSupported == false);
    495445            }
    496446
     
    499449             * set before (some software could incorrectly think it is in VMX mode).
    500450             */
    501             ASMSetCR4(g_HmR0.vmx.u64HostCr4);
     451            ASMSetCR4(g_HmR0.hwvirt.u.vmx.u64HostCr4);
    502452            ASMSetFlags(fEFlags);
    503453
     
    505455        }
    506456
    507         if (g_HmR0.vmx.fSupported)
     457        if (g_HmR0.hwvirt.u.vmx.fSupported)
    508458        {
    509459            rc = VMXR0GlobalInit();
     
    528478             * Timer Does Not Count Down at the Rate Specified" CPU erratum.
    529479             */
    530             if (g_HmR0.vmx.Msrs.PinCtls.n.allowed1 & VMX_PIN_CTLS_PREEMPT_TIMER)
     480            uint32_t const fPinCtls = g_HmR0.hwvirt.Msrs.u.vmx.PinCtls.n.allowed1;
     481            if (fPinCtls & VMX_PIN_CTLS_PREEMPT_TIMER)
    531482            {
    532                 g_HmR0.vmx.fUsePreemptTimer   = true;
    533                 g_HmR0.vmx.cPreemptTimerShift = RT_BF_GET(g_HmR0.vmx.Msrs.u64Misc, VMX_BF_MISC_PREEMPT_TIMER_TSC);
     483                uint64_t const uVmxMiscMsr = g_HmR0.hwvirt.Msrs.u.vmx.u64Misc;
     484                g_HmR0.hwvirt.u.vmx.fUsePreemptTimer   = true;
     485                g_HmR0.hwvirt.u.vmx.cPreemptTimerShift = RT_BF_GET(uVmxMiscMsr, VMX_BF_MISC_PREEMPT_TIMER_TSC);
    534486                if (hmR0InitIntelIsSubjectToVmxPreemptTimerErratum())
    535                     g_HmR0.vmx.cPreemptTimerShift = 0; /* This is about right most of the time here. */
     487                    g_HmR0.hwvirt.u.vmx.cPreemptTimerShift = 0; /* This is about right most of the time here. */
    536488            }
    537489        }
     
    575527    /* Query AMD features. */
    576528    uint32_t u32Dummy;
    577     ASMCpuId(0x8000000a, &g_HmR0.svm.u32Rev, &g_HmR0.uMaxAsid, &u32Dummy, &g_HmR0.svm.u32Features);
     529    ASMCpuId(0x8000000a, &g_HmR0.hwvirt.u.svm.u32Rev, &g_HmR0.hwvirt.uMaxAsid, &u32Dummy, &g_HmR0.hwvirt.u.svm.u32Features);
    578530
    579531    /*
     
    593545    if (RT_SUCCESS(rc))
    594546    {
    595         /* Read the HWCR MSR for diagnostics. */
    596         g_HmR0.svm.u64MsrHwcr = ASMRdMsr(MSR_K8_HWCR);
    597         g_HmR0.svm.fSupported = true;
     547        SUPR0GetHwvirtMsrs(&g_HmR0.hwvirt.Msrs, SUPVTCAPS_AMD_V, false /* fForce */);
     548        g_HmR0.hwvirt.u.svm.fSupported = true;
    598549    }
    599550    else
     
    685636     * when brought offline/online or suspending/resuming.
    686637     */
    687     if (!g_HmR0.vmx.fUsingSUPR0EnableVTx)
     638    if (!g_HmR0.hwvirt.u.vmx.fUsingSUPR0EnableVTx)
    688639    {
    689640        rc = RTMpNotificationRegister(hmR0MpEventCallback, NULL);
     
    707658{
    708659    int rc;
    709     if (   g_HmR0.vmx.fSupported
    710         && g_HmR0.vmx.fUsingSUPR0EnableVTx)
     660    if (   g_HmR0.hwvirt.u.vmx.fSupported
     661        && g_HmR0.hwvirt.u.vmx.fUsingSUPR0EnableVTx)
    711662    {
    712663        /*
     
    715666        Assert(g_HmR0.fGlobalInit);
    716667
    717         if (g_HmR0.vmx.fCalledSUPR0EnableVTx)
     668        if (g_HmR0.hwvirt.u.vmx.fCalledSUPR0EnableVTx)
    718669        {
    719670            rc = SUPR0EnableVTx(false /* fEnable */);
    720             g_HmR0.vmx.fCalledSUPR0EnableVTx = false;
     671            g_HmR0.hwvirt.u.vmx.fCalledSUPR0EnableVTx = false;
    721672        }
    722673        else
     
    731682    else
    732683    {
    733         Assert(!g_HmR0.vmx.fSupported || !g_HmR0.vmx.fUsingSUPR0EnableVTx);
     684        Assert(!g_HmR0.hwvirt.u.vmx.fSupported || !g_HmR0.hwvirt.u.vmx.fUsingSUPR0EnableVTx);
    734685
    735686        /* Doesn't really matter if this fails. */
     
    778729     *        should move into their respective modules. */
    779730    /* Finally, call global VT-x/AMD-V termination. */
    780     if (g_HmR0.vmx.fSupported)
     731    if (g_HmR0.hwvirt.u.vmx.fSupported)
    781732        VMXR0GlobalTerm();
    782     else if (g_HmR0.svm.fSupported)
     733    else if (g_HmR0.hwvirt.u.svm.fSupported)
    783734        SVMR0GlobalTerm();
    784735
     
    849800
    850801    int rc;
    851     if (g_HmR0.vmx.fSupported && g_HmR0.vmx.fUsingSUPR0EnableVTx)
    852         rc = g_HmR0.pfnEnableCpu(pHostCpu, pVM, NULL /* pvCpuPage */, NIL_RTHCPHYS, true, &g_HmR0.vmx.Msrs);
     802    if (   g_HmR0.hwvirt.u.vmx.fSupported
     803        && g_HmR0.hwvirt.u.vmx.fUsingSUPR0EnableVTx)
     804        rc = g_HmR0.pfnEnableCpu(pHostCpu, pVM, NULL /* pvCpuPage */, NIL_RTHCPHYS, true, &g_HmR0.hwvirt.Msrs);
    853805    else
    854806    {
    855807        AssertLogRelMsgReturn(pHostCpu->hMemObj != NIL_RTR0MEMOBJ, ("hmR0EnableCpu failed idCpu=%u.\n", idCpu), VERR_HM_IPE_1);
    856         if (g_HmR0.vmx.fSupported)
    857             rc = g_HmR0.pfnEnableCpu(pHostCpu, pVM, pHostCpu->pvMemObj, pHostCpu->HCPhysMemObj, false, &g_HmR0.vmx.Msrs);
    858         else
    859             rc = g_HmR0.pfnEnableCpu(pHostCpu, pVM, pHostCpu->pvMemObj, pHostCpu->HCPhysMemObj, false, NULL /* pvArg */);
     808        rc = g_HmR0.pfnEnableCpu(pHostCpu, pVM, pHostCpu->pvMemObj, pHostCpu->HCPhysMemObj, false, &g_HmR0.hwvirt.Msrs);
    860809    }
    861810    if (RT_SUCCESS(rc))
    862811        pHostCpu->fConfigured = true;
    863 
    864812    return rc;
    865813}
     
    925873
    926874    int rc;
    927     if (   g_HmR0.vmx.fSupported
    928         && g_HmR0.vmx.fUsingSUPR0EnableVTx)
     875    if (   g_HmR0.hwvirt.u.vmx.fSupported
     876        && g_HmR0.hwvirt.u.vmx.fUsingSUPR0EnableVTx)
    929877    {
    930878        /*
     
    934882        if (RT_SUCCESS(rc))
    935883        {
    936             g_HmR0.vmx.fCalledSUPR0EnableVTx = true;
     884            g_HmR0.hwvirt.u.vmx.fCalledSUPR0EnableVTx = true;
    937885            /* If the host provides a VT-x init API, then we'll rely on that for global init. */
    938886            g_HmR0.fGlobalInit = pVM->hm.s.fGlobalInit = true;
     
    1029977    PHMGLOBALCPUINFO pHostCpu = &g_HmR0.aCpuInfo[idCpu];
    1030978
    1031     Assert(!g_HmR0.vmx.fSupported || !g_HmR0.vmx.fUsingSUPR0EnableVTx);
     979    Assert(!g_HmR0.hwvirt.u.vmx.fSupported || !g_HmR0.hwvirt.u.vmx.fUsingSUPR0EnableVTx);
    1032980    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    1033981    Assert(idCpu == (RTCPUID)RTMpCpuIdToSetIndex(idCpu)); /** @todo fix idCpu == index assumption (rainy day) */
     
    10981046{
    10991047    NOREF(pvData);
    1100     Assert(!g_HmR0.vmx.fSupported || !g_HmR0.vmx.fUsingSUPR0EnableVTx);
     1048    Assert(!g_HmR0.hwvirt.u.vmx.fSupported || !g_HmR0.hwvirt.u.vmx.fUsingSUPR0EnableVTx);
    11011049
    11021050    /*
     
    11391087{
    11401088    NOREF(pvUser);
    1141     Assert(!g_HmR0.vmx.fSupported || !g_HmR0.vmx.fUsingSUPR0EnableVTx);
     1089    Assert(!g_HmR0.hwvirt.u.vmx.fSupported || !g_HmR0.hwvirt.u.vmx.fUsingSUPR0EnableVTx);
    11421090
    11431091#ifdef LOG_ENABLED
     
    11711119            /* Reinit the CPUs from scratch as the suspend state might have
    11721120               messed with the MSRs. (lousy BIOSes as usual) */
    1173             if (g_HmR0.vmx.fSupported)
     1121            if (g_HmR0.hwvirt.u.vmx.fSupported)
    11741122                rc = RTMpOnAll(hmR0InitIntelCpu, &FirstRc, NULL);
    11751123            else
     
    11981146
    11991147/**
    1200  * Pre-initializes ring-0 HM per-VM structures.
    1201  *
    1202  * This is the first HM ring-0 function to be called when a VM is created. It is
    1203  * called after VT-x/AMD-V has been detected and initialized, and -after- HM's CFGM
    1204  * settings have been queried.
    1205  *
    1206  * This copies relevant, global HM structures into per-VM data and initializes some
    1207  * per-VCPU data.
     1148 * Does ring-0 per-VM HM initialization.
     1149 *
     1150 * This will call the CPU specific init. routine which may initialize and allocate
     1151 * resources for virtual CPUs.
    12081152 *
    12091153 * @returns VBox status code.
    12101154 * @param   pVM         The cross context VM structure.
    12111155 *
    1212  * @remarks This is called during HMR3Init(). Be really careful what we call here as
    1213  *          almost no VM machinery is up at this point (e.g. PGM, CPUM).
    1214  */
    1215 VMMR0_INT_DECL(int) HMR0PreInitVM(PVM pVM)
     1156 * @remarks This is called after HMR3Init(), see vmR3CreateU() and
     1157 *          vmR3InitRing3().
     1158 */
     1159VMMR0_INT_DECL(int) HMR0InitVM(PVM pVM)
    12161160{
    12171161    AssertReturn(pVM, VERR_INVALID_PARAMETER);
     1162
     1163    /* Make sure we don't touch HM after we've disabled HM in preparation of a suspend. */
     1164    if (ASMAtomicReadBool(&g_HmR0.fSuspended))
     1165        return VERR_HM_SUSPEND_PENDING;
    12181166
    12191167    /*
    12201168     * Copy globals to the VM structure.
    12211169     */
    1222     pVM->hm.s.vmx.fSupported      = g_HmR0.vmx.fSupported;
    1223     pVM->hm.s.svm.fSupported      = g_HmR0.svm.fSupported;
    12241170    Assert(!(pVM->hm.s.vmx.fSupported && pVM->hm.s.svm.fSupported));
    12251171    if (pVM->hm.s.vmx.fSupported)
    12261172    {
    1227         pVM->hm.s.vmx.fUsePreemptTimer     &= g_HmR0.vmx.fUsePreemptTimer;     /* Can be overridden by CFGM. See HMR3Init(). */
    1228         pVM->hm.s.vmx.cPreemptTimerShift    = g_HmR0.vmx.cPreemptTimerShift;
    1229         pVM->hm.s.vmx.u64HostCr4            = g_HmR0.vmx.u64HostCr4;
    1230         pVM->hm.s.vmx.u64HostEfer           = g_HmR0.vmx.u64HostEfer;
    1231         pVM->hm.s.vmx.u64HostSmmMonitorCtl  = g_HmR0.vmx.u64HostSmmMonitorCtl;
    1232         pVM->hm.s.vmx.Msrs                  = g_HmR0.vmx.Msrs;
     1173        pVM->hm.s.vmx.fUsePreemptTimer     &= g_HmR0.hwvirt.u.vmx.fUsePreemptTimer; /* Can be overridden by CFGM. See HMR3Init(). */
     1174        pVM->hm.s.vmx.cPreemptTimerShift    = g_HmR0.hwvirt.u.vmx.cPreemptTimerShift;
     1175        pVM->hm.s.vmx.u64HostCr4            = g_HmR0.hwvirt.u.vmx.u64HostCr4;
     1176        pVM->hm.s.vmx.u64HostEfer           = g_HmR0.hwvirt.u.vmx.u64HostEfer;
     1177        pVM->hm.s.vmx.u64HostSmmMonitorCtl  = g_HmR0.hwvirt.u.vmx.u64HostSmmMonitorCtl;
     1178        pVM->hm.s.vmx.Msrs                  = g_HmR0.hwvirt.Msrs.u.vmx;
    12331179    }
    12341180    else if (pVM->hm.s.svm.fSupported)
    12351181    {
    1236         pVM->hm.s.svm.u64MsrHwcr  = g_HmR0.svm.u64MsrHwcr;
    1237         pVM->hm.s.svm.u32Rev      = g_HmR0.svm.u32Rev;
    1238         pVM->hm.s.svm.u32Features = g_HmR0.svm.u32Features;
     1182        pVM->hm.s.svm.u32Rev      = g_HmR0.hwvirt.u.svm.u32Rev;
     1183        pVM->hm.s.svm.u32Features = g_HmR0.hwvirt.u.svm.u32Features;
     1184        pVM->hm.s.svm.u64MsrHwcr  = g_HmR0.hwvirt.Msrs.u.svm.u64MsrHwcr;
    12391185    }
    12401186    pVM->hm.s.rcInit              = g_HmR0.rcInit;
    1241     pVM->hm.s.uMaxAsid            = g_HmR0.uMaxAsid;
     1187    pVM->hm.s.uMaxAsid            = g_HmR0.hwvirt.uMaxAsid;
    12421188
    12431189    /*
     
    12641210        AssertReturn(!pVCpu->hm.s.uCurrentAsid, VERR_HM_IPE_3);
    12651211    }
    1266 
    1267     return VINF_SUCCESS;
    1268 }
    1269 
    1270 
    1271 /**
    1272  * Does ring-0 per-VM HM initialization.
    1273  *
    1274  * This will call the CPU specific init. routine which may initialize and allocate
    1275  * resources for virtual CPUs.
    1276  *
    1277  * @returns VBox status code.
    1278  * @param   pVM         The cross context VM structure.
    1279  *
    1280  * @remarks This is called after HMR3Init(), see vmR3CreateU() and
    1281  *          vmR3InitRing3().
    1282  */
    1283 VMMR0_INT_DECL(int) HMR0InitVM(PVM pVM)
    1284 {
    1285     AssertReturn(pVM, VERR_INVALID_PARAMETER);
    1286 
    1287     /* Make sure we don't touch HM after we've disabled HM in preparation of a suspend. */
    1288     if (ASMAtomicReadBool(&g_HmR0.fSuspended))
    1289         return VERR_HM_SUSPEND_PENDING;
    12901212
    12911213    /*
     
    13611283    if (!g_HmR0.fGlobalInit)
    13621284    {
    1363         Assert(!g_HmR0.vmx.fSupported || !g_HmR0.vmx.fUsingSUPR0EnableVTx);
     1285        Assert(!g_HmR0.hwvirt.u.vmx.fSupported || !g_HmR0.hwvirt.u.vmx.fUsingSUPR0EnableVTx);
    13641286        rc = hmR0EnableCpu(pVM, idCpu);
    13651287        if (RT_FAILURE(rc))
     
    13761298    if (!g_HmR0.fGlobalInit)
    13771299    {
    1378         Assert(!g_HmR0.vmx.fSupported || !g_HmR0.vmx.fUsingSUPR0EnableVTx);
     1300        Assert(!g_HmR0.hwvirt.u.vmx.fSupported || !g_HmR0.hwvirt.u.vmx.fUsingSUPR0EnableVTx);
    13791301        int rc2 = hmR0DisableCpu(idCpu);
    13801302        AssertRC(rc2);
     
    14091331
    14101332    /* Reload host-state (back from ring-3/migrated CPUs) and shared guest/host bits. */
    1411     if (g_HmR0.vmx.fSupported)
     1333    if (g_HmR0.hwvirt.u.vmx.fSupported)
    14121334        pVCpu->hm.s.fCtxChanged |= HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE;
    14131335    else
     
    14461368    PHMGLOBALCPUINFO pHostCpu = &g_HmR0.aCpuInfo[idCpu];
    14471369    Assert(pHostCpu);
    1448     if (g_HmR0.vmx.fSupported)
     1370    if (g_HmR0.hwvirt.u.vmx.fSupported)
    14491371    {
    14501372        Assert((pVCpu->hm.s.fCtxChanged & (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE))
     
    17301652
    17311653    /* No such issues with AMD-V */
    1732     if (!g_HmR0.vmx.fSupported)
     1654    if (!g_HmR0.hwvirt.u.vmx.fSupported)
    17331655        return VINF_SUCCESS;
    17341656
     
    17521674    /* When using SUPR0EnableVTx we must let the host suspend and resume VT-x,
    17531675       regardless of whether we're currently using VT-x or not. */
    1754     if (g_HmR0.vmx.fUsingSUPR0EnableVTx)
     1676    if (g_HmR0.hwvirt.u.vmx.fUsingSUPR0EnableVTx)
    17551677    {
    17561678        *pfVTxDisabled = SUPR0SuspendVTxOnCpu();
     
    17961718        return;         /* nothing to do */
    17971719
    1798     Assert(g_HmR0.vmx.fSupported);
    1799     if (g_HmR0.vmx.fUsingSUPR0EnableVTx)
     1720    Assert(g_HmR0.hwvirt.u.vmx.fSupported);
     1721    if (g_HmR0.hwvirt.u.vmx.fUsingSUPR0EnableVTx)
    18001722        SUPR0ResumeVTxOnCpu(fVTxDisabled);
    18011723    else
     
    18101732                         && pHostCpu->HCPhysMemObj != NIL_RTHCPHYS);
    18111733
    1812         VMXR0EnableCpu(pHostCpu, pVM, pHostCpu->pvMemObj, pHostCpu->HCPhysMemObj, false, &g_HmR0.vmx.Msrs);
     1734        VMXR0EnableCpu(pHostCpu, pVM, pHostCpu->pvMemObj, pHostCpu->HCPhysMemObj, false, &g_HmR0.hwvirt.Msrs);
    18131735    }
    18141736}
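
    For reviewers chasing call sites, the renames in this file all follow one mechanical pattern. A summary of the field relocations performed by the hunks above:

        /* Field relocation summary for g_HmR0 (per the hunks above):
         *   g_HmR0.vmx.<field>      -> g_HmR0.hwvirt.u.vmx.<field>
         *   g_HmR0.svm.<field>      -> g_HmR0.hwvirt.u.svm.<field>
         *   g_HmR0.uMaxAsid         -> g_HmR0.hwvirt.uMaxAsid
         *   g_HmR0.vmx.Msrs         -> g_HmR0.hwvirt.Msrs.u.vmx   (VMXMSRS now embedded in SUPHWVIRTMSRS)
         *   g_HmR0.svm.u64MsrHwcr   -> g_HmR0.hwvirt.Msrs.u.svm.u64MsrHwcr
         */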
  • trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp

    r76402 r76464  
    525525 * @param   HCPhysCpuPage   Physical address of the global CPU page.
    526526 * @param   fEnabledByHost  Whether the host OS has already initialized AMD-V.
    527  * @param   pvArg           Unused on AMD-V.
     527 * @param   pHwvirtMsrs     Pointer to the hardware-virtualization MSRs (currently
     528 *                          unused).
    528529 */
    529530VMMR0DECL(int) SVMR0EnableCpu(PHMGLOBALCPUINFO pHostCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage, bool fEnabledByHost,
    530                               void *pvArg)
     531                              PCSUPHWVIRTMSRS pHwvirtMsrs)
    531532{
    532533    Assert(!fEnabledByHost);
     
    536537    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    537538
    538     NOREF(pvArg);
    539     NOREF(fEnabledByHost);
     539    RT_NOREF2(fEnabledByHost, pHwvirtMsrs);
    540540
    541541    /* Paranoid: Disable interrupt as, in theory, interrupt handlers might mess with EFER. */
  • trunk/src/VBox/VMM/VMMR0/HMSVMR0.h

    r73606 r76464  
    3939VMMR0DECL(void)         SVMR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, PVMCPU pVCpu, bool fGlobalInit);
    4040VMMR0DECL(int)          SVMR0EnableCpu(PHMGLOBALCPUINFO pHostCpu, PVM pVM, void *pvPageCpu, RTHCPHYS HCPhysCpuPage,
    41                                        bool fEnabledBySystem, void *pvArg);
     41                                       bool fEnabledBySystem, PCSUPHWVIRTMSRS pHwvirtMsrs);
    4242VMMR0DECL(int)          SVMR0DisableCpu(PHMGLOBALCPUINFO pHostCpu, void *pvPageCpu, RTHCPHYS pPageCpuPhys);
    4343VMMR0DECL(int)          SVMR0InitVM(PVM pVM);
  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp

    r76397 r76464  
    224224            uint8_t const uXcptTmp = VMX_ENTRY_INT_INFO_VECTOR((a_pVCpu)->hm.s.Event.u64IntInfo); \
    225225            Log4Func(("Memory operand decoding failed, raising xcpt %#x\n", uXcptTmp)); \
     226            NOREF(uXcptTmp); \
    226227            return VINF_SUCCESS; \
    227228        } \
     
    11271128 * @param   fEnabledByHost  Set if SUPR0EnableVTx() or similar was used to
    11281129 *                          enable VT-x on the host.
    1129  * @param   pvMsrs          Opaque pointer to VMXMSRS struct.
     1130 * @param   pHwvirtMsrs     Pointer to the hardware-virtualization MSRs.
    11301131 */
    11311132VMMR0DECL(int) VMXR0EnableCpu(PHMGLOBALCPUINFO pHostCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage, bool fEnabledByHost,
    1132                               void *pvMsrs)
     1133                              PCSUPHWVIRTMSRS pHwvirtMsrs)
    11331134{
    11341135    Assert(pHostCpu);
    1135     Assert(pvMsrs);
     1136    Assert(pHwvirtMsrs);
    11361137    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    11371138
     
    11491150     * invalidated when flushing by VPID.
    11501151     */
    1151     PVMXMSRS pMsrs = (PVMXMSRS)pvMsrs;
     1152    PCVMXMSRS pMsrs = &pHwvirtMsrs->u.vmx;
    11521153    if (pMsrs->u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_ALL_CONTEXTS)
    11531154    {
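
    The opaque void pointer round-trip is gone: VMXR0EnableCpu now receives a typed PCSUPHWVIRTMSRS and picks out the VMX half without a cast. A minimal sketch of the new pattern, assuming the SUPHWVIRTMSRS layout sketched near the top of this page (the function name below is hypothetical):

        static void vmxR0TypedMsrsSketch(PCSUPHWVIRTMSRS pHwvirtMsrs)
        {
            /* Old: PVMXMSRS pMsrs = (PVMXMSRS)pvMsrs;  -- cast back from void*. */
            PCVMXMSRS pMsrs = &pHwvirtMsrs->u.vmx;      /* New: typed, const access. */
            if (pMsrs->u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_ALL_CONTEXTS)
            {
                /* ... flush all EPT contexts, as the real VMXR0EnableCpu does ... */
            }
        }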
  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.h

    r72967 r76464  
    3232VMMR0DECL(void)         VMXR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, PVMCPU pVCpu, bool fGlobalInit);
    3333VMMR0DECL(int)          VMXR0EnableCpu(PHMGLOBALCPUINFO pHostCpu, PVM pVM, void *pvPageCpu, RTHCPHYS pPageCpuPhys,
    34                                        bool fEnabledBySystem, void *pvMsrs);
     34                                       bool fEnabledBySystem, PCSUPHWVIRTMSRS pHwvirtMsrs);
    3535VMMR0DECL(int)          VMXR0DisableCpu(PHMGLOBALCPUINFO pHostCpu, void *pvPageCpu, RTHCPHYS pPageCpuPhys);
    3636VMMR0DECL(int)          VMXR0GlobalInit(void);
  • trunk/src/VBox/VMM/VMMR0/VMMR0.cpp

    r76290 r76464  
    19301930
    19311931        /*
    1932          * Pre-initialize hardware-assisted mode per-VM data.
    1933          */
    1934         case VMMR0_DO_HM_PRE_INIT:
    1935             rc = HMR0PreInitVM(pVM);
    1936             VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
    1937             break;
    1938 
    1939         /*
    19401932         * Switch to RC to execute Hypervisor function.
    19411933         */
  • trunk/src/VBox/VMM/VMMR3/CPUM.cpp

    r76310 r76464  
    788788                for (VMCPUID i = 0; i < pVM->cCpus; i++)
    789789                    pVM->aCpus[i].cpum.s.fUseFlags |= CPUM_USE_FFXSR_LEAKY;
    790                 Log(("CPUMR3Init: host CPU has leaky fxsave/fxrstor behaviour\n"));
     790                Log(("CPUM: Host CPU has leaky fxsave/fxrstor behaviour\n"));
    791791            }
    792792        }
     
    902902
    903903/**
    904  * Initializes (or re-initializes) per-VCPU SVM hardware virtualization state.
     904 * Resets per-VCPU SVM hardware virtualization state.
    905905 *
    906906 * @param   pVCpu   The cross context virtual CPU structure.
    907907 */
    908 DECLINLINE(void) cpumR3InitSvmHwVirtState(PVMCPU pVCpu)
     908DECLINLINE(void) cpumR3ResetSvmHwVirtState(PVMCPU pVCpu)
    909909{
    910910    PCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
     
    11141114
    11151115/**
    1116  * Initializes (or re-initializes) per-VCPU VMX hardware virtualization state.
     1116 * Resets per-VCPU VMX hardware virtualization state.
    11171117 *
    11181118 * @param   pVCpu   The cross context virtual CPU structure.
    11191119 */
    1120 DECLINLINE(void) cpumR3InitVmxHwVirtState(PVMCPU pVCpu)
     1120DECLINLINE(void) cpumR3ResetVmxHwVirtState(PVMCPU pVCpu)
    11211121{
    11221122    PCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
     
    11571157        pHlp->pfnPrintf(pHlp, "  Mnemonic - Description                                  = guest (host)\n");
    11581158        VMXFEATDUMP("VMX - Virtual-Machine Extensions                       ", fVmx);
    1159         if (!pGuestFeatures->fVmx)
    1160             return;
    11611159        /* Basic. */
    11621160        VMXFEATDUMP("InsOutInfo - INS/OUTS instruction info.                ", fVmxInsOutInfo);
     
    12501248        || pVM->bMainExecutionEngine == VM_EXEC_ENGINE_NATIVE_API)
    12511249        return true;
     1250#else
     1251    NOREF(pVM);
    12521252#endif
    12531253    return false;
     
    12561256
    12571257/**
    1258  * Initializes the guest VMX MSRs from guest-CPU features.
    1259  *
    1260  * @param   pVM     The cross context VM structure.
    1261  */
    1262 static void cpumR3InitGuestVmxMsrs(PVM pVM)
    1263 {
    1264     PVMCPU         pVCpu0    = &pVM->aCpus[0];
    1265     PCCPUMFEATURES pFeatures = &pVM->cpum.s.GuestFeatures;
    1266     PVMXMSRS       pVmxMsrs  = &pVCpu0->cpum.s.Guest.hwvirt.vmx.Msrs;
    1267 
    1268     Assert(pFeatures->fVmx);
    1269     RT_ZERO(*pVmxMsrs);
    1270 
    1271     /* Feature control. */
    1272     pVmxMsrs->u64FeatCtrl = MSR_IA32_FEATURE_CONTROL_LOCK | MSR_IA32_FEATURE_CONTROL_VMXON;
    1273 
    1274     /* Basic information. */
    1275     {
    1276         uint64_t const u64Basic = RT_BF_MAKE(VMX_BF_BASIC_VMCS_ID,         VMX_V_VMCS_REVISION_ID   )
    1277                                 | RT_BF_MAKE(VMX_BF_BASIC_VMCS_SIZE,       VMX_V_VMCS_SIZE          )
    1278                                 | RT_BF_MAKE(VMX_BF_BASIC_PHYSADDR_WIDTH,  !pFeatures->fLongMode    )
    1279                                 | RT_BF_MAKE(VMX_BF_BASIC_DUAL_MON,        0                        )
    1280                                 | RT_BF_MAKE(VMX_BF_BASIC_VMCS_MEM_TYPE,   VMX_BASIC_MEM_TYPE_WB    )
    1281                                 | RT_BF_MAKE(VMX_BF_BASIC_VMCS_INS_OUTS,   pFeatures->fVmxInsOutInfo)
    1282                                 | RT_BF_MAKE(VMX_BF_BASIC_TRUE_CTLS,       0                        );
    1283         pVmxMsrs->u64Basic = u64Basic;
    1284     }
    1285 
    1286     /* Pin-based VM-execution controls. */
    1287     {
    1288         uint32_t const fFeatures = (pFeatures->fVmxExtIntExit   << VMX_BF_PIN_CTLS_EXT_INT_EXIT_SHIFT )
    1289                                  | (pFeatures->fVmxNmiExit      << VMX_BF_PIN_CTLS_NMI_EXIT_SHIFT     )
    1290                                  | (pFeatures->fVmxVirtNmi      << VMX_BF_PIN_CTLS_VIRT_NMI_SHIFT     )
    1291                                  | (pFeatures->fVmxPreemptTimer << VMX_BF_PIN_CTLS_PREEMPT_TIMER_SHIFT)
    1292                                  | (pFeatures->fVmxPostedInt    << VMX_BF_PIN_CTLS_POSTED_INT_SHIFT   );
    1293         uint32_t const fAllowed0 = VMX_PIN_CTLS_DEFAULT1;
    1294         uint32_t const fAllowed1 = fFeatures | VMX_PIN_CTLS_DEFAULT1;
    1295         AssertMsg((fAllowed0 & fAllowed1) == fAllowed0, ("fAllowed0=%#RX32 fAllowed1=%#RX32 fFeatures=%#RX32\n",
    1296                                                          fAllowed0, fAllowed1, fFeatures));
    1297         pVmxMsrs->PinCtls.u = RT_MAKE_U64(fAllowed0, fAllowed1);
    1298     }
    1299 
    1300     /* Processor-based VM-execution controls. */
    1301     {
    1302         uint32_t const fFeatures = (pFeatures->fVmxIntWindowExit     << VMX_BF_PROC_CTLS_INT_WINDOW_EXIT_SHIFT   )
    1303                                  | (pFeatures->fVmxTscOffsetting     << VMX_BF_PROC_CTLS_USE_TSC_OFFSETTING_SHIFT)
    1304                                  | (pFeatures->fVmxHltExit           << VMX_BF_PROC_CTLS_HLT_EXIT_SHIFT          )
    1305                                  | (pFeatures->fVmxInvlpgExit        << VMX_BF_PROC_CTLS_INVLPG_EXIT_SHIFT       )
    1306                                  | (pFeatures->fVmxMwaitExit         << VMX_BF_PROC_CTLS_MWAIT_EXIT_SHIFT        )
    1307                                  | (pFeatures->fVmxRdpmcExit         << VMX_BF_PROC_CTLS_RDPMC_EXIT_SHIFT        )
    1308                                  | (pFeatures->fVmxRdtscExit         << VMX_BF_PROC_CTLS_RDTSC_EXIT_SHIFT        )
    1309                                  | (pFeatures->fVmxCr3LoadExit       << VMX_BF_PROC_CTLS_CR3_LOAD_EXIT_SHIFT     )
    1310                                  | (pFeatures->fVmxCr3StoreExit      << VMX_BF_PROC_CTLS_CR3_STORE_EXIT_SHIFT    )
    1311                                  | (pFeatures->fVmxCr8LoadExit       << VMX_BF_PROC_CTLS_CR8_LOAD_EXIT_SHIFT     )
    1312                                  | (pFeatures->fVmxCr8StoreExit      << VMX_BF_PROC_CTLS_CR8_STORE_EXIT_SHIFT    )
    1313                                  | (pFeatures->fVmxUseTprShadow      << VMX_BF_PROC_CTLS_USE_TPR_SHADOW_SHIFT    )
    1314                                  | (pFeatures->fVmxNmiWindowExit     << VMX_BF_PROC_CTLS_NMI_WINDOW_EXIT_SHIFT   )
    1315                                  | (pFeatures->fVmxMovDRxExit        << VMX_BF_PROC_CTLS_MOV_DR_EXIT_SHIFT       )
    1316                                  | (pFeatures->fVmxUncondIoExit      << VMX_BF_PROC_CTLS_UNCOND_IO_EXIT_SHIFT    )
    1317                                  | (pFeatures->fVmxUseIoBitmaps      << VMX_BF_PROC_CTLS_USE_IO_BITMAPS_SHIFT    )
    1318                                  | (pFeatures->fVmxMonitorTrapFlag   << VMX_BF_PROC_CTLS_MONITOR_TRAP_FLAG_SHIFT )
    1319                                  | (pFeatures->fVmxUseMsrBitmaps     << VMX_BF_PROC_CTLS_USE_MSR_BITMAPS_SHIFT   )
    1320                                  | (pFeatures->fVmxMonitorExit       << VMX_BF_PROC_CTLS_MONITOR_EXIT_SHIFT      )
    1321                                  | (pFeatures->fVmxPauseExit         << VMX_BF_PROC_CTLS_PAUSE_EXIT_SHIFT        )
    1322                                  | (pFeatures->fVmxSecondaryExecCtls << VMX_BF_PROC_CTLS_USE_SECONDARY_CTLS_SHIFT);
    1323         uint32_t const fAllowed0 = VMX_PROC_CTLS_DEFAULT1;
    1324         uint32_t const fAllowed1 = fFeatures | VMX_PROC_CTLS_DEFAULT1;
    1325         AssertMsg((fAllowed0 & fAllowed1) == fAllowed0, ("fAllowed0=%#RX32 fAllowed1=%#RX32 fFeatures=%#RX32\n", fAllowed0,
    1326                                                          fAllowed1, fFeatures));
    1327         pVmxMsrs->ProcCtls.u = RT_MAKE_U64(fAllowed0, fAllowed1);
    1328     }
    1329 
    1330     /* Secondary processor-based VM-execution controls. */
    1331     if (pFeatures->fVmxSecondaryExecCtls)
    1332     {
    1333         uint32_t const fFeatures = (pFeatures->fVmxVirtApicAccess    << VMX_BF_PROC_CTLS2_VIRT_APIC_ACCESS_SHIFT  )
    1334                                  | (pFeatures->fVmxEpt               << VMX_BF_PROC_CTLS2_EPT_SHIFT               )
    1335                                  | (pFeatures->fVmxDescTableExit     << VMX_BF_PROC_CTLS2_DESC_TABLE_EXIT_SHIFT   )
    1336                                  | (pFeatures->fVmxRdtscp            << VMX_BF_PROC_CTLS2_RDTSCP_SHIFT            )
    1337                                  | (pFeatures->fVmxVirtX2ApicMode    << VMX_BF_PROC_CTLS2_VIRT_X2APIC_MODE_SHIFT  )
    1338                                  | (pFeatures->fVmxVpid              << VMX_BF_PROC_CTLS2_VPID_SHIFT              )
    1339                                  | (pFeatures->fVmxWbinvdExit        << VMX_BF_PROC_CTLS2_WBINVD_EXIT_SHIFT       )
    1340                                  | (pFeatures->fVmxUnrestrictedGuest << VMX_BF_PROC_CTLS2_UNRESTRICTED_GUEST_SHIFT)
    1341                                  | (pFeatures->fVmxApicRegVirt       << VMX_BF_PROC_CTLS2_APIC_REG_VIRT_SHIFT     )
    1342                                  | (pFeatures->fVmxVirtIntDelivery   << VMX_BF_PROC_CTLS2_VIRT_INT_DELIVERY_SHIFT )
    1343                                  | (pFeatures->fVmxPauseLoopExit     << VMX_BF_PROC_CTLS2_PAUSE_LOOP_EXIT_SHIFT   )
    1344                                  | (pFeatures->fVmxRdrandExit        << VMX_BF_PROC_CTLS2_RDRAND_EXIT_SHIFT       )
    1345                                  | (pFeatures->fVmxInvpcid           << VMX_BF_PROC_CTLS2_INVPCID_SHIFT           )
    1346                                  | (pFeatures->fVmxVmFunc            << VMX_BF_PROC_CTLS2_VMFUNC_SHIFT            )
    1347                                  | (pFeatures->fVmxVmcsShadowing     << VMX_BF_PROC_CTLS2_VMCS_SHADOWING_SHIFT    )
    1348                                  | (pFeatures->fVmxRdseedExit        << VMX_BF_PROC_CTLS2_RDSEED_EXIT_SHIFT       )
    1349                                  | (pFeatures->fVmxPml               << VMX_BF_PROC_CTLS2_PML_SHIFT               )
    1350                                  | (pFeatures->fVmxEptXcptVe         << VMX_BF_PROC_CTLS2_EPT_VE_SHIFT            )
    1351                                  | (pFeatures->fVmxXsavesXrstors     << VMX_BF_PROC_CTLS2_XSAVES_XRSTORS_SHIFT    )
    1352                                  | (pFeatures->fVmxUseTscScaling     << VMX_BF_PROC_CTLS2_TSC_SCALING_SHIFT       );
    1353         uint32_t const fAllowed0 = 0;
    1354         uint32_t const fAllowed1 = fFeatures;
    1355         pVmxMsrs->ProcCtls2.u = RT_MAKE_U64(fAllowed0, fAllowed1);
    1356     }
    1357 
    1358     /* VM-exit controls. */
    1359     {
    1360         uint32_t const fFeatures = (pFeatures->fVmxExitSaveDebugCtls << VMX_BF_EXIT_CTLS_SAVE_DEBUG_SHIFT          )
    1361                                  | (pFeatures->fVmxHostAddrSpaceSize << VMX_BF_EXIT_CTLS_HOST_ADDR_SPACE_SIZE_SHIFT)
    1362                                  | (pFeatures->fVmxExitAckExtInt     << VMX_BF_EXIT_CTLS_ACK_EXT_INT_SHIFT         )
    1363                                  | (pFeatures->fVmxExitSavePatMsr    << VMX_BF_EXIT_CTLS_SAVE_PAT_MSR_SHIFT        )
    1364                                  | (pFeatures->fVmxExitLoadPatMsr    << VMX_BF_EXIT_CTLS_LOAD_PAT_MSR_SHIFT        )
    1365                                  | (pFeatures->fVmxExitSaveEferMsr   << VMX_BF_EXIT_CTLS_SAVE_EFER_MSR_SHIFT       )
    1366                                  | (pFeatures->fVmxExitLoadEferMsr   << VMX_BF_EXIT_CTLS_LOAD_EFER_MSR_SHIFT       )
    1367                                  | (pFeatures->fVmxSavePreemptTimer  << VMX_BF_EXIT_CTLS_SAVE_PREEMPT_TIMER_SHIFT  );
    1368         /* Set the default1 class bits. See Intel spec. A.4 "VM-exit Controls". */
    1369         uint32_t const fAllowed0 = VMX_EXIT_CTLS_DEFAULT1;
    1370         uint32_t const fAllowed1 = fFeatures | VMX_EXIT_CTLS_DEFAULT1;
    1371         AssertMsg((fAllowed0 & fAllowed1) == fAllowed0, ("fAllowed0=%#RX32 fAllowed1=%#RX32 fFeatures=%#RX32\n", fAllowed0,
    1372                                                          fAllowed1, fFeatures));
    1373         pVmxMsrs->ExitCtls.u = RT_MAKE_U64(fAllowed0, fAllowed1);
    1374     }
    1375 
    1376     /* VM-entry controls. */
    1377     {
    1378         uint32_t const fFeatures = (pFeatures->fVmxEntryLoadDebugCtls << VMX_BF_ENTRY_CTLS_LOAD_DEBUG_SHIFT      )
    1379                                  | (pFeatures->fVmxIa32eModeGuest     << VMX_BF_ENTRY_CTLS_IA32E_MODE_GUEST_SHIFT)
    1380                                  | (pFeatures->fVmxEntryLoadEferMsr   << VMX_BF_ENTRY_CTLS_LOAD_EFER_MSR_SHIFT   )
    1381                                  | (pFeatures->fVmxEntryLoadPatMsr    << VMX_BF_ENTRY_CTLS_LOAD_PAT_MSR_SHIFT    );
    1382         uint32_t const fAllowed0 = VMX_ENTRY_CTLS_DEFAULT1;
    1383         uint32_t const fAllowed1 = fFeatures | VMX_ENTRY_CTLS_DEFAULT1;
    1384         AssertMsg((fAllowed0 & fAllowed1) == fAllowed0, ("fAllowed0=%#RX32 fAllowed1=%#RX32 fFeatures=%#RX32\n", fAllowed0,
    1385                                                          fAllowed1, fFeatures));
    1386         pVmxMsrs->EntryCtls.u = RT_MAKE_U64(fAllowed0, fAllowed1);
    1387     }
    1388 
    1389     /* Miscellaneous data. */
    1390     {
    1391         uint64_t uHostMsr = 0;
    1392         if (cpumR3IsHwAssistVmxNstGstExecAllowed(pVM))
    1393             HMVmxGetHostMsr(pVM, MSR_IA32_VMX_MISC, &uHostMsr);
    1394         uint8_t const cMaxMsrs       = RT_MIN(RT_BF_GET(uHostMsr, VMX_BF_MISC_MAX_MSRS), VMX_V_AUTOMSR_COUNT_MAX);
    1395         uint8_t const fActivityState = RT_BF_GET(uHostMsr, VMX_BF_MISC_ACTIVITY_STATES) & VMX_V_GUEST_ACTIVITY_STATE_MASK;
    1396         pVmxMsrs->u64Misc = RT_BF_MAKE(VMX_BF_MISC_PREEMPT_TIMER_TSC,      VMX_V_PREEMPT_TIMER_SHIFT        )
    1397                           | RT_BF_MAKE(VMX_BF_MISC_EXIT_SAVE_EFER_LMA,     pFeatures->fVmxExitSaveEferLma   )
    1398                           | RT_BF_MAKE(VMX_BF_MISC_ACTIVITY_STATES,        fActivityState                   )
    1399                           | RT_BF_MAKE(VMX_BF_MISC_INTEL_PT,               pFeatures->fVmxIntelPt           )
    1400                           | RT_BF_MAKE(VMX_BF_MISC_SMM_READ_SMBASE_MSR,    0                                )
    1401                           | RT_BF_MAKE(VMX_BF_MISC_CR3_TARGET,             VMX_V_CR3_TARGET_COUNT           )
    1402                           | RT_BF_MAKE(VMX_BF_MISC_MAX_MSRS,               cMaxMsrs                         )
    1403                           | RT_BF_MAKE(VMX_BF_MISC_VMXOFF_BLOCK_SMI,       0                                )
    1404                           | RT_BF_MAKE(VMX_BF_MISC_VMWRITE_ALL,            pFeatures->fVmxVmwriteAll        )
    1405                           | RT_BF_MAKE(VMX_BF_MISC_ENTRY_INJECT_SOFT_INT,  pFeatures->fVmxEntryInjectSoftInt)
    1406                           | RT_BF_MAKE(VMX_BF_MISC_MSEG_ID,                VMX_V_MSEG_REV_ID                );
    1407     }
    1408 
    1409     /* CR0 Fixed-0. */
    1410     pVmxMsrs->u64Cr0Fixed0 = pFeatures->fVmxUnrestrictedGuest ? VMX_V_CR0_FIXED0_UX : VMX_V_CR0_FIXED0;
    1411 
    1412     /* CR0 Fixed-1. */
    1413     {
    1414         uint64_t uHostMsr = 0;
    1415         if (cpumR3IsHwAssistVmxNstGstExecAllowed(pVM))
    1416             HMVmxGetHostMsr(pVM, MSR_IA32_VMX_CR0_FIXED1, &uHostMsr);
    1417         pVmxMsrs->u64Cr0Fixed1 = uHostMsr | VMX_V_CR0_FIXED0;   /* Make sure the CR0 MB1 bits are not clear. */
    1418     }
    1419 
    1420     /* CR4 Fixed-0. */
    1421     pVmxMsrs->u64Cr4Fixed0 = VMX_V_CR4_FIXED0;
    1422 
    1423     /* CR4 Fixed-1. */
    1424     {
    1425         uint64_t uHostMsr = 0;
    1426         if (cpumR3IsHwAssistVmxNstGstExecAllowed(pVM))
    1427             HMVmxGetHostMsr(pVM, MSR_IA32_VMX_CR4_FIXED1, &uHostMsr);
    1428         pVmxMsrs->u64Cr4Fixed1 = uHostMsr | VMX_V_CR4_FIXED0;   /* Make sure the CR4 MB1 bits are not clear. */
    1429     }
    1430 
    1431     /* VMCS Enumeration. */
    1432     pVmxMsrs->u64VmcsEnum = VMX_V_VMCS_MAX_INDEX << VMX_BF_VMCS_ENUM_HIGHEST_IDX_SHIFT;
    1433 
    1434     /* VM Functions. */
    1435     if (pFeatures->fVmxVmFunc)
    1436         pVmxMsrs->u64VmFunc = RT_BF_MAKE(VMX_BF_VMFUNC_EPTP_SWITCHING, 1);
     1258 * Initializes the VMX guest MSRs from guest CPU features based on the host MSRs.
     1259 *
     1260 * @param   pVM             The cross context VM structure.
     1261 * @param   pHostVmxMsrs    The host VMX MSRs. Pass NULL when fully emulating VMX
     1262 *                          and no hardware-assisted nested-guest execution is
     1263 *                          possible for this VM.
     1264 * @param   pGuestFeatures  The guest features to use (only VMX features are
     1265 *                          accessed).
     1266 * @param   pGuestVmxMsrs   Where to store the initialized guest VMX MSRs.
     1267 *
     1268 * @remarks This function ASSUMES the VMX guest-features are already exploded!
     1269 */
     1270static void cpumR3InitVmxGuestMsrs(PVM pVM, PCVMXMSRS pHostVmxMsrs, PCCPUMFEATURES pGuestFeatures, PVMXMSRS pGuestVmxMsrs)
     1271{
     1272    Assert(!cpumR3IsHwAssistVmxNstGstExecAllowed(pVM) || pHostVmxMsrs);
     1273    Assert(pGuestFeatures->fVmx);
    14371274
    14381275    /*
     
    14451282     */
    14461283
    1447     /*
    1448      * Copy the MSR values initialized in VCPU 0 to all other VCPUs.
    1449      */
    1450     for (VMCPUID idCpu = 1; idCpu < pVM->cCpus; idCpu++)
    1451     {
    1452         PVMCPU pVCpu = &pVM->aCpus[idCpu];
    1453         Assert(pVCpu);
    1454         memcpy(&pVCpu->cpum.s.Guest.hwvirt.vmx.Msrs, pVmxMsrs, sizeof(*pVmxMsrs));
    1455     }
    1456 }
    1457 
    1458 
    1459 /**
    1460  * Explode VMX features from the provided MSRs.
    1461  *
    1462  * @param   pVmxMsrs        Pointer to the VMX MSRs.
    1463  * @param   pFeatures       Pointer to the features struct. to populate.
    1464  */
    1465 static void cpumR3ExplodeVmxFeatures(PCVMXMSRS pVmxMsrs, PCPUMFEATURES pFeatures)
    1466 {
    1467     Assert(pVmxMsrs);
    1468     Assert(pFeatures);
    1469     Assert(pFeatures->fVmx);
     1284    /* Feature control. */
     1285    pGuestVmxMsrs->u64FeatCtrl = MSR_IA32_FEATURE_CONTROL_LOCK | MSR_IA32_FEATURE_CONTROL_VMXON;
    14701286
    14711287    /* Basic information. */
    14721288    {
    1473         uint64_t const u64Basic = pVmxMsrs->u64Basic;
    1474         pFeatures->fVmxInsOutInfo            = RT_BF_GET(u64Basic, VMX_BF_BASIC_VMCS_INS_OUTS);
     1289        uint64_t const u64Basic = RT_BF_MAKE(VMX_BF_BASIC_VMCS_ID,         VMX_V_VMCS_REVISION_ID        )
     1290                                | RT_BF_MAKE(VMX_BF_BASIC_VMCS_SIZE,       VMX_V_VMCS_SIZE               )
     1291                                | RT_BF_MAKE(VMX_BF_BASIC_PHYSADDR_WIDTH,  !pGuestFeatures->fLongMode    )
     1292                                | RT_BF_MAKE(VMX_BF_BASIC_DUAL_MON,        0                             )
     1293                                | RT_BF_MAKE(VMX_BF_BASIC_VMCS_MEM_TYPE,   VMX_BASIC_MEM_TYPE_WB         )
     1294                                | RT_BF_MAKE(VMX_BF_BASIC_VMCS_INS_OUTS,   pGuestFeatures->fVmxInsOutInfo)
     1295                                | RT_BF_MAKE(VMX_BF_BASIC_TRUE_CTLS,       0                             );
     1296        pGuestVmxMsrs->u64Basic = u64Basic;
    14751297    }
    14761298
    14771299    /* Pin-based VM-execution controls. */
    14781300    {
    1479         uint32_t const fPinCtls = pVmxMsrs->PinCtls.n.allowed1;
    1480         pFeatures->fVmxExtIntExit            = RT_BOOL(fPinCtls & VMX_PIN_CTLS_EXT_INT_EXIT);
    1481         pFeatures->fVmxNmiExit               = RT_BOOL(fPinCtls & VMX_PIN_CTLS_NMI_EXIT);
    1482         pFeatures->fVmxVirtNmi               = RT_BOOL(fPinCtls & VMX_PIN_CTLS_VIRT_NMI);
    1483         pFeatures->fVmxPreemptTimer          = RT_BOOL(fPinCtls & VMX_PIN_CTLS_PREEMPT_TIMER);
    1484         pFeatures->fVmxPostedInt             = RT_BOOL(fPinCtls & VMX_PIN_CTLS_POSTED_INT);
     1301        uint32_t const fFeatures = (pGuestFeatures->fVmxExtIntExit   << VMX_BF_PIN_CTLS_EXT_INT_EXIT_SHIFT )
     1302                                 | (pGuestFeatures->fVmxNmiExit      << VMX_BF_PIN_CTLS_NMI_EXIT_SHIFT     )
     1303                                 | (pGuestFeatures->fVmxVirtNmi      << VMX_BF_PIN_CTLS_VIRT_NMI_SHIFT     )
     1304                                 | (pGuestFeatures->fVmxPreemptTimer << VMX_BF_PIN_CTLS_PREEMPT_TIMER_SHIFT)
     1305                                 | (pGuestFeatures->fVmxPostedInt    << VMX_BF_PIN_CTLS_POSTED_INT_SHIFT   );
     1306        uint32_t const fAllowed0 = VMX_PIN_CTLS_DEFAULT1;
     1307        uint32_t const fAllowed1 = fFeatures | VMX_PIN_CTLS_DEFAULT1;
     1308        AssertMsg((fAllowed0 & fAllowed1) == fAllowed0, ("fAllowed0=%#RX32 fAllowed1=%#RX32 fFeatures=%#RX32\n",
     1309                                                         fAllowed0, fAllowed1, fFeatures));
     1310        pGuestVmxMsrs->PinCtls.u = RT_MAKE_U64(fAllowed0, fAllowed1);
    14851311    }
    14861312
    14871313    /* Processor-based VM-execution controls. */
    14881314    {
    1489         uint32_t const fProcCtls = pVmxMsrs->ProcCtls.n.allowed1;
    1490         pFeatures->fVmxIntWindowExit         = RT_BOOL(fProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT);
    1491         pFeatures->fVmxTscOffsetting         = RT_BOOL(fProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING);
    1492         pFeatures->fVmxHltExit               = RT_BOOL(fProcCtls & VMX_PROC_CTLS_HLT_EXIT);
    1493         pFeatures->fVmxInvlpgExit            = RT_BOOL(fProcCtls & VMX_PROC_CTLS_INVLPG_EXIT);
    1494         pFeatures->fVmxMwaitExit             = RT_BOOL(fProcCtls & VMX_PROC_CTLS_MWAIT_EXIT);
    1495         pFeatures->fVmxRdpmcExit             = RT_BOOL(fProcCtls & VMX_PROC_CTLS_RDPMC_EXIT);
    1496         pFeatures->fVmxRdtscExit             = RT_BOOL(fProcCtls & VMX_PROC_CTLS_RDTSC_EXIT);
    1497         pFeatures->fVmxCr3LoadExit           = RT_BOOL(fProcCtls & VMX_PROC_CTLS_CR3_LOAD_EXIT);
    1498         pFeatures->fVmxCr3StoreExit          = RT_BOOL(fProcCtls & VMX_PROC_CTLS_CR3_STORE_EXIT);
    1499         pFeatures->fVmxCr8LoadExit           = RT_BOOL(fProcCtls & VMX_PROC_CTLS_CR8_LOAD_EXIT);
    1500         pFeatures->fVmxCr8StoreExit          = RT_BOOL(fProcCtls & VMX_PROC_CTLS_CR8_STORE_EXIT);
    1501         pFeatures->fVmxUseTprShadow          = RT_BOOL(fProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
    1502         pFeatures->fVmxNmiWindowExit         = RT_BOOL(fProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT);
    1503         pFeatures->fVmxMovDRxExit            = RT_BOOL(fProcCtls & VMX_PROC_CTLS_MOV_DR_EXIT);
    1504         pFeatures->fVmxUncondIoExit          = RT_BOOL(fProcCtls & VMX_PROC_CTLS_UNCOND_IO_EXIT);
    1505         pFeatures->fVmxUseIoBitmaps          = RT_BOOL(fProcCtls & VMX_PROC_CTLS_USE_IO_BITMAPS);
    1506         pFeatures->fVmxMonitorTrapFlag       = RT_BOOL(fProcCtls & VMX_PROC_CTLS_MONITOR_TRAP_FLAG);
    1507         pFeatures->fVmxUseMsrBitmaps         = RT_BOOL(fProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS);
    1508         pFeatures->fVmxMonitorExit           = RT_BOOL(fProcCtls & VMX_PROC_CTLS_MONITOR_EXIT);
    1509         pFeatures->fVmxPauseExit             = RT_BOOL(fProcCtls & VMX_PROC_CTLS_PAUSE_EXIT);
    1510         pFeatures->fVmxSecondaryExecCtls     = RT_BOOL(fProcCtls & VMX_PROC_CTLS_USE_SECONDARY_CTLS);
     1315        uint32_t const fFeatures = (pGuestFeatures->fVmxIntWindowExit     << VMX_BF_PROC_CTLS_INT_WINDOW_EXIT_SHIFT   )
     1316                                 | (pGuestFeatures->fVmxTscOffsetting     << VMX_BF_PROC_CTLS_USE_TSC_OFFSETTING_SHIFT)
     1317                                 | (pGuestFeatures->fVmxHltExit           << VMX_BF_PROC_CTLS_HLT_EXIT_SHIFT          )
     1318                                 | (pGuestFeatures->fVmxInvlpgExit        << VMX_BF_PROC_CTLS_INVLPG_EXIT_SHIFT       )
     1319                                 | (pGuestFeatures->fVmxMwaitExit         << VMX_BF_PROC_CTLS_MWAIT_EXIT_SHIFT        )
     1320                                 | (pGuestFeatures->fVmxRdpmcExit         << VMX_BF_PROC_CTLS_RDPMC_EXIT_SHIFT        )
     1321                                 | (pGuestFeatures->fVmxRdtscExit         << VMX_BF_PROC_CTLS_RDTSC_EXIT_SHIFT        )
     1322                                 | (pGuestFeatures->fVmxCr3LoadExit       << VMX_BF_PROC_CTLS_CR3_LOAD_EXIT_SHIFT     )
     1323                                 | (pGuestFeatures->fVmxCr3StoreExit      << VMX_BF_PROC_CTLS_CR3_STORE_EXIT_SHIFT    )
     1324                                 | (pGuestFeatures->fVmxCr8LoadExit       << VMX_BF_PROC_CTLS_CR8_LOAD_EXIT_SHIFT     )
     1325                                 | (pGuestFeatures->fVmxCr8StoreExit      << VMX_BF_PROC_CTLS_CR8_STORE_EXIT_SHIFT    )
     1326                                 | (pGuestFeatures->fVmxUseTprShadow      << VMX_BF_PROC_CTLS_USE_TPR_SHADOW_SHIFT    )
     1327                                 | (pGuestFeatures->fVmxNmiWindowExit     << VMX_BF_PROC_CTLS_NMI_WINDOW_EXIT_SHIFT   )
     1328                                 | (pGuestFeatures->fVmxMovDRxExit        << VMX_BF_PROC_CTLS_MOV_DR_EXIT_SHIFT       )
     1329                                 | (pGuestFeatures->fVmxUncondIoExit      << VMX_BF_PROC_CTLS_UNCOND_IO_EXIT_SHIFT    )
     1330                                 | (pGuestFeatures->fVmxUseIoBitmaps      << VMX_BF_PROC_CTLS_USE_IO_BITMAPS_SHIFT    )
     1331                                 | (pGuestFeatures->fVmxMonitorTrapFlag   << VMX_BF_PROC_CTLS_MONITOR_TRAP_FLAG_SHIFT )
     1332                                 | (pGuestFeatures->fVmxUseMsrBitmaps     << VMX_BF_PROC_CTLS_USE_MSR_BITMAPS_SHIFT   )
     1333                                 | (pGuestFeatures->fVmxMonitorExit       << VMX_BF_PROC_CTLS_MONITOR_EXIT_SHIFT      )
     1334                                 | (pGuestFeatures->fVmxPauseExit         << VMX_BF_PROC_CTLS_PAUSE_EXIT_SHIFT        )
     1335                                 | (pGuestFeatures->fVmxSecondaryExecCtls << VMX_BF_PROC_CTLS_USE_SECONDARY_CTLS_SHIFT);
     1336        uint32_t const fAllowed0 = VMX_PROC_CTLS_DEFAULT1;
     1337        uint32_t const fAllowed1 = fFeatures | VMX_PROC_CTLS_DEFAULT1;
     1338        AssertMsg((fAllowed0 & fAllowed1) == fAllowed0, ("fAllowed0=%#RX32 fAllowed1=%#RX32 fFeatures=%#RX32\n", fAllowed0,
     1339                                                         fAllowed1, fFeatures));
     1340        pGuestVmxMsrs->ProcCtls.u = RT_MAKE_U64(fAllowed0, fAllowed1);
    15111341    }
    15121342
    15131343    /* Secondary processor-based VM-execution controls. */
    1514     {
    1515         uint32_t const fProcCtls2 = pFeatures->fVmxSecondaryExecCtls ? pVmxMsrs->ProcCtls2.n.allowed1 : 0;
    1516         pFeatures->fVmxVirtApicAccess        = RT_BOOL(fProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS);
    1517         pFeatures->fVmxEpt                   = RT_BOOL(fProcCtls2 & VMX_PROC_CTLS2_EPT);
    1518         pFeatures->fVmxDescTableExit         = RT_BOOL(fProcCtls2 & VMX_PROC_CTLS2_DESC_TABLE_EXIT);
    1519         pFeatures->fVmxRdtscp                = RT_BOOL(fProcCtls2 & VMX_PROC_CTLS2_RDTSCP);
    1520         pFeatures->fVmxVirtX2ApicMode        = RT_BOOL(fProcCtls2 & VMX_PROC_CTLS2_VIRT_X2APIC_MODE);
    1521         pFeatures->fVmxVpid                  = RT_BOOL(fProcCtls2 & VMX_PROC_CTLS2_VPID);
    1522         pFeatures->fVmxWbinvdExit            = RT_BOOL(fProcCtls2 & VMX_PROC_CTLS2_WBINVD_EXIT);
    1523         pFeatures->fVmxUnrestrictedGuest     = RT_BOOL(fProcCtls2 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST);
    1524         pFeatures->fVmxApicRegVirt           = RT_BOOL(fProcCtls2 & VMX_PROC_CTLS2_APIC_REG_VIRT);
    1525         pFeatures->fVmxVirtIntDelivery       = RT_BOOL(fProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY);
    1526         pFeatures->fVmxPauseLoopExit         = RT_BOOL(fProcCtls2 & VMX_PROC_CTLS2_PAUSE_LOOP_EXIT);
    1527         pFeatures->fVmxRdrandExit            = RT_BOOL(fProcCtls2 & VMX_PROC_CTLS2_RDRAND_EXIT);
    1528         pFeatures->fVmxInvpcid               = RT_BOOL(fProcCtls2 & VMX_PROC_CTLS2_INVPCID);
    1529         pFeatures->fVmxVmFunc                = RT_BOOL(fProcCtls2 & VMX_PROC_CTLS2_VMFUNC);
    1530         pFeatures->fVmxVmcsShadowing         = RT_BOOL(fProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING);
    1531         pFeatures->fVmxRdseedExit            = RT_BOOL(fProcCtls2 & VMX_PROC_CTLS2_RDSEED_EXIT);
    1532         pFeatures->fVmxPml                   = RT_BOOL(fProcCtls2 & VMX_PROC_CTLS2_PML);
    1533         pFeatures->fVmxEptXcptVe             = RT_BOOL(fProcCtls2 & VMX_PROC_CTLS2_EPT_VE);
    1534         pFeatures->fVmxXsavesXrstors         = RT_BOOL(fProcCtls2 & VMX_PROC_CTLS2_XSAVES_XRSTORS);
    1535         pFeatures->fVmxUseTscScaling         = RT_BOOL(fProcCtls2 & VMX_PROC_CTLS2_TSC_SCALING);
     1344    if (pGuestFeatures->fVmxSecondaryExecCtls)
     1345    {
     1346        uint32_t const fFeatures = (pGuestFeatures->fVmxVirtApicAccess    << VMX_BF_PROC_CTLS2_VIRT_APIC_ACCESS_SHIFT  )
     1347                                 | (pGuestFeatures->fVmxEpt               << VMX_BF_PROC_CTLS2_EPT_SHIFT               )
     1348                                 | (pGuestFeatures->fVmxDescTableExit     << VMX_BF_PROC_CTLS2_DESC_TABLE_EXIT_SHIFT   )
     1349                                 | (pGuestFeatures->fVmxRdtscp            << VMX_BF_PROC_CTLS2_RDTSCP_SHIFT            )
     1350                                 | (pGuestFeatures->fVmxVirtX2ApicMode    << VMX_BF_PROC_CTLS2_VIRT_X2APIC_MODE_SHIFT  )
     1351                                 | (pGuestFeatures->fVmxVpid              << VMX_BF_PROC_CTLS2_VPID_SHIFT              )
     1352                                 | (pGuestFeatures->fVmxWbinvdExit        << VMX_BF_PROC_CTLS2_WBINVD_EXIT_SHIFT       )
     1353                                 | (pGuestFeatures->fVmxUnrestrictedGuest << VMX_BF_PROC_CTLS2_UNRESTRICTED_GUEST_SHIFT)
     1354                                 | (pGuestFeatures->fVmxApicRegVirt       << VMX_BF_PROC_CTLS2_APIC_REG_VIRT_SHIFT     )
     1355                                 | (pGuestFeatures->fVmxVirtIntDelivery   << VMX_BF_PROC_CTLS2_VIRT_INT_DELIVERY_SHIFT )
     1356                                 | (pGuestFeatures->fVmxPauseLoopExit     << VMX_BF_PROC_CTLS2_PAUSE_LOOP_EXIT_SHIFT   )
     1357                                 | (pGuestFeatures->fVmxRdrandExit        << VMX_BF_PROC_CTLS2_RDRAND_EXIT_SHIFT       )
     1358                                 | (pGuestFeatures->fVmxInvpcid           << VMX_BF_PROC_CTLS2_INVPCID_SHIFT           )
     1359                                 | (pGuestFeatures->fVmxVmFunc            << VMX_BF_PROC_CTLS2_VMFUNC_SHIFT            )
     1360                                 | (pGuestFeatures->fVmxVmcsShadowing     << VMX_BF_PROC_CTLS2_VMCS_SHADOWING_SHIFT    )
     1361                                 | (pGuestFeatures->fVmxRdseedExit        << VMX_BF_PROC_CTLS2_RDSEED_EXIT_SHIFT       )
     1362                                 | (pGuestFeatures->fVmxPml               << VMX_BF_PROC_CTLS2_PML_SHIFT               )
     1363                                 | (pGuestFeatures->fVmxEptXcptVe         << VMX_BF_PROC_CTLS2_EPT_VE_SHIFT            )
     1364                                 | (pGuestFeatures->fVmxXsavesXrstors     << VMX_BF_PROC_CTLS2_XSAVES_XRSTORS_SHIFT    )
     1365                                 | (pGuestFeatures->fVmxUseTscScaling     << VMX_BF_PROC_CTLS2_TSC_SCALING_SHIFT       );
     1366        uint32_t const fAllowed0 = 0;
     1367        uint32_t const fAllowed1 = fFeatures;
     1368        pGuestVmxMsrs->ProcCtls2.u = RT_MAKE_U64(fAllowed0, fAllowed1);
    15361369    }
    15371370
    15381371    /* VM-exit controls. */
    15391372    {
    1540         uint32_t const fExitCtls = pVmxMsrs->ExitCtls.n.allowed1;
    1541         pFeatures->fVmxExitSaveDebugCtls     = RT_BOOL(fExitCtls & VMX_EXIT_CTLS_SAVE_DEBUG);
    1542         pFeatures->fVmxHostAddrSpaceSize     = RT_BOOL(fExitCtls & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE);
    1543         pFeatures->fVmxExitAckExtInt         = RT_BOOL(fExitCtls & VMX_EXIT_CTLS_ACK_EXT_INT);
    1544         pFeatures->fVmxExitSavePatMsr        = RT_BOOL(fExitCtls & VMX_EXIT_CTLS_SAVE_PAT_MSR);
    1545         pFeatures->fVmxExitLoadPatMsr        = RT_BOOL(fExitCtls & VMX_EXIT_CTLS_LOAD_PAT_MSR);
    1546         pFeatures->fVmxExitSaveEferMsr       = RT_BOOL(fExitCtls & VMX_EXIT_CTLS_SAVE_EFER_MSR);
    1547         pFeatures->fVmxExitLoadEferMsr       = RT_BOOL(fExitCtls & VMX_EXIT_CTLS_LOAD_EFER_MSR);
    1548         pFeatures->fVmxSavePreemptTimer      = RT_BOOL(fExitCtls & VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER);
     1373        uint32_t const fFeatures = (pGuestFeatures->fVmxExitSaveDebugCtls << VMX_BF_EXIT_CTLS_SAVE_DEBUG_SHIFT          )
     1374                                 | (pGuestFeatures->fVmxHostAddrSpaceSize << VMX_BF_EXIT_CTLS_HOST_ADDR_SPACE_SIZE_SHIFT)
     1375                                 | (pGuestFeatures->fVmxExitAckExtInt     << VMX_BF_EXIT_CTLS_ACK_EXT_INT_SHIFT         )
     1376                                 | (pGuestFeatures->fVmxExitSavePatMsr    << VMX_BF_EXIT_CTLS_SAVE_PAT_MSR_SHIFT        )
     1377                                 | (pGuestFeatures->fVmxExitLoadPatMsr    << VMX_BF_EXIT_CTLS_LOAD_PAT_MSR_SHIFT        )
     1378                                 | (pGuestFeatures->fVmxExitSaveEferMsr   << VMX_BF_EXIT_CTLS_SAVE_EFER_MSR_SHIFT       )
     1379                                 | (pGuestFeatures->fVmxExitLoadEferMsr   << VMX_BF_EXIT_CTLS_LOAD_EFER_MSR_SHIFT       )
     1380                                 | (pGuestFeatures->fVmxSavePreemptTimer  << VMX_BF_EXIT_CTLS_SAVE_PREEMPT_TIMER_SHIFT  );
     1381        /* Set the default1 class bits. See Intel spec. A.4 "VM-exit Controls". */
     1382        uint32_t const fAllowed0 = VMX_EXIT_CTLS_DEFAULT1;
     1383        uint32_t const fAllowed1 = fFeatures | VMX_EXIT_CTLS_DEFAULT1;
     1384        AssertMsg((fAllowed0 & fAllowed1) == fAllowed0, ("fAllowed0=%#RX32 fAllowed1=%#RX32 fFeatures=%#RX32\n", fAllowed0,
     1385                                                         fAllowed1, fFeatures));
     1386        pGuestVmxMsrs->ExitCtls.u = RT_MAKE_U64(fAllowed0, fAllowed1);
    15491387    }
    15501388
    15511389    /* VM-entry controls. */
    15521390    {
    1553         uint32_t const fEntryCtls = pVmxMsrs->EntryCtls.n.allowed1;
    1554         pFeatures->fVmxEntryLoadDebugCtls    = RT_BOOL(fEntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG);
    1555         pFeatures->fVmxIa32eModeGuest        = RT_BOOL(fEntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
    1556         pFeatures->fVmxEntryLoadEferMsr      = RT_BOOL(fEntryCtls & VMX_ENTRY_CTLS_LOAD_EFER_MSR);
    1557         pFeatures->fVmxEntryLoadPatMsr       = RT_BOOL(fEntryCtls & VMX_ENTRY_CTLS_LOAD_PAT_MSR);
     1391        uint32_t const fFeatures = (pGuestFeatures->fVmxEntryLoadDebugCtls << VMX_BF_ENTRY_CTLS_LOAD_DEBUG_SHIFT      )
     1392                                 | (pGuestFeatures->fVmxIa32eModeGuest     << VMX_BF_ENTRY_CTLS_IA32E_MODE_GUEST_SHIFT)
     1393                                 | (pGuestFeatures->fVmxEntryLoadEferMsr   << VMX_BF_ENTRY_CTLS_LOAD_EFER_MSR_SHIFT   )
     1394                                 | (pGuestFeatures->fVmxEntryLoadPatMsr    << VMX_BF_ENTRY_CTLS_LOAD_PAT_MSR_SHIFT    );
     1395        uint32_t const fAllowed0 = VMX_ENTRY_CTLS_DEFAULT1;
     1396        uint32_t const fAllowed1 = fFeatures | VMX_ENTRY_CTLS_DEFAULT1;
     1397        AssertMsg((fAllowed0 & fAllowed1) == fAllowed0, ("fAllowed0=%#RX32 fAllowed1=%#RX32 fFeatures=%#RX32\n", fAllowed0,
     1398                                                         fAllowed1, fFeatures));
     1399        pGuestVmxMsrs->EntryCtls.u = RT_MAKE_U64(fAllowed0, fAllowed1);
    15581400    }
    15591401
    15601402    /* Miscellaneous data. */
    15611403    {
    1562         uint32_t const fMiscData = pVmxMsrs->u64Misc;
    1563         pFeatures->fVmxExitSaveEferLma       = RT_BOOL(fMiscData & VMX_MISC_EXIT_SAVE_EFER_LMA);
    1564         pFeatures->fVmxIntelPt               = RT_BOOL(fMiscData & VMX_MISC_INTEL_PT);
    1565         pFeatures->fVmxVmwriteAll            = RT_BOOL(fMiscData & VMX_MISC_VMWRITE_ALL);
    1566         pFeatures->fVmxEntryInjectSoftInt    = RT_BOOL(fMiscData & VMX_MISC_ENTRY_INJECT_SOFT_INT);
    1567     }
     1404        uint64_t const uHostMsr = cpumR3IsHwAssistVmxNstGstExecAllowed(pVM) ? pHostVmxMsrs->u64Misc : 0;
     1405
     1406        uint8_t const  cMaxMsrs       = RT_MIN(RT_BF_GET(uHostMsr, VMX_BF_MISC_MAX_MSRS), VMX_V_AUTOMSR_COUNT_MAX);
     1407        uint8_t const  fActivityState = RT_BF_GET(uHostMsr, VMX_BF_MISC_ACTIVITY_STATES) & VMX_V_GUEST_ACTIVITY_STATE_MASK;
     1408        pGuestVmxMsrs->u64Misc = RT_BF_MAKE(VMX_BF_MISC_PREEMPT_TIMER_TSC,      VMX_V_PREEMPT_TIMER_SHIFT             )
     1409                               | RT_BF_MAKE(VMX_BF_MISC_EXIT_SAVE_EFER_LMA,     pGuestFeatures->fVmxExitSaveEferLma   )
     1410                               | RT_BF_MAKE(VMX_BF_MISC_ACTIVITY_STATES,        fActivityState                        )
     1411                               | RT_BF_MAKE(VMX_BF_MISC_INTEL_PT,               pGuestFeatures->fVmxIntelPt           )
     1412                               | RT_BF_MAKE(VMX_BF_MISC_SMM_READ_SMBASE_MSR,    0                                     )
     1413                               | RT_BF_MAKE(VMX_BF_MISC_CR3_TARGET,             VMX_V_CR3_TARGET_COUNT                )
     1414                               | RT_BF_MAKE(VMX_BF_MISC_MAX_MSRS,               cMaxMsrs                              )
     1415                               | RT_BF_MAKE(VMX_BF_MISC_VMXOFF_BLOCK_SMI,       0                                     )
     1416                               | RT_BF_MAKE(VMX_BF_MISC_VMWRITE_ALL,            pGuestFeatures->fVmxVmwriteAll        )
     1417                               | RT_BF_MAKE(VMX_BF_MISC_ENTRY_INJECT_SOFT_INT,  pGuestFeatures->fVmxEntryInjectSoftInt)
     1418                               | RT_BF_MAKE(VMX_BF_MISC_MSEG_ID,                VMX_V_MSEG_REV_ID                     );
     1419    }
     1420
     1421    /* CR0 Fixed-0. */
     1422    pGuestVmxMsrs->u64Cr0Fixed0 = pGuestFeatures->fVmxUnrestrictedGuest ? VMX_V_CR0_FIXED0_UX : VMX_V_CR0_FIXED0;
     1423
     1424    /* CR0 Fixed-1. */
     1425    {
     1426        uint64_t const uHostMsr = cpumR3IsHwAssistVmxNstGstExecAllowed(pVM) ? pHostVmxMsrs->u64Cr0Fixed1 : 0;
     1427        pGuestVmxMsrs->u64Cr0Fixed1 = uHostMsr | VMX_V_CR0_FIXED0;   /* Make sure the CR0 MB1 bits are not clear. */
     1428    }
     1429
     1430    /* CR4 Fixed-0. */
     1431    pGuestVmxMsrs->u64Cr4Fixed0 = VMX_V_CR4_FIXED0;
     1432
     1433    /* CR4 Fixed-1. */
     1434    {
     1435        uint64_t const uHostMsr = cpumR3IsHwAssistVmxNstGstExecAllowed(pVM) ? pHostVmxMsrs->u64Cr4Fixed1 : 0;
     1436        pGuestVmxMsrs->u64Cr4Fixed1 = uHostMsr | VMX_V_CR4_FIXED0;   /* Make sure the CR4 MB1 bits are not clear. */
     1437    }
     1438
     1439    /* VMCS Enumeration. */
     1440    pGuestVmxMsrs->u64VmcsEnum = VMX_V_VMCS_MAX_INDEX << VMX_BF_VMCS_ENUM_HIGHEST_IDX_SHIFT;
     1441
     1442    /* VM Functions. */
     1443    if (pGuestFeatures->fVmxVmFunc)
     1444        pGuestVmxMsrs->u64VmFunc = RT_BF_MAKE(VMX_BF_VMFUNC_EPTP_SWITCHING, 1);
    15681445}
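
All of the guest control MSRs constructed above follow the VMX allowed-0/allowed-1 convention: RT_MAKE_U64(fAllowed0, fAllowed1) places the must-be-one bits in the low dword and the may-be-one bits in the high dword, while the CR0/CR4 fixed MSRs play the analogous role for control-register bits. As a minimal sketch of how a consumer would validate settings against these MSRs (the helper names below are illustrative, not part of the VBox API):

    #include <stdbool.h>
    #include <stdint.h>

    /* Check a 32-bit VMX control field against an allowed-0/allowed-1 MSR:
       the low dword holds the bits that must be 1, the high dword the bits
       that may be 1 (matching the RT_MAKE_U64(fAllowed0, fAllowed1) packing). */
    static bool isVmxCtlValid(uint64_t uCtlMsr, uint32_t fCtl)
    {
        uint32_t const fAllowed0 = (uint32_t)uCtlMsr;         /* must-be-one bits */
        uint32_t const fAllowed1 = (uint32_t)(uCtlMsr >> 32); /* may-be-one bits  */
        return (fCtl & fAllowed0) == fAllowed0   /* all mandatory bits are set    */
            && (fCtl & ~fAllowed1) == 0;         /* no unsupported bits are set   */
    }

    /* Check a CR0/CR4 value against the corresponding fixed-0/fixed-1 MSRs:
       bits set in fixed-0 must be 1, bits clear in fixed-1 must be 0. */
    static bool isVmxCrValid(uint64_t uCr, uint64_t uFixed0, uint64_t uFixed1)
    {
        return (uCr & uFixed0) == uFixed0
            && (uCr & ~uFixed1) == 0;
    }
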
    15691446
     
    16591536#endif
    16601537
    1661 /**
    1662  * Initializes VMX host and guest features.
    1663  *
    1664  * @param   pVM     The cross context VM structure.
    1665  *
    1666  * @remarks This must be called only after HM has fully initialized since it calls
    1667  *          into HM to retrieve VMX and related MSRs.
    1668  */
    1669 static void cpumR3InitVmxCpuFeatures(PVM pVM)
    1670 {
    1671     /*
    1672      * Init. host features.
    1673      */
    1674     PCPUMFEATURES pHostFeat = &pVM->cpum.s.HostFeatures;
    1675     VMXMSRS VmxMsrs;
    1676     if (cpumR3IsHwAssistVmxNstGstExecAllowed(pVM))
    1677     {
    1678         /** @todo NSTVMX: When NEM support for nested-VMX is there, we'll need to fetch
    1679          *        the MSRs from NEM or do the support driver IOCTL route, see patch in
    1680          *        @bugref{9180}. */
    1681         if (HMIsEnabled(pVM))
    1682         {
    1683             int rc = HMVmxGetHostMsrs(pVM, &VmxMsrs);
    1684             if (RT_SUCCESS(rc))
    1685                 cpumR3ExplodeVmxFeatures(&VmxMsrs, pHostFeat);
    1686         }
    1687         else
    1688             AssertMsgFailed(("NEM support for nested-VMX is not implemented yet\n"));
    1689     }
     1538
     1539/**
     1540 * Initializes VMX guest features and MSRs.
     1541 *
     1542 * @param   pVM             The cross context VM structure.
     1543 * @param   pHostVmxMsrs    The host VMX MSRs. Pass NULL when fully emulating VMX
     1544 *                          and no hardware-assisted nested-guest execution is
     1545 *                          possible for this VM.
     1546 * @param   pGuestVmxMsrs   Where to store the initialized guest VMX MSRs.
     1547 */
     1548void cpumR3InitVmxGuestFeaturesAndMsrs(PVM pVM, PCVMXMSRS pHostVmxMsrs, PVMXMSRS pGuestVmxMsrs)
     1549{
     1550    Assert(pVM);
     1551    Assert(pGuestVmxMsrs);
    16901552
    16911553    /*
    16921554     * Initialize the set of VMX features we emulate.
    1693      * Note! Some bits might be reported as 1 always if they fall under the default1 class bits
    1694      *       (e.g. fVmxEntryLoadDebugCtls), see @bugref{9180#c5}.
     1555     *
     1556     * Note! Some bits might be reported as 1 always if they fall under the
     1557     * default1 class bits (e.g. fVmxEntryLoadDebugCtls), see @bugref{9180#c5}.
    16951558     */
    16961559    CPUMFEATURES EmuFeat;
     
    17671630     * by the hardware, hence we merge our emulated features with the host features below.
    17681631     */
    1769     PCCPUMFEATURES pBaseFeat    = cpumR3IsHwAssistVmxNstGstExecAllowed(pVM) ? pHostFeat : &EmuFeat;
    1770     PCPUMFEATURES  pGuestFeat   = &pVM->cpum.s.GuestFeatures;
    1771     pGuestFeat->fVmx                      = (pBaseFeat->fVmx                      & EmuFeat.fVmx                     );
     1632    PCCPUMFEATURES pBaseFeat  = cpumR3IsHwAssistVmxNstGstExecAllowed(pVM) ? &pVM->cpum.s.HostFeatures : &EmuFeat;
     1633    PCPUMFEATURES  pGuestFeat = &pVM->cpum.s.GuestFeatures;
     1634    Assert(pBaseFeat->fVmx);
    17721635    pGuestFeat->fVmxInsOutInfo            = (pBaseFeat->fVmxInsOutInfo            & EmuFeat.fVmxInsOutInfo           );
    17731636    pGuestFeat->fVmxExtIntExit            = (pBaseFeat->fVmxExtIntExit            & EmuFeat.fVmxExtIntExit           );
     
    18601723
    18611724    /*
    1862      * Finally initialize the VMX guest MSRs after merging the guest features.
     1725     * Finally initialize the VMX guest MSRs.
    18631726     */
    1864     cpumR3InitGuestVmxMsrs(pVM);
     1727    cpumR3InitVmxGuestMsrs(pVM, pHostVmxMsrs, pGuestFeat, pGuestVmxMsrs);
     1728}
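
The per-feature merge above is a plain bitwise-AND policy: a VMX feature reaches the guest only if both the emulated feature set and the base set (the host features when hardware-assisted nested-guest execution is allowed, EmuFeat otherwise) report it. A tiny illustration with hypothetical values:

    #include <stdbool.h>

    /* Illustration only (hypothetical values): a feature is exposed to the
       nested guest only when both the emulated set and the base set support
       it, mirroring: pGuestFeat->fX = pBaseFeat->fX & EmuFeat.fX. */
    static bool mergeFeature(bool fBase, bool fEmu)
    {
        return fBase && fEmu;   /* e.g. host supports it but it is not emulated => hidden */
    }
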
     1729
     1730
     1731static int cpumR3GetHostHwvirtMsrs(PCPUMMSRS pMsrs)
     1732{
     1733    Assert(pMsrs);
     1734
     1735    uint32_t fCaps = 0;
     1736    int rc = SUPR3QueryVTCaps(&fCaps);
     1737    if (RT_SUCCESS(rc))
     1738    {
     1739        if (fCaps & (SUPVTCAPS_VT_X | SUPVTCAPS_AMD_V))
     1740        {
     1741            SUPHWVIRTMSRS HwvirtMsrs;
     1742            rc = SUPR3GetHwvirtMsrs(&HwvirtMsrs, false /* fForceRequery */);
     1743            if (RT_SUCCESS(rc))
     1744            {
     1745                if (fCaps & SUPVTCAPS_VT_X)
     1746                    pMsrs->hwvirt.vmx = HwvirtMsrs.u.vmx;
     1747                else
     1748                    pMsrs->hwvirt.svm = HwvirtMsrs.u.svm;
     1749                return VINF_SUCCESS;
     1750            }
     1751
     1752            LogRel(("CPUM: Query hardware-virtualization MSRs failed. rc=%Rrc\n", rc));
     1753            return rc;
     1754        }
     1755        else
     1756        {
     1757            LogRel(("CPUM: Querying hardware-virtualization capability succeeded but did not find VT-x or AMD-V\n"));
     1758            return VERR_INTERNAL_ERROR_5;
     1759        }
     1760    }
     1761    else
     1762        LogRel(("CPUM: No hardware-virtualization capability detected\n"));
     1763
     1764    return VINF_SUCCESS;
    18651765}
    18661766
     
    19161816    if (!ASMHasCpuId())
    19171817    {
    1918         Log(("The CPU doesn't support CPUID!\n"));
     1818        LogRel(("The CPU doesn't support CPUID!\n"));
    19191819        return VERR_UNSUPPORTED_CPU;
    19201820    }
    19211821
    19221822    pVM->cpum.s.fHostMxCsrMask = CPUMR3DeterminHostMxCsrMask();
     1823
     1824    CPUMMSRS HostMsrs;
     1825    RT_ZERO(HostMsrs);
     1826    int rc = cpumR3GetHostHwvirtMsrs(&HostMsrs);
     1827    AssertLogRelRCReturn(rc, rc);
    19231828
    19241829    PCPUMCPUIDLEAF  paLeaves;
    19251830    uint32_t        cLeaves;
    1926     int rc = CPUMR3CpuIdCollectLeaves(&paLeaves, &cLeaves);
     1831    rc = CPUMR3CpuIdCollectLeaves(&paLeaves, &cLeaves);
    19271832    AssertLogRelRCReturn(rc, rc);
    19281833
    1929     rc = cpumR3CpuIdExplodeFeatures(paLeaves, cLeaves, &pVM->cpum.s.HostFeatures);
     1834    rc = cpumR3CpuIdExplodeFeatures(paLeaves, cLeaves, &HostMsrs, &pVM->cpum.s.HostFeatures);
    19301835    RTMemFree(paLeaves);
    19311836    AssertLogRelRCReturn(rc, rc);
     
    20421947     * Initialize the Guest CPUID and MSR states.
    20431948     */
    2044     rc = cpumR3InitCpuIdAndMsrs(pVM);
     1949    rc = cpumR3InitCpuIdAndMsrs(pVM, &HostMsrs);
    20451950    if (RT_FAILURE(rc))
    20461951        return rc;
    20471952
    20481953    /*
    2049      * Allocate memory required by the guest hardware virtualization state.
     1954     * Allocate memory required by the guest hardware-virtualization structures.
     1955     * This must be done after initializing CPUID/MSR features as we access the
     1956     * VMX/SVM guest features below.
    20501957     */
    20511958    if (pVM->cpum.s.GuestFeatures.fVmx)
     
    20571964    if (RT_FAILURE(rc))
    20581965        return rc;
    2059 
    2060     /*
    2061      * Initialize guest hardware virtualization state.
    2062      */
    2063     CPUMHWVIRT const enmHwvirt = pVM->aCpus[0].cpum.s.Guest.hwvirt.enmHwvirt;
    2064     if (enmHwvirt == CPUMHWVIRT_VMX)
    2065     {
    2066         for (VMCPUID i = 0; i < pVM->cCpus; i++)
    2067             cpumR3InitVmxHwVirtState(&pVM->aCpus[i]);
    2068 
    2069         /* Initialize VMX features. */
    2070         cpumR3InitVmxCpuFeatures(pVM);
    2071         DBGFR3Info(pVM->pUVM, "cpumvmxfeat", "default", DBGFR3InfoLogRelHlp());
    2072     }
    2073     else if (enmHwvirt == CPUMHWVIRT_SVM)
    2074     {
    2075         for (VMCPUID i = 0; i < pVM->cCpus; i++)
    2076             cpumR3InitSvmHwVirtState(&pVM->aCpus[i]);
    2077     }
    20781966
    20791967    /*
     
    23142202    Assert(!pVM->cpum.s.GuestFeatures.fVmx || !pVM->cpum.s.GuestFeatures.fSvm);   /* Paranoia. */
    23152203    if (pVM->cpum.s.GuestFeatures.fVmx)
    2316         cpumR3InitVmxHwVirtState(pVCpu);
     2204        cpumR3ResetVmxHwVirtState(pVCpu);
    23172205    else if (pVM->cpum.s.GuestFeatures.fSvm)
    2318         cpumR3InitSvmHwVirtState(pVCpu);
     2206        cpumR3ResetSvmHwVirtState(pVCpu);
    23192207}
    23202208
     
    26702558                    }
    26712559                }
     2560                /** @todo NSTVMX: Load VMX state. */
    26722561            }
    26732562            else
     
    27732662     */
    27742663    if (uVersion >= CPUM_SAVED_STATE_VERSION_VER3_2)
    2775         return cpumR3LoadCpuId(pVM, pSSM, uVersion);
     2664    {
     2665        CPUMMSRS GuestMsrs;
     2666        RT_ZERO(GuestMsrs);
     2667        if (pVM->cpum.s.GuestFeatures.fVmx)
     2668            GuestMsrs.hwvirt.vmx = pVM->aCpus[0].cpum.s.Guest.hwvirt.vmx.Msrs;
     2669        return cpumR3LoadCpuId(pVM, pSSM, uVersion, &GuestMsrs);
     2670    }
    27762671    return cpumR3LoadCpuIdPre32(pVM, pSSM, uVersion);
    27772672}
     
    40323927    RTLogRelSetBuffering(fOldBuffered);
    40333928    LogRel(("******************** End of CPUID dump **********************\n"));
    4034 }
    4035 
     3929
     3930    /*
     3931     * Log VT-x extended features.
     3932     *
     3933     * SVM features are currently all covered under CPUID so there is nothing
     3934     * to do here for SVM.
     3935     */
     3936    if (pVM->cpum.s.HostFeatures.fVmx)
     3937    {
     3938        LogRel(("*********************** VT-x features ***********************\n"));
     3939        DBGFR3Info(pVM->pUVM, "cpumvmxfeat", "default", DBGFR3InfoLogRelHlp());
     3940        LogRel(("\n"));
     3941        LogRel(("******************* End of VT-x features ********************\n"));
     3942    }
     3943}
     3944
  • trunk/src/VBox/VMM/VMMR3/CPUMR3CpuId.cpp

    r74163 r76464  
    16791679
    16801680
    1681 int cpumR3CpuIdExplodeFeatures(PCCPUMCPUIDLEAF paLeaves, uint32_t cLeaves, PCPUMFEATURES pFeatures)
    1682 {
     1681static void cpumR3ExplodeVmxFeatures(PCVMXMSRS pVmxMsrs, PCPUMFEATURES pFeatures)
     1682{
     1683    Assert(pVmxMsrs);
     1684    Assert(pFeatures);
     1685    Assert(pFeatures->fVmx);
     1686
     1687    /* Basic information. */
     1688    {
     1689        uint64_t const u64Basic = pVmxMsrs->u64Basic;
     1690        pFeatures->fVmxInsOutInfo            = RT_BF_GET(u64Basic, VMX_BF_BASIC_VMCS_INS_OUTS);
     1691    }
     1692
     1693    /* Pin-based VM-execution controls. */
     1694    {
     1695        uint32_t const fPinCtls = pVmxMsrs->PinCtls.n.allowed1;
     1696        pFeatures->fVmxExtIntExit            = RT_BOOL(fPinCtls & VMX_PIN_CTLS_EXT_INT_EXIT);
     1697        pFeatures->fVmxNmiExit               = RT_BOOL(fPinCtls & VMX_PIN_CTLS_NMI_EXIT);
     1698        pFeatures->fVmxVirtNmi               = RT_BOOL(fPinCtls & VMX_PIN_CTLS_VIRT_NMI);
     1699        pFeatures->fVmxPreemptTimer          = RT_BOOL(fPinCtls & VMX_PIN_CTLS_PREEMPT_TIMER);
     1700        pFeatures->fVmxPostedInt             = RT_BOOL(fPinCtls & VMX_PIN_CTLS_POSTED_INT);
     1701    }
     1702
     1703    /* Processor-based VM-execution controls. */
     1704    {
     1705        uint32_t const fProcCtls = pVmxMsrs->ProcCtls.n.allowed1;
     1706        pFeatures->fVmxIntWindowExit         = RT_BOOL(fProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT);
     1707        pFeatures->fVmxTscOffsetting         = RT_BOOL(fProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING);
     1708        pFeatures->fVmxHltExit               = RT_BOOL(fProcCtls & VMX_PROC_CTLS_HLT_EXIT);
     1709        pFeatures->fVmxInvlpgExit            = RT_BOOL(fProcCtls & VMX_PROC_CTLS_INVLPG_EXIT);
     1710        pFeatures->fVmxMwaitExit             = RT_BOOL(fProcCtls & VMX_PROC_CTLS_MWAIT_EXIT);
     1711        pFeatures->fVmxRdpmcExit             = RT_BOOL(fProcCtls & VMX_PROC_CTLS_RDPMC_EXIT);
     1712        pFeatures->fVmxRdtscExit             = RT_BOOL(fProcCtls & VMX_PROC_CTLS_RDTSC_EXIT);
     1713        pFeatures->fVmxCr3LoadExit           = RT_BOOL(fProcCtls & VMX_PROC_CTLS_CR3_LOAD_EXIT);
     1714        pFeatures->fVmxCr3StoreExit          = RT_BOOL(fProcCtls & VMX_PROC_CTLS_CR3_STORE_EXIT);
     1715        pFeatures->fVmxCr8LoadExit           = RT_BOOL(fProcCtls & VMX_PROC_CTLS_CR8_LOAD_EXIT);
     1716        pFeatures->fVmxCr8StoreExit          = RT_BOOL(fProcCtls & VMX_PROC_CTLS_CR8_STORE_EXIT);
     1717        pFeatures->fVmxUseTprShadow          = RT_BOOL(fProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
     1718        pFeatures->fVmxNmiWindowExit         = RT_BOOL(fProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT);
     1719        pFeatures->fVmxMovDRxExit            = RT_BOOL(fProcCtls & VMX_PROC_CTLS_MOV_DR_EXIT);
     1720        pFeatures->fVmxUncondIoExit          = RT_BOOL(fProcCtls & VMX_PROC_CTLS_UNCOND_IO_EXIT);
     1721        pFeatures->fVmxUseIoBitmaps          = RT_BOOL(fProcCtls & VMX_PROC_CTLS_USE_IO_BITMAPS);
     1722        pFeatures->fVmxMonitorTrapFlag       = RT_BOOL(fProcCtls & VMX_PROC_CTLS_MONITOR_TRAP_FLAG);
     1723        pFeatures->fVmxUseMsrBitmaps         = RT_BOOL(fProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS);
     1724        pFeatures->fVmxMonitorExit           = RT_BOOL(fProcCtls & VMX_PROC_CTLS_MONITOR_EXIT);
     1725        pFeatures->fVmxPauseExit             = RT_BOOL(fProcCtls & VMX_PROC_CTLS_PAUSE_EXIT);
     1726        pFeatures->fVmxSecondaryExecCtls     = RT_BOOL(fProcCtls & VMX_PROC_CTLS_USE_SECONDARY_CTLS);
     1727    }
     1728
     1729    /* Secondary processor-based VM-execution controls. */
     1730    {
     1731        uint32_t const fProcCtls2 = pFeatures->fVmxSecondaryExecCtls ? pVmxMsrs->ProcCtls2.n.allowed1 : 0;
     1732        pFeatures->fVmxVirtApicAccess        = RT_BOOL(fProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS);
     1733        pFeatures->fVmxEpt                   = RT_BOOL(fProcCtls2 & VMX_PROC_CTLS2_EPT);
     1734        pFeatures->fVmxDescTableExit         = RT_BOOL(fProcCtls2 & VMX_PROC_CTLS2_DESC_TABLE_EXIT);
     1735        pFeatures->fVmxRdtscp                = RT_BOOL(fProcCtls2 & VMX_PROC_CTLS2_RDTSCP);
     1736        pFeatures->fVmxVirtX2ApicMode        = RT_BOOL(fProcCtls2 & VMX_PROC_CTLS2_VIRT_X2APIC_MODE);
     1737        pFeatures->fVmxVpid                  = RT_BOOL(fProcCtls2 & VMX_PROC_CTLS2_VPID);
     1738        pFeatures->fVmxWbinvdExit            = RT_BOOL(fProcCtls2 & VMX_PROC_CTLS2_WBINVD_EXIT);
     1739        pFeatures->fVmxUnrestrictedGuest     = RT_BOOL(fProcCtls2 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST);
     1740        pFeatures->fVmxApicRegVirt           = RT_BOOL(fProcCtls2 & VMX_PROC_CTLS2_APIC_REG_VIRT);
     1741        pFeatures->fVmxVirtIntDelivery       = RT_BOOL(fProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY);
     1742        pFeatures->fVmxPauseLoopExit         = RT_BOOL(fProcCtls2 & VMX_PROC_CTLS2_PAUSE_LOOP_EXIT);
     1743        pFeatures->fVmxRdrandExit            = RT_BOOL(fProcCtls2 & VMX_PROC_CTLS2_RDRAND_EXIT);
     1744        pFeatures->fVmxInvpcid               = RT_BOOL(fProcCtls2 & VMX_PROC_CTLS2_INVPCID);
     1745        pFeatures->fVmxVmFunc                = RT_BOOL(fProcCtls2 & VMX_PROC_CTLS2_VMFUNC);
     1746        pFeatures->fVmxVmcsShadowing         = RT_BOOL(fProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING);
     1747        pFeatures->fVmxRdseedExit            = RT_BOOL(fProcCtls2 & VMX_PROC_CTLS2_RDSEED_EXIT);
     1748        pFeatures->fVmxPml                   = RT_BOOL(fProcCtls2 & VMX_PROC_CTLS2_PML);
     1749        pFeatures->fVmxEptXcptVe             = RT_BOOL(fProcCtls2 & VMX_PROC_CTLS2_EPT_VE);
     1750        pFeatures->fVmxXsavesXrstors         = RT_BOOL(fProcCtls2 & VMX_PROC_CTLS2_XSAVES_XRSTORS);
     1751        pFeatures->fVmxUseTscScaling         = RT_BOOL(fProcCtls2 & VMX_PROC_CTLS2_TSC_SCALING);
     1752    }
     1753
     1754    /* VM-exit controls. */
     1755    {
     1756        uint32_t const fExitCtls = pVmxMsrs->ExitCtls.n.allowed1;
     1757        pFeatures->fVmxExitSaveDebugCtls     = RT_BOOL(fExitCtls & VMX_EXIT_CTLS_SAVE_DEBUG);
     1758        pFeatures->fVmxHostAddrSpaceSize     = RT_BOOL(fExitCtls & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE);
     1759        pFeatures->fVmxExitAckExtInt         = RT_BOOL(fExitCtls & VMX_EXIT_CTLS_ACK_EXT_INT);
     1760        pFeatures->fVmxExitSavePatMsr        = RT_BOOL(fExitCtls & VMX_EXIT_CTLS_SAVE_PAT_MSR);
     1761        pFeatures->fVmxExitLoadPatMsr        = RT_BOOL(fExitCtls & VMX_EXIT_CTLS_LOAD_PAT_MSR);
     1762        pFeatures->fVmxExitSaveEferMsr       = RT_BOOL(fExitCtls & VMX_EXIT_CTLS_SAVE_EFER_MSR);
     1763        pFeatures->fVmxExitLoadEferMsr       = RT_BOOL(fExitCtls & VMX_EXIT_CTLS_LOAD_EFER_MSR);
     1764        pFeatures->fVmxSavePreemptTimer      = RT_BOOL(fExitCtls & VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER);
     1765    }
     1766
     1767    /* VM-entry controls. */
     1768    {
     1769        uint32_t const fEntryCtls = pVmxMsrs->EntryCtls.n.allowed1;
     1770        pFeatures->fVmxEntryLoadDebugCtls    = RT_BOOL(fEntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG);
     1771        pFeatures->fVmxIa32eModeGuest        = RT_BOOL(fEntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
     1772        pFeatures->fVmxEntryLoadEferMsr      = RT_BOOL(fEntryCtls & VMX_ENTRY_CTLS_LOAD_EFER_MSR);
     1773        pFeatures->fVmxEntryLoadPatMsr       = RT_BOOL(fEntryCtls & VMX_ENTRY_CTLS_LOAD_PAT_MSR);
     1774    }
     1775
     1776    /* Miscellaneous data. */
     1777    {
     1778        uint32_t const fMiscData = pVmxMsrs->u64Misc;
     1779        pFeatures->fVmxExitSaveEferLma       = RT_BOOL(fMiscData & VMX_MISC_EXIT_SAVE_EFER_LMA);
     1780        pFeatures->fVmxIntelPt               = RT_BOOL(fMiscData & VMX_MISC_INTEL_PT);
     1781        pFeatures->fVmxVmwriteAll            = RT_BOOL(fMiscData & VMX_MISC_VMWRITE_ALL);
     1782        pFeatures->fVmxEntryInjectSoftInt    = RT_BOOL(fMiscData & VMX_MISC_ENTRY_INJECT_SOFT_INT);
     1783    }
     1784}
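
Both the explode routine above and the MSR construction earlier access these capability MSRs through two views: the raw 64-bit value (.u, built with RT_MAKE_U64) and the split allowed-0/allowed-1 halves (.n.allowed0 / .n.allowed1). A sketch of a union consistent with that usage (the exact declaration in the VBox headers may differ):

    #include <stdint.h>

    /* Sketch of a VMX control-MSR union matching the accesses in this diff:
       the low dword holds the allowed-0 ("must be one") bits and the high
       dword the allowed-1 ("may be one") bits of the capability MSR. */
    typedef union
    {
        struct
        {
            uint32_t allowed0;  /* bits 31:0  - controls that must be set */
            uint32_t allowed1;  /* bits 63:32 - controls that may be set  */
        } n;
        uint64_t u;             /* the raw MSR value                      */
    } VMXCTLSMSR;
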
     1785
     1786
     1787int cpumR3CpuIdExplodeFeatures(PCCPUMCPUIDLEAF paLeaves, uint32_t cLeaves, PCCPUMMSRS pMsrs, PCPUMFEATURES pFeatures)
     1788{
     1789    Assert(pMsrs);
    16831790    RT_ZERO(*pFeatures);
    16841791    if (cLeaves >= 2)
     
    17471854        pFeatures->fPcid                = RT_BOOL(pStd1Leaf->uEcx & X86_CPUID_FEATURE_ECX_PCID);
    17481855        pFeatures->fVmx                 = RT_BOOL(pStd1Leaf->uEcx & X86_CPUID_FEATURE_ECX_VMX);
    1749         /* VMX sub-features will be initialized in cpumR3InitVmxCpuFeatures(). */
     1856        if (pFeatures->fVmx)
     1857            cpumR3ExplodeVmxFeatures(&pMsrs->hwvirt.vmx, pFeatures);
    17501858
    17511859        /* Structured extended features. */
     
    21862294 * @param   paLeaves    The leaves.  These will be copied (but not freed).
    21872295 * @param   cLeaves     The number of leaves.
     2296 * @param   pMsrs       The MSRs.
    21882297 */
    2189 static int cpumR3CpuIdInstallAndExplodeLeaves(PVM pVM, PCPUM pCpum, PCPUMCPUIDLEAF paLeaves, uint32_t cLeaves)
     2298static int cpumR3CpuIdInstallAndExplodeLeaves(PVM pVM, PCPUM pCpum, PCPUMCPUIDLEAF paLeaves, uint32_t cLeaves, PCCPUMMSRS pMsrs)
    21902299{
    21912300    cpumR3CpuIdAssertOrder(paLeaves, cLeaves);
     
    22352344     * Explode the guest CPU features.
    22362345     */
    2237     rc = cpumR3CpuIdExplodeFeatures(pCpum->GuestInfo.paCpuIdLeavesR3, pCpum->GuestInfo.cCpuIdLeaves, &pCpum->GuestFeatures);
     2346    rc = cpumR3CpuIdExplodeFeatures(pCpum->GuestInfo.paCpuIdLeavesR3, pCpum->GuestInfo.cCpuIdLeaves, pMsrs,
     2347                                    &pCpum->GuestFeatures);
    22382348    AssertLogRelRCReturn(rc, rc);
    22392349
     
    42194329 * @returns VBox status code.
    42204330 * @param   pVM          The cross context VM structure.
     4331 * @param   pHostMsrs    Pointer to the host MSRs.
    42214332 */
    4222 int cpumR3InitCpuIdAndMsrs(PVM pVM)
    4223 {
     4333int cpumR3InitCpuIdAndMsrs(PVM pVM, PCCPUMMSRS pHostMsrs)
     4334{
     4335    Assert(pHostMsrs);
     4336
    42244337    PCPUM       pCpum    = &pVM->cpum.s;
    42254338    PCFGMNODE   pCpumCfg = CFGMR3GetChild(CFGMR3GetRoot(pVM), "CPUM");
     
    42814394                        "Please use IMachine::setCPUIDLeaf() instead.");
    42824395
     4396    CPUMMSRS GuestMsrs;
     4397    RT_ZERO(GuestMsrs);
     4398
    42834399    /*
    42844400     * Pre-explode the CPUID info.
    42854401     */
    42864402    if (RT_SUCCESS(rc))
    4287         rc = cpumR3CpuIdExplodeFeatures(pCpum->GuestInfo.paCpuIdLeavesR3, pCpum->GuestInfo.cCpuIdLeaves, &pCpum->GuestFeatures);
     4403    {
     4404        rc = cpumR3CpuIdExplodeFeatures(pCpum->GuestInfo.paCpuIdLeavesR3, pCpum->GuestInfo.cCpuIdLeaves, &GuestMsrs,
     4405                                        &pCpum->GuestFeatures);
     4406    }
    42884407
    42894408    /*
     
    43254444        void *pvFree = pCpum->GuestInfo.paCpuIdLeavesR3;
    43264445        int rc1 = cpumR3CpuIdInstallAndExplodeLeaves(pVM, pCpum, pCpum->GuestInfo.paCpuIdLeavesR3,
    4327                                                      pCpum->GuestInfo.cCpuIdLeaves);
     4446                                                     pCpum->GuestInfo.cCpuIdLeaves, &GuestMsrs);
    43284447        RTMemFree(pvFree);
    43294448
     
    43394458        pCpum->GuestInfo.paMsrRangesRC = MMHyperR3ToRC(pVM, pCpum->GuestInfo.paMsrRangesR3);
    43404459
     4460        /*
     4461         * Finally, initialize guest VMX MSRs.
     4462         *
     4463         * This needs to be done -after- exploding guest features and sanitizing CPUID leaves
     4464         * as constructing VMX capabilities MSRs rely on CPU feature bits such as long mode,
     4465         * unrestricted execution and possibly more in the future.
     4466         */
     4467        if (pVM->cpum.s.GuestFeatures.fVmx)
     4468        {
     4469            Assert(Config.fNestedHWVirt);
     4470            cpumR3InitVmxGuestFeaturesAndMsrs(pVM, &pHostMsrs->hwvirt.vmx, &GuestMsrs.hwvirt.vmx);
     4471
     4472            /* Copy MSRs to all VCPUs */
     4473            PCVMXMSRS pVmxMsrs = &GuestMsrs.hwvirt.vmx;
     4474            for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
     4475            {
     4476                PVMCPU pVCpu = &pVM->aCpus[idCpu];
     4477                memcpy(&pVCpu->cpum.s.Guest.hwvirt.vmx.Msrs, pVmxMsrs, sizeof(*pVmxMsrs));
     4478            }
     4479        }
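
The ordering requirement stated in the comment above is concrete: for example, the IA32_VMX_BASIC value built in cpumR3InitVmxGuestMsrs derives its physical-address-width bit from the guest's long-mode feature, so the guest features must be final before the MSRs are constructed. A self-contained sketch of that one derivation (bit position per the Intel SDM; the helper name is illustrative):

    #include <stdbool.h>
    #include <stdint.h>

    /* Illustration only: IA32_VMX_BASIC bit 48 reports that VMX structures
       are limited to 32-bit physical addresses; it is set exactly when the
       guest lacks long mode, mirroring
       RT_BF_MAKE(VMX_BF_BASIC_PHYSADDR_WIDTH, !pGuestFeatures->fLongMode). */
    static uint64_t vmxBasicPhysAddrWidthBit(bool fGuestLongMode)
    {
        return (uint64_t)!fGuestLongMode << 48;
    }
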
    43414480
    43424481        /*
     
    51165255 * @param   paLeaves            Guest CPUID leaves loaded from the state.
    51175256 * @param   cLeaves             The number of leaves in @a paLeaves.
     5257 * @param   pMsrs               The guest MSRs.
    51185258 */
    5119 int cpumR3LoadCpuIdInner(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, PCPUMCPUIDLEAF paLeaves, uint32_t cLeaves)
     5259int cpumR3LoadCpuIdInner(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, PCPUMCPUIDLEAF paLeaves, uint32_t cLeaves, PCCPUMMSRS pMsrs)
    51205260{
    51215261    AssertMsgReturn(uVersion >= CPUM_SAVED_STATE_VERSION_VER3_2, ("%u\n", uVersion), VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION);
     
    58595999    pVM->cpum.s.GuestInfo.paCpuIdLeavesRC = NIL_RTRCPTR;
    58606000    pVM->cpum.s.GuestInfo.DefCpuId = GuestDefCpuId;
    5861     rc = cpumR3CpuIdInstallAndExplodeLeaves(pVM, &pVM->cpum.s, paLeaves, cLeaves);
     6001    rc = cpumR3CpuIdInstallAndExplodeLeaves(pVM, &pVM->cpum.s, paLeaves, cLeaves, pMsrs);
    58626002    AssertLogRelRCReturn(rc, rc);
    58636003
     
    58736013 * @param   pSSM                The saved state handle.
    58746014 * @param   uVersion            The format version.
     6015 * @param   pMsrs               The guest MSRs.
    58756016 */
    5876 int cpumR3LoadCpuId(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion)
     6017int cpumR3LoadCpuId(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, PCCPUMMSRS pMsrs)
    58776018{
    58786019    AssertMsgReturn(uVersion >= CPUM_SAVED_STATE_VERSION_VER3_2, ("%u\n", uVersion), VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION);
     
    58886029    if (RT_SUCCESS(rc))
    58896030    {
    5890         rc = cpumR3LoadCpuIdInner(pVM, pSSM, uVersion, paLeaves, cLeaves);
     6031        rc = cpumR3LoadCpuIdInner(pVM, pSSM, uVersion, paLeaves, cLeaves, pMsrs);
    58916032        RTMemFree(paLeaves);
    58926033    }
  • trunk/src/VBox/VMM/VMMR3/HM.cpp

    r76290 r76464  
    703703            if (fCaps & SUPVTCAPS_AMD_V)
    704704            {
    705                 rc = SUPR3CallVMMR0Ex(pVM->pVMR0, 0 /*idCpu*/, VMMR0_DO_HM_PRE_INIT, 0, NULL);
    706                 AssertRCReturn(rc, rc);
    707                 Assert(pVM->hm.s.svm.fSupported);
    708 
     705                pVM->hm.s.svm.fSupported = true;
    709706                LogRel(("HM: HMR3Init: AMD-V%s\n", fCaps & SUPVTCAPS_NESTED_PAGING ? " w/ nested paging" : ""));
    710707                VM_SET_MAIN_EXECUTION_ENGINE(pVM, VM_EXEC_ENGINE_HW_VIRT);
     
    716713                if (RT_SUCCESS(rc))
    717714                {
    718                     rc = SUPR3CallVMMR0Ex(pVM->pVMR0, 0 /*idCpu*/, VMMR0_DO_HM_PRE_INIT, 0, NULL);
    719                     AssertRCReturn(rc, rc);
    720                     Assert(pVM->hm.s.vmx.fSupported);
    721 
     715                    pVM->hm.s.vmx.fSupported = true;
    722716                    LogRel(("HM: HMR3Init: VT-x%s%s%s\n",
    723717                            fCaps & SUPVTCAPS_NESTED_PAGING ? " w/ nested paging" : "",
  • trunk/src/VBox/VMM/include/CPUMInternal.h

    r75611 r76464  
    530530# ifdef IN_RING3
    531531int                 cpumR3DbgInit(PVM pVM);
    532 int                 cpumR3CpuIdExplodeFeatures(PCCPUMCPUIDLEAF paLeaves, uint32_t cLeaves, PCPUMFEATURES pFeatures);
    533 int                 cpumR3InitCpuIdAndMsrs(PVM pVM);
     532int                 cpumR3CpuIdExplodeFeatures(PCCPUMCPUIDLEAF paLeaves, uint32_t cLeaves, PCCPUMMSRS pMsrs, PCPUMFEATURES pFeatures);
     533int                 cpumR3InitCpuIdAndMsrs(PVM pVM, PCCPUMMSRS pHostMsrs);
     534void                cpumR3InitVmxGuestFeaturesAndMsrs(PVM pVM, PCVMXMSRS pHostVmxMsrs, PVMXMSRS pGuestVmxMsrs);
    534535void                cpumR3SaveCpuId(PVM pVM, PSSMHANDLE pSSM);
    535 int                 cpumR3LoadCpuId(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion);
     536int                 cpumR3LoadCpuId(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, PCCPUMMSRS pGuestMsrs);
    536537int                 cpumR3LoadCpuIdPre32(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion);
    537538DECLCALLBACK(void)  cpumR3CpuIdInfo(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
  • trunk/src/VBox/VMM/include/CPUMInternal.mac

    r76148 r76464  
    253253    alignb 8
    254254    .Guest.hwvirt.svm.HCPhysVmcb             RTHCPHYS_RES 1
    255     .Guest.hwvirt.svm.u64Padding0            resb         120
     255    .Guest.hwvirt.svm.u64Padding0            resq         19
    256256    .Guest.hwvirt.enmHwvirt                  resd         1
    257257    .Guest.hwvirt.fGif                       resb         1
     
    543543    alignb 8
    544544    .Hyper.hwvirt.svm.HCPhysVmcb             RTHCPHYS_RES 1
    545     .Hyper.hwvirt.svm.u64Padding0            resb         120
     545    .Hyper.hwvirt.svm.u64Padding0            resq         19
    546546    .Hyper.hwvirt.enmHwvirt                  resd         1
    547547    .Hyper.hwvirt.fGif                       resb         1
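
For the padding changes above: resb 120 reserves 120 bytes, whereas resq 19 reserves 19 x 8 = 152 bytes, i.e. 32 bytes more; presumably this keeps the assembly-side structure offsets in sync with the enlarged C-side hwvirt state.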