VirtualBox

Changeset 73293 in vbox for trunk/src/VBox/VMM


Ignore:
Timestamp:
Jul 21, 2018 3:11:53 PM (6 years ago)
Author:
vboxsync
Message:

VMM, SUPDrv: Nested VMX: bugref:9180 Read VMX true control MSRs, dump them. Remove pVM->hm.cpuid, as we have had
cpum.ro.HostFeatures available for a long time now. Related cleanups and simplifications.

Location:
trunk/src/VBox/VMM
Files:
6 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/VMM/VMMAll/HMAll.cpp

    r73266 r73293  
    363363
    364364        if (   pVM->hm.s.vmx.fSupported
    365             && (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS))
     365            && (pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS))
    366366        {
    367367            return true;
  • trunk/src/VBox/VMM/VMMR0/HMR0.cpp

    r73292 r73293  
    152152    int32_t                         rcInit;
    153153
    154     /** CPUID 0x80000001 ecx:edx features */
    155     struct
    156     {
    157         uint32_t                    u32AMDFeatureECX;
    158         uint32_t                    u32AMDFeatureEDX;
    159     } cpuid;
    160 
    161154    /** If set, VT-x/AMD-V is enabled globally at init time, otherwise it's
    162155     * enabled and disabled each time it's used to execute guest code. */
     
    340333 *
    341334 * @returns VBox status code (will only fail if out of memory).
    342  */
    343 static int hmR0InitIntel(uint32_t u32FeaturesECX, uint32_t u32FeaturesEDX)
     335 * @param   uFeatEcx        Standard cpuid:1 feature ECX leaf.
     336 * @param   uFeatEdx        Standard cpuid:1 feature EDX leaf.
     337 */
     338static int hmR0InitIntel(uint32_t uFeatEcx, uint32_t uFeatEdx)
    344339{
    345340    /*
     
    347342     * We also assume all VT-x-enabled CPUs support fxsave/fxrstor.
    348343     */
    349     if (    (u32FeaturesECX & X86_CPUID_FEATURE_ECX_VMX)
    350          && (u32FeaturesEDX & X86_CPUID_FEATURE_EDX_MSR)
    351          && (u32FeaturesEDX & X86_CPUID_FEATURE_EDX_FXSR)
    352        )
     344    if (    (uFeatEcx & X86_CPUID_FEATURE_ECX_VMX)
     345         && (uFeatEdx & X86_CPUID_FEATURE_EDX_MSR)
     346         && (uFeatEdx & X86_CPUID_FEATURE_EDX_FXSR))
    353347    {
    354348        /* Read this MSR now as it may be useful for error reporting when initializing VT-x fails. */
    355         g_HmR0.vmx.Msrs.u64FeatureCtrl = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);
     349        g_HmR0.vmx.Msrs.u64FeatCtrl = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);
    356350
    357351        /*
     
    382376        {
    383377            /* Reread in case it was changed by SUPR0GetVmxUsability(). */
    384             g_HmR0.vmx.Msrs.u64FeatureCtrl = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);
     378            g_HmR0.vmx.Msrs.u64FeatCtrl    = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);
    385379
    386380            /*
     
    391385            g_HmR0.vmx.Msrs.u64Basic        = ASMRdMsr(MSR_IA32_VMX_BASIC);
    392386            /* KVM workaround: Intel SDM section 34.15.5 describes that MSR_IA32_SMM_MONITOR_CTL
    393              * depends on bit 49 of MSR_IA32_VMX_BASIC_INFO while table 35-2 says that this MSR
    394              * is available if either VMX or SMX is supported. */
     387             * depends on bit 49 of MSR_IA32_VMX_BASIC while table 35-2 says that this MSR is
     388             * available if either VMX or SMX is supported. */
    395389            if (MSR_IA32_VMX_BASIC_DUAL_MON(g_HmR0.vmx.Msrs.u64Basic))
    396390                g_HmR0.vmx.u64HostSmmMonitorCtl = ASMRdMsr(MSR_IA32_SMM_MONITOR_CTL);
    397             g_HmR0.vmx.Msrs.VmxPinCtls.u    = ASMRdMsr(MSR_IA32_VMX_PINBASED_CTLS);
    398             g_HmR0.vmx.Msrs.VmxProcCtls.u   = ASMRdMsr(MSR_IA32_VMX_PROCBASED_CTLS);
    399             g_HmR0.vmx.Msrs.VmxExit.u       = ASMRdMsr(MSR_IA32_VMX_EXIT_CTLS);
    400             g_HmR0.vmx.Msrs.VmxEntry.u      = ASMRdMsr(MSR_IA32_VMX_ENTRY_CTLS);
     391            g_HmR0.vmx.Msrs.PinCtls.u       = ASMRdMsr(MSR_IA32_VMX_PINBASED_CTLS);
     392            g_HmR0.vmx.Msrs.ProcCtls.u      = ASMRdMsr(MSR_IA32_VMX_PROCBASED_CTLS);
     393            g_HmR0.vmx.Msrs.ExitCtls.u      = ASMRdMsr(MSR_IA32_VMX_EXIT_CTLS);
     394            g_HmR0.vmx.Msrs.EntryCtls.u     = ASMRdMsr(MSR_IA32_VMX_ENTRY_CTLS);
    401395            g_HmR0.vmx.Msrs.u64Misc         = ASMRdMsr(MSR_IA32_VMX_MISC);
    402396            g_HmR0.vmx.Msrs.u64Cr0Fixed0    = ASMRdMsr(MSR_IA32_VMX_CR0_FIXED0);
     
    405399            g_HmR0.vmx.Msrs.u64Cr4Fixed1    = ASMRdMsr(MSR_IA32_VMX_CR4_FIXED1);
    406400            g_HmR0.vmx.Msrs.u64VmcsEnum     = ASMRdMsr(MSR_IA32_VMX_VMCS_ENUM);
     401            if (MSR_IA32_VMX_BASIC_TRUE_CONTROLS(g_HmR0.vmx.Msrs.u64Basic))
     402            {
     403                g_HmR0.vmx.Msrs.TruePinCtls.u   = ASMRdMsr(MSR_IA32_VMX_TRUE_PINBASED_CTLS);
     404                g_HmR0.vmx.Msrs.TrueProcCtls.u  = ASMRdMsr(MSR_IA32_VMX_TRUE_PROCBASED_CTLS);
     405                g_HmR0.vmx.Msrs.TrueEntryCtls.u = ASMRdMsr(MSR_IA32_VMX_TRUE_ENTRY_CTLS);
     406                g_HmR0.vmx.Msrs.TrueExitCtls.u  = ASMRdMsr(MSR_IA32_VMX_TRUE_EXIT_CTLS);
     407            }
     408
    407409            /* VPID 16 bits ASID. */
    408             g_HmR0.uMaxAsid                 = 0x10000; /* exclusive */
    409 
    410             if (g_HmR0.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
     410            g_HmR0.uMaxAsid = 0x10000; /* exclusive */
     411
     412            if (g_HmR0.vmx.Msrs.ProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
    411413            {
    412                 g_HmR0.vmx.Msrs.VmxProcCtls2.u = ASMRdMsr(MSR_IA32_VMX_PROCBASED_CTLS2);
    413                 if (g_HmR0.vmx.Msrs.VmxProcCtls2.n.allowed1 & (VMX_VMCS_CTRL_PROC_EXEC2_EPT | VMX_VMCS_CTRL_PROC_EXEC2_VPID))
     414                g_HmR0.vmx.Msrs.ProcCtls2.u = ASMRdMsr(MSR_IA32_VMX_PROCBASED_CTLS2);
     415                if (g_HmR0.vmx.Msrs.ProcCtls2.n.allowed1 & (VMX_VMCS_CTRL_PROC_EXEC2_EPT | VMX_VMCS_CTRL_PROC_EXEC2_VPID))
    414416                    g_HmR0.vmx.Msrs.u64EptVpidCaps = ASMRdMsr(MSR_IA32_VMX_EPT_VPID_CAP);
    415417
    416                 if (g_HmR0.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VMFUNC)
     418                if (g_HmR0.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VMFUNC)
    417419                    g_HmR0.vmx.Msrs.u64Vmfunc = ASMRdMsr(MSR_IA32_VMX_VMFUNC);
    418420            }
     
    513515                 * Timer Does Not Count Down at the Rate Specified" erratum.
    514516                 */
    515                 if (g_HmR0.vmx.Msrs.VmxPinCtls.n.allowed1 & VMX_VMCS_CTRL_PIN_EXEC_PREEMPT_TIMER)
     517                if (g_HmR0.vmx.Msrs.PinCtls.n.allowed1 & VMX_VMCS_CTRL_PIN_EXEC_PREEMPT_TIMER)
    516518                {
    517519                    g_HmR0.vmx.fUsePreemptTimer   = true;
     
    537539 *
    538540 * @returns VBox status code.
    539  */
    540 static int hmR0InitAmd(uint32_t u32FeaturesEDX, uint32_t uMaxExtLeaf)
     541 * @param   uFeatEdx        Standard cpuid:1 feature EDX leaf.
     542 * @param   uExtFeatEcx     Extended cpuid:0x80000001 feature ECX leaf.
     543 * @param   uMaxExtLeaf     Extended cpuid:0x80000000 feature maximum valid leaf.
     544 */
     545static int hmR0InitAmd(uint32_t uFeatEdx, uint32_t uExtFeatEcx, uint32_t uMaxExtLeaf)
    541546{
    542547    /*
    543      * Read all SVM MSRs if SVM is available. (same goes for RDMSR/WRMSR)
    544      * We also assume all SVM-enabled CPUs support fxsave/fxrstor.
     548     * Read all SVM MSRs if SVM is available.
     549     * We also require all SVM-enabled CPUs to support rdmsr/wrmsr and fxsave/fxrstor.
    545550     */
    546551    int rc;
    547     if (   (g_HmR0.cpuid.u32AMDFeatureECX & X86_CPUID_AMD_FEATURE_ECX_SVM)
    548         && (u32FeaturesEDX & X86_CPUID_FEATURE_EDX_MSR)
    549         && (u32FeaturesEDX & X86_CPUID_FEATURE_EDX_FXSR)
     552    if (   (uExtFeatEcx & X86_CPUID_AMD_FEATURE_ECX_SVM)
     553        && (uFeatEdx    & X86_CPUID_FEATURE_EDX_MSR)
     554        && (uFeatEdx    & X86_CPUID_FEATURE_EDX_FXSR)
    550555        && ASMIsValidExtRange(uMaxExtLeaf)
    551         && uMaxExtLeaf >= 0x8000000a
    552        )
     556        && uMaxExtLeaf >= 0x8000000a)
    553557    {
    554558        /* Call the global AMD-V initialization routine. */
     
    606610    else
    607611    {
    608         rc = VINF_SUCCESS;         /* Don't fail if AMD-V is not supported. See @bugref{6785}. */
     612        /* Don't fail if AMD-V is not supported. See @bugref{6785}. */
     613        rc = VINF_SUCCESS;
    609614        g_HmR0.rcInit = VERR_SVM_NO_SVM;
    610615    }
     
    669674    {
    670675        /* Standard features. */
    671         uint32_t uMaxLeaf, u32VendorEBX, u32VendorECX, u32VendorEDX;
    672         ASMCpuId(0, &uMaxLeaf, &u32VendorEBX, &u32VendorECX, &u32VendorEDX);
     676        uint32_t uMaxLeaf, uVendorEbx, uVendorEcx, uVendorEdx;
     677        ASMCpuId(0, &uMaxLeaf, &uVendorEbx, &uVendorEcx, &uVendorEdx);
    673678        if (ASMIsValidStdRange(uMaxLeaf))
    674679        {
    675             uint32_t u32FeaturesECX, u32FeaturesEDX, u32Dummy;
    676             ASMCpuId(1, &u32Dummy, &u32Dummy,   &u32FeaturesECX, &u32FeaturesEDX);
    677 
    678             /* Query AMD features. */
    679             uint32_t uMaxExtLeaf = ASMCpuId_EAX(0x80000000);
    680             if (ASMIsValidExtRange(uMaxExtLeaf))
    681                 ASMCpuId(0x80000001, &u32Dummy, &u32Dummy,
    682                          &g_HmR0.cpuid.u32AMDFeatureECX,
    683                          &g_HmR0.cpuid.u32AMDFeatureEDX);
    684             else
    685                 g_HmR0.cpuid.u32AMDFeatureECX = g_HmR0.cpuid.u32AMDFeatureEDX = 0;
     680            uint32_t uFeatEcx, uFeatEdx, uDummy;
     681            ASMCpuId(1, &uDummy, &uDummy, &uFeatEcx, &uFeatEdx);
    686682
    687683            /* Go to CPU specific initialization code. */
    688             if (   ASMIsIntelCpuEx(u32VendorEBX, u32VendorECX, u32VendorEDX)
    689                 || ASMIsViaCentaurCpuEx(u32VendorEBX, u32VendorECX, u32VendorEDX))
     684            if (   ASMIsIntelCpuEx(uVendorEbx, uVendorEcx, uVendorEdx)
     685                || ASMIsViaCentaurCpuEx(uVendorEbx, uVendorEcx, uVendorEdx))
    690686            {
    691                 rc = hmR0InitIntel(u32FeaturesECX, u32FeaturesEDX);
     687                rc = hmR0InitIntel(uFeatEcx, uFeatEdx);
    692688                if (RT_FAILURE(rc))
    693689                    return rc;
    694690            }
    695             else if (ASMIsAmdCpuEx(u32VendorEBX, u32VendorECX, u32VendorEDX))
     691            else if (ASMIsAmdCpuEx(uVendorEbx, uVendorEcx, uVendorEdx))
    696692            {
    697                 rc = hmR0InitAmd(u32FeaturesEDX, uMaxExtLeaf);
     693                /* Query extended features for SVM capability. */
     694                uint32_t       uExtFeatEcx;
     695                uint32_t const uMaxExtLeaf = ASMCpuId_EAX(0x80000000);
     696                if (ASMIsValidExtRange(uMaxExtLeaf))
     697                    ASMCpuId(0x80000001, &uDummy, &uDummy, &uExtFeatEcx, &uDummy);
     698                else
     699                    uExtFeatEcx = 0;
     700
     701                rc = hmR0InitAmd(uFeatEdx, uExtFeatEcx, uMaxExtLeaf);
    698702                if (RT_FAILURE(rc))
    699703                    return rc;
     
    12521256     * Copy globals to the VM structure.
    12531257     */
    1254     pVM->hm.s.vmx.fSupported            = g_HmR0.vmx.fSupported;
    1255     pVM->hm.s.svm.fSupported            = g_HmR0.svm.fSupported;
    1256 
    1257     pVM->hm.s.vmx.fUsePreemptTimer     &= g_HmR0.vmx.fUsePreemptTimer;     /* Can be overridden by CFGM. See HMR3Init(). */
    1258     pVM->hm.s.vmx.cPreemptTimerShift    = g_HmR0.vmx.cPreemptTimerShift;
    1259     pVM->hm.s.vmx.u64HostCr4            = g_HmR0.vmx.u64HostCr4;
    1260     pVM->hm.s.vmx.u64HostEfer           = g_HmR0.vmx.u64HostEfer;
    1261     pVM->hm.s.vmx.u64HostSmmMonitorCtl  = g_HmR0.vmx.u64HostSmmMonitorCtl;
    1262     pVM->hm.s.vmx.Msrs                  = g_HmR0.vmx.Msrs;
    1263     pVM->hm.s.svm.u64MsrHwcr            = g_HmR0.svm.u64MsrHwcr;
    1264     pVM->hm.s.svm.u32Rev                = g_HmR0.svm.u32Rev;
    1265     pVM->hm.s.svm.u32Features           = g_HmR0.svm.u32Features;
    1266     pVM->hm.s.cpuid.u32AMDFeatureECX    = g_HmR0.cpuid.u32AMDFeatureECX;
    1267     pVM->hm.s.cpuid.u32AMDFeatureEDX    = g_HmR0.cpuid.u32AMDFeatureEDX;
    1268     pVM->hm.s.rcInit                    = g_HmR0.rcInit;
    1269     pVM->hm.s.uMaxAsid                  = g_HmR0.uMaxAsid;
    1270 
    1271     if (!pVM->hm.s.cMaxResumeLoops) /* allow ring-3 overrides */
     1258    pVM->hm.s.vmx.fSupported      = g_HmR0.vmx.fSupported;
     1259    pVM->hm.s.svm.fSupported      = g_HmR0.svm.fSupported;
     1260    Assert(!(pVM->hm.s.vmx.fSupported && pVM->hm.s.svm.fSupported));
     1261    if (pVM->hm.s.vmx.fSupported)
     1262    {
     1263        pVM->hm.s.vmx.fUsePreemptTimer     &= g_HmR0.vmx.fUsePreemptTimer;     /* Can be overridden by CFGM. See HMR3Init(). */
     1264        pVM->hm.s.vmx.cPreemptTimerShift    = g_HmR0.vmx.cPreemptTimerShift;
     1265        pVM->hm.s.vmx.u64HostCr4            = g_HmR0.vmx.u64HostCr4;
     1266        pVM->hm.s.vmx.u64HostEfer           = g_HmR0.vmx.u64HostEfer;
     1267        pVM->hm.s.vmx.u64HostSmmMonitorCtl  = g_HmR0.vmx.u64HostSmmMonitorCtl;
     1268        pVM->hm.s.vmx.Msrs                  = g_HmR0.vmx.Msrs;
     1269    }
     1270    else if (pVM->hm.s.svm.fSupported)
     1271    {
     1272        pVM->hm.s.svm.u64MsrHwcr  = g_HmR0.svm.u64MsrHwcr;
     1273        pVM->hm.s.svm.u32Rev      = g_HmR0.svm.u32Rev;
     1274        pVM->hm.s.svm.u32Features = g_HmR0.svm.u32Features;
     1275    }
     1276    pVM->hm.s.rcInit              = g_HmR0.rcInit;
     1277    pVM->hm.s.uMaxAsid            = g_HmR0.uMaxAsid;
     1278
     1279    /*
     1280     * Set default maximum inner loops in ring-0 before returning to ring-3.
     1281     * Can be overriden using CFGM.
     1282     */
     1283    if (!pVM->hm.s.cMaxResumeLoops)
    12721284    {
    12731285        pVM->hm.s.cMaxResumeLoops       = 1024;
  • trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp

    r73287 r73293  
    46044604     * This should be done -after- any RDTSCPs for obtaining the host timestamp (TM, STAM etc).
    46054605     */
    4606     if (    (pVM->hm.s.cpuid.u32AMDFeatureEDX & X86_CPUID_EXT_FEATURE_EDX_RDTSCP)
     4606    if (   pVM->cpum.ro.HostFeatures.fRdTscP
    46074607        && !(pVmcb->ctrl.u64InterceptCtrl & SVM_CTRL_INTERCEPT_RDTSCP))
    46084608    {
  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp

    r73292 r73293  
    916916        hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjGuestMsr, &pVCpu->hm.s.vmx.pvGuestMsr, &pVCpu->hm.s.vmx.HCPhysGuestMsr);
    917917
    918         if (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
     918        if (pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
    919919            hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjMsrBitmap, &pVCpu->hm.s.vmx.pvMsrBitmap, &pVCpu->hm.s.vmx.HCPhysMsrBitmap);
    920920
     
    985985
    986986    /* Allocate the APIC-access page for trapping APIC accesses from the guest. */
    987     if (pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)
     987    if (pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)
    988988    {
    989989        rc = hmR0VmxPageAllocZ(&pVM->hm.s.vmx.hMemObjApicAccess, (PRTR0PTR)&pVM->hm.s.vmx.pbApicAccess,
     
    10081008        /* Get the allocated virtual-APIC page from the APIC device for transparent TPR accesses. */
    10091009        if (   PDMHasApic(pVM)
    1010             && (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW))
     1010            && (pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW))
    10111011        {
    10121012            rc = APICGetApicPageForCpu(pVCpu, &pVCpu->hm.s.vmx.HCPhysVirtApic, (PRTR0PTR)&pVCpu->hm.s.vmx.pbVirtApic,
     
    10231023         * update HMAreMsrBitmapsAvailable().
    10241024         */
    1025         if (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
     1025        if (pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
    10261026        {
    10271027            rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjMsrBitmap, &pVCpu->hm.s.vmx.pvMsrBitmap,
     
    22922292{
    22932293    PVM pVM = pVCpu->CTX_SUFF(pVM);
    2294     uint32_t       fVal = pVM->hm.s.vmx.Msrs.VmxPinCtls.n.disallowed0;   /* Bits set here must always be set. */
    2295     uint32_t const fZap = pVM->hm.s.vmx.Msrs.VmxPinCtls.n.allowed1;      /* Bits cleared here must always be cleared. */
     2294    uint32_t       fVal = pVM->hm.s.vmx.Msrs.PinCtls.n.disallowed0;   /* Bits set here must always be set. */
     2295    uint32_t const fZap = pVM->hm.s.vmx.Msrs.PinCtls.n.allowed1;      /* Bits cleared here must always be cleared. */
    22962296
    22972297    fVal |= VMX_VMCS_CTRL_PIN_EXEC_EXT_INT_EXIT              /* External interrupts cause a VM-exit. */
    22982298          | VMX_VMCS_CTRL_PIN_EXEC_NMI_EXIT;                 /* Non-maskable interrupts (NMIs) cause a VM-exit. */
    22992299
    2300     if (pVM->hm.s.vmx.Msrs.VmxPinCtls.n.allowed1 & VMX_VMCS_CTRL_PIN_EXEC_VIRTUAL_NMI)
     2300    if (pVM->hm.s.vmx.Msrs.PinCtls.n.allowed1 & VMX_VMCS_CTRL_PIN_EXEC_VIRTUAL_NMI)
    23012301        fVal |= VMX_VMCS_CTRL_PIN_EXEC_VIRTUAL_NMI;         /* Use virtual NMIs and virtual-NMI blocking features. */
    23022302
     
    23042304    if (pVM->hm.s.vmx.fUsePreemptTimer)
    23052305    {
    2306         Assert(pVM->hm.s.vmx.Msrs.VmxPinCtls.n.allowed1 & VMX_VMCS_CTRL_PIN_EXEC_PREEMPT_TIMER);
     2306        Assert(pVM->hm.s.vmx.Msrs.PinCtls.n.allowed1 & VMX_VMCS_CTRL_PIN_EXEC_PREEMPT_TIMER);
    23072307        fVal |= VMX_VMCS_CTRL_PIN_EXEC_PREEMPT_TIMER;
    23082308    }
     
    23122312    if (pVM->hm.s.fPostedIntrs)
    23132313    {
    2314         Assert(pVM->hm.s.vmx.Msrs.VmxPinCtls.n.allowed1 & VMX_VMCS_CTRL_PIN_EXEC_POSTED_INTR);
    2315         Assert(pVM->hm.s.vmx.Msrs.VmxExit.n.allowed1 & VMX_VMCS_CTRL_EXIT_ACK_EXT_INT);
     2314        Assert(pVM->hm.s.vmx.Msrs.PinCtls.n.allowed1 & VMX_VMCS_CTRL_PIN_EXEC_POSTED_INTR);
     2315        Assert(pVM->hm.s.vmx.Msrs.ExitCtls.n.allowed1 & VMX_VMCS_CTRL_EXIT_ACK_EXT_INT);
    23162316        fVal |= VMX_VMCS_CTRL_PIN_EXEC_POSTED_INTR;
    23172317    }
     
    23212321    {
    23222322        LogRelFunc(("Invalid pin-based VM-execution controls combo! Cpu=%#RX64 fVal=%#RX64 fZap=%#RX64\n",
    2323                     pVM->hm.s.vmx.Msrs.VmxPinCtls.n.disallowed0, fVal, fZap));
     2323                    pVM->hm.s.vmx.Msrs.PinCtls.n.disallowed0, fVal, fZap));
    23242324        pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_PIN_EXEC;
    23252325        return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
     
    23472347{
    23482348    PVM pVM = pVCpu->CTX_SUFF(pVM);
    2349     uint32_t       fVal = pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.disallowed0; /* Bits set here must be set in the VMCS. */
    2350     uint32_t const fZap = pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1;    /* Bits cleared here must be cleared in the VMCS. */
     2349    uint32_t       fVal = pVM->hm.s.vmx.Msrs.ProcCtls2.n.disallowed0; /* Bits set here must be set in the VMCS. */
     2350    uint32_t const fZap = pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1;    /* Bits cleared here must be cleared in the VMCS. */
    23512351
    23522352    /* WBINVD causes a VM-exit. */
    2353     if (pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT)
     2353    if (pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT)
    23542354        fVal |= VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT;
    23552355
     
    23622362     * it to the guest. Without this, guest executing INVPCID would cause a #UD.
    23632363     */
    2364     if (   (pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_INVPCID)
     2364    if (   (pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_INVPCID)
    23652365        && pVM->cpum.ro.GuestFeatures.fInvpcid)
    23662366        fVal |= VMX_VMCS_CTRL_PROC_EXEC2_INVPCID;
     
    23782378    {
    23792379        /* Enable APIC-register virtualization. */
    2380         Assert(pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_APIC_REG_VIRT);
     2380        Assert(pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_APIC_REG_VIRT);
    23812381        fVal |= VMX_VMCS_CTRL_PROC_EXEC2_APIC_REG_VIRT;
    23822382
    23832383        /* Enable virtual-interrupt delivery. */
    2384         Assert(pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_INTR_DELIVERY);
     2384        Assert(pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_INTR_DELIVERY);
    23852385        fVal |= VMX_VMCS_CTRL_PROC_EXEC2_VIRT_INTR_DELIVERY;
    23862386    }
     
    23902390    /** @todo VIRT_X2APIC support, it's mutually exclusive with this. So must be
    23912391     *        done dynamically. */
    2392     if (pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)
     2392    if (pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)
    23932393    {
    23942394        Assert(pVM->hm.s.vmx.HCPhysApicAccess);
     
    24002400
    24012401    /* Enable RDTSCP. */
    2402     if (pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
     2402    if (pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
    24032403        fVal |= VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP;
    24042404
    24052405    /* Enable Pause-Loop exiting. */
    2406     if (   pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_PAUSE_LOOP_EXIT
     2406    if (   pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_PAUSE_LOOP_EXIT
    24072407        && pVM->hm.s.vmx.cPleGapTicks
    24082408        && pVM->hm.s.vmx.cPleWindowTicks)
     
    24182418    {
    24192419        LogRelFunc(("Invalid secondary processor-based VM-execution controls combo! cpu=%#RX64 fVal=%#RX64 fZap=%#RX64\n",
    2420                     pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.disallowed0, fVal, fZap));
     2420                    pVM->hm.s.vmx.Msrs.ProcCtls2.n.disallowed0, fVal, fZap));
    24212421        pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_PROC_EXEC2;
    24222422        return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
     
    24442444{
    24452445    PVM pVM = pVCpu->CTX_SUFF(pVM);
    2446     uint32_t       fVal = pVM->hm.s.vmx.Msrs.VmxProcCtls.n.disallowed0;  /* Bits set here must be set in the VMCS. */
    2447     uint32_t const fZap = pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1;     /* Bits cleared here must be cleared in the VMCS. */
     2446    uint32_t       fVal = pVM->hm.s.vmx.Msrs.ProcCtls.n.disallowed0;  /* Bits set here must be set in the VMCS. */
     2447    uint32_t const fZap = pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed1;     /* Bits cleared here must be cleared in the VMCS. */
    24482448
    24492449    fVal |= VMX_VMCS_CTRL_PROC_EXEC_HLT_EXIT                      /* HLT causes a VM-exit. */
     
    24562456
    24572457    /* We toggle VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT later, check if it's not -always- needed to be set or clear. */
    2458     if (   !(pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT)
    2459         ||  (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.disallowed0 & VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT))
     2458    if (   !(pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT)
     2459        ||  (pVM->hm.s.vmx.Msrs.ProcCtls.n.disallowed0 & VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT))
    24602460    {
    24612461        LogRelFunc(("Unsupported VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT combo!"));
     
    24752475    /* Use TPR shadowing if supported by the CPU. */
    24762476    if (   PDMHasApic(pVM)
    2477         && pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
     2477        && pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
    24782478    {
    24792479        Assert(pVCpu->hm.s.vmx.HCPhysVirtApic);
     
    25022502
    25032503    /* Use MSR-bitmaps if supported by the CPU. */
    2504     if (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
     2504    if (pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
    25052505    {
    25062506        fVal |= VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS;
     
    25432543
    25442544    /* Use the secondary processor-based VM-execution controls if supported by the CPU. */
    2545     if (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
     2545    if (pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
    25462546        fVal |= VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL;
    25472547
     
    25492549    {
    25502550        LogRelFunc(("Invalid processor-based VM-execution controls combo! cpu=%#RX64 fVal=%#RX64 fZap=%#RX64\n",
    2551                     pVM->hm.s.vmx.Msrs.VmxProcCtls.n.disallowed0, fVal, fZap));
     2551                    pVM->hm.s.vmx.Msrs.ProcCtls.n.disallowed0, fVal, fZap));
    25522552        pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_PROC_EXEC;
    25532553        return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
     
    27622762    Assert(!pVM->hm.s.vmx.fSupportsVmcsEfer);
    27632763#if HC_ARCH_BITS == 64
    2764     if (   (pVM->hm.s.vmx.Msrs.VmxEntry.n.allowed1 & VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_EFER_MSR)
    2765         && (pVM->hm.s.vmx.Msrs.VmxExit.n.allowed1  & VMX_VMCS_CTRL_EXIT_LOAD_HOST_EFER_MSR)
    2766         && (pVM->hm.s.vmx.Msrs.VmxExit.n.allowed1  & VMX_VMCS_CTRL_EXIT_SAVE_GUEST_EFER_MSR))
     2764    if (   (pVM->hm.s.vmx.Msrs.EntryCtls.n.allowed1 & VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_EFER_MSR)
     2765        && (pVM->hm.s.vmx.Msrs.ExitCtls.n.allowed1  & VMX_VMCS_CTRL_EXIT_LOAD_HOST_EFER_MSR)
     2766        && (pVM->hm.s.vmx.Msrs.ExitCtls.n.allowed1  & VMX_VMCS_CTRL_EXIT_SAVE_GUEST_EFER_MSR))
    27672767    {
    27682768        pVM->hm.s.vmx.fSupportsVmcsEfer = true;
     
    32033203        && (u64GuestEfer & MSR_K6_EFER_NXE) != (u64HostEfer & MSR_K6_EFER_NXE))
    32043204    {
    3205         /* Assert that host is PAE capable. */
    3206         Assert(pVM->hm.s.cpuid.u32AMDFeatureEDX & X86_CPUID_EXT_FEATURE_EDX_NX);
     3205        /* Assert that host is NX capable. */
     3206        Assert(pVCpu->CTX_SUFF(pVM)->cpum.ro.HostFeatures.fNoExecute);
    32073207        return true;
    32083208    }
     
    32293229    {
    32303230        PVM pVM = pVCpu->CTX_SUFF(pVM);
    3231         uint32_t       fVal = pVM->hm.s.vmx.Msrs.VmxEntry.n.disallowed0; /* Bits set here must be set in the VMCS. */
    3232         uint32_t const fZap = pVM->hm.s.vmx.Msrs.VmxEntry.n.allowed1;    /* Bits cleared here must be cleared in the VMCS. */
     3231        uint32_t       fVal = pVM->hm.s.vmx.Msrs.EntryCtls.n.disallowed0; /* Bits set here must be set in the VMCS. */
     3232        uint32_t const fZap = pVM->hm.s.vmx.Msrs.EntryCtls.n.allowed1;    /* Bits cleared here must be cleared in the VMCS. */
    32333233
    32343234        /* Load debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x capable CPUs only supports the 1-setting of this bit. */
     
    32643264        {
    32653265            Log4Func(("Invalid VM-entry controls combo! Cpu=%RX64 fVal=%RX64 fZap=%RX64\n",
    3266                       pVM->hm.s.vmx.Msrs.VmxEntry.n.disallowed0, fVal, fZap));
     3266                      pVM->hm.s.vmx.Msrs.EntryCtls.n.disallowed0, fVal, fZap));
    32673267            pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_ENTRY;
    32683268            return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
     
    32963296    {
    32973297        PVM pVM = pVCpu->CTX_SUFF(pVM);
    3298         uint32_t       fVal = pVM->hm.s.vmx.Msrs.VmxExit.n.disallowed0;  /* Bits set here must be set in the VMCS. */
    3299         uint32_t const fZap = pVM->hm.s.vmx.Msrs.VmxExit.n.allowed1;     /* Bits cleared here must be cleared in the VMCS. */
     3298        uint32_t       fVal = pVM->hm.s.vmx.Msrs.ExitCtls.n.disallowed0;  /* Bits set here must be set in the VMCS. */
     3299        uint32_t const fZap = pVM->hm.s.vmx.Msrs.ExitCtls.n.allowed1;     /* Bits cleared here must be cleared in the VMCS. */
    33003300
    33013301        /* Save debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x CPUs only supported the 1-setting of this bit. */
     
    33423342        /* Enable saving of the VMX preemption timer value on VM-exit. */
    33433343        if (    pVM->hm.s.vmx.fUsePreemptTimer
    3344             && (pVM->hm.s.vmx.Msrs.VmxExit.n.allowed1 & VMX_VMCS_CTRL_EXIT_SAVE_VMX_PREEMPT_TIMER))
     3344            && (pVM->hm.s.vmx.Msrs.ExitCtls.n.allowed1 & VMX_VMCS_CTRL_EXIT_SAVE_VMX_PREEMPT_TIMER))
    33453345            fVal |= VMX_VMCS_CTRL_EXIT_SAVE_VMX_PREEMPT_TIMER;
    33463346
     
    33483348        {
    33493349            LogRelFunc(("Invalid VM-exit controls combo! cpu=%RX64 fVal=%RX64 fZap=%RX64\n",
    3350                         pVM->hm.s.vmx.Msrs.VmxExit.n.disallowed0, fVal, fZap));
     3350                        pVM->hm.s.vmx.Msrs.ExitCtls.n.disallowed0, fVal, fZap));
    33513351            pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_EXIT;
    33523352            return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
     
    40794079        /* If the CPU supports the monitor trap flag, use it for single stepping in DBGF and avoid intercepting #DB. */
    40804080        PVM pVM = pVCpu->CTX_SUFF(pVM);
    4081         if (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG)
     4081        if (pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG)
    40824082        {
    40834083            uProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG;
     
    47904790
    47914791                /* We need to intercept reads too, see @bugref{7386#c16}. */
    4792                 if (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
     4792                if (pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
    47934793                    hmR0VmxSetMsrPermission(pVCpu, MSR_K6_EFER, VMXMSREXIT_INTERCEPT_READ, VMXMSREXIT_INTERCEPT_WRITE);
    47944794                Log4Func(("MSR[--]: u32Msr=%#RX32 u64Value=%#RX64 cMsrs=%u\n", MSR_K6_EFER, pCtx->msrEFER,
     
    70997099DECLINLINE(void) hmR0VmxSetIntWindowExitVmcs(PVMCPU pVCpu)
    71007100{
    7101     if (RT_LIKELY(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT))
     7101    if (RT_LIKELY(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT))
    71027102    {
    71037103        if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT))
     
    71357135DECLINLINE(void) hmR0VmxSetNmiWindowExitVmcs(PVMCPU pVCpu)
    71367136{
    7137     if (RT_LIKELY(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_NMI_WINDOW_EXIT))
     7137    if (RT_LIKELY(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_NMI_WINDOW_EXIT))
    71387138    {
    71397139        if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_NMI_WINDOW_EXIT))
     
    73707370             * BS bit would mean delivering a #DB to the guest upon VM-entry when it shouldn't be.
    73717371             */
    7372             Assert(!(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG));
     7372            Assert(!(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG));
    73737373            fIntrState = 0;
    73747374        }
     
    91709170     * Sanitize the control stuff.
    91719171     */
    9172     pDbgState->fCpe2Extra       &= pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1;
     9172    pDbgState->fCpe2Extra       &= pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1;
    91739173    if (pDbgState->fCpe2Extra)
    91749174        pDbgState->fCpe1Extra   |= VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL;
    9175     pDbgState->fCpe1Extra       &= pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1;
    9176     pDbgState->fCpe1Unwanted    &= ~pVM->hm.s.vmx.Msrs.VmxProcCtls.n.disallowed0;
     9175    pDbgState->fCpe1Extra       &= pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed1;
     9176    pDbgState->fCpe1Unwanted    &= ~pVM->hm.s.vmx.Msrs.ProcCtls.n.disallowed0;
    91779177    if (pVCpu->hm.s.fDebugWantRdTscExit != RT_BOOL(pDbgState->fCpe1Extra & VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT))
    91789178    {
  • trunk/src/VBox/VMM/VMMR3/HM.cpp

    r73292 r73293  
    12301230    {
    12311231        LogRel(("HM: Failed to initialize VT-x / AMD-V: %Rrc\n", pVM->hm.s.rcInit));
    1232         LogRel(("HM: VMX MSR_IA32_FEATURE_CONTROL=%RX64\n", pVM->hm.s.vmx.Msrs.u64FeatureCtrl));
     1232        LogRel(("HM: VMX MSR_IA32_FEATURE_CONTROL=%RX64\n", pVM->hm.s.vmx.Msrs.u64FeatCtrl));
    12331233        switch (pVM->hm.s.rcInit)
    12341234        {
     
    13531353
    13541354/**
    1355  * Finish VT-x initialization (after ring-0 init).
    1356  *
    1357  * @returns VBox status code.
    1358  * @param   pVM                The cross context VM structure.
    1359  */
    1360 static int hmR3InitFinalizeR0Intel(PVM pVM)
    1361 {
    1362     int rc;
    1363 
    1364     Log(("pVM->hm.s.vmx.fSupported = %d\n", pVM->hm.s.vmx.fSupported));
    1365     AssertLogRelReturn(pVM->hm.s.vmx.Msrs.u64FeatureCtrl != 0, VERR_HM_IPE_4);
    1366 
    1367     uint64_t val;
    1368     uint64_t zap;
    1369 
    1370     LogRel(("HM: Using VT-x implementation 2.0\n"));
    1371     LogRel(("HM: Host CR4                        = %#RX64\n", pVM->hm.s.vmx.u64HostCr4));
    1372     LogRel(("HM: Host EFER                       = %#RX64\n", pVM->hm.s.vmx.u64HostEfer));
    1373     LogRel(("HM: MSR_IA32_SMM_MONITOR_CTL        = %#RX64\n", pVM->hm.s.vmx.u64HostSmmMonitorCtl));
    1374 
    1375     val = pVM->hm.s.vmx.Msrs.u64FeatureCtrl;
    1376     LogRel(("HM: MSR_IA32_FEATURE_CONTROL        = %#RX64\n", val));
     1355 * Reports MSR_IA32_FEATURE_CONTROL MSR to the log.
     1356 *
     1357 * @param   fFeatMsr    The feature control MSR value.
     1358 */
     1359static void hmR3VmxReportFeatCtlMsr(uint64_t fFeatMsr)
     1360{
     1361    uint64_t const val = fFeatMsr;
     1362    LogRel(("HM: MSR_IA32_FEATURE_CONTROL          = %#RX64\n", val));
    13771363    HMVMX_REPORT_MSR_CAP(val, "LOCK",             MSR_IA32_FEATURE_CONTROL_LOCK);
    13781364    HMVMX_REPORT_MSR_CAP(val, "SMX_VMXON",        MSR_IA32_FEATURE_CONTROL_SMX_VMXON);
     
    13891375    HMVMX_REPORT_MSR_CAP(val, "SGX_GLOBAL_EN",    MSR_IA32_FEATURE_CONTROL_SGX_GLOBAL_EN);
    13901376    HMVMX_REPORT_MSR_CAP(val, "LMCE",             MSR_IA32_FEATURE_CONTROL_LMCE);
    1391     if (!(pVM->hm.s.vmx.Msrs.u64FeatureCtrl & MSR_IA32_FEATURE_CONTROL_LOCK))
     1377    if (!(val & MSR_IA32_FEATURE_CONTROL_LOCK))
    13921378        LogRel(("HM:   MSR_IA32_FEATURE_CONTROL lock bit not set, possibly bad hardware!\n"));
    1393 
    1394     LogRel(("HM: MSR_IA32_VMX_BASIC              = %#RX64\n", pVM->hm.s.vmx.Msrs.u64Basic));
    1395     LogRel(("HM:   VMCS id                         = %#x\n",      MSR_IA32_VMX_BASIC_VMCS_ID(pVM->hm.s.vmx.Msrs.u64Basic)));
    1396     LogRel(("HM:   VMCS size                       = %u bytes\n", MSR_IA32_VMX_BASIC_VMCS_SIZE(pVM->hm.s.vmx.Msrs.u64Basic)));
    1397     LogRel(("HM:   VMCS physical address limit     = %s\n",       MSR_IA32_VMX_BASIC_VMCS_PHYS_WIDTH(pVM->hm.s.vmx.Msrs.u64Basic) ? "< 4 GB" : "None"));
    1398     LogRel(("HM:   VMCS memory type                = %s\n",       hmR3VmxGetMemTypeDesc(pVM->hm.s.vmx.Msrs.u64Basic)));
    1399     LogRel(("HM:   Dual-monitor treatment support  = %RTbool\n",  MSR_IA32_VMX_BASIC_DUAL_MON(pVM->hm.s.vmx.Msrs.u64Basic)));
    1400     LogRel(("HM:   OUTS & INS instruction-info     = %RTbool\n",  MSR_IA32_VMX_BASIC_VMCS_INS_OUTS(pVM->hm.s.vmx.Msrs.u64Basic)));
    1401     LogRel(("HM:   Supports true capability MSRs   = %RTbool\n",  MSR_IA32_VMX_BASIC_TRUE_CONTROLS(pVM->hm.s.vmx.Msrs.u64Basic)));
    1402     LogRel(("HM: Max resume loops                = %u\n", pVM->hm.s.cMaxResumeLoops));
    1403 
    1404     LogRel(("HM: MSR_IA32_VMX_PINBASED_CTLS      = %#RX64\n", pVM->hm.s.vmx.Msrs.VmxPinCtls.u));
    1405     val = pVM->hm.s.vmx.Msrs.VmxPinCtls.n.allowed1;
    1406     zap = pVM->hm.s.vmx.Msrs.VmxPinCtls.n.disallowed0;
     1379}
     1380
     1381
     1382/**
     1383 * Reports MSR_IA32_VMX_BASIC MSR to the log.
     1384 *
     1385 * @param   uBasicMsr    The VMX basic MSR value.
     1386 */
     1387static void hmR3VmxReportBasicMsr(uint64_t uBasicMsr)
     1388{
     1389    LogRel(("HM: MSR_IA32_VMX_BASIC                = %#RX64\n", uBasicMsr));
     1390    LogRel(("HM:   VMCS id                           = %#x\n",      MSR_IA32_VMX_BASIC_VMCS_ID(uBasicMsr)));
     1391    LogRel(("HM:   VMCS size                         = %u bytes\n", MSR_IA32_VMX_BASIC_VMCS_SIZE(uBasicMsr)));
     1392    LogRel(("HM:   VMCS physical address limit       = %s\n",       MSR_IA32_VMX_BASIC_VMCS_PHYS_WIDTH(uBasicMsr) ? "< 4 GB"
     1393                                                                                                                : "None"));
     1394    LogRel(("HM:   VMCS memory type                  = %s\n",       hmR3VmxGetMemTypeDesc(uBasicMsr)));
     1395    LogRel(("HM:   Dual-monitor treatment support    = %RTbool\n",  MSR_IA32_VMX_BASIC_DUAL_MON(uBasicMsr)));
     1396    LogRel(("HM:   OUTS & INS instruction-info       = %RTbool\n",  MSR_IA32_VMX_BASIC_VMCS_INS_OUTS(uBasicMsr)));
     1397    LogRel(("HM:   Supports true capability MSRs     = %RTbool\n",  MSR_IA32_VMX_BASIC_TRUE_CONTROLS(uBasicMsr)));
     1398}
     1399
     1400
     1401/**
     1402 * Reports MSR_IA32_PINBASED_CTLS to the log.
     1403 *
     1404 * @param   pVmxMsr    Pointer to the VMX MSR.
     1405 */
     1406static void hmR3VmxReportPinBasedCtlsMsr(PCVMXCTLSMSR pVmxMsr)
     1407{
     1408    uint64_t const val = pVmxMsr->n.allowed1;
     1409    uint64_t const zap = pVmxMsr->n.disallowed0;
     1410    LogRel(("HM: MSR_IA32_VMX_PINBASED_CTLS        = %#RX64\n", pVmxMsr->u));
    14071411    HMVMX_REPORT_FEAT(val, zap, "EXT_INT_EXIT",  VMX_VMCS_CTRL_PIN_EXEC_EXT_INT_EXIT);
    14081412    HMVMX_REPORT_FEAT(val, zap, "NMI_EXIT",      VMX_VMCS_CTRL_PIN_EXEC_NMI_EXIT);
     
    14101414    HMVMX_REPORT_FEAT(val, zap, "PREEMPT_TIMER", VMX_VMCS_CTRL_PIN_EXEC_PREEMPT_TIMER);
    14111415    HMVMX_REPORT_FEAT(val, zap, "POSTED_INTR",   VMX_VMCS_CTRL_PIN_EXEC_POSTED_INTR);
    1412 
    1413     LogRel(("HM: MSR_IA32_VMX_PROCBASED_CTLS     = %#RX64\n", pVM->hm.s.vmx.Msrs.VmxProcCtls.u));
    1414     val = pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1;
    1415     zap = pVM->hm.s.vmx.Msrs.VmxProcCtls.n.disallowed0;
     1416}
     1417
     1418
     1419/**
     1420 * Reports MSR_IA32_VMX_PROCBASED_CTLS MSR to the log.
     1421 *
     1422 * @param   pVmxMsr    Pointer to the VMX MSR.
     1423 */
     1424static void hmR3VmxReportProcBasedCtlsMsr(PCVMXCTLSMSR pVmxMsr)
     1425{
     1426    uint64_t const val = pVmxMsr->n.allowed1;
     1427    uint64_t const zap = pVmxMsr->n.disallowed0;
     1428    LogRel(("HM: MSR_IA32_VMX_PROCBASED_CTLS       = %#RX64\n", pVmxMsr->u));
    14161429    HMVMX_REPORT_FEAT(val, zap, "INT_WINDOW_EXIT",         VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT);
    14171430    HMVMX_REPORT_FEAT(val, zap, "USE_TSC_OFFSETTING",      VMX_VMCS_CTRL_PROC_EXEC_USE_TSC_OFFSETTING);
     
    14351448    HMVMX_REPORT_FEAT(val, zap, "PAUSE_EXIT",              VMX_VMCS_CTRL_PROC_EXEC_PAUSE_EXIT);
    14361449    HMVMX_REPORT_FEAT(val, zap, "USE_SECONDARY_EXEC_CTRL", VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL);
    1437     if (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
    1438     {
    1439         LogRel(("HM: MSR_IA32_VMX_PROCBASED_CTLS2    = %#RX64\n", pVM->hm.s.vmx.Msrs.VmxProcCtls2.u));
    1440         val = pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1;
    1441         zap = pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.disallowed0;
    1442         HMVMX_REPORT_FEAT(val, zap, "VIRT_APIC",             VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC);
    1443         HMVMX_REPORT_FEAT(val, zap, "EPT",                   VMX_VMCS_CTRL_PROC_EXEC2_EPT);
    1444         HMVMX_REPORT_FEAT(val, zap, "DESCRIPTOR_TABLE_EXIT", VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_TABLE_EXIT);
    1445         HMVMX_REPORT_FEAT(val, zap, "RDTSCP",                VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP);
    1446         HMVMX_REPORT_FEAT(val, zap, "VIRT_X2APIC",           VMX_VMCS_CTRL_PROC_EXEC2_VIRT_X2APIC);
    1447         HMVMX_REPORT_FEAT(val, zap, "VPID",                  VMX_VMCS_CTRL_PROC_EXEC2_VPID);
    1448         HMVMX_REPORT_FEAT(val, zap, "WBINVD_EXIT",           VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT);
    1449         HMVMX_REPORT_FEAT(val, zap, "UNRESTRICTED_GUEST",    VMX_VMCS_CTRL_PROC_EXEC2_UNRESTRICTED_GUEST);
    1450         HMVMX_REPORT_FEAT(val, zap, "APIC_REG_VIRT",         VMX_VMCS_CTRL_PROC_EXEC2_APIC_REG_VIRT);
    1451         HMVMX_REPORT_FEAT(val, zap, "VIRT_INTR_DELIVERY",    VMX_VMCS_CTRL_PROC_EXEC2_VIRT_INTR_DELIVERY);
    1452         HMVMX_REPORT_FEAT(val, zap, "PAUSE_LOOP_EXIT",       VMX_VMCS_CTRL_PROC_EXEC2_PAUSE_LOOP_EXIT);
    1453         HMVMX_REPORT_FEAT(val, zap, "RDRAND_EXIT",           VMX_VMCS_CTRL_PROC_EXEC2_RDRAND_EXIT);
    1454         HMVMX_REPORT_FEAT(val, zap, "INVPCID",               VMX_VMCS_CTRL_PROC_EXEC2_INVPCID);
    1455         HMVMX_REPORT_FEAT(val, zap, "VMFUNC",                VMX_VMCS_CTRL_PROC_EXEC2_VMFUNC);
    1456         HMVMX_REPORT_FEAT(val, zap, "VMCS_SHADOWING",        VMX_VMCS_CTRL_PROC_EXEC2_VMCS_SHADOWING);
    1457         HMVMX_REPORT_FEAT(val, zap, "ENCLS_EXIT",            VMX_VMCS_CTRL_PROC_EXEC2_ENCLS_EXIT);
    1458         HMVMX_REPORT_FEAT(val, zap, "RDSEED_EXIT",           VMX_VMCS_CTRL_PROC_EXEC2_RDSEED_EXIT);
    1459         HMVMX_REPORT_FEAT(val, zap, "PML",                   VMX_VMCS_CTRL_PROC_EXEC2_PML);
    1460         HMVMX_REPORT_FEAT(val, zap, "EPT_VE",                VMX_VMCS_CTRL_PROC_EXEC2_EPT_VE);
    1461         HMVMX_REPORT_FEAT(val, zap, "CONCEAL_FROM_PT",       VMX_VMCS_CTRL_PROC_EXEC2_CONCEAL_FROM_PT);
    1462         HMVMX_REPORT_FEAT(val, zap, "XSAVES_XRSTORS",        VMX_VMCS_CTRL_PROC_EXEC2_XSAVES_XRSTORS);
    1463         HMVMX_REPORT_FEAT(val, zap, "TSC_SCALING",           VMX_VMCS_CTRL_PROC_EXEC2_TSC_SCALING);
    1464     }
    1465 
    1466     LogRel(("HM: MSR_IA32_VMX_ENTRY_CTLS         = %#RX64\n", pVM->hm.s.vmx.Msrs.VmxEntry.u));
    1467     val = pVM->hm.s.vmx.Msrs.VmxEntry.n.allowed1;
    1468     zap = pVM->hm.s.vmx.Msrs.VmxEntry.n.disallowed0;
     1450}
     1451
     1452
     1453/**
     1454 * Reports MSR_IA32_VMX_PROCBASED_CTLS2 MSR to the log.
     1455 *
     1456 * @param   pVmxMsr    Pointer to the VMX MSR.
     1457 */
     1458static void hmR3VmxReportProcBasedCtls2Msr(PCVMXCTLSMSR pVmxMsr)
     1459{
     1460    uint64_t const val = pVmxMsr->n.allowed1;
     1461    uint64_t const zap = pVmxMsr->n.disallowed0;
     1462    LogRel(("HM: MSR_IA32_VMX_PROCBASED_CTLS2      = %#RX64\n", pVmxMsr->u));
     1463    HMVMX_REPORT_FEAT(val, zap, "VIRT_APIC",             VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC);
     1464    HMVMX_REPORT_FEAT(val, zap, "EPT",                   VMX_VMCS_CTRL_PROC_EXEC2_EPT);
     1465    HMVMX_REPORT_FEAT(val, zap, "DESCRIPTOR_TABLE_EXIT", VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_TABLE_EXIT);
     1466    HMVMX_REPORT_FEAT(val, zap, "RDTSCP",                VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP);
     1467    HMVMX_REPORT_FEAT(val, zap, "VIRT_X2APIC",           VMX_VMCS_CTRL_PROC_EXEC2_VIRT_X2APIC);
     1468    HMVMX_REPORT_FEAT(val, zap, "VPID",                  VMX_VMCS_CTRL_PROC_EXEC2_VPID);
     1469    HMVMX_REPORT_FEAT(val, zap, "WBINVD_EXIT",           VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT);
     1470    HMVMX_REPORT_FEAT(val, zap, "UNRESTRICTED_GUEST",    VMX_VMCS_CTRL_PROC_EXEC2_UNRESTRICTED_GUEST);
     1471    HMVMX_REPORT_FEAT(val, zap, "APIC_REG_VIRT",         VMX_VMCS_CTRL_PROC_EXEC2_APIC_REG_VIRT);
     1472    HMVMX_REPORT_FEAT(val, zap, "VIRT_INTR_DELIVERY",    VMX_VMCS_CTRL_PROC_EXEC2_VIRT_INTR_DELIVERY);
     1473    HMVMX_REPORT_FEAT(val, zap, "PAUSE_LOOP_EXIT",       VMX_VMCS_CTRL_PROC_EXEC2_PAUSE_LOOP_EXIT);
     1474    HMVMX_REPORT_FEAT(val, zap, "RDRAND_EXIT",           VMX_VMCS_CTRL_PROC_EXEC2_RDRAND_EXIT);
     1475    HMVMX_REPORT_FEAT(val, zap, "INVPCID",               VMX_VMCS_CTRL_PROC_EXEC2_INVPCID);
     1476    HMVMX_REPORT_FEAT(val, zap, "VMFUNC",                VMX_VMCS_CTRL_PROC_EXEC2_VMFUNC);
     1477    HMVMX_REPORT_FEAT(val, zap, "VMCS_SHADOWING",        VMX_VMCS_CTRL_PROC_EXEC2_VMCS_SHADOWING);
     1478    HMVMX_REPORT_FEAT(val, zap, "ENCLS_EXIT",            VMX_VMCS_CTRL_PROC_EXEC2_ENCLS_EXIT);
     1479    HMVMX_REPORT_FEAT(val, zap, "RDSEED_EXIT",           VMX_VMCS_CTRL_PROC_EXEC2_RDSEED_EXIT);
     1480    HMVMX_REPORT_FEAT(val, zap, "PML",                   VMX_VMCS_CTRL_PROC_EXEC2_PML);
     1481    HMVMX_REPORT_FEAT(val, zap, "EPT_VE",                VMX_VMCS_CTRL_PROC_EXEC2_EPT_VE);
     1482    HMVMX_REPORT_FEAT(val, zap, "CONCEAL_FROM_PT",       VMX_VMCS_CTRL_PROC_EXEC2_CONCEAL_FROM_PT);
     1483    HMVMX_REPORT_FEAT(val, zap, "XSAVES_XRSTORS",        VMX_VMCS_CTRL_PROC_EXEC2_XSAVES_XRSTORS);
     1484    HMVMX_REPORT_FEAT(val, zap, "TSC_SCALING",           VMX_VMCS_CTRL_PROC_EXEC2_TSC_SCALING);
     1485}
     1486
     1487
     1488/**
     1489 * Reports MSR_IA32_VMX_ENTRY_CTLS to the log.
     1490 *
     1491 * @param   pVmxMsr    Pointer to the VMX MSR.
     1492 */
     1493static void hmR3VmxReportEntryCtlsMsr(PCVMXCTLSMSR pVmxMsr)
     1494{
     1495    uint64_t const val = pVmxMsr->n.allowed1;
     1496    uint64_t const zap = pVmxMsr->n.disallowed0;
     1497    LogRel(("HM: MSR_IA32_VMX_ENTRY_CTLS           = %#RX64\n", pVmxMsr->u));
    14691498    HMVMX_REPORT_FEAT(val, zap, "LOAD_DEBUG",          VMX_VMCS_CTRL_ENTRY_LOAD_DEBUG);
    14701499    HMVMX_REPORT_FEAT(val, zap, "IA32E_MODE_GUEST",    VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST);
     
    14741503    HMVMX_REPORT_FEAT(val, zap, "LOAD_GUEST_PAT_MSR",  VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_PAT_MSR);
    14751504    HMVMX_REPORT_FEAT(val, zap, "LOAD_GUEST_EFER_MSR", VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_EFER_MSR);
    1476 
    1477     LogRel(("HM: MSR_IA32_VMX_EXIT_CTLS          = %#RX64\n", pVM->hm.s.vmx.Msrs.VmxExit.u));
    1478     val = pVM->hm.s.vmx.Msrs.VmxExit.n.allowed1;
    1479     zap = pVM->hm.s.vmx.Msrs.VmxExit.n.disallowed0;
     1505}
     1506
     1507
     1508/**
     1509 * Reports MSR_IA32_VMX_EXIT_CTLS to the log.
     1510 *
     1511 * @param   pVmxMsr    Pointer to the VMX MSR.
     1512 */
     1513static void hmR3VmxReportExitCtlsMsr(PCVMXCTLSMSR pVmxMsr)
     1514{
     1515    uint64_t const val = pVmxMsr->n.allowed1;
     1516    uint64_t const zap = pVmxMsr->n.disallowed0;
     1517    LogRel(("HM: MSR_IA32_VMX_EXIT_CTLS            = %#RX64\n", pVmxMsr->u));
    14801518    HMVMX_REPORT_FEAT(val, zap, "SAVE_DEBUG",             VMX_VMCS_CTRL_EXIT_SAVE_DEBUG);
    14811519    HMVMX_REPORT_FEAT(val, zap, "HOST_ADDR_SPACE_SIZE",   VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE);
     
    14871525    HMVMX_REPORT_FEAT(val, zap, "LOAD_HOST_EFER_MSR",     VMX_VMCS_CTRL_EXIT_LOAD_HOST_EFER_MSR);
    14881526    HMVMX_REPORT_FEAT(val, zap, "SAVE_VMX_PREEMPT_TIMER", VMX_VMCS_CTRL_EXIT_SAVE_VMX_PREEMPT_TIMER);
    1489 
     1527}
     1528
     1529
     1530/**
     1531 * Reports MSR_IA32_VMX_EPT_VPID_CAP MSR to the log.
     1532 *
     1533 * @param   fCaps    The VMX EPT/VPID capability MSR value.
     1534 */
     1535static void hmR3VmxReportEptVpidCapsMsr(uint64_t fCaps)
     1536{
     1537    LogRel(("HM: MSR_IA32_VMX_EPT_VPID_CAP         = %#RX64\n", fCaps));
     1538    HMVMX_REPORT_MSR_CAP(fCaps, "RWX_X_ONLY",                            MSR_IA32_VMX_EPT_VPID_CAP_RWX_X_ONLY);
     1539    HMVMX_REPORT_MSR_CAP(fCaps, "PAGE_WALK_LENGTH_4",                    MSR_IA32_VMX_EPT_VPID_CAP_PAGE_WALK_LENGTH_4);
     1540    HMVMX_REPORT_MSR_CAP(fCaps, "EMT_UC",                                MSR_IA32_VMX_EPT_VPID_CAP_EMT_UC);
     1541    HMVMX_REPORT_MSR_CAP(fCaps, "EMT_WB",                                MSR_IA32_VMX_EPT_VPID_CAP_EMT_WB);
     1542    HMVMX_REPORT_MSR_CAP(fCaps, "PDE_2M",                                MSR_IA32_VMX_EPT_VPID_CAP_PDE_2M);
     1543    HMVMX_REPORT_MSR_CAP(fCaps, "PDPTE_1G",                              MSR_IA32_VMX_EPT_VPID_CAP_PDPTE_1G);
     1544    HMVMX_REPORT_MSR_CAP(fCaps, "INVEPT",                                MSR_IA32_VMX_EPT_VPID_CAP_INVEPT);
     1545    HMVMX_REPORT_MSR_CAP(fCaps, "EPT_ACCESS_DIRTY",                      MSR_IA32_VMX_EPT_VPID_CAP_EPT_ACCESS_DIRTY);
     1546    HMVMX_REPORT_MSR_CAP(fCaps, "INVEPT_SINGLE_CONTEXT",                 MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_SINGLE_CONTEXT);
     1547    HMVMX_REPORT_MSR_CAP(fCaps, "INVEPT_ALL_CONTEXTS",                   MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_ALL_CONTEXTS);
     1548    HMVMX_REPORT_MSR_CAP(fCaps, "INVVPID",                               MSR_IA32_VMX_EPT_VPID_CAP_INVVPID);
     1549    HMVMX_REPORT_MSR_CAP(fCaps, "INVVPID_INDIV_ADDR",                    MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR);
     1550    HMVMX_REPORT_MSR_CAP(fCaps, "INVVPID_SINGLE_CONTEXT",                MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT);
     1551    HMVMX_REPORT_MSR_CAP(fCaps, "INVVPID_ALL_CONTEXTS",                  MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_ALL_CONTEXTS);
     1552    HMVMX_REPORT_MSR_CAP(fCaps, "INVVPID_SINGLE_CONTEXT_RETAIN_GLOBALS", MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT_RETAIN_GLOBALS);
     1553}
     1554
     1555
     1556/**
     1557 * Reports MSR_IA32_VMX_MISC MSR to the log.
     1558 *
     1559 * @param   fMisc    The VMX misc. MSR value.
     1560 */
     1561static void hmR3VmxReportMiscMsr(PVM pVM, uint64_t fMisc)
     1562{
     1563    LogRel(("HM: MSR_IA32_VMX_MISC                 = %#RX64\n", fMisc));
     1564    if (MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT(fMisc) == pVM->hm.s.vmx.cPreemptTimerShift)
     1565        LogRel(("HM:   PREEMPT_TSC_BIT                   = %#x\n", MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT(fMisc)));
     1566    else
     1567        LogRel(("HM:   PREEMPT_TSC_BIT                   = %#x - erratum detected, using %#x instead\n",
     1568                MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT(fMisc), pVM->hm.s.vmx.cPreemptTimerShift));
     1569    LogRel(("HM:   STORE_EFERLMA_VMEXIT              = %RTbool\n", MSR_IA32_VMX_MISC_STORE_EFERLMA_VMEXIT(fMisc)));
     1570    uint8_t const fActivityState = MSR_IA32_VMX_MISC_ACTIVITY_STATES(fMisc);
     1571    LogRel(("HM:   ACTIVITY_STATES                   = %#x\n",     fActivityState));
     1572    HMVMX_REPORT_MSR_CAP(fActivityState, "  HLT",       VMX_VMCS_GUEST_ACTIVITY_HLT);
     1573    HMVMX_REPORT_MSR_CAP(fActivityState, "  SHUTDOWN",  VMX_VMCS_GUEST_ACTIVITY_SHUTDOWN);
     1574    HMVMX_REPORT_MSR_CAP(fActivityState, "  SIPI_WAIT", VMX_VMCS_GUEST_ACTIVITY_SIPI_WAIT);
     1575    LogRel(("HM:   CR3_TARGET                        = %#x\n",     MSR_IA32_VMX_MISC_CR3_TARGET(fMisc)));
     1576    LogRel(("HM:   MAX_MSR                           = %u\n",      MSR_IA32_VMX_MISC_MAX_MSR(fMisc)));
     1577    LogRel(("HM:   RDMSR_SMBASE_MSR_SMM              = %RTbool\n", MSR_IA32_VMX_MISC_RDMSR_SMBASE_MSR_SMM(fMisc)));
     1578    LogRel(("HM:   SMM_MONITOR_CTL_B2                = %RTbool\n", MSR_IA32_VMX_MISC_SMM_MONITOR_CTL_B2(fMisc)));
     1579    LogRel(("HM:   VMWRITE_VMEXIT_INFO               = %RTbool\n", MSR_IA32_VMX_MISC_VMWRITE_VMEXIT_INFO(fMisc)));
     1580    LogRel(("HM:   MSEG_ID                           = %#x\n",     MSR_IA32_VMX_MISC_MSEG_ID(fMisc)));
     1581}
     1582
     1583
     1584/**
     1585 * Reports MSR_IA32_VMX_VMCS_ENUM MSR to the log.
     1586 *
     1587 * @param   uVmcsEnum    The VMX VMCS enum MSR value.
     1588 */
     1589static void hmR3VmxReportVmcsEnumMsr(uint64_t uVmcsEnum)
     1590{
     1591    uint64_t const val = uVmcsEnum;
     1592    LogRel(("HM: MSR_IA32_VMX_VMCS_ENUM            = %#RX64\n", val));
     1593    LogRel(("HM:   HIGHEST_INDEX                     = %#x\n", MSR_IA32_VMX_VMCS_ENUM_HIGHEST_INDEX(val)));
     1594}
     1595
     1596
     1597/**
     1598 * Reports MSR_IA32_VMX_VMFUNC MSR to the log.
     1599 *
     1600 * @param   uVmFunc    The VMX VMFUNC MSR value.
     1601 */
     1602static void hmR3VmxReportVmfuncMsr(uint64_t uVmFunc)
     1603{
     1604    LogRel(("HM: MSR_IA32_VMX_VMFUNC               = %#RX64\n", uVmFunc));
     1605    HMVMX_REPORT_ALLOWED_FEAT(uVmFunc, "EPTP_SWITCHING", VMX_VMCS_CTRL_VMFUNC_EPTP_SWITCHING);
     1606}
     1607
     1608
     1609/**
     1610 * Reports VMX CR0, CR4 fixed MSRs.
     1611 *
     1612 * @param   pMsrs    Pointer to the VMX MSRs.
     1613 */
     1614static void hmR3VmxReportCrFixedMsrs(PVMXMSRS pMsrs)
     1615{
     1616    LogRel(("HM: MSR_IA32_VMX_CR0_FIXED0           = %#RX64\n", pMsrs->u64Cr0Fixed0));
     1617    LogRel(("HM: MSR_IA32_VMX_CR0_FIXED1           = %#RX64\n", pMsrs->u64Cr0Fixed1));
     1618    LogRel(("HM: MSR_IA32_VMX_CR4_FIXED0           = %#RX64\n", pMsrs->u64Cr4Fixed0));
     1619    LogRel(("HM: MSR_IA32_VMX_CR4_FIXED1           = %#RX64\n", pMsrs->u64Cr4Fixed1));
     1620}
     1621
     1622
     1623/**
     1624 * Finish VT-x initialization (after ring-0 init).
     1625 *
     1626 * @returns VBox status code.
     1627 * @param   pVM                The cross context VM structure.
     1628 */
     1629static int hmR3InitFinalizeR0Intel(PVM pVM)
     1630{
     1631    int rc;
     1632
     1633    Log(("pVM->hm.s.vmx.fSupported = %d\n", pVM->hm.s.vmx.fSupported));
     1634    AssertLogRelReturn(pVM->hm.s.vmx.Msrs.u64FeatCtrl != 0, VERR_HM_IPE_4);
     1635
     1636    LogRel(("HM: Using VT-x implementation 2.0\n"));
     1637    LogRel(("HM: Max resume loops                  = %u\n", pVM->hm.s.cMaxResumeLoops));
     1638    LogRel(("HM: Host CR4                          = %#RX64\n", pVM->hm.s.vmx.u64HostCr4));
     1639    LogRel(("HM: Host EFER                         = %#RX64\n", pVM->hm.s.vmx.u64HostEfer));
     1640    LogRel(("HM: MSR_IA32_SMM_MONITOR_CTL          = %#RX64\n", pVM->hm.s.vmx.u64HostSmmMonitorCtl));
     1641
     1642    hmR3VmxReportFeatCtlMsr(pVM->hm.s.vmx.Msrs.u64FeatCtrl);
     1643    hmR3VmxReportBasicMsr(pVM->hm.s.vmx.Msrs.u64Basic);
     1644
     1645    hmR3VmxReportPinBasedCtlsMsr(&pVM->hm.s.vmx.Msrs.PinCtls);
     1646    hmR3VmxReportProcBasedCtlsMsr(&pVM->hm.s.vmx.Msrs.ProcCtls);
     1647    if (pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
     1648        hmR3VmxReportProcBasedCtls2Msr(&pVM->hm.s.vmx.Msrs.ProcCtls2);
     1649
     1650    hmR3VmxReportEntryCtlsMsr(&pVM->hm.s.vmx.Msrs.EntryCtls);
     1651    hmR3VmxReportExitCtlsMsr(&pVM->hm.s.vmx.Msrs.ExitCtls);
     1652
     1653    if (MSR_IA32_VMX_BASIC_TRUE_CONTROLS(pVM->hm.s.vmx.Msrs.u64Basic))
     1654    {
     1655        /* We don't do extensive dumping of the true capability MSRs as we don't use them yet. */
     1656        /** @todo Consider using true capability MSRs and dumping them extensively. */
     1657        LogRel(("HM: MSR_IA32_VMX_TRUE_PINBASED_CTLS   = %#RX64\n", pVM->hm.s.vmx.Msrs.TruePinCtls));
     1658        LogRel(("HM: MSR_IA32_VMX_TRUE_PROCBASED_CTLS  = %#RX64\n", pVM->hm.s.vmx.Msrs.TrueProcCtls));
     1659        LogRel(("HM: MSR_IA32_VMX_TRUE_ENTRY_CTLS      = %#RX64\n", pVM->hm.s.vmx.Msrs.TrueEntryCtls));
     1660        LogRel(("HM: MSR_IA32_VMX_TRUE_EXIT_CTLS       = %#RX64\n", pVM->hm.s.vmx.Msrs.TrueExitCtls));
     1661    }
     1662
     1663    hmR3VmxReportMiscMsr(pVM, pVM->hm.s.vmx.Msrs.u64Misc);
     1664    hmR3VmxReportVmcsEnumMsr(pVM->hm.s.vmx.Msrs.u64VmcsEnum);
    14901665    if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps)
    1491     {
    1492         val = pVM->hm.s.vmx.Msrs.u64EptVpidCaps;
    1493         LogRel(("HM: MSR_IA32_VMX_EPT_VPID_CAP       = %#RX64\n", val));
    1494         HMVMX_REPORT_MSR_CAP(val, "RWX_X_ONLY",                            MSR_IA32_VMX_EPT_VPID_CAP_RWX_X_ONLY);
    1495         HMVMX_REPORT_MSR_CAP(val, "PAGE_WALK_LENGTH_4",                    MSR_IA32_VMX_EPT_VPID_CAP_PAGE_WALK_LENGTH_4);
    1496         HMVMX_REPORT_MSR_CAP(val, "EMT_UC",                                MSR_IA32_VMX_EPT_VPID_CAP_EMT_UC);
    1497         HMVMX_REPORT_MSR_CAP(val, "EMT_WB",                                MSR_IA32_VMX_EPT_VPID_CAP_EMT_WB);
    1498         HMVMX_REPORT_MSR_CAP(val, "PDE_2M",                                MSR_IA32_VMX_EPT_VPID_CAP_PDE_2M);
    1499         HMVMX_REPORT_MSR_CAP(val, "PDPTE_1G",                              MSR_IA32_VMX_EPT_VPID_CAP_PDPTE_1G);
    1500         HMVMX_REPORT_MSR_CAP(val, "INVEPT",                                MSR_IA32_VMX_EPT_VPID_CAP_INVEPT);
    1501         HMVMX_REPORT_MSR_CAP(val, "EPT_ACCESS_DIRTY",                      MSR_IA32_VMX_EPT_VPID_CAP_EPT_ACCESS_DIRTY);
    1502         HMVMX_REPORT_MSR_CAP(val, "INVEPT_SINGLE_CONTEXT",                 MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_SINGLE_CONTEXT);
    1503         HMVMX_REPORT_MSR_CAP(val, "INVEPT_ALL_CONTEXTS",                   MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_ALL_CONTEXTS);
    1504         HMVMX_REPORT_MSR_CAP(val, "INVVPID",                               MSR_IA32_VMX_EPT_VPID_CAP_INVVPID);
    1505         HMVMX_REPORT_MSR_CAP(val, "INVVPID_INDIV_ADDR",                    MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR);
    1506         HMVMX_REPORT_MSR_CAP(val, "INVVPID_SINGLE_CONTEXT",                MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT);
    1507         HMVMX_REPORT_MSR_CAP(val, "INVVPID_ALL_CONTEXTS",                  MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_ALL_CONTEXTS);
    1508         HMVMX_REPORT_MSR_CAP(val, "INVVPID_SINGLE_CONTEXT_RETAIN_GLOBALS", MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT_RETAIN_GLOBALS);
    1509     }
    1510 
    1511     val = pVM->hm.s.vmx.Msrs.u64Misc;
    1512     LogRel(("HM: MSR_IA32_VMX_MISC               = %#RX64\n", val));
    1513     if (MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT(val) == pVM->hm.s.vmx.cPreemptTimerShift)
    1514         LogRel(("HM:   PREEMPT_TSC_BIT                 = %#x\n", MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT(val)));
    1515     else
    1516     {
    1517         LogRel(("HM:   PREEMPT_TSC_BIT                 = %#x - erratum detected, using %#x instead\n",
    1518                 MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT(val), pVM->hm.s.vmx.cPreemptTimerShift));
    1519     }
    1520 
    1521     LogRel(("HM:   STORE_EFERLMA_VMEXIT            = %RTbool\n", RT_BOOL(MSR_IA32_VMX_MISC_STORE_EFERLMA_VMEXIT(val))));
    1522     LogRel(("HM:   ACTIVITY_STATES                 = %#x\n",     MSR_IA32_VMX_MISC_ACTIVITY_STATES(val)));
    1523     LogRel(("HM:   CR3_TARGET                      = %#x\n",     MSR_IA32_VMX_MISC_CR3_TARGET(val)));
    1524     LogRel(("HM:   MAX_MSR                         = %u\n",      MSR_IA32_VMX_MISC_MAX_MSR(val)));
    1525     LogRel(("HM:   RDMSR_SMBASE_MSR_SMM            = %RTbool\n", RT_BOOL(MSR_IA32_VMX_MISC_RDMSR_SMBASE_MSR_SMM(val))));
    1526     LogRel(("HM:   SMM_MONITOR_CTL_B2              = %RTbool\n", RT_BOOL(MSR_IA32_VMX_MISC_SMM_MONITOR_CTL_B2(val))));
    1527     LogRel(("HM:   VMWRITE_VMEXIT_INFO             = %RTbool\n", RT_BOOL(MSR_IA32_VMX_MISC_VMWRITE_VMEXIT_INFO(val))));
    1528     LogRel(("HM:   MSEG_ID                         = %#x\n",     MSR_IA32_VMX_MISC_MSEG_ID(val)));
    1529 
    1530     /* Paranoia */
    1531     AssertRelease(MSR_IA32_VMX_MISC_MAX_MSR(pVM->hm.s.vmx.Msrs.u64Misc) >= 512);
    1532 
    1533     LogRel(("HM: MSR_IA32_VMX_CR0_FIXED0         = %#RX64\n", pVM->hm.s.vmx.Msrs.u64Cr0Fixed0));
    1534     LogRel(("HM: MSR_IA32_VMX_CR0_FIXED1         = %#RX64\n", pVM->hm.s.vmx.Msrs.u64Cr0Fixed1));
    1535     LogRel(("HM: MSR_IA32_VMX_CR4_FIXED0         = %#RX64\n", pVM->hm.s.vmx.Msrs.u64Cr4Fixed0));
    1536     LogRel(("HM: MSR_IA32_VMX_CR4_FIXED1         = %#RX64\n", pVM->hm.s.vmx.Msrs.u64Cr4Fixed1));
    1537 
    1538     val = pVM->hm.s.vmx.Msrs.u64VmcsEnum;
    1539     LogRel(("HM: MSR_IA32_VMX_VMCS_ENUM          = %#RX64\n", val));
    1540     LogRel(("HM:   HIGHEST_INDEX                   = %#x\n", MSR_IA32_VMX_VMCS_ENUM_HIGHEST_INDEX(val)));
    1541 
    1542     val = pVM->hm.s.vmx.Msrs.u64Vmfunc;
    1543     if (val)
    1544     {
    1545         LogRel(("HM: MSR_IA32_VMX_VMFUNC             = %#RX64\n", val));
    1546         HMVMX_REPORT_ALLOWED_FEAT(val, "EPTP_SWITCHING", VMX_VMCS_CTRL_VMFUNC_EPTP_SWITCHING);
    1547     }
    1548 
    1549     LogRel(("HM: APIC-access page physaddr       = %#RHp\n", pVM->hm.s.vmx.HCPhysApicAccess));
    1550 
     1666        hmR3VmxReportEptVpidCapsMsr(pVM->hm.s.vmx.Msrs.u64EptVpidCaps);
     1667    if (pVM->hm.s.vmx.Msrs.u64Vmfunc)
     1668        hmR3VmxReportVmfuncMsr(pVM->hm.s.vmx.Msrs.u64Vmfunc);
     1669    hmR3VmxReportCrFixedMsrs(&pVM->hm.s.vmx.Msrs);
     1670
     1671    LogRel(("HM: APIC-access page physaddr         = %#RHp\n",  pVM->hm.s.vmx.HCPhysApicAccess));
    15511672    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    15521673    {
    1553         LogRel(("HM: VCPU%3d: MSR bitmap physaddr    = %#RHp\n", i, pVM->aCpus[i].hm.s.vmx.HCPhysMsrBitmap));
    1554         LogRel(("HM: VCPU%3d: VMCS physaddr          = %#RHp\n", i, pVM->aCpus[i].hm.s.vmx.HCPhysVmcs));
     1674        LogRel(("HM: VCPU%3d: MSR bitmap physaddr      = %#RHp\n", i, pVM->aCpus[i].hm.s.vmx.HCPhysMsrBitmap));
     1675        LogRel(("HM: VCPU%3d: VMCS physaddr            = %#RHp\n", i, pVM->aCpus[i].hm.s.vmx.HCPhysVmcs));
    15551676    }
    15561677
     
    15591680     */
    15601681    AssertLogRelReturn(   !pVM->hm.s.fNestedPaging
    1561                        || (pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_EPT),
     1682                       || (pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_EPT),
    15621683                       VERR_HM_IPE_1);
    15631684    AssertLogRelReturn(   !pVM->hm.s.vmx.fUnrestrictedGuest
    1564                        || (   (pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_UNRESTRICTED_GUEST)
     1685                       || (   (pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_UNRESTRICTED_GUEST)
    15651686                           && pVM->hm.s.fNestedPaging),
    15661687                       VERR_HM_IPE_1);
     
    15691690     * Enable VPID if configured and supported.
    15701691     */
    1571     if (pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VPID)
     1692    if (pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VPID)
    15721693        pVM->hm.s.vmx.fVpid = pVM->hm.s.vmx.fAllowVpid;
    15731694
     
    15761697     * Enable APIC register virtualization and virtual-interrupt delivery if supported.
    15771698     */
    1578     if (   (pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_APIC_REG_VIRT)
    1579         && (pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_INTR_DELIVERY))
     1699    if (   (pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_APIC_REG_VIRT)
     1700        && (pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_INTR_DELIVERY))
    15801701        pVM->hm.s.fVirtApicRegs = true;
    15811702
     
    15851706    /** @todo Add and query IPRT API for host OS support for posted-interrupt IPI
    15861707     *        here. */
    1587     if (   (pVM->hm.s.vmx.Msrs.VmxPinCtls.n.allowed1 & VMX_VMCS_CTRL_PIN_EXEC_POSTED_INTR)
    1588         && (pVM->hm.s.vmx.Msrs.VmxExit.n.allowed1 & VMX_VMCS_CTRL_EXIT_ACK_EXT_INT))
     1708    if (   (pVM->hm.s.vmx.Msrs.PinCtls.n.allowed1 & VMX_VMCS_CTRL_PIN_EXEC_POSTED_INTR)
     1709        && (pVM->hm.s.vmx.Msrs.ExitCtls.n.allowed1 & VMX_VMCS_CTRL_EXIT_ACK_EXT_INT))
    15891710        pVM->hm.s.fPostedIntrs = true;
    15901711#endif
     
    15951716     * in Nehalems and secondary VM exec. controls should be supported in all of them, but nonetheless it's Intel...
    15961717     */
    1597     if (   !(pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
     1718    if (   !(pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
    15981719        && CPUMR3GetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_RDTSCP))
    15991720    {
     
    16441765                rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pRealModeTSS, &GCPhys);
    16451766                AssertRCReturn(rc, rc);
    1646                 LogRel(("HM: Real Mode TSS guest physaddr    = %#RGp\n", GCPhys));
     1767                LogRel(("HM: Real Mode TSS guest physaddr      = %#RGp\n", GCPhys));
    16471768
    16481769                rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pNonPagingModeEPTPageTable, &GCPhys);
    16491770                AssertRCReturn(rc, rc);
    1650                 LogRel(("HM: Non-Paging Mode EPT CR3         = %#RGp\n", GCPhys));
     1771                LogRel(("HM: Non-Paging Mode EPT CR3           = %#RGp\n", GCPhys));
    16511772            }
    16521773        }
     
    16831804    }
    16841805
    1685     LogRel(("HM: Supports VMCS EFER fields       = %RTbool\n", pVM->hm.s.vmx.fSupportsVmcsEfer));
     1806    LogRel(("HM: Supports VMCS EFER fields         = %RTbool\n", pVM->hm.s.vmx.fSupportsVmcsEfer));
    16861807    LogRel(("HM: Enabled VMX\n"));
    16871808    pVM->hm.s.vmx.fEnabled = true;
     
    17191840        LogRel(("HM: Enabled nested paging\n"));
    17201841        if (pVM->hm.s.vmx.enmTlbFlushEpt == VMXTLBFLUSHEPT_SINGLE_CONTEXT)
    1721             LogRel(("HM:   EPT flush type                = Single context\n"));
     1842            LogRel(("HM:   EPT flush type                  = Single context\n"));
    17221843        else if (pVM->hm.s.vmx.enmTlbFlushEpt == VMXTLBFLUSHEPT_ALL_CONTEXTS)
    1723             LogRel(("HM:   EPT flush type                = All contexts\n"));
     1844            LogRel(("HM:   EPT flush type                  = All contexts\n"));
    17241845        else if (pVM->hm.s.vmx.enmTlbFlushEpt == VMXTLBFLUSHEPT_NOT_SUPPORTED)
    1725             LogRel(("HM:   EPT flush type                = Not supported\n"));
     1846            LogRel(("HM:   EPT flush type                  = Not supported\n"));
    17261847        else
    1727             LogRel(("HM:   EPT flush type                = %#x\n", pVM->hm.s.vmx.enmTlbFlushEpt));
     1848            LogRel(("HM:   EPT flush type                  = %#x\n", pVM->hm.s.vmx.enmTlbFlushEpt));
    17281849
    17291850        if (pVM->hm.s.vmx.fUnrestrictedGuest)
     
    17521873        LogRel(("HM: Enabled VPID\n"));
    17531874        if (pVM->hm.s.vmx.enmTlbFlushVpid == VMXTLBFLUSHVPID_INDIV_ADDR)
    1754             LogRel(("HM:   VPID flush type               = Individual addresses\n"));
     1875            LogRel(("HM:   VPID flush type                 = Individual addresses\n"));
    17551876        else if (pVM->hm.s.vmx.enmTlbFlushVpid == VMXTLBFLUSHVPID_SINGLE_CONTEXT)
    1756             LogRel(("HM:   VPID flush type               = Single context\n"));
     1877            LogRel(("HM:   VPID flush type                 = Single context\n"));
    17571878        else if (pVM->hm.s.vmx.enmTlbFlushVpid == VMXTLBFLUSHVPID_ALL_CONTEXTS)
    1758             LogRel(("HM:   VPID flush type               = All contexts\n"));
     1879            LogRel(("HM:   VPID flush type                 = All contexts\n"));
    17591880        else if (pVM->hm.s.vmx.enmTlbFlushVpid == VMXTLBFLUSHVPID_SINGLE_CONTEXT_RETAIN_GLOBALS)
    1760             LogRel(("HM:   VPID flush type               = Single context retain globals\n"));
     1881            LogRel(("HM:   VPID flush type                 = Single context retain globals\n"));
    17611882        else
    1762             LogRel(("HM:   VPID flush type               = %#x\n", pVM->hm.s.vmx.enmTlbFlushVpid));
     1883            LogRel(("HM:   VPID flush type                 = %#x\n", pVM->hm.s.vmx.enmTlbFlushVpid));
    17631884    }
    17641885    else if (pVM->hm.s.vmx.enmTlbFlushVpid == VMXTLBFLUSHVPID_NOT_SUPPORTED)
     
    17921913        LogRel(("HM: AMD Cpu with erratum 170 family %#x model %#x stepping %#x\n", u32Family, u32Model, u32Stepping));
    17931914    LogRel(("HM: Max resume loops                  = %u\n",     pVM->hm.s.cMaxResumeLoops));
    1794     LogRel(("HM: CPUID 0x80000001.u32AMDFeatureECX = %#RX32\n", pVM->hm.s.cpuid.u32AMDFeatureECX));
    1795     LogRel(("HM: CPUID 0x80000001.u32AMDFeatureEDX = %#RX32\n", pVM->hm.s.cpuid.u32AMDFeatureEDX));
    17961915    LogRel(("HM: AMD HWCR MSR                      = %#RX64\n", pVM->hm.s.svm.u64MsrHwcr));
    17971916    LogRel(("HM: AMD-V revision                    = %#x\n",    pVM->hm.s.svm.u32Rev));
     
    33993518    if (iStatusCode == VERR_VMX_UNABLE_TO_START_VM)
    34003519    {
    3401         LogRel(("HM: VERR_VMX_UNABLE_TO_START_VM: VM-entry allowed    %#RX32\n", pVM->hm.s.vmx.Msrs.VmxEntry.n.allowed1));
    3402         LogRel(("HM: VERR_VMX_UNABLE_TO_START_VM: VM-entry disallowed %#RX32\n", pVM->hm.s.vmx.Msrs.VmxEntry.n.disallowed0));
     3520        LogRel(("HM: VERR_VMX_UNABLE_TO_START_VM: VM-entry allowed    %#RX32\n", pVM->hm.s.vmx.Msrs.EntryCtls.n.allowed1));
     3521        LogRel(("HM: VERR_VMX_UNABLE_TO_START_VM: VM-entry disallowed %#RX32\n", pVM->hm.s.vmx.Msrs.EntryCtls.n.disallowed0));
    34033522    }
    34043523    else if (iStatusCode == VERR_VMX_INVALID_VMXON_PTR)
  • trunk/src/VBox/VMM/include/HMInternal.h

    r73287 r73293  
    560560    uint32_t                        cPatches;
    561561    HMTPRPATCH                      aPatches[64];
    562 
    563     struct
    564     {
    565         uint32_t                    u32AMDFeatureECX;
    566         uint32_t                    u32AMDFeatureEDX;
    567     } cpuid;
    568562
    569563    /** Last recorded error code during HM ring-0 init. */
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette