VirtualBox

Changeset 48212 in vbox


Timestamp: Aug 30, 2013 11:02:22 PM (11 years ago)
Author:
vboxsync
Message:

VMM/HM: Naming fixes.

Location:
trunk/src/VBox/VMM
Files:
5 edited

Legend: Unmodified | Added | Removed
  • trunk/src/VBox/VMM/VMMR0/HMR0.cpp

    r48211 r48212  
    12491249    pVM->hm.s.vmx.fUsePreemptTimer      = g_HvmR0.vmx.fUsePreemptTimer;
    12501250    pVM->hm.s.vmx.cPreemptTimerShift    = g_HvmR0.vmx.cPreemptTimerShift;
    1251     pVM->hm.s.vmx.msr.feature_ctrl      = g_HvmR0.vmx.msr.u64FeatureCtrl;
     1251    pVM->hm.s.vmx.msr.u64FeatureCtrl    = g_HvmR0.vmx.msr.u64FeatureCtrl;
    12521252    pVM->hm.s.vmx.u64HostCr4            = g_HvmR0.vmx.u64HostCr4;
    12531253    pVM->hm.s.vmx.u64HostEfer           = g_HvmR0.vmx.u64HostEfer;
    1254     pVM->hm.s.vmx.msr.vmx_basic_info    = g_HvmR0.vmx.msr.u64BasicInfo;
    1255     pVM->hm.s.vmx.msr.vmx_pin_ctls      = g_HvmR0.vmx.msr.VmxPinCtls;
    1256     pVM->hm.s.vmx.msr.vmx_proc_ctls     = g_HvmR0.vmx.msr.VmxProcCtls;
    1257     pVM->hm.s.vmx.msr.vmx_proc_ctls2    = g_HvmR0.vmx.msr.VmxProcCtls2;
    1258     pVM->hm.s.vmx.msr.vmx_exit          = g_HvmR0.vmx.msr.VmxExit;
    1259     pVM->hm.s.vmx.msr.vmx_entry         = g_HvmR0.vmx.msr.VmxEntry;
    1260     pVM->hm.s.vmx.msr.vmx_misc          = g_HvmR0.vmx.msr.u64Misc;
    1261     pVM->hm.s.vmx.msr.vmx_cr0_fixed0    = g_HvmR0.vmx.msr.u64Cr0Fixed0;
    1262     pVM->hm.s.vmx.msr.vmx_cr0_fixed1    = g_HvmR0.vmx.msr.u64Cr0Fixed1;
    1263     pVM->hm.s.vmx.msr.vmx_cr4_fixed0    = g_HvmR0.vmx.msr.u64Cr4Fixed0;
    1264     pVM->hm.s.vmx.msr.vmx_cr4_fixed1    = g_HvmR0.vmx.msr.u64Cr4Fixed1;
    1265     pVM->hm.s.vmx.msr.vmx_vmcs_enum     = g_HvmR0.vmx.msr.u64VmcsEnum;
    1266     pVM->hm.s.vmx.msr.vmx_vmfunc        = g_HvmR0.vmx.msr.u64Vmfunc;
    1267     pVM->hm.s.vmx.msr.vmx_ept_vpid_caps = g_HvmR0.vmx.msr.u64EptVpidCaps;
     1254    pVM->hm.s.vmx.msr.u64BasicInfo      = g_HvmR0.vmx.msr.u64BasicInfo;
     1255    pVM->hm.s.vmx.msr.VmxPinCtls        = g_HvmR0.vmx.msr.VmxPinCtls;
     1256    pVM->hm.s.vmx.msr.VmxProcCtls       = g_HvmR0.vmx.msr.VmxProcCtls;
     1257    pVM->hm.s.vmx.msr.VmxProcCtls2      = g_HvmR0.vmx.msr.VmxProcCtls2;
     1258    pVM->hm.s.vmx.msr.VmxExit           = g_HvmR0.vmx.msr.VmxExit;
     1259    pVM->hm.s.vmx.msr.VmxEntry          = g_HvmR0.vmx.msr.VmxEntry;
     1260    pVM->hm.s.vmx.msr.u64Misc           = g_HvmR0.vmx.msr.u64Misc;
     1261    pVM->hm.s.vmx.msr.u64Cr0Fixed0      = g_HvmR0.vmx.msr.u64Cr0Fixed0;
     1262    pVM->hm.s.vmx.msr.u64Cr0Fixed1      = g_HvmR0.vmx.msr.u64Cr0Fixed1;
     1263    pVM->hm.s.vmx.msr.u64Cr4Fixed0      = g_HvmR0.vmx.msr.u64Cr4Fixed0;
     1264    pVM->hm.s.vmx.msr.u64Cr4Fixed1      = g_HvmR0.vmx.msr.u64Cr4Fixed1;
     1265    pVM->hm.s.vmx.msr.u64VmcsEnum       = g_HvmR0.vmx.msr.u64VmcsEnum;
     1266    pVM->hm.s.vmx.msr.u64Vmfunc         = g_HvmR0.vmx.msr.u64Vmfunc;
     1267    pVM->hm.s.vmx.msr.u64EptVpidCaps    = g_HvmR0.vmx.msr.u64EptVpidCaps;
    12681268    pVM->hm.s.svm.msrHwcr               = g_HvmR0.svm.msrHwcr;
    12691269    pVM->hm.s.svm.u32Rev                = g_HvmR0.svm.u32Rev;
  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp

    r48205 r48212  
    729729    {
    730730        /* Write the VMCS revision dword to the VMXON region. */
    731         *(uint32_t *)pvCpuPage = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(pVM->hm.s.vmx.msr.vmx_basic_info);
     731        *(uint32_t *)pvCpuPage = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(pVM->hm.s.vmx.msr.u64BasicInfo);
    732732    }
    733733
     
    839839#endif
    840840
    841         if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
     841        if (pVM->hm.s.vmx.msr.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
    842842            hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjMsrBitmap, &pVCpu->hm.s.vmx.pvMsrBitmap, &pVCpu->hm.s.vmx.HCPhysMsrBitmap);
    843843
     
    907907
    908908    /* Allocate the APIC-access page for trapping APIC accesses from the guest. */
    909     if (pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)
     909    if (pVM->hm.s.vmx.msr.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)
    910910    {
    911911        rc = hmR0VmxPageAllocZ(&pVM->hm.s.vmx.hMemObjApicAccess, (PRTR0PTR)&pVM->hm.s.vmx.pbApicAccess,
     
    924924
    925925        /* Allocate the VM control structure (VMCS). */
    926         AssertReturn(MSR_IA32_VMX_BASIC_INFO_VMCS_SIZE(pVM->hm.s.vmx.msr.vmx_basic_info) <= PAGE_SIZE, VERR_INTERNAL_ERROR);
     926        AssertReturn(MSR_IA32_VMX_BASIC_INFO_VMCS_SIZE(pVM->hm.s.vmx.msr.u64BasicInfo) <= PAGE_SIZE, VERR_INTERNAL_ERROR);
    927927        rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjVmcs, &pVCpu->hm.s.vmx.pvVmcs, &pVCpu->hm.s.vmx.HCPhysVmcs);
    928928        if (RT_FAILURE(rc))
     
    930930
    931931        /* Allocate the Virtual-APIC page for transparent TPR accesses. */
    932         if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
     932        if (pVM->hm.s.vmx.msr.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
    933933        {
    934934            rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjVirtApic, (PRTR0PTR)&pVCpu->hm.s.vmx.pbVirtApic,
     
    939939
    940940        /* Allocate the MSR-bitmap if supported by the CPU. The MSR-bitmap is for transparent accesses of specific MSRs. */
    941         if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
     941        if (pVM->hm.s.vmx.msr.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
    942942        {
    943943            rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjMsrBitmap, &pVCpu->hm.s.vmx.pvMsrBitmap,
     
    10311031    {
    10321032        /* We require ALL_CONTEXT flush-type to be available on the CPU. See hmR0VmxSetupTaggedTlb(). */
    1033         Assert(pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_ALL_CONTEXTS);
     1033        Assert(pVM->hm.s.vmx.msr.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_ALL_CONTEXTS);
    10341034        hmR0VmxFlushEpt(pVM, NULL /* pVCpu */, VMX_FLUSH_EPT_ALL_CONTEXTS);
    10351035        pCpu->fFlushAsidBeforeUse = false;
     
    10421042         *        when later we use a VM with NestedPaging. To fix this properly we will
    10431043         *        have to pass '&g_HvmR0' (see HMR0.cpp) to this function and read
    1044          *        'vmx_ept_vpid_caps' from it. Sigh. */
     1044         *        'u64EptVpidCaps' from it. Sigh. */
    10451045        pCpu->fFlushAsidBeforeUse = true;
    10461046    }
     
    12261226        if (pVM->hm.s.vmx.fVpid)
    12271227        {
    1228             if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR)
     1228            if (pVM->hm.s.vmx.msr.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR)
    12291229            {
    12301230                hmR0VmxFlushVpid(pVM, pVCpu, VMX_FLUSH_VPID_INDIV_ADDR, GCVirt);
     
    13831383         * as supported by the CPU.
    13841384         */
    1385         if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR)
     1385        if (pVM->hm.s.vmx.msr.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR)
    13861386        {
    13871387            for (uint32_t i = 0; i < pVCpu->hm.s.TlbShootdown.cPages; i++)
     
    15541554        {
    15551555            /* Flush individual guest entries using VPID or as little as possible with EPT as supported by the CPU. */
    1556             if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR)
     1556            if (pVM->hm.s.vmx.msr.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR)
    15571557            {
    15581558                for (uint32_t i = 0; i < pVCpu->hm.s.TlbShootdown.cPages; i++)
     
    16181618    if (pVM->hm.s.fNestedPaging)
    16191619    {
    1620         if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT)
    1621         {
    1622             if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_SINGLE_CONTEXT)
     1620        if (pVM->hm.s.vmx.msr.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT)
     1621        {
     1622            if (pVM->hm.s.vmx.msr.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_SINGLE_CONTEXT)
    16231623                pVM->hm.s.vmx.enmFlushEpt = VMX_FLUSH_EPT_SINGLE_CONTEXT;
    1624             else if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_ALL_CONTEXTS)
     1624            else if (pVM->hm.s.vmx.msr.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_ALL_CONTEXTS)
    16251625                pVM->hm.s.vmx.enmFlushEpt = VMX_FLUSH_EPT_ALL_CONTEXTS;
    16261626            else
     
    16321632
    16331633            /* Make sure the write-back cacheable memory type for EPT is supported. */
    1634             if (!(pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_EMT_WB))
     1634            if (!(pVM->hm.s.vmx.msr.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_EMT_WB))
    16351635            {
    1636                 LogRel(("hmR0VmxSetupTaggedTlb: Unsupported EPTP memory type %#x.\n", pVM->hm.s.vmx.msr.vmx_ept_vpid_caps));
     1636                LogRel(("hmR0VmxSetupTaggedTlb: Unsupported EPTP memory type %#x.\n", pVM->hm.s.vmx.msr.u64EptVpidCaps));
    16371637                pVM->hm.s.vmx.enmFlushEpt = VMX_FLUSH_EPT_NOT_SUPPORTED;
    16381638                return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
     
    16521652    if (pVM->hm.s.vmx.fVpid)
    16531653    {
    1654         if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID)
    1655         {
    1656             if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT)
     1654        if (pVM->hm.s.vmx.msr.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID)
     1655        {
     1656            if (pVM->hm.s.vmx.msr.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT)
    16571657                pVM->hm.s.vmx.enmFlushVpid = VMX_FLUSH_VPID_SINGLE_CONTEXT;
    1658             else if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_ALL_CONTEXTS)
     1658            else if (pVM->hm.s.vmx.msr.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_ALL_CONTEXTS)
    16591659                pVM->hm.s.vmx.enmFlushVpid = VMX_FLUSH_VPID_ALL_CONTEXTS;
    16601660            else
    16611661            {
    16621662                /* Neither SINGLE nor ALL-context flush types for VPID is supported by the CPU. Ignore VPID capability. */
    1663                 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR)
     1663                if (pVM->hm.s.vmx.msr.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR)
    16641664                    LogRel(("hmR0VmxSetupTaggedTlb: Only INDIV_ADDR supported. Ignoring VPID.\n"));
    1665                 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT_RETAIN_GLOBALS)
     1665                if (pVM->hm.s.vmx.msr.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT_RETAIN_GLOBALS)
    16661666                    LogRel(("hmR0VmxSetupTaggedTlb: Only SINGLE_CONTEXT_RETAIN_GLOBALS supported. Ignoring VPID.\n"));
    16671667                pVM->hm.s.vmx.enmFlushVpid = VMX_FLUSH_VPID_NOT_SUPPORTED;
     
    17051705    AssertPtr(pVCpu);
    17061706
    1707     uint32_t val = pVM->hm.s.vmx.msr.vmx_pin_ctls.n.disallowed0;    /* Bits set here must always be set. */
    1708     uint32_t zap = pVM->hm.s.vmx.msr.vmx_pin_ctls.n.allowed1;       /* Bits cleared here must always be cleared. */
     1707    uint32_t val = pVM->hm.s.vmx.msr.VmxPinCtls.n.disallowed0;          /* Bits set here must always be set. */
     1708    uint32_t zap = pVM->hm.s.vmx.msr.VmxPinCtls.n.allowed1;             /* Bits cleared here must always be cleared. */
    17091709
    17101710    val |=   VMX_VMCS_CTRL_PIN_EXEC_EXT_INT_EXIT           /* External interrupts causes a VM-exits. */
     
    17151715    if (pVM->hm.s.vmx.fUsePreemptTimer)
    17161716    {
    1717         Assert(pVM->hm.s.vmx.msr.vmx_pin_ctls.n.allowed1 & VMX_VMCS_CTRL_PIN_EXEC_PREEMPT_TIMER);
     1717        Assert(pVM->hm.s.vmx.msr.VmxPinCtls.n.allowed1 & VMX_VMCS_CTRL_PIN_EXEC_PREEMPT_TIMER);
    17181718        val |= VMX_VMCS_CTRL_PIN_EXEC_PREEMPT_TIMER;
    17191719    }
     
    17221722    {
    17231723        LogRel(("hmR0VmxSetupPinCtls: invalid pin-based VM-execution controls combo! cpu=%#RX64 val=%#RX64 zap=%#RX64\n",
    1724                 pVM->hm.s.vmx.msr.vmx_pin_ctls.n.disallowed0, val, zap));
     1724                pVM->hm.s.vmx.msr.VmxPinCtls.n.disallowed0, val, zap));
    17251725        pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_PIN_EXEC;
    17261726        return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
     
    17491749
    17501750    int rc = VERR_INTERNAL_ERROR_5;
    1751     uint32_t val = pVM->hm.s.vmx.msr.vmx_proc_ctls.n.disallowed0;       /* Bits set here must be set in the VMCS. */
    1752     uint32_t zap = pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1;          /* Bits cleared here must be cleared in the VMCS. */
     1751    uint32_t val = pVM->hm.s.vmx.msr.VmxProcCtls.n.disallowed0;         /* Bits set here must be set in the VMCS. */
     1752    uint32_t zap = pVM->hm.s.vmx.msr.VmxProcCtls.n.allowed1;            /* Bits cleared here must be cleared in the VMCS. */
    17531753
    17541754    val |=   VMX_VMCS_CTRL_PROC_EXEC_HLT_EXIT                  /* HLT causes a VM-exit. */
     
    17611761
    17621762    /* We toggle VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT later, check if it's not -always- needed to be set or clear. */
    1763     if (   !(pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT)
    1764         ||  (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.disallowed0 & VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT))
     1763    if (   !(pVM->hm.s.vmx.msr.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT)
     1764        ||  (pVM->hm.s.vmx.msr.VmxProcCtls.n.disallowed0 & VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT))
    17651765    {
    17661766        LogRel(("hmR0VmxSetupProcCtls: unsupported VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT combo!"));
     
    17791779
    17801780    /* Use TPR shadowing if supported by the CPU. */
    1781     if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
     1781    if (pVM->hm.s.vmx.msr.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
    17821782    {
    17831783        Assert(pVCpu->hm.s.vmx.HCPhysVirtApic);
     
    17991799
    18001800    /* Use MSR-bitmaps if supported by the CPU. */
    1801     if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
     1801    if (pVM->hm.s.vmx.msr.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
    18021802    {
    18031803        val |= VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS;
     
    18241824
    18251825    /* Use the secondary processor-based VM-execution controls if supported by the CPU. */
    1826     if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
     1826    if (pVM->hm.s.vmx.msr.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
    18271827        val |= VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL;
    18281828
     
    18301830    {
    18311831        LogRel(("hmR0VmxSetupProcCtls: invalid processor-based VM-execution controls combo! cpu=%#RX64 val=%#RX64 zap=%#RX64\n",
    1832                 pVM->hm.s.vmx.msr.vmx_proc_ctls.n.disallowed0, val, zap));
     1832                pVM->hm.s.vmx.msr.VmxProcCtls.n.disallowed0, val, zap));
    18331833        pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_PROC_EXEC;
    18341834        return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
     
    18461846    if (RT_LIKELY(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL))
    18471847    {
    1848         val = pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.disallowed0;           /* Bits set here must be set in the VMCS. */
    1849         zap = pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1;              /* Bits cleared here must be cleared in the VMCS. */
    1850 
    1851         if (pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT)
     1848        val = pVM->hm.s.vmx.msr.VmxProcCtls2.n.disallowed0;             /* Bits set here must be set in the VMCS. */
     1849        zap = pVM->hm.s.vmx.msr.VmxProcCtls2.n.allowed1;                /* Bits cleared here must be cleared in the VMCS. */
     1850
     1851        if (pVM->hm.s.vmx.msr.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT)
    18521852            val |= VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT;                /* WBINVD causes a VM-exit. */
    18531853
     
    18611861             * See Intel spec. 25.4 "Changes to instruction behaviour in VMX non-root operation".
    18621862             */
    1863             if (pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_INVPCID)
     1863            if (pVM->hm.s.vmx.msr.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_INVPCID)
    18641864                val |= VMX_VMCS_CTRL_PROC_EXEC2_INVPCID;
    18651865        }
     
    18741874        /** @todo VIRT_X2APIC support, it's mutually exclusive with this. So must be
    18751875         *        done dynamically. */
    1876         if (pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)
     1876        if (pVM->hm.s.vmx.msr.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)
    18771877        {
    18781878            Assert(pVM->hm.s.vmx.HCPhysApicAccess);
     
    18831883        }
    18841884
    1885         if (pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
     1885        if (pVM->hm.s.vmx.msr.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
    18861886        {
    18871887            val |= VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP;                     /* Enable RDTSCP support. */
    1888             if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
     1888            if (pVM->hm.s.vmx.msr.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
    18891889                hmR0VmxSetMsrPermission(pVCpu, MSR_K8_TSC_AUX, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
    18901890        }
     
    18931893        {
    18941894            LogRel(("hmR0VmxSetupProcCtls: invalid secondary processor-based VM-execution controls combo! "
    1895                     "cpu=%#RX64 val=%#RX64 zap=%#RX64\n", pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.disallowed0, val, zap));
     1895                    "cpu=%#RX64 val=%#RX64 zap=%#RX64\n", pVM->hm.s.vmx.msr.VmxProcCtls2.n.disallowed0, val, zap));
    18961896            return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
    18971897        }
     
    21312131
    21322132        /* Set revision dword at the beginning of the VMCS structure. */
    2133         *(uint32_t *)pVCpu->hm.s.vmx.pvVmcs = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(pVM->hm.s.vmx.msr.vmx_basic_info);
     2133        *(uint32_t *)pVCpu->hm.s.vmx.pvVmcs = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(pVM->hm.s.vmx.msr.u64BasicInfo);
    21342134
    21352135        /* Initialize our VMCS region in memory, set the VMCS launch state to "clear". */
     
    25522552
    25532553    /* Shouldn't ever happen but there -is- a number. We're well within the recommended 512. */
    2554     if (RT_UNLIKELY(cHostMsrs > MSR_IA32_VMX_MISC_MAX_MSR(pVM->hm.s.vmx.msr.vmx_misc)))
    2555     {
    2556         LogRel(("cHostMsrs=%u Cpu=%u\n", cHostMsrs, (unsigned)MSR_IA32_VMX_MISC_MAX_MSR(pVM->hm.s.vmx.msr.vmx_misc)));
     2554    if (RT_UNLIKELY(cHostMsrs > MSR_IA32_VMX_MISC_MAX_MSR(pVM->hm.s.vmx.msr.u64Misc)))
     2555    {
     2556        LogRel(("cHostMsrs=%u Cpu=%u\n", cHostMsrs, (unsigned)MSR_IA32_VMX_MISC_MAX_MSR(pVM->hm.s.vmx.msr.u64Misc)));
    25572557        pVCpu->hm.s.u32HMError = VMX_UFC_INSUFFICIENT_HOST_MSR_STORAGE;
    25582558        return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
     
    26162616    {
    26172617        PVM pVM      = pVCpu->CTX_SUFF(pVM);
    2618         uint32_t val = pVM->hm.s.vmx.msr.vmx_entry.n.disallowed0;            /* Bits set here must be set in the VMCS. */
    2619         uint32_t zap = pVM->hm.s.vmx.msr.vmx_entry.n.allowed1;               /* Bits cleared here must be cleared in the VMCS. */
     2618        uint32_t val = pVM->hm.s.vmx.msr.VmxEntry.n.disallowed0;            /* Bits set here must be set in the VMCS. */
     2619        uint32_t zap = pVM->hm.s.vmx.msr.VmxEntry.n.allowed1;               /* Bits cleared here must be cleared in the VMCS. */
    26202620
    26212621        /* Load debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x capable CPUs only supports the 1-setting of this bit. */
     
    26412641        {
    26422642            LogRel(("hmR0VmxLoadGuestEntryCtls: invalid VM-entry controls combo! cpu=%RX64 val=%RX64 zap=%RX64\n",
    2643                     pVM->hm.s.vmx.msr.vmx_entry.n.disallowed0, val, zap));
     2643                    pVM->hm.s.vmx.msr.VmxEntry.n.disallowed0, val, zap));
    26442644            pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_ENTRY;
    26452645            return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
     
    26752675    {
    26762676        PVM pVM      = pVCpu->CTX_SUFF(pVM);
    2677         uint32_t val = pVM->hm.s.vmx.msr.vmx_exit.n.disallowed0;            /* Bits set here must be set in the VMCS. */
    2678         uint32_t zap = pVM->hm.s.vmx.msr.vmx_exit.n.allowed1;               /* Bits cleared here must be cleared in the VMCS. */
     2677        uint32_t val = pVM->hm.s.vmx.msr.VmxExit.n.disallowed0;            /* Bits set here must be set in the VMCS. */
     2678        uint32_t zap = pVM->hm.s.vmx.msr.VmxExit.n.allowed1;               /* Bits cleared here must be cleared in the VMCS. */
    26792679
    26802680        /* Save debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x CPUs only supported the 1-setting of this bit. */
     
    27062706         *        VMX_VMCS_CTRL_EXIT_LOAD_HOST_EFER_MSR. */
    27072707
    2708         if (pVM->hm.s.vmx.msr.vmx_exit.n.allowed1 & VMX_VMCS_CTRL_EXIT_SAVE_VMX_PREEMPT_TIMER)
     2708        if (pVM->hm.s.vmx.msr.VmxExit.n.allowed1 & VMX_VMCS_CTRL_EXIT_SAVE_VMX_PREEMPT_TIMER)
    27092709            val |= VMX_VMCS_CTRL_EXIT_SAVE_VMX_PREEMPT_TIMER;
    27102710
     
    27122712        {
    27132713            LogRel(("hmR0VmxSetupProcCtls: invalid VM-exit controls combo! cpu=%RX64 val=%RX64 zap=%RX64\n",
    2714                     pVM->hm.s.vmx.msr.vmx_exit.n.disallowed0, val, zap));
     2714                    pVM->hm.s.vmx.msr.VmxExit.n.disallowed0, val, zap));
    27152715            pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_EXIT;
    27162716            return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
     
    30883088
    30893089        /* Set/clear the CR0 specific bits along with their exceptions (PE, PG, CD, NW). */
    3090         uint32_t uSetCR0 = (uint32_t)(pVM->hm.s.vmx.msr.vmx_cr0_fixed0 & pVM->hm.s.vmx.msr.vmx_cr0_fixed1);
    3091         uint32_t uZapCR0 = (uint32_t)(pVM->hm.s.vmx.msr.vmx_cr0_fixed0 | pVM->hm.s.vmx.msr.vmx_cr0_fixed1);
     3090        uint32_t uSetCR0 = (uint32_t)(pVM->hm.s.vmx.msr.u64Cr0Fixed0 & pVM->hm.s.vmx.msr.u64Cr0Fixed1);
     3091        uint32_t uZapCR0 = (uint32_t)(pVM->hm.s.vmx.msr.u64Cr0Fixed0 | pVM->hm.s.vmx.msr.u64Cr0Fixed1);
    30923092        if (pVM->hm.s.vmx.fUnrestrictedGuest)               /* Exceptions for unrestricted-guests for fixed CR0 bits (PE, PG). */
    30933093            uSetCR0 &= ~(X86_CR0_PE | X86_CR0_PG);
     
    33163316
    33173317        /* We need to set and clear the CR4 specific bits here (mainly the X86_CR4_VMXE bit). */
    3318         uint64_t uSetCR4 = (pVM->hm.s.vmx.msr.vmx_cr4_fixed0 & pVM->hm.s.vmx.msr.vmx_cr4_fixed1);
    3319         uint64_t uZapCR4 = (pVM->hm.s.vmx.msr.vmx_cr4_fixed0 | pVM->hm.s.vmx.msr.vmx_cr4_fixed1);
     3318        uint64_t uSetCR4 = (pVM->hm.s.vmx.msr.u64Cr4Fixed0 & pVM->hm.s.vmx.msr.u64Cr4Fixed1);
     3319        uint64_t uZapCR4 = (pVM->hm.s.vmx.msr.u64Cr4Fixed0 | pVM->hm.s.vmx.msr.u64Cr4Fixed1);
    33203320        u32GuestCR4 |= uSetCR4;
    33213321        u32GuestCR4 &= uZapCR4;
     
    33793379    {
    33803380        /* If the CPU supports the monitor trap flag, use it for single stepping in DBGF and avoid intercepting #DB. */
    3381         if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG)
     3381        if (pVM->hm.s.vmx.msr.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG)
    33823382        {
    33833383            pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG;
     
    39783978
    39793979        /* Shouldn't ever happen but there -is- a number. We're well within the recommended 512. */
    3980         if (cGuestMsrs > MSR_IA32_VMX_MISC_MAX_MSR(pVM->hm.s.vmx.msr.vmx_misc))
     3980        if (cGuestMsrs > MSR_IA32_VMX_MISC_MAX_MSR(pVM->hm.s.vmx.msr.u64Misc))
    39813981        {
    39823982            LogRel(("CPU autoload/store MSR count in VMCS exceeded cGuestMsrs=%u.\n", cGuestMsrs));
     
    63436343DECLINLINE(void) hmR0VmxSetIntWindowExitVmcs(PVMCPU pVCpu)
    63446344{
    6345     if (RT_LIKELY(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT))
     6345    if (RT_LIKELY(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.msr.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT))
    63466346    {
    63476347        if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT))
     
    79137913         * CR0.
    79147914         */
    7915         uint32_t uSetCR0 = (uint32_t)(pVM->hm.s.vmx.msr.vmx_cr0_fixed0 & pVM->hm.s.vmx.msr.vmx_cr0_fixed1);
    7916         uint32_t uZapCR0 = (uint32_t)(pVM->hm.s.vmx.msr.vmx_cr0_fixed0 | pVM->hm.s.vmx.msr.vmx_cr0_fixed1);
     7915        uint32_t uSetCR0 = (uint32_t)(pVM->hm.s.vmx.msr.u64Cr0Fixed0 & pVM->hm.s.vmx.msr.u64Cr0Fixed1);
     7916        uint32_t uZapCR0 = (uint32_t)(pVM->hm.s.vmx.msr.u64Cr0Fixed0 | pVM->hm.s.vmx.msr.u64Cr0Fixed1);
    79177917        /* Exceptions for unrestricted-guests for fixed CR0 bits (PE, PG).
    79187918           See Intel spec. 26.3.1 "Checks on guest Guest Control Registers, Debug Registers and MSRs." */
     
    79347934         * CR4.
    79357935         */
    7936         uint64_t uSetCR4 = (pVM->hm.s.vmx.msr.vmx_cr4_fixed0 & pVM->hm.s.vmx.msr.vmx_cr4_fixed1);
    7937         uint64_t uZapCR4 = (pVM->hm.s.vmx.msr.vmx_cr4_fixed0 | pVM->hm.s.vmx.msr.vmx_cr4_fixed1);
     7936        uint64_t uSetCR4 = (pVM->hm.s.vmx.msr.u64Cr4Fixed0 & pVM->hm.s.vmx.msr.u64Cr4Fixed1);
     7937        uint64_t uZapCR4 = (pVM->hm.s.vmx.msr.u64Cr4Fixed0 | pVM->hm.s.vmx.msr.u64Cr4Fixed1);
    79387938        rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR4, &u32Val);
    79397939        AssertRCBreak(rc);
     
    83548354        AssertRCBreak(rc);
    83558355        HMVMX_CHECK_BREAK(   !u32ActivityState
    8356                           || (u32ActivityState & MSR_IA32_VMX_MISC_ACTIVITY_STATES(pVM->hm.s.vmx.msr.vmx_misc)),
     8356                          || (u32ActivityState & MSR_IA32_VMX_MISC_ACTIVITY_STATES(pVM->hm.s.vmx.msr.u64Misc)),
    83578357                             VMX_IGS_ACTIVITY_STATE_INVALID);
    83588358        HMVMX_CHECK_BREAK(   !(pCtx->ss.Attr.n.u2Dpl)
     
    95129512#if 0 /* Not quite ready, seem iSegReg assertion trigger once... Do we perhaps need to always read that in longjmp / preempt scenario? */
    95139513        AssertReturn(pMixedCtx->dx == uIOPort, VERR_HMVMX_IPE_2);
    9514         if (MSR_IA32_VMX_BASIC_INFO_VMCS_INS_OUTS(pVM->hm.s.vmx.msr.vmx_basic_info))
     9514        if (MSR_IA32_VMX_BASIC_INFO_VMCS_INS_OUTS(pVM->hm.s.vmx.msr.u64BasicInfo))
    95159515        {
    95169516            rc2  = hmR0VmxReadExitIntrInfoVmcs(pVCpu, pVmxTransient);
  • trunk/src/VBox/VMM/VMMR3/HM.cpp

    r48210 r48212  
    896896    {
    897897        LogRel(("HM: Failed to initialize VT-x / AMD-V: %Rrc\n", pVM->hm.s.lLastError));
    898         LogRel(("HM: VMX MSR_IA32_FEATURE_CONTROL=%RX64\n", pVM->hm.s.vmx.msr.feature_ctrl));
     898        LogRel(("HM: VMX MSR_IA32_FEATURE_CONTROL=%RX64\n", pVM->hm.s.vmx.msr.u64FeatureCtrl));
    899899        switch (pVM->hm.s.lLastError)
    900900        {
     
    969969
    970970    Log(("pVM->hm.s.vmx.fSupported = %d\n", pVM->hm.s.vmx.fSupported));
    971     AssertLogRelReturn(pVM->hm.s.vmx.msr.feature_ctrl != 0, VERR_HM_IPE_4);
     971    AssertLogRelReturn(pVM->hm.s.vmx.msr.u64FeatureCtrl != 0, VERR_HM_IPE_4);
    972972
    973973    uint64_t    val;
     
    977977    LogRel(("HM: Using VT-x implementation 2.0!\n"));
    978978    LogRel(("HM: Host CR4                        = %#RX64\n", pVM->hm.s.vmx.u64HostCr4));
    979     LogRel(("HM: MSR_IA32_FEATURE_CONTROL        = %#RX64\n", pVM->hm.s.vmx.msr.feature_ctrl));
    980     LogRel(("HM: MSR_IA32_VMX_BASIC_INFO         = %#RX64\n", pVM->hm.s.vmx.msr.vmx_basic_info));
    981     LogRel(("HM: VMCS id                         = %#x\n", MSR_IA32_VMX_BASIC_INFO_VMCS_ID(pVM->hm.s.vmx.msr.vmx_basic_info)));
    982     LogRel(("HM: VMCS size                       = %u\n", MSR_IA32_VMX_BASIC_INFO_VMCS_SIZE(pVM->hm.s.vmx.msr.vmx_basic_info)));
    983     LogRel(("HM: VMCS physical address limit     = %s\n", MSR_IA32_VMX_BASIC_INFO_VMCS_PHYS_WIDTH(pVM->hm.s.vmx.msr.vmx_basic_info) ? "< 4 GB" : "None"));
    984     LogRel(("HM: VMCS memory type                = %#x\n", MSR_IA32_VMX_BASIC_INFO_VMCS_MEM_TYPE(pVM->hm.s.vmx.msr.vmx_basic_info)));
    985     LogRel(("HM: Dual-monitor treatment support  = %RTbool\n", !!MSR_IA32_VMX_BASIC_INFO_VMCS_DUAL_MON(pVM->hm.s.vmx.msr.vmx_basic_info)));
    986     LogRel(("HM: OUTS & INS instruction-info     = %RTbool\n", !!MSR_IA32_VMX_BASIC_INFO_VMCS_INS_OUTS(pVM->hm.s.vmx.msr.vmx_basic_info)));
     979    LogRel(("HM: MSR_IA32_FEATURE_CONTROL        = %#RX64\n", pVM->hm.s.vmx.msr.u64FeatureCtrl));
     980    LogRel(("HM: MSR_IA32_VMX_BASIC_INFO         = %#RX64\n", pVM->hm.s.vmx.msr.u64BasicInfo));
     981    LogRel(("HM:    VMCS id                                  = %#x\n", MSR_IA32_VMX_BASIC_INFO_VMCS_ID(pVM->hm.s.vmx.msr.u64BasicInfo)));
     982    LogRel(("HM:    VMCS size                                = %u\n", MSR_IA32_VMX_BASIC_INFO_VMCS_SIZE(pVM->hm.s.vmx.msr.u64BasicInfo)));
     983    LogRel(("HM:    VMCS physical address limit              = %s\n", MSR_IA32_VMX_BASIC_INFO_VMCS_PHYS_WIDTH(pVM->hm.s.vmx.msr.u64BasicInfo) ? "< 4 GB" : "None"));
     984    LogRel(("HM:    VMCS memory type                         = %#x\n", MSR_IA32_VMX_BASIC_INFO_VMCS_MEM_TYPE(pVM->hm.s.vmx.msr.u64BasicInfo)));
     985    LogRel(("HM:    Dual-monitor treatment support           = %RTbool\n", !!MSR_IA32_VMX_BASIC_INFO_VMCS_DUAL_MON(pVM->hm.s.vmx.msr.u64BasicInfo)));
     986    LogRel(("HM:    OUTS & INS instruction-info              = %RTbool\n", !!MSR_IA32_VMX_BASIC_INFO_VMCS_INS_OUTS(pVM->hm.s.vmx.msr.u64BasicInfo)));
    987987    LogRel(("HM: Max resume loops                = %u\n", pVM->hm.s.cMaxResumeLoops));
    988988
    989     LogRel(("HM: MSR_IA32_VMX_PINBASED_CTLS      = %#RX64\n", pVM->hm.s.vmx.msr.vmx_pin_ctls.u));
    990     val = pVM->hm.s.vmx.msr.vmx_pin_ctls.n.allowed1;
    991     zap = pVM->hm.s.vmx.msr.vmx_pin_ctls.n.disallowed0;
     989    LogRel(("HM: MSR_IA32_VMX_PINBASED_CTLS      = %#RX64\n", pVM->hm.s.vmx.msr.VmxPinCtls.u));
     990    val = pVM->hm.s.vmx.msr.VmxPinCtls.n.allowed1;
     991    zap = pVM->hm.s.vmx.msr.VmxPinCtls.n.disallowed0;
    992992    HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PIN_EXEC_EXT_INT_EXIT);
    993993    HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PIN_EXEC_NMI_EXIT);
     
    995995    HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PIN_EXEC_PREEMPT_TIMER);
    996996
    997     LogRel(("HM: MSR_IA32_VMX_PROCBASED_CTLS     = %#RX64\n", pVM->hm.s.vmx.msr.vmx_proc_ctls.u));
    998     val = pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1;
    999     zap = pVM->hm.s.vmx.msr.vmx_proc_ctls.n.disallowed0;
     997    LogRel(("HM: MSR_IA32_VMX_PROCBASED_CTLS     = %#RX64\n", pVM->hm.s.vmx.msr.VmxProcCtls.u));
     998    val = pVM->hm.s.vmx.msr.VmxProcCtls.n.allowed1;
     999    zap = pVM->hm.s.vmx.msr.VmxProcCtls.n.disallowed0;
    10001000    HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT);
    10011001    HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_USE_TSC_OFFSETTING);
     
    10191019    HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_PAUSE_EXIT);
    10201020    HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL);
    1021     if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
    1022     {
    1023         LogRel(("HM: MSR_IA32_VMX_PROCBASED_CTLS2    = %#RX64\n", pVM->hm.s.vmx.msr.vmx_proc_ctls2.u));
    1024         val = pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1;
    1025         zap = pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.disallowed0;
     1021    if (pVM->hm.s.vmx.msr.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
     1022    {
     1023        LogRel(("HM: MSR_IA32_VMX_PROCBASED_CTLS2    = %#RX64\n", pVM->hm.s.vmx.msr.VmxProcCtls2.u));
     1024        val = pVM->hm.s.vmx.msr.VmxProcCtls2.n.allowed1;
     1025        zap = pVM->hm.s.vmx.msr.VmxProcCtls2.n.disallowed0;
    10261026        HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC);
    10271027        HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC2_EPT);
     
    10381038    }
    10391039
    1040     LogRel(("HM: MSR_IA32_VMX_ENTRY_CTLS         = %#RX64\n", pVM->hm.s.vmx.msr.vmx_entry.u));
    1041     val = pVM->hm.s.vmx.msr.vmx_entry.n.allowed1;
    1042     zap = pVM->hm.s.vmx.msr.vmx_entry.n.disallowed0;
     1040    LogRel(("HM: MSR_IA32_VMX_ENTRY_CTLS         = %#RX64\n", pVM->hm.s.vmx.msr.VmxEntry.u));
     1041    val = pVM->hm.s.vmx.msr.VmxEntry.n.allowed1;
     1042    zap = pVM->hm.s.vmx.msr.VmxEntry.n.disallowed0;
    10431043    HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_ENTRY_LOAD_DEBUG);
    10441044    HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST);
     
    10491049    HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_EFER_MSR);
    10501050
    1051     LogRel(("HM: MSR_IA32_VMX_EXIT_CTLS          = %#RX64\n", pVM->hm.s.vmx.msr.vmx_exit.u));
    1052     val = pVM->hm.s.vmx.msr.vmx_exit.n.allowed1;
    1053     zap = pVM->hm.s.vmx.msr.vmx_exit.n.disallowed0;
     1051    LogRel(("HM: MSR_IA32_VMX_EXIT_CTLS          = %#RX64\n", pVM->hm.s.vmx.msr.VmxExit.u));
     1052    val = pVM->hm.s.vmx.msr.VmxExit.n.allowed1;
     1053    zap = pVM->hm.s.vmx.msr.VmxExit.n.disallowed0;
    10541054    HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_EXIT_SAVE_DEBUG);
    10551055    HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE);
     
    10621062    HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_EXIT_SAVE_VMX_PREEMPT_TIMER);
    10631063
    1064     if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps)
    1065     {
    1066         val = pVM->hm.s.vmx.msr.vmx_ept_vpid_caps;
     1064    if (pVM->hm.s.vmx.msr.u64EptVpidCaps)
     1065    {
     1066        val = pVM->hm.s.vmx.msr.u64EptVpidCaps;
    10671067        LogRel(("HM: MSR_IA32_VMX_EPT_VPID_CAP       = %#RX64\n", val));
    10681068        HMVMX_REPORT_CAPABILITY(val, MSR_IA32_VMX_EPT_VPID_CAP_RWX_X_ONLY);
     
    10931093    }
    10941094
    1095     LogRel(("HM: MSR_IA32_VMX_MISC               = %#RX64\n", pVM->hm.s.vmx.msr.vmx_misc));
    1096     if (MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT(pVM->hm.s.vmx.msr.vmx_misc) == pVM->hm.s.vmx.cPreemptTimerShift)
     1095    LogRel(("HM: MSR_IA32_VMX_MISC               = %#RX64\n", pVM->hm.s.vmx.msr.u64Misc));
     1096    if (MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT(pVM->hm.s.vmx.msr.u64Misc) == pVM->hm.s.vmx.cPreemptTimerShift)
    10971097    {
    10981098        LogRel(("HM:    MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT        = %#x\n",
    1099                 MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT(pVM->hm.s.vmx.msr.vmx_misc)));
     1099                MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT(pVM->hm.s.vmx.msr.u64Misc)));
    11001100    }
    11011101    else
    11021102    {
    11031103        LogRel(("HM:    MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT        = %#x - erratum detected, using %#x instead\n",
    1104                 MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT(pVM->hm.s.vmx.msr.vmx_misc), pVM->hm.s.vmx.cPreemptTimerShift));
    1105     }
    1106 
    1107     LogRel(("HM:    MSR_IA32_VMX_MISC_STORE_EFERLMA_VMEXIT   = %RTbool\n", !!MSR_IA32_VMX_MISC_STORE_EFERLMA_VMEXIT(pVM->hm.s.vmx.msr.vmx_misc)));
    1108     LogRel(("HM:    MSR_IA32_VMX_MISC_ACTIVITY_STATES        = %#x\n", MSR_IA32_VMX_MISC_ACTIVITY_STATES(pVM->hm.s.vmx.msr.vmx_misc)));
    1109     LogRel(("HM:    MSR_IA32_VMX_MISC_CR3_TARGET             = %#x\n", MSR_IA32_VMX_MISC_CR3_TARGET(pVM->hm.s.vmx.msr.vmx_misc)));
    1110     LogRel(("HM:    MSR_IA32_VMX_MISC_MAX_MSR                = %u\n", MSR_IA32_VMX_MISC_MAX_MSR(pVM->hm.s.vmx.msr.vmx_misc)));
    1111     LogRel(("HM:    MSR_IA32_VMX_MISC_RDMSR_SMBASE_MSR_SMM   = %RTbool\n", !!MSR_IA32_VMX_MISC_RDMSR_SMBASE_MSR_SMM(pVM->hm.s.vmx.msr.vmx_misc)));
    1112     LogRel(("HM:    MSR_IA32_VMX_MISC_SMM_MONITOR_CTL_B2     = %RTbool\n", !!MSR_IA32_VMX_MISC_SMM_MONITOR_CTL_B2(pVM->hm.s.vmx.msr.vmx_misc)));
    1113     LogRel(("HM:    MSR_IA32_VMX_MISC_VMWRITE_VMEXIT_INFO    = %RTbool\n", !!MSR_IA32_VMX_MISC_VMWRITE_VMEXIT_INFO(pVM->hm.s.vmx.msr.vmx_misc)));
    1114     LogRel(("HM:    MSR_IA32_VMX_MISC_MSEG_ID                = %#x\n", MSR_IA32_VMX_MISC_MSEG_ID(pVM->hm.s.vmx.msr.vmx_misc)));
     1104                MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT(pVM->hm.s.vmx.msr.u64Misc), pVM->hm.s.vmx.cPreemptTimerShift));
     1105    }
     1106
     1107    val = pVM->hm.s.vmx.msr.u64Misc;
     1108    LogRel(("HM:    MSR_IA32_VMX_MISC_STORE_EFERLMA_VMEXIT   = %RTbool\n", !!MSR_IA32_VMX_MISC_STORE_EFERLMA_VMEXIT(val)));
     1109    LogRel(("HM:    MSR_IA32_VMX_MISC_ACTIVITY_STATES        = %#x\n", MSR_IA32_VMX_MISC_ACTIVITY_STATES(val)));
     1110    LogRel(("HM:    MSR_IA32_VMX_MISC_CR3_TARGET             = %#x\n", MSR_IA32_VMX_MISC_CR3_TARGET(val)));
     1111    LogRel(("HM:    MSR_IA32_VMX_MISC_MAX_MSR                = %u\n", MSR_IA32_VMX_MISC_MAX_MSR(val)));
     1112    LogRel(("HM:    MSR_IA32_VMX_MISC_RDMSR_SMBASE_MSR_SMM   = %RTbool\n", !!MSR_IA32_VMX_MISC_RDMSR_SMBASE_MSR_SMM(val)));
     1113    LogRel(("HM:    MSR_IA32_VMX_MISC_SMM_MONITOR_CTL_B2     = %RTbool\n", !!MSR_IA32_VMX_MISC_SMM_MONITOR_CTL_B2(val)));
     1114    LogRel(("HM:    MSR_IA32_VMX_MISC_VMWRITE_VMEXIT_INFO    = %RTbool\n", !!MSR_IA32_VMX_MISC_VMWRITE_VMEXIT_INFO(val)));
     1115    LogRel(("HM:    MSR_IA32_VMX_MISC_MSEG_ID                = %#x\n", MSR_IA32_VMX_MISC_MSEG_ID(val)));
    11151116
    11161117    /* Paranoia */
    1117     AssertRelease(MSR_IA32_VMX_MISC_MAX_MSR(pVM->hm.s.vmx.msr.vmx_misc) >= 512);
    1118 
    1119     LogRel(("HM: MSR_IA32_VMX_CR0_FIXED0         = %#RX64\n", pVM->hm.s.vmx.msr.vmx_cr0_fixed0));
    1120     LogRel(("HM: MSR_IA32_VMX_CR0_FIXED1         = %#RX64\n", pVM->hm.s.vmx.msr.vmx_cr0_fixed1));
    1121     LogRel(("HM: MSR_IA32_VMX_CR4_FIXED0         = %#RX64\n", pVM->hm.s.vmx.msr.vmx_cr4_fixed0));
    1122     LogRel(("HM: MSR_IA32_VMX_CR4_FIXED1         = %#RX64\n", pVM->hm.s.vmx.msr.vmx_cr4_fixed1));
    1123     LogRel(("HM: MSR_IA32_VMX_VMCS_ENUM          = %#RX64\n", pVM->hm.s.vmx.msr.vmx_vmcs_enum));
    1124     LogRel(("HM:    MSR_IA32_VMX_VMCS_ENUM_HIGHEST_INDEX     = %#x\n", MSR_IA32_VMX_VMCS_ENUM_HIGHEST_INDEX(pVM->hm.s.vmx.msr.vmx_vmcs_enum)));
    1125 
    1126     val = pVM->hm.s.vmx.msr.vmx_vmfunc;
     1118    AssertRelease(MSR_IA32_VMX_MISC_MAX_MSR(pVM->hm.s.vmx.msr.u64Misc) >= 512);
     1119
     1120    LogRel(("HM: MSR_IA32_VMX_CR0_FIXED0         = %#RX64\n", pVM->hm.s.vmx.msr.u64Cr0Fixed0));
     1121    LogRel(("HM: MSR_IA32_VMX_CR0_FIXED1         = %#RX64\n", pVM->hm.s.vmx.msr.u64Cr0Fixed1));
     1122    LogRel(("HM: MSR_IA32_VMX_CR4_FIXED0         = %#RX64\n", pVM->hm.s.vmx.msr.u64Cr4Fixed0));
     1123    LogRel(("HM: MSR_IA32_VMX_CR4_FIXED1         = %#RX64\n", pVM->hm.s.vmx.msr.u64Cr4Fixed1));
     1124
     1125    val = pVM->hm.s.vmx.msr.u64VmcsEnum;
     1126    LogRel(("HM: MSR_IA32_VMX_VMCS_ENUM          = %#RX64\n", val));
     1127    LogRel(("HM:    MSR_IA32_VMX_VMCS_ENUM_HIGHEST_INDEX     = %#x\n", MSR_IA32_VMX_VMCS_ENUM_HIGHEST_INDEX(val)));
     1128
     1129    val = pVM->hm.s.vmx.msr.u64Vmfunc;
    11271130    if (val)
    11281131    {
     
    11391142    }
    11401143
    1141     if (pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_EPT)
     1144    if (pVM->hm.s.vmx.msr.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_EPT)
    11421145        pVM->hm.s.fNestedPaging = pVM->hm.s.fAllowNestedPaging;
    11431146
    1144     if (pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VPID)
     1147    if (pVM->hm.s.vmx.msr.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VPID)
    11451148        pVM->hm.s.vmx.fVpid = pVM->hm.s.vmx.fAllowVpid;
    11461149
     
    11501153     * in Nehalems and secondary VM exec. controls should be supported in all of them, but nonetheless it's Intel...
    11511154     */
    1152     if (   !(pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
     1155    if (   !(pVM->hm.s.vmx.msr.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
    11531156        && CPUMGetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_RDTSCP))
    11541157    {
     
    11601163    if (    pVM->hm.s.vmx.fAllowUnrestricted
    11611164        &&  pVM->hm.s.fNestedPaging
    1162         &&  (pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_UNRESTRICTED_GUEST))
     1165        &&  (pVM->hm.s.vmx.msr.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_UNRESTRICTED_GUEST))
    11631166    {
    11641167        pVM->hm.s.vmx.fUnrestrictedGuest = true;
     
    25642567
    25652568        /* if bit N is set in cr0_fixed0, then it must be set in the guest's cr0. */
    2566         mask = (uint32_t)pVM->hm.s.vmx.msr.vmx_cr0_fixed0;
     2569        mask = (uint32_t)pVM->hm.s.vmx.msr.u64Cr0Fixed0;
    25672570        /* Note: We ignore the NE bit here on purpose; see vmmr0\hmr0.cpp for details. */
    25682571        mask &= ~X86_CR0_NE;
     
    25822585
    25832586        /* if bit N is cleared in cr0_fixed1, then it must be zero in the guest's cr0. */
    2584         mask = (uint32_t)~pVM->hm.s.vmx.msr.vmx_cr0_fixed1;
     2587        mask = (uint32_t)~pVM->hm.s.vmx.msr.u64Cr0Fixed1;
    25852588        if ((pCtx->cr0 & mask) != 0)
    25862589            return false;
    25872590
    25882591        /* if bit N is set in cr4_fixed0, then it must be set in the guest's cr4. */
    2589         mask  = (uint32_t)pVM->hm.s.vmx.msr.vmx_cr4_fixed0;
     2592        mask  = (uint32_t)pVM->hm.s.vmx.msr.u64Cr4Fixed0;
    25902593        mask &= ~X86_CR4_VMXE;
    25912594        if ((pCtx->cr4 & mask) != mask)
     
    25932596
    25942597        /* if bit N is cleared in cr4_fixed1, then it must be zero in the guest's cr4. */
    2595         mask = (uint32_t)~pVM->hm.s.vmx.msr.vmx_cr4_fixed1;
     2598        mask = (uint32_t)~pVM->hm.s.vmx.msr.u64Cr4Fixed1;
    25962599        if ((pCtx->cr4 & mask) != 0)
    25972600            return false;
     
    29452948    if (iStatusCode == VERR_VMX_UNABLE_TO_START_VM)
    29462949    {
    2947         LogRel(("VERR_VMX_UNABLE_TO_START_VM: VM-entry allowed    %#RX32\n", pVM->hm.s.vmx.msr.vmx_entry.n.allowed1));
    2948         LogRel(("VERR_VMX_UNABLE_TO_START_VM: VM-entry disallowed %#RX32\n", pVM->hm.s.vmx.msr.vmx_entry.n.disallowed0));
     2950        LogRel(("VERR_VMX_UNABLE_TO_START_VM: VM-entry allowed    %#RX32\n", pVM->hm.s.vmx.msr.VmxEntry.n.allowed1));
     2951        LogRel(("VERR_VMX_UNABLE_TO_START_VM: VM-entry disallowed %#RX32\n", pVM->hm.s.vmx.msr.VmxEntry.n.disallowed0));
    29492952    }
    29502953}
  • trunk/src/VBox/VMM/include/HMInternal.h

    r48210 r48212  
    362362        struct
    363363        {
    364             uint64_t                feature_ctrl;
    365             uint64_t                vmx_basic_info;
    366             VMX_CAPABILITY          vmx_pin_ctls;
    367             VMX_CAPABILITY          vmx_proc_ctls;
    368             VMX_CAPABILITY          vmx_proc_ctls2;
    369             VMX_CAPABILITY          vmx_exit;
    370             VMX_CAPABILITY          vmx_entry;
    371             uint64_t                vmx_misc;
    372             uint64_t                vmx_cr0_fixed0;
    373             uint64_t                vmx_cr0_fixed1;
    374             uint64_t                vmx_cr4_fixed0;
    375             uint64_t                vmx_cr4_fixed1;
    376             uint64_t                vmx_vmcs_enum;
    377             uint64_t                vmx_vmfunc;
    378             uint64_t                vmx_ept_vpid_caps;
     364            uint64_t                u64FeatureCtrl;
     365            uint64_t                u64BasicInfo;
     366            VMX_CAPABILITY          VmxPinCtls;
     367            VMX_CAPABILITY          VmxProcCtls;
     368            VMX_CAPABILITY          VmxProcCtls2;
     369            VMX_CAPABILITY          VmxExit;
     370            VMX_CAPABILITY          VmxEntry;
     371            uint64_t                u64Misc;
     372            uint64_t                u64Cr0Fixed0;
     373            uint64_t                u64Cr0Fixed1;
     374            uint64_t                u64Cr4Fixed0;
     375            uint64_t                u64Cr4Fixed1;
     376            uint64_t                u64VmcsEnum;
     377            uint64_t                u64Vmfunc;
     378            uint64_t                u64EptVpidCaps;
    379379        } msr;
    380380
  • trunk/src/VBox/VMM/testcase/tstVMStructSize.cpp

    r48210 r48212  
    406406    CHECK_MEMBER_ALIGNMENT(HM, uMaxAsid, 8);
    407407    CHECK_MEMBER_ALIGNMENT(HM, vmx.u64HostCr4, 8);
    408     CHECK_MEMBER_ALIGNMENT(HM, vmx.msr.feature_ctrl, 8);
     408    CHECK_MEMBER_ALIGNMENT(HM, vmx.msr.u64FeatureCtrl, 8);
    409409    CHECK_MEMBER_ALIGNMENT(HM, StatTprPatchSuccess, 8);
    410410    CHECK_MEMBER_ALIGNMENT(HMCPU, StatEntry, 8);
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette