Changeset 48212 in vbox
- Timestamp:
- Aug 30, 2013 11:02:22 PM (11 years ago)
- Location:
- trunk/src/VBox/VMM
- Files:
-
- 5 edited
Legend:
- Unmodified
- Added
- Removed
-
trunk/src/VBox/VMM/VMMR0/HMR0.cpp
r48211 r48212 1249 1249 pVM->hm.s.vmx.fUsePreemptTimer = g_HvmR0.vmx.fUsePreemptTimer; 1250 1250 pVM->hm.s.vmx.cPreemptTimerShift = g_HvmR0.vmx.cPreemptTimerShift; 1251 pVM->hm.s.vmx.msr. feature_ctrl= g_HvmR0.vmx.msr.u64FeatureCtrl;1251 pVM->hm.s.vmx.msr.u64FeatureCtrl = g_HvmR0.vmx.msr.u64FeatureCtrl; 1252 1252 pVM->hm.s.vmx.u64HostCr4 = g_HvmR0.vmx.u64HostCr4; 1253 1253 pVM->hm.s.vmx.u64HostEfer = g_HvmR0.vmx.u64HostEfer; 1254 pVM->hm.s.vmx.msr. vmx_basic_info= g_HvmR0.vmx.msr.u64BasicInfo;1255 pVM->hm.s.vmx.msr. vmx_pin_ctls= g_HvmR0.vmx.msr.VmxPinCtls;1256 pVM->hm.s.vmx.msr. vmx_proc_ctls= g_HvmR0.vmx.msr.VmxProcCtls;1257 pVM->hm.s.vmx.msr. vmx_proc_ctls2= g_HvmR0.vmx.msr.VmxProcCtls2;1258 pVM->hm.s.vmx.msr. vmx_exit= g_HvmR0.vmx.msr.VmxExit;1259 pVM->hm.s.vmx.msr. vmx_entry= g_HvmR0.vmx.msr.VmxEntry;1260 pVM->hm.s.vmx.msr. vmx_misc= g_HvmR0.vmx.msr.u64Misc;1261 pVM->hm.s.vmx.msr. vmx_cr0_fixed0= g_HvmR0.vmx.msr.u64Cr0Fixed0;1262 pVM->hm.s.vmx.msr. vmx_cr0_fixed1= g_HvmR0.vmx.msr.u64Cr0Fixed1;1263 pVM->hm.s.vmx.msr. vmx_cr4_fixed0= g_HvmR0.vmx.msr.u64Cr4Fixed0;1264 pVM->hm.s.vmx.msr. vmx_cr4_fixed1= g_HvmR0.vmx.msr.u64Cr4Fixed1;1265 pVM->hm.s.vmx.msr. vmx_vmcs_enum= g_HvmR0.vmx.msr.u64VmcsEnum;1266 pVM->hm.s.vmx.msr. vmx_vmfunc= g_HvmR0.vmx.msr.u64Vmfunc;1267 pVM->hm.s.vmx.msr. 
vmx_ept_vpid_caps= g_HvmR0.vmx.msr.u64EptVpidCaps;1254 pVM->hm.s.vmx.msr.u64BasicInfo = g_HvmR0.vmx.msr.u64BasicInfo; 1255 pVM->hm.s.vmx.msr.VmxPinCtls = g_HvmR0.vmx.msr.VmxPinCtls; 1256 pVM->hm.s.vmx.msr.VmxProcCtls = g_HvmR0.vmx.msr.VmxProcCtls; 1257 pVM->hm.s.vmx.msr.VmxProcCtls2 = g_HvmR0.vmx.msr.VmxProcCtls2; 1258 pVM->hm.s.vmx.msr.VmxExit = g_HvmR0.vmx.msr.VmxExit; 1259 pVM->hm.s.vmx.msr.VmxEntry = g_HvmR0.vmx.msr.VmxEntry; 1260 pVM->hm.s.vmx.msr.u64Misc = g_HvmR0.vmx.msr.u64Misc; 1261 pVM->hm.s.vmx.msr.u64Cr0Fixed0 = g_HvmR0.vmx.msr.u64Cr0Fixed0; 1262 pVM->hm.s.vmx.msr.u64Cr0Fixed1 = g_HvmR0.vmx.msr.u64Cr0Fixed1; 1263 pVM->hm.s.vmx.msr.u64Cr4Fixed0 = g_HvmR0.vmx.msr.u64Cr4Fixed0; 1264 pVM->hm.s.vmx.msr.u64Cr4Fixed1 = g_HvmR0.vmx.msr.u64Cr4Fixed1; 1265 pVM->hm.s.vmx.msr.u64VmcsEnum = g_HvmR0.vmx.msr.u64VmcsEnum; 1266 pVM->hm.s.vmx.msr.u64Vmfunc = g_HvmR0.vmx.msr.u64Vmfunc; 1267 pVM->hm.s.vmx.msr.u64EptVpidCaps = g_HvmR0.vmx.msr.u64EptVpidCaps; 1268 1268 pVM->hm.s.svm.msrHwcr = g_HvmR0.svm.msrHwcr; 1269 1269 pVM->hm.s.svm.u32Rev = g_HvmR0.svm.u32Rev; -
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
r48205 r48212 729 729 { 730 730 /* Write the VMCS revision dword to the VMXON region. */ 731 *(uint32_t *)pvCpuPage = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(pVM->hm.s.vmx.msr. vmx_basic_info);731 *(uint32_t *)pvCpuPage = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(pVM->hm.s.vmx.msr.u64BasicInfo); 732 732 } 733 733 … … 839 839 #endif 840 840 841 if (pVM->hm.s.vmx.msr. vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)841 if (pVM->hm.s.vmx.msr.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS) 842 842 hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjMsrBitmap, &pVCpu->hm.s.vmx.pvMsrBitmap, &pVCpu->hm.s.vmx.HCPhysMsrBitmap); 843 843 … … 907 907 908 908 /* Allocate the APIC-access page for trapping APIC accesses from the guest. */ 909 if (pVM->hm.s.vmx.msr. vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)909 if (pVM->hm.s.vmx.msr.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC) 910 910 { 911 911 rc = hmR0VmxPageAllocZ(&pVM->hm.s.vmx.hMemObjApicAccess, (PRTR0PTR)&pVM->hm.s.vmx.pbApicAccess, … … 924 924 925 925 /* Allocate the VM control structure (VMCS). */ 926 AssertReturn(MSR_IA32_VMX_BASIC_INFO_VMCS_SIZE(pVM->hm.s.vmx.msr. vmx_basic_info) <= PAGE_SIZE, VERR_INTERNAL_ERROR);926 AssertReturn(MSR_IA32_VMX_BASIC_INFO_VMCS_SIZE(pVM->hm.s.vmx.msr.u64BasicInfo) <= PAGE_SIZE, VERR_INTERNAL_ERROR); 927 927 rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjVmcs, &pVCpu->hm.s.vmx.pvVmcs, &pVCpu->hm.s.vmx.HCPhysVmcs); 928 928 if (RT_FAILURE(rc)) … … 930 930 931 931 /* Allocate the Virtual-APIC page for transparent TPR accesses. */ 932 if (pVM->hm.s.vmx.msr. vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)932 if (pVM->hm.s.vmx.msr.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW) 933 933 { 934 934 rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjVirtApic, (PRTR0PTR)&pVCpu->hm.s.vmx.pbVirtApic, … … 939 939 940 940 /* Allocate the MSR-bitmap if supported by the CPU. 
The MSR-bitmap is for transparent accesses of specific MSRs. */ 941 if (pVM->hm.s.vmx.msr. vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)941 if (pVM->hm.s.vmx.msr.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS) 942 942 { 943 943 rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjMsrBitmap, &pVCpu->hm.s.vmx.pvMsrBitmap, … … 1031 1031 { 1032 1032 /* We require ALL_CONTEXT flush-type to be available on the CPU. See hmR0VmxSetupTaggedTlb(). */ 1033 Assert(pVM->hm.s.vmx.msr. vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_ALL_CONTEXTS);1033 Assert(pVM->hm.s.vmx.msr.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_ALL_CONTEXTS); 1034 1034 hmR0VmxFlushEpt(pVM, NULL /* pVCpu */, VMX_FLUSH_EPT_ALL_CONTEXTS); 1035 1035 pCpu->fFlushAsidBeforeUse = false; … … 1042 1042 * when later we use a VM with NestedPaging. To fix this properly we will 1043 1043 * have to pass '&g_HvmR0' (see HMR0.cpp) to this function and read 1044 * ' vmx_ept_vpid_caps' from it. Sigh. */1044 * 'u64EptVpidCaps' from it. Sigh. */ 1045 1045 pCpu->fFlushAsidBeforeUse = true; 1046 1046 } … … 1226 1226 if (pVM->hm.s.vmx.fVpid) 1227 1227 { 1228 if (pVM->hm.s.vmx.msr. vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR)1228 if (pVM->hm.s.vmx.msr.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR) 1229 1229 { 1230 1230 hmR0VmxFlushVpid(pVM, pVCpu, VMX_FLUSH_VPID_INDIV_ADDR, GCVirt); … … 1383 1383 * as supported by the CPU. 1384 1384 */ 1385 if (pVM->hm.s.vmx.msr. vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR)1385 if (pVM->hm.s.vmx.msr.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR) 1386 1386 { 1387 1387 for (uint32_t i = 0; i < pVCpu->hm.s.TlbShootdown.cPages; i++) … … 1554 1554 { 1555 1555 /* Flush individual guest entries using VPID or as little as possible with EPT as supported by the CPU. */ 1556 if (pVM->hm.s.vmx.msr. 
vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR)1556 if (pVM->hm.s.vmx.msr.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR) 1557 1557 { 1558 1558 for (uint32_t i = 0; i < pVCpu->hm.s.TlbShootdown.cPages; i++) … … 1618 1618 if (pVM->hm.s.fNestedPaging) 1619 1619 { 1620 if (pVM->hm.s.vmx.msr. vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT)1621 { 1622 if (pVM->hm.s.vmx.msr. vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_SINGLE_CONTEXT)1620 if (pVM->hm.s.vmx.msr.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT) 1621 { 1622 if (pVM->hm.s.vmx.msr.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_SINGLE_CONTEXT) 1623 1623 pVM->hm.s.vmx.enmFlushEpt = VMX_FLUSH_EPT_SINGLE_CONTEXT; 1624 else if (pVM->hm.s.vmx.msr. vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_ALL_CONTEXTS)1624 else if (pVM->hm.s.vmx.msr.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_ALL_CONTEXTS) 1625 1625 pVM->hm.s.vmx.enmFlushEpt = VMX_FLUSH_EPT_ALL_CONTEXTS; 1626 1626 else … … 1632 1632 1633 1633 /* Make sure the write-back cacheable memory type for EPT is supported. */ 1634 if (!(pVM->hm.s.vmx.msr. vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_EMT_WB))1634 if (!(pVM->hm.s.vmx.msr.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_EMT_WB)) 1635 1635 { 1636 LogRel(("hmR0VmxSetupTaggedTlb: Unsupported EPTP memory type %#x.\n", pVM->hm.s.vmx.msr. vmx_ept_vpid_caps));1636 LogRel(("hmR0VmxSetupTaggedTlb: Unsupported EPTP memory type %#x.\n", pVM->hm.s.vmx.msr.u64EptVpidCaps)); 1637 1637 pVM->hm.s.vmx.enmFlushEpt = VMX_FLUSH_EPT_NOT_SUPPORTED; 1638 1638 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO; … … 1652 1652 if (pVM->hm.s.vmx.fVpid) 1653 1653 { 1654 if (pVM->hm.s.vmx.msr. vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID)1655 { 1656 if (pVM->hm.s.vmx.msr. 
vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT)1654 if (pVM->hm.s.vmx.msr.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID) 1655 { 1656 if (pVM->hm.s.vmx.msr.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT) 1657 1657 pVM->hm.s.vmx.enmFlushVpid = VMX_FLUSH_VPID_SINGLE_CONTEXT; 1658 else if (pVM->hm.s.vmx.msr. vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_ALL_CONTEXTS)1658 else if (pVM->hm.s.vmx.msr.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_ALL_CONTEXTS) 1659 1659 pVM->hm.s.vmx.enmFlushVpid = VMX_FLUSH_VPID_ALL_CONTEXTS; 1660 1660 else 1661 1661 { 1662 1662 /* Neither SINGLE nor ALL-context flush types for VPID is supported by the CPU. Ignore VPID capability. */ 1663 if (pVM->hm.s.vmx.msr. vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR)1663 if (pVM->hm.s.vmx.msr.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR) 1664 1664 LogRel(("hmR0VmxSetupTaggedTlb: Only INDIV_ADDR supported. Ignoring VPID.\n")); 1665 if (pVM->hm.s.vmx.msr. vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT_RETAIN_GLOBALS)1665 if (pVM->hm.s.vmx.msr.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT_RETAIN_GLOBALS) 1666 1666 LogRel(("hmR0VmxSetupTaggedTlb: Only SINGLE_CONTEXT_RETAIN_GLOBALS supported. Ignoring VPID.\n")); 1667 1667 pVM->hm.s.vmx.enmFlushVpid = VMX_FLUSH_VPID_NOT_SUPPORTED; … … 1705 1705 AssertPtr(pVCpu); 1706 1706 1707 uint32_t val = pVM->hm.s.vmx.msr. vmx_pin_ctls.n.disallowed0;/* Bits set here must always be set. */1708 uint32_t zap = pVM->hm.s.vmx.msr. vmx_pin_ctls.n.allowed1;/* Bits cleared here must always be cleared. */1707 uint32_t val = pVM->hm.s.vmx.msr.VmxPinCtls.n.disallowed0; /* Bits set here must always be set. */ 1708 uint32_t zap = pVM->hm.s.vmx.msr.VmxPinCtls.n.allowed1; /* Bits cleared here must always be cleared. */ 1709 1709 1710 1710 val |= VMX_VMCS_CTRL_PIN_EXEC_EXT_INT_EXIT /* External interrupts causes a VM-exits. 
*/ … … 1715 1715 if (pVM->hm.s.vmx.fUsePreemptTimer) 1716 1716 { 1717 Assert(pVM->hm.s.vmx.msr. vmx_pin_ctls.n.allowed1 & VMX_VMCS_CTRL_PIN_EXEC_PREEMPT_TIMER);1717 Assert(pVM->hm.s.vmx.msr.VmxPinCtls.n.allowed1 & VMX_VMCS_CTRL_PIN_EXEC_PREEMPT_TIMER); 1718 1718 val |= VMX_VMCS_CTRL_PIN_EXEC_PREEMPT_TIMER; 1719 1719 } … … 1722 1722 { 1723 1723 LogRel(("hmR0VmxSetupPinCtls: invalid pin-based VM-execution controls combo! cpu=%#RX64 val=%#RX64 zap=%#RX64\n", 1724 pVM->hm.s.vmx.msr. vmx_pin_ctls.n.disallowed0, val, zap));1724 pVM->hm.s.vmx.msr.VmxPinCtls.n.disallowed0, val, zap)); 1725 1725 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_PIN_EXEC; 1726 1726 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO; … … 1749 1749 1750 1750 int rc = VERR_INTERNAL_ERROR_5; 1751 uint32_t val = pVM->hm.s.vmx.msr. vmx_proc_ctls.n.disallowed0;/* Bits set here must be set in the VMCS. */1752 uint32_t zap = pVM->hm.s.vmx.msr. vmx_proc_ctls.n.allowed1;/* Bits cleared here must be cleared in the VMCS. */1751 uint32_t val = pVM->hm.s.vmx.msr.VmxProcCtls.n.disallowed0; /* Bits set here must be set in the VMCS. */ 1752 uint32_t zap = pVM->hm.s.vmx.msr.VmxProcCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */ 1753 1753 1754 1754 val |= VMX_VMCS_CTRL_PROC_EXEC_HLT_EXIT /* HLT causes a VM-exit. */ … … 1761 1761 1762 1762 /* We toggle VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT later, check if it's not -always- needed to be set or clear. */ 1763 if ( !(pVM->hm.s.vmx.msr. vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT)1764 || (pVM->hm.s.vmx.msr. vmx_proc_ctls.n.disallowed0 & VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT))1763 if ( !(pVM->hm.s.vmx.msr.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT) 1764 || (pVM->hm.s.vmx.msr.VmxProcCtls.n.disallowed0 & VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT)) 1765 1765 { 1766 1766 LogRel(("hmR0VmxSetupProcCtls: unsupported VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT combo!")); … … 1779 1779 1780 1780 /* Use TPR shadowing if supported by the CPU. 
*/ 1781 if (pVM->hm.s.vmx.msr. vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)1781 if (pVM->hm.s.vmx.msr.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW) 1782 1782 { 1783 1783 Assert(pVCpu->hm.s.vmx.HCPhysVirtApic); … … 1799 1799 1800 1800 /* Use MSR-bitmaps if supported by the CPU. */ 1801 if (pVM->hm.s.vmx.msr. vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)1801 if (pVM->hm.s.vmx.msr.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS) 1802 1802 { 1803 1803 val |= VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS; … … 1824 1824 1825 1825 /* Use the secondary processor-based VM-execution controls if supported by the CPU. */ 1826 if (pVM->hm.s.vmx.msr. vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)1826 if (pVM->hm.s.vmx.msr.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL) 1827 1827 val |= VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL; 1828 1828 … … 1830 1830 { 1831 1831 LogRel(("hmR0VmxSetupProcCtls: invalid processor-based VM-execution controls combo! cpu=%#RX64 val=%#RX64 zap=%#RX64\n", 1832 pVM->hm.s.vmx.msr. vmx_proc_ctls.n.disallowed0, val, zap));1832 pVM->hm.s.vmx.msr.VmxProcCtls.n.disallowed0, val, zap)); 1833 1833 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_PROC_EXEC; 1834 1834 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO; … … 1846 1846 if (RT_LIKELY(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)) 1847 1847 { 1848 val = pVM->hm.s.vmx.msr. vmx_proc_ctls2.n.disallowed0;/* Bits set here must be set in the VMCS. */1849 zap = pVM->hm.s.vmx.msr. vmx_proc_ctls2.n.allowed1;/* Bits cleared here must be cleared in the VMCS. */1850 1851 if (pVM->hm.s.vmx.msr. vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT)1848 val = pVM->hm.s.vmx.msr.VmxProcCtls2.n.disallowed0; /* Bits set here must be set in the VMCS. */ 1849 zap = pVM->hm.s.vmx.msr.VmxProcCtls2.n.allowed1; /* Bits cleared here must be cleared in the VMCS. 
*/ 1850 1851 if (pVM->hm.s.vmx.msr.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT) 1852 1852 val |= VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT; /* WBINVD causes a VM-exit. */ 1853 1853 … … 1861 1861 * See Intel spec. 25.4 "Changes to instruction behaviour in VMX non-root operation". 1862 1862 */ 1863 if (pVM->hm.s.vmx.msr. vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_INVPCID)1863 if (pVM->hm.s.vmx.msr.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_INVPCID) 1864 1864 val |= VMX_VMCS_CTRL_PROC_EXEC2_INVPCID; 1865 1865 } … … 1874 1874 /** @todo VIRT_X2APIC support, it's mutually exclusive with this. So must be 1875 1875 * done dynamically. */ 1876 if (pVM->hm.s.vmx.msr. vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)1876 if (pVM->hm.s.vmx.msr.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC) 1877 1877 { 1878 1878 Assert(pVM->hm.s.vmx.HCPhysApicAccess); … … 1883 1883 } 1884 1884 1885 if (pVM->hm.s.vmx.msr. vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)1885 if (pVM->hm.s.vmx.msr.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP) 1886 1886 { 1887 1887 val |= VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP; /* Enable RDTSCP support. */ 1888 if (pVM->hm.s.vmx.msr. vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)1888 if (pVM->hm.s.vmx.msr.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS) 1889 1889 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_TSC_AUX, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE); 1890 1890 } … … 1893 1893 { 1894 1894 LogRel(("hmR0VmxSetupProcCtls: invalid secondary processor-based VM-execution controls combo! " 1895 "cpu=%#RX64 val=%#RX64 zap=%#RX64\n", pVM->hm.s.vmx.msr. vmx_proc_ctls2.n.disallowed0, val, zap));1895 "cpu=%#RX64 val=%#RX64 zap=%#RX64\n", pVM->hm.s.vmx.msr.VmxProcCtls2.n.disallowed0, val, zap)); 1896 1896 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO; 1897 1897 } … … 2131 2131 2132 2132 /* Set revision dword at the beginning of the VMCS structure. 
*/ 2133 *(uint32_t *)pVCpu->hm.s.vmx.pvVmcs = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(pVM->hm.s.vmx.msr. vmx_basic_info);2133 *(uint32_t *)pVCpu->hm.s.vmx.pvVmcs = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(pVM->hm.s.vmx.msr.u64BasicInfo); 2134 2134 2135 2135 /* Initialize our VMCS region in memory, set the VMCS launch state to "clear". */ … … 2552 2552 2553 2553 /* Shouldn't ever happen but there -is- a number. We're well within the recommended 512. */ 2554 if (RT_UNLIKELY(cHostMsrs > MSR_IA32_VMX_MISC_MAX_MSR(pVM->hm.s.vmx.msr. vmx_misc)))2555 { 2556 LogRel(("cHostMsrs=%u Cpu=%u\n", cHostMsrs, (unsigned)MSR_IA32_VMX_MISC_MAX_MSR(pVM->hm.s.vmx.msr. vmx_misc)));2554 if (RT_UNLIKELY(cHostMsrs > MSR_IA32_VMX_MISC_MAX_MSR(pVM->hm.s.vmx.msr.u64Misc))) 2555 { 2556 LogRel(("cHostMsrs=%u Cpu=%u\n", cHostMsrs, (unsigned)MSR_IA32_VMX_MISC_MAX_MSR(pVM->hm.s.vmx.msr.u64Misc))); 2557 2557 pVCpu->hm.s.u32HMError = VMX_UFC_INSUFFICIENT_HOST_MSR_STORAGE; 2558 2558 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO; … … 2616 2616 { 2617 2617 PVM pVM = pVCpu->CTX_SUFF(pVM); 2618 uint32_t val = pVM->hm.s.vmx.msr. vmx_entry.n.disallowed0; /* Bits set here must be set in the VMCS. */2619 uint32_t zap = pVM->hm.s.vmx.msr. vmx_entry.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */2618 uint32_t val = pVM->hm.s.vmx.msr.VmxEntry.n.disallowed0; /* Bits set here must be set in the VMCS. */ 2619 uint32_t zap = pVM->hm.s.vmx.msr.VmxEntry.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */ 2620 2620 2621 2621 /* Load debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x capable CPUs only supports the 1-setting of this bit. */ … … 2641 2641 { 2642 2642 LogRel(("hmR0VmxLoadGuestEntryCtls: invalid VM-entry controls combo! cpu=%RX64 val=%RX64 zap=%RX64\n", 2643 pVM->hm.s.vmx.msr. 
vmx_entry.n.disallowed0, val, zap));2643 pVM->hm.s.vmx.msr.VmxEntry.n.disallowed0, val, zap)); 2644 2644 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_ENTRY; 2645 2645 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO; … … 2675 2675 { 2676 2676 PVM pVM = pVCpu->CTX_SUFF(pVM); 2677 uint32_t val = pVM->hm.s.vmx.msr. vmx_exit.n.disallowed0; /* Bits set here must be set in the VMCS. */2678 uint32_t zap = pVM->hm.s.vmx.msr. vmx_exit.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */2677 uint32_t val = pVM->hm.s.vmx.msr.VmxExit.n.disallowed0; /* Bits set here must be set in the VMCS. */ 2678 uint32_t zap = pVM->hm.s.vmx.msr.VmxExit.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */ 2679 2679 2680 2680 /* Save debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x CPUs only supported the 1-setting of this bit. */ … … 2706 2706 * VMX_VMCS_CTRL_EXIT_LOAD_HOST_EFER_MSR. */ 2707 2707 2708 if (pVM->hm.s.vmx.msr. vmx_exit.n.allowed1 & VMX_VMCS_CTRL_EXIT_SAVE_VMX_PREEMPT_TIMER)2708 if (pVM->hm.s.vmx.msr.VmxExit.n.allowed1 & VMX_VMCS_CTRL_EXIT_SAVE_VMX_PREEMPT_TIMER) 2709 2709 val |= VMX_VMCS_CTRL_EXIT_SAVE_VMX_PREEMPT_TIMER; 2710 2710 … … 2712 2712 { 2713 2713 LogRel(("hmR0VmxSetupProcCtls: invalid VM-exit controls combo! cpu=%RX64 val=%RX64 zap=%RX64\n", 2714 pVM->hm.s.vmx.msr. vmx_exit.n.disallowed0, val, zap));2714 pVM->hm.s.vmx.msr.VmxExit.n.disallowed0, val, zap)); 2715 2715 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_EXIT; 2716 2716 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO; … … 3088 3088 3089 3089 /* Set/clear the CR0 specific bits along with their exceptions (PE, PG, CD, NW). */ 3090 uint32_t uSetCR0 = (uint32_t)(pVM->hm.s.vmx.msr. vmx_cr0_fixed0 & pVM->hm.s.vmx.msr.vmx_cr0_fixed1);3091 uint32_t uZapCR0 = (uint32_t)(pVM->hm.s.vmx.msr. 
vmx_cr0_fixed0 | pVM->hm.s.vmx.msr.vmx_cr0_fixed1);3090 uint32_t uSetCR0 = (uint32_t)(pVM->hm.s.vmx.msr.u64Cr0Fixed0 & pVM->hm.s.vmx.msr.u64Cr0Fixed1); 3091 uint32_t uZapCR0 = (uint32_t)(pVM->hm.s.vmx.msr.u64Cr0Fixed0 | pVM->hm.s.vmx.msr.u64Cr0Fixed1); 3092 3092 if (pVM->hm.s.vmx.fUnrestrictedGuest) /* Exceptions for unrestricted-guests for fixed CR0 bits (PE, PG). */ 3093 3093 uSetCR0 &= ~(X86_CR0_PE | X86_CR0_PG); … … 3316 3316 3317 3317 /* We need to set and clear the CR4 specific bits here (mainly the X86_CR4_VMXE bit). */ 3318 uint64_t uSetCR4 = (pVM->hm.s.vmx.msr. vmx_cr4_fixed0 & pVM->hm.s.vmx.msr.vmx_cr4_fixed1);3319 uint64_t uZapCR4 = (pVM->hm.s.vmx.msr. vmx_cr4_fixed0 | pVM->hm.s.vmx.msr.vmx_cr4_fixed1);3318 uint64_t uSetCR4 = (pVM->hm.s.vmx.msr.u64Cr4Fixed0 & pVM->hm.s.vmx.msr.u64Cr4Fixed1); 3319 uint64_t uZapCR4 = (pVM->hm.s.vmx.msr.u64Cr4Fixed0 | pVM->hm.s.vmx.msr.u64Cr4Fixed1); 3320 3320 u32GuestCR4 |= uSetCR4; 3321 3321 u32GuestCR4 &= uZapCR4; … … 3379 3379 { 3380 3380 /* If the CPU supports the monitor trap flag, use it for single stepping in DBGF and avoid intercepting #DB. */ 3381 if (pVM->hm.s.vmx.msr. vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG)3381 if (pVM->hm.s.vmx.msr.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG) 3382 3382 { 3383 3383 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG; … … 3978 3978 3979 3979 /* Shouldn't ever happen but there -is- a number. We're well within the recommended 512. */ 3980 if (cGuestMsrs > MSR_IA32_VMX_MISC_MAX_MSR(pVM->hm.s.vmx.msr. vmx_misc))3980 if (cGuestMsrs > MSR_IA32_VMX_MISC_MAX_MSR(pVM->hm.s.vmx.msr.u64Misc)) 3981 3981 { 3982 3982 LogRel(("CPU autoload/store MSR count in VMCS exceeded cGuestMsrs=%u.\n", cGuestMsrs)); … … 6343 6343 DECLINLINE(void) hmR0VmxSetIntWindowExitVmcs(PVMCPU pVCpu) 6344 6344 { 6345 if (RT_LIKELY(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.msr. 
vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT))6345 if (RT_LIKELY(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.msr.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT)) 6346 6346 { 6347 6347 if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT)) … … 7913 7913 * CR0. 7914 7914 */ 7915 uint32_t uSetCR0 = (uint32_t)(pVM->hm.s.vmx.msr. vmx_cr0_fixed0 & pVM->hm.s.vmx.msr.vmx_cr0_fixed1);7916 uint32_t uZapCR0 = (uint32_t)(pVM->hm.s.vmx.msr. vmx_cr0_fixed0 | pVM->hm.s.vmx.msr.vmx_cr0_fixed1);7915 uint32_t uSetCR0 = (uint32_t)(pVM->hm.s.vmx.msr.u64Cr0Fixed0 & pVM->hm.s.vmx.msr.u64Cr0Fixed1); 7916 uint32_t uZapCR0 = (uint32_t)(pVM->hm.s.vmx.msr.u64Cr0Fixed0 | pVM->hm.s.vmx.msr.u64Cr0Fixed1); 7917 7917 /* Exceptions for unrestricted-guests for fixed CR0 bits (PE, PG). 7918 7918 See Intel spec. 26.3.1 "Checks on guest Guest Control Registers, Debug Registers and MSRs." */ … … 7934 7934 * CR4. 7935 7935 */ 7936 uint64_t uSetCR4 = (pVM->hm.s.vmx.msr. vmx_cr4_fixed0 & pVM->hm.s.vmx.msr.vmx_cr4_fixed1);7937 uint64_t uZapCR4 = (pVM->hm.s.vmx.msr. vmx_cr4_fixed0 | pVM->hm.s.vmx.msr.vmx_cr4_fixed1);7936 uint64_t uSetCR4 = (pVM->hm.s.vmx.msr.u64Cr4Fixed0 & pVM->hm.s.vmx.msr.u64Cr4Fixed1); 7937 uint64_t uZapCR4 = (pVM->hm.s.vmx.msr.u64Cr4Fixed0 | pVM->hm.s.vmx.msr.u64Cr4Fixed1); 7938 7938 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR4, &u32Val); 7939 7939 AssertRCBreak(rc); … … 8354 8354 AssertRCBreak(rc); 8355 8355 HMVMX_CHECK_BREAK( !u32ActivityState 8356 || (u32ActivityState & MSR_IA32_VMX_MISC_ACTIVITY_STATES(pVM->hm.s.vmx.msr. vmx_misc)),8356 || (u32ActivityState & MSR_IA32_VMX_MISC_ACTIVITY_STATES(pVM->hm.s.vmx.msr.u64Misc)), 8357 8357 VMX_IGS_ACTIVITY_STATE_INVALID); 8358 8358 HMVMX_CHECK_BREAK( !(pCtx->ss.Attr.n.u2Dpl) … … 9512 9512 #if 0 /* Not quite ready, seem iSegReg assertion trigger once... Do we perhaps need to always read that in longjmp / preempt scenario? 
*/ 9513 9513 AssertReturn(pMixedCtx->dx == uIOPort, VERR_HMVMX_IPE_2); 9514 if (MSR_IA32_VMX_BASIC_INFO_VMCS_INS_OUTS(pVM->hm.s.vmx.msr. vmx_basic_info))9514 if (MSR_IA32_VMX_BASIC_INFO_VMCS_INS_OUTS(pVM->hm.s.vmx.msr.u64BasicInfo)) 9515 9515 { 9516 9516 rc2 = hmR0VmxReadExitIntrInfoVmcs(pVCpu, pVmxTransient); -
trunk/src/VBox/VMM/VMMR3/HM.cpp
r48210 r48212 896 896 { 897 897 LogRel(("HM: Failed to initialize VT-x / AMD-V: %Rrc\n", pVM->hm.s.lLastError)); 898 LogRel(("HM: VMX MSR_IA32_FEATURE_CONTROL=%RX64\n", pVM->hm.s.vmx.msr. feature_ctrl));898 LogRel(("HM: VMX MSR_IA32_FEATURE_CONTROL=%RX64\n", pVM->hm.s.vmx.msr.u64FeatureCtrl)); 899 899 switch (pVM->hm.s.lLastError) 900 900 { … … 969 969 970 970 Log(("pVM->hm.s.vmx.fSupported = %d\n", pVM->hm.s.vmx.fSupported)); 971 AssertLogRelReturn(pVM->hm.s.vmx.msr. feature_ctrl != 0, VERR_HM_IPE_4);971 AssertLogRelReturn(pVM->hm.s.vmx.msr.u64FeatureCtrl != 0, VERR_HM_IPE_4); 972 972 973 973 uint64_t val; … … 977 977 LogRel(("HM: Using VT-x implementation 2.0!\n")); 978 978 LogRel(("HM: Host CR4 = %#RX64\n", pVM->hm.s.vmx.u64HostCr4)); 979 LogRel(("HM: MSR_IA32_FEATURE_CONTROL = %#RX64\n", pVM->hm.s.vmx.msr. feature_ctrl));980 LogRel(("HM: MSR_IA32_VMX_BASIC_INFO = %#RX64\n", pVM->hm.s.vmx.msr. vmx_basic_info));981 LogRel(("HM: VMCS id = %#x\n", MSR_IA32_VMX_BASIC_INFO_VMCS_ID(pVM->hm.s.vmx.msr.vmx_basic_info)));982 LogRel(("HM: VMCS size = %u\n", MSR_IA32_VMX_BASIC_INFO_VMCS_SIZE(pVM->hm.s.vmx.msr.vmx_basic_info)));983 LogRel(("HM: VMCS physical address limit = %s\n", MSR_IA32_VMX_BASIC_INFO_VMCS_PHYS_WIDTH(pVM->hm.s.vmx.msr.vmx_basic_info) ? 
"< 4 GB" : "None"));984 LogRel(("HM: VMCS memory type = %#x\n", MSR_IA32_VMX_BASIC_INFO_VMCS_MEM_TYPE(pVM->hm.s.vmx.msr.vmx_basic_info)));985 LogRel(("HM: Dual-monitor treatment support = %RTbool\n", !!MSR_IA32_VMX_BASIC_INFO_VMCS_DUAL_MON(pVM->hm.s.vmx.msr.vmx_basic_info)));986 LogRel(("HM: OUTS & INS instruction-info = %RTbool\n", !!MSR_IA32_VMX_BASIC_INFO_VMCS_INS_OUTS(pVM->hm.s.vmx.msr.vmx_basic_info)));979 LogRel(("HM: MSR_IA32_FEATURE_CONTROL = %#RX64\n", pVM->hm.s.vmx.msr.u64FeatureCtrl)); 980 LogRel(("HM: MSR_IA32_VMX_BASIC_INFO = %#RX64\n", pVM->hm.s.vmx.msr.u64BasicInfo)); 981 LogRel(("HM: VMCS id = %#x\n", MSR_IA32_VMX_BASIC_INFO_VMCS_ID(pVM->hm.s.vmx.msr.u64BasicInfo))); 982 LogRel(("HM: VMCS size = %u\n", MSR_IA32_VMX_BASIC_INFO_VMCS_SIZE(pVM->hm.s.vmx.msr.u64BasicInfo))); 983 LogRel(("HM: VMCS physical address limit = %s\n", MSR_IA32_VMX_BASIC_INFO_VMCS_PHYS_WIDTH(pVM->hm.s.vmx.msr.u64BasicInfo) ? "< 4 GB" : "None")); 984 LogRel(("HM: VMCS memory type = %#x\n", MSR_IA32_VMX_BASIC_INFO_VMCS_MEM_TYPE(pVM->hm.s.vmx.msr.u64BasicInfo))); 985 LogRel(("HM: Dual-monitor treatment support = %RTbool\n", !!MSR_IA32_VMX_BASIC_INFO_VMCS_DUAL_MON(pVM->hm.s.vmx.msr.u64BasicInfo))); 986 LogRel(("HM: OUTS & INS instruction-info = %RTbool\n", !!MSR_IA32_VMX_BASIC_INFO_VMCS_INS_OUTS(pVM->hm.s.vmx.msr.u64BasicInfo))); 987 987 LogRel(("HM: Max resume loops = %u\n", pVM->hm.s.cMaxResumeLoops)); 988 988 989 LogRel(("HM: MSR_IA32_VMX_PINBASED_CTLS = %#RX64\n", pVM->hm.s.vmx.msr. vmx_pin_ctls.u));990 val = pVM->hm.s.vmx.msr. vmx_pin_ctls.n.allowed1;991 zap = pVM->hm.s.vmx.msr. 
vmx_pin_ctls.n.disallowed0;989 LogRel(("HM: MSR_IA32_VMX_PINBASED_CTLS = %#RX64\n", pVM->hm.s.vmx.msr.VmxPinCtls.u)); 990 val = pVM->hm.s.vmx.msr.VmxPinCtls.n.allowed1; 991 zap = pVM->hm.s.vmx.msr.VmxPinCtls.n.disallowed0; 992 992 HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PIN_EXEC_EXT_INT_EXIT); 993 993 HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PIN_EXEC_NMI_EXIT); … … 995 995 HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PIN_EXEC_PREEMPT_TIMER); 996 996 997 LogRel(("HM: MSR_IA32_VMX_PROCBASED_CTLS = %#RX64\n", pVM->hm.s.vmx.msr. vmx_proc_ctls.u));998 val = pVM->hm.s.vmx.msr. vmx_proc_ctls.n.allowed1;999 zap = pVM->hm.s.vmx.msr. vmx_proc_ctls.n.disallowed0;997 LogRel(("HM: MSR_IA32_VMX_PROCBASED_CTLS = %#RX64\n", pVM->hm.s.vmx.msr.VmxProcCtls.u)); 998 val = pVM->hm.s.vmx.msr.VmxProcCtls.n.allowed1; 999 zap = pVM->hm.s.vmx.msr.VmxProcCtls.n.disallowed0; 1000 1000 HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT); 1001 1001 HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_USE_TSC_OFFSETTING); … … 1019 1019 HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_PAUSE_EXIT); 1020 1020 HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL); 1021 if (pVM->hm.s.vmx.msr. vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)1022 { 1023 LogRel(("HM: MSR_IA32_VMX_PROCBASED_CTLS2 = %#RX64\n", pVM->hm.s.vmx.msr. vmx_proc_ctls2.u));1024 val = pVM->hm.s.vmx.msr. vmx_proc_ctls2.n.allowed1;1025 zap = pVM->hm.s.vmx.msr. 
vmx_proc_ctls2.n.disallowed0;1021 if (pVM->hm.s.vmx.msr.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL) 1022 { 1023 LogRel(("HM: MSR_IA32_VMX_PROCBASED_CTLS2 = %#RX64\n", pVM->hm.s.vmx.msr.VmxProcCtls2.u)); 1024 val = pVM->hm.s.vmx.msr.VmxProcCtls2.n.allowed1; 1025 zap = pVM->hm.s.vmx.msr.VmxProcCtls2.n.disallowed0; 1026 1026 HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC); 1027 1027 HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC2_EPT); … … 1038 1038 } 1039 1039 1040 LogRel(("HM: MSR_IA32_VMX_ENTRY_CTLS = %#RX64\n", pVM->hm.s.vmx.msr. vmx_entry.u));1041 val = pVM->hm.s.vmx.msr. vmx_entry.n.allowed1;1042 zap = pVM->hm.s.vmx.msr. vmx_entry.n.disallowed0;1040 LogRel(("HM: MSR_IA32_VMX_ENTRY_CTLS = %#RX64\n", pVM->hm.s.vmx.msr.VmxEntry.u)); 1041 val = pVM->hm.s.vmx.msr.VmxEntry.n.allowed1; 1042 zap = pVM->hm.s.vmx.msr.VmxEntry.n.disallowed0; 1043 1043 HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_ENTRY_LOAD_DEBUG); 1044 1044 HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST); … … 1049 1049 HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_EFER_MSR); 1050 1050 1051 LogRel(("HM: MSR_IA32_VMX_EXIT_CTLS = %#RX64\n", pVM->hm.s.vmx.msr. vmx_exit.u));1052 val = pVM->hm.s.vmx.msr. vmx_exit.n.allowed1;1053 zap = pVM->hm.s.vmx.msr. vmx_exit.n.disallowed0;1051 LogRel(("HM: MSR_IA32_VMX_EXIT_CTLS = %#RX64\n", pVM->hm.s.vmx.msr.VmxExit.u)); 1052 val = pVM->hm.s.vmx.msr.VmxExit.n.allowed1; 1053 zap = pVM->hm.s.vmx.msr.VmxExit.n.disallowed0; 1054 1054 HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_EXIT_SAVE_DEBUG); 1055 1055 HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE); … … 1062 1062 HMVMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_EXIT_SAVE_VMX_PREEMPT_TIMER); 1063 1063 1064 if (pVM->hm.s.vmx.msr. vmx_ept_vpid_caps)1065 { 1066 val = pVM->hm.s.vmx.msr. 
vmx_ept_vpid_caps;1064 if (pVM->hm.s.vmx.msr.u64EptVpidCaps) 1065 { 1066 val = pVM->hm.s.vmx.msr.u64EptVpidCaps; 1067 1067 LogRel(("HM: MSR_IA32_VMX_EPT_VPID_CAP = %#RX64\n", val)); 1068 1068 HMVMX_REPORT_CAPABILITY(val, MSR_IA32_VMX_EPT_VPID_CAP_RWX_X_ONLY); … … 1093 1093 } 1094 1094 1095 LogRel(("HM: MSR_IA32_VMX_MISC = %#RX64\n", pVM->hm.s.vmx.msr. vmx_misc));1096 if (MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT(pVM->hm.s.vmx.msr. vmx_misc) == pVM->hm.s.vmx.cPreemptTimerShift)1095 LogRel(("HM: MSR_IA32_VMX_MISC = %#RX64\n", pVM->hm.s.vmx.msr.u64Misc)); 1096 if (MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT(pVM->hm.s.vmx.msr.u64Misc) == pVM->hm.s.vmx.cPreemptTimerShift) 1097 1097 { 1098 1098 LogRel(("HM: MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT = %#x\n", 1099 MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT(pVM->hm.s.vmx.msr. vmx_misc)));1099 MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT(pVM->hm.s.vmx.msr.u64Misc))); 1100 1100 } 1101 1101 else 1102 1102 { 1103 1103 LogRel(("HM: MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT = %#x - erratum detected, using %#x instead\n", 1104 MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT(pVM->hm.s.vmx.msr.vmx_misc), pVM->hm.s.vmx.cPreemptTimerShift)); 1105 } 1106 1107 LogRel(("HM: MSR_IA32_VMX_MISC_STORE_EFERLMA_VMEXIT = %RTbool\n", !!MSR_IA32_VMX_MISC_STORE_EFERLMA_VMEXIT(pVM->hm.s.vmx.msr.vmx_misc))); 1108 LogRel(("HM: MSR_IA32_VMX_MISC_ACTIVITY_STATES = %#x\n", MSR_IA32_VMX_MISC_ACTIVITY_STATES(pVM->hm.s.vmx.msr.vmx_misc))); 1109 LogRel(("HM: MSR_IA32_VMX_MISC_CR3_TARGET = %#x\n", MSR_IA32_VMX_MISC_CR3_TARGET(pVM->hm.s.vmx.msr.vmx_misc))); 1110 LogRel(("HM: MSR_IA32_VMX_MISC_MAX_MSR = %u\n", MSR_IA32_VMX_MISC_MAX_MSR(pVM->hm.s.vmx.msr.vmx_misc))); 1111 LogRel(("HM: MSR_IA32_VMX_MISC_RDMSR_SMBASE_MSR_SMM = %RTbool\n", !!MSR_IA32_VMX_MISC_RDMSR_SMBASE_MSR_SMM(pVM->hm.s.vmx.msr.vmx_misc))); 1112 LogRel(("HM: MSR_IA32_VMX_MISC_SMM_MONITOR_CTL_B2 = %RTbool\n", !!MSR_IA32_VMX_MISC_SMM_MONITOR_CTL_B2(pVM->hm.s.vmx.msr.vmx_misc))); 1113 LogRel(("HM: MSR_IA32_VMX_MISC_VMWRITE_VMEXIT_INFO = %RTbool\n", 
!!MSR_IA32_VMX_MISC_VMWRITE_VMEXIT_INFO(pVM->hm.s.vmx.msr.vmx_misc))); 1114 LogRel(("HM: MSR_IA32_VMX_MISC_MSEG_ID = %#x\n", MSR_IA32_VMX_MISC_MSEG_ID(pVM->hm.s.vmx.msr.vmx_misc))); 1104 MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT(pVM->hm.s.vmx.msr.u64Misc), pVM->hm.s.vmx.cPreemptTimerShift)); 1105 } 1106 1107 val = pVM->hm.s.vmx.msr.u64Misc; 1108 LogRel(("HM: MSR_IA32_VMX_MISC_STORE_EFERLMA_VMEXIT = %RTbool\n", !!MSR_IA32_VMX_MISC_STORE_EFERLMA_VMEXIT(val))); 1109 LogRel(("HM: MSR_IA32_VMX_MISC_ACTIVITY_STATES = %#x\n", MSR_IA32_VMX_MISC_ACTIVITY_STATES(val))); 1110 LogRel(("HM: MSR_IA32_VMX_MISC_CR3_TARGET = %#x\n", MSR_IA32_VMX_MISC_CR3_TARGET(val))); 1111 LogRel(("HM: MSR_IA32_VMX_MISC_MAX_MSR = %u\n", MSR_IA32_VMX_MISC_MAX_MSR(val))); 1112 LogRel(("HM: MSR_IA32_VMX_MISC_RDMSR_SMBASE_MSR_SMM = %RTbool\n", !!MSR_IA32_VMX_MISC_RDMSR_SMBASE_MSR_SMM(val))); 1113 LogRel(("HM: MSR_IA32_VMX_MISC_SMM_MONITOR_CTL_B2 = %RTbool\n", !!MSR_IA32_VMX_MISC_SMM_MONITOR_CTL_B2(val))); 1114 LogRel(("HM: MSR_IA32_VMX_MISC_VMWRITE_VMEXIT_INFO = %RTbool\n", !!MSR_IA32_VMX_MISC_VMWRITE_VMEXIT_INFO(val))); 1115 LogRel(("HM: MSR_IA32_VMX_MISC_MSEG_ID = %#x\n", MSR_IA32_VMX_MISC_MSEG_ID(val))); 1115 1116 1116 1117 /* Paranoia */ 1117 AssertRelease(MSR_IA32_VMX_MISC_MAX_MSR(pVM->hm.s.vmx.msr.vmx_misc) >= 512); 1118 1119 LogRel(("HM: MSR_IA32_VMX_CR0_FIXED0 = %#RX64\n", pVM->hm.s.vmx.msr.vmx_cr0_fixed0)); 1120 LogRel(("HM: MSR_IA32_VMX_CR0_FIXED1 = %#RX64\n", pVM->hm.s.vmx.msr.vmx_cr0_fixed1)); 1121 LogRel(("HM: MSR_IA32_VMX_CR4_FIXED0 = %#RX64\n", pVM->hm.s.vmx.msr.vmx_cr4_fixed0)); 1122 LogRel(("HM: MSR_IA32_VMX_CR4_FIXED1 = %#RX64\n", pVM->hm.s.vmx.msr.vmx_cr4_fixed1)); 1123 LogRel(("HM: MSR_IA32_VMX_VMCS_ENUM = %#RX64\n", pVM->hm.s.vmx.msr.vmx_vmcs_enum)); 1124 LogRel(("HM: MSR_IA32_VMX_VMCS_ENUM_HIGHEST_INDEX = %#x\n", MSR_IA32_VMX_VMCS_ENUM_HIGHEST_INDEX(pVM->hm.s.vmx.msr.vmx_vmcs_enum))); 1125 1126 val = pVM->hm.s.vmx.msr.vmx_vmfunc; 1118 
AssertRelease(MSR_IA32_VMX_MISC_MAX_MSR(pVM->hm.s.vmx.msr.u64Misc) >= 512); 1119 1120 LogRel(("HM: MSR_IA32_VMX_CR0_FIXED0 = %#RX64\n", pVM->hm.s.vmx.msr.u64Cr0Fixed0)); 1121 LogRel(("HM: MSR_IA32_VMX_CR0_FIXED1 = %#RX64\n", pVM->hm.s.vmx.msr.u64Cr0Fixed1)); 1122 LogRel(("HM: MSR_IA32_VMX_CR4_FIXED0 = %#RX64\n", pVM->hm.s.vmx.msr.u64Cr4Fixed0)); 1123 LogRel(("HM: MSR_IA32_VMX_CR4_FIXED1 = %#RX64\n", pVM->hm.s.vmx.msr.u64Cr4Fixed1)); 1124 1125 val = pVM->hm.s.vmx.msr.u64VmcsEnum; 1126 LogRel(("HM: MSR_IA32_VMX_VMCS_ENUM = %#RX64\n", val)); 1127 LogRel(("HM: MSR_IA32_VMX_VMCS_ENUM_HIGHEST_INDEX = %#x\n", MSR_IA32_VMX_VMCS_ENUM_HIGHEST_INDEX(val))); 1128 1129 val = pVM->hm.s.vmx.msr.u64Vmfunc; 1127 1130 if (val) 1128 1131 { … … 1139 1142 } 1140 1143 1141 if (pVM->hm.s.vmx.msr. vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_EPT)1144 if (pVM->hm.s.vmx.msr.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_EPT) 1142 1145 pVM->hm.s.fNestedPaging = pVM->hm.s.fAllowNestedPaging; 1143 1146 1144 if (pVM->hm.s.vmx.msr. vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VPID)1147 if (pVM->hm.s.vmx.msr.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VPID) 1145 1148 pVM->hm.s.vmx.fVpid = pVM->hm.s.vmx.fAllowVpid; 1146 1149 … … 1150 1153 * in Nehalems and secondary VM exec. controls should be supported in all of them, but nonetheless it's Intel... 1151 1154 */ 1152 if ( !(pVM->hm.s.vmx.msr. vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)1155 if ( !(pVM->hm.s.vmx.msr.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL) 1153 1156 && CPUMGetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_RDTSCP)) 1154 1157 { … … 1160 1163 if ( pVM->hm.s.vmx.fAllowUnrestricted 1161 1164 && pVM->hm.s.fNestedPaging 1162 && (pVM->hm.s.vmx.msr. 
vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_UNRESTRICTED_GUEST))1165 && (pVM->hm.s.vmx.msr.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_UNRESTRICTED_GUEST)) 1163 1166 { 1164 1167 pVM->hm.s.vmx.fUnrestrictedGuest = true; … … 2564 2567 2565 2568 /* if bit N is set in cr0_fixed0, then it must be set in the guest's cr0. */ 2566 mask = (uint32_t)pVM->hm.s.vmx.msr. vmx_cr0_fixed0;2569 mask = (uint32_t)pVM->hm.s.vmx.msr.u64Cr0Fixed0; 2567 2570 /* Note: We ignore the NE bit here on purpose; see vmmr0\hmr0.cpp for details. */ 2568 2571 mask &= ~X86_CR0_NE; … … 2582 2585 2583 2586 /* if bit N is cleared in cr0_fixed1, then it must be zero in the guest's cr0. */ 2584 mask = (uint32_t)~pVM->hm.s.vmx.msr. vmx_cr0_fixed1;2587 mask = (uint32_t)~pVM->hm.s.vmx.msr.u64Cr0Fixed1; 2585 2588 if ((pCtx->cr0 & mask) != 0) 2586 2589 return false; 2587 2590 2588 2591 /* if bit N is set in cr4_fixed0, then it must be set in the guest's cr4. */ 2589 mask = (uint32_t)pVM->hm.s.vmx.msr. vmx_cr4_fixed0;2592 mask = (uint32_t)pVM->hm.s.vmx.msr.u64Cr4Fixed0; 2590 2593 mask &= ~X86_CR4_VMXE; 2591 2594 if ((pCtx->cr4 & mask) != mask) … … 2593 2596 2594 2597 /* if bit N is cleared in cr4_fixed1, then it must be zero in the guest's cr4. */ 2595 mask = (uint32_t)~pVM->hm.s.vmx.msr. vmx_cr4_fixed1;2598 mask = (uint32_t)~pVM->hm.s.vmx.msr.u64Cr4Fixed1; 2596 2599 if ((pCtx->cr4 & mask) != 0) 2597 2600 return false; … … 2945 2948 if (iStatusCode == VERR_VMX_UNABLE_TO_START_VM) 2946 2949 { 2947 LogRel(("VERR_VMX_UNABLE_TO_START_VM: VM-entry allowed %#RX32\n", pVM->hm.s.vmx.msr. vmx_entry.n.allowed1));2948 LogRel(("VERR_VMX_UNABLE_TO_START_VM: VM-entry disallowed %#RX32\n", pVM->hm.s.vmx.msr. vmx_entry.n.disallowed0));2950 LogRel(("VERR_VMX_UNABLE_TO_START_VM: VM-entry allowed %#RX32\n", pVM->hm.s.vmx.msr.VmxEntry.n.allowed1)); 2951 LogRel(("VERR_VMX_UNABLE_TO_START_VM: VM-entry disallowed %#RX32\n", pVM->hm.s.vmx.msr.VmxEntry.n.disallowed0)); 2949 2952 } 2950 2953 } -
trunk/src/VBox/VMM/include/HMInternal.h
r48210 r48212 362 362 struct 363 363 { 364 uint64_t feature_ctrl;365 uint64_t vmx_basic_info;366 VMX_CAPABILITY vmx_pin_ctls;367 VMX_CAPABILITY vmx_proc_ctls;368 VMX_CAPABILITY vmx_proc_ctls2;369 VMX_CAPABILITY vmx_exit;370 VMX_CAPABILITY vmx_entry;371 uint64_t vmx_misc;372 uint64_t vmx_cr0_fixed0;373 uint64_t vmx_cr0_fixed1;374 uint64_t vmx_cr4_fixed0;375 uint64_t vmx_cr4_fixed1;376 uint64_t vmx_vmcs_enum;377 uint64_t vmx_vmfunc;378 uint64_t vmx_ept_vpid_caps;364 uint64_t u64FeatureCtrl; 365 uint64_t u64BasicInfo; 366 VMX_CAPABILITY VmxPinCtls; 367 VMX_CAPABILITY VmxProcCtls; 368 VMX_CAPABILITY VmxProcCtls2; 369 VMX_CAPABILITY VmxExit; 370 VMX_CAPABILITY VmxEntry; 371 uint64_t u64Misc; 372 uint64_t u64Cr0Fixed0; 373 uint64_t u64Cr0Fixed1; 374 uint64_t u64Cr4Fixed0; 375 uint64_t u64Cr4Fixed1; 376 uint64_t u64VmcsEnum; 377 uint64_t u64Vmfunc; 378 uint64_t u64EptVpidCaps; 379 379 } msr; 380 380 -
trunk/src/VBox/VMM/testcase/tstVMStructSize.cpp
r48210 r48212 406 406 CHECK_MEMBER_ALIGNMENT(HM, uMaxAsid, 8); 407 407 CHECK_MEMBER_ALIGNMENT(HM, vmx.u64HostCr4, 8); 408 CHECK_MEMBER_ALIGNMENT(HM, vmx.msr. feature_ctrl, 8);408 CHECK_MEMBER_ALIGNMENT(HM, vmx.msr.u64FeatureCtrl, 8); 409 409 CHECK_MEMBER_ALIGNMENT(HM, StatTprPatchSuccess, 8); 410 410 CHECK_MEMBER_ALIGNMENT(HMCPU, StatEntry, 8);
Note: See TracChangeset for help on using the changeset viewer.