Changeset 74258 in vbox
- Timestamp: Sep 14, 2018 4:20:16 AM (7 years ago)
- svn:sync-xref-src-repo-rev: 125057
- Location: trunk
- Files: 7 edited
trunk/include/VBox/vmm/cpumctx.h (r74155 → r74258)

@@ -602,6 +602,16 @@
     uint32_t                    uVmwriteBitmapR3Padding;
 #endif
-    /** 0x348 - Padding. */
-    uint8_t                     abPadding[0x3f0 - 0x348];
+    /** 0x348 - The MSR auto-load/store area - R0 ptr. */
+    R0PTRTYPE(PVMXAUTOMSR)      pAutoMsrAreaR0;
+#if HC_ARCH_BITS == 32
+    uint32_t                    uAutoMsrAreaR0;
+#endif
+    /** 0x350 - The MSR auto-load/store area - R3 ptr. */
+    R3PTRTYPE(PVMXAUTOMSR)      pAutoMsrAreaR3;
+#if HC_ARCH_BITS == 32
+    uint32_t                    uAutoMsrAreaR3;
+#endif
+    /** 0x358 - Padding. */
+    uint8_t                     abPadding[0x3f0 - 0x358];
         } vmx;
     } CPUM_UNION_NM(s);
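For reference, the two new members point at an array of 16-byte auto-MSR entries in the format consumed by iemVmxVmentryLoadGuestAutoMsrs() later in this changeset; the HC_ARCH_BITS == 32 padding keeps each pointer slot 8 bytes wide so the 0x350/0x358 offsets in the comments also hold on 32-bit hosts. A minimal sketch of that entry layout, paraphrased from the fields the new IEM code dereferences rather than copied from the header (the real typedef lives in hm_vmx.h):

/* Sketch of the VMX auto-load/store MSR entry format (Intel SDM Vol. 3, MSR areas).
 * Field names follow the accesses made by iemVmxVmentryLoadGuestAutoMsrs(); illustrative only. */
#include <stdint.h>
typedef struct VMXAUTOMSR
{
    uint32_t u32Msr;        /* MSR index to load on VM-entry. */
    uint32_t u32Reserved;   /* Must be zero; a set bit fails VM-entry with MsrLoadRsvd. */
    uint64_t u64Value;      /* Value to write into the MSR. */
} VMXAUTOMSR;
typedef VMXAUTOMSR *PVMXAUTOMSR;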
trunk/include/VBox/vmm/hm_vmx.h (r74227 → r74258)

@@ -2922,35 +2922,4 @@
  */

-/** CR0 bits set here must always be set when in VMX operation. */
-#define VMX_V_CR0_FIXED0                                        (X86_CR0_PE | X86_CR0_NE | X86_CR0_PG)
-/** VMX_V_CR0_FIXED0 when unrestricted-guest execution is supported for the guest. */
-#define VMX_V_CR0_FIXED0_UX                                     (VMX_V_CR0_FIXED0 & ~(X86_CR0_PE | X86_CR0_PG))
-/** CR4 bits set here must always be set when in VMX operation. */
-#define VMX_V_CR4_FIXED0                                        (X86_CR4_VMXE)
-
-/** Virtual VMCS revision ID. Bump this arbitarily chosen identifier if incompatible
- *  changes to the layout of VMXVVMCS is done. Bit 31 MBZ. */
-#define VMX_V_VMCS_REVISION_ID                                  UINT32_C(0x1d000001)
-AssertCompile(!(VMX_V_VMCS_REVISION_ID & RT_BIT(31)));
-
-/** The size of the virtual VMCS region (we use the maximum allowed size to avoid
- *  complications when teleporation may be implemented). */
-#define VMX_V_VMCS_SIZE                                         X86_PAGE_4K_SIZE
-/** The size of the virtual VMCS region (in pages). */
-#define VMX_V_VMCS_PAGES                                        1
-
-/** The size of the Virtual-APIC page (in bytes). */
-#define VMX_V_VIRT_APIC_SIZE                                    X86_PAGE_4K_SIZE
-/** The size of the Virtual-APIC page (in pages). */
-#define VMX_V_VIRT_APIC_PAGES                                   1
-
-/** The size of the VMREAD/VMWRITE bitmap (in bytes). */
-#define VMX_V_VMREAD_VMWRITE_BITMAP_SIZE                        X86_PAGE_4K_SIZE
-/** The size of the VMREAD/VMWRITE-bitmap (in pages). */
-#define VMX_V_VMREAD_VMWRITE_BITMAP_PAGES                       1
-
-/** The highest index value used for supported virtual VMCS field encoding. */
-#define VMX_V_VMCS_MAX_INDEX                                    RT_BF_GET(VMX_VMCS64_CTRL_TSC_MULTIPLIER_HIGH, VMX_BF_VMCS_ENC_INDEX)
-
 /** @name Virtual VMX MSR - Miscellaneous data.
  * @{ */

@@ -2962,5 +2931,5 @@
 #define VMX_V_PREEMPT_TIMER_SHIFT                               5
 /** Maximum number of MSRs in the auto-load/store MSR areas, (n+1) * 512. */
-#define VMX_V_MAX_MSRS                                          0
+#define VMX_V_AUTOMSR_COUNT_MAX                                 0
 /** SMM MSEG revision ID. */
 #define VMX_V_MSEG_REV_ID                                       0

@@ -2974,4 +2943,42 @@
 #define VMX_V_VMCS_STATE_LAUNCHED                               RT_BIT(2)
 /** @} */
+
+/** CR0 bits set here must always be set when in VMX operation. */
+#define VMX_V_CR0_FIXED0                                        (X86_CR0_PE | X86_CR0_NE | X86_CR0_PG)
+/** VMX_V_CR0_FIXED0 when unrestricted-guest execution is supported for the guest. */
+#define VMX_V_CR0_FIXED0_UX                                     (VMX_V_CR0_FIXED0 & ~(X86_CR0_PE | X86_CR0_PG))
+/** CR4 bits set here must always be set when in VMX operation. */
+#define VMX_V_CR4_FIXED0                                        (X86_CR4_VMXE)
+
+/** Virtual VMCS revision ID. Bump this arbitarily chosen identifier if incompatible
+ *  changes to the layout of VMXVVMCS is done. Bit 31 MBZ. */
+#define VMX_V_VMCS_REVISION_ID                                  UINT32_C(0x1d000001)
+AssertCompile(!(VMX_V_VMCS_REVISION_ID & RT_BIT(31)));
+
+/** The size of the virtual VMCS region (we use the maximum allowed size to avoid
+ *  complications when teleporation may be implemented). */
+#define VMX_V_VMCS_SIZE                                         X86_PAGE_4K_SIZE
+/** The size of the virtual VMCS region (in pages). */
+#define VMX_V_VMCS_PAGES                                        1
+
+/** The size of the Virtual-APIC page (in bytes). */
+#define VMX_V_VIRT_APIC_SIZE                                    X86_PAGE_4K_SIZE
+/** The size of the Virtual-APIC page (in pages). */
+#define VMX_V_VIRT_APIC_PAGES                                   1
+
+/** The size of the VMREAD/VMWRITE bitmap (in bytes). */
+#define VMX_V_VMREAD_VMWRITE_BITMAP_SIZE                        X86_PAGE_4K_SIZE
+/** The size of the VMREAD/VMWRITE-bitmap (in pages). */
+#define VMX_V_VMREAD_VMWRITE_BITMAP_PAGES                       1
+
+/** The size of the auto-load/store MSR area (in bytes). */
+#define VMX_V_AUTOMSR_AREA_SIZE                                 ((512 * (VMX_V_AUTOMSR_COUNT_MAX + 1)) * sizeof(VMXAUTOMSR))
+/* Assert that the size is page aligned or adjust the VMX_V_AUTOMSR_AREA_PAGES macro below. */
+AssertCompile(RT_ALIGN_Z(VMX_V_AUTOMSR_AREA_SIZE, X86_PAGE_4K_SIZE) == VMX_V_AUTOMSR_AREA_SIZE);
+/** The size of the auto-load/store MSR area (in pages). */
+#define VMX_V_AUTOMSR_AREA_PAGES                                ((VMX_V_AUTOMSR_AREA_SIZE) >> X86_PAGE_4K_SHIFT)
+
+/** The highest index value used for supported virtual VMCS field encoding. */
+#define VMX_V_VMCS_MAX_INDEX                                    RT_BF_GET(VMX_VMCS64_CTRL_TSC_MULTIPLIER_HIGH, VMX_BF_VMCS_ENC_INDEX)

 /**

@@ -3761,4 +3768,9 @@
     kVmxVDiag_Vmentry_HostSysenterEspEip,
     kVmxVDiag_Vmentry_LongModeCS,
+    kVmxVDiag_Vmentry_MsrLoad,
+    kVmxVDiag_Vmentry_MsrLoadCount,
+    kVmxVDiag_Vmentry_MsrLoadPtrReadPhys,
+    kVmxVDiag_Vmentry_MsrLoadRing3,
+    kVmxVDiag_Vmentry_MsrLoadRsvd,
     kVmxVDiag_Vmentry_NmiWindowExit,
     kVmxVDiag_Vmentry_PinCtlsAllowed1,
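With the renamed VMX_V_AUTOMSR_COUNT_MAX set to 0, the "(n+1) * 512" rule from the comment above gives a limit of 512 entries per area, and the new size macros come out to two 4K pages per area. A standalone sanity check of that arithmetic, assuming the 16-byte entry layout sketched earlier (this block is illustrative and not part of the changeset):

/* Worked numbers for the new auto-MSR area macros, using plain C11 static_assert. */
#include <assert.h>
#define X86_PAGE_4K_SIZE            4096
#define X86_PAGE_4K_SHIFT           12
#define VMX_V_AUTOMSR_COUNT_MAX     0
#define VMX_V_AUTOMSR_AREA_SIZE     ((512 * (VMX_V_AUTOMSR_COUNT_MAX + 1)) * 16 /* sizeof(VMXAUTOMSR) */)
#define VMX_V_AUTOMSR_AREA_PAGES    ((VMX_V_AUTOMSR_AREA_SIZE) >> X86_PAGE_4K_SHIFT)

static_assert(VMX_V_AUTOMSR_AREA_SIZE  == 8192, "512 entries * 16 bytes");
static_assert(VMX_V_AUTOMSR_AREA_PAGES == 2,    "two 4K pages per area");
static_assert(VMX_V_AUTOMSR_AREA_SIZE % X86_PAGE_4K_SIZE == 0, "mirrors the AssertCompile in the diff");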
trunk/src/VBox/VMM/VMMAll/CPUMAllMsrs.cpp (r74171 → r74258)

@@ -1519,5 +1519,5 @@
     int rc = HMVmxGetHostMsr(pVCpu->CTX_SUFF(pVM), MSR_IA32_VMX_MISC, &uHostMsr);
     AssertMsgRC(rc, ("HMVmxGetHostMsr failed. rc=%Rrc\n", rc)); RT_NOREF_PV(rc);
-    uint8_t const cMaxMsrs       = RT_MIN(RT_BF_GET(uHostMsr, VMX_BF_MISC_MAX_MSRS), VMX_V_MAX_MSRS);
+    uint8_t const cMaxMsrs       = RT_MIN(RT_BF_GET(uHostMsr, VMX_BF_MISC_MAX_MSRS), VMX_V_AUTOMSR_COUNT_MAX);
     uint8_t const fActivityState = RT_BF_GET(uHostMsr, VMX_BF_MISC_ACTIVITY_STATES) & VMX_V_GUEST_ACTIVITY_STATE_MASK;
     uVmxMsr = RT_BF_MAKE(VMX_BF_MISC_PREEMPT_TIMER_TSC, VMX_V_PREEMPT_TIMER_SHIFT )
trunk/src/VBox/VMM/VMMAll/HMVMXAll.cpp (r74227 → r74258)

@@ -308,4 +308,9 @@
     VMXV_DIAG_DESC(kVmxVDiag_Vmentry_HostSysenterEspEip  , "HostSysenterEspEip"  ),
     VMXV_DIAG_DESC(kVmxVDiag_Vmentry_LongModeCS          , "LongModeCS"          ),
+    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_MsrLoad             , "MsrLoad"             ),
+    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_MsrLoadCount        , "MsrLoadCount"        ),
+    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_MsrLoadPtrReadPhys  , "MsrLoadPtrReadPhys"  ),
+    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_MsrLoadRing3        , "MsrLoadRing3"        ),
+    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_MsrLoadRsvd         , "MsrLoadRsvd"         ),
     VMXV_DIAG_DESC(kVmxVDiag_Vmentry_NmiWindowExit       , "NmiWindowExit"       ),
     VMXV_DIAG_DESC(kVmxVDiag_Vmentry_PinCtlsAllowed1     , "PinCtlsAllowed1"     ),
trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h (r74227 → r74258)

@@ -3987,4 +3987,86 @@

 /**
+ * Loads the guest auto-load MSRs area as part of VM-entry.
+ *
+ * @param   pVCpu       The cross context virtual CPU structure.
+ * @param   pszInstr    The VMX instruction name (for logging purposes).
+ */
+IEM_STATIC int iemVmxVmentryLoadGuestAutoMsrs(PVMCPU pVCpu, const char *pszInstr)
+{
+    /*
+     * Load guest MSRs.
+     * See Intel spec. 26.4 "Loading MSRs".
+     */
+    PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
+    const char *const pszFailure = "VM-exit";
+
+    /*
+     * Verify the MSR auto-load count. Physical CPUs can behave unpredictably if the count is
+     * exceeded including possibly raising #MC exceptions during VMX transition. Our
+     * implementation shall fail VM-entry with an VMX_EXIT_ERR_MSR_LOAD VM-exit.
+     */
+    uint64_t const u64GuestVmxMiscMsr = CPUMGetGuestIa32VmxMisc(pVCpu);
+    uint32_t const cMaxSupportedMsrs  = VMX_MISC_MAX_MSRS(u64GuestVmxMiscMsr);
+    uint32_t const cMsrs              = pVmcs->u32EntryMsrLoadCount;
+    Assert(cMaxSupportedMsrs <= VMX_V_AUTOMSR_AREA_SIZE / sizeof(VMXAUTOMSR));
+    if (cMsrs <= cMaxSupportedMsrs)
+    { /* likely */ }
+    else
+    {
+        pVmcs->u64ExitQual.u = VMX_V_AUTOMSR_AREA_SIZE / sizeof(VMXAUTOMSR);
+        IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_MsrLoadCount);
+    }
+
+    RTGCPHYS const GCPhysAutoMsrArea = pVmcs->u64AddrEntryMsrLoad.u;
+    int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), (void *)&pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pAutoMsrArea),
+                                     GCPhysAutoMsrArea, VMX_V_AUTOMSR_AREA_SIZE);
+    if (RT_SUCCESS(rc))
+    {
+        PVMXAUTOMSR pMsr = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pAutoMsrArea); NOREF(pMsr);
+        for (uint32_t idxMsr = 0; idxMsr < cMsrs; idxMsr++, pMsr++)
+        {
+            if (   !pMsr->u32Reserved
+                &&  pMsr->u32Msr != MSR_K8_FS_BASE
+                &&  pMsr->u32Msr != MSR_K8_GS_BASE
+                &&  pMsr->u32Msr >> 8 != MSR_IA32_X2APIC_START >> 8
+                &&  pMsr->u32Msr != MSR_IA32_SMM_MONITOR_CTL)
+            {
+                rc = CPUMSetGuestMsr(pVCpu, pMsr->u32Msr, pMsr->u64Value);
+                if (rc == VINF_SUCCESS)
+                    continue;
+
+                /*
+                 * If we're in ring-0, we cannot handle returns to ring-3 at this point and continue VM-entry.
+                 * If any guest hypervisor loads MSRs that require ring-3 handling, we cause a VM-entry failure
+                 * indicated further with a different diagnostic code. Later, we can try implement handling of
+                 * the MSR in ring-0 if possible, or come up with a better, generic solution.
+                 */
+                pVmcs->u64ExitQual.u = idxMsr;
+                VMXVDIAG const enmDiag = rc == VINF_CPUM_R3_MSR_WRITE
+                                       ? kVmxVDiag_Vmentry_MsrLoadRing3
+                                       : kVmxVDiag_Vmentry_MsrLoad;
+                IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
+            }
+            else
+            {
+                pVmcs->u64ExitQual.u = idxMsr;
+                IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_MsrLoadRsvd);
+            }
+        }
+    }
+    else
+    {
+        Log(("%s: Failed to read MSR auto-load area at %#RGp, rc=%Rrc\n", pszInstr, GCPhysAutoMsrArea, rc));
+        pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmentry_MsrLoadPtrReadPhys;
+        return rc;
+    }
+
+    NOREF(pszInstr);
+    NOREF(pszFailure);
+    return VINF_SUCCESS;
+}
+
+
+/**
  * Loads the guest-state as part of VM-entry.
  *

@@ -4016,6 +4098,4 @@
     /* Clear address-range monitoring. */
     EMMonitorWaitClear(pVCpu);
-
-    /* Load MSRs. */

     NOREF(pszInstr);

@@ -4142,7 +4222,17 @@
         if (RT_SUCCESS(rc))
         {
-            iemVmxVmSucceed(pVCpu);
-            iemRegAddToRipAndClearRF(pVCpu, cbInstr);
-            return VINF_SUCCESS;
+            /* Load MSRs from the VM-entry auto-load MSR area. */
+            rc = iemVmxVmentryLoadGuestAutoMsrs(pVCpu, pszInstr);
+            if (RT_SUCCESS(rc))
+            {
+                Assert(rc != VINF_CPUM_R3_MSR_WRITE);
+
+                iemVmxVmSucceed(pVCpu);
+                iemRegAddToRipAndClearRF(pVCpu, cbInstr);
+                return VINF_SUCCESS;
+            }
+
+            /** @todo NSTVMX: VMExit with VMX_EXIT_ERR_MSR_LOAD and set
+             *        VMX_BF_EXIT_REASON_ENTRY_FAILED. */
         }
     }
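The new function implements the checks from Intel spec 26.4: the count taken from the VMCS must not exceed the limit advertised through IA32_VMX_MISC, every entry's reserved field must be zero, and a handful of MSRs (FS/GS base, the x2APIC range, SMM monitor control) may not appear in the load area; on failure the exit qualification records the offending entry index. To exercise this path, a nested hypervisor fills an entry array in guest-physical memory and points the VM-entry MSR-load count and address VMCS fields at it before VMLAUNCH. A rough guest-side sketch, where my_vmcs_write32/64 and my_guest_phys are hypothetical helpers and only the entry format and count/address pairing come from this changeset and the SDM:

/* Hypothetical guest-hypervisor setup of a VM-entry MSR-load area; not VBox code. */
#include <stdint.h>

typedef struct { uint32_t u32Msr, u32Reserved; uint64_t u64Value; } AUTOMSRENTRY; /* 16-byte entry, as sketched earlier */

/* Placeholder VMCS accessors supplied by the (hypothetical) guest hypervisor. */
extern void     my_vmcs_write32(uint32_t uFieldEnc, uint32_t uValue);
extern void     my_vmcs_write64(uint32_t uFieldEnc, uint64_t uValue);
extern uint64_t my_guest_phys(const void *pv);

#define VMCS_ENTRY_MSR_LOAD_COUNT   0x4014  /* VM-entry MSR-load count (Intel SDM field encoding). */
#define VMCS_ENTRY_MSR_LOAD_ADDR    0x200a  /* VM-entry MSR-load address (Intel SDM field encoding). */

static AUTOMSRENTRY g_aEntryLoadMsrs[2] =
{
    { 0x277 /* IA32_PAT */,         0, UINT64_C(0x0007040600070406) },
    { 0x174 /* IA32_SYSENTER_CS */, 0, 0x8 },
};

static void setupEntryMsrLoadArea(void)
{
    /* The count must stay within the (n+1)*512 limit advertised via IA32_VMX_MISC (512 here). */
    my_vmcs_write32(VMCS_ENTRY_MSR_LOAD_COUNT, 2);
    /* Guest-physical address of the area; IEM reads the whole VMX_V_AUTOMSR_AREA_SIZE bytes with
     * PGMPhysSimpleReadGCPhys() and validates each of the first 'count' entries. */
    my_vmcs_write64(VMCS_ENTRY_MSR_LOAD_ADDR, my_guest_phys(&g_aEntryLoadMsrs[0]));
}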
trunk/src/VBox/VMM/VMMR3/CPUM.cpp (r74155 → r74258)

@@ -931,4 +931,9 @@
                 pVCpu->cpum.s.Guest.hwvirt.vmx.pvVmwriteBitmapR3 = NULL;
             }
+            if (pVCpu->cpum.s.Guest.hwvirt.vmx.pAutoMsrAreaR3)
+            {
+                SUPR3PageFreeEx(pVCpu->cpum.s.Guest.hwvirt.vmx.pAutoMsrAreaR3, VMX_V_AUTOMSR_AREA_PAGES);
+                pVCpu->cpum.s.Guest.hwvirt.vmx.pAutoMsrAreaR3 = NULL;
+            }
         }
     }

@@ -952,11 +957,8 @@
         * Allocate the nested-guest current VMCS.
         */
-       SUPPAGE SupNstGstVmcsPage;
-       RT_ZERO(SupNstGstVmcsPage);
-       SupNstGstVmcsPage.Phys = NIL_RTHCPHYS;
        Assert(VMX_V_VMCS_PAGES == 1);
        Assert(!pVCpu->cpum.s.Guest.hwvirt.vmx.pVmcsR3);
        rc = SUPR3PageAllocEx(VMX_V_VMCS_PAGES, 0 /* fFlags */, (void **)&pVCpu->cpum.s.Guest.hwvirt.vmx.pVmcsR3,
-                             &pVCpu->cpum.s.Guest.hwvirt.vmx.pVmcsR0, &SupNstGstVmcsPage);
+                             &pVCpu->cpum.s.Guest.hwvirt.vmx.pVmcsR0, NULL /* paPages */);
        if (RT_FAILURE(rc))
        {

@@ -1006,4 +1008,19 @@
            LogRel(("CPUM%u: Failed to alloc %u pages for the nested-guest's VMWRITE-bitmap\n", pVCpu->idCpu,
                    VMX_V_VMREAD_VMWRITE_BITMAP_PAGES));
+           break;
+       }
+
+       /*
+        * Allocate the MSR auto-load/store area.
+        */
+       Assert(!pVCpu->cpum.s.Guest.hwvirt.vmx.pAutoMsrAreaR3);
+       rc = SUPR3PageAllocEx(VMX_V_AUTOMSR_AREA_PAGES, 0 /* fFlags */,
+                             (void **)&pVCpu->cpum.s.Guest.hwvirt.vmx.pAutoMsrAreaR3,
+                             &pVCpu->cpum.s.Guest.hwvirt.vmx.pAutoMsrAreaR0, NULL /* paPages */);
+       if (RT_FAILURE(rc))
+       {
+           Assert(!pVCpu->cpum.s.Guest.hwvirt.vmx.pAutoMsrAreaR3);
+           LogRel(("CPUM%u: Failed to alloc %u pages for the nested-guest's auto-load/store MSR area\n", pVCpu->idCpu,
+                   VMX_V_AUTOMSR_AREA_PAGES));
            break;
        }
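The area is allocated per VCPU with SUPR3PageAllocEx(), which returns both a ring-3 and a ring-0 mapping of the same pages, matching how the VMCS, virtual-APIC page and VMREAD/VMWRITE bitmaps above it are handled, and the matching SUPR3PageFreeEx() in the teardown path uses the same VMX_V_AUTOMSR_AREA_PAGES count. The IEM code then reaches the area through CTX_SUFF(pAutoMsrArea), which selects whichever mapping matches the compilation context; roughly, paraphrasing the context-suffix macro rather than quoting it:

/* Rough expansion of the CTX_SUFF(pAutoMsrArea) access used by the new IEM code:
 * the macro appends the R0/R3 (or RC) suffix for the context the code is built for.
 * NSTGST_AUTOMSR_AREA is an illustrative name, not a VBox macro. */
#ifdef IN_RING0
# define NSTGST_AUTOMSR_AREA(a_pVCpu)  ((a_pVCpu)->cpum.GstCtx.hwvirt.vmx.pAutoMsrAreaR0)  /* ring-0 mapping */
#else
# define NSTGST_AUTOMSR_AREA(a_pVCpu)  ((a_pVCpu)->cpum.GstCtx.hwvirt.vmx.pAutoMsrAreaR3)  /* ring-3 mapping */
#endif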
trunk/src/VBox/VMM/testcase/tstVMStruct.h (r74155 → r74258)

@@ -155,4 +155,12 @@
     GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.pShadowVmcsR0);
     GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.pShadowVmcsR3);
+    GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.pvVirtApicPageR0);
+    GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.pvVirtApicPageR3);
+    GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.pvVmreadBitmapR0);
+    GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.pvVmreadBitmapR3);
+    GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.pvVmwriteBitmapR0);
+    GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.pvVmwriteBitmapR3);
+    GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.pAutoMsrAreaR0);
+    GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.pAutoMsrAreaR3);
     GEN_CHECK_OFF(CPUMCTX, hwvirt.fLocalForcedActions);
     GEN_CHECK_OFF(CPUMCTX, hwvirt.fGif);