Changeset 45904 in vbox
- Timestamp: May 6, 2013, 11:53:53 AM (12 years ago)
- svn:sync-xref-src-repo-rev: 85530
- Location: trunk
- Files: 3 edited
Legend:
- Unmodified: unprefixed context lines
- Added: lines prefixed with '+'
- Removed: lines prefixed with '-'
- '…' marks lines elided between hunks
trunk/include/VBox/vmm/hm_vmx.h (r45894 → r45904)
{{{
 #ifdef VBOX_WITH_OLD_VTX_CODE
 # if HC_ARCH_BITS == 64
 #  define VMXWriteVmcs                          VMXWriteVmcs64
 # else
 #  define VMXWriteVmcs                          VMXWriteVmcs32
 # endif
 #else /* !VBOX_WITH_OLD_VTX_CODE */
-# if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
-#  define VMXWriteVmcsHstN                      VMXWriteVmcs64
-# else
-#  define VMXWriteVmcsHstN                      VMXWriteVmcs32
+# ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
+#  define VMXWriteVmcsHstN(idxField, uVal)      HMVMX_IS_64BIT_HOST_MODE() ?           \
+                                                    VMXWriteVmcs64(idxField, uVal)     \
+                                                  : VMXWriteVmcs32(idxField, uVal)
+#  define VMXWriteVmcsGstN(idxField, u64Val)    (pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests) ? \
+                                                    VMXWriteVmcs64(idxField, u64Val)   \
+                                                  : VMXWriteVmcs32(idxField, u64Val)
+# elif HC_ARCH_BITS == 32
+#  define VMXWriteVmcsHstN                      VMXWriteVmcs32
+#  define VMXWriteVmcsGstN(idxField, u64Val)    VMXWriteVmcs64Ex(pVCpu, idxField, u64Val)
+# else /* HC_ARCH_BITS == 64 */
+#  define VMXWriteVmcsHstN                      VMXWriteVmcs64
+#  define VMXWriteVmcsGstN                      VMXWriteVmcs64
 # endif
-# define VMXWriteVmcsGstN                       VMXWriteVmcs64
-#endif
+#endif /* !VBOX_WITH_OLD_VTX_CODE */
…
 #endif

+#ifdef VBOX_WITH_OLD_VTX_CODE
 # if HC_ARCH_BITS == 64
 #  define VMXReadVmcsField   VMXReadVmcs64
…
 #  define VMXReadVmcsField   VMXReadVmcs32
 # endif
+#endif

 /**
}}}
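The interesting case above is the hybrid 32-bit kernel build, where the write width can no longer be fixed at compile time: VMXWriteVmcsHstN now asks at run time whether the host is executing in 64-bit mode, and VMXWriteVmcsGstN whether 64-bit guests are allowed. Below is a minimal, self-contained sketch of that dispatch shape; WriteField64, WriteField32, g_fIs64BitHostMode and WriteHstN are hypothetical stand-ins for illustration, not the VirtualBox API.
{{{
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for VMXWriteVmcs64/VMXWriteVmcs32 and
   HMVMX_IS_64BIT_HOST_MODE(); illustration only, not the VBox API. */
static int WriteField64(uint32_t idxField, uint64_t uVal)
{
    printf("64-bit write: field %#x = %#llx\n", idxField, (unsigned long long)uVal);
    return 0; /* success, like VINF_SUCCESS */
}

static int WriteField32(uint32_t idxField, uint32_t uVal)
{
    printf("32-bit write: field %#x = %#x\n", idxField, uVal);
    return 0;
}

static int g_fIs64BitHostMode; /* the run-time question a hybrid kernel must ask */

/* Same dispatch shape as the new VMXWriteVmcsHstN for hybrid 32-bit
   kernels: the field width is chosen at run time, not at compile time. */
#define WriteHstN(idxField, uVal) \
    (g_fIs64BitHostMode ? WriteField64((idxField), (uVal)) \
                        : WriteField32((idxField), (uint32_t)(uVal)))

int main(void)
{
    g_fIs64BitHostMode = 1;
    WriteHstN(0x6c14, UINT64_C(0x123456789a));   /* full 64-bit write */
    g_fIs64BitHostMode = 0;
    WriteHstN(0x6c14, UINT64_C(0x123456789a));   /* 32-bit write, value truncated */
    return 0;
}
}}}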
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp (r45896 → r45904)
{{{
             /* We need the 64-bit TR base for hybrid darwin. */
             uint64_t u64TRBase = X86DESC64_BASE((PX86DESC64)pDesc);
-            rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_TR_BASE, u64TRBase);
+            rc = VMXWriteVmcs64(VMX_VMCS_HOST_TR_BASE, u64TRBase);
         }
         else
…
         uint64_t u64FSBase = ASMRdMsr(MSR_K8_FS_BASE);
         uint64_t u64GSBase = ASMRdMsr(MSR_K8_GS_BASE);
-        rc  = VMXWriteVmcsHstN(VMX_VMCS_HOST_FS_BASE, u64FSBase);
-        rc |= VMXWriteVmcsHstN(VMX_VMCS_HOST_GS_BASE, u64GSBase);
+        rc  = VMXWriteVmcs64(VMX_VMCS_HOST_FS_BASE, u64FSBase);
+        rc |= VMXWriteVmcs64(VMX_VMCS_HOST_GS_BASE, u64GSBase);
         AssertRCReturn(rc, rc);
     }
…
     /*
      * Host Sysenter MSRs.
      */
     rc |= VMXWriteVmcs32(VMX_VMCS32_HOST_SYSENTER_CS, ASMRdMsr_Low(MSR_IA32_SYSENTER_CS));
 # ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
     if (HMVMX_IS_64BIT_HOST_MODE())
     {
-        rc |= VMXWriteVmcsHstN(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr(MSR_IA32_SYSENTER_ESP));
-        rc |= VMXWriteVmcsHstN(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr(MSR_IA32_SYSENTER_EIP));
+        rc |= VMXWriteVmcs64(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr(MSR_IA32_SYSENTER_ESP));
+        rc |= VMXWriteVmcs64(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr(MSR_IA32_SYSENTER_EIP));
     }
     else
     {
-        rc |= VMXWriteVmcsHstN(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr_Low(MSR_IA32_SYSENTER_ESP));
-        rc |= VMXWriteVmcsHstN(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr_Low(MSR_IA32_SYSENTER_EIP));
+        rc |= VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr_Low(MSR_IA32_SYSENTER_ESP));
+        rc |= VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr_Low(MSR_IA32_SYSENTER_EIP));
     }
 # elif HC_ARCH_BITS == 32
-    rc |= VMXWriteVmcsHstN(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr_Low(MSR_IA32_SYSENTER_ESP));
-    rc |= VMXWriteVmcsHstN(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr_Low(MSR_IA32_SYSENTER_EIP));
+    rc |= VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr_Low(MSR_IA32_SYSENTER_ESP));
+    rc |= VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr_Low(MSR_IA32_SYSENTER_EIP));
 # else
-    rc |= VMXWriteVmcsHstN(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr(MSR_IA32_SYSENTER_ESP));
-    rc |= VMXWriteVmcsHstN(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr(MSR_IA32_SYSENTER_EIP));
+    rc |= VMXWriteVmcs64(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr(MSR_IA32_SYSENTER_ESP));
+    rc |= VMXWriteVmcs64(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr(MSR_IA32_SYSENTER_EIP));
 # endif
     AssertRCReturn(rc, rc);
…
      *    we want to be notified immediately of guest CR0 changes (e.g. PG to update our shadow page tables).
      */
-    uint64_t u64CR0Mask = 0;
-    u64CR0Mask =  X86_CR0_PE
+    uint32_t u32CR0Mask = 0;
+    u32CR0Mask =  X86_CR0_PE
                 | X86_CR0_NE
                 | X86_CR0_WP
…
                 | X86_CR0_NW;   /* Bit ignored on VM-entry and VM-exit. Don't let the guest modify the host CR0.NW */
     if (pVM->hm.s.vmx.fUnrestrictedGuest)
-        u64CR0Mask &= ~X86_CR0_PE;
+        u32CR0Mask &= ~X86_CR0_PE;
     if (pVM->hm.s.fNestedPaging)
-        u64CR0Mask &= ~X86_CR0_WP;
+        u32CR0Mask &= ~X86_CR0_WP;

     /* If the guest FPU state is active, don't need to VM-exit on writes to FPU related bits in CR0. */
     if (fInterceptNM)
-        u64CR0Mask |= (X86_CR0_TS | X86_CR0_MP);
+        u32CR0Mask |= (X86_CR0_TS | X86_CR0_MP);
     else
-        u64CR0Mask &= ~(X86_CR0_TS | X86_CR0_MP);
+        u32CR0Mask &= ~(X86_CR0_TS | X86_CR0_MP);

     /* Write the CR0 mask into the VMCS and update the VCPU's copy of the current CR0 mask. */
-    pVCpu->hm.s.vmx.cr0_mask = u64CR0Mask;
-    rc |= VMXWriteVmcsHstN(VMX_VMCS_CTRL_CR0_MASK, u64CR0Mask);
+    pVCpu->hm.s.vmx.cr0_mask = u32CR0Mask;
+    rc |= VMXWriteVmcs32(VMX_VMCS_CTRL_CR0_MASK, u32CR0Mask);
     AssertRCReturn(rc, rc);
…
             GCPhysGuestCR3 = GCPhys;
         }
+
+        Log(("Load: VMX_VMCS_GUEST_CR3=%#RGv (GstN)\n", GCPhysGuestCR3));
+        rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_CR3, GCPhysGuestCR3);
     }
     else
     {
         /* Non-nested paging case, just use the hypervisor's CR3. */
-        GCPhysGuestCR3 = PGMGetHyperCR3(pVCpu);
-    }
-
-    Log(("Load: VMX_VMCS_GUEST_CR3=%#RGv\n", GCPhysGuestCR3));
-    rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_CR3, GCPhysGuestCR3);
+        RTHCPHYS HCPhysGuestCR3 = PGMGetHyperCR3(pVCpu);
+
+        Log(("Load: VMX_VMCS_GUEST_CR3=%#RHv (HstN)\n", HCPhysGuestCR3));
+        rc = VMXWriteVmcsHstN(VMX_VMCS_GUEST_CR3, HCPhysGuestCR3);
+    }
     AssertRCReturn(rc, rc);
…
     /* The guest's view of its CR4 is unblemished. */
     rc = VMXWriteVmcs32(VMX_VMCS_CTRL_CR4_READ_SHADOW, u32GuestCR4);
     AssertRCReturn(rc, rc);
     Log(("Load: VMX_VMCS_CTRL_CR4_READ_SHADOW=%#RX32\n", u32GuestCR4));
…
     /* Setup CR4 mask. CR4 flags owned by the host, if the guest attempts to change them, that would cause a VM exit. */
-    uint64_t u64CR4Mask = 0;
-    u64CR4Mask =  X86_CR4_VME
+    uint32_t u32CR4Mask = 0;
+    u32CR4Mask =  X86_CR4_VME
                 | X86_CR4_PAE
                 | X86_CR4_PGE
                 | X86_CR4_PSE
                 | X86_CR4_VMXE;
-    pVCpu->hm.s.vmx.cr4_mask = u64CR4Mask;
-    rc |= VMXWriteVmcsHstN(VMX_VMCS_CTRL_CR4_MASK, u64CR4Mask);
+    pVCpu->hm.s.vmx.cr4_mask = u32CR4Mask;
+    rc |= VMXWriteVmcs32(VMX_VMCS_CTRL_CR4_MASK, u32CR4Mask);
     AssertRCReturn(rc, rc);
}}}
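The CR3 hunk above is the substantive fix in this file: with nested paging the VMCS receives a guest-physical CR3 through the guest-natural-width macro, while the shadow-paging branch now writes the hypervisor's host-physical CR3 through the host-natural-width macro instead of reusing the guest path. Below is a rough sketch of the restructured control flow; WriteGstN, WriteHstN and LoadGuestCR3 are hypothetical illustrative names, not the real HMVMXR0.cpp code.
{{{
#include <stdint.h>
#include <stdio.h>

typedef uint64_t RTGCPHYS;   /* guest-physical address (IPRT-style name) */
typedef uint64_t RTHCPHYS;   /* host-physical address (IPRT-style name) */

/* Hypothetical stand-ins for VMXWriteVmcsGstN / VMXWriteVmcsHstN. */
static int WriteGstN(const char *pszField, uint64_t uVal)
{
    printf("guest-natural-width write: %s = %#llx\n", pszField, (unsigned long long)uVal);
    return 0;
}

static int WriteHstN(const char *pszField, uint64_t uVal)
{
    printf("host-natural-width write: %s = %#llx\n", pszField, (unsigned long long)uVal);
    return 0;
}

/* Mirrors the corrected control flow: each branch performs its own write,
   so the shadow-paging case no longer goes through the guest-width path. */
static int LoadGuestCR3(int fNestedPaging, RTGCPHYS GCPhysGuestCR3, RTHCPHYS HCPhysHyperCR3)
{
    if (fNestedPaging)
        return WriteGstN("GUEST_CR3", GCPhysGuestCR3);   /* guest-physical CR3 */
    return WriteHstN("GUEST_CR3", HCPhysHyperCR3);       /* hypervisor shadow CR3 */
}

int main(void)
{
    LoadGuestCR3(1 /* nested paging */, UINT64_C(0x1000), UINT64_C(0x00dead00));
    LoadGuestCR3(0 /* shadow paging */, UINT64_C(0x1000), UINT64_C(0x00dead00));
    return 0;
}
}}}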
trunk/src/VBox/VMM/VMMR0/HWVMXR0.h (r45786 → r45904)
{{{
 #  endif
 #  define VMXReadVmcs      VMXReadVmcsField
-#else
-# if HC_ARCH_BITS == 32 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
-#  define VMXReadVmcsHstN                         VMXReadVmcs32
-#  define VMXReadVmcsGstN(idxField, pVal)         VMXReadCachedVmcsEx(pVCpu, idxField##_CACHE_IDX, pVal)
-#  define VMXReadVmcsGstNByIdxVal(idxField, pVal) VMXReadCachedVmcsEx(pVCpu, idxField, pVal)
-# else
-#  define VMXReadVmcsHstN                         VMXReadVmcs64
-#  define VMXReadVmcsGstN                         VMXReadVmcs64
-#  define VMXReadVmcsGstNByIdxVal                 VMXReadVmcs64
+#else /* !VBOX_WITH_OLD_VTX_CODE */
+# ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
+#  define VMXReadVmcsHstN(idxField, pVal)         HMVMX_IS_64BIT_HOST_MODE() ?                     \
+                                                      VMXReadVmcs64(idxField, pVal)                \
+                                                    : VMXReadVmcs32(idxField, (uint32_t *)pVal)
+#  define VMXReadVmcsGstN(idxField, pVal)         (pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests) ? \
+                                                      VMXReadVmcs64(idxField, pVal)                \
+                                                    : VMXReadVmcs32(idxField, (uint32_t *)pVal)
+#  define VMXReadVmcsGstNByIdxVal(idxField, pVal) VMXReadVmcsGstN(idxField, pVal)
+# elif HC_ARCH_BITS == 32
+#  define VMXReadVmcsHstN                         VMXReadVmcs32
+#  define VMXReadVmcsGstN(idxField, pVal)         VMXReadCachedVmcsEx(pVCpu, idxField##_CACHE_IDX, pVal)
+#  define VMXReadVmcsGstNByIdxVal(idxField, pVal) VMXReadCachedVmcsEx(pVCpu, idxField, pVal)
+# else /* HC_ARCH_BITS == 64 */
+#  define VMXReadVmcsHstN                         VMXReadVmcs64
+#  define VMXReadVmcsGstN                         VMXReadVmcs64
+#  define VMXReadVmcsGstNByIdxVal                 VMXReadVmcs64
 # endif
-#endif
+#endif /* !VBOX_WITH_OLD_VTX_CODE */

 #endif /* IN_RING0 */
}}}
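On plain 32-bit hosts the guest-field read macros go through a VMCS cache, token-pasting the field name onto _CACHE_IDX so call sites can keep naming the hardware field. Below is a small self-contained sketch of that token-pasting trick; the enum, cache array, ReadCached and ReadGstN are hypothetical stand-ins, not VirtualBox's actual cache.
{{{
#include <stdint.h>
#include <stdio.h>

/* Hypothetical dense cache of guest-natural-width VMCS fields; the real
   cache, its indices, and its reader live in VBox's VMX code, not here. */
enum { GUEST_RIP_CACHE_IDX, GUEST_RSP_CACHE_IDX, CACHE_ENTRIES };
static uint64_t g_au64Cache[CACHE_ENTRIES] = { 0x1000, 0x8000 };

static int ReadCached(unsigned idxCache, uint64_t *pVal)
{
    *pVal = g_au64Cache[idxCache];
    return 0;
}

/* Same token-pasting trick as the 32-bit-host VMXReadVmcsGstN: the field
   name is pasted onto _CACHE_IDX, so GUEST_RIP itself never needs to be
   a defined identifier; only GUEST_RIP_CACHE_IDX does. */
#define ReadGstN(field, pVal) ReadCached(field##_CACHE_IDX, (pVal))

int main(void)
{
    uint64_t u64Rip;
    ReadGstN(GUEST_RIP, &u64Rip);   /* expands to ReadCached(GUEST_RIP_CACHE_IDX, &u64Rip) */
    printf("cached GUEST_RIP = %#llx\n", (unsigned long long)u64Rip);
    return 0;
}
}}}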