Changeset 45912 in vbox for trunk/src/VBox/VMM
Timestamp: May 6, 2013 3:01:23 PM
File: 1 edited
Legend: in the diff below, lines prefixed with '-' were removed in r45912 and lines prefixed with '+' were added; unprefixed lines are unchanged context. Hunk markers '@@ NNNN @@' give the approximate r45908 line number, and lines containing only '…' mark elided code.
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
Changes from r45908 to r45912:

@@ 1814 @@
     AssertPtr(pVCpu);

+    /** @todo Shouldn't we able to avoid initializing with 0? */
     int rc = VERR_GENERAL_FAILURE;

     /* All CR3 accesses cause VM-exits. Later we optimize CR3 accesses (see hmR0VmxLoadGuestControlRegs())*/
-    rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_CR3_TARGET_COUNT, 0);
-
-    rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_TSC_OFFSET_FULL, 0);
+    rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_CR3_TARGET_COUNT, 0);           AssertRCReturn(rc, rc);
+    rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_TSC_OFFSET_FULL, 0);            AssertRCReturn(rc, rc);

     /*
…
      * We thus use the exception bitmap to control it rather than use both.
      */
-    rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK, 0);
-    rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH, 0);
+    rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK, 0);       AssertRCReturn(rc, rc);
+    rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH, 0);      AssertRCReturn(rc, rc);

     /** @todo Explore possibility of using IO-bitmaps. */
     /* All IO & IOIO instructions cause VM-exits. */
-    rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_IO_BITMAP_A_FULL, 0);
-    rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_IO_BITMAP_B_FULL, 0);
+    rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_IO_BITMAP_A_FULL, 0);           AssertRCReturn(rc, rc);
+    rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_IO_BITMAP_B_FULL, 0);           AssertRCReturn(rc, rc);
+
+    /* Initialize the MSR-bitmap area. */
+    rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, 0);       AssertRCReturn(rc, rc);
+    rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, 0);       AssertRCReturn(rc, rc);
+    rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, 0);        AssertRCReturn(rc, rc);

 #ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
-    /* Setup MSR autoloading/autostoring. */
+    /* Setup MSR autoloading/storing. */
     Assert(pVCpu->hm.s.vmx.HCPhysGuestMsr);
     Assert(!(pVCpu->hm.s.vmx.HCPhysGuestMsr & 0xf));    /* Lower 4 bits MBZ. */
-    rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL, pVCpu->hm.s.vmx.HCPhysGuestMsr);
-    rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL, pVCpu->hm.s.vmx.HCPhysGuestMsr);
-    rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, 0);
-    rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, 0);
+    rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL, pVCpu->hm.s.vmx.HCPhysGuestMsr);
+    AssertRCReturn(rc, rc);
+    rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL, pVCpu->hm.s.vmx.HCPhysGuestMsr);
+    AssertRCReturn(rc, rc);

     Assert(pVCpu->hm.s.vmx.HCPhysHostMsr);
     Assert(!(pVCpu->hm.s.vmx.HCPhysHostMsr & 0xf));     /* Lower 4 bits MBZ. */
-    rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL, pVCpu->hm.s.vmx.HCPhysHostMsr);
-    rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, 0);
-#else
-    rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, 0);
-    rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, 0);
-    rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, 0);
+    rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL, pVCpu->hm.s.vmx.HCPhysHostMsr);
+    AssertRCReturn(rc, rc);
 #endif

     /* Set VMCS link pointer. Reserved for future use, must be -1. Intel spec. 24.4 "Guest-State Area". */
-    rc |= VMXWriteVmcs64(VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, UINT64_C(0xffffffffffffffff));
+    rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, UINT64_C(0xffffffffffffffff));
+    AssertRCReturn(rc, rc);

     /* Setup debug controls */
-    rc |= VMXWriteVmcs64(VMX_VMCS64_GUEST_DEBUGCTL_FULL, 0);        /** @todo We don't support IA32_DEBUGCTL MSR. Should we? */
-    rc |= VMXWriteVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS, 0);
+    rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_DEBUGCTL_FULL, 0);         /** @todo We don't support IA32_DEBUGCTL MSR. Should we? */
+    AssertRCReturn(rc, rc);
+    rc = VMXWriteVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS, 0);
     AssertRCReturn(rc, rc);
     return rc;
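Every hunk in this changeset applies the same fix: `rc |= VMXWriteVmcs...` accumulation is replaced by a plain assignment followed immediately by `AssertRCReturn(rc, rc)`. IPRT status codes are ordinary integers, not bit flags, so OR-ing them only preserves "something failed": two different errors blend into a value that matches neither, and the first failing VMWRITE is noticed only after all later writes have executed anyway. A standalone sketch of the information loss (the numeric values below are invented for illustration; real codes are defined in iprt/err.h and VBox/err.h):

#include <stdio.h>

/* Invented status values, for illustration only. */
#define VINF_SUCCESS               0
#define VERR_WRITE_FIELD_FAILED   (-4000)
#define VERR_INVALID_VMCS_PTR     (-4001)

int main(void)
{
    int rc = VINF_SUCCESS;
    rc |= VERR_WRITE_FIELD_FAILED;   /* first failure */
    rc |= VERR_INVALID_VMCS_PTR;     /* second failure ORs more bits in */

    /* Prints -3969: a bitwise blend that matches neither original code,
       so the caller can no longer tell what actually went wrong. */
    printf("accumulated rc = %d\n", rc);
    return 0;
}

Checking after every call, as r45912 does, both pinpoints the failing field and stops touching the VMCS as soon as one write has failed.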
…
@@ 2055 @@
    RTCCUINTREG uReg = ASMGetCR0();
    int rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_CR0, uReg);
+   AssertRCReturn(rc, rc);

 #ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
…
    {
        uint64_t uRegCR3 = hmR0Get64bitCR3();
-       rc |= VMXWriteVmcs64(VMX_VMCS_HOST_CR3, uRegCR3);
+       rc = VMXWriteVmcs64(VMX_VMCS_HOST_CR3, uRegCR3);
    }
    else
…
    {
        uReg = ASMGetCR3();
-       rc |= VMXWriteVmcsHstN(VMX_VMCS_HOST_CR3, uReg);
-   }
+       rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_CR3, uReg);
+   }
+   AssertRCReturn(rc, rc);

    uReg = ASMGetCR4();
-   rc |= VMXWriteVmcsHstN(VMX_VMCS_HOST_CR4, uReg);
+   rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_CR4, uReg);
    AssertRCReturn(rc, rc);
    return rc;
…
@@ 2138 @@

    /* Write these host selector fields into the host-state area in the VMCS. */
-   rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_CS, uSelCS);
-   rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_SS, uSelSS);
+   rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_CS, uSelCS);
+   AssertRCReturn(rc, rc);
+   rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_SS, uSelSS);
+   AssertRCReturn(rc, rc);
    /* Avoid the VMWRITEs as we set the following segments to 0 and the VMCS fields are already 0 (since g_HvmR0 is static) */
 #if 0
-   rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_DS, uSelDS);
-   rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_ES, uSelES);
-   rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_FS, uSelFS);
-   rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_GS, uSelGS);
+   rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_DS, uSelDS);
+   AssertRCReturn(rc, rc);
+   rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_ES, uSelES);
+   AssertRCReturn(rc, rc);
+   rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_FS, uSelFS);
+   AssertRCReturn(rc, rc);
+   rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_GS, uSelGS);
+   AssertRCReturn(rc, rc);
 #endif
-   rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_TR, uSelTR);
+   rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_TR, uSelTR);
    AssertRCReturn(rc, rc);
…
@@ 2165 @@
        X86XDTR64 Idtr64;
        hmR0Get64bitGdtrAndIdtr(&Gdtr64, &Idtr64);
-       rc = VMXWriteVmcs64(VMX_VMCS_HOST_GDTR_BASE, Gdtr64.uAddr);
-       rc |= VMXWriteVmcs64(VMX_VMCS_HOST_IDTR_BASE, Idtr64.uAddr);
+
+       rc = VMXWriteVmcs64(VMX_VMCS_HOST_GDTR_BASE, Gdtr64.uAddr);
+       AssertRCReturn(rc, rc);
+       rc = VMXWriteVmcs64(VMX_VMCS_HOST_IDTR_BASE, Idtr64.uAddr);
+       AssertRCReturn(rc, rc);
+
        Gdtr.cbGdt = Gdtr64.cb;
        Gdtr.pGdt  = (uintptr_t)Gdtr64.uAddr;
…
@@ 2176 @@
        ASMGetGDTR(&Gdtr);
        ASMGetIDTR(&Idtr);
-       rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_GDTR_BASE, Gdtr.pGdt);
-       rc |= VMXWriteVmcsHstN(VMX_VMCS_HOST_IDTR_BASE, Idtr.pIdt);
-   }
-   AssertRCReturn(rc, rc);
+
+       rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_GDTR_BASE, Gdtr.pGdt);
+       AssertRCReturn(rc, rc);
+       rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_IDTR_BASE, Idtr.pIdt);
+       AssertRCReturn(rc, rc);
+   }
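For readers without the VirtualBox tree open: `AssertRCReturn` is an IPRT macro (iprt/assert.h) that checks a status code and returns the given value from the enclosing function on failure; strict builds additionally log and can trigger a breakpoint. A rough, compilable approximation of the control flow, with `vmxWriteField` as a made-up stand-in for the real VMCS writers:

#include <stdio.h>

#define RT_FAILURE(rc)  ((rc) < 0)      /* IPRT convention: negative = error */

/* Simplified stand-in for iprt/assert.h's AssertRCReturn. */
#define AssertRCReturn(rc, rcRet) \
    do { \
        if (RT_FAILURE(rc)) \
        { \
            fprintf(stderr, "assertion failed: rc=%d at %s:%d\n", (rc), __FILE__, __LINE__); \
            return (rcRet); \
        } \
    } while (0)

/* Made-up stand-in for a VMCS write so the sketch compiles and runs. */
static int vmxWriteField(unsigned idxField, unsigned uVal)
{
    (void)idxField; (void)uVal;
    return 0;  /* VINF_SUCCESS */
}

static int setupTwoFields(void)
{
    int rc = vmxWriteField(0x0c00, 0);
    AssertRCReturn(rc, rc);             /* bail out before touching the next field */
    rc = vmxWriteField(0x0c02, 0);
    AssertRCReturn(rc, rc);
    return rc;
}

int main(void)
{
    return setupTwoFields() < 0 ? 1 : 0;
}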
…
@@ 2222 @@
        uint64_t u64FSBase = ASMRdMsr(MSR_K8_FS_BASE);
        uint64_t u64GSBase = ASMRdMsr(MSR_K8_GS_BASE);
-       rc = VMXWriteVmcs64(VMX_VMCS_HOST_FS_BASE, u64FSBase);
-       rc |= VMXWriteVmcs64(VMX_VMCS_HOST_GS_BASE, u64GSBase);
+       rc = VMXWriteVmcs64(VMX_VMCS_HOST_FS_BASE, u64FSBase);
+       AssertRCReturn(rc, rc);
+       rc = VMXWriteVmcs64(VMX_VMCS_HOST_GS_BASE, u64GSBase);
        AssertRCReturn(rc, rc);
    }
…
@@ 2302 @@
     * Host Sysenter MSRs.
     */
-   rc |= VMXWriteVmcs32(VMX_VMCS32_HOST_SYSENTER_CS, ASMRdMsr_Low(MSR_IA32_SYSENTER_CS));
+   rc = VMXWriteVmcs32(VMX_VMCS32_HOST_SYSENTER_CS, ASMRdMsr_Low(MSR_IA32_SYSENTER_CS));
+   AssertRCReturn(rc, rc);
 # ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    if (HMVMX_IS_64BIT_HOST_MODE())
    {
-       rc |= VMXWriteVmcs64(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr(MSR_IA32_SYSENTER_ESP));
-       rc |= VMXWriteVmcs64(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr(MSR_IA32_SYSENTER_EIP));
+       rc = VMXWriteVmcs64(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr(MSR_IA32_SYSENTER_ESP));
+       AssertRCReturn(rc, rc);
+       rc = VMXWriteVmcs64(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr(MSR_IA32_SYSENTER_EIP));
    }
    else
    {
-       rc |= VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr_Low(MSR_IA32_SYSENTER_ESP));
-       rc |= VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr_Low(MSR_IA32_SYSENTER_EIP));
+       rc = VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr_Low(MSR_IA32_SYSENTER_ESP));
+       AssertRCReturn(rc, rc);
+       rc = VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr_Low(MSR_IA32_SYSENTER_EIP));
    }
 # elif HC_ARCH_BITS == 32
-   rc |= VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr_Low(MSR_IA32_SYSENTER_ESP));
-   rc |= VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr_Low(MSR_IA32_SYSENTER_EIP));
+   rc = VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr_Low(MSR_IA32_SYSENTER_ESP));
+   AssertRCReturn(rc, rc);
+   rc = VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr_Low(MSR_IA32_SYSENTER_EIP));
 # else
-   rc |= VMXWriteVmcs64(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr(MSR_IA32_SYSENTER_ESP));
-   rc |= VMXWriteVmcs64(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr(MSR_IA32_SYSENTER_EIP));
+   rc = VMXWriteVmcs64(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr(MSR_IA32_SYSENTER_ESP));
+   AssertRCReturn(rc, rc);
+   rc = VMXWriteVmcs64(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr(MSR_IA32_SYSENTER_EIP));
 # endif
    AssertRCReturn(rc, rc);
…
@@ 2681 @@
  * @remarks No-long-jump zone!!!
  */
-static int hmR0VmxLoadGuestRipRspRflags(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
+DECLINLINE(int) hmR0VmxLoadGuestRipRspRflags(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
 {
    int rc = hmR0VmxLoadGuestRip(pVCpu, pMixedCtx);
-   rc |= hmR0VmxLoadGuestRsp(pVCpu, pMixedCtx);
-   rc |= hmR0VmxLoadGuestRflags(pVCpu, pMixedCtx);
+   AssertRCReturn(rc, rc);
+   rc = hmR0VmxLoadGuestRsp(pVCpu, pMixedCtx);
+   AssertRCReturn(rc, rc);
+   rc = hmR0VmxLoadGuestRflags(pVCpu, pMixedCtx);
+   AssertRCReturn(rc, rc);
    return rc;
 }
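The last hunk above also switches hmR0VmxLoadGuestRipRspRflags from `static int` to `DECLINLINE(int)`. DECLINLINE is VirtualBox's portable spelling of `static inline` (iprt/cdefs.h); a simplified rendering follows, the real macro also covers other compilers and kernel-mode peculiarities:

/* Simplified rendering of the iprt/cdefs.h macro. */
#if defined(__GNUC__)
# define DECLINLINE(type)   static __inline__ type
#else
# define DECLINLINE(type)   static inline type
#endif

/* Usage mirrors the changed signature above: */
DECLINLINE(int) addTwo(int x)
{
    return x + 2;
}

int main(void)
{
    return addTwo(40) == 42 ? 0 : 1;
}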
…
@@ 2823 @@

    /* Write VT-x's view of the guest CR0 into the VMCS and update the exception bitmap. */
-   rc = VMXWriteVmcs32(VMX_VMCS_GUEST_CR0, u32GuestCR0);
-   rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVCpu->hm.s.vmx.u32XcptBitmap);
+   rc = VMXWriteVmcs32(VMX_VMCS_GUEST_CR0, u32GuestCR0);
+   AssertRCReturn(rc, rc);
+   rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVCpu->hm.s.vmx.u32XcptBitmap);
+   AssertRCReturn(rc, rc);
    Log(("Load: VMX_VMCS_GUEST_CR0=%#RX32 (uSetCR0=%#RX32 uZapCR0=%#RX32)\n", u32GuestCR0, uSetCR0, uZapCR0));
…
@@ 2853 @@
    /* Write the CR0 mask into the VMCS and update the VCPU's copy of the current CR0 mask. */
    pVCpu->hm.s.vmx.cr0_mask = u32CR0Mask;
-   rc |= VMXWriteVmcs32(VMX_VMCS_CTRL_CR0_MASK, u32CR0Mask);
+   rc = VMXWriteVmcs32(VMX_VMCS_CTRL_CR0_MASK, u32CR0Mask);
    AssertRCReturn(rc, rc);
…
@@ 2898 @@
            if (CPUMIsGuestInPAEModeEx(pCtx))
            {
-               rc = PGMGstGetPaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]);
-               rc |= VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE0_FULL, pVCpu->hm.s.aPdpes[0].u);
-               rc |= VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE1_FULL, pVCpu->hm.s.aPdpes[1].u);
-               rc |= VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE2_FULL, pVCpu->hm.s.aPdpes[2].u);
-               rc |= VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE3_FULL, pVCpu->hm.s.aPdpes[3].u);
+               rc = PGMGstGetPaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]);                       AssertRCReturn(rc, rc);
+               rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE0_FULL, pVCpu->hm.s.aPdpes[0].u);  AssertRCReturn(rc, rc);
+               rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE1_FULL, pVCpu->hm.s.aPdpes[1].u);  AssertRCReturn(rc, rc);
+               rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE2_FULL, pVCpu->hm.s.aPdpes[2].u);  AssertRCReturn(rc, rc);
+               rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE3_FULL, pVCpu->hm.s.aPdpes[3].u);  AssertRCReturn(rc, rc);
                AssertRCReturn(rc, rc);
            }
…
@@ 3025 @@
        Log(("Load: VMX_VMCS_GUEST_CR4=%#RX32 (Set=%#RX32 Zap=%#RX32)\n", u32GuestCR4, uSetCR4, uZapCR4));
        rc = VMXWriteVmcs32(VMX_VMCS_GUEST_CR4, u32GuestCR4);
+       AssertRCReturn(rc, rc);

        /* Setup CR4 mask. CR4 flags owned by the host, if the guest attempts to change them, that would cause a VM exit. */
…
@@ 3034 @@
                          | X86_CR4_VMXE;
        pVCpu->hm.s.vmx.cr4_mask = u32CR4Mask;
-       rc |= VMXWriteVmcs32(VMX_VMCS_CTRL_CR4_MASK, u32CR4Mask);
+       rc = VMXWriteVmcs32(VMX_VMCS_CTRL_CR4_MASK, u32CR4Mask);
        AssertRCReturn(rc, rc);
…
@@ 3132 @@
        pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT;

-   rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVCpu->hm.s.vmx.u32XcptBitmap);
-   rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.u32ProcCtls);
+   rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVCpu->hm.s.vmx.u32XcptBitmap);
+   AssertRCReturn(rc, rc);
+   rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.u32ProcCtls);
+   AssertRCReturn(rc, rc);

    /* The guest's view of its DR7 is unblemished. */
-   rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_DR7, pMixedCtx->dr[7]);
+   rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_DR7, pMixedCtx->dr[7]);
+   AssertRCReturn(rc, rc);

    pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_DEBUG;
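The CR0/CR4 mask writes above (`VMX_VMCS_CTRL_CR0_MASK`, `VMX_VMCS_CTRL_CR4_MASK`) declare which control-register bits the host owns: guest writes to owned bits cause VM-exits, and guest reads of owned bits are served from the read shadow instead of the live register. The sync-back code later in this changeset recombines the two with `(uShadow & mask) | (uVal & ~mask)`. A self-contained toy model with invented bit values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Invented example values. Host owns PG (bit 31), NE (bit 5), PE (bit 0). */
    uint32_t cr0Mask   = UINT32_C(0x80000021);
    uint32_t cr0Shadow = UINT32_C(0x80000011);  /* guest last wrote NE=0 */
    uint32_t cr0Real   = UINT32_C(0x80000039);  /* CPU really runs with NE=1 (plus TS, ET) */

    /* Owned bits come from the shadow, everything else from the live value:
       the same recombination the changeset uses when syncing CR4 back. */
    uint32_t cr0GuestView = (cr0Shadow & cr0Mask) | (cr0Real & ~cr0Mask);

    printf("guest-visible CR0 = %#x\n", cr0GuestView);  /* 0x80000019: NE reads as 0 */
    return 0;
}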
…
@@ 3334 @@
                                       uint32_t idxAccess, PCPUMSELREG pSelReg, PCPUMCTX pCtx)
 {
-   int rc;
-   rc = VMXWriteVmcs32(idxSel, pSelReg->Sel);          /* 16-bit guest selector field. */
-   rc |= VMXWriteVmcs32(idxLimit, pSelReg->u32Limit);  /* 32-bit guest segment limit field. */
-   rc |= VMXWriteVmcsGstN(idxBase, pSelReg->u64Base);  /* Natural width guest segment base field.*/
+   int rc = VMXWriteVmcs32(idxSel, pSelReg->Sel);      /* 16-bit guest selector field. */
+   AssertRCReturn(rc, rc);
+   rc = VMXWriteVmcs32(idxLimit, pSelReg->u32Limit);   /* 32-bit guest segment limit field. */
+   AssertRCReturn(rc, rc);
+   rc = VMXWriteVmcsGstN(idxBase, pSelReg->u64Base);   /* Natural width guest segment base field.*/
    AssertRCReturn(rc, rc);
…
@@ 3421 @@
        }
 #endif
-       rc =  hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_CS, VMX_VMCS32_GUEST_CS_LIMIT, VMX_VMCS_GUEST_CS_BASE,
+       rc = hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_CS, VMX_VMCS32_GUEST_CS_LIMIT, VMX_VMCS_GUEST_CS_BASE,
                                     VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS, &pMixedCtx->cs, pMixedCtx);
+       AssertRCReturn(rc, rc);
-       rc |= hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_SS, VMX_VMCS32_GUEST_SS_LIMIT, VMX_VMCS_GUEST_SS_BASE,
+       rc = hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_SS, VMX_VMCS32_GUEST_SS_LIMIT, VMX_VMCS_GUEST_SS_BASE,
                                     VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS, &pMixedCtx->ss, pMixedCtx);
+       AssertRCReturn(rc, rc);
-       rc |= hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_DS, VMX_VMCS32_GUEST_DS_LIMIT, VMX_VMCS_GUEST_DS_BASE,
+       rc = hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_DS, VMX_VMCS32_GUEST_DS_LIMIT, VMX_VMCS_GUEST_DS_BASE,
                                     VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS, &pMixedCtx->ds, pMixedCtx);
+       AssertRCReturn(rc, rc);
-       rc |= hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_ES, VMX_VMCS32_GUEST_ES_LIMIT, VMX_VMCS_GUEST_ES_BASE,
+       rc = hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_ES, VMX_VMCS32_GUEST_ES_LIMIT, VMX_VMCS_GUEST_ES_BASE,
                                     VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS, &pMixedCtx->es, pMixedCtx);
+       AssertRCReturn(rc, rc);
-       rc |= hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_FS, VMX_VMCS32_GUEST_FS_LIMIT, VMX_VMCS_GUEST_FS_BASE,
+       rc = hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_FS, VMX_VMCS32_GUEST_FS_LIMIT, VMX_VMCS_GUEST_FS_BASE,
                                     VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS, &pMixedCtx->fs, pMixedCtx);
+       AssertRCReturn(rc, rc);
-       rc |= hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_GS, VMX_VMCS32_GUEST_GS_LIMIT, VMX_VMCS_GUEST_GS_BASE,
+       rc = hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_GS, VMX_VMCS32_GUEST_GS_LIMIT, VMX_VMCS_GUEST_GS_BASE,
                                     VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS, &pMixedCtx->gs, pMixedCtx);
        AssertRCReturn(rc, rc);
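Each write in the new code carries its own `AssertRCReturn(rc, rc)`, which is verbose. One could fold the pair into a local helper macro; the sketch below is hypothetical and not part of this changeset (`VMXWriteVmcs32` and `AssertRCReturn` are the real names used in the diff):

/* Hypothetical helper -- NOT introduced by this changeset; sketched only to
   show how the repeated write-and-bail pair could be folded into one line. */
#define VMX_WRITE32_OR_RETURN(idxField, uVal) \
    do { \
        int rcTmp_ = VMXWriteVmcs32((idxField), (uVal)); \
        AssertRCReturn(rcTmp_, rcTmp_); \
    } while (0)

The changeset instead keeps every check spelled out, often on the same line as the write; that keeps `rc` in scope for the trailing `return rc;` and makes each VMCS field write individually greppable.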
…
@@ 3498 @@
              || (u32AccessRights & RT_BIT(15)));          /* Granularity MB1. */

-   rc = VMXWriteVmcs32(VMX_VMCS16_GUEST_FIELD_TR, u16Sel);
-   rc |= VMXWriteVmcs32(VMX_VMCS32_GUEST_TR_LIMIT, u32Limit);
-   rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_TR_BASE, u64Base);
-   rc |= VMXWriteVmcs32(VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, u32AccessRights);
-   AssertRCReturn(rc, rc);
+   rc = VMXWriteVmcs32(VMX_VMCS16_GUEST_FIELD_TR, u16Sel);                    AssertRCReturn(rc, rc);
+   rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_TR_LIMIT, u32Limit);                  AssertRCReturn(rc, rc);
+   rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_TR_BASE, u64Base);                    AssertRCReturn(rc, rc);
+   rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, u32AccessRights);   AssertRCReturn(rc, rc);

    Log(("Load: VMX_VMCS_GUEST_TR_BASE=%#RX64\n", u64Base));
…
@@ 3513 @@
    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_GDTR)
    {
-       rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, pMixedCtx->gdtr.cbGdt);
-       rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_GDTR_BASE, pMixedCtx->gdtr.pGdt);
-       AssertRCReturn(rc, rc);
+       rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, pMixedCtx->gdtr.cbGdt);   AssertRCReturn(rc, rc);
+       rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_GDTR_BASE, pMixedCtx->gdtr.pGdt);     AssertRCReturn(rc, rc);

        Assert(!(pMixedCtx->gdtr.cbGdt & UINT64_C(0xffff0000)));    /* Bits 31:16 MBZ. */
…
@@ 3534 @@
            u32Access = pMixedCtx->ldtr.Attr.u;

-       rc = VMXWriteVmcs32(VMX_VMCS16_GUEST_FIELD_LDTR, pMixedCtx->ldtr.Sel);
-       rc |= VMXWriteVmcs32(VMX_VMCS32_GUEST_LDTR_LIMIT, pMixedCtx->ldtr.u32Limit);
-       rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_LDTR_BASE, pMixedCtx->ldtr.u64Base);
-       rc |= VMXWriteVmcs32(VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, u32Access);
-       AssertRCReturn(rc, rc);
+       rc = VMXWriteVmcs32(VMX_VMCS16_GUEST_FIELD_LDTR, pMixedCtx->ldtr.Sel);         AssertRCReturn(rc, rc);
+       rc |= VMXWriteVmcs32(VMX_VMCS32_GUEST_LDTR_LIMIT, pMixedCtx->ldtr.u32Limit);   AssertRCReturn(rc, rc);
+       rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_LDTR_BASE, pMixedCtx->ldtr.u64Base);     AssertRCReturn(rc, rc);
+       rc |= VMXWriteVmcs32(VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, u32Access);          AssertRCReturn(rc, rc);

        /* Validate. */
…
@@ 3564 @@
    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_IDTR)
    {
-       rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, pMixedCtx->idtr.cbIdt);
-       rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_IDTR_BASE, pMixedCtx->idtr.pIdt);
-       AssertRCReturn(rc, rc);
+       rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, pMixedCtx->idtr.cbIdt);   AssertRCReturn(rc, rc);
+       rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_IDTR_BASE, pMixedCtx->idtr.pIdt);     AssertRCReturn(rc, rc);

        Assert(!(pMixedCtx->idtr.cbIdt & UINT64_C(0xffff0000)));    /* Bits 31:16 MBZ. */
…
@@ 3666 @@
    /* Update the VCPU's copy of the guest MSR count. */
    pVCpu->hm.s.vmx.cGuestMsrs = cGuestMsrs;
-   rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, cGuestMsrs);
-   rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, cGuestMsrs);
-   AssertRCReturn(rc, rc);
+   rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, cGuestMsrs);    AssertRCReturn(rc, rc);
+   rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, cGuestMsrs);    AssertRCReturn(rc, rc);
 #endif  /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */
…
@@ 3681 @@
    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_SYSENTER_CS_MSR)
    {
-       rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_SYSENTER_CS, pMixedCtx->SysEnter.cs);
+       rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_SYSENTER_CS, pMixedCtx->SysEnter.cs);     AssertRCReturn(rc, rc);
        pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_SYSENTER_CS_MSR;
    }
    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_SYSENTER_EIP_MSR)
    {
-       rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_SYSENTER_EIP, pMixedCtx->SysEnter.eip);
+       rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_SYSENTER_EIP, pMixedCtx->SysEnter.eip);   AssertRCReturn(rc, rc);
        pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_SYSENTER_EIP_MSR;
    }
    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_SYSENTER_ESP_MSR)
    {
-       rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_SYSENTER_ESP, pMixedCtx->SysEnter.esp);
+       rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_SYSENTER_ESP, pMixedCtx->SysEnter.esp);   AssertRCReturn(rc, rc);
        pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_SYSENTER_ESP_MSR;
    }
-   AssertRCReturn(rc, rc);

    return rc;
…
@@ 4762 @@
        uint32_t uVal    = 0;
        uint32_t uShadow = 0;
-       rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR4, &uVal);
-       rc |= VMXReadVmcs32(VMX_VMCS_CTRL_CR4_READ_SHADOW, &uShadow);
+       rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR4, &uVal);
        AssertRCReturn(rc, rc);
+       rc = VMXReadVmcs32(VMX_VMCS_CTRL_CR4_READ_SHADOW, &uShadow);
+       AssertRCReturn(rc, rc);
+
        uVal = (uShadow & pVCpu->hm.s.vmx.cr4_mask) | (uVal & ~pVCpu->hm.s.vmx.cr4_mask);
        CPUMSetGuestCR4(pVCpu, uVal);
…
@@ 4792 @@
        rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RIP, &uVal);
        AssertRCReturn(rc, rc);
+
        pMixedCtx->rip = uVal;
        pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_RIP;
…
@@ 4818 @@
        rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RSP, &uVal);
        AssertRCReturn(rc, rc);
+
        pMixedCtx->rsp = uVal;
        pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_RSP;
…
@@ 4843 @@
        int rc = VMXReadVmcs32(VMX_VMCS_GUEST_RFLAGS, &uVal);
        AssertRCReturn(rc, rc);
+
        pMixedCtx->eflags.u32 = uVal;
-
-       /* Undo our real-on-v86-mode changes to eflags if necessary. */
-       if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
-       {
-           PVM pVM = pVCpu->CTX_SUFF(pVM);
-           Assert(pVM->hm.s.vmx.pRealModeTSS);
-           Log(("Saving real-mode RFLAGS VT-x view=%#RX64\n", pMixedCtx->rflags.u64));
+       if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)  /* Undo our real-on-v86-mode changes to eflags if necessary. */
+       {
+           Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
+           Log(("Saving real-mode EFLAGS VT-x view=%#RX32\n", pMixedCtx->eflags.u32));
+
            pMixedCtx->eflags.Bits.u1VM   = 0;
            pMixedCtx->eflags.Bits.u2IOPL = pVCpu->hm.s.vmx.RealMode.eflags.Bits.u2IOPL;
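The real-on-v86 hunk above edits EFLAGS through CPUM's bitfield view (`eflags.Bits.u1VM`, `eflags.Bits.u2IOPL`): when VirtualBox emulates real mode by running the guest in virtual-8086 mode, it forces VM=1 and an elevated IOPL on entry and must strip those again before handing EFLAGS back to the rest of the VMM. A toy model of that bitfield access (bit-field layout is compiler-dependent in general; CPUM's real types are ordered for the supported compilers):

#include <stdint.h>
#include <stdio.h>

/* Toy EFLAGS view, following the x86 register layout. */
typedef union TOYEFLAGS
{
    uint32_t u32;
    struct
    {
        uint32_t uLow12  : 12;  /* bits 0-11: CF..OF, collapsed for brevity */
        uint32_t u2IOPL  : 2;   /* bits 12-13: I/O privilege level */
        uint32_t u1NT    : 1;   /* bit 14: nested task */
        uint32_t u1Rsvd  : 1;   /* bit 15 */
        uint32_t u1RF    : 1;   /* bit 16: resume flag */
        uint32_t u1VM    : 1;   /* bit 17: virtual-8086 mode */
        uint32_t uHigh14 : 14;  /* bits 18-31 */
    } Bits;
} TOYEFLAGS;

int main(void)
{
    TOYEFLAGS Eflags;
    Eflags.u32 = UINT32_C(0x23202);   /* IF=1, IOPL=3, VM=1, as forced for v86 mode */

    Eflags.Bits.u1VM   = 0;           /* leave the virtual-8086 view */
    Eflags.Bits.u2IOPL = 0;           /* restore the guest's own IOPL */

    printf("guest EFLAGS = %#x\n", Eflags.u32);   /* 0x202 */
    return 0;
}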
…
@@ 4898 @@
                  || uIntrState == VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS);
        rc = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
-       rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);    /* for hmR0VmxGetGuestIntrState(). */
        AssertRC(rc);
+       rc = hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);     /* for hmR0VmxGetGuestIntrState(). */
+       AssertRC(rc);
+
        EMSetInhibitInterruptsPC(pVCpu, pMixedCtx->rip);
        Assert(VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
…
@@ 5077 @@
    /* Guest CR0. Guest FPU. */
    int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
+   AssertRCReturn(rc, rc);

    /* Guest CR4. */
-   rc |= hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx);
+   rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx);
    AssertRCReturn(rc, rc);
…
@@ 5099 @@

        /* We require EFER to check PAE mode. */
-       rc |= hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx);
+       rc = hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx);
+       AssertRCReturn(rc, rc);

        /* If the guest is in PAE mode, sync back the PDPE's into the guest state. */
        if (CPUMIsGuestInPAEModeEx(pMixedCtx))  /* Reads CR0, CR4 and EFER MSR. */
        {
-           rc |= VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE0_FULL, &pVCpu->hm.s.aPdpes[0].u);
-           rc |= VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE1_FULL, &pVCpu->hm.s.aPdpes[1].u);
-           rc |= VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE2_FULL, &pVCpu->hm.s.aPdpes[2].u);
-           rc |= VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE3_FULL, &pVCpu->hm.s.aPdpes[3].u);
+           rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE0_FULL, &pVCpu->hm.s.aPdpes[0].u);   AssertRCReturn(rc, rc);
+           rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE1_FULL, &pVCpu->hm.s.aPdpes[1].u);   AssertRCReturn(rc, rc);
+           rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE2_FULL, &pVCpu->hm.s.aPdpes[2].u);   AssertRCReturn(rc, rc);
+           rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE3_FULL, &pVCpu->hm.s.aPdpes[3].u);   AssertRCReturn(rc, rc);
            /* Set the force flag to inform PGM about it when necessary. It is cleared by PGMGstUpdatePaePdpes. */
            VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES);
        }
-       AssertRCReturn(rc, rc);
    }
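The MSR sync above has to run before `CPUMIsGuestInPAEModeEx`, because deciding whether the guest PDPTEs are meaningful needs CR0, CR4 and EFER together: PDPTEs exist only with paging on, PAE enabled, and long mode off. A plain-C restatement of that predicate under those assumptions (the constants spell out the architectural bit positions; the real check lives in CPUM):

#include <stdint.h>
#include <stdio.h>

#define X86_CR0_PG    UINT32_C(0x80000000)  /* bit 31: paging enabled */
#define X86_CR4_PAE   UINT32_C(0x00000020)  /* bit 5:  physical address extension */
#define MSR_EFER_LMA  UINT64_C(0x00000400)  /* bit 10: long mode active */

/* PDPTEs only exist under 32-bit PAE paging: paging on, PAE on, long mode
   off (long mode uses 4-level paging, so the PDPTE registers are not used). */
static int isGuestInPaePagingMode(uint32_t cr0, uint32_t cr4, uint64_t efer)
{
    return (cr0 & X86_CR0_PG)
        && (cr4 & X86_CR4_PAE)
        && !(efer & MSR_EFER_LMA);
}

int main(void)
{
    printf("%d\n", isGuestInPaePagingMode(X86_CR0_PG, X86_CR4_PAE, 0));             /* 1 */
    printf("%d\n", isGuestInPaePagingMode(X86_CR0_PG, X86_CR4_PAE, MSR_EFER_LMA));  /* 0 */
    return 0;
}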
…
@@ 5262 @@
    if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_GDTR))
    {
-       rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_GDTR_BASE, &uGCVal);
-       rc |= VMXReadVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val);   AssertRCReturn(rc, rc);
+       rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_GDTR_BASE, &uGCVal);     AssertRCReturn(rc, rc);
+       rc = VMXReadVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val);    AssertRCReturn(rc, rc);
        pMixedCtx->gdtr.pGdt  = uGCVal;
        pMixedCtx->gdtr.cbGdt = u32Val;
…
@@ 5272 @@
    if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_IDTR))
    {
-       rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_IDTR_BASE, &uGCVal);
-       rc |= VMXReadVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val);   AssertRCReturn(rc, rc);
+       rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_IDTR_BASE, &uGCVal);     AssertRCReturn(rc, rc);
+       rc = VMXReadVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val);    AssertRCReturn(rc, rc);
        pMixedCtx->idtr.pIdt  = uGCVal;
        pMixedCtx->idtr.cbIdt = u32Val;
…
@@ 5283 @@
    {
        rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
+       AssertRCReturn(rc, rc);

        /* For real-mode emulation using virtual-8086 mode we have the fake TSS (pRealModeTSS) in TR, don't save the fake one. */
        if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
-           rc |= VMXLOCAL_READ_SEG(TR, tr);
-       AssertRCReturn(rc, rc);
+       {
+           rc = VMXLOCAL_READ_SEG(TR, tr);
+           AssertRCReturn(rc, rc);
+       }
        pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_TR;
    }
…
@@ 5937 @@
             */
            rc2 = VMXWriteVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS, VMX_VMCS_GUEST_DEBUG_EXCEPTIONS_BS);
+           AssertRCReturn(rc, rc);
        }
    }
…
@@ 5951 @@
     * VT-x clears the valid bit on every VM-exit. See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
     */
-   rc2 |= hmR0VmxLoadGuestIntrState(pVCpu, uIntrState);
+   rc2 = hmR0VmxLoadGuestIntrState(pVCpu, uIntrState);
    AssertRC(rc2);