Changeset 92219 in vbox
- Timestamp: Nov 4, 2021 7:17:05 PM (4 years ago)
- svn:sync-xref-src-repo-rev: 148052
- File: 1 edited
trunk/src/VBox/VMM/VMMAll/VMXAllTemplate.cpp.h
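The hunks below repeatedly wrap ring-0-only code in `#ifdef IN_RING0` and replace direct `pVCpu->hm.s.` field accesses with the `VCPU_2_VMXSTATE(pVCpu)` accessor, so that this template header can be compiled for more than one execution context. As a minimal sketch of that pattern (the macro body shown here is an assumption for illustration and is not part of this changeset; only the usage line is taken from the diff):

```c
/* Hypothetical sketch of the context-dispatch accessor applied throughout the
 * hunks below. The real definition lives elsewhere in the VMM sources and may
 * differ; the point is that one name resolves to the per-VCPU state block that
 * matches the compilation context. */
#ifdef IN_RING0
# define VCPU_2_VMXSTATE(a_pVCpu)   ((a_pVCpu)->hm.s)    /* ring-0 HM state (assumed) */
#else
# define VCPU_2_VMXSTATE(a_pVCpu)   ((a_pVCpu)->nem.s)   /* ring-3 state (assumed) */
#endif

/* Usage as in the rewritten profiling macro from this changeset: */
#define HMVMX_START_EXIT_DISPATCH_PROF() \
    STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATE(pVCpu).StatExitDispatch, ed)
```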
r92189 r92219 119 119 /** Profiling macro. */ 120 120 #ifdef HM_PROFILE_EXIT_DISPATCH 121 # define HMVMX_START_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_START(& pVCpu->hm.s.StatExitDispatch, ed)122 # define HMVMX_STOP_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_STOP(& pVCpu->hm.s.StatExitDispatch, ed)121 # define HMVMX_START_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATE(pVCpu).StatExitDispatch, ed) 122 # define HMVMX_STOP_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATE(pVCpu).StatExitDispatch, ed) 123 123 #else 124 124 # define HMVMX_START_EXIT_DISPATCH_PROF() do { } while (0) … … 126 126 #endif 127 127 128 #ifdef IN_RING0 128 129 /** Assert that preemption is disabled or covered by thread-context hooks. */ 129 # define HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu) Assert( VMMR0ThreadCtxHookIsEnabled((a_pVCpu)) \130 || !RTThreadPreemptIsEnabled(NIL_RTTHREAD))130 # define HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu) Assert( VMMR0ThreadCtxHookIsEnabled((a_pVCpu)) \ 131 || !RTThreadPreemptIsEnabled(NIL_RTTHREAD)) 131 132 132 133 /** Assert that we haven't migrated CPUs when thread-context hooks are not 133 134 * used. */ 134 #define HMVMX_ASSERT_CPU_SAFE(a_pVCpu) AssertMsg( VMMR0ThreadCtxHookIsEnabled((a_pVCpu)) \ 135 || (a_pVCpu)->hmr0.s.idEnteredCpu == RTMpCpuId(), \ 136 ("Illegal migration! Entered on CPU %u Current %u\n", \ 137 (a_pVCpu)->hmr0.s.idEnteredCpu, RTMpCpuId())) 135 # define HMVMX_ASSERT_CPU_SAFE(a_pVCpu) AssertMsg( VMMR0ThreadCtxHookIsEnabled((a_pVCpu)) \ 136 || (a_pVCpu)->hmr0.s.idEnteredCpu == RTMpCpuId(), \ 137 ("Illegal migration! Entered on CPU %u Current %u\n", \ 138 (a_pVCpu)->hmr0.s.idEnteredCpu, RTMpCpuId())) 139 #else 140 # define HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu) do { } while (0) 141 # define HMVMX_ASSERT_CPU_SAFE(a_pVCpu) do { } while (0) 142 #endif 138 143 139 144 /** Asserts that the given CPUMCTX_EXTRN_XXX bits are present in the guest-CPU … … 839 844 840 845 846 #ifdef IN_RING0 841 847 /** 842 848 * Checks if the given MSR is part of the lastbranch-from-IP MSR stack. … … 894 900 return false; 895 901 } 902 #endif 896 903 897 904 … … 919 926 * enmGuestMode to be in-sync with the current mode. See @bugref{6398} 920 927 * and @bugref{6944}. */ 928 #ifdef IN_RING0 921 929 PCVMCC pVM = pVCpu->CTX_SUFF(pVM); 930 #else 931 RT_NOREF(pVCpu); 932 #endif 922 933 return ( X86_CR0_PE 923 934 | X86_CR0_NE 935 #ifdef IN_RING0 924 936 | (pVM->hmr0.s.fNestedPaging ? 0 : X86_CR0_WP) 937 #endif 925 938 | X86_CR0_PG 926 939 | VMX_EXIT_HOST_CR0_IGNORE_MASK); … … 978 991 | (fFxSaveRstor ? X86_CR4_OSFXSR : 0)); 979 992 return ~fGstMask; 980 }981 982 983 /**984 * Gets the active (in use) VMCS info. object for the specified VCPU.985 *986 * This is either the guest or nested-guest VMCS info. and need not necessarily987 * pertain to the "current" VMCS (in the VMX definition of the term). For instance,988 * if the VM-entry failed due to an invalid-guest state, we may have "cleared" the989 * current VMCS while returning to ring-3. However, the VMCS info. object for that990 * VMCS would still be active and returned here so that we could dump the VMCS991 * fields to ring-3 for diagnostics. 
This function is thus only used to992 * distinguish between the nested-guest or guest VMCS.993 *994 * @returns The active VMCS information.995 * @param pVCpu The cross context virtual CPU structure.996 *997 * @thread EMT.998 * @remarks This function may be called with preemption or interrupts disabled!999 */1000 DECLINLINE(PVMXVMCSINFO) hmGetVmxActiveVmcsInfo(PVMCPUCC pVCpu)1001 {1002 if (!pVCpu->hmr0.s.vmx.fSwitchedToNstGstVmcs)1003 return &pVCpu->hmr0.s.vmx.VmcsInfo;1004 return &pVCpu->hmr0.s.vmx.VmcsInfoNstGst;1005 993 } 1006 994 … … 1165 1153 { 1166 1154 /* Validate we are not removing any essential exception intercepts. */ 1155 #ifdef IN_RING0 1167 1156 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging || !(uXcptMask & RT_BIT(X86_XCPT_PF))); 1157 #else 1158 Assert(!(uXcptMask & RT_BIT(X86_XCPT_PF))); 1159 #endif 1168 1160 NOREF(pVCpu); 1169 1161 Assert(!(uXcptMask & RT_BIT(X86_XCPT_DB))); … … 1201 1193 1202 1194 1203 /**1204 * Loads the VMCS specified by the VMCS info. object.1205 *1206 * @returns VBox status code.1207 * @param pVmcsInfo The VMCS info. object.1208 *1209 * @remarks Can be called with interrupts disabled.1210 */1211 static int vmxHCLoadVmcs(PVMXVMCSINFO pVmcsInfo)1212 {1213 Assert(pVmcsInfo->HCPhysVmcs != 0 && pVmcsInfo->HCPhysVmcs != NIL_RTHCPHYS);1214 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));1215 1216 int rc = VMXLoadVmcs(pVmcsInfo->HCPhysVmcs);1217 if (RT_SUCCESS(rc))1218 pVmcsInfo->fVmcsState |= VMX_V_VMCS_LAUNCH_STATE_CURRENT;1219 return rc;1220 }1221 1222 1223 /**1224 * Clears the VMCS specified by the VMCS info. object.1225 *1226 * @returns VBox status code.1227 * @param pVmcsInfo The VMCS info. object.1228 *1229 * @remarks Can be called with interrupts disabled.1230 */1231 static int vmxHCClearVmcs(PVMXVMCSINFO pVmcsInfo)1232 {1233 Assert(pVmcsInfo->HCPhysVmcs != 0 && pVmcsInfo->HCPhysVmcs != NIL_RTHCPHYS);1234 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));1235 1236 int rc = VMXClearVmcs(pVmcsInfo->HCPhysVmcs);1237 if (RT_SUCCESS(rc))1238 pVmcsInfo->fVmcsState = VMX_V_VMCS_LAUNCH_STATE_CLEAR;1239 return rc;1240 }1241 1242 1243 1195 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX 1244 1196 /** … … 1373 1325 { 1374 1326 pVCpu->hmr0.s.vmx.fSwitchedToNstGstVmcs = fSwitchToNstGstVmcs; 1375 pVCpu->hm.s.vmx.fSwitchedToNstGstVmcsCopyForRing3 = fSwitchToNstGstVmcs;1327 VCPU_2_VMXSTATE(pVCpu).vmx.fSwitchedToNstGstVmcsCopyForRing3 = fSwitchToNstGstVmcs; 1376 1328 1377 1329 /* … … 1389 1341 { /* likely */ } 1390 1342 else 1391 ASMAtomicUoOrU64(& pVCpu->hm.s.fCtxChanged, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE);1343 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE); 1392 1344 1393 1345 ASMSetFlags(fEFlags); … … 1425 1377 { 1426 1378 AssertPtrReturnVoid(pVCpu); 1427 VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_VM_INSTR_ERROR, &pVCpu->hm.s.vmx.LastError.u32InstrError); 1428 } 1379 VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_VM_INSTR_ERROR, &VCPU_2_VMXSTATE(pVCpu).vmx.LastError.u32InstrError); 1380 } 1381 #if IN_RING0 1429 1382 pVCpu->CTX_SUFF(pVM)->hm.s.ForR3.rcInit = rc; 1383 #endif 1430 1384 } 1431 1385 … … 1826 1780 1827 1781 LogRel(("Auto-load/store MSR count exceeded! 
cMsrs=%u MaxSupported=%u\n", cMsrs, cMaxSupportedMsrs)); 1828 pVCpu->hm.s.u32HMError = VMX_UFC_INSUFFICIENT_GUEST_MSR_STORAGE;1782 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_INSUFFICIENT_GUEST_MSR_STORAGE; 1829 1783 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO; 1830 1784 } … … 1847 1801 */ 1848 1802 static int vmxHCAddAutoLoadStoreMsr(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint32_t idMsr, uint64_t uGuestMsrValue, 1849 1803 bool fSetReadWrite, bool fUpdateHostMsr) 1850 1804 { 1851 1805 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo; … … 1904 1858 pHostMsr[i].u32Msr = idMsr; 1905 1859 1860 #ifdef IN_RING0 1906 1861 /* 1907 1862 * Only if the caller requests to update the host MSR value AND we've newly added the … … 1925 1880 } 1926 1881 } 1882 #else 1883 RT_NOREF(fUpdateHostMsr); 1884 #endif 1927 1885 return VINF_SUCCESS; 1928 1886 } … … 2027 1985 2028 1986 /** 2029 * Updates the value of all host MSRs in the VM-exit MSR-load area.2030 *2031 * @param pVCpu The cross context virtual CPU structure.2032 * @param pVmcsInfo The VMCS info. object.2033 *2034 * @remarks No-long-jump zone!!!2035 */2036 static void vmxHCUpdateAutoLoadHostMsrs(PCVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)2037 {2038 RT_NOREF(pVCpu);2039 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));2040 2041 PVMXAUTOMSR pHostMsrLoad = (PVMXAUTOMSR)pVmcsInfo->pvHostMsrLoad;2042 uint32_t const cMsrs = pVmcsInfo->cExitMsrLoad;2043 Assert(pHostMsrLoad);2044 Assert(sizeof(*pHostMsrLoad) * cMsrs <= X86_PAGE_4K_SIZE);2045 LogFlowFunc(("pVCpu=%p cMsrs=%u\n", pVCpu, cMsrs));2046 for (uint32_t i = 0; i < cMsrs; i++)2047 {2048 /*2049 * Performance hack for the host EFER MSR. We use the cached value rather than re-read it.2050 * Strict builds will catch mismatches in vmxHCCheckAutoLoadStoreMsrs(). See @bugref{7368}.2051 */2052 if (pHostMsrLoad[i].u32Msr == MSR_K6_EFER)2053 pHostMsrLoad[i].u64Value = g_uHmVmxHostMsrEfer;2054 else2055 pHostMsrLoad[i].u64Value = ASMRdMsr(pHostMsrLoad[i].u32Msr);2056 }2057 }2058 2059 2060 /**2061 * Saves a set of host MSRs to allow read/write passthru access to the guest and2062 * perform lazy restoration of the host MSRs while leaving VT-x.2063 *2064 * @param pVCpu The cross context virtual CPU structure.2065 *2066 * @remarks No-long-jump zone!!!2067 */2068 static void vmxHCLazySaveHostMsrs(PVMCPUCC pVCpu)2069 {2070 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));2071 2072 /*2073 * Note: If you're adding MSRs here, make sure to update the MSR-bitmap accesses in vmxHCSetupVmcsProcCtls().2074 */2075 if (!(pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_SAVED_HOST))2076 {2077 Assert(!(pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)); /* Guest MSRs better not be loaded now. 
*/2078 if (pVCpu->CTX_SUFF(pVM)->hmr0.s.fAllow64BitGuests)2079 {2080 pVCpu->hmr0.s.vmx.u64HostMsrLStar = ASMRdMsr(MSR_K8_LSTAR);2081 pVCpu->hmr0.s.vmx.u64HostMsrStar = ASMRdMsr(MSR_K6_STAR);2082 pVCpu->hmr0.s.vmx.u64HostMsrSfMask = ASMRdMsr(MSR_K8_SF_MASK);2083 pVCpu->hmr0.s.vmx.u64HostMsrKernelGsBase = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);2084 }2085 pVCpu->hmr0.s.vmx.fLazyMsrs |= VMX_LAZY_MSRS_SAVED_HOST;2086 }2087 }2088 2089 2090 /**2091 * Checks whether the MSR belongs to the set of guest MSRs that we restore2092 * lazily while leaving VT-x.2093 *2094 * @returns true if it does, false otherwise.2095 * @param pVCpu The cross context virtual CPU structure.2096 * @param idMsr The MSR to check.2097 */2098 static bool vmxHCIsLazyGuestMsr(PCVMCPUCC pVCpu, uint32_t idMsr)2099 {2100 if (pVCpu->CTX_SUFF(pVM)->hmr0.s.fAllow64BitGuests)2101 {2102 switch (idMsr)2103 {2104 case MSR_K8_LSTAR:2105 case MSR_K6_STAR:2106 case MSR_K8_SF_MASK:2107 case MSR_K8_KERNEL_GS_BASE:2108 return true;2109 }2110 }2111 return false;2112 }2113 2114 2115 /**2116 * Loads a set of guests MSRs to allow read/passthru to the guest.2117 *2118 * The name of this function is slightly confusing. This function does NOT2119 * postpone loading, but loads the MSR right now. "vmxHCLazy" is simply a2120 * common prefix for functions dealing with "lazy restoration" of the shared2121 * MSRs.2122 *2123 * @param pVCpu The cross context virtual CPU structure.2124 *2125 * @remarks No-long-jump zone!!!2126 */2127 static void vmxHCLazyLoadGuestMsrs(PVMCPUCC pVCpu)2128 {2129 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));2130 Assert(!VMMRZCallRing3IsEnabled(pVCpu));2131 2132 Assert(pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_SAVED_HOST);2133 if (pVCpu->CTX_SUFF(pVM)->hmr0.s.fAllow64BitGuests)2134 {2135 /*2136 * If the guest MSRs are not loaded -and- if all the guest MSRs are identical2137 * to the MSRs on the CPU (which are the saved host MSRs, see assertion above) then2138 * we can skip a few MSR writes.2139 *2140 * Otherwise, it implies either 1. they're not loaded, or 2. they're loaded but the2141 * guest MSR values in the guest-CPU context might be different to what's currently2142 * loaded in the CPU. In either case, we need to write the new guest MSR values to the2143 * CPU, see @bugref{8728}.2144 */2145 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;2146 if ( !(pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)2147 && pCtx->msrKERNELGSBASE == pVCpu->hmr0.s.vmx.u64HostMsrKernelGsBase2148 && pCtx->msrLSTAR == pVCpu->hmr0.s.vmx.u64HostMsrLStar2149 && pCtx->msrSTAR == pVCpu->hmr0.s.vmx.u64HostMsrStar2150 && pCtx->msrSFMASK == pVCpu->hmr0.s.vmx.u64HostMsrSfMask)2151 {2152 #ifdef VBOX_STRICT2153 Assert(ASMRdMsr(MSR_K8_KERNEL_GS_BASE) == pCtx->msrKERNELGSBASE);2154 Assert(ASMRdMsr(MSR_K8_LSTAR) == pCtx->msrLSTAR);2155 Assert(ASMRdMsr(MSR_K6_STAR) == pCtx->msrSTAR);2156 Assert(ASMRdMsr(MSR_K8_SF_MASK) == pCtx->msrSFMASK);2157 #endif2158 }2159 else2160 {2161 ASMWrMsr(MSR_K8_KERNEL_GS_BASE, pCtx->msrKERNELGSBASE);2162 ASMWrMsr(MSR_K8_LSTAR, pCtx->msrLSTAR);2163 ASMWrMsr(MSR_K6_STAR, pCtx->msrSTAR);2164 /* The system call flag mask register isn't as benign and accepting of all2165 values as the above, so mask it to avoid #GP'ing on corrupted input. 
*/2166 Assert(!(pCtx->msrSFMASK & ~(uint64_t)UINT32_MAX));2167 ASMWrMsr(MSR_K8_SF_MASK, pCtx->msrSFMASK & UINT32_MAX);2168 }2169 }2170 pVCpu->hmr0.s.vmx.fLazyMsrs |= VMX_LAZY_MSRS_LOADED_GUEST;2171 }2172 2173 2174 /**2175 * Performs lazy restoration of the set of host MSRs if they were previously2176 * loaded with guest MSR values.2177 *2178 * @param pVCpu The cross context virtual CPU structure.2179 *2180 * @remarks No-long-jump zone!!!2181 * @remarks The guest MSRs should have been saved back into the guest-CPU2182 * context by vmxHCImportGuestState()!!!2183 */2184 static void vmxHCLazyRestoreHostMsrs(PVMCPUCC pVCpu)2185 {2186 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));2187 Assert(!VMMRZCallRing3IsEnabled(pVCpu));2188 2189 if (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)2190 {2191 Assert(pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_SAVED_HOST);2192 if (pVCpu->CTX_SUFF(pVM)->hmr0.s.fAllow64BitGuests)2193 {2194 ASMWrMsr(MSR_K8_LSTAR, pVCpu->hmr0.s.vmx.u64HostMsrLStar);2195 ASMWrMsr(MSR_K6_STAR, pVCpu->hmr0.s.vmx.u64HostMsrStar);2196 ASMWrMsr(MSR_K8_SF_MASK, pVCpu->hmr0.s.vmx.u64HostMsrSfMask);2197 ASMWrMsr(MSR_K8_KERNEL_GS_BASE, pVCpu->hmr0.s.vmx.u64HostMsrKernelGsBase);2198 }2199 }2200 pVCpu->hmr0.s.vmx.fLazyMsrs &= ~(VMX_LAZY_MSRS_LOADED_GUEST | VMX_LAZY_MSRS_SAVED_HOST);2201 }2202 2203 2204 /**2205 1987 * Verifies that our cached values of the VMCS fields are all consistent with 2206 1988 * what's actually present in the VMCS. … … 2224 2006 AssertMsgReturnStmt(pVmcsInfo->u32EntryCtls == u32Val, 2225 2007 ("%s entry controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32EntryCtls, u32Val), 2226 pVCpu->hm.s.u32HMError = VMX_VCI_CTRL_ENTRY,2008 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_ENTRY, 2227 2009 VERR_VMX_VMCS_FIELD_CACHE_INVALID); 2228 2010 … … 2231 2013 AssertMsgReturnStmt(pVmcsInfo->u32ExitCtls == u32Val, 2232 2014 ("%s exit controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ExitCtls, u32Val), 2233 pVCpu->hm.s.u32HMError = VMX_VCI_CTRL_EXIT,2015 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_EXIT, 2234 2016 VERR_VMX_VMCS_FIELD_CACHE_INVALID); 2235 2017 … … 2238 2020 AssertMsgReturnStmt(pVmcsInfo->u32PinCtls == u32Val, 2239 2021 ("%s pin controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32PinCtls, u32Val), 2240 pVCpu->hm.s.u32HMError = VMX_VCI_CTRL_PIN_EXEC,2022 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PIN_EXEC, 2241 2023 VERR_VMX_VMCS_FIELD_CACHE_INVALID); 2242 2024 … … 2245 2027 AssertMsgReturnStmt(pVmcsInfo->u32ProcCtls == u32Val, 2246 2028 ("%s proc controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ProcCtls, u32Val), 2247 pVCpu->hm.s.u32HMError = VMX_VCI_CTRL_PROC_EXEC,2029 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC, 2248 2030 VERR_VMX_VMCS_FIELD_CACHE_INVALID); 2249 2031 … … 2254 2036 AssertMsgReturnStmt(pVmcsInfo->u32ProcCtls2 == u32Val, 2255 2037 ("%s proc2 controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ProcCtls2, u32Val), 2256 pVCpu->hm.s.u32HMError = VMX_VCI_CTRL_PROC_EXEC2,2038 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC2, 2257 2039 VERR_VMX_VMCS_FIELD_CACHE_INVALID); 2258 2040 } … … 2265 2047 AssertMsgReturnStmt(pVmcsInfo->u64ProcCtls3 == u64Val, 2266 2048 ("%s proc3 controls mismatch: Cache=%#RX32 VMCS=%#RX64\n", pcszVmcs, pVmcsInfo->u64ProcCtls3, u64Val), 2267 pVCpu->hm.s.u32HMError = VMX_VCI_CTRL_PROC_EXEC3,2049 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC3, 2268 2050 
VERR_VMX_VMCS_FIELD_CACHE_INVALID); 2269 2051 } … … 2273 2055 AssertMsgReturnStmt(pVmcsInfo->u32XcptBitmap == u32Val, 2274 2056 ("%s exception bitmap mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32XcptBitmap, u32Val), 2275 pVCpu->hm.s.u32HMError = VMX_VCI_CTRL_XCPT_BITMAP,2057 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_XCPT_BITMAP, 2276 2058 VERR_VMX_VMCS_FIELD_CACHE_INVALID); 2277 2059 … … 2280 2062 AssertMsgReturnStmt(pVmcsInfo->u64TscOffset == u64Val, 2281 2063 ("%s TSC offset mismatch: Cache=%#RX64 VMCS=%#RX64\n", pcszVmcs, pVmcsInfo->u64TscOffset, u64Val), 2282 pVCpu->hm.s.u32HMError = VMX_VCI_CTRL_TSC_OFFSET,2064 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_TSC_OFFSET, 2283 2065 VERR_VMX_VMCS_FIELD_CACHE_INVALID); 2284 2066 … … 2287 2069 } 2288 2070 2289 #ifdef VBOX_STRICT 2290 2291 /** 2292 * Verifies that our cached host EFER MSR value has not changed since we cached it. 2293 * 2294 * @param pVmcsInfo The VMCS info. object. 2295 */ 2296 static void vmxHCCheckHostEferMsr(PCVMXVMCSINFO pVmcsInfo) 2297 { 2298 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD)); 2299 2300 if (pVmcsInfo->u32ExitCtls & VMX_EXIT_CTLS_LOAD_EFER_MSR) 2301 { 2302 uint64_t const uHostEferMsr = ASMRdMsr(MSR_K6_EFER); 2303 uint64_t const uHostEferMsrCache = g_uHmVmxHostMsrEfer; 2304 uint64_t uVmcsEferMsrVmcs; 2305 int rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_HOST_EFER_FULL, &uVmcsEferMsrVmcs); 2306 AssertRC(rc); 2307 2308 AssertMsgReturnVoid(uHostEferMsr == uVmcsEferMsrVmcs, 2309 ("EFER Host/VMCS mismatch! host=%#RX64 vmcs=%#RX64\n", uHostEferMsr, uVmcsEferMsrVmcs)); 2310 AssertMsgReturnVoid(uHostEferMsr == uHostEferMsrCache, 2311 ("EFER Host/Cache mismatch! host=%#RX64 cache=%#RX64\n", uHostEferMsr, uHostEferMsrCache)); 2312 } 2313 } 2314 2315 2316 /** 2317 * Verifies whether the guest/host MSR pairs in the auto-load/store area in the 2318 * VMCS are correct. 2319 * 2320 * @param pVCpu The cross context virtual CPU structure. 2321 * @param pVmcsInfo The VMCS info. object. 2322 * @param fIsNstGstVmcs Whether this is a nested-guest VMCS. 2323 */ 2324 static void vmxHCCheckAutoLoadStoreMsrs(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, bool fIsNstGstVmcs) 2325 { 2326 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD)); 2327 2328 /* Read the various MSR-area counts from the VMCS. */ 2329 uint32_t cEntryLoadMsrs; 2330 uint32_t cExitStoreMsrs; 2331 uint32_t cExitLoadMsrs; 2332 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, &cEntryLoadMsrs); AssertRC(rc); 2333 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, &cExitStoreMsrs); AssertRC(rc); 2334 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, &cExitLoadMsrs); AssertRC(rc); 2335 2336 /* Verify all the MSR counts are the same. */ 2337 Assert(cEntryLoadMsrs == cExitStoreMsrs); 2338 Assert(cExitStoreMsrs == cExitLoadMsrs); 2339 uint32_t const cMsrs = cExitLoadMsrs; 2340 2341 /* Verify the MSR counts do not exceed the maximum count supported by the hardware. */ 2342 Assert(cMsrs < VMX_MISC_MAX_MSRS(g_HmMsrs.u.vmx.u64Misc)); 2343 2344 /* Verify the MSR counts are within the allocated page size. */ 2345 Assert(sizeof(VMXAUTOMSR) * cMsrs <= X86_PAGE_4K_SIZE); 2346 2347 /* Verify the relevant contents of the MSR areas match. 
*/ 2348 PCVMXAUTOMSR pGuestMsrLoad = (PCVMXAUTOMSR)pVmcsInfo->pvGuestMsrLoad; 2349 PCVMXAUTOMSR pGuestMsrStore = (PCVMXAUTOMSR)pVmcsInfo->pvGuestMsrStore; 2350 PCVMXAUTOMSR pHostMsrLoad = (PCVMXAUTOMSR)pVmcsInfo->pvHostMsrLoad; 2351 bool const fSeparateExitMsrStorePage = vmxHCIsSeparateExitMsrStoreAreaVmcs(pVmcsInfo); 2352 for (uint32_t i = 0; i < cMsrs; i++) 2353 { 2354 /* Verify that the MSRs are paired properly and that the host MSR has the correct value. */ 2355 if (fSeparateExitMsrStorePage) 2356 { 2357 AssertMsgReturnVoid(pGuestMsrLoad->u32Msr == pGuestMsrStore->u32Msr, 2358 ("GuestMsrLoad=%#RX32 GuestMsrStore=%#RX32 cMsrs=%u\n", 2359 pGuestMsrLoad->u32Msr, pGuestMsrStore->u32Msr, cMsrs)); 2360 } 2361 2362 AssertMsgReturnVoid(pHostMsrLoad->u32Msr == pGuestMsrLoad->u32Msr, 2363 ("HostMsrLoad=%#RX32 GuestMsrLoad=%#RX32 cMsrs=%u\n", 2364 pHostMsrLoad->u32Msr, pGuestMsrLoad->u32Msr, cMsrs)); 2365 2366 uint64_t const u64HostMsr = ASMRdMsr(pHostMsrLoad->u32Msr); 2367 AssertMsgReturnVoid(pHostMsrLoad->u64Value == u64HostMsr, 2368 ("u32Msr=%#RX32 VMCS Value=%#RX64 ASMRdMsr=%#RX64 cMsrs=%u\n", 2369 pHostMsrLoad->u32Msr, pHostMsrLoad->u64Value, u64HostMsr, cMsrs)); 2370 2371 /* Verify that cached host EFER MSR matches what's loaded on the CPU. */ 2372 bool const fIsEferMsr = RT_BOOL(pHostMsrLoad->u32Msr == MSR_K6_EFER); 2373 AssertMsgReturnVoid(!fIsEferMsr || u64HostMsr == g_uHmVmxHostMsrEfer, 2374 ("Cached=%#RX64 ASMRdMsr=%#RX64 cMsrs=%u\n", g_uHmVmxHostMsrEfer, u64HostMsr, cMsrs)); 2375 2376 /* Verify that the accesses are as expected in the MSR bitmap for auto-load/store MSRs. */ 2377 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS) 2378 { 2379 uint32_t const fMsrpm = CPUMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, pGuestMsrLoad->u32Msr); 2380 if (fIsEferMsr) 2381 { 2382 AssertMsgReturnVoid((fMsrpm & VMXMSRPM_EXIT_RD), ("Passthru read for EFER MSR!?\n")); 2383 AssertMsgReturnVoid((fMsrpm & VMXMSRPM_EXIT_WR), ("Passthru write for EFER MSR!?\n")); 2384 } 2385 else 2386 { 2387 /* Verify LBR MSRs (used only for debugging) are intercepted. We don't passthru these MSRs to the guest yet. */ 2388 PCVMCC pVM = pVCpu->CTX_SUFF(pVM); 2389 if ( pVM->hmr0.s.vmx.fLbr 2390 && ( vmxHCIsLbrBranchFromMsr(pVM, pGuestMsrLoad->u32Msr, NULL /* pidxMsr */) 2391 || vmxHCIsLbrBranchToMsr(pVM, pGuestMsrLoad->u32Msr, NULL /* pidxMsr */) 2392 || pGuestMsrLoad->u32Msr == pVM->hmr0.s.vmx.idLbrTosMsr)) 2393 { 2394 AssertMsgReturnVoid((fMsrpm & VMXMSRPM_MASK) == VMXMSRPM_EXIT_RD_WR, 2395 ("u32Msr=%#RX32 cMsrs=%u Passthru read/write for LBR MSRs!\n", 2396 pGuestMsrLoad->u32Msr, cMsrs)); 2397 } 2398 else if (!fIsNstGstVmcs) 2399 { 2400 AssertMsgReturnVoid((fMsrpm & VMXMSRPM_MASK) == VMXMSRPM_ALLOW_RD_WR, 2401 ("u32Msr=%#RX32 cMsrs=%u No passthru read/write!\n", pGuestMsrLoad->u32Msr, cMsrs)); 2402 } 2403 else 2404 { 2405 /* 2406 * A nested-guest VMCS must -also- allow read/write passthrough for the MSR for us to 2407 * execute a nested-guest with MSR passthrough. 2408 * 2409 * Check if the nested-guest MSR bitmap allows passthrough, and if so, assert that we 2410 * allow passthrough too. 
2411 */ 2412 void const *pvMsrBitmapNstGst = pVCpu->cpum.GstCtx.hwvirt.vmx.abMsrBitmap; 2413 Assert(pvMsrBitmapNstGst); 2414 uint32_t const fMsrpmNstGst = CPUMGetVmxMsrPermission(pvMsrBitmapNstGst, pGuestMsrLoad->u32Msr); 2415 AssertMsgReturnVoid(fMsrpm == fMsrpmNstGst, 2416 ("u32Msr=%#RX32 cMsrs=%u Permission mismatch fMsrpm=%#x fMsrpmNstGst=%#x!\n", 2417 pGuestMsrLoad->u32Msr, cMsrs, fMsrpm, fMsrpmNstGst)); 2418 } 2419 } 2420 } 2421 2422 /* Move to the next MSR. */ 2423 pHostMsrLoad++; 2424 pGuestMsrLoad++; 2425 pGuestMsrStore++; 2426 } 2427 } 2428 2429 #endif /* VBOX_STRICT */ 2430 2431 /** 2432 * Flushes the TLB using EPT. 2433 * 2434 * @returns VBox status code. 2435 * @param pVCpu The cross context virtual CPU structure of the calling 2436 * EMT. Can be NULL depending on @a enmTlbFlush. 2437 * @param pVmcsInfo The VMCS info. object. Can be NULL depending on @a 2438 * enmTlbFlush. 2439 * @param enmTlbFlush Type of flush. 2440 * 2441 * @remarks Caller is responsible for making sure this function is called only 2442 * when NestedPaging is supported and providing @a enmTlbFlush that is 2443 * supported by the CPU. 2444 * @remarks Can be called with interrupts disabled. 2445 */ 2446 static void vmxHCFlushEpt(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, VMXTLBFLUSHEPT enmTlbFlush) 2447 { 2448 uint64_t au64Descriptor[2]; 2449 if (enmTlbFlush == VMXTLBFLUSHEPT_ALL_CONTEXTS) 2450 au64Descriptor[0] = 0; 2451 else 2452 { 2453 Assert(pVCpu); 2454 Assert(pVmcsInfo); 2455 au64Descriptor[0] = pVmcsInfo->HCPhysEPTP; 2456 } 2457 au64Descriptor[1] = 0; /* MBZ. Intel spec. 33.3 "VMX Instructions" */ 2458 2459 int rc = VMXR0InvEPT(enmTlbFlush, &au64Descriptor[0]); 2460 AssertMsg(rc == VINF_SUCCESS, ("VMXR0InvEPT %#x %#RHp failed. rc=%Rrc\n", enmTlbFlush, au64Descriptor[0], rc)); 2461 2462 if ( RT_SUCCESS(rc) 2463 && pVCpu) 2464 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushNestedPaging); 2465 } 2466 2467 2468 /** 2469 * Flushes the TLB using VPID. 2470 * 2471 * @returns VBox status code. 2472 * @param pVCpu The cross context virtual CPU structure of the calling 2473 * EMT. Can be NULL depending on @a enmTlbFlush. 2474 * @param enmTlbFlush Type of flush. 2475 * @param GCPtr Virtual address of the page to flush (can be 0 depending 2476 * on @a enmTlbFlush). 2477 * 2478 * @remarks Can be called with interrupts disabled. 2479 */ 2480 static void vmxHCFlushVpid(PVMCPUCC pVCpu, VMXTLBFLUSHVPID enmTlbFlush, RTGCPTR GCPtr) 2481 { 2482 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fVpid); 2483 2484 uint64_t au64Descriptor[2]; 2485 if (enmTlbFlush == VMXTLBFLUSHVPID_ALL_CONTEXTS) 2486 { 2487 au64Descriptor[0] = 0; 2488 au64Descriptor[1] = 0; 2489 } 2490 else 2491 { 2492 AssertPtr(pVCpu); 2493 AssertMsg(pVCpu->hmr0.s.uCurrentAsid != 0, ("VMXR0InvVPID: invalid ASID %lu\n", pVCpu->hmr0.s.uCurrentAsid)); 2494 AssertMsg(pVCpu->hmr0.s.uCurrentAsid <= UINT16_MAX, ("VMXR0InvVPID: invalid ASID %lu\n", pVCpu->hmr0.s.uCurrentAsid)); 2495 au64Descriptor[0] = pVCpu->hmr0.s.uCurrentAsid; 2496 au64Descriptor[1] = GCPtr; 2497 } 2498 2499 int rc = VMXR0InvVPID(enmTlbFlush, &au64Descriptor[0]); 2500 AssertMsg(rc == VINF_SUCCESS, 2501 ("VMXR0InvVPID %#x %u %RGv failed with %Rrc\n", enmTlbFlush, pVCpu ? pVCpu->hmr0.s.uCurrentAsid : 0, GCPtr, rc)); 2502 2503 if ( RT_SUCCESS(rc) 2504 && pVCpu) 2505 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushAsid); 2506 NOREF(rc); 2507 } 2508 2509 2510 /** 2511 * Dummy placeholder for tagged-TLB flush handling before VM-entry. Used in the 2512 * case where neither EPT nor VPID is supported by the CPU. 
2513 * 2514 * @param pHostCpu The HM physical-CPU structure. 2515 * @param pVCpu The cross context virtual CPU structure. 2516 * 2517 * @remarks Called with interrupts disabled. 2518 */ 2519 static void vmxHCFlushTaggedTlbNone(PHMPHYSCPU pHostCpu, PVMCPUCC pVCpu) 2520 { 2521 AssertPtr(pVCpu); 2522 AssertPtr(pHostCpu); 2523 2524 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH); 2525 2526 Assert(pHostCpu->idCpu != NIL_RTCPUID); 2527 pVCpu->hmr0.s.idLastCpu = pHostCpu->idCpu; 2528 pVCpu->hmr0.s.cTlbFlushes = pHostCpu->cTlbFlushes; 2529 pVCpu->hmr0.s.fForceTLBFlush = false; 2530 return; 2531 } 2532 2533 2534 /** 2535 * Flushes the tagged-TLB entries for EPT+VPID CPUs as necessary. 2536 * 2537 * @param pHostCpu The HM physical-CPU structure. 2538 * @param pVCpu The cross context virtual CPU structure. 2539 * @param pVmcsInfo The VMCS info. object. 2540 * 2541 * @remarks All references to "ASID" in this function pertains to "VPID" in Intel's 2542 * nomenclature. The reason is, to avoid confusion in compare statements 2543 * since the host-CPU copies are named "ASID". 2544 * 2545 * @remarks Called with interrupts disabled. 2546 */ 2547 static void vmxHCFlushTaggedTlbBoth(PHMPHYSCPU pHostCpu, PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo) 2548 { 2549 #ifdef VBOX_WITH_STATISTICS 2550 bool fTlbFlushed = false; 2551 # define HMVMX_SET_TAGGED_TLB_FLUSHED() do { fTlbFlushed = true; } while (0) 2552 # define HMVMX_UPDATE_FLUSH_SKIPPED_STAT() do { \ 2553 if (!fTlbFlushed) \ 2554 STAM_COUNTER_INC(&pVCpu->hm.s.StatNoFlushTlbWorldSwitch); \ 2555 } while (0) 2556 #else 2557 # define HMVMX_SET_TAGGED_TLB_FLUSHED() do { } while (0) 2558 # define HMVMX_UPDATE_FLUSH_SKIPPED_STAT() do { } while (0) 2559 #endif 2560 2561 AssertPtr(pVCpu); 2562 AssertPtr(pHostCpu); 2563 Assert(pHostCpu->idCpu != NIL_RTCPUID); 2564 2565 PVMCC pVM = pVCpu->CTX_SUFF(pVM); 2566 AssertMsg(pVM->hmr0.s.fNestedPaging && pVM->hmr0.s.vmx.fVpid, 2567 ("vmxHCFlushTaggedTlbBoth cannot be invoked unless NestedPaging & VPID are enabled." 2568 "fNestedPaging=%RTbool fVpid=%RTbool", pVM->hmr0.s.fNestedPaging, pVM->hmr0.s.vmx.fVpid)); 2569 2570 /* 2571 * Force a TLB flush for the first world-switch if the current CPU differs from the one we 2572 * ran on last. If the TLB flush count changed, another VM (VCPU rather) has hit the ASID 2573 * limit while flushing the TLB or the host CPU is online after a suspend/resume, so we 2574 * cannot reuse the current ASID anymore. 2575 */ 2576 if ( pVCpu->hmr0.s.idLastCpu != pHostCpu->idCpu 2577 || pVCpu->hmr0.s.cTlbFlushes != pHostCpu->cTlbFlushes) 2578 { 2579 ++pHostCpu->uCurrentAsid; 2580 if (pHostCpu->uCurrentAsid >= g_uHmMaxAsid) 2581 { 2582 pHostCpu->uCurrentAsid = 1; /* Wraparound to 1; host uses 0. */ 2583 pHostCpu->cTlbFlushes++; /* All VCPUs that run on this host CPU must use a new VPID. */ 2584 pHostCpu->fFlushAsidBeforeUse = true; /* All VCPUs that run on this host CPU must flush their new VPID before use. */ 2585 } 2586 2587 pVCpu->hmr0.s.uCurrentAsid = pHostCpu->uCurrentAsid; 2588 pVCpu->hmr0.s.idLastCpu = pHostCpu->idCpu; 2589 pVCpu->hmr0.s.cTlbFlushes = pHostCpu->cTlbFlushes; 2590 2591 /* 2592 * Flush by EPT when we get rescheduled to a new host CPU to ensure EPT-only tagged mappings are also 2593 * invalidated. We don't need to flush-by-VPID here as flushing by EPT covers it. See @bugref{6568}. 
2594 */ 2595 vmxHCFlushEpt(pVCpu, pVmcsInfo, pVM->hmr0.s.vmx.enmTlbFlushEpt); 2596 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbWorldSwitch); 2597 HMVMX_SET_TAGGED_TLB_FLUSHED(); 2598 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH); 2599 } 2600 else if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH)) /* Check for explicit TLB flushes. */ 2601 { 2602 /* 2603 * Changes to the EPT paging structure by VMM requires flushing-by-EPT as the CPU 2604 * creates guest-physical (ie. only EPT-tagged) mappings while traversing the EPT 2605 * tables when EPT is in use. Flushing-by-VPID will only flush linear (only 2606 * VPID-tagged) and combined (EPT+VPID tagged) mappings but not guest-physical 2607 * mappings, see @bugref{6568}. 2608 * 2609 * See Intel spec. 28.3.2 "Creating and Using Cached Translation Information". 2610 */ 2611 vmxHCFlushEpt(pVCpu, pVmcsInfo, pVM->hmr0.s.vmx.enmTlbFlushEpt); 2612 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlb); 2613 HMVMX_SET_TAGGED_TLB_FLUSHED(); 2614 } 2615 else if (pVCpu->hm.s.vmx.fSwitchedNstGstFlushTlb) 2616 { 2617 /* 2618 * The nested-guest specifies its own guest-physical address to use as the APIC-access 2619 * address which requires flushing the TLB of EPT cached structures. 2620 * 2621 * See Intel spec. 28.3.3.4 "Guidelines for Use of the INVEPT Instruction". 2622 */ 2623 vmxHCFlushEpt(pVCpu, pVmcsInfo, pVM->hmr0.s.vmx.enmTlbFlushEpt); 2624 pVCpu->hm.s.vmx.fSwitchedNstGstFlushTlb = false; 2625 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbNstGst); 2626 HMVMX_SET_TAGGED_TLB_FLUSHED(); 2627 } 2628 2629 2630 pVCpu->hmr0.s.fForceTLBFlush = false; 2631 HMVMX_UPDATE_FLUSH_SKIPPED_STAT(); 2632 2633 Assert(pVCpu->hmr0.s.idLastCpu == pHostCpu->idCpu); 2634 Assert(pVCpu->hmr0.s.cTlbFlushes == pHostCpu->cTlbFlushes); 2635 AssertMsg(pVCpu->hmr0.s.cTlbFlushes == pHostCpu->cTlbFlushes, 2636 ("Flush count mismatch for cpu %d (%u vs %u)\n", pHostCpu->idCpu, pVCpu->hmr0.s.cTlbFlushes, pHostCpu->cTlbFlushes)); 2637 AssertMsg(pHostCpu->uCurrentAsid >= 1 && pHostCpu->uCurrentAsid < g_uHmMaxAsid, 2638 ("Cpu[%u] uCurrentAsid=%u cTlbFlushes=%u pVCpu->idLastCpu=%u pVCpu->cTlbFlushes=%u\n", pHostCpu->idCpu, 2639 pHostCpu->uCurrentAsid, pHostCpu->cTlbFlushes, pVCpu->hmr0.s.idLastCpu, pVCpu->hmr0.s.cTlbFlushes)); 2640 AssertMsg(pVCpu->hmr0.s.uCurrentAsid >= 1 && pVCpu->hmr0.s.uCurrentAsid < g_uHmMaxAsid, 2641 ("Cpu[%u] pVCpu->uCurrentAsid=%u\n", pHostCpu->idCpu, pVCpu->hmr0.s.uCurrentAsid)); 2642 2643 /* Update VMCS with the VPID. */ 2644 int rc = VMX_VMCS_WRITE_16(pVCpu, VMX_VMCS16_VPID, pVCpu->hmr0.s.uCurrentAsid); 2645 AssertRC(rc); 2646 2647 #undef HMVMX_SET_TAGGED_TLB_FLUSHED 2648 } 2649 2650 2651 /** 2652 * Flushes the tagged-TLB entries for EPT CPUs as necessary. 2653 * 2654 * @param pHostCpu The HM physical-CPU structure. 2655 * @param pVCpu The cross context virtual CPU structure. 2656 * @param pVmcsInfo The VMCS info. object. 2657 * 2658 * @remarks Called with interrupts disabled. 2659 */ 2660 static void vmxHCFlushTaggedTlbEpt(PHMPHYSCPU pHostCpu, PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo) 2661 { 2662 AssertPtr(pVCpu); 2663 AssertPtr(pHostCpu); 2664 Assert(pHostCpu->idCpu != NIL_RTCPUID); 2665 AssertMsg(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging, ("vmxHCFlushTaggedTlbEpt cannot be invoked without NestedPaging.")); 2666 AssertMsg(!pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fVpid, ("vmxHCFlushTaggedTlbEpt cannot be invoked with VPID.")); 2667 2668 /* 2669 * Force a TLB flush for the first world-switch if the current CPU differs from the one we ran on last. 
2670 * A change in the TLB flush count implies the host CPU is online after a suspend/resume. 2671 */ 2672 if ( pVCpu->hmr0.s.idLastCpu != pHostCpu->idCpu 2673 || pVCpu->hmr0.s.cTlbFlushes != pHostCpu->cTlbFlushes) 2674 { 2675 pVCpu->hmr0.s.fForceTLBFlush = true; 2676 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbWorldSwitch); 2677 } 2678 2679 /* Check for explicit TLB flushes. */ 2680 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH)) 2681 { 2682 pVCpu->hmr0.s.fForceTLBFlush = true; 2683 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlb); 2684 } 2685 2686 /* Check for TLB flushes while switching to/from a nested-guest. */ 2687 if (pVCpu->hm.s.vmx.fSwitchedNstGstFlushTlb) 2688 { 2689 pVCpu->hmr0.s.fForceTLBFlush = true; 2690 pVCpu->hm.s.vmx.fSwitchedNstGstFlushTlb = false; 2691 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbNstGst); 2692 } 2693 2694 pVCpu->hmr0.s.idLastCpu = pHostCpu->idCpu; 2695 pVCpu->hmr0.s.cTlbFlushes = pHostCpu->cTlbFlushes; 2696 2697 if (pVCpu->hmr0.s.fForceTLBFlush) 2698 { 2699 vmxHCFlushEpt(pVCpu, pVmcsInfo, pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.enmTlbFlushEpt); 2700 pVCpu->hmr0.s.fForceTLBFlush = false; 2701 } 2702 } 2703 2704 2705 /** 2706 * Flushes the tagged-TLB entries for VPID CPUs as necessary. 2707 * 2708 * @param pHostCpu The HM physical-CPU structure. 2709 * @param pVCpu The cross context virtual CPU structure. 2710 * 2711 * @remarks Called with interrupts disabled. 2712 */ 2713 static void vmxHCFlushTaggedTlbVpid(PHMPHYSCPU pHostCpu, PVMCPUCC pVCpu) 2714 { 2715 AssertPtr(pVCpu); 2716 AssertPtr(pHostCpu); 2717 Assert(pHostCpu->idCpu != NIL_RTCPUID); 2718 AssertMsg(pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fVpid, ("vmxHCFlushTlbVpid cannot be invoked without VPID.")); 2719 AssertMsg(!pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging, ("vmxHCFlushTlbVpid cannot be invoked with NestedPaging")); 2720 2721 /* 2722 * Force a TLB flush for the first world switch if the current CPU differs from the one we 2723 * ran on last. If the TLB flush count changed, another VM (VCPU rather) has hit the ASID 2724 * limit while flushing the TLB or the host CPU is online after a suspend/resume, so we 2725 * cannot reuse the current ASID anymore. 2726 */ 2727 if ( pVCpu->hmr0.s.idLastCpu != pHostCpu->idCpu 2728 || pVCpu->hmr0.s.cTlbFlushes != pHostCpu->cTlbFlushes) 2729 { 2730 pVCpu->hmr0.s.fForceTLBFlush = true; 2731 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbWorldSwitch); 2732 } 2733 2734 /* Check for explicit TLB flushes. */ 2735 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH)) 2736 { 2737 /* 2738 * If we ever support VPID flush combinations other than ALL or SINGLE-context (see 2739 * vmxHCSetupTaggedTlb()) we would need to explicitly flush in this case (add an 2740 * fExplicitFlush = true here and change the pHostCpu->fFlushAsidBeforeUse check below to 2741 * include fExplicitFlush's too) - an obscure corner case. 2742 */ 2743 pVCpu->hmr0.s.fForceTLBFlush = true; 2744 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlb); 2745 } 2746 2747 /* Check for TLB flushes while switching to/from a nested-guest. 
*/ 2748 if (pVCpu->hm.s.vmx.fSwitchedNstGstFlushTlb) 2749 { 2750 pVCpu->hmr0.s.fForceTLBFlush = true; 2751 pVCpu->hm.s.vmx.fSwitchedNstGstFlushTlb = false; 2752 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbNstGst); 2753 } 2754 2755 PVMCC pVM = pVCpu->CTX_SUFF(pVM); 2756 pVCpu->hmr0.s.idLastCpu = pHostCpu->idCpu; 2757 if (pVCpu->hmr0.s.fForceTLBFlush) 2758 { 2759 ++pHostCpu->uCurrentAsid; 2760 if (pHostCpu->uCurrentAsid >= g_uHmMaxAsid) 2761 { 2762 pHostCpu->uCurrentAsid = 1; /* Wraparound to 1; host uses 0 */ 2763 pHostCpu->cTlbFlushes++; /* All VCPUs that run on this host CPU must use a new VPID. */ 2764 pHostCpu->fFlushAsidBeforeUse = true; /* All VCPUs that run on this host CPU must flush their new VPID before use. */ 2765 } 2766 2767 pVCpu->hmr0.s.fForceTLBFlush = false; 2768 pVCpu->hmr0.s.cTlbFlushes = pHostCpu->cTlbFlushes; 2769 pVCpu->hmr0.s.uCurrentAsid = pHostCpu->uCurrentAsid; 2770 if (pHostCpu->fFlushAsidBeforeUse) 2771 { 2772 if (pVM->hmr0.s.vmx.enmTlbFlushVpid == VMXTLBFLUSHVPID_SINGLE_CONTEXT) 2773 vmxHCFlushVpid(pVCpu, VMXTLBFLUSHVPID_SINGLE_CONTEXT, 0 /* GCPtr */); 2774 else if (pVM->hmr0.s.vmx.enmTlbFlushVpid == VMXTLBFLUSHVPID_ALL_CONTEXTS) 2775 { 2776 vmxHCFlushVpid(pVCpu, VMXTLBFLUSHVPID_ALL_CONTEXTS, 0 /* GCPtr */); 2777 pHostCpu->fFlushAsidBeforeUse = false; 2778 } 2779 else 2780 { 2781 /* vmxHCSetupTaggedTlb() ensures we never get here. Paranoia. */ 2782 AssertMsgFailed(("Unsupported VPID-flush context type.\n")); 2783 } 2784 } 2785 } 2786 2787 AssertMsg(pVCpu->hmr0.s.cTlbFlushes == pHostCpu->cTlbFlushes, 2788 ("Flush count mismatch for cpu %d (%u vs %u)\n", pHostCpu->idCpu, pVCpu->hmr0.s.cTlbFlushes, pHostCpu->cTlbFlushes)); 2789 AssertMsg(pHostCpu->uCurrentAsid >= 1 && pHostCpu->uCurrentAsid < g_uHmMaxAsid, 2790 ("Cpu[%u] uCurrentAsid=%u cTlbFlushes=%u pVCpu->idLastCpu=%u pVCpu->cTlbFlushes=%u\n", pHostCpu->idCpu, 2791 pHostCpu->uCurrentAsid, pHostCpu->cTlbFlushes, pVCpu->hmr0.s.idLastCpu, pVCpu->hmr0.s.cTlbFlushes)); 2792 AssertMsg(pVCpu->hmr0.s.uCurrentAsid >= 1 && pVCpu->hmr0.s.uCurrentAsid < g_uHmMaxAsid, 2793 ("Cpu[%u] pVCpu->uCurrentAsid=%u\n", pHostCpu->idCpu, pVCpu->hmr0.s.uCurrentAsid)); 2794 2795 int rc = VMX_VMCS_WRITE_16(pVCpu, VMX_VMCS16_VPID, pVCpu->hmr0.s.uCurrentAsid); 2796 AssertRC(rc); 2797 } 2798 2799 2800 /** 2801 * Flushes the guest TLB entry based on CPU capabilities. 2802 * 2803 * @param pHostCpu The HM physical-CPU structure. 2804 * @param pVCpu The cross context virtual CPU structure. 2805 * @param pVmcsInfo The VMCS info. object. 2806 * 2807 * @remarks Called with interrupts disabled. 2808 */ 2809 static void vmxHCFlushTaggedTlb(PHMPHYSCPU pHostCpu, PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo) 2810 { 2811 #ifdef HMVMX_ALWAYS_FLUSH_TLB 2812 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH); 2813 #endif 2814 PVMCC pVM = pVCpu->CTX_SUFF(pVM); 2815 switch (pVM->hmr0.s.vmx.enmTlbFlushType) 2816 { 2817 case VMXTLBFLUSHTYPE_EPT_VPID: vmxHCFlushTaggedTlbBoth(pHostCpu, pVCpu, pVmcsInfo); break; 2818 case VMXTLBFLUSHTYPE_EPT: vmxHCFlushTaggedTlbEpt(pHostCpu, pVCpu, pVmcsInfo); break; 2819 case VMXTLBFLUSHTYPE_VPID: vmxHCFlushTaggedTlbVpid(pHostCpu, pVCpu); break; 2820 case VMXTLBFLUSHTYPE_NONE: vmxHCFlushTaggedTlbNone(pHostCpu, pVCpu); break; 2821 default: 2822 AssertMsgFailed(("Invalid flush-tag function identifier\n")); 2823 break; 2824 } 2825 /* Don't assert that VMCPU_FF_TLB_FLUSH should no longer be pending. It can be set by other EMTs. 
*/ 2826 } 2827 2828 2829 /** 2830 * Sets up the appropriate tagged TLB-flush level and handler for flushing guest 2831 * TLB entries from the host TLB before VM-entry. 2832 * 2833 * @returns VBox status code. 2834 * @param pVM The cross context VM structure. 2835 */ 2836 static int vmxHCSetupTaggedTlb(PVMCC pVM) 2837 { 2838 /* 2839 * Determine optimal flush type for nested paging. 2840 * We cannot ignore EPT if no suitable flush-types is supported by the CPU as we've already setup 2841 * unrestricted guest execution (see hmR3InitFinalizeR0()). 2842 */ 2843 if (pVM->hmr0.s.fNestedPaging) 2844 { 2845 if (g_HmMsrs.u.vmx.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT) 2846 { 2847 if (g_HmMsrs.u.vmx.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_SINGLE_CONTEXT) 2848 pVM->hmr0.s.vmx.enmTlbFlushEpt = VMXTLBFLUSHEPT_SINGLE_CONTEXT; 2849 else if (g_HmMsrs.u.vmx.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_ALL_CONTEXTS) 2850 pVM->hmr0.s.vmx.enmTlbFlushEpt = VMXTLBFLUSHEPT_ALL_CONTEXTS; 2851 else 2852 { 2853 /* Shouldn't happen. EPT is supported but no suitable flush-types supported. */ 2854 pVM->hmr0.s.vmx.enmTlbFlushEpt = VMXTLBFLUSHEPT_NOT_SUPPORTED; 2855 VMCC_GET_CPU_0(pVM)->hm.s.u32HMError = VMX_UFC_EPT_FLUSH_TYPE_UNSUPPORTED; 2856 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO; 2857 } 2858 2859 /* Make sure the write-back cacheable memory type for EPT is supported. */ 2860 if (RT_UNLIKELY(!(g_HmMsrs.u.vmx.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_MEMTYPE_WB))) 2861 { 2862 pVM->hmr0.s.vmx.enmTlbFlushEpt = VMXTLBFLUSHEPT_NOT_SUPPORTED; 2863 VMCC_GET_CPU_0(pVM)->hm.s.u32HMError = VMX_UFC_EPT_MEM_TYPE_NOT_WB; 2864 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO; 2865 } 2866 2867 /* EPT requires a page-walk length of 4. */ 2868 if (RT_UNLIKELY(!(g_HmMsrs.u.vmx.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_PAGE_WALK_LENGTH_4))) 2869 { 2870 pVM->hmr0.s.vmx.enmTlbFlushEpt = VMXTLBFLUSHEPT_NOT_SUPPORTED; 2871 VMCC_GET_CPU_0(pVM)->hm.s.u32HMError = VMX_UFC_EPT_PAGE_WALK_LENGTH_UNSUPPORTED; 2872 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO; 2873 } 2874 } 2875 else 2876 { 2877 /* Shouldn't happen. EPT is supported but INVEPT instruction is not supported. */ 2878 pVM->hmr0.s.vmx.enmTlbFlushEpt = VMXTLBFLUSHEPT_NOT_SUPPORTED; 2879 VMCC_GET_CPU_0(pVM)->hm.s.u32HMError = VMX_UFC_EPT_INVEPT_UNAVAILABLE; 2880 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO; 2881 } 2882 } 2883 2884 /* 2885 * Determine optimal flush type for VPID. 2886 */ 2887 if (pVM->hmr0.s.vmx.fVpid) 2888 { 2889 if (g_HmMsrs.u.vmx.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID) 2890 { 2891 if (g_HmMsrs.u.vmx.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT) 2892 pVM->hmr0.s.vmx.enmTlbFlushVpid = VMXTLBFLUSHVPID_SINGLE_CONTEXT; 2893 else if (g_HmMsrs.u.vmx.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_ALL_CONTEXTS) 2894 pVM->hmr0.s.vmx.enmTlbFlushVpid = VMXTLBFLUSHVPID_ALL_CONTEXTS; 2895 else 2896 { 2897 /* Neither SINGLE nor ALL-context flush types for VPID is supported by the CPU. Ignore VPID capability. */ 2898 if (g_HmMsrs.u.vmx.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR) 2899 LogRelFunc(("Only INDIV_ADDR supported. Ignoring VPID.\n")); 2900 if (g_HmMsrs.u.vmx.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT_RETAIN_GLOBALS) 2901 LogRelFunc(("Only SINGLE_CONTEXT_RETAIN_GLOBALS supported. Ignoring VPID.\n")); 2902 pVM->hmr0.s.vmx.enmTlbFlushVpid = VMXTLBFLUSHVPID_NOT_SUPPORTED; 2903 pVM->hmr0.s.vmx.fVpid = false; 2904 } 2905 } 2906 else 2907 { 2908 /* Shouldn't happen. 
VPID is supported but INVVPID is not supported by the CPU. Ignore VPID capability. */ 2909 Log4Func(("VPID supported without INVEPT support. Ignoring VPID.\n")); 2910 pVM->hmr0.s.vmx.enmTlbFlushVpid = VMXTLBFLUSHVPID_NOT_SUPPORTED; 2911 pVM->hmr0.s.vmx.fVpid = false; 2912 } 2913 } 2914 2915 /* 2916 * Setup the handler for flushing tagged-TLBs. 2917 */ 2918 if (pVM->hmr0.s.fNestedPaging && pVM->hmr0.s.vmx.fVpid) 2919 pVM->hmr0.s.vmx.enmTlbFlushType = VMXTLBFLUSHTYPE_EPT_VPID; 2920 else if (pVM->hmr0.s.fNestedPaging) 2921 pVM->hmr0.s.vmx.enmTlbFlushType = VMXTLBFLUSHTYPE_EPT; 2922 else if (pVM->hmr0.s.vmx.fVpid) 2923 pVM->hmr0.s.vmx.enmTlbFlushType = VMXTLBFLUSHTYPE_VPID; 2924 else 2925 pVM->hmr0.s.vmx.enmTlbFlushType = VMXTLBFLUSHTYPE_NONE; 2926 2927 2928 /* 2929 * Copy out the result to ring-3. 2930 */ 2931 pVM->hm.s.ForR3.vmx.fVpid = pVM->hmr0.s.vmx.fVpid; 2932 pVM->hm.s.ForR3.vmx.enmTlbFlushType = pVM->hmr0.s.vmx.enmTlbFlushType; 2933 pVM->hm.s.ForR3.vmx.enmTlbFlushEpt = pVM->hmr0.s.vmx.enmTlbFlushEpt; 2934 pVM->hm.s.ForR3.vmx.enmTlbFlushVpid = pVM->hmr0.s.vmx.enmTlbFlushVpid; 2935 return VINF_SUCCESS; 2936 } 2937 2938 2071 2072 #ifdef IN_RING0 2939 2073 /** 2940 2074 * Sets up the LBR MSR ranges based on the host CPU. … … 3166 2300 3167 2301 /** 3168 * Sets up the virtual-APIC page address for the VMCS.3169 *3170 * @param pVmcsInfo The VMCS info. object.3171 */3172 DECLINLINE(void) vmxHCSetupVmcsVirtApicAddr(PCVMXVMCSINFO pVmcsInfo)3173 {3174 RTHCPHYS const HCPhysVirtApic = pVmcsInfo->HCPhysVirtApic;3175 Assert(HCPhysVirtApic != NIL_RTHCPHYS);3176 Assert(!(HCPhysVirtApic & 0xfff)); /* Bits 11:0 MBZ. */3177 int rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_FULL, HCPhysVirtApic);3178 AssertRC(rc);3179 }3180 3181 3182 /**3183 * Sets up the MSR-bitmap address for the VMCS.3184 *3185 * @param pVmcsInfo The VMCS info. object.3186 */3187 DECLINLINE(void) vmxHCSetupVmcsMsrBitmapAddr(PCVMXVMCSINFO pVmcsInfo)3188 {3189 RTHCPHYS const HCPhysMsrBitmap = pVmcsInfo->HCPhysMsrBitmap;3190 Assert(HCPhysMsrBitmap != NIL_RTHCPHYS);3191 Assert(!(HCPhysMsrBitmap & 0xfff)); /* Bits 11:0 MBZ. */3192 int rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_CTRL_MSR_BITMAP_FULL, HCPhysMsrBitmap);3193 AssertRC(rc);3194 }3195 3196 3197 /**3198 2302 * Sets up the APIC-access page address for the VMCS. 3199 2303 * … … 3241 2345 3242 2346 #endif 3243 3244 /**3245 * Sets up the VM-entry MSR load, VM-exit MSR-store and VM-exit MSR-load addresses3246 * in the VMCS.3247 *3248 * @returns VBox status code.3249 * @param pVmcsInfo The VMCS info. object.3250 */3251 DECLINLINE(int) vmxHCSetupVmcsAutoLoadStoreMsrAddrs(PVMXVMCSINFO pVmcsInfo)3252 {3253 RTHCPHYS const HCPhysGuestMsrLoad = pVmcsInfo->HCPhysGuestMsrLoad;3254 Assert(HCPhysGuestMsrLoad != NIL_RTHCPHYS);3255 Assert(!(HCPhysGuestMsrLoad & 0xf)); /* Bits 3:0 MBZ. */3256 3257 RTHCPHYS const HCPhysGuestMsrStore = pVmcsInfo->HCPhysGuestMsrStore;3258 Assert(HCPhysGuestMsrStore != NIL_RTHCPHYS);3259 Assert(!(HCPhysGuestMsrStore & 0xf)); /* Bits 3:0 MBZ. */3260 3261 RTHCPHYS const HCPhysHostMsrLoad = pVmcsInfo->HCPhysHostMsrLoad;3262 Assert(HCPhysHostMsrLoad != NIL_RTHCPHYS);3263 Assert(!(HCPhysHostMsrLoad & 0xf)); /* Bits 3:0 MBZ. 
*/3264 3265 int rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL, HCPhysGuestMsrLoad); AssertRC(rc);3266 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL, HCPhysGuestMsrStore); AssertRC(rc);3267 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL, HCPhysHostMsrLoad); AssertRC(rc);3268 return VINF_SUCCESS;3269 }3270 3271 2347 3272 2348 /** … … 3381 2457 LogRelFunc(("Invalid pin-based VM-execution controls combo! Cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n", 3382 2458 g_HmMsrs.u.vmx.PinCtls.n.allowed0, fVal, fZap)); 3383 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_PIN_EXEC;2459 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_CTRL_PIN_EXEC; 3384 2460 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO; 3385 2461 } … … 3473 2549 LogRelFunc(("Invalid secondary processor-based VM-execution controls combo! cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n", 3474 2550 g_HmMsrs.u.vmx.ProcCtls2.n.allowed0, fVal, fZap)); 3475 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_PROC_EXEC2;2551 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_CTRL_PROC_EXEC2; 3476 2552 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO; 3477 2553 } … … 3511 2587 || (g_HmMsrs.u.vmx.ProcCtls.n.allowed0 & VMX_PROC_CTLS_MOV_DR_EXIT)) 3512 2588 { 3513 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_PROC_MOV_DRX_EXIT;2589 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_CTRL_PROC_MOV_DRX_EXIT; 3514 2590 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO; 3515 2591 } … … 3524 2600 } 3525 2601 2602 #ifdef IN_INRG0 3526 2603 /* Use TPR shadowing if supported by the CPU. */ 3527 2604 if ( PDMHasApic(pVM) … … 3549 2626 vmxHCSetupVmcsMsrBitmapAddr(pVmcsInfo); 3550 2627 } 2628 #endif 3551 2629 3552 2630 /* Use the secondary processor-based VM-execution controls if supported by the CPU. */ … … 3558 2636 LogRelFunc(("Invalid processor-based VM-execution controls combo! 
cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n", 3559 2637 g_HmMsrs.u.vmx.ProcCtls.n.allowed0, fVal, fZap)); 3560 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_PROC_EXEC;2638 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_CTRL_PROC_EXEC; 3561 2639 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO; 3562 2640 } … … 3580 2658 else 3581 2659 { 3582 pVCpu->hm.s.u32HMError = VMX_UFC_INVALID_UX_COMBO;2660 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_INVALID_UX_COMBO; 3583 2661 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO; 3584 2662 } … … 3702 2780 3703 2781 /** 3704 * Sets pfnStartVm to the best suited variant.3705 *3706 * This must be called whenever anything changes relative to the vmxHCStartVm3707 * variant selection:3708 * - pVCpu->hm.s.fLoadSaveGuestXcr03709 * - HM_WSF_IBPB_ENTRY in pVCpu->hmr0.s.fWorldSwitcher3710 * - HM_WSF_IBPB_EXIT in pVCpu->hmr0.s.fWorldSwitcher3711 * - Perhaps: CPUMIsGuestFPUStateActive() (windows only)3712 * - Perhaps: CPUMCTX.fXStateMask (windows only)3713 *3714 * We currently ASSUME that neither HM_WSF_IBPB_ENTRY nor HM_WSF_IBPB_EXIT3715 * cannot be changed at runtime.3716 */3717 static void vmxHCUpdateStartVmFunction(PVMCPUCC pVCpu)3718 {3719 static const struct CLANGWORKAROUND { PFNHMVMXSTARTVM pfn; } s_avmxHCStartVmFunctions[] =3720 {3721 { vmxHCStartVm_SansXcr0_SansIbpbEntry_SansL1dEntry_SansMdsEntry_SansIbpbExit },3722 { vmxHCStartVm_WithXcr0_SansIbpbEntry_SansL1dEntry_SansMdsEntry_SansIbpbExit },3723 { vmxHCStartVm_SansXcr0_WithIbpbEntry_SansL1dEntry_SansMdsEntry_SansIbpbExit },3724 { vmxHCStartVm_WithXcr0_WithIbpbEntry_SansL1dEntry_SansMdsEntry_SansIbpbExit },3725 { vmxHCStartVm_SansXcr0_SansIbpbEntry_WithL1dEntry_SansMdsEntry_SansIbpbExit },3726 { vmxHCStartVm_WithXcr0_SansIbpbEntry_WithL1dEntry_SansMdsEntry_SansIbpbExit },3727 { vmxHCStartVm_SansXcr0_WithIbpbEntry_WithL1dEntry_SansMdsEntry_SansIbpbExit },3728 { vmxHCStartVm_WithXcr0_WithIbpbEntry_WithL1dEntry_SansMdsEntry_SansIbpbExit },3729 { vmxHCStartVm_SansXcr0_SansIbpbEntry_SansL1dEntry_WithMdsEntry_SansIbpbExit },3730 { vmxHCStartVm_WithXcr0_SansIbpbEntry_SansL1dEntry_WithMdsEntry_SansIbpbExit },3731 { vmxHCStartVm_SansXcr0_WithIbpbEntry_SansL1dEntry_WithMdsEntry_SansIbpbExit },3732 { vmxHCStartVm_WithXcr0_WithIbpbEntry_SansL1dEntry_WithMdsEntry_SansIbpbExit },3733 { vmxHCStartVm_SansXcr0_SansIbpbEntry_WithL1dEntry_WithMdsEntry_SansIbpbExit },3734 { vmxHCStartVm_WithXcr0_SansIbpbEntry_WithL1dEntry_WithMdsEntry_SansIbpbExit },3735 { vmxHCStartVm_SansXcr0_WithIbpbEntry_WithL1dEntry_WithMdsEntry_SansIbpbExit },3736 { vmxHCStartVm_WithXcr0_WithIbpbEntry_WithL1dEntry_WithMdsEntry_SansIbpbExit },3737 { vmxHCStartVm_SansXcr0_SansIbpbEntry_SansL1dEntry_SansMdsEntry_WithIbpbExit },3738 { vmxHCStartVm_WithXcr0_SansIbpbEntry_SansL1dEntry_SansMdsEntry_WithIbpbExit },3739 { vmxHCStartVm_SansXcr0_WithIbpbEntry_SansL1dEntry_SansMdsEntry_WithIbpbExit },3740 { vmxHCStartVm_WithXcr0_WithIbpbEntry_SansL1dEntry_SansMdsEntry_WithIbpbExit },3741 { vmxHCStartVm_SansXcr0_SansIbpbEntry_WithL1dEntry_SansMdsEntry_WithIbpbExit },3742 { vmxHCStartVm_WithXcr0_SansIbpbEntry_WithL1dEntry_SansMdsEntry_WithIbpbExit },3743 { vmxHCStartVm_SansXcr0_WithIbpbEntry_WithL1dEntry_SansMdsEntry_WithIbpbExit },3744 { vmxHCStartVm_WithXcr0_WithIbpbEntry_WithL1dEntry_SansMdsEntry_WithIbpbExit },3745 { vmxHCStartVm_SansXcr0_SansIbpbEntry_SansL1dEntry_WithMdsEntry_WithIbpbExit },3746 { vmxHCStartVm_WithXcr0_SansIbpbEntry_SansL1dEntry_WithMdsEntry_WithIbpbExit },3747 { vmxHCStartVm_SansXcr0_WithIbpbEntry_SansL1dEntry_WithMdsEntry_WithIbpbExit },3748 { 
vmxHCStartVm_WithXcr0_WithIbpbEntry_SansL1dEntry_WithMdsEntry_WithIbpbExit },3749 { vmxHCStartVm_SansXcr0_SansIbpbEntry_WithL1dEntry_WithMdsEntry_WithIbpbExit },3750 { vmxHCStartVm_WithXcr0_SansIbpbEntry_WithL1dEntry_WithMdsEntry_WithIbpbExit },3751 { vmxHCStartVm_SansXcr0_WithIbpbEntry_WithL1dEntry_WithMdsEntry_WithIbpbExit },3752 { vmxHCStartVm_WithXcr0_WithIbpbEntry_WithL1dEntry_WithMdsEntry_WithIbpbExit },3753 };3754 uintptr_t const idx = (pVCpu->hmr0.s.fLoadSaveGuestXcr0 ? 1 : 0)3755 | (pVCpu->hmr0.s.fWorldSwitcher & HM_WSF_IBPB_ENTRY ? 2 : 0)3756 | (pVCpu->hmr0.s.fWorldSwitcher & HM_WSF_L1D_ENTRY ? 4 : 0)3757 | (pVCpu->hmr0.s.fWorldSwitcher & HM_WSF_MDS_ENTRY ? 8 : 0)3758 | (pVCpu->hmr0.s.fWorldSwitcher & HM_WSF_IBPB_EXIT ? 16 : 0);3759 PFNHMVMXSTARTVM const pfnStartVm = s_avmxHCStartVmFunctions[idx].pfn;3760 if (pVCpu->hmr0.s.vmx.pfnStartVm != pfnStartVm)3761 pVCpu->hmr0.s.vmx.pfnStartVm = pfnStartVm;3762 }3763 3764 3765 /**3766 * Selector FNHMSVMVMRUN implementation.3767 */3768 static DECLCALLBACK(int) vmxHCStartVmSelector(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume)3769 {3770 vmxHCUpdateStartVmFunction(pVCpu);3771 return pVCpu->hmr0.s.vmx.pfnStartVm(pVmcsInfo, pVCpu, fResume);3772 }3773 3774 3775 /**3776 * Sets up the VMCS for executing a guest (or nested-guest) using hardware-assisted3777 * VMX.3778 *3779 * @returns VBox status code.3780 * @param pVCpu The cross context virtual CPU structure.3781 * @param pVmcsInfo The VMCS info. object.3782 * @param fIsNstGstVmcs Whether this is a nested-guest VMCS.3783 */3784 static int vmxHCSetupVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNstGstVmcs)3785 {3786 Assert(pVmcsInfo->pvVmcs);3787 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));3788 3789 /* Set the CPU specified revision identifier at the beginning of the VMCS structure. */3790 *(uint32_t *)pVmcsInfo->pvVmcs = RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_ID);3791 const char * const pszVmcs = fIsNstGstVmcs ? "nested-guest VMCS" : "guest VMCS";3792 3793 LogFlowFunc(("\n"));3794 3795 /*3796 * Initialize the VMCS using VMCLEAR before loading the VMCS.3797 * See Intel spec. 31.6 "Preparation And Launching A Virtual Machine".3798 */3799 int rc = vmxHCClearVmcs(pVmcsInfo);3800 if (RT_SUCCESS(rc))3801 {3802 rc = vmxHCLoadVmcs(pVmcsInfo);3803 if (RT_SUCCESS(rc))3804 {3805 /*3806 * Initialize the hardware-assisted VMX execution handler for guest and nested-guest VMCS.3807 * The host is always 64-bit since we no longer support 32-bit hosts.3808 * Currently we have just a single handler for all guest modes as well, see @bugref{6208#c73}.3809 */3810 if (!fIsNstGstVmcs)3811 {3812 rc = vmxHCSetupVmcsPinCtls(pVCpu, pVmcsInfo);3813 if (RT_SUCCESS(rc))3814 {3815 rc = vmxHCSetupVmcsProcCtls(pVCpu, pVmcsInfo);3816 if (RT_SUCCESS(rc))3817 {3818 rc = vmxHCSetupVmcsMiscCtls(pVCpu, pVmcsInfo);3819 if (RT_SUCCESS(rc))3820 {3821 vmxHCSetupVmcsXcptBitmap(pVCpu, pVmcsInfo);3822 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX3823 /*3824 * If a shadow VMCS is allocated for the VMCS info. object, initialize the3825 * VMCS revision ID and shadow VMCS indicator bit. 
Also, clear the VMCS3826 * making it fit for use when VMCS shadowing is later enabled.3827 */3828 if (pVmcsInfo->pvShadowVmcs)3829 {3830 VMXVMCSREVID VmcsRevId;3831 VmcsRevId.u = RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_ID);3832 VmcsRevId.n.fIsShadowVmcs = 1;3833 *(uint32_t *)pVmcsInfo->pvShadowVmcs = VmcsRevId.u;3834 rc = vmxHCClearShadowVmcs(pVmcsInfo);3835 if (RT_SUCCESS(rc))3836 { /* likely */ }3837 else3838 LogRelFunc(("Failed to initialize shadow VMCS. rc=%Rrc\n", rc));3839 }3840 #endif3841 }3842 else3843 LogRelFunc(("Failed to setup miscellaneous controls. rc=%Rrc\n", rc));3844 }3845 else3846 LogRelFunc(("Failed to setup processor-based VM-execution controls. rc=%Rrc\n", rc));3847 }3848 else3849 LogRelFunc(("Failed to setup pin-based controls. rc=%Rrc\n", rc));3850 }3851 else3852 {3853 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX3854 rc = vmxHCSetupVmcsCtlsNested(pVmcsInfo);3855 if (RT_SUCCESS(rc))3856 { /* likely */ }3857 else3858 LogRelFunc(("Failed to initialize nested-guest VMCS. rc=%Rrc\n", rc));3859 #else3860 AssertFailed();3861 #endif3862 }3863 }3864 else3865 LogRelFunc(("Failed to load the %s. rc=%Rrc\n", rc, pszVmcs));3866 }3867 else3868 LogRelFunc(("Failed to clear the %s. rc=%Rrc\n", rc, pszVmcs));3869 3870 /* Sync any CPU internal VMCS data back into our VMCS in memory. */3871 if (RT_SUCCESS(rc))3872 {3873 rc = vmxHCClearVmcs(pVmcsInfo);3874 if (RT_SUCCESS(rc))3875 { /* likely */ }3876 else3877 LogRelFunc(("Failed to clear the %s post setup. rc=%Rrc\n", rc, pszVmcs));3878 }3879 3880 /*3881 * Update the last-error record both for failures and success, so we3882 * can propagate the status code back to ring-3 for diagnostics.3883 */3884 vmxHCUpdateErrorRecord(pVCpu, rc);3885 NOREF(pszVmcs);3886 return rc;3887 }3888 3889 3890 /**3891 2782 * Exports the guest state with appropriate VM-entry and VM-exit controls in the 3892 2783 * VMCS. … … 3903 2794 static int vmxHCExportGuestEntryExitCtls(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient) 3904 2795 { 3905 if (ASMAtomicUoReadU64(& pVCpu->hm.s.fCtxChanged) & HM_CHANGED_VMX_ENTRY_EXIT_CTLS)2796 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_VMX_ENTRY_EXIT_CTLS) 3906 2797 { 3907 2798 PVMCC pVM = pVCpu->CTX_SUFF(pVM); … … 3970 2861 Log4Func(("Invalid VM-entry controls combo! Cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n", 3971 2862 g_HmMsrs.u.vmx.EntryCtls.n.allowed0, fVal, fZap)); 3972 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_ENTRY;2863 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_CTRL_ENTRY; 3973 2864 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO; 3974 2865 } … … 4048 2939 Log4Func(("Invalid VM-exit controls combo! cpu=%#RX32 fVal=%#RX32 fZap=%R#X32\n", 4049 2940 g_HmMsrs.u.vmx.ExitCtls.n.allowed0, fVal, fZap)); 4050 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_EXIT;2941 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_CTRL_EXIT; 4051 2942 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO; 4052 2943 } … … 4061 2952 } 4062 2953 4063 ASMAtomicUoAndU64(& pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_VMX_ENTRY_EXIT_CTLS);2954 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_VMX_ENTRY_EXIT_CTLS); 4064 2955 } 4065 2956 return VINF_SUCCESS; 4066 2957 } 2958 #endif /* !IN_RING0 */ 4067 2959 4068 2960 … … 4070 2962 * Sets the TPR threshold in the VMCS. 4071 2963 * 2964 * @param pVCpu The cross context virtual CPU structure. 4072 2965 * @param pVmcsInfo The VMCS info. object. 4073 2966 * @param u32TprThreshold The TPR threshold (task-priority class only). 
4074 2967 */ 4075 DECLINLINE(void) vmxHCApicSetTprThreshold(PVM XVMCSINFO pVmcsInfo, uint32_t u32TprThreshold)2968 DECLINLINE(void) vmxHCApicSetTprThreshold(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint32_t u32TprThreshold) 4076 2969 { 4077 2970 Assert(!(u32TprThreshold & ~VMX_TPR_THRESHOLD_MASK)); /* Bits 31:4 MBZ. */ … … 4093 2986 static void vmxHCExportGuestApicTpr(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient) 4094 2987 { 4095 if (ASMAtomicUoReadU64(& pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_APIC_TPR)2988 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_APIC_TPR) 4096 2989 { 4097 2990 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_APIC_TPR); … … 4132 3025 } 4133 3026 4134 vmxHCApicSetTprThreshold(pV mcsInfo, u32TprThreshold);3027 vmxHCApicSetTprThreshold(pVCpu, pVmcsInfo, u32TprThreshold); 4135 3028 } 4136 3029 } 4137 3030 } 4138 3031 /* else: the TPR threshold has already been updated while merging the nested-guest VMCS. */ 4139 ASMAtomicUoAndU64(& pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_APIC_TPR);3032 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_APIC_TPR); 4140 3033 } 4141 3034 } … … 4204 3097 } 4205 3098 4206 3099 #ifdef IN_RING0 4207 3100 /** 4208 3101 * Exports the exception intercepts required for guest execution in the VMCS. … … 4215 3108 static void vmxHCExportGuestXcptIntercepts(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient) 4216 3109 { 4217 if (ASMAtomicUoReadU64(& pVCpu->hm.s.fCtxChanged) & HM_CHANGED_VMX_XCPT_INTERCEPTS)3110 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_VMX_XCPT_INTERCEPTS) 4218 3111 { 4219 3112 /* When executing a nested-guest, we do not need to trap GIM hypercalls by intercepting #UD. */ 4220 3113 if ( !pVmxTransient->fIsNestedGuest 4221 && pVCpu->hm.s.fGIMTrapXcptUD)3114 && VCPU_2_VMXSTATE(pVCpu).fGIMTrapXcptUD) 4222 3115 vmxHCAddXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_UD); 4223 3116 else … … 4225 3118 4226 3119 /* Other exception intercepts are handled elsewhere, e.g. while exporting guest CR0. 
*/ 4227 ASMAtomicUoAndU64(& pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_VMX_XCPT_INTERCEPTS);3120 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_VMX_XCPT_INTERCEPTS); 4228 3121 } 4229 3122 } … … 4239 3132 static void vmxHCExportGuestRip(PVMCPUCC pVCpu) 4240 3133 { 4241 if (ASMAtomicUoReadU64(& pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_RIP)3134 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_RIP) 4242 3135 { 4243 3136 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP); … … 4246 3139 AssertRC(rc); 4247 3140 4248 ASMAtomicUoAndU64(& pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_RIP);3141 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_RIP); 4249 3142 Log4Func(("rip=%#RX64\n", pVCpu->cpum.GstCtx.rip)); 4250 3143 } … … 4261 3154 static void vmxHCExportGuestRsp(PVMCPUCC pVCpu) 4262 3155 { 4263 if (ASMAtomicUoReadU64(& pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_RSP)3156 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_RSP) 4264 3157 { 4265 3158 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RSP); … … 4268 3161 AssertRC(rc); 4269 3162 4270 ASMAtomicUoAndU64(& pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_RSP);3163 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_RSP); 4271 3164 Log4Func(("rsp=%#RX64\n", pVCpu->cpum.GstCtx.rsp)); 4272 3165 } … … 4284 3177 static void vmxHCExportGuestRflags(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient) 4285 3178 { 4286 if (ASMAtomicUoReadU64(& pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_RFLAGS)3179 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_RFLAGS) 4287 3180 { 4288 3181 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS); … … 4314 3207 AssertRC(rc); 4315 3208 4316 ASMAtomicUoAndU64(& pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_RFLAGS);3209 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_RFLAGS); 4317 3210 Log4Func(("eflags=%#RX32\n", fEFlags.u32)); 4318 3211 } … … 4498 3391 static int vmxHCExportGuestHwvirtState(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient) 4499 3392 { 4500 if (ASMAtomicUoReadU64(& pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_HWVIRT)3393 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_HWVIRT) 4501 3394 { 4502 3395 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX … … 4528 3421 * was newly loaded or modified before copying it to the shadow VMCS. 4529 3422 */ 4530 if (! 
pVCpu->hm.s.vmx.fCopiedNstGstToShadowVmcs)3423 if (!VCPU_2_VMXSTATE(pVCpu).vmx.fCopiedNstGstToShadowVmcs) 4531 3424 { 4532 3425 int rc = vmxHCCopyNstGstToShadowVmcs(pVCpu, pVmcsInfo); 4533 3426 AssertRCReturn(rc, rc); 4534 pVCpu->hm.s.vmx.fCopiedNstGstToShadowVmcs = true;3427 VCPU_2_VMXSTATE(pVCpu).vmx.fCopiedNstGstToShadowVmcs = true; 4535 3428 } 4536 3429 vmxHCEnableVmcsShadowing(pVmcsInfo); … … 4542 3435 NOREF(pVmxTransient); 4543 3436 #endif 4544 ASMAtomicUoAndU64(& pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_HWVIRT);3437 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_HWVIRT); 4545 3438 } 4546 3439 return VINF_SUCCESS; … … 4562 3455 static int vmxHCExportGuestCR0(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient) 4563 3456 { 4564 if (ASMAtomicUoReadU64(& pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_CR0)3457 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR0) 4565 3458 { 4566 3459 PVMCC pVM = pVCpu->CTX_SUFF(pVM); … … 4659 3552 uXcptBitmap |= RT_BIT(X86_XCPT_PF); 4660 3553 #endif 4661 if ( pVCpu->hm.s.fTrapXcptGpForLovelyMesaDrv)3554 if (VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv) 4662 3555 uXcptBitmap |= RT_BIT(X86_XCPT_GP); 4663 3556 Assert(pVM->hmr0.s.fNestedPaging || (uXcptBitmap & RT_BIT(X86_XCPT_PF))); … … 4715 3608 } 4716 3609 4717 ASMAtomicUoAndU64(& pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_CR0);3610 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR0); 4718 3611 } 4719 3612 … … 4749 3642 * Guest CR3. 4750 3643 */ 4751 if (ASMAtomicUoReadU64(& pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_CR3)3644 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR3) 4752 3645 { 4753 3646 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR3); … … 4840 3733 } 4841 3734 4842 ASMAtomicUoAndU64(& pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_CR3);3735 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR3); 4843 3736 } 4844 3737 … … 4847 3740 * ASSUMES this is done everytime we get in from ring-3! (XCR0) 4848 3741 */ 4849 if (ASMAtomicUoReadU64(& pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_CR4)3742 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR4) 4850 3743 { 4851 3744 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx; … … 4905 3798 * paging mode and thus we need to adjust VT-x's view of CR4 depending on our shadow page tables. 4906 3799 */ 4907 switch ( pVCpu->hm.s.enmShadowMode)3800 switch (VCPU_2_VMXSTATE(pVCpu).enmShadowMode) 4908 3801 { 4909 3802 case PGMMODE_REAL: /* Real-mode. */ … … 4953 3846 } 4954 3847 4955 ASMAtomicUoAndU64(& pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_CR4);3848 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR4); 4956 3849 4957 3850 Log4Func(("cr4=%#RX64 shadow=%#RX64 (set=%#RX64 zap=%#RX64)\n", u64GuestCr4, u64ShadowCr4, fSetCr4, fZapCr4)); … … 5014 3907 bool fInterceptMovDRx = false; 5015 3908 uint32_t uProcCtls = pVmcsInfo->u32ProcCtls; 5016 if ( pVCpu->hm.s.fSingleInstruction)3909 if (VCPU_2_VMXSTATE(pVCpu).fSingleInstruction) 5017 3910 { 5018 3911 /* If the CPU supports the monitor trap flag, use it for single stepping in DBGF and avoid intercepting #DB. 
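   The fallback below forces RFLAGS.TF instead: it marks HM_CHANGED_GUEST_RFLAGS so
   the modified flags are re-exported, and sets fClearTrapFlag so the bit can be
   taken out again once the VM-exit has been handled.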
*/ … … 5025 3918 { 5026 3919 pVCpu->cpum.GstCtx.eflags.u32 |= X86_EFL_TF; 5027 pVCpu->hm.s.fCtxChanged |= HM_CHANGED_GUEST_RFLAGS;3920 VCPU_2_VMXSTATE(pVCpu).fCtxChanged |= HM_CHANGED_GUEST_RFLAGS; 5028 3921 pVCpu->hmr0.s.fClearTrapFlag = true; 5029 3922 fSteppingDB = true; … … 5068 3961 Assert(CPUMIsGuestDebugStateActive(pVCpu)); 5069 3962 Assert(!CPUMIsHyperDebugStateActive(pVCpu)); 5070 STAM_COUNTER_INC(& pVCpu->hm.s.StatDRxArmed);3963 STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatDRxArmed); 5071 3964 } 5072 3965 Assert(!fInterceptMovDRx); … … 5118 4011 if (fSteppingDB) 5119 4012 { 5120 Assert( pVCpu->hm.s.fSingleInstruction);4013 Assert(VCPU_2_VMXSTATE(pVCpu).fSingleInstruction); 5121 4014 Assert(pVCpu->cpum.GstCtx.eflags.Bits.u1TF); 5122 4015 … … 5396 4289 * Guest Segment registers: CS, SS, DS, ES, FS, GS. 5397 4290 */ 5398 if (ASMAtomicUoReadU64(& pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_SREG_MASK)5399 { 5400 if (ASMAtomicUoReadU64(& pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_CS)4291 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_SREG_MASK) 4292 { 4293 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CS) 5401 4294 { 5402 4295 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CS); … … 5405 4298 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_CS, &pCtx->cs); 5406 4299 AssertRC(rc); 5407 ASMAtomicUoAndU64(& pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_CS);5408 } 5409 5410 if (ASMAtomicUoReadU64(& pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_SS)4300 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CS); 4301 } 4302 4303 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_SS) 5411 4304 { 5412 4305 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SS); … … 5415 4308 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_SS, &pCtx->ss); 5416 4309 AssertRC(rc); 5417 ASMAtomicUoAndU64(& pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_SS);5418 } 5419 5420 if (ASMAtomicUoReadU64(& pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_DS)4310 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_SS); 4311 } 4312 4313 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_DS) 5421 4314 { 5422 4315 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DS); … … 5425 4318 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_DS, &pCtx->ds); 5426 4319 AssertRC(rc); 5427 ASMAtomicUoAndU64(& pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_DS);5428 } 5429 5430 if (ASMAtomicUoReadU64(& pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_ES)4320 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_DS); 4321 } 4322 4323 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_ES) 5431 4324 { 5432 4325 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_ES); … … 5435 4328 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_ES, &pCtx->es); 5436 4329 AssertRC(rc); 5437 ASMAtomicUoAndU64(& pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_ES);5438 } 5439 5440 if (ASMAtomicUoReadU64(& pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_FS)4330 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_ES); 4331 } 4332 4333 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_FS) 5441 4334 { 5442 4335 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_FS); … … 5445 4338 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_FS, &pCtx->fs); 5446 4339 AssertRC(rc); 5447 ASMAtomicUoAndU64(& pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_FS);5448 } 5449 5450 if 
(ASMAtomicUoReadU64(& pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_GS)4340 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_FS); 4341 } 4342 4343 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_GS) 5451 4344 { 5452 4345 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GS); … … 5455 4348 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_GS, &pCtx->gs); 5456 4349 AssertRC(rc); 5457 ASMAtomicUoAndU64(& pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_GS);4350 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_GS); 5458 4351 } 5459 4352 … … 5468 4361 * Guest TR. 5469 4362 */ 5470 if (ASMAtomicUoReadU64(& pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_TR)4363 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_TR) 5471 4364 { 5472 4365 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_TR); … … 5529 4422 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_TR_BASE, u64Base); AssertRC(rc); 5530 4423 5531 ASMAtomicUoAndU64(& pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_TR);4424 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_TR); 5532 4425 Log4Func(("tr base=%#RX64 limit=%#RX32\n", pCtx->tr.u64Base, pCtx->tr.u32Limit)); 5533 4426 } … … 5536 4429 * Guest GDTR. 5537 4430 */ 5538 if (ASMAtomicUoReadU64(& pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_GDTR)4431 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_GDTR) 5539 4432 { 5540 4433 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GDTR); … … 5546 4439 Assert(!(pCtx->gdtr.cbGdt & 0xffff0000)); /* Bits 31:16 MBZ. */ 5547 4440 5548 ASMAtomicUoAndU64(& pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_GDTR);4441 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_GDTR); 5549 4442 Log4Func(("gdtr base=%#RX64 limit=%#RX32\n", pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt)); 5550 4443 } … … 5553 4446 * Guest LDTR. 5554 4447 */ 5555 if (ASMAtomicUoReadU64(& pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_LDTR)4448 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_LDTR) 5556 4449 { 5557 4450 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_LDTR); … … 5585 4478 } 5586 4479 5587 ASMAtomicUoAndU64(& pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_LDTR);4480 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_LDTR); 5588 4481 Log4Func(("ldtr base=%#RX64 limit=%#RX32\n", pCtx->ldtr.u64Base, pCtx->ldtr.u32Limit)); 5589 4482 } … … 5592 4485 * Guest IDTR. 5593 4486 */ 5594 if (ASMAtomicUoReadU64(& pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_IDTR)4487 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_IDTR) 5595 4488 { 5596 4489 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_IDTR); … … 5602 4495 Assert(!(pCtx->idtr.cbIdt & 0xffff0000)); /* Bits 31:16 MBZ. */ 5603 4496 5604 ASMAtomicUoAndU64(& pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_IDTR);4497 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_IDTR); 5605 4498 Log4Func(("idtr base=%#RX64 limit=%#RX32\n", pCtx->idtr.pIdt, pCtx->idtr.cbIdt)); 5606 4499 } … … 5649 4542 * those MSRs into the auto-load/store MSR area. Nothing to do here. 5650 4543 */ 5651 if (ASMAtomicUoReadU64(& pVCpu->hm.s.fCtxChanged) & HM_CHANGED_VMX_GUEST_AUTO_MSRS)4544 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_VMX_GUEST_AUTO_MSRS) 5652 4545 { 5653 4546 /* No auto-load/store MSRs currently. 
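   If one ever becomes necessary it would presumably be registered here through the
   add counterpart of vmxHCRemoveAutoLoadStoreMsr, the helper the EFER code further
   down falls back to when the MSR cannot be swapped via the VM-entry/VM-exit
   controls.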
*/ 5654 ASMAtomicUoAndU64(& pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_VMX_GUEST_AUTO_MSRS);4547 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_VMX_GUEST_AUTO_MSRS); 5655 4548 } 5656 4549 … … 5658 4551 * Guest Sysenter MSRs. 5659 4552 */ 5660 if (ASMAtomicUoReadU64(& pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_SYSENTER_MSR_MASK)4553 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_SYSENTER_MSR_MASK) 5661 4554 { 5662 4555 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SYSENTER_MSRS); 5663 4556 5664 if (ASMAtomicUoReadU64(& pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_SYSENTER_CS_MSR)4557 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_SYSENTER_CS_MSR) 5665 4558 { 5666 4559 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_SYSENTER_CS, pCtx->SysEnter.cs); 5667 4560 AssertRC(rc); 5668 ASMAtomicUoAndU64(& pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_SYSENTER_CS_MSR);5669 } 5670 5671 if (ASMAtomicUoReadU64(& pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_SYSENTER_EIP_MSR)4561 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_SYSENTER_CS_MSR); 4562 } 4563 4564 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_SYSENTER_EIP_MSR) 5672 4565 { 5673 4566 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_EIP, pCtx->SysEnter.eip); 5674 4567 AssertRC(rc); 5675 ASMAtomicUoAndU64(& pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_SYSENTER_EIP_MSR);5676 } 5677 5678 if (ASMAtomicUoReadU64(& pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_SYSENTER_ESP_MSR)4568 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_SYSENTER_EIP_MSR); 4569 } 4570 4571 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_SYSENTER_ESP_MSR) 5679 4572 { 5680 4573 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_ESP, pCtx->SysEnter.esp); 5681 4574 AssertRC(rc); 5682 ASMAtomicUoAndU64(& pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_SYSENTER_ESP_MSR);4575 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_SYSENTER_ESP_MSR); 5683 4576 } 5684 4577 } … … 5687 4580 * Guest/host EFER MSR. 5688 4581 */ 5689 if (ASMAtomicUoReadU64(& pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_EFER_MSR)4582 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_EFER_MSR) 5690 4583 { 5691 4584 /* Whether we are using the VMCS to swap the EFER MSR must have been 5692 4585 determined earlier while exporting VM-entry/VM-exit controls. */ 5693 Assert(!(ASMAtomicUoReadU64(& pVCpu->hm.s.fCtxChanged) & HM_CHANGED_VMX_ENTRY_EXIT_CTLS));4586 Assert(!(ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_VMX_ENTRY_EXIT_CTLS)); 5694 4587 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_EFER); 5695 4588 … … 5739 4632 vmxHCRemoveAutoLoadStoreMsr(pVCpu, pVmxTransient, MSR_K6_EFER); 5740 4633 5741 ASMAtomicUoAndU64(& pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_EFER_MSR);4634 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_EFER_MSR); 5742 4635 } 5743 4636 … … 5745 4638 * Other MSRs. 5746 4639 */ 5747 if (ASMAtomicUoReadU64(& pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_OTHER_MSRS)4640 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_OTHER_MSRS) 5748 4641 { 5749 4642 /* Speculation Control (R/W). 
*/ … … 5788 4681 } 5789 4682 5790 ASMAtomicUoAndU64(& pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_OTHER_MSRS);4683 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_OTHER_MSRS); 5791 4684 } 5792 4685 5793 4686 return VINF_SUCCESS; 5794 }5795 5796 5797 /**5798 * Wrapper for running the guest code in VT-x.5799 *5800 * @returns VBox status code, no informational status codes.5801 * @param pVCpu The cross context virtual CPU structure.5802 * @param pVmxTransient The VMX-transient structure.5803 *5804 * @remarks No-long-jump zone!!!5805 */5806 DECLINLINE(int) vmxHCRunGuest(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)5807 {5808 /* Mark that HM is the keeper of all guest-CPU registers now that we're going to execute guest code. */5809 pVCpu->cpum.GstCtx.fExtrn |= HMVMX_CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_KEEPER_HM;5810 5811 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;5812 bool const fResumeVM = RT_BOOL(pVmcsInfo->fVmcsState & VMX_V_VMCS_LAUNCH_STATE_LAUNCHED);5813 #ifdef VBOX_WITH_STATISTICS5814 if (fResumeVM)5815 STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxVmResume);5816 else5817 STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxVmLaunch);5818 #endif5819 int rc = pVCpu->hmr0.s.vmx.pfnStartVm(pVmcsInfo, pVCpu, fResumeVM);5820 AssertMsg(rc <= VINF_SUCCESS, ("%Rrc\n", rc));5821 return rc;5822 }5823 5824 5825 /**5826 * Reports world-switch error and dumps some useful debug info.5827 *5828 * @param pVCpu The cross context virtual CPU structure.5829 * @param rcVMRun The return code from VMLAUNCH/VMRESUME.5830 * @param pVmxTransient The VMX-transient structure (only5831 * exitReason updated).5832 */5833 static void vmxHCReportWorldSwitchError(PVMCPUCC pVCpu, int rcVMRun, PVMXTRANSIENT pVmxTransient)5834 {5835 Assert(pVCpu);5836 Assert(pVmxTransient);5837 HMVMX_ASSERT_PREEMPT_SAFE(pVCpu);5838 5839 Log4Func(("VM-entry failure: %Rrc\n", rcVMRun));5840 switch (rcVMRun)5841 {5842 case VERR_VMX_INVALID_VMXON_PTR:5843 AssertFailed();5844 break;5845 case VINF_SUCCESS: /* VMLAUNCH/VMRESUME succeeded but VM-entry failed... yeah, true story. */5846 case VERR_VMX_UNABLE_TO_START_VM: /* VMLAUNCH/VMRESUME itself failed. */5847 {5848 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_REASON, &pVCpu->hm.s.vmx.LastError.u32ExitReason);5849 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_VM_INSTR_ERROR, &pVCpu->hm.s.vmx.LastError.u32InstrError);5850 AssertRC(rc);5851 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);5852 5853 pVCpu->hm.s.vmx.LastError.idEnteredCpu = pVCpu->hmr0.s.idEnteredCpu;5854 /* LastError.idCurrentCpu was already updated in vmxHCPreRunGuestCommitted().5855 Cannot do it here as we may have been long preempted. */5856 5857 #ifdef VBOX_STRICT5858 PVMXVMCSINFO pVmcsInfo = hmGetVmxActiveVmcsInfo(pVCpu);5859 Log4(("uExitReason %#RX32 (VmxTransient %#RX16)\n", pVCpu->hm.s.vmx.LastError.u32ExitReason,5860 pVmxTransient->uExitReason));5861 Log4(("Exit Qualification %#RX64\n", pVmxTransient->uExitQual));5862 Log4(("InstrError %#RX32\n", pVCpu->hm.s.vmx.LastError.u32InstrError));5863 if (pVCpu->hm.s.vmx.LastError.u32InstrError <= HMVMX_INSTR_ERROR_MAX)5864 Log4(("InstrError Desc. \"%s\"\n", g_apszVmxInstrErrors[pVCpu->hm.s.vmx.LastError.u32InstrError]));5865 else5866 Log4(("InstrError Desc. Range exceeded %u\n", HMVMX_INSTR_ERROR_MAX));5867 Log4(("Entered host CPU %u\n", pVCpu->hm.s.vmx.LastError.idEnteredCpu));5868 Log4(("Current host CPU %u\n", pVCpu->hm.s.vmx.LastError.idCurrentCpu));5869 5870 static struct5871 {5872 /** Name of the field to log. */5873 const char *pszName;5874 /** The VMCS field. 
*/5875 uint32_t uVmcsField;5876 /** Whether host support of this field needs to be checked. */5877 bool fCheckSupport;5878 } const s_aVmcsFields[] =5879 {5880 { "VMX_VMCS32_CTRL_PIN_EXEC", VMX_VMCS32_CTRL_PIN_EXEC, false },5881 { "VMX_VMCS32_CTRL_PROC_EXEC", VMX_VMCS32_CTRL_PROC_EXEC, false },5882 { "VMX_VMCS32_CTRL_PROC_EXEC2", VMX_VMCS32_CTRL_PROC_EXEC2, true },5883 { "VMX_VMCS32_CTRL_ENTRY", VMX_VMCS32_CTRL_ENTRY, false },5884 { "VMX_VMCS32_CTRL_EXIT", VMX_VMCS32_CTRL_EXIT, false },5885 { "VMX_VMCS32_CTRL_CR3_TARGET_COUNT", VMX_VMCS32_CTRL_CR3_TARGET_COUNT, false },5886 { "VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO", VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, false },5887 { "VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE", VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, false },5888 { "VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH", VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, false },5889 { "VMX_VMCS32_CTRL_TPR_THRESHOLD", VMX_VMCS32_CTRL_TPR_THRESHOLD, false },5890 { "VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT", VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, false },5891 { "VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT", VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, false },5892 { "VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT", VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, false },5893 { "VMX_VMCS32_CTRL_EXCEPTION_BITMAP", VMX_VMCS32_CTRL_EXCEPTION_BITMAP, false },5894 { "VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK", VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK, false },5895 { "VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH", VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH, false },5896 { "VMX_VMCS_CTRL_CR0_MASK", VMX_VMCS_CTRL_CR0_MASK, false },5897 { "VMX_VMCS_CTRL_CR0_READ_SHADOW", VMX_VMCS_CTRL_CR0_READ_SHADOW, false },5898 { "VMX_VMCS_CTRL_CR4_MASK", VMX_VMCS_CTRL_CR4_MASK, false },5899 { "VMX_VMCS_CTRL_CR4_READ_SHADOW", VMX_VMCS_CTRL_CR4_READ_SHADOW, false },5900 { "VMX_VMCS64_CTRL_EPTP_FULL", VMX_VMCS64_CTRL_EPTP_FULL, true },5901 { "VMX_VMCS_GUEST_RIP", VMX_VMCS_GUEST_RIP, false },5902 { "VMX_VMCS_GUEST_RSP", VMX_VMCS_GUEST_RSP, false },5903 { "VMX_VMCS_GUEST_RFLAGS", VMX_VMCS_GUEST_RFLAGS, false },5904 { "VMX_VMCS16_VPID", VMX_VMCS16_VPID, true, },5905 { "VMX_VMCS_HOST_CR0", VMX_VMCS_HOST_CR0, false },5906 { "VMX_VMCS_HOST_CR3", VMX_VMCS_HOST_CR3, false },5907 { "VMX_VMCS_HOST_CR4", VMX_VMCS_HOST_CR4, false },5908 /* The order of selector fields below are fixed! */5909 { "VMX_VMCS16_HOST_ES_SEL", VMX_VMCS16_HOST_ES_SEL, false },5910 { "VMX_VMCS16_HOST_CS_SEL", VMX_VMCS16_HOST_CS_SEL, false },5911 { "VMX_VMCS16_HOST_SS_SEL", VMX_VMCS16_HOST_SS_SEL, false },5912 { "VMX_VMCS16_HOST_DS_SEL", VMX_VMCS16_HOST_DS_SEL, false },5913 { "VMX_VMCS16_HOST_FS_SEL", VMX_VMCS16_HOST_FS_SEL, false },5914 { "VMX_VMCS16_HOST_GS_SEL", VMX_VMCS16_HOST_GS_SEL, false },5915 { "VMX_VMCS16_HOST_TR_SEL", VMX_VMCS16_HOST_TR_SEL, false },5916 /* End of ordered selector fields. 
*/5917 { "VMX_VMCS_HOST_TR_BASE", VMX_VMCS_HOST_TR_BASE, false },5918 { "VMX_VMCS_HOST_GDTR_BASE", VMX_VMCS_HOST_GDTR_BASE, false },5919 { "VMX_VMCS_HOST_IDTR_BASE", VMX_VMCS_HOST_IDTR_BASE, false },5920 { "VMX_VMCS32_HOST_SYSENTER_CS", VMX_VMCS32_HOST_SYSENTER_CS, false },5921 { "VMX_VMCS_HOST_SYSENTER_EIP", VMX_VMCS_HOST_SYSENTER_EIP, false },5922 { "VMX_VMCS_HOST_SYSENTER_ESP", VMX_VMCS_HOST_SYSENTER_ESP, false },5923 { "VMX_VMCS_HOST_RSP", VMX_VMCS_HOST_RSP, false },5924 { "VMX_VMCS_HOST_RIP", VMX_VMCS_HOST_RIP, false }5925 };5926 5927 RTGDTR HostGdtr;5928 ASMGetGDTR(&HostGdtr);5929 5930 uint32_t const cVmcsFields = RT_ELEMENTS(s_aVmcsFields);5931 for (uint32_t i = 0; i < cVmcsFields; i++)5932 {5933 uint32_t const uVmcsField = s_aVmcsFields[i].uVmcsField;5934 5935 bool fSupported;5936 if (!s_aVmcsFields[i].fCheckSupport)5937 fSupported = true;5938 else5939 {5940 PVMCC pVM = pVCpu->CTX_SUFF(pVM);5941 switch (uVmcsField)5942 {5943 case VMX_VMCS64_CTRL_EPTP_FULL: fSupported = pVM->hmr0.s.fNestedPaging; break;5944 case VMX_VMCS16_VPID: fSupported = pVM->hmr0.s.vmx.fVpid; break;5945 case VMX_VMCS32_CTRL_PROC_EXEC2:5946 fSupported = RT_BOOL(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_SECONDARY_CTLS);5947 break;5948 default:5949 AssertMsgFailedReturnVoid(("Failed to provide VMCS field support for %#RX32\n", uVmcsField));5950 }5951 }5952 5953 if (fSupported)5954 {5955 uint8_t const uWidth = RT_BF_GET(uVmcsField, VMX_BF_VMCSFIELD_WIDTH);5956 switch (uWidth)5957 {5958 case VMX_VMCSFIELD_WIDTH_16BIT:5959 {5960 uint16_t u16Val;5961 rc = VMX_VMCS_READ_16(pVCpu, uVmcsField, &u16Val);5962 AssertRC(rc);5963 Log4(("%-40s = %#RX16\n", s_aVmcsFields[i].pszName, u16Val));5964 5965 if ( uVmcsField >= VMX_VMCS16_HOST_ES_SEL5966 && uVmcsField <= VMX_VMCS16_HOST_TR_SEL)5967 {5968 if (u16Val < HostGdtr.cbGdt)5969 {5970 /* Order of selectors in s_apszSel is fixed and matches the order in s_aVmcsFields. 
*/5971 static const char * const s_apszSel[] = { "Host ES", "Host CS", "Host SS", "Host DS",5972 "Host FS", "Host GS", "Host TR" };5973 uint8_t const idxSel = RT_BF_GET(uVmcsField, VMX_BF_VMCSFIELD_INDEX);5974 Assert(idxSel < RT_ELEMENTS(s_apszSel));5975 PCX86DESCHC pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u16Val & X86_SEL_MASK));5976 hmR0DumpDescriptor(pDesc, u16Val, s_apszSel[idxSel]);5977 }5978 else5979 Log4((" Selector value exceeds GDT limit!\n"));5980 }5981 break;5982 }5983 5984 case VMX_VMCSFIELD_WIDTH_32BIT:5985 {5986 uint32_t u32Val;5987 rc = VMX_VMCS_READ_32(pVCpu, uVmcsField, &u32Val);5988 AssertRC(rc);5989 Log4(("%-40s = %#RX32\n", s_aVmcsFields[i].pszName, u32Val));5990 break;5991 }5992 5993 case VMX_VMCSFIELD_WIDTH_64BIT:5994 case VMX_VMCSFIELD_WIDTH_NATURAL:5995 {5996 uint64_t u64Val;5997 rc = VMX_VMCS_READ_64(pVCpu, uVmcsField, &u64Val);5998 AssertRC(rc);5999 Log4(("%-40s = %#RX64\n", s_aVmcsFields[i].pszName, u64Val));6000 break;6001 }6002 }6003 }6004 }6005 6006 Log4(("MSR_K6_EFER = %#RX64\n", ASMRdMsr(MSR_K6_EFER)));6007 Log4(("MSR_K8_CSTAR = %#RX64\n", ASMRdMsr(MSR_K8_CSTAR)));6008 Log4(("MSR_K8_LSTAR = %#RX64\n", ASMRdMsr(MSR_K8_LSTAR)));6009 Log4(("MSR_K6_STAR = %#RX64\n", ASMRdMsr(MSR_K6_STAR)));6010 Log4(("MSR_K8_SF_MASK = %#RX64\n", ASMRdMsr(MSR_K8_SF_MASK)));6011 Log4(("MSR_K8_KERNEL_GS_BASE = %#RX64\n", ASMRdMsr(MSR_K8_KERNEL_GS_BASE)));6012 #endif /* VBOX_STRICT */6013 break;6014 }6015 6016 default:6017 /* Impossible */6018 AssertMsgFailed(("vmxHCReportWorldSwitchError %Rrc (%#x)\n", rcVMRun, rcVMRun));6019 break;6020 }6021 4687 } 6022 4688 … … 6051 4717 && TMVirtualSyncIsCurrentDeadlineVersion(pVM, pVCpu->hmr0.s.vmx.uTscDeadlineVersion)) 6052 4718 { 6053 STAM_REL_COUNTER_INC(& pVCpu->hm.s.StatVmxPreemptionReusingDeadline);4719 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatVmxPreemptionReusingDeadline); 6054 4720 fOffsettedTsc = TMCpuTickCanUseRealTSC(pVM, pVCpu, &uTscOffset, &fParavirtTsc); 6055 4721 cTicksToDeadline = pVCpu->hmr0.s.vmx.uTscDeadline - SUPReadTsc(); … … 6058 4724 else 6059 4725 { 6060 STAM_REL_COUNTER_INC(& pVCpu->hm.s.StatVmxPreemptionReusingDeadlineExpired);4726 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatVmxPreemptionReusingDeadlineExpired); 6061 4727 cTicksToDeadline = 0; 6062 4728 } … … 6064 4730 else 6065 4731 { 6066 STAM_REL_COUNTER_INC(& pVCpu->hm.s.StatVmxPreemptionRecalcingDeadline);4732 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatVmxPreemptionRecalcingDeadline); 6067 4733 cTicksToDeadline = TMCpuTickGetDeadlineAndTscOffset(pVM, pVCpu, &uTscOffset, &fOffsettedTsc, &fParavirtTsc, 6068 4734 &pVCpu->hmr0.s.vmx.uTscDeadline, … … 6072 4738 { /* hopefully */ } 6073 4739 else 6074 STAM_REL_COUNTER_INC(& pVCpu->hm.s.StatVmxPreemptionRecalcingDeadlineExpired);4740 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatVmxPreemptionRecalcingDeadlineExpired); 6075 4741 } 6076 4742 … … 6099 4765 AssertRC(rc); 6100 4766 #endif 6101 STAM_COUNTER_INC(& pVCpu->hm.s.StatTscParavirt);4767 STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatTscParavirt); 6102 4768 } 6103 4769 … … 6116 4782 } 6117 4783 } 4784 #endif /* !IN_RING0 */ 6118 4785 6119 4786 … … 6191 4858 RTGCUINTPTR GCPtrFaultAddress) 6192 4859 { 6193 Assert(! 
pVCpu->hm.s.Event.fPending);6194 pVCpu->hm.s.Event.fPending = true;6195 pVCpu->hm.s.Event.u64IntInfo = u32IntInfo;6196 pVCpu->hm.s.Event.u32ErrCode = u32ErrCode;6197 pVCpu->hm.s.Event.cbInstr = cbInstr;6198 pVCpu->hm.s.Event.GCPtrFaultAddress = GCPtrFaultAddress;4860 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending); 4861 VCPU_2_VMXSTATE(pVCpu).Event.fPending = true; 4862 VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo = u32IntInfo; 4863 VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode = u32ErrCode; 4864 VCPU_2_VMXSTATE(pVCpu).Event.cbInstr = cbInstr; 4865 VCPU_2_VMXSTATE(pVCpu).Event.GCPtrFaultAddress = GCPtrFaultAddress; 6199 4866 } 6200 4867 … … 6348 5015 6349 5016 #ifdef VBOX_STRICT 5017 # ifdef IN_RING0 6350 5018 VMMRZCallRing3Disable(pVCpu); 5019 # endif 6351 5020 Log4Func(("Unusable %s: sel=%#x attr=%#x -> %#x\n", pszRegName, pSelReg->Sel, uAttr, pSelReg->Attr.u)); 6352 5021 # ifdef DEBUG_bird … … 6355 5024 pszRegName, uAttr, pSelReg->Attr.u, pSelReg->Sel, pSelReg->u64Base, pSelReg->u32Limit)); 6356 5025 # endif 5026 # ifdef IN_RING0 6357 5027 VMMRZCallRing3Enable(pVCpu); 5028 # endif 6358 5029 NOREF(uAttr); 6359 5030 #endif … … 6474 5145 6475 5146 pCtx->rip = u64Val; 6476 EM R0HistoryUpdatePC(pVCpu, pCtx->rip, false);5147 EMHistoryUpdatePC(pVCpu, pCtx->rip, false); 6477 5148 pCtx->fExtrn &= ~CPUMCTX_EXTRN_RIP; 6478 5149 } … … 6500 5171 6501 5172 pCtx->rflags.u64 = u64Val; 5173 #ifdef IN_RING0 6502 5174 PCVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared; 6503 5175 if (pVmcsInfoShared->RealMode.fRealOnV86Active) … … 6506 5178 pCtx->eflags.Bits.u2IOPL = pVmcsInfoShared->RealMode.Eflags.Bits.u2IOPL; 6507 5179 } 5180 #else 5181 RT_NOREF(pVmcsInfo); 5182 #endif 6508 5183 pCtx->fExtrn &= ~CPUMCTX_EXTRN_RFLAGS; 6509 5184 } … … 6565 5240 { 6566 5241 int rc = VINF_SUCCESS; 5242 #ifdef IN_RING0 6567 5243 PVMCC pVM = pVCpu->CTX_SUFF(pVM); 5244 #endif 6568 5245 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 6569 5246 uint32_t u32Val; … … 6578 5255 * Update: This is very likely a compiler optimization bug, see @bugref{9180}. 6579 5256 */ 6580 # ifdef RT_OS_WINDOWS5257 # ifdef RT_OS_WINDOWS 6581 5258 if (pVM == 0 || pVM == (void *)(uintptr_t)-1) 6582 5259 return VERR_HM_IPE_1; 6583 #endif 6584 6585 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatImportGuestState, x); 6586 5260 # endif 5261 5262 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATE(pVCpu).StatImportGuestState, x); 5263 5264 #ifdef IN_RING0 6587 5265 /* 6588 5266 * We disable interrupts to make the updating of the state and in particular … … 6590 5268 */ 6591 5269 RTCCUINTREG const fEFlags = ASMIntDisableFlags(); 5270 #endif 6592 5271 6593 5272 fWhat &= pCtx->fExtrn; … … 6621 5300 if (fRealOnV86Active) 6622 5301 pCtx->cs.Attr.u = pVmcsInfoShared->RealMode.AttrCS.u; 6623 EM R0HistoryUpdatePC(pVCpu, pCtx->cs.u64Base + pCtx->rip, true /* fFlattened */);5302 EMHistoryUpdatePC(pVCpu, pCtx->cs.u64Base + pCtx->rip, true /* fFlattened */); 6624 5303 } 6625 5304 if (fWhat & CPUMCTX_EXTRN_SS) … … 6678 5357 if (fWhat & CPUMCTX_EXTRN_TR) 6679 5358 { 5359 #ifdef IN_RING0 6680 5360 /* Real-mode emulation using virtual-8086 mode has the fake TSS (pRealModeTSS) in TR, 6681 5361 don't need to import that one. 
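   (That special case only exists in ring-0; the ring-3 instantiation of this
   template does no real-on-v86 emulation, hence the IN_RING0 guard here, and TR
   is simply imported unconditionally.)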
*/ 6682 5362 if (!pVmcsInfo->pShared->RealMode.fRealOnV86Active) 5363 #endif 6683 5364 vmxHCImportGuestTr(pVCpu); 6684 5365 } … … 6687 5368 if (fWhat & CPUMCTX_EXTRN_DR7) 6688 5369 { 5370 #ifdef IN_RING0 6689 5371 if (!pVCpu->hmr0.s.fUsingHyperDR7) 5372 #endif 6690 5373 { 6691 5374 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_DR7, &pCtx->dr[7]); … … 6702 5385 } 6703 5386 5387 #ifdef IN_RING0 6704 5388 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE) 6705 5389 { … … 6761 5445 } 6762 5446 pCtx->fExtrn = 0; 6763 pVCpu->hm.s.u32HMError = pMsrs->u32Msr;5447 VCPU_2_VMXSTATE(pVCpu).u32HMError = pMsrs->u32Msr; 6764 5448 ASMSetFlags(fEFlags); 6765 5449 AssertMsgFailed(("Unexpected MSR in auto-load/store area. idMsr=%#RX32 cMsrs=%u\n", idMsr, cMsrs)); … … 6769 5453 } 6770 5454 } 5455 #endif 6771 5456 6772 5457 if (fWhat & CPUMCTX_EXTRN_CR_MASK) … … 6801 5486 } 6802 5487 #endif 5488 #ifdef IN_RING0 6803 5489 VMMRZCallRing3Disable(pVCpu); /* May call into PGM which has Log statements. */ 5490 #endif 6804 5491 CPUMSetGuestCR0(pVCpu, u64Cr0); 5492 #ifdef IN_RING0 6805 5493 VMMRZCallRing3Enable(pVCpu); 5494 #endif 6806 5495 } 6807 5496 … … 6841 5530 { 6842 5531 /* CR0.PG bit changes are always intercepted, so it's up to date. */ 5532 #ifdef IN_RING0 /* With R3 we always have unresitricted guest support. */ 6843 5533 if ( pVM->hmr0.s.vmx.fUnrestrictedGuest 6844 5534 || ( pVM->hmr0.s.fNestedPaging 6845 5535 && CPUMIsGuestPagingEnabledEx(pCtx))) 5536 #endif 6846 5537 { 6847 5538 uint64_t u64Cr3; … … 6905 5596 } 6906 5597 } 5598 #ifdef IN_RING0 6907 5599 else 6908 5600 AssertMsg(!pCtx->fExtrn || (pCtx->fExtrn & HMVMX_CPUMCTX_EXTRN_ALL), ("%#RX64\n", pCtx->fExtrn)); … … 6912 5604 */ 6913 5605 ASMSetFlags(fEFlags); 6914 6915 STAM_PROFILE_ADV_STOP(& pVCpu->hm.s.StatImportGuestState, x); 5606 #endif 5607 5608 STAM_PROFILE_ADV_STOP(& VCPU_2_VMXSTATE(pVCpu).StatImportGuestState, x); 6916 5609 6917 5610 if (RT_SUCCESS(rc)) … … 6937 5630 */ 6938 5631 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3) 6939 && VMMRZCallRing3IsEnabled(pVCpu)) 5632 #ifdef IN_RING0 5633 && VMMRZCallRing3IsEnabled(pVCpu) 5634 #endif 5635 ) 6940 5636 { 6941 5637 Assert(!(ASMAtomicUoReadU64(&pCtx->fExtrn) & CPUMCTX_EXTRN_CR3)); … … 6965 5661 * 6966 5662 * @param pVCpu The cross context virtual CPU structure. 6967 * @param pVmxTransient The VMX-transient structure.5663 * @param fIsNestedGuest Flag whether this is for a for a pending nested guest event. 6968 5664 * @param fStepping Whether we are single-stepping the guest using the 6969 5665 * hypervisor debugger. … … 6972 5668 * is no longer in VMX non-root mode. 6973 5669 */ 6974 static VBOXSTRICTRC vmxHCCheckForceFlags(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, bool fStepping) 6975 { 5670 static VBOXSTRICTRC vmxHCCheckForceFlags(PVMCPUCC pVCpu, bool fIsNestedGuest, bool fStepping) 5671 { 5672 #ifdef IN_RING0 6976 5673 Assert(VMMRZCallRing3IsEnabled(pVCpu)); 5674 #endif 6977 5675 6978 5676 /* … … 7012 5710 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HM_TO_R3_MASK)) 7013 5711 { 7014 STAM_COUNTER_INC(& pVCpu->hm.s.StatSwitchHmToR3FF);5712 STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatSwitchHmToR3FF); 7015 5713 int rc = RT_LIKELY(!VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY)) ? VINF_EM_RAW_TO_R3 : VINF_EM_NO_MEMORY; 7016 5714 Log4Func(("HM_TO_R3 forcing us back to ring-3. 
rc=%d\n", rc)); … … 7022 5720 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_REQUEST)) 7023 5721 { 7024 STAM_COUNTER_INC(& pVCpu->hm.s.StatSwitchVmReq);5722 STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatSwitchVmReq); 7025 5723 Log4Func(("Pending VM request forcing us back to ring-3\n")); 7026 5724 return VINF_EM_PENDING_REQUEST; … … 7030 5728 if (VM_FF_IS_SET(pVM, VM_FF_PGM_POOL_FLUSH_PENDING)) 7031 5729 { 7032 STAM_COUNTER_INC(& pVCpu->hm.s.StatSwitchPgmPoolFlush);5730 STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatSwitchPgmPoolFlush); 7033 5731 Log4Func(("PGM pool flush pending forcing us back to ring-3\n")); 7034 5732 return VINF_PGM_POOL_FLUSH_PENDING; … … 7038 5736 if (VM_FF_IS_SET(pVM, VM_FF_PDM_DMA)) 7039 5737 { 7040 STAM_COUNTER_INC(& pVCpu->hm.s.StatSwitchDma);5738 STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatSwitchDma); 7041 5739 Log4Func(("Pending DMA request forcing us back to ring-3\n")); 7042 5740 return VINF_EM_RAW_TO_R3; … … 7051 5749 * See Intel spec. 6.9 "Priority Among Simultaneous Exceptions And Interrupts". 7052 5750 */ 7053 if ( pVmxTransient->fIsNestedGuest)5751 if (fIsNestedGuest) 7054 5752 { 7055 5753 /* Pending nested-guest APIC-write. */ … … 7081 5779 } 7082 5780 #else 7083 NOREF( pVmxTransient);5781 NOREF(fIsNestedGuest); 7084 5782 #endif 7085 5783 … … 7097 5795 { 7098 5796 Assert(TRPMHasTrap(pVCpu)); 7099 Assert(! pVCpu->hm.s.Event.fPending);5797 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending); 7100 5798 7101 5799 uint8_t uVector; … … 7129 5827 static void vmxHCPendingEventToTrpmTrap(PVMCPUCC pVCpu) 7130 5828 { 7131 Assert( pVCpu->hm.s.Event.fPending);5829 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending); 7132 5830 7133 5831 /* If a trap was already pending, we did something wrong! */ 7134 5832 Assert(TRPMQueryTrap(pVCpu, NULL /* pu8TrapNo */, NULL /* pEnmType */) == VERR_TRPM_NO_ACTIVE_TRAP); 7135 5833 7136 uint32_t const u32IntInfo = pVCpu->hm.s.Event.u64IntInfo;5834 uint32_t const u32IntInfo = VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo; 7137 5835 uint32_t const uVector = VMX_IDT_VECTORING_INFO_VECTOR(u32IntInfo); 7138 5836 TRPMEVENT const enmTrapType = HMVmxEventTypeToTrpmEventType(u32IntInfo); … … 7144 5842 7145 5843 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(u32IntInfo)) 7146 TRPMSetErrorCode(pVCpu, pVCpu->hm.s.Event.u32ErrCode);5844 TRPMSetErrorCode(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode); 7147 5845 7148 5846 if (VMX_IDT_VECTORING_INFO_IS_XCPT_PF(u32IntInfo)) 7149 TRPMSetFaultAddress(pVCpu, pVCpu->hm.s.Event.GCPtrFaultAddress);5847 TRPMSetFaultAddress(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.GCPtrFaultAddress); 7150 5848 else 7151 5849 { … … 7164 5862 || uVector == X86_XCPT_DB /* INT1 (ICEBP) */), 7165 5863 ("Invalid vector: uVector=%#x uVectorType=%#x\n", uVector, uVectorType)); 7166 TRPMSetInstrLength(pVCpu, pVCpu->hm.s.Event.cbInstr);5864 TRPMSetInstrLength(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.cbInstr); 7167 5865 break; 7168 5866 } … … 7171 5869 7172 5870 /* We're now done converting the pending event. */ 7173 pVCpu->hm.s.Event.fPending = false;5871 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; 7174 5872 } 7175 5873 … … 7179 5877 * cause a VM-exit as soon as the guest is in a state to receive interrupts. 7180 5878 * 5879 * @param pVCpu The cross context virtual CPU structure. 7181 5880 * @param pVmcsInfo The VMCS info. object. 
7182 5881 */ 7183 static void vmxHCSetIntWindowExitVmcs(PVM XVMCSINFO pVmcsInfo)5882 static void vmxHCSetIntWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo) 7184 5883 { 7185 5884 if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_INT_WINDOW_EXIT) … … 7198 5897 * Clears the interrupt-window exiting control in the VMCS. 7199 5898 * 5899 * @param pVCpu The cross context virtual CPU structure. 7200 5900 * @param pVmcsInfo The VMCS info. object. 7201 5901 */ 7202 DECLINLINE(void) vmxHCClearIntWindowExitVmcs(PVM XVMCSINFO pVmcsInfo)5902 DECLINLINE(void) vmxHCClearIntWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo) 7203 5903 { 7204 5904 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT) … … 7215 5915 * cause a VM-exit as soon as the guest is in a state to receive NMIs. 7216 5916 * 5917 * @param pVCpu The cross context virtual CPU structure. 7217 5918 * @param pVmcsInfo The VMCS info. object. 7218 5919 */ 7219 static void vmxHCSetNmiWindowExitVmcs(PVM XVMCSINFO pVmcsInfo)5920 static void vmxHCSetNmiWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo) 7220 5921 { 7221 5922 if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_NMI_WINDOW_EXIT) … … 7235 5936 * Clears the NMI-window exiting control in the VMCS. 7236 5937 * 5938 * @param pVCpu The cross context virtual CPU structure. 7237 5939 * @param pVmcsInfo The VMCS info. object. 7238 5940 */ 7239 DECLINLINE(void) vmxHCClearNmiWindowExitVmcs(PVM XVMCSINFO pVmcsInfo)5941 DECLINLINE(void) vmxHCClearNmiWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo) 7240 5942 { 7241 5943 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT) … … 7248 5950 7249 5951 5952 #ifdef IN_RING0 7250 5953 /** 7251 5954 * Does the necessary state syncing before returning to ring-3 for any reason … … 7319 6022 pVCpu->hmr0.s.vmx.fUpdatedHostAutoMsrs = false; 7320 6023 7321 STAM_PROFILE_ADV_SET_STOPPED(& pVCpu->hm.s.StatEntry);7322 STAM_PROFILE_ADV_SET_STOPPED(& pVCpu->hm.s.StatImportGuestState);7323 STAM_PROFILE_ADV_SET_STOPPED(& pVCpu->hm.s.StatExportGuestState);7324 STAM_PROFILE_ADV_SET_STOPPED(& pVCpu->hm.s.StatPreExit);7325 STAM_PROFILE_ADV_SET_STOPPED(& pVCpu->hm.s.StatExitHandling);7326 STAM_PROFILE_ADV_SET_STOPPED(& pVCpu->hm.s.StatExitIO);7327 STAM_PROFILE_ADV_SET_STOPPED(& pVCpu->hm.s.StatExitMovCRx);7328 STAM_PROFILE_ADV_SET_STOPPED(& pVCpu->hm.s.StatExitXcptNmi);7329 STAM_PROFILE_ADV_SET_STOPPED(& pVCpu->hm.s.StatExitVmentry);7330 STAM_COUNTER_INC(& pVCpu->hm.s.StatSwitchLongJmpToR3);6024 STAM_PROFILE_ADV_SET_STOPPED(&VCPU_2_VMXSTATE(pVCpu).StatEntry); 6025 STAM_PROFILE_ADV_SET_STOPPED(&VCPU_2_VMXSTATE(pVCpu).StatImportGuestState); 6026 STAM_PROFILE_ADV_SET_STOPPED(&VCPU_2_VMXSTATE(pVCpu).StatExportGuestState); 6027 STAM_PROFILE_ADV_SET_STOPPED(&VCPU_2_VMXSTATE(pVCpu).StatPreExit); 6028 STAM_PROFILE_ADV_SET_STOPPED(&VCPU_2_VMXSTATE(pVCpu).StatExitHandling); 6029 STAM_PROFILE_ADV_SET_STOPPED(&VCPU_2_VMXSTATE(pVCpu).StatExitIO); 6030 STAM_PROFILE_ADV_SET_STOPPED(&VCPU_2_VMXSTATE(pVCpu).StatExitMovCRx); 6031 STAM_PROFILE_ADV_SET_STOPPED(&VCPU_2_VMXSTATE(pVCpu).StatExitXcptNmi); 6032 STAM_PROFILE_ADV_SET_STOPPED(&VCPU_2_VMXSTATE(pVCpu).StatExitVmentry); 6033 STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatSwitchLongJmpToR3); 7331 6034 7332 6035 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HM, VMCPUSTATE_STARTED_EXEC); … … 7446 6149 if (RT_UNLIKELY(rcExit == VERR_VMX_INVALID_VMCS_PTR)) 7447 6150 { 7448 VMXGetCurrentVmcs(& pVCpu->hm.s.vmx.LastError.HCPhysCurrentVmcs);7449 pVCpu->hm.s.vmx.LastError.u32VmcsRev = *(uint32_t 
*)pVmcsInfo->pvVmcs;7450 pVCpu->hm.s.vmx.LastError.idEnteredCpu = pVCpu->hmr0.s.idEnteredCpu;6151 VMXGetCurrentVmcs(&VCPU_2_VMXSTATE(pVCpu).vmx.LastError.HCPhysCurrentVmcs); 6152 VCPU_2_VMXSTATE(pVCpu).vmx.LastError.u32VmcsRev = *(uint32_t *)pVmcsInfo->pvVmcs; 6153 VCPU_2_VMXSTATE(pVCpu).vmx.LastError.idEnteredCpu = pVCpu->hmr0.s.idEnteredCpu; 7451 6154 /* LastError.idCurrentCpu was updated in vmxHCPreRunGuestCommitted(). */ 7452 6155 } … … 7463 6166 * the event from there (hence place it back in TRPM). 7464 6167 */ 7465 if ( pVCpu->hm.s.Event.fPending)6168 if (VCPU_2_VMXSTATE(pVCpu).Event.fPending) 7466 6169 { 7467 6170 vmxHCPendingEventToTrpmTrap(pVCpu); 7468 Assert(! pVCpu->hm.s.Event.fPending);6171 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending); 7469 6172 7470 6173 /* Clear the events from the VMCS. */ … … 7507 6210 if (!CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx)) 7508 6211 { 7509 vmxHCClearIntWindowExitVmcs(pV mcsInfo);7510 vmxHCClearNmiWindowExitVmcs(pV mcsInfo);6212 vmxHCClearIntWindowExitVmcs(pVCpu, pVmcsInfo); 6213 vmxHCClearNmiWindowExitVmcs(pVCpu, pVmcsInfo); 7511 6214 } 7512 6215 … … 7521 6224 int rc = vmxHCLeaveSession(pVCpu); 7522 6225 AssertRCReturn(rc, rc); 7523 STAM_COUNTER_DEC(& pVCpu->hm.s.StatSwitchLongJmpToR3);6226 STAM_COUNTER_DEC(&VCPU_2_VMXSTATE(pVCpu).StatSwitchLongJmpToR3); 7524 6227 7525 6228 /* Thread-context hooks are unregistered at this point!!! */ … … 7541 6244 7542 6245 /* Update the exit-to-ring 3 reason. */ 7543 pVCpu->hm.s.rcLastExitToR3 = VBOXSTRICTRC_VAL(rcExit);6246 VCPU_2_VMXSTATE(pVCpu).rcLastExitToR3 = VBOXSTRICTRC_VAL(rcExit); 7544 6247 7545 6248 /* On our way back from ring-3 reload the guest state if there is a possibility of it being changed. */ … … 7548 6251 { 7549 6252 Assert(!(pVCpu->cpum.GstCtx.fExtrn & HMVMX_CPUMCTX_EXTRN_ALL)); 7550 ASMAtomicUoOrU64(& pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);7551 } 7552 7553 STAM_COUNTER_INC(& pVCpu->hm.s.StatSwitchExitToR3);6253 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST); 6254 } 6255 6256 STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatSwitchExitToR3); 7554 6257 VMMRZCallRing3Enable(pVCpu); 7555 6258 return rc; … … 7581 6284 return rc; 7582 6285 } 7583 6286 #endif /* !IN_RING */ 7584 6287 7585 6288 /** … … 7601 6304 * VM-entry). 7602 6305 */ 7603 static VBOXSTRICTRC vmxHCInjectEventVmcs(PVMCPUCC pVCpu, P CVMXTRANSIENT pVmxTransient, PCHMEVENT pEvent, bool fStepping,6306 static VBOXSTRICTRC vmxHCInjectEventVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest, PCHMEVENT pEvent, bool fStepping, 7604 6307 uint32_t *pfIntrState) 7605 6308 { … … 7656 6359 Assert(uIntType != VMX_EXIT_INT_INFO_TYPE_NMI || uVector == X86_XCPT_NMI); 7657 6360 Assert(uIntType != VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT || uVector == X86_XCPT_DB); 7658 STAM_COUNTER_INC(& pVCpu->hm.s.aStatInjectedXcpts[uVector]);6361 STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).aStatInjectedXcpts[uVector]); 7659 6362 } 7660 6363 else 7661 STAM_COUNTER_INC(& pVCpu->hm.s.aStatInjectedIrqs[uVector & MASK_INJECT_IRQ_STAT]);6364 STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).aStatInjectedIrqs[uVector & MASK_INJECT_IRQ_STAT]); 7662 6365 7663 6366 /* … … 7671 6374 if (CPUMIsGuestInRealModeEx(pCtx)) /* CR0.PE bit changes are always intercepted, so it's up to date. 
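   (On the unrestricted-guest path below the only adjustment needed is dropping the
   error-code-valid flag, since real-mode exception delivery pushes no error code
   onto the stack.)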
*/ 7672 6375 { 6376 #ifdef IN_RING0 7673 6377 if (pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fUnrestrictedGuest) 6378 #endif 7674 6379 { 7675 6380 /* … … 7681 6386 u32IntInfo &= ~VMX_ENTRY_INT_INFO_ERROR_CODE_VALID; 7682 6387 } 6388 #ifdef IN_RING0 7683 6389 else 7684 6390 { … … 7689 6395 7690 6396 /* We require RIP, RSP, RFLAGS, CS, IDTR, import them. */ 7691 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;7692 6397 int rc2 = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_TABLE_MASK 7693 6398 | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_RFLAGS); … … 7713 6418 RT_ZERO(EventXcptDf); 7714 6419 EventXcptDf.u64IntInfo = uXcptDfInfo; 7715 return vmxHCInjectEventVmcs(pVCpu, pVm xTransient, &EventXcptDf, fStepping, pfIntrState);6420 return vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &EventXcptDf, fStepping, pfIntrState); 7716 6421 } 7717 6422 … … 7729 6434 RT_ZERO(EventXcptGp); 7730 6435 EventXcptGp.u64IntInfo = uXcptGpInfo; 7731 return vmxHCInjectEventVmcs(pVCpu, pVm xTransient, &EventXcptGp, fStepping, pfIntrState);6436 return vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &EventXcptGp, fStepping, pfIntrState); 7732 6437 } 7733 6438 … … 7771 6476 pCtx->cr2 = GCPtrFault; 7772 6477 7773 ASMAtomicUoOrU64(& pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_CS | HM_CHANGED_GUEST_CR26478 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CS | HM_CHANGED_GUEST_CR2 7774 6479 | HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS 7775 6480 | HM_CHANGED_GUEST_RSP); … … 7794 6499 * we don't attempt to undo it if we are returning to ring-3 before executing guest code. 7795 6500 */ 7796 pVCpu->hm.s.Event.fPending = false;6501 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; 7797 6502 7798 6503 /* … … 7800 6505 * we should set fInterceptEvents here. 7801 6506 */ 7802 Assert(! pVmxTransient->fIsNestedGuest);6507 Assert(!fIsNestedGuest); 7803 6508 7804 6509 /* If we're stepping and we've changed cs:rip above, bail out of the VMX R0 execution loop. */ … … 7810 6515 return rcStrict; 7811 6516 } 6517 #else 6518 RT_NOREF(pVmcsInfo); 6519 #endif 7812 6520 } 7813 6521 … … 7848 6556 * @returns Strict VBox status code (i.e. informational status codes too). 7849 6557 * @param pVCpu The cross context virtual CPU structure. 7850 * @param pVmxTransient The VMX-transient structure. 6558 * @param pVmcsInfo The VMCS information structure. 6559 * @param fIsNestedGuest Flag whether the evaluation happens for a nestd guest. 7851 6560 * @param pfIntrState Where to store the VT-x guest-interruptibility state. 7852 6561 */ 7853 static VBOXSTRICTRC vmxHCEvaluatePendingEvent(PVMCPUCC pVCpu, P CVMXTRANSIENT pVmxTransient, uint32_t *pfIntrState)6562 static VBOXSTRICTRC vmxHCEvaluatePendingEvent(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest, uint32_t *pfIntrState) 7854 6563 { 7855 6564 Assert(pfIntrState); … … 7866 6575 * An event that's already pending has already performed all necessary checks. 7867 6576 */ 7868 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo; 7869 bool const fIsNestedGuest = pVmxTransient->fIsNestedGuest; 7870 if ( !pVCpu->hm.s.Event.fPending 6577 if ( !VCPU_2_VMXSTATE(pVCpu).Event.fPending 7871 6578 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)) 7872 6579 { … … 7877 6584 * NMIs take priority over external interrupts. 
7878 6585 */ 6586 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX 7879 6587 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 6588 #endif 7880 6589 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI)) 7881 6590 { … … 7902 6611 } 7903 6612 else if (!fIsNestedGuest) 7904 vmxHCSetNmiWindowExitVmcs(pV mcsInfo);6613 vmxHCSetNmiWindowExitVmcs(pVCpu, pVmcsInfo); 7905 6614 } 7906 6615 … … 7911 6620 */ 7912 6621 if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC) 7913 && ! pVCpu->hm.s.fSingleInstruction)6622 && !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction) 7914 6623 { 7915 6624 Assert(!DBGFIsStepping(pVCpu)); … … 7955 6664 else if (rc == VERR_APIC_INTR_MASKED_BY_TPR) 7956 6665 { 7957 STAM_COUNTER_INC(& pVCpu->hm.s.StatSwitchTprMaskedIrq);6666 STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatSwitchTprMaskedIrq); 7958 6667 7959 6668 if ( !fIsNestedGuest 7960 6669 && (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)) 7961 vmxHCApicSetTprThreshold(pV mcsInfo, u8Interrupt >> 4);6670 vmxHCApicSetTprThreshold(pVCpu, pVmcsInfo, u8Interrupt >> 4); 7962 6671 /* else: for nested-guests, TPR threshold is picked up while merging VMCS controls. */ 7963 6672 … … 7969 6678 } 7970 6679 else 7971 STAM_COUNTER_INC(& pVCpu->hm.s.StatSwitchGuestIrq);6680 STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatSwitchGuestIrq); 7972 6681 7973 6682 /* We've injected the interrupt or taken necessary action, bail. */ … … 7975 6684 } 7976 6685 if (!fIsNestedGuest) 7977 vmxHCSetIntWindowExitVmcs(pV mcsInfo);6686 vmxHCSetIntWindowExitVmcs(pVCpu, pVmcsInfo); 7978 6687 } 7979 6688 } … … 7986 6695 */ 7987 6696 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI)) 7988 vmxHCSetNmiWindowExitVmcs(pV mcsInfo);6697 vmxHCSetNmiWindowExitVmcs(pVCpu, pVmcsInfo); 7989 6698 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC) 7990 && ! pVCpu->hm.s.fSingleInstruction)7991 vmxHCSetIntWindowExitVmcs(pV mcsInfo);6699 && !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction) 6700 vmxHCSetIntWindowExitVmcs(pVCpu, pVmcsInfo); 7992 6701 } 7993 6702 /* else: for nested-guests, NMI/interrupt-window exiting will be picked up when merging VMCS controls. */ … … 8003 6712 * @returns Strict VBox status code (i.e. informational status codes too). 8004 6713 * @param pVCpu The cross context virtual CPU structure. 8005 * @param pVmxTransient The VMX-transient structure.6714 * @param fIsNestedGuest Flag whether the event injection happens for a nested guest. 8006 6715 * @param fIntrState The VT-x guest-interruptibility state. 8007 6716 * @param fStepping Whether we are single-stepping the guest using the … … 8010 6719 * directly. 8011 6720 */ 8012 static VBOXSTRICTRC vmxHCInjectPendingEvent(PVMCPUCC pVCpu, P CVMXTRANSIENT pVmxTransient, uint32_t fIntrState, bool fStepping)6721 static VBOXSTRICTRC vmxHCInjectPendingEvent(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest, uint32_t fIntrState, bool fStepping) 8013 6722 { 8014 6723 HMVMX_ASSERT_PREEMPT_SAFE(pVCpu); 6724 #ifdef IN_RING0 8015 6725 Assert(VMMRZCallRing3IsEnabled(pVCpu)); 6726 #endif 8016 6727 8017 6728 #ifdef VBOX_STRICT … … 8035 6746 8036 6747 VBOXSTRICTRC rcStrict = VINF_SUCCESS; 8037 if ( pVCpu->hm.s.Event.fPending)6748 if (VCPU_2_VMXSTATE(pVCpu).Event.fPending) 8038 6749 { 8039 6750 /* … … 8044 6755 * See Intel spec. 26.6.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery". 
8045 6756 */ 8046 uint32_t const uIntType = VMX_ENTRY_INT_INFO_TYPE( pVCpu->hm.s.Event.u64IntInfo);6757 uint32_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo); 8047 6758 #ifdef VBOX_STRICT 8048 6759 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT) … … 8059 6770 } 8060 6771 #endif 8061 Log4(("Injecting pending event vcpu[%RU32] u64IntInfo=%#RX64 Type=%#RX32\n", pVCpu->idCpu, pVCpu->hm.s.Event.u64IntInfo,6772 Log4(("Injecting pending event vcpu[%RU32] u64IntInfo=%#RX64 Type=%#RX32\n", pVCpu->idCpu, VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo, 8062 6773 uIntType)); 8063 6774 … … 8068 6779 * into the guest IDT ourselves (for real-on-v86 guest injecting software interrupts). 8069 6780 */ 8070 rcStrict = vmxHCInjectEventVmcs(pVCpu, pVm xTransient, &pVCpu->hm.s.Event, fStepping, &fIntrState);6781 rcStrict = vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &VCPU_2_VMXSTATE(pVCpu).Event, fStepping, &fIntrState); 8071 6782 AssertRCReturn(VBOXSTRICTRC_VAL(rcStrict), rcStrict); 8072 6783 8073 6784 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT) 8074 STAM_COUNTER_INC(& pVCpu->hm.s.StatInjectInterrupt);6785 STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatInjectInterrupt); 8075 6786 else 8076 STAM_COUNTER_INC(& pVCpu->hm.s.StatInjectXcpt);6787 STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatInjectXcpt); 8077 6788 } 8078 6789 … … 8082 6793 */ 8083 6794 if ( (fIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS)) 8084 && ! pVmxTransient->fIsNestedGuest)6795 && !fIsNestedGuest) 8085 6796 { 8086 6797 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS); 8087 6798 8088 if (! pVCpu->hm.s.fSingleInstruction)6799 if (!VCPU_2_VMXSTATE(pVCpu).fSingleInstruction) 8089 6800 { 8090 6801 /* … … 8105 6816 * we use MTF, so just make sure it's called before executing guest-code. 8106 6817 */ 8107 ASMAtomicUoOrU64(& pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_DR_MASK);6818 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_DR_MASK); 8108 6819 } 8109 6820 } … … 8130 6841 } 8131 6842 8132 6843 #ifdef IN_RING0 8133 6844 /** 8134 6845 * Exports the guest state into the VMCS guest-state area. … … 8158 6869 LogFlowFunc(("pVCpu=%p\n", pVCpu)); 8159 6870 8160 STAM_PROFILE_ADV_START(& pVCpu->hm.s.StatExportGuestState, x);6871 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATE(pVCpu).StatExportGuestState, x); 8161 6872 8162 6873 /* … … 8209 6920 8210 6921 /* Clear any bits that may be set but exported unconditionally or unused/reserved bits. 
*/ 8211 ASMAtomicUoAndU64(& pVCpu->hm.s.fCtxChanged, ~( (HM_CHANGED_GUEST_GPRS_MASK & ~HM_CHANGED_GUEST_RSP)6922 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~( (HM_CHANGED_GUEST_GPRS_MASK & ~HM_CHANGED_GUEST_RSP) 8212 6923 | HM_CHANGED_GUEST_CR2 8213 6924 | (HM_CHANGED_GUEST_DR_MASK & ~HM_CHANGED_GUEST_DR7) … … 8222 6933 | (HM_CHANGED_KEEPER_STATE_MASK & ~HM_CHANGED_VMX_MASK))); 8223 6934 8224 STAM_PROFILE_ADV_STOP(& pVCpu->hm.s.StatExportGuestState, x);6935 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATE(pVCpu).StatExportGuestState, x); 8225 6936 return rc; 8226 6937 } … … 8240 6951 Assert(!VMMRZCallRing3IsEnabled(pVCpu)); 8241 6952 8242 if ( pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_DR_MASK)6953 if (VCPU_2_VMXSTATE(pVCpu).fCtxChanged & HM_CHANGED_GUEST_DR_MASK) 8243 6954 { 8244 6955 int rc = vmxHCExportSharedDebugState(pVCpu, pVmxTransient); 8245 6956 AssertRC(rc); 8246 pVCpu->hm.s.fCtxChanged &= ~HM_CHANGED_GUEST_DR_MASK;6957 VCPU_2_VMXSTATE(pVCpu).fCtxChanged &= ~HM_CHANGED_GUEST_DR_MASK; 8247 6958 8248 6959 /* Loading shared debug bits might have changed eflags.TF bit for debugging purposes. */ 8249 if ( pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_RFLAGS)6960 if (VCPU_2_VMXSTATE(pVCpu).fCtxChanged & HM_CHANGED_GUEST_RFLAGS) 8250 6961 vmxHCExportGuestRflags(pVCpu, pVmxTransient); 8251 6962 } 8252 6963 8253 if ( pVCpu->hm.s.fCtxChanged & HM_CHANGED_VMX_GUEST_LAZY_MSRS)6964 if (VCPU_2_VMXSTATE(pVCpu).fCtxChanged & HM_CHANGED_VMX_GUEST_LAZY_MSRS) 8254 6965 { 8255 6966 vmxHCLazyLoadGuestMsrs(pVCpu); 8256 pVCpu->hm.s.fCtxChanged &= ~HM_CHANGED_VMX_GUEST_LAZY_MSRS;8257 } 8258 8259 AssertMsg(!( pVCpu->hm.s.fCtxChanged & HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE),8260 ("fCtxChanged=%#RX64\n", pVCpu->hm.s.fCtxChanged));6967 VCPU_2_VMXSTATE(pVCpu).fCtxChanged &= ~HM_CHANGED_VMX_GUEST_LAZY_MSRS; 6968 } 6969 6970 AssertMsg(!(VCPU_2_VMXSTATE(pVCpu).fCtxChanged & HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE), 6971 ("fCtxChanged=%#RX64\n", VCPU_2_VMXSTATE(pVCpu).fCtxChanged)); 8261 6972 } 8262 6973 … … 8281 6992 8282 6993 #ifdef HMVMX_ALWAYS_SYNC_FULL_GUEST_STATE 8283 ASMAtomicUoOrU64(& pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);6994 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST); 8284 6995 #endif 8285 6996 … … 8291 7002 uint64_t const fCtxMask = HM_CHANGED_ALL_GUEST & ~HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE; 8292 7003 uint64_t const fMinimalMask = HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT; 8293 uint64_t const fCtxChanged = ASMAtomicUoReadU64(& pVCpu->hm.s.fCtxChanged);7004 uint64_t const fCtxChanged = ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged); 8294 7005 8295 7006 /* If only RIP/RSP/RFLAGS/HWVIRT changed, export only those (quicker, happens more often).*/ … … 8301 7012 vmxHCExportGuestRflags(pVCpu, pVmxTransient); 8302 7013 rcStrict = vmxHCExportGuestHwvirtState(pVCpu, pVmxTransient); 8303 STAM_COUNTER_INC(& pVCpu->hm.s.StatExportMinimal);7014 STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExportMinimal); 8304 7015 } 8305 7016 /* If anything else also changed, go through the full export routine and export as required. */ … … 8316 7027 return rcStrict; 8317 7028 } 8318 STAM_COUNTER_INC(& pVCpu->hm.s.StatExportFull);7029 STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExportFull); 8319 7030 } 8320 7031 /* Nothing changed, nothing to load here. */ … … 8324 7035 #ifdef VBOX_STRICT 8325 7036 /* All the guest state bits should be loaded except maybe the host context and/or the shared host/guest bits. 
*/ 8326 uint64_t const fCtxChangedCur = ASMAtomicUoReadU64(& pVCpu->hm.s.fCtxChanged);7037 uint64_t const fCtxChangedCur = ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged); 8327 7038 AssertMsg(!(fCtxChangedCur & fCtxMask), ("fCtxChangedCur=%#RX64\n", fCtxChangedCur)); 8328 7039 #endif 8329 7040 return rcStrict; 8330 7041 } 7042 #endif /* !IN_RING0 */ 8331 7043 8332 7044 … … 8352 7064 } while (0) 8353 7065 8354 PVMCC pVM = pVCpu->CTX_SUFF(pVM);8355 7066 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 8356 7067 uint32_t uError = VMX_IGS_ERROR; 8357 7068 uint32_t u32IntrState = 0; 7069 #ifdef IN_RING0 7070 PVMCC pVM = pVCpu->CTX_SUFF(pVM); 8358 7071 bool const fUnrestrictedGuest = pVM->hmr0.s.vmx.fUnrestrictedGuest; 7072 #else 7073 bool const fUnrestrictedGuest = true; 7074 #endif 8359 7075 do 8360 7076 { … … 8523 7239 } 8524 7240 7241 #ifdef IN_RING0 8525 7242 /* 8526 7243 * EFER MSR. … … 8543 7260 VMX_IGS_EFER_LMA_LME_MISMATCH); 8544 7261 } 7262 #endif 8545 7263 8546 7264 /* … … 8844 7562 } 8845 7563 7564 #ifdef IN_RING0 8846 7565 /* VMCS link pointer. */ 8847 7566 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, &u64Val); … … 8884 7603 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED); 8885 7604 } 7605 #endif 8886 7606 8887 7607 /* Shouldn't happen but distinguish it from AssertRCBreak() errors. */ … … 8890 7610 } while (0); 8891 7611 8892 pVCpu->hm.s.u32HMError = uError;8893 pVCpu->hm.s.vmx.LastError.u32GuestIntrState = u32IntrState;7612 VCPU_2_VMXSTATE(pVCpu).u32HMError = uError; 7613 VCPU_2_VMXSTATE(pVCpu).vmx.LastError.u32GuestIntrState = u32IntrState; 8894 7614 return uError; 8895 7615 … … 8899 7619 8900 7620 7621 #ifdef IN_RING0 8901 7622 /** 8902 7623 * Map the APIC-access page for virtualizing APIC accesses. … … 8929 7650 8930 7651 /* Update the per-VCPU cache of the APIC base MSR. */ 8931 pVCpu->hm.s.vmx.u64GstMsrApicBase = u64MsrApicBase;7652 VCPU_2_VMXSTATE(pVCpu).vmx.u64GstMsrApicBase = u64MsrApicBase; 8932 7653 return VINF_SUCCESS; 8933 7654 } … … 8990 7711 if (fDispatched) 8991 7712 { 8992 STAM_REL_COUNTER_INC(& pVCpu->hm.s.StatExitHostNmiInGC);7713 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitHostNmiInGC); 8993 7714 return VINF_SUCCESS; 8994 7715 } … … 8999 7720 * (to the target CPU) without dispatching the host NMI above. 9000 7721 */ 9001 STAM_REL_COUNTER_INC(& pVCpu->hm.s.StatExitHostNmiInGCIpi);7722 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitHostNmiInGCIpi); 9002 7723 return RTMpOnSpecific(idCpu, &hmR0DispatchHostNmi, NULL /* pvUser1 */, NULL /* pvUser2 */); 9003 7724 } … … 9341 8062 */ 9342 8063 if (u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS) 9343 pVCpu->hm.s.vmx.fSwitchedNstGstFlushTlb = true;8064 VCPU_2_VMXSTATE(pVCpu).vmx.fSwitchedNstGstFlushTlb = true; 9344 8065 9345 8066 /* … … 9358 8079 9359 8080 /** 9360 * Does the preparations before executing guest code in VT-x.9361 *9362 * This may cause longjmps to ring-3 and may even result in rescheduling to the9363 * recompiler/IEM. 
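The guest-state sanity checking in this hunk accumulates a single VMX_IGS_* style diagnostic code by running a long list of checks inside one do/while(0) and breaking out at the first failure; the result is then stashed for ring-3 to inspect. A condensed model of that pattern, with invented error codes and only two illustrative checks (the macro shape is the point, not the real check list):

#include <stdint.h>

enum
{
    IGS_OK                  = 0,
    IGS_CR0_PE_PG_MISMATCH  = 1,
    IGS_RFLAGS_RSVD         = 2,
    IGS_REASON_NOT_FOUND    = 0x7fff
};

/* Break out of the enclosing do/while at the first failed check, recording why. */
#define CHECK_BREAK(expr, err) \
    if (!(expr)) { uError = (err); break; } else do { } while (0)

static uint32_t checkGuestState(uint64_t uCr0, uint64_t uRFlags)
{
    uint32_t uError = IGS_REASON_NOT_FOUND;
    do
    {
        /* CR0.PG requires CR0.PE (unrestricted guests relax this, ignored here). */
        CHECK_BREAK(!(uCr0 & UINT64_C(0x80000000)) || (uCr0 & UINT64_C(0x1)), IGS_CR0_PE_PG_MISMATCH);
        /* RFLAGS bit 1 must be 1 and bits 3, 5 and 15 must be 0 (simplified). */
        CHECK_BREAK((uRFlags & UINT64_C(0x2)) && !(uRFlags & UINT64_C(0x8028)), IGS_RFLAGS_RSVD);
        uError = IGS_OK;
    } while (0);
    return uError;   /* the real code stores this in a per-VCPU u32HMError field */
}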
We must be cautious what we do here regarding committing9364 * guest-state information into the VMCS assuming we assuredly execute the9365 * guest in VT-x mode.9366 *9367 * If we fall back to the recompiler/IEM after updating the VMCS and clearing9368 * the common-state (TRPM/forceflags), we must undo those changes so that the9369 * recompiler/IEM can (and should) use them when it resumes guest execution.9370 * Otherwise such operations must be done when we can no longer exit to ring-3.9371 *9372 * @returns Strict VBox status code (i.e. informational status codes too).9373 * @retval VINF_SUCCESS if we can proceed with running the guest, interrupts9374 * have been disabled.9375 * @retval VINF_VMX_VMEXIT if a nested-guest VM-exit occurs (e.g., while evaluating9376 * pending events).9377 * @retval VINF_EM_RESET if a triple-fault occurs while injecting a9378 * double-fault into the guest.9379 * @retval VINF_EM_DBG_STEPPED if @a fStepping is true and an event was9380 * dispatched directly.9381 * @retval VINF_* scheduling changes, we have to go back to ring-3.9382 *9383 * @param pVCpu The cross context virtual CPU structure.9384 * @param pVmxTransient The VMX-transient structure.9385 * @param fStepping Whether we are single-stepping the guest in the9386 * hypervisor debugger. Makes us ignore some of the reasons9387 * for returning to ring-3, and return VINF_EM_DBG_STEPPED9388 * if event dispatching took place.9389 */9390 static VBOXSTRICTRC vmxHCPreRunGuest(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, bool fStepping)9391 {9392 Assert(VMMRZCallRing3IsEnabled(pVCpu));9393 9394 Log4Func(("fIsNested=%RTbool fStepping=%RTbool\n", pVmxTransient->fIsNestedGuest, fStepping));9395 9396 #ifdef VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM9397 if (pVmxTransient->fIsNestedGuest)9398 {9399 RT_NOREF2(pVCpu, fStepping);9400 Log2Func(("Rescheduling to IEM due to nested-hwvirt or forced IEM exec -> VINF_EM_RESCHEDULE_REM\n"));9401 return VINF_EM_RESCHEDULE_REM;9402 }9403 #endif9404 9405 /*9406 * Check and process force flag actions, some of which might require us to go back to ring-3.9407 */9408 VBOXSTRICTRC rcStrict = vmxHCCheckForceFlags(pVCpu, pVmxTransient, fStepping);9409 if (rcStrict == VINF_SUCCESS)9410 {9411 /* FFs don't get set all the time. */9412 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX9413 if ( pVmxTransient->fIsNestedGuest9414 && !CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))9415 {9416 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchNstGstVmexit);9417 return VINF_VMX_VMEXIT;9418 }9419 #endif9420 }9421 else9422 return rcStrict;9423 9424 /*9425 * Virtualize memory-mapped accesses to the physical APIC (may take locks).9426 */9427 PVMCC pVM = pVCpu->CTX_SUFF(pVM);9428 if ( !pVCpu->hm.s.vmx.u64GstMsrApicBase9429 && (g_HmMsrs.u.vmx.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS)9430 && PDMHasApic(pVM))9431 {9432 int rc = vmxHCMapHCApicAccessPage(pVCpu);9433 AssertRCReturn(rc, rc);9434 }9435 9436 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX9437 /*9438 * Merge guest VMCS controls with the nested-guest VMCS controls.9439 *9440 * Even if we have not executed the guest prior to this (e.g. 
when resuming from a9441 * saved state), we should be okay with merging controls as we initialize the9442 * guest VMCS controls as part of VM setup phase.9443 */9444 if ( pVmxTransient->fIsNestedGuest9445 && !pVCpu->hm.s.vmx.fMergedNstGstCtls)9446 {9447 int rc = vmxHCMergeVmcsNested(pVCpu);9448 AssertRCReturn(rc, rc);9449 pVCpu->hm.s.vmx.fMergedNstGstCtls = true;9450 }9451 #endif9452 9453 /*9454 * Evaluate events to be injected into the guest.9455 *9456 * Events in TRPM can be injected without inspecting the guest state.9457 * If any new events (interrupts/NMI) are pending currently, we try to set up the9458 * guest to cause a VM-exit the next time they are ready to receive the event.9459 */9460 if (TRPMHasTrap(pVCpu))9461 vmxHCTrpmTrapToPendingEvent(pVCpu);9462 9463 uint32_t fIntrState;9464 rcStrict = vmxHCEvaluatePendingEvent(pVCpu, pVmxTransient, &fIntrState);9465 9466 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX9467 /*9468 * While evaluating pending events if something failed (unlikely) or if we were9469 * preparing to run a nested-guest but performed a nested-guest VM-exit, we should bail.9470 */9471 if (rcStrict != VINF_SUCCESS)9472 return rcStrict;9473 if ( pVmxTransient->fIsNestedGuest9474 && !CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))9475 {9476 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchNstGstVmexit);9477 return VINF_VMX_VMEXIT;9478 }9479 #else9480 Assert(rcStrict == VINF_SUCCESS);9481 #endif9482 9483 /*9484 * Event injection may take locks (currently the PGM lock for real-on-v86 case) and thus9485 * needs to be done with longjmps or interrupts + preemption enabled. Event injection might9486 * also result in triple-faulting the VM.9487 *9488 * With nested-guests, the above does not apply since unrestricted guest execution is a9489 * requirement. Regardless, we do this here to avoid duplicating code elsewhere.9490 */9491 rcStrict = vmxHCInjectPendingEvent(pVCpu, pVmxTransient, fIntrState, fStepping);9492 if (RT_LIKELY(rcStrict == VINF_SUCCESS))9493 { /* likely */ }9494 else9495 {9496 AssertMsg(rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping),9497 ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));9498 return rcStrict;9499 }9500 9501 /*9502 * A longjump might result in importing CR3 even for VM-exits that don't necessarily9503 * import CR3 themselves. We will need to update them here, as even as late as the above9504 * vmxHCInjectPendingEvent() call may lazily import guest-CPU state on demand causing9505 * the below force flags to be set.9506 */9507 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3))9508 {9509 Assert(!(ASMAtomicUoReadU64(&pVCpu->cpum.GstCtx.fExtrn) & CPUMCTX_EXTRN_CR3));9510 int rc2 = PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu), false /* fPdpesMapped */);9511 AssertMsgReturn(rc2 == VINF_SUCCESS || rc2 == VINF_PGM_SYNC_CR3,9512 ("%Rrc\n", rc2), RT_FAILURE_NP(rc2) ? rc2 : VERR_IPE_UNEXPECTED_INFO_STATUS);9513 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));9514 }9515 9516 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX9517 /* Paranoia. 
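The "merge once per VMCS switch" step described above can be thought of as combining the intercepts the outer hypervisor needs with those the nested hypervisor requested, cached behind a flag so the work is not repeated on every loop iteration. A sketch under the simplifying assumption that merging is a plain union of intercept bits (the real merge rules are more involved); all type and field names below are invented:

#include <stdint.h>
#include <stdbool.h>

typedef struct
{
    uint32_t u32ProcCtls;      /* primary processor-based execution controls */
    uint32_t u32XcptBitmap;    /* exception intercept bitmap */
} MERGEDCTLS;

typedef struct
{
    MERGEDCTLS Guest;          /* intercepts the outer hypervisor wants */
    MERGEDCTLS NstGst;         /* intercepts the nested hypervisor asked for */
    MERGEDCTLS Merged;         /* what actually goes into the hardware VMCS */
    bool       fMerged;        /* lazily computed, invalidated on a VMCS switch */
} VCPUCTLS;

/* Union of intercepts: an exit wanted by either level must cause an exit. */
static void mergeControls(VCPUCTLS *pCtls)
{
    if (pCtls->fMerged)
        return;                /* already merged for this guest/nested-guest switch */
    pCtls->Merged.u32ProcCtls   = pCtls->Guest.u32ProcCtls   | pCtls->NstGst.u32ProcCtls;
    pCtls->Merged.u32XcptBitmap = pCtls->Guest.u32XcptBitmap | pCtls->NstGst.u32XcptBitmap;
    pCtls->fMerged = true;
}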
*/9518 Assert(!pVmxTransient->fIsNestedGuest || CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx));9519 #endif9520 9521 /*9522 * No longjmps to ring-3 from this point on!!!9523 * Asserts() will still longjmp to ring-3 (but won't return), which is intentional, better than a kernel panic.9524 * This also disables flushing of the R0-logger instance (if any).9525 */9526 VMMRZCallRing3Disable(pVCpu);9527 9528 /*9529 * Export the guest state bits.9530 *9531 * We cannot perform longjmps while loading the guest state because we do not preserve the9532 * host/guest state (although the VMCS will be preserved) across longjmps which can cause9533 * CPU migration.9534 *9535 * If we are injecting events to a real-on-v86 mode guest, we would have updated RIP and some segment9536 * registers. Hence, exporting of the guest state needs to be done -after- injection of events.9537 */9538 rcStrict = vmxHCExportGuestStateOptimal(pVCpu, pVmxTransient);9539 if (RT_LIKELY(rcStrict == VINF_SUCCESS))9540 { /* likely */ }9541 else9542 {9543 VMMRZCallRing3Enable(pVCpu);9544 return rcStrict;9545 }9546 9547 /*9548 * We disable interrupts so that we don't miss any interrupts that would flag preemption9549 * (IPI/timers etc.) when thread-context hooks aren't used and we've been running with9550 * preemption disabled for a while. Since this is purely to aid the9551 * RTThreadPreemptIsPending() code, it doesn't matter that it may temporarily reenable and9552 * disable interrupt on NT.9553 *9554 * We need to check for force-flags that could've possible been altered since we last9555 * checked them (e.g. by PDMGetInterrupt() leaving the PDM critical section,9556 * see @bugref{6398}).9557 *9558 * We also check a couple of other force-flags as a last opportunity to get the EMT back9559 * to ring-3 before executing guest code.9560 */9561 pVmxTransient->fEFlags = ASMIntDisableFlags();9562 9563 if ( ( !VM_FF_IS_ANY_SET(pVM, VM_FF_EMT_RENDEZVOUS | VM_FF_TM_VIRTUAL_SYNC)9564 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HM_TO_R3_MASK))9565 || ( fStepping /* Optimized for the non-stepping case, so a bit of unnecessary work when stepping. */9566 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HM_TO_R3_MASK & ~(VMCPU_FF_TIMER | VMCPU_FF_PDM_CRITSECT))) )9567 {9568 if (!RTThreadPreemptIsPending(NIL_RTTHREAD))9569 {9570 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX9571 /*9572 * If we are executing a nested-guest make sure that we should intercept subsequent9573 * events. The one we are injecting might be part of VM-entry. This is mainly to keep9574 * the VM-exit instruction emulation happy.9575 */9576 if (pVmxTransient->fIsNestedGuest)9577 CPUMSetGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx, true);9578 #endif9579 9580 /*9581 * We've injected any pending events. This is really the point of no return (to ring-3).9582 *9583 * Note! 
The caller expects to continue with interrupts & longjmps disabled on successful9584 * returns from this function, so do -not- enable them here.9585 */9586 pVCpu->hm.s.Event.fPending = false;9587 return VINF_SUCCESS;9588 }9589 9590 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchPendingHostIrq);9591 rcStrict = VINF_EM_RAW_INTERRUPT;9592 }9593 else9594 {9595 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHmToR3FF);9596 rcStrict = VINF_EM_RAW_TO_R3;9597 }9598 9599 ASMSetFlags(pVmxTransient->fEFlags);9600 VMMRZCallRing3Enable(pVCpu);9601 9602 return rcStrict;9603 }9604 9605 9606 /**9607 * Final preparations before executing guest code using hardware-assisted VMX.9608 *9609 * We can no longer get preempted to a different host CPU and there are no returns9610 * to ring-3. We ignore any errors that may happen from this point (e.g. VMWRITE9611 * failures), this function is not intended to fail sans unrecoverable hardware9612 * errors.9613 *9614 * @param pVCpu The cross context virtual CPU structure.9615 * @param pVmxTransient The VMX-transient structure.9616 *9617 * @remarks Called with preemption disabled.9618 * @remarks No-long-jump zone!!!9619 */9620 static void vmxHCPreRunGuestCommitted(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)9621 {9622 Assert(!VMMRZCallRing3IsEnabled(pVCpu));9623 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));9624 Assert(!pVCpu->hm.s.Event.fPending);9625 9626 /*9627 * Indicate start of guest execution and where poking EMT out of guest-context is recognized.9628 */9629 VMCPU_ASSERT_STATE(pVCpu, VMCPUSTATE_STARTED_HM);9630 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);9631 9632 PVMCC pVM = pVCpu->CTX_SUFF(pVM);9633 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;9634 PHMPHYSCPU pHostCpu = hmR0GetCurrentCpu();9635 RTCPUID const idCurrentCpu = pHostCpu->idCpu;9636 9637 if (!CPUMIsGuestFPUStateActive(pVCpu))9638 {9639 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatLoadGuestFpuState, x);9640 if (CPUMR0LoadGuestFPU(pVM, pVCpu) == VINF_CPUM_HOST_CR0_MODIFIED)9641 pVCpu->hm.s.fCtxChanged |= HM_CHANGED_HOST_CONTEXT;9642 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatLoadGuestFpuState, x);9643 STAM_COUNTER_INC(&pVCpu->hm.s.StatLoadGuestFpu);9644 }9645 9646 /*9647 * Re-export the host state bits as we may've been preempted (only happens when9648 * thread-context hooks are used or when the VM start function changes) or if9649 * the host CR0 is modified while loading the guest FPU state above.9650 *9651 * The 64-on-32 switcher saves the (64-bit) host state into the VMCS and if we9652 * changed the switcher back to 32-bit, we *must* save the 32-bit host state here,9653 * see @bugref{8432}.9654 *9655 * This may also happen when switching to/from a nested-guest VMCS without leaving9656 * ring-0.9657 */9658 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_HOST_CONTEXT)9659 {9660 vmxHCExportHostState(pVCpu);9661 STAM_COUNTER_INC(&pVCpu->hm.s.StatExportHostState);9662 }9663 Assert(!(pVCpu->hm.s.fCtxChanged & HM_CHANGED_HOST_CONTEXT));9664 9665 /*9666 * Export the state shared between host and guest (FPU, debug, lazy MSRs).9667 */9668 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE)9669 vmxHCExportSharedState(pVCpu, pVmxTransient);9670 AssertMsg(!pVCpu->hm.s.fCtxChanged, ("fCtxChanged=%#RX64\n", pVCpu->hm.s.fCtxChanged));9671 9672 /*9673 * Store status of the shared guest/host debug state at the time of VM-entry.9674 */9675 pVmxTransient->fWasGuestDebugStateActive = CPUMIsGuestDebugStateActive(pVCpu);9676 pVmxTransient->fWasHyperDebugStateActive = CPUMIsHyperDebugStateActive(pVCpu);9677 9678 /*9679 * 
Always cache the TPR-shadow if the virtual-APIC page exists, thereby skipping9680 * more than one conditional check. The post-run side of our code shall determine9681 * if it needs to sync. the virtual APIC TPR with the TPR-shadow.9682 */9683 if (pVmcsInfo->pbVirtApic)9684 pVmxTransient->u8GuestTpr = pVmcsInfo->pbVirtApic[XAPIC_OFF_TPR];9685 9686 /*9687 * Update the host MSRs values in the VM-exit MSR-load area.9688 */9689 if (!pVCpu->hmr0.s.vmx.fUpdatedHostAutoMsrs)9690 {9691 if (pVmcsInfo->cExitMsrLoad > 0)9692 vmxHCUpdateAutoLoadHostMsrs(pVCpu, pVmcsInfo);9693 pVCpu->hmr0.s.vmx.fUpdatedHostAutoMsrs = true;9694 }9695 9696 /*9697 * Evaluate if we need to intercept guest RDTSC/P accesses. Set up the9698 * VMX-preemption timer based on the next virtual sync clock deadline.9699 */9700 if ( !pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer9701 || idCurrentCpu != pVCpu->hmr0.s.idLastCpu)9702 {9703 vmxHCUpdateTscOffsettingAndPreemptTimer(pVCpu, pVmxTransient, idCurrentCpu);9704 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = true;9705 }9706 9707 /* Record statistics of how often we use TSC offsetting as opposed to intercepting RDTSC/P. */9708 bool const fIsRdtscIntercepted = RT_BOOL(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_RDTSC_EXIT);9709 if (!fIsRdtscIntercepted)9710 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscOffset);9711 else9712 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscIntercept);9713 9714 ASMAtomicUoWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, true); /* Used for TLB flushing, set this across the world switch. */9715 vmxHCFlushTaggedTlb(pHostCpu, pVCpu, pVmcsInfo); /* Invalidate the appropriate guest entries from the TLB. */9716 Assert(idCurrentCpu == pVCpu->hmr0.s.idLastCpu);9717 pVCpu->hm.s.vmx.LastError.idCurrentCpu = idCurrentCpu; /* Record the error reporting info. with the current host CPU. */9718 pVmcsInfo->idHostCpuState = idCurrentCpu; /* Record the CPU for which the host-state has been exported. */9719 pVmcsInfo->idHostCpuExec = idCurrentCpu; /* Record the CPU on which we shall execute. */9720 9721 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatEntry, &pVCpu->hm.s.StatInGC, x);9722 9723 TMNotifyStartOfExecution(pVM, pVCpu); /* Notify TM to resume its clocks when TSC is tied to execution,9724 as we're about to start executing the guest. */9725 9726 /*9727 * Load the guest TSC_AUX MSR when we are not intercepting RDTSCP.9728 *9729 * This is done this late as updating the TSC offsetting/preemption timer above9730 * figures out if we can skip intercepting RDTSCP by calculating the number of9731 * host CPU ticks till the next virtual sync deadline (for the dynamic case).9732 */9733 if ( (pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_RDTSCP)9734 && !fIsRdtscIntercepted)9735 {9736 vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_TSC_AUX);9737 9738 /* NB: Because we call vmxHCAddAutoLoadStoreMsr with fUpdateHostMsr=true,9739 it's safe even after vmxHCUpdateAutoLoadHostMsrs has already been done. 
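The auto-load/store MSR handling around this point relies on the VM-entry auto-load and VM-exit MSR areas: arrays of MSR/value records the CPU loads and stores around the world switch, into which TSC_AUX is slotted when RDTSCP is not intercepted. A small model of the add-or-update operation (a fixed 8-entry array stands in for the real pages; MSRENTRY mirrors the architectural 16-byte record layout, everything else is invented):

#include <stdint.h>
#include <stdbool.h>

#define MSR_TSC_AUX 0xC0000103u   /* IA32_TSC_AUX */

typedef struct { uint32_t idMsr; uint32_t u32Reserved; uint64_t u64Value; } MSRENTRY;

typedef struct
{
    MSRENTRY aGuestMsrLoad[8];    /* loaded with guest values on VM-entry */
    uint32_t cMsrs;
} MSRAREA;

/* Add the MSR if absent, otherwise just refresh its value; false when the area is full. */
static bool addOrUpdateAutoLoadMsr(MSRAREA *pArea, uint32_t idMsr, uint64_t u64Value)
{
    for (uint32_t i = 0; i < pArea->cMsrs; i++)
        if (pArea->aGuestMsrLoad[i].idMsr == idMsr)
        {
            pArea->aGuestMsrLoad[i].u64Value = u64Value;
            return true;
        }
    if (pArea->cMsrs >= 8)
        return false;             /* area full: real code fails the add and reports it */
    pArea->aGuestMsrLoad[pArea->cMsrs].idMsr    = idMsr;
    pArea->aGuestMsrLoad[pArea->cMsrs].u64Value = u64Value;
    pArea->cMsrs++;
    return true;
}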
*/9740 int rc = vmxHCAddAutoLoadStoreMsr(pVCpu, pVmxTransient, MSR_K8_TSC_AUX, CPUMGetGuestTscAux(pVCpu),9741 true /* fSetReadWrite */, true /* fUpdateHostMsr */);9742 AssertRC(rc);9743 Assert(!pVmxTransient->fRemoveTscAuxMsr);9744 pVmxTransient->fRemoveTscAuxMsr = true;9745 }9746 9747 #ifdef VBOX_STRICT9748 Assert(pVCpu->hmr0.s.vmx.fUpdatedHostAutoMsrs);9749 vmxHCCheckAutoLoadStoreMsrs(pVCpu, pVmcsInfo, pVmxTransient->fIsNestedGuest);9750 vmxHCCheckHostEferMsr(pVmcsInfo);9751 AssertRC(vmxHCCheckCachedVmcsCtls(pVCpu, pVmcsInfo, pVmxTransient->fIsNestedGuest));9752 #endif9753 9754 #ifdef HMVMX_ALWAYS_CHECK_GUEST_STATE9755 /** @todo r=ramshankar: We can now probably use iemVmxVmentryCheckGuestState here.9756 * Add a PVMXMSRS parameter to it, so that IEM can look at the host MSRs,9757 * see @bugref{9180#c54}. */9758 uint32_t const uInvalidReason = vmxHCCheckGuestState(pVCpu, pVmcsInfo);9759 if (uInvalidReason != VMX_IGS_REASON_NOT_FOUND)9760 Log4(("vmxHCCheckGuestState returned %#x\n", uInvalidReason));9761 #endif9762 }9763 9764 9765 /**9766 * First C routine invoked after running guest code using hardware-assisted VMX.9767 *9768 * @param pVCpu The cross context virtual CPU structure.9769 * @param pVmxTransient The VMX-transient structure.9770 * @param rcVMRun Return code of VMLAUNCH/VMRESUME.9771 *9772 * @remarks Called with interrupts disabled, and returns with interrupts enabled!9773 *9774 * @remarks No-long-jump zone!!! This function will however re-enable longjmps9775 * unconditionally when it is safe to do so.9776 */9777 static void vmxHCPostRunGuest(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, int rcVMRun)9778 {9779 ASMAtomicUoWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, false); /* See HMInvalidatePageOnAllVCpus(): used for TLB flushing. */9780 ASMAtomicIncU32(&pVCpu->hmr0.s.cWorldSwitchExits); /* Initialized in vmR3CreateUVM(): used for EMT poking. */9781 pVCpu->hm.s.fCtxChanged = 0; /* Exits/longjmps to ring-3 requires saving the guest state. */9782 pVmxTransient->fVmcsFieldsRead = 0; /* Transient fields need to be read from the VMCS. */9783 pVmxTransient->fVectoringPF = false; /* Vectoring page-fault needs to be determined later. */9784 pVmxTransient->fVectoringDoublePF = false; /* Vectoring double page-fault needs to be determined later. */9785 9786 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;9787 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_RDTSC_EXIT))9788 {9789 uint64_t uGstTsc;9790 if (!pVmxTransient->fIsNestedGuest)9791 uGstTsc = pVCpu->hmr0.s.uTscExit + pVmcsInfo->u64TscOffset;9792 else9793 {9794 uint64_t const uNstGstTsc = pVCpu->hmr0.s.uTscExit + pVmcsInfo->u64TscOffset;9795 uGstTsc = CPUMRemoveNestedGuestTscOffset(pVCpu, uNstGstTsc);9796 }9797 TMCpuTickSetLastSeen(pVCpu, uGstTsc); /* Update TM with the guest TSC. */9798 }9799 9800 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatInGC, &pVCpu->hm.s.StatPreExit, x);9801 TMNotifyEndOfExecution(pVCpu->CTX_SUFF(pVM), pVCpu, pVCpu->hmr0.s.uTscExit); /* Notify TM that the guest is no longer running. */9802 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HM);9803 9804 pVCpu->hmr0.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_REQUIRED; /* Some host state messed up by VMX needs restoring. */9805 pVmcsInfo->fVmcsState |= VMX_V_VMCS_LAUNCH_STATE_LAUNCHED; /* Use VMRESUME instead of VMLAUNCH in the next run. */9806 #ifdef VBOX_STRICT9807 vmxHCCheckHostEferMsr(pVmcsInfo); /* Verify that the host EFER MSR wasn't modified. */9808 #endif9809 Assert(!ASMIntAreEnabled());9810 ASMSetFlags(pVmxTransient->fEFlags); /* Enable interrupts. 
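The TSC bookkeeping in the post-run code here reconstructs the TSC value the guest last observed from the host TSC sampled at VM-exit plus the VMCS TSC offset, undoing the nested-guest offset when applicable, before handing it to the timer code as the "last seen" value. In plain arithmetic (a sketch of the idea, not the exact VirtualBox helpers):

#include <stdint.h>
#include <stdbool.h>

/* What RDTSC returned to the guest == host TSC + VMCS TSC offset (when offsetting is on). */
static uint64_t guestTscAtExit(uint64_t uHostTscAtExit, uint64_t uTscOffset,
                               bool fNestedGuest, uint64_t uNstGstTscOffset)
{
    uint64_t uGstTsc = uHostTscAtExit + uTscOffset;   /* wraps modulo 2^64, as the hardware does */
    if (fNestedGuest)
        uGstTsc -= uNstGstTscOffset;                  /* peel off the offset the inner hypervisor added */
    return uGstTsc;                                   /* recorded as the guest's last-seen TSC */
}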
*/9811 Assert(!VMMRZCallRing3IsEnabled(pVCpu));9812 9813 #ifdef HMVMX_ALWAYS_CLEAN_TRANSIENT9814 /*9815 * Clean all the VMCS fields in the transient structure before reading9816 * anything from the VMCS.9817 */9818 pVmxTransient->uExitReason = 0;9819 pVmxTransient->uExitIntErrorCode = 0;9820 pVmxTransient->uExitQual = 0;9821 pVmxTransient->uGuestLinearAddr = 0;9822 pVmxTransient->uExitIntInfo = 0;9823 pVmxTransient->cbExitInstr = 0;9824 pVmxTransient->ExitInstrInfo.u = 0;9825 pVmxTransient->uEntryIntInfo = 0;9826 pVmxTransient->uEntryXcptErrorCode = 0;9827 pVmxTransient->cbEntryInstr = 0;9828 pVmxTransient->uIdtVectoringInfo = 0;9829 pVmxTransient->uIdtVectoringErrorCode = 0;9830 #endif9831 9832 /*9833 * Save the basic VM-exit reason and check if the VM-entry failed.9834 * See Intel spec. 24.9.1 "Basic VM-exit Information".9835 */9836 uint32_t uExitReason;9837 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_REASON, &uExitReason);9838 AssertRC(rc);9839 pVmxTransient->uExitReason = VMX_EXIT_REASON_BASIC(uExitReason);9840 pVmxTransient->fVMEntryFailed = VMX_EXIT_REASON_HAS_ENTRY_FAILED(uExitReason);9841 9842 /*9843 * Log the VM-exit before logging anything else as otherwise it might be a9844 * tad confusing what happens before and after the world-switch.9845 */9846 HMVMX_LOG_EXIT(pVCpu, uExitReason);9847 9848 /*9849 * Remove the TSC_AUX MSR from the auto-load/store MSR area and reset any MSR9850 * bitmap permissions, if it was added before VM-entry.9851 */9852 if (pVmxTransient->fRemoveTscAuxMsr)9853 {9854 vmxHCRemoveAutoLoadStoreMsr(pVCpu, pVmxTransient, MSR_K8_TSC_AUX);9855 pVmxTransient->fRemoveTscAuxMsr = false;9856 }9857 9858 /*9859 * Check if VMLAUNCH/VMRESUME succeeded.9860 * If this failed, we cause a guru meditation and cease further execution.9861 *9862 * However, if we are executing a nested-guest we might fail if we use the9863 * fast path rather than fully emulating VMLAUNCH/VMRESUME instruction in IEM.9864 */9865 if (RT_LIKELY(rcVMRun == VINF_SUCCESS))9866 {9867 /*9868 * Update the VM-exit history array here even if the VM-entry failed due to:9869 * - Invalid guest state.9870 * - MSR loading.9871 * - Machine-check event.9872 *9873 * In any of the above cases we will still have a "valid" VM-exit reason9874 * despite @a fVMEntryFailed being false.9875 *9876 * See Intel spec. 26.7 "VM-Entry failures during or after loading guest state".9877 *9878 * Note! We don't have CS or RIP at this point. 
Will probably address that later9879 * by amending the history entry added here.9880 */9881 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_VMX, pVmxTransient->uExitReason & EMEXIT_F_TYPE_MASK),9882 UINT64_MAX, pVCpu->hmr0.s.uTscExit);9883 9884 if (RT_LIKELY(!pVmxTransient->fVMEntryFailed))9885 {9886 VMMRZCallRing3Enable(pVCpu);9887 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));9888 9889 #ifdef HMVMX_ALWAYS_SAVE_RO_GUEST_STATE9890 vmxHCReadAllRoFieldsVmcs(pVCpu, pVmxTransient);9891 #endif9892 9893 /*9894 * Import the guest-interruptibility state always as we need it while evaluating9895 * injecting events on re-entry.9896 *9897 * We don't import CR0 (when unrestricted guest execution is unavailable) despite9898 * checking for real-mode while exporting the state because all bits that cause9899 * mode changes wrt CR0 are intercepted.9900 */9901 uint64_t const fImportMask = CPUMCTX_EXTRN_HM_VMX_INT_STATE9902 #if defined(HMVMX_ALWAYS_SYNC_FULL_GUEST_STATE) || defined(HMVMX_ALWAYS_SAVE_FULL_GUEST_STATE)9903 | HMVMX_CPUMCTX_EXTRN_ALL9904 #elif defined(HMVMX_ALWAYS_SAVE_GUEST_RFLAGS)9905 | CPUMCTX_EXTRN_RFLAGS9906 #endif9907 ;9908 rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, fImportMask);9909 AssertRC(rc);9910 9911 /*9912 * Sync the TPR shadow with our APIC state.9913 */9914 if ( !pVmxTransient->fIsNestedGuest9915 && (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW))9916 {9917 Assert(pVmcsInfo->pbVirtApic);9918 if (pVmxTransient->u8GuestTpr != pVmcsInfo->pbVirtApic[XAPIC_OFF_TPR])9919 {9920 rc = APICSetTpr(pVCpu, pVmcsInfo->pbVirtApic[XAPIC_OFF_TPR]);9921 AssertRC(rc);9922 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_APIC_TPR);9923 }9924 }9925 9926 Assert(VMMRZCallRing3IsEnabled(pVCpu));9927 Assert( pVmxTransient->fWasGuestDebugStateActive == false9928 || pVmxTransient->fWasHyperDebugStateActive == false);9929 return;9930 }9931 }9932 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX9933 else if (pVmxTransient->fIsNestedGuest)9934 AssertMsgFailed(("VMLAUNCH/VMRESUME failed but shouldn't happen when VMLAUNCH/VMRESUME was emulated in IEM!\n"));9935 #endif9936 else9937 Log4Func(("VM-entry failure: rcVMRun=%Rrc fVMEntryFailed=%RTbool\n", rcVMRun, pVmxTransient->fVMEntryFailed));9938 9939 VMMRZCallRing3Enable(pVCpu);9940 }9941 9942 9943 /**9944 8081 * Runs the guest code using hardware-assisted VMX the normal way. 
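The TPR-shadow handling referenced in this hunk is symmetric around the world switch: the TPR byte in the virtual-APIC page is cached just before VM-entry, and after VM-exit, if the guest changed it without triggering an exit, the new value is pushed back into the emulated APIC. Schematically, with invented type and helper names:

#include <stdint.h>
#include <stdbool.h>

#define XAPIC_OFF_TPR 0x80          /* offset of the TPR register within the APIC page */

typedef struct
{
    uint8_t abVirtApicPage[4096];   /* page the CPU lets the guest write TPR into */
    uint8_t u8CachedTpr;            /* snapshot taken just before VM-entry */
} TPRSHADOW;

static void cacheTprBeforeRun(TPRSHADOW *pShadow)
{
    pShadow->u8CachedTpr = pShadow->abVirtApicPage[XAPIC_OFF_TPR];
}

/* Returns true when the emulated APIC needs updating with the new TPR value. */
static bool syncTprAfterRun(TPRSHADOW *pShadow, uint8_t *pu8NewTpr)
{
    uint8_t const u8Now = pShadow->abVirtApicPage[XAPIC_OFF_TPR];
    if (u8Now == pShadow->u8CachedTpr)
        return false;               /* guest did not touch CR8/TPR during the run */
    *pu8NewTpr = u8Now;             /* caller forwards this to the APIC device */
    return true;
}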
9945 8082 * … … 9986 8123 Assert(!HMR0SuspendPending()); 9987 8124 HMVMX_ASSERT_CPU_SAFE(pVCpu); 9988 STAM_PROFILE_ADV_START(& pVCpu->hm.s.StatEntry, x);8125 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATE(pVCpu).StatEntry, x); 9989 8126 9990 8127 /* … … 10011 8148 else 10012 8149 { 10013 STAM_PROFILE_ADV_STOP(& pVCpu->hm.s.StatPreExit, x);8150 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATE(pVCpu).StatPreExit, x); 10014 8151 vmxHCReportWorldSwitchError(pVCpu, rcRun, &VmxTransient); 10015 8152 return rcRun; … … 10020 8157 */ 10021 8158 AssertMsg(VmxTransient.uExitReason <= VMX_EXIT_MAX, ("%#x\n", VmxTransient.uExitReason)); 10022 STAM_COUNTER_INC(& pVCpu->hm.s.StatExitAll);10023 STAM_COUNTER_INC(& pVCpu->hm.s.aStatExitReason[VmxTransient.uExitReason & MASK_EXITREASON_STAT]);10024 STAM_PROFILE_ADV_STOP_START(& pVCpu->hm.s.StatPreExit, &pVCpu->hm.s.StatExitHandling, x);8159 STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitAll); 8160 STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).aStatExitReason[VmxTransient.uExitReason & MASK_EXITREASON_STAT]); 8161 STAM_PROFILE_ADV_STOP_START(&VCPU_2_VMXSTATE(pVCpu).StatPreExit, &VCPU_2_VMXSTATE(pVCpu).StatExitHandling, x); 10025 8162 HMVMX_START_EXIT_DISPATCH_PROF(); 10026 8163 … … 10035 8172 rcStrict = vmxHCHandleExit(pVCpu, &VmxTransient); 10036 8173 #endif 10037 STAM_PROFILE_ADV_STOP(& pVCpu->hm.s.StatExitHandling, x);8174 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATE(pVCpu).StatExitHandling, x); 10038 8175 if (rcStrict == VINF_SUCCESS) 10039 8176 { 10040 8177 if (++(*pcLoops) <= cMaxResumeLoops) 10041 8178 continue; 10042 STAM_COUNTER_INC(& pVCpu->hm.s.StatSwitchMaxResumeLoops);8179 STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatSwitchMaxResumeLoops); 10043 8180 rcStrict = VINF_EM_RAW_INTERRUPT; 10044 8181 } … … 10046 8183 } 10047 8184 10048 STAM_PROFILE_ADV_STOP(& pVCpu->hm.s.StatEntry, x);8185 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATE(pVCpu).StatEntry, x); 10049 8186 return rcStrict; 10050 8187 } … … 10098 8235 Assert(!HMR0SuspendPending()); 10099 8236 HMVMX_ASSERT_CPU_SAFE(pVCpu); 10100 STAM_PROFILE_ADV_START(& pVCpu->hm.s.StatEntry, x);8237 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATE(pVCpu).StatEntry, x); 10101 8238 10102 8239 /* … … 10123 8260 else 10124 8261 { 10125 STAM_PROFILE_ADV_STOP(& pVCpu->hm.s.StatPreExit, x);8262 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATE(pVCpu).StatPreExit, x); 10126 8263 vmxHCReportWorldSwitchError(pVCpu, rcRun, &VmxTransient); 10127 8264 return rcRun; … … 10132 8269 */ 10133 8270 AssertMsg(VmxTransient.uExitReason <= VMX_EXIT_MAX, ("%#x\n", VmxTransient.uExitReason)); 10134 STAM_COUNTER_INC(& pVCpu->hm.s.StatExitAll);10135 STAM_COUNTER_INC(& pVCpu->hm.s.StatNestedExitAll);10136 STAM_COUNTER_INC(& pVCpu->hm.s.aStatNestedExitReason[VmxTransient.uExitReason & MASK_EXITREASON_STAT]);10137 STAM_PROFILE_ADV_STOP_START(& pVCpu->hm.s.StatPreExit, &pVCpu->hm.s.StatExitHandling, x);8271 STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitAll); 8272 STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatNestedExitAll); 8273 STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).aStatNestedExitReason[VmxTransient.uExitReason & MASK_EXITREASON_STAT]); 8274 STAM_PROFILE_ADV_STOP_START(&VCPU_2_VMXSTATE(pVCpu).StatPreExit, &VCPU_2_VMXSTATE(pVCpu).StatExitHandling, x); 10138 8275 HMVMX_START_EXIT_DISPATCH_PROF(); 10139 8276 … … 10144 8281 */ 10145 8282 rcStrict = vmxHCHandleExitNested(pVCpu, &VmxTransient); 10146 STAM_PROFILE_ADV_STOP(& pVCpu->hm.s.StatExitHandling, x);8283 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATE(pVCpu).StatExitHandling, x); 10147 8284 if (rcStrict == VINF_SUCCESS) 10148 8285 { 10149 
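Both run loops in this hunk share one skeleton: prepare, execute, handle the exit, and repeat until a handler asks to leave or a resume-loop cap forces a return to ring-3 so the rest of the VMM gets a turn. Stripped to its control flow, with placeholder callbacks and status codes instead of the real entry points:

#include <stdint.h>

enum { RUN_CONTINUE = 0, RUN_BACK_TO_RING3 = 1, RUN_RAW_INTERRUPT = 2 };

typedef int (*PFNSTEP)(void *pvUser);      /* each stage returns one of the RUN_* statuses */

static int runLoop(PFNSTEP pfnPreRun, PFNSTEP pfnExecute, PFNSTEP pfnHandleExit,
                   void *pvUser, uint32_t cMaxResumeLoops)
{
    for (uint32_t cLoops = 0; ; )
    {
        int rc = pfnPreRun(pvUser);        /* events, force flags, state export */
        if (rc != RUN_CONTINUE)
            return rc;
        rc = pfnExecute(pvUser);           /* the VMLAUNCH/VMRESUME equivalent */
        if (rc != RUN_CONTINUE)
            return rc;
        rc = pfnHandleExit(pvUser);        /* dispatch on the exit reason */
        if (rc != RUN_CONTINUE)
            return rc;
        if (++cLoops > cMaxResumeLoops)    /* don't starve ring-3 forever */
            return RUN_RAW_INTERRUPT;
    }
}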
8286 if (!CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx)) 10150 8287 { 10151 STAM_COUNTER_INC(& pVCpu->hm.s.StatSwitchNstGstVmexit);8288 STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatSwitchNstGstVmexit); 10152 8289 rcStrict = VINF_VMX_VMEXIT; 10153 8290 } … … 10156 8293 if (++(*pcLoops) <= cMaxResumeLoops) 10157 8294 continue; 10158 STAM_COUNTER_INC(& pVCpu->hm.s.StatSwitchMaxResumeLoops);8295 STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatSwitchMaxResumeLoops); 10159 8296 rcStrict = VINF_EM_RAW_INTERRUPT; 10160 8297 } … … 10165 8302 } 10166 8303 10167 STAM_PROFILE_ADV_STOP(& pVCpu->hm.s.StatEntry, x);8304 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATE(pVCpu).StatEntry, x); 10168 8305 return rcStrict; 10169 8306 } … … 10560 8697 { 10561 8698 pDbgState->fClearCr0Mask = false; 10562 ASMAtomicUoOrU64(& pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_CR0);8699 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CR0); 10563 8700 } 10564 8701 if (pDbgState->fClearCr4Mask) 10565 8702 { 10566 8703 pDbgState->fClearCr4Mask = false; 10567 ASMAtomicUoOrU64(& pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_CR4);8704 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CR4); 10568 8705 } 10569 8706 } … … 11085 9222 } 11086 9223 9224 #ifdef IN_RING0 /* NMIs should never reach R3. */ 11087 9225 /* 11088 9226 * Check for host NMI, just to get that out of the way. … … 11097 9235 return vmxHCExitHostNmi(pVCpu, pVmxTransient->pVmcsInfo); 11098 9236 } 9237 #endif 11099 9238 11100 9239 /* 11101 9240 * Check for single stepping event if we're stepping. 11102 9241 */ 11103 if ( pVCpu->hm.s.fSingleInstruction)9242 if (VCPU_2_VMXSTATE(pVCpu).fSingleInstruction) 11104 9243 { 11105 9244 switch (uExitReason) … … 11234 9373 11235 9374 /* Set HMCPU indicators. */ 11236 bool const fSavedSingleInstruction = pVCpu->hm.s.fSingleInstruction;11237 pVCpu->hm.s.fSingleInstruction = pVCpu->hm.s.fSingleInstruction || DBGFIsStepping(pVCpu);9375 bool const fSavedSingleInstruction = VCPU_2_VMXSTATE(pVCpu).fSingleInstruction; 9376 VCPU_2_VMXSTATE(pVCpu).fSingleInstruction = VCPU_2_VMXSTATE(pVCpu).fSingleInstruction || DBGFIsStepping(pVCpu); 11238 9377 pVCpu->hmr0.s.fDebugWantRdTscExit = false; 11239 9378 pVCpu->hmr0.s.fUsingDebugLoop = true; … … 11252 9391 Assert(!HMR0SuspendPending()); 11253 9392 HMVMX_ASSERT_CPU_SAFE(pVCpu); 11254 STAM_PROFILE_ADV_START(& pVCpu->hm.s.StatEntry, x);11255 bool fStepping = pVCpu->hm.s.fSingleInstruction;9393 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATE(pVCpu).StatEntry, x); 9394 bool fStepping = VCPU_2_VMXSTATE(pVCpu).fSingleInstruction; 11256 9395 11257 9396 /* Set up VM-execution controls the next two can respond to. */ … … 11287 9426 else 11288 9427 { 11289 STAM_PROFILE_ADV_STOP(& pVCpu->hm.s.StatPreExit, x);9428 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATE(pVCpu).StatPreExit, x); 11290 9429 vmxHCReportWorldSwitchError(pVCpu, rcRun, &VmxTransient); 11291 9430 return rcRun; … … 11294 9433 /* Profile the VM-exit. 
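The debug loop set up above decides which extra VM-exits to look at while single-stepping by consulting a bitmap indexed by exit reason. A minimal version of such a bitmap; the DBGSTATE name is invented, and the two example reasons (MOV-DRx = 29, RDTSC = 16, per the VMX basic exit-reason numbering) are just illustrations:

#include <stdint.h>
#include <stdbool.h>

#define MAX_EXIT_REASONS 128

typedef struct { uint32_t bmExitsToCheck[MAX_EXIT_REASONS / 32]; } DBGSTATE;

static void dbgAddExitToCheck(DBGSTATE *pDbg, uint32_t uExitReason)
{
    if (uExitReason < MAX_EXIT_REASONS)
        pDbg->bmExitsToCheck[uExitReason / 32] |= UINT32_C(1) << (uExitReason % 32);
}

static bool dbgIsExitInteresting(const DBGSTATE *pDbg, uint32_t uExitReason)
{
    if (uExitReason >= MAX_EXIT_REASONS)
        return false;
    return (pDbg->bmExitsToCheck[uExitReason / 32] >> (uExitReason % 32)) & 1;
}

/* Example: while single-stepping, also stop on MOV-DRx and RDTSC exits. */
static void dbgSetupStepping(DBGSTATE *pDbg)
{
    dbgAddExitToCheck(pDbg, 29);   /* MOV DRx */
    dbgAddExitToCheck(pDbg, 16);   /* RDTSC   */
}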
*/ 11295 9434 AssertMsg(VmxTransient.uExitReason <= VMX_EXIT_MAX, ("%#x\n", VmxTransient.uExitReason)); 11296 STAM_COUNTER_INC(& pVCpu->hm.s.StatExitAll);11297 STAM_COUNTER_INC(& pVCpu->hm.s.aStatExitReason[VmxTransient.uExitReason & MASK_EXITREASON_STAT]);11298 STAM_PROFILE_ADV_STOP_START(& pVCpu->hm.s.StatPreExit, &pVCpu->hm.s.StatExitHandling, x);9435 STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitAll); 9436 STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).aStatExitReason[VmxTransient.uExitReason & MASK_EXITREASON_STAT]); 9437 STAM_PROFILE_ADV_STOP_START(&VCPU_2_VMXSTATE(pVCpu).StatPreExit, &VCPU_2_VMXSTATE(pVCpu).StatExitHandling, x); 11299 9438 HMVMX_START_EXIT_DISPATCH_PROF(); 11300 9439 … … 11305 9444 */ 11306 9445 rcStrict = vmxHCRunDebugHandleExit(pVCpu, &VmxTransient, &DbgState); 11307 STAM_PROFILE_ADV_STOP(& pVCpu->hm.s.StatExitHandling, x);9446 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATE(pVCpu).StatExitHandling, x); 11308 9447 if (rcStrict != VINF_SUCCESS) 11309 9448 break; 11310 9449 if (++(*pcLoops) > cMaxResumeLoops) 11311 9450 { 11312 STAM_COUNTER_INC(& pVCpu->hm.s.StatSwitchMaxResumeLoops);9451 STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatSwitchMaxResumeLoops); 11313 9452 rcStrict = VINF_EM_RAW_INTERRUPT; 11314 9453 break; … … 11329 9468 break; 11330 9469 } 11331 ASMAtomicUoOrU64(& pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_DR7);9470 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_DR7); 11332 9471 } 11333 9472 … … 11360 9499 pVCpu->hmr0.s.fUsingDebugLoop = false; 11361 9500 pVCpu->hmr0.s.fDebugWantRdTscExit = false; 11362 pVCpu->hm.s.fSingleInstruction = fSavedSingleInstruction;11363 11364 STAM_PROFILE_ADV_STOP(& pVCpu->hm.s.StatEntry, x);9501 VCPU_2_VMXSTATE(pVCpu).fSingleInstruction = fSavedSingleInstruction; 9502 9503 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATE(pVCpu).StatEntry, x); 11365 9504 return rcStrict; 11366 9505 } 11367 9506 #endif 11368 9507 11369 9508 /** @} */ … … 11387 9526 VBOXSTRICTRC rcStrict = a_CallExpr; \ 11388 9527 if (a_fSave != 0) \ 11389 ASMAtomicUoOrU64(& pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST); \9528 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST); \ 11390 9529 return rcStrict; \ 11391 9530 } while (0) … … 11621 9760 #define HMVMX_UNEXPECTED_EXIT_RET(a_pVCpu, a_HmError) \ 11622 9761 do { \ 11623 (a_pVCpu)->hm.s.u32HMError = (a_HmError); \9762 VCPU_2_VMXSTATE((a_pVCpu)).u32HMError = (a_HmError); \ 11624 9763 return VERR_VMX_UNEXPECTED_EXIT; \ 11625 9764 } while (0) 11626 9765 11627 9766 #ifdef VBOX_STRICT 9767 # ifdef IN_RING0 11628 9768 /* Is there some generic IPRT define for this that are not in Runtime/internal/\* ?? */ 11629 9769 # define HMVMX_ASSERT_PREEMPT_CPUID_VAR() \ … … 11651 9791 HMVMX_STOP_EXIT_DISPATCH_PROF(); \ 11652 9792 } while (0) 9793 # else 9794 # define HMVMX_ASSERT_PREEMPT_CPUID_VAR() do { } while(0) 9795 # define HMVMX_ASSERT_PREEMPT_CPUID() do { } while(0) 9796 # define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \ 9797 do { \ 9798 AssertPtr((a_pVCpu)); \ 9799 AssertPtr((a_pVmxTransient)); \ 9800 Assert((a_pVmxTransient)->fVMEntryFailed == false); \ 9801 Assert((a_pVmxTransient)->pVmcsInfo); \ 9802 Log4Func(("vcpu[%RU32]\n", (a_pVCpu)->idCpu)); \ 9803 HMVMX_STOP_EXIT_DISPATCH_PROF(); \ 9804 } while (0) 9805 # endif 11653 9806 11654 9807 # define HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \ … … 11733 9886 /* Advance the RIP. 
*/ 11734 9887 pVCpu->cpum.GstCtx.rip += cbInstr; 11735 ASMAtomicUoOrU64(& pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP);9888 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP); 11736 9889 11737 9890 /* Update interrupt inhibition. */ … … 11784 9937 static VBOXSTRICTRC vmxHCCheckExitDueToEventDelivery(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient) 11785 9938 { 11786 Assert(! pVCpu->hm.s.Event.fPending);9939 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending); 11787 9940 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_XCPT_INFO); 11788 9941 if ( pVmxTransient->uExitReason == VMX_EXIT_EPT_VIOLATION … … 11884 10037 11885 10038 /* If uExitVector is #PF, CR2 value will be updated from the VMCS if it's a guest #PF, see vmxHCExitXcptPF(). */ 11886 STAM_COUNTER_INC(& pVCpu->hm.s.StatInjectReflect);10039 STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatInjectReflect); 11887 10040 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(uIdtVectorInfo), 0 /* cbInstr */, 11888 10041 u32ErrCode, pVCpu->cpum.GstCtx.cr2); 11889 10042 11890 Log4Func(("IDT: Pending vectoring event %#RX64 Err=%#RX32\n", pVCpu->hm.s.Event.u64IntInfo,11891 pVCpu->hm.s.Event.u32ErrCode));10043 Log4Func(("IDT: Pending vectoring event %#RX64 Err=%#RX32\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo, 10044 VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode)); 11892 10045 Assert(rcStrict == VINF_SUCCESS); 11893 10046 break; … … 11907 10060 { 11908 10061 pVmxTransient->fVectoringDoublePF = true; 11909 Log4Func(("IDT: Vectoring double #PF %#RX64 cr2=%#RX64\n", pVCpu->hm.s.Event.u64IntInfo,10062 Log4Func(("IDT: Vectoring double #PF %#RX64 cr2=%#RX64\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo, 11910 10063 pVCpu->cpum.GstCtx.cr2)); 11911 10064 rcStrict = VINF_SUCCESS; … … 11913 10066 else 11914 10067 { 11915 STAM_COUNTER_INC(& pVCpu->hm.s.StatInjectConvertDF);10068 STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatInjectConvertDF); 11916 10069 vmxHCSetPendingXcptDF(pVCpu); 11917 Log4Func(("IDT: Pending vectoring #DF %#RX64 uIdtVector=%#x uExitVector=%#x\n", pVCpu->hm.s.Event.u64IntInfo,10070 Log4Func(("IDT: Pending vectoring #DF %#RX64 uIdtVector=%#x uExitVector=%#x\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo, 11918 10071 uIdtVector, VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo))); 11919 10072 rcStrict = VINF_HM_DOUBLE_FAULT; … … 12256 10409 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict))); 12257 10410 12258 ASMAtomicUoOrU64(& pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);10411 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0); 12259 10412 if (rcStrict == VINF_IEM_RAISED_XCPT) 12260 10413 { 12261 ASMAtomicUoOrU64(& pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);10414 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK); 12262 10415 rcStrict = VINF_SUCCESS; 12263 10416 } 12264 10417 12265 STAM_COUNTER_INC(& pVCpu->hm.s.StatExitLmsw);10418 STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitLmsw); 12266 10419 Log4Func(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict))); 12267 10420 return rcStrict; … … 12281 10434 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict))); 12282 10435 12283 ASMAtomicUoOrU64(& pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);10436 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0); 12284 10437 
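The IDT-vectoring handling nearby has three possible outcomes when a new exception is raised while another was being delivered: re-inject (reflect) the original event, convert the pair into a #DF, or give up with a triple fault. A simplified classifier following the usual benign/contributory/page-fault rules; this is a rough model, not the exact tables used here:

#include <stdint.h>
#include <stdbool.h>

enum REFLECT { REFLECT_EVENT, REFLECT_DF, REFLECT_TRIPLE_FAULT };

/* Contributory exceptions: #DE, #TS, #NP, #SS, #GP. */
static bool isContributory(uint8_t uVector)
{
    return uVector == 0 || uVector == 10 || uVector == 11 || uVector == 12 || uVector == 13;
}

/* Classify "exception B raised while delivering exception A" (simplified). */
static enum REFLECT classifyDelivery(uint8_t uFirst, uint8_t uSecond)
{
    if (uFirst == 8 /* #DF */)
        return REFLECT_TRIPLE_FAULT;                   /* fault during #DF delivery */
    if (uFirst == 14 /* #PF */ && (uSecond == 14 || isContributory(uSecond)))
        return REFLECT_DF;                             /* e.g. the vectoring #PF case above */
    if (isContributory(uFirst) && isContributory(uSecond))
        return REFLECT_DF;
    return REFLECT_EVENT;                              /* otherwise re-inject the original event */
}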
if (rcStrict == VINF_IEM_RAISED_XCPT) 12285 10438 { 12286 ASMAtomicUoOrU64(& pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);10439 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK); 12287 10440 rcStrict = VINF_SUCCESS; 12288 10441 } 12289 10442 12290 STAM_COUNTER_INC(& pVCpu->hm.s.StatExitClts);10443 STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitClts); 12291 10444 Log4Func(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict))); 12292 10445 return rcStrict; … … 12310 10463 12311 10464 if (iGReg == X86_GREG_xSP) 12312 ASMAtomicUoOrU64(& pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_RSP);10465 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_RSP); 12313 10466 else 12314 ASMAtomicUoOrU64(& pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);10467 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS); 12315 10468 #ifdef VBOX_WITH_STATISTICS 12316 10469 switch (iCrReg) 12317 10470 { 12318 case 0: STAM_COUNTER_INC(& pVCpu->hm.s.StatExitCR0Read); break;12319 case 2: STAM_COUNTER_INC(& pVCpu->hm.s.StatExitCR2Read); break;12320 case 3: STAM_COUNTER_INC(& pVCpu->hm.s.StatExitCR3Read); break;12321 case 4: STAM_COUNTER_INC(& pVCpu->hm.s.StatExitCR4Read); break;12322 case 8: STAM_COUNTER_INC(& pVCpu->hm.s.StatExitCR8Read); break;10471 case 0: STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitCR0Read); break; 10472 case 2: STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitCR2Read); break; 10473 case 3: STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitCR3Read); break; 10474 case 4: STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitCR4Read); break; 10475 case 8: STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitCR8Read); break; 12323 10476 } 12324 10477 #endif … … 12343 10496 { 12344 10497 case 0: 12345 ASMAtomicUoOrU64(& pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR010498 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0 12346 10499 | HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_EXIT_CTLS); 12347 STAM_COUNTER_INC(& pVCpu->hm.s.StatExitCR0Write);10500 STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitCR0Write); 12348 10501 Log4Func(("CR0 write. rcStrict=%Rrc CR0=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr0)); 12349 10502 break; 12350 10503 12351 10504 case 2: 12352 STAM_COUNTER_INC(& pVCpu->hm.s.StatExitCR2Write);10505 STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitCR2Write); 12353 10506 /* Nothing to do here, CR2 it's not part of the VMCS. */ 12354 10507 break; 12355 10508 12356 10509 case 3: 12357 ASMAtomicUoOrU64(& pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR3);12358 STAM_COUNTER_INC(& pVCpu->hm.s.StatExitCR3Write);10510 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR3); 10511 STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitCR3Write); 12359 10512 Log4Func(("CR3 write. 
rcStrict=%Rrc CR3=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr3)); 12360 10513 break; 12361 10514 12362 10515 case 4: 12363 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR4); 12364 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR4Write); 10516 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR4); 10517 STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitCR4Write); 10518 #ifdef IN_RING0 12365 10519 Log4Func(("CR4 write. rc=%Rrc CR4=%#RX64 fLoadSaveGuestXcr0=%u\n", VBOXSTRICTRC_VAL(rcStrict), 12366 10520 pVCpu->cpum.GstCtx.cr4, pVCpu->hmr0.s.fLoadSaveGuestXcr0)); 10521 #else 10522 Log4Func(("CR4 write. rc=%Rrc CR4=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr4)); 10523 #endif 12367 10524 break; 12368 10525 12369 10526 case 8: 12370 ASMAtomicUoOrU64(& pVCpu->hm.s.fCtxChanged,10527 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, 12371 10528 HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_APIC_TPR); 12372 STAM_COUNTER_INC(& pVCpu->hm.s.StatExitCR8Write);10529 STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitCR8Write); 12373 10530 break; 12374 10531 … … 12380 10537 if (rcStrict == VINF_IEM_RAISED_XCPT) 12381 10538 { 12382 ASMAtomicUoOrU64(& pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);10539 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK); 12383 10540 rcStrict = VINF_SUCCESS; 12384 10541 } … … 12395 10552 { 12396 10553 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient); 10554 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient); 10555 10556 #ifdef IN_RING0 12397 10557 PVMCC pVM = pVCpu->CTX_SUFF(pVM); 12398 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);12399 12400 10558 if (!pVM->hmr0.s.fNestedPaging) 12401 10559 { /* likely */ } 12402 10560 else 12403 { 12404 #if !defined(HMVMX_ALWAYS_TRAP_ALL_XCPTS) && !defined(HMVMX_ALWAYS_TRAP_PF) 10561 #endif 10562 { 10563 #if !defined(HMVMX_ALWAYS_TRAP_ALL_XCPTS) && !defined(HMVMX_ALWAYS_TRAP_PF) && defined(IN_RING0) 12405 10564 Assert(pVmxTransient->fIsNestedGuest || pVCpu->hmr0.s.fUsingDebugLoop); 12406 10565 #endif 12407 pVCpu->hm.s.Event.fPending = false; /* In case it's a contributory or vectoring #PF. */10566 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* In case it's a contributory or vectoring #PF. */ 12408 10567 if (!pVmxTransient->fVectoringDoublePF) 12409 10568 { … … 12418 10577 Log4Func(("Pending #DF due to vectoring #PF w/ NestedPaging\n")); 12419 10578 } 12420 STAM_COUNTER_INC(& pVCpu->hm.s.StatExitGuestPF);10579 STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitGuestPF); 12421 10580 return VINF_SUCCESS; 12422 10581 } … … 12428 10587 if (pVmxTransient->fVectoringPF) 12429 10588 { 12430 Assert( pVCpu->hm.s.Event.fPending);10589 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending); 12431 10590 return VINF_EM_RAW_INJECT_TRPM_EVENT; 12432 10591 } … … 12449 10608 * emulated something like LTR or a far jump. Any part of the CPU context may have changed. 
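The #PF exit path that follows boils down to mapping the outcome of the VMM's page-fault handler onto one of a few actions: silently resume after the shadow-paging code fixed things, forward the fault to the guest (or promote a vectoring #PF to #DF), or fall back to instruction emulation. A sketch with invented status and action names:

#include <stdint.h>

enum PGMRESULT { PGM_FIXED = 0, PGM_GUEST_FAULT = 1, PGM_NEEDS_EMULATION = 2 };
enum PFACTION  { PF_RESUME_GUEST, PF_INJECT_PF, PF_INJECT_DF, PF_EMULATE };

static enum PFACTION handleGuestPageFault(enum PGMRESULT rcPgm, int fVectoringPF)
{
    switch (rcPgm)
    {
        case PGM_FIXED:                    /* shadow page tables synced, nothing guest-visible */
            return PF_RESUME_GUEST;
        case PGM_GUEST_FAULT:              /* genuinely the guest's fault */
            return fVectoringPF ? PF_INJECT_DF   /* #PF while delivering a #PF -> #DF */
                                : PF_INJECT_PF;  /* forward CR2 + error code to the guest */
        default:                           /* MMIO or something the CPU can't do directly */
            return PF_EMULATE;
    }
}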
12450 10609 */ 12451 ASMAtomicUoOrU64(& pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);10610 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST); 12452 10611 TRPMResetTrap(pVCpu); 12453 STAM_COUNTER_INC(& pVCpu->hm.s.StatExitShadowPF);10612 STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitShadowPF); 12454 10613 return rc; 12455 10614 } … … 12462 10621 uint32_t const uGstErrorCode = TRPMGetErrorCode(pVCpu); 12463 10622 TRPMResetTrap(pVCpu); 12464 pVCpu->hm.s.Event.fPending = false; /* In case it's a contributory #PF. */10623 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* In case it's a contributory #PF. */ 12465 10624 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), 0 /* cbInstr */, 12466 10625 uGstErrorCode, pVmxTransient->uExitQual); … … 12470 10629 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */ 12471 10630 TRPMResetTrap(pVCpu); 12472 pVCpu->hm.s.Event.fPending = false; /* Clear pending #PF to replace it with #DF. */10631 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* Clear pending #PF to replace it with #DF. */ 12473 10632 vmxHCSetPendingXcptDF(pVCpu); 12474 10633 Log4Func(("#PF: Pending #DF due to vectoring #PF\n")); 12475 10634 } 12476 10635 12477 STAM_COUNTER_INC(& pVCpu->hm.s.StatExitGuestPF);10636 STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitGuestPF); 12478 10637 return VINF_SUCCESS; 12479 10638 } 12480 10639 12481 10640 TRPMResetTrap(pVCpu); 12482 STAM_COUNTER_INC(& pVCpu->hm.s.StatExitShadowPFEM);10641 STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitShadowPFEM); 12483 10642 return rc; 12484 10643 } … … 12493 10652 { 12494 10653 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient); 12495 STAM_COUNTER_INC(& pVCpu->hm.s.StatExitGuestMF);10654 STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitGuestMF); 12496 10655 12497 10656 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CR0); … … 12525 10684 { 12526 10685 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient); 12527 STAM_COUNTER_INC(& pVCpu->hm.s.StatExitGuestBP);10686 STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitGuestBP); 12528 10687 12529 10688 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL); … … 12574 10733 * Check for debug/trace events and import state accordingly. 12575 10734 */ 12576 STAM_REL_COUNTER_INC(& pVCpu->hm.s.StatExitGuestACSplitLock);12577 PVMCC pVM = pVCpu-> pVMR0;10735 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitGuestACSplitLock); 10736 PVMCC pVM = pVCpu->CTX_SUFF(pVM); 12578 10737 if ( !DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_VMX_SPLIT_LOCK) 12579 && !VBOXVMM_VMX_SPLIT_LOCK_ENABLED()) 10738 #ifdef IN_RING0 10739 && !VBOXVMM_VMX_SPLIT_LOCK_ENABLED() 10740 #endif 10741 ) 12580 10742 { 12581 10743 if (pVM->cCpus == 1) … … 12619 10781 if (rcStrict == VINF_SUCCESS) 12620 10782 #if 0 /** @todo r=bird: This is potentially wrong. Might have to just do a whole state sync above and mark everything changed to be safe... 
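The #AC handling that follows needs to tell the architectural alignment-check exception (CPL 3 with CR0.AM and RFLAGS.AC both set) apart from a split-lock detection #AC, which can be raised at any CPL. The predicate is just:

#include <stdint.h>
#include <stdbool.h>

#define CR0_AM     UINT64_C(0x40000)   /* CR0 bit 18: alignment mask */
#define RFLAGS_AC  UINT64_C(0x40000)   /* RFLAGS bit 18: alignment check */

/* True when this #AC is the legacy alignment-check exception the guest asked for;
   false means it is (almost certainly) a split-lock detection #AC. */
static bool isArchitecturalAlignmentCheck(uint64_t uCr0, uint64_t uRFlags, uint8_t uCpl)
{
    return uCpl == 3
        && (uCr0 & CR0_AM)
        && (uRFlags & RFLAGS_AC);
}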
*/ 12621 ASMAtomicUoOrU64(& pVCpu->hm.s.fCtxChanged,10783 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, 12622 10784 HM_CHANGED_GUEST_RIP 12623 10785 | HM_CHANGED_GUEST_RFLAGS … … 12626 10788 | HM_CHANGED_GUEST_SS); 12627 10789 #else 12628 ASMAtomicUoOrU64(& pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);10790 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST); 12629 10791 #endif 12630 10792 else if (rcStrict == VINF_IEM_RAISED_XCPT) 12631 10793 { 12632 ASMAtomicUoOrU64(& pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);10794 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK); 12633 10795 rcStrict = VINF_SUCCESS; 12634 10796 } … … 12640 10802 } 12641 10803 12642 STAM_REL_COUNTER_INC(& pVCpu->hm.s.StatExitGuestAC);10804 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitGuestAC); 12643 10805 Log8Func(("cs:rip=%#04x:%#RX64 rflags=%#RX64 cr0=%#RX64 cpl=%d -> #AC\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, 12644 10806 pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0, CPUMGetGuestCPL(pVCpu) )); … … 12659 10821 { 12660 10822 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient); 12661 STAM_COUNTER_INC(& pVCpu->hm.s.StatExitGuestDB);10823 STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitGuestDB); 12662 10824 12663 10825 /* … … 12675 10837 if (!pVmxTransient->fIsNestedGuest) 12676 10838 { 12677 rc = DBGFTrap01Handler(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx), uDR6, pVCpu->hm.s.fSingleInstruction);10839 rc = DBGFTrap01Handler(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx), uDR6, VCPU_2_VMXSTATE(pVCpu).fSingleInstruction); 12678 10840 12679 10841 /* … … 12685 10847 && (pVmxTransient->pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_MONITOR_TRAP_FLAG)) 12686 10848 { 12687 Assert( pVCpu->hm.s.fSingleInstruction);10849 Assert(VCPU_2_VMXSTATE(pVCpu).fSingleInstruction); 12688 10850 rc = VINF_EM_RAW_GUEST_TRAP; 12689 10851 } … … 12699 10861 * See Intel spec. 27.1 "Architectural State before a VM-Exit". 12700 10862 */ 10863 #ifdef IN_RING0 12701 10864 VMMRZCallRing3Disable(pVCpu); 12702 10865 HM_DISABLE_PREEMPT(pVCpu); … … 12709 10872 HM_RESTORE_PREEMPT(); 12710 10873 VMMRZCallRing3Enable(pVCpu); 10874 #else 10875 /** @todo */ 10876 #endif 12711 10877 12712 10878 rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_DR7); … … 12826 10992 { 12827 10993 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient); 12828 STAM_COUNTER_INC(& pVCpu->hm.s.StatExitGuestGP);10994 STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitGuestGP); 12829 10995 12830 10996 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 12831 10997 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo; 10998 #ifdef IN_RING0 12832 10999 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared; 12833 11000 if (pVmcsInfoShared->RealMode.fRealOnV86Active) 12834 11001 { /* likely */ } 12835 11002 else 11003 #endif 12836 11004 { 12837 11005 #ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS 12838 Assert(pVCpu->hmr0.s.fUsingDebugLoop || pVCpu->hm.s.fTrapXcptGpForLovelyMesaDrv || pVmxTransient->fIsNestedGuest); 11006 # ifdef IN_RING0 11007 Assert(pVCpu->hmr0.s.fUsingDebugLoop || VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv || pVmxTransient->fIsNestedGuest); 11008 # else 11009 Assert(/*pVCpu->hmr0.s.fUsingDebugLoop ||*/ VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv || pVmxTransient->fIsNestedGuest); 11010 # endif 12839 11011 #endif 12840 11012 /* … … 12848 11020 12849 11021 if ( pVmxTransient->fIsNestedGuest 12850 || ! 
pVCpu->hm.s.fTrapXcptGpForLovelyMesaDrv11022 || !VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv 12851 11023 || !vmxHCIsMesaDrvGp(pVCpu, pVmxTransient, pCtx)) 12852 11024 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), … … 12857 11029 } 12858 11030 11031 #ifdef IN_RING0 12859 11032 Assert(CPUMIsGuestInRealModeEx(pCtx)); 12860 11033 Assert(!pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fUnrestrictedGuest); … … 12874 11047 */ 12875 11048 pVmcsInfoShared->RealMode.fRealOnV86Active = false; 12876 if (HMCanExecuteVmxGuest(pVCpu-> pVMR0, pVCpu, pCtx))11049 if (HMCanExecuteVmxGuest(pVCpu->CTX_SUFF(pVM), pVCpu, pCtx)) 12877 11050 { 12878 11051 Log4Func(("Mode changed but guest still suitable for executing using hardware-assisted VMX\n")); 12879 ASMAtomicUoOrU64(& pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);11052 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST); 12880 11053 } 12881 11054 else … … 12886 11059 } 12887 11060 else 12888 ASMAtomicUoOrU64(& pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);11061 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST); 12889 11062 } 12890 11063 else if (rcStrict == VINF_IEM_RAISED_XCPT) 12891 11064 { 12892 11065 rcStrict = VINF_SUCCESS; 12893 ASMAtomicUoOrU64(& pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);11066 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK); 12894 11067 } 12895 11068 return VBOXSTRICTRC_VAL(rcStrict); 11069 #endif 12896 11070 } 12897 11071 … … 12911 11085 12912 11086 #ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS 11087 # ifdef IN_RING0 12913 11088 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo; 12914 11089 AssertMsg(pVCpu->hmr0.s.fUsingDebugLoop || pVmcsInfo->pShared->RealMode.fRealOnV86Active || pVmxTransient->fIsNestedGuest, … … 12916 11091 VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo), pVmcsInfo->u32XcptBitmap)); 12917 11092 NOREF(pVmcsInfo); 11093 # endif 12918 11094 #endif 12919 11095 … … 12927 11103 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP); 12928 11104 AssertRCReturn(rc, rc); 12929 Log4Func(("Reinjecting Xcpt. uVector=%#x cs:rip=%#04x:%#RX64\n", uVector, p Ctx->cs.Sel, pCtx->rip));11105 Log4Func(("Reinjecting Xcpt. 
uVector=%#x cs:rip=%#04x:%#RX64\n", uVector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip)); 12930 11106 #endif 12931 11107 … … 12933 11109 switch (uVector) 12934 11110 { 12935 case X86_XCPT_DE: STAM_COUNTER_INC(& pVCpu->hm.s.StatExitGuestDE); break;12936 case X86_XCPT_DB: STAM_COUNTER_INC(& pVCpu->hm.s.StatExitGuestDB); break;12937 case X86_XCPT_BP: STAM_COUNTER_INC(& pVCpu->hm.s.StatExitGuestBP); break;12938 case X86_XCPT_OF: STAM_COUNTER_INC(& pVCpu->hm.s.StatExitGuestOF); break;12939 case X86_XCPT_BR: STAM_COUNTER_INC(& pVCpu->hm.s.StatExitGuestBR); break;12940 case X86_XCPT_UD: STAM_COUNTER_INC(& pVCpu->hm.s.StatExitGuestUD); break;12941 case X86_XCPT_NM: STAM_COUNTER_INC(& pVCpu->hm.s.StatExitGuestOF); break;12942 case X86_XCPT_DF: STAM_COUNTER_INC(& pVCpu->hm.s.StatExitGuestDF); break;12943 case X86_XCPT_TS: STAM_COUNTER_INC(& pVCpu->hm.s.StatExitGuestTS); break;12944 case X86_XCPT_NP: STAM_COUNTER_INC(& pVCpu->hm.s.StatExitGuestNP); break;12945 case X86_XCPT_SS: STAM_COUNTER_INC(& pVCpu->hm.s.StatExitGuestSS); break;12946 case X86_XCPT_GP: STAM_COUNTER_INC(& pVCpu->hm.s.StatExitGuestGP); break;12947 case X86_XCPT_PF: STAM_COUNTER_INC(& pVCpu->hm.s.StatExitGuestPF); break;12948 case X86_XCPT_MF: STAM_COUNTER_INC(& pVCpu->hm.s.StatExitGuestMF); break;12949 case X86_XCPT_AC: STAM_COUNTER_INC(& pVCpu->hm.s.StatExitGuestAC); break;12950 case X86_XCPT_XF: STAM_COUNTER_INC(& pVCpu->hm.s.StatExitGuestXF); break;11111 case X86_XCPT_DE: STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitGuestDE); break; 11112 case X86_XCPT_DB: STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitGuestDB); break; 11113 case X86_XCPT_BP: STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitGuestBP); break; 11114 case X86_XCPT_OF: STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitGuestOF); break; 11115 case X86_XCPT_BR: STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitGuestBR); break; 11116 case X86_XCPT_UD: STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitGuestUD); break; 11117 case X86_XCPT_NM: STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitGuestOF); break; 11118 case X86_XCPT_DF: STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitGuestDF); break; 11119 case X86_XCPT_TS: STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitGuestTS); break; 11120 case X86_XCPT_NP: STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitGuestNP); break; 11121 case X86_XCPT_SS: STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitGuestSS); break; 11122 case X86_XCPT_GP: STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitGuestGP); break; 11123 case X86_XCPT_PF: STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitGuestPF); break; 11124 case X86_XCPT_MF: STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitGuestMF); break; 11125 case X86_XCPT_AC: STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitGuestAC); break; 11126 case X86_XCPT_XF: STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitGuestXF); break; 12951 11127 default: 12952 STAM_COUNTER_INC(& pVCpu->hm.s.StatExitGuestXcpUnk);11128 STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitGuestXcpUnk); 12953 11129 break; 12954 11130 } … … 12993 11169 Assert(VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo)); 12994 11170 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo); 12995 if ( ! 
pVCpu->hm.s.Event.fPending11171 if ( !VCPU_2_VMXSTATE(pVCpu).Event.fPending 12996 11172 || uVector == X86_XCPT_PF) 12997 11173 { … … 13012 11188 else if (rcStrict == VINF_HM_DOUBLE_FAULT) 13013 11189 { 13014 Assert( pVCpu->hm.s.Event.fPending);11190 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending); 13015 11191 rcStrict = VINF_SUCCESS; 13016 11192 } … … 13034 11210 { 13035 11211 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient); 13036 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitExtInt); 11212 STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitExtInt); 11213 11214 #ifdef IN_RING0 13037 11215 /* Windows hosts (32-bit and 64-bit) have DPC latency issues. See @bugref{6853}. */ 13038 11216 if (VMMR0ThreadCtxHookIsEnabled(pVCpu)) 13039 11217 return VINF_SUCCESS; 13040 11218 return VINF_EM_RAW_INTERRUPT; 11219 #else 11220 return VINF_SUCCESS; 11221 #endif 13041 11222 } 13042 11223 … … 13049 11230 { 13050 11231 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient); 13051 STAM_PROFILE_ADV_START(& pVCpu->hm.s.StatExitXcptNmi, y3);11232 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATE(pVCpu).StatExitXcptNmi, y3); 13052 11233 13053 11234 vmxHCReadExitIntInfoVmcs(pVCpu, pVmxTransient); … … 13065 11246 switch (uExitIntType) 13066 11247 { 11248 #ifdef IN_RING0 /* NMIs should never reach R3. */ 13067 11249 /* 13068 11250 * Host physical NMIs: … … 13079 11261 break; 13080 11262 } 11263 #endif 13081 11264 13082 11265 /* … … 13106 11289 default: 13107 11290 { 13108 pVCpu->hm.s.u32HMError = pVmxTransient->uExitIntInfo;11291 VCPU_2_VMXSTATE(pVCpu).u32HMError = pVmxTransient->uExitIntInfo; 13109 11292 rcStrict = VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE; 13110 11293 AssertMsgFailed(("Invalid/unexpected VM-exit interruption info %#x\n", pVmxTransient->uExitIntInfo)); … … 13113 11296 } 13114 11297 13115 STAM_PROFILE_ADV_STOP(& pVCpu->hm.s.StatExitXcptNmi, y3);11298 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATE(pVCpu).StatExitXcptNmi, y3); 13116 11299 return rcStrict; 13117 11300 } … … 13127 11310 /* Indicate that we no longer need to VM-exit when the guest is ready to receive interrupts, it is now ready. */ 13128 11311 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo; 13129 vmxHCClearIntWindowExitVmcs(pV mcsInfo);11312 vmxHCClearIntWindowExitVmcs(pVCpu, pVmcsInfo); 13130 11313 13131 11314 /* Evaluate and deliver pending events and resume guest execution. */ 13132 STAM_COUNTER_INC(& pVCpu->hm.s.StatExitIntWindow);11315 STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitIntWindow); 13133 11316 return VINF_SUCCESS; 13134 11317 } … … 13170 11353 13171 11354 /* Indicate that we no longer need to VM-exit when the guest is ready to receive NMIs, it is now ready */ 13172 vmxHCClearNmiWindowExitVmcs(pV mcsInfo);11355 vmxHCClearNmiWindowExitVmcs(pVCpu, pVmcsInfo); 13173 11356 13174 11357 /* Evaluate and deliver pending events and resume guest execution. 
*/ … … 13224 11407 rcStrict = IEMExecDecodedCpuid(pVCpu, pVmxTransient->cbExitInstr); 13225 11408 if (rcStrict == VINF_SUCCESS) 13226 ASMAtomicUoOrU64(& pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);11409 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS); 13227 11410 else if (rcStrict == VINF_IEM_RAISED_XCPT) 13228 11411 { 13229 ASMAtomicUoOrU64(& pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);11412 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK); 13230 11413 rcStrict = VINF_SUCCESS; 13231 11414 } … … 13243 11426 13244 11427 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0); 13245 ASMAtomicUoOrU64(& pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);11428 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST); 13246 11429 13247 11430 Log4(("CpuIdExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n", … … 13291 11474 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING) 13292 11475 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false; 13293 ASMAtomicUoOrU64(& pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);11476 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS); 13294 11477 } 13295 11478 else if (rcStrict == VINF_IEM_RAISED_XCPT) 13296 11479 { 13297 ASMAtomicUoOrU64(& pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);11480 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK); 13298 11481 rcStrict = VINF_SUCCESS; 13299 11482 } … … 13321 11504 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING) 13322 11505 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false; 13323 ASMAtomicUoOrU64(& pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);11506 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS); 13324 11507 } 13325 11508 else if (rcStrict == VINF_IEM_RAISED_XCPT) 13326 11509 { 13327 ASMAtomicUoOrU64(& pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);11510 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK); 13328 11511 rcStrict = VINF_SUCCESS; 13329 11512 } … … 13410 11593 { 13411 11594 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient); 11595 #ifdef IN_RING0 13412 11596 Assert(!pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging || pVCpu->hmr0.s.fUsingDebugLoop); 11597 #endif 13413 11598 13414 11599 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo; … … 13421 11606 13422 11607 if (rcStrict == VINF_SUCCESS || rcStrict == VINF_PGM_SYNC_CR3) 13423 ASMAtomicUoOrU64(& pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);11608 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS); 13424 11609 else if (rcStrict == VINF_IEM_RAISED_XCPT) 13425 11610 { 13426 ASMAtomicUoOrU64(& pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);11611 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK); 13427 11612 rcStrict = VINF_SUCCESS; 13428 11613 } … … 13448 11633 VBOXSTRICTRC rcStrict = IEMExecDecodedMonitor(pVCpu, pVmxTransient->cbExitInstr); 13449 11634 if (rcStrict == VINF_SUCCESS) 13450 ASMAtomicUoOrU64(& pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);11635 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS); 13451 11636 else if (rcStrict == 
VINF_IEM_RAISED_XCPT) 13452 11637 { 13453 ASMAtomicUoOrU64(& pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);11638 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK); 13454 11639 rcStrict = VINF_SUCCESS; 13455 11640 } … … 13474 11659 if (RT_SUCCESS(rcStrict)) 13475 11660 { 13476 ASMAtomicUoOrU64(& pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);11661 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS); 13477 11662 if (EMMonitorWaitShouldContinue(pVCpu, &pVCpu->cpum.GstCtx)) 13478 11663 rcStrict = VINF_SUCCESS; … … 13511 11696 13512 11697 if (rc != VINF_SUCCESS) 13513 STAM_COUNTER_INC(& pVCpu->hm.s.StatSwitchHltToR3);11698 STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatSwitchHltToR3); 13514 11699 return rc; 13515 11700 } … … 13542 11727 PVMCC pVM = pVCpu->CTX_SUFF(pVM); 13543 11728 bool fTimersPending = TMTimerPollBool(pVM, pVCpu); 13544 STAM_REL_COUNTER_INC(& pVCpu->hm.s.StatExitPreemptTimer);11729 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitPreemptTimer); 13545 11730 return fTimersPending ? VINF_EM_RAW_TIMER_PENDING : VINF_SUCCESS; 13546 11731 } … … 13554 11739 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient); 13555 11740 11741 #ifdef IN_RING0 13556 11742 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo; 13557 11743 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient); … … 13560 11746 13561 11747 VBOXSTRICTRC rcStrict = IEMExecDecodedXsetbv(pVCpu, pVmxTransient->cbExitInstr); 13562 ASMAtomicUoOrU64(& pVCpu->hm.s.fCtxChanged, rcStrict != VINF_IEM_RAISED_XCPT ? HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS11748 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, rcStrict != VINF_IEM_RAISED_XCPT ? HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS 13563 11749 : HM_CHANGED_RAISED_XCPT_MASK); 13564 11750 … … 13572 11758 13573 11759 return rcStrict; 11760 #else 11761 return VERR_EM_INTERPRETER; 11762 #endif 13574 11763 } 13575 11764 … … 13607 11796 GCPtrDesc, uType); 13608 11797 if (RT_LIKELY(rcStrict == VINF_SUCCESS)) 13609 ASMAtomicUoOrU64(& pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);11798 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS); 13610 11799 else if (rcStrict == VINF_IEM_RAISED_XCPT) 13611 11800 { 13612 ASMAtomicUoOrU64(& pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);11801 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK); 13613 11802 rcStrict = VINF_SUCCESS; 13614 11803 } … … 13659 11848 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, &u64Val); AssertRC(rc); 13660 11849 Log4(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RX64\n", u64Val)); 11850 # ifdef IN_RING0 13661 11851 if (pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging) 13662 11852 { … … 13664 11854 Log4(("VMX_VMCS64_CTRL_EPTP_FULL %#RX64\n", u64Val)); 13665 11855 } 11856 13666 11857 hmR0DumpRegs(pVCpu, HM_DUMP_REG_FLAGS_ALL); 11858 # endif 13667 11859 #endif 13668 11860 … … 13784 11976 Log4Func(("ecx=%#RX32\n", idMsr)); 13785 11977 13786 #if def VBOX_STRICT11978 #if defined(VBOX_STRICT) && defined(IN_RING0) 13787 11979 Assert(!pVmxTransient->fIsNestedGuest); 13788 11980 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS) … … 13808 12000 13809 12001 VBOXSTRICTRC rcStrict = IEMExecDecodedRdmsr(pVCpu, pVmxTransient->cbExitInstr); 13810 STAM_COUNTER_INC(& pVCpu->hm.s.StatExitRdmsr);12002 STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitRdmsr); 13811 12003 if (rcStrict == VINF_SUCCESS) 
13812 ASMAtomicUoOrU64(& pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);12004 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS); 13813 12005 else if (rcStrict == VINF_IEM_RAISED_XCPT) 13814 12006 { 13815 ASMAtomicUoOrU64(& pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);12007 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK); 13816 12008 rcStrict = VINF_SUCCESS; 13817 12009 } … … 13858 12050 13859 12051 VBOXSTRICTRC rcStrict = IEMExecDecodedWrmsr(pVCpu, pVmxTransient->cbExitInstr); 13860 STAM_COUNTER_INC(& pVCpu->hm.s.StatExitWrmsr);12052 STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitWrmsr); 13861 12053 13862 12054 if (rcStrict == VINF_SUCCESS) 13863 12055 { 13864 ASMAtomicUoOrU64(& pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);12056 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS); 13865 12057 13866 12058 /* If this is an X2APIC WRMSR access, update the APIC state as well. */ … … 13874 12066 * sure APIC state is saved from the VMCS before IEM changes it. 13875 12067 */ 13876 ASMAtomicUoOrU64(& pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_APIC_TPR);12068 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_APIC_TPR); 13877 12069 } 13878 12070 else if (idMsr == MSR_IA32_TSC) /* Windows 7 does this during bootup. See @bugref{6398}. */ … … 13885 12077 * We care about the other bits as well, SCE and NXE. See @bugref{7368}. 13886 12078 */ 13887 ASMAtomicUoOrU64(& pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_EXIT_CTLS);12079 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_EXIT_CTLS); 13888 12080 } 13889 12081 … … 13893 12085 switch (idMsr) 13894 12086 { 13895 case MSR_IA32_SYSENTER_CS: ASMAtomicUoOrU64(& pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_SYSENTER_CS_MSR); break;13896 case MSR_IA32_SYSENTER_EIP: ASMAtomicUoOrU64(& pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_SYSENTER_EIP_MSR); break;13897 case MSR_IA32_SYSENTER_ESP: ASMAtomicUoOrU64(& pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_SYSENTER_ESP_MSR); break;13898 case MSR_K8_FS_BASE: ASMAtomicUoOrU64(& pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_FS); break;13899 case MSR_K8_GS_BASE: ASMAtomicUoOrU64(& pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_GS); break;12087 case MSR_IA32_SYSENTER_CS: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_CS_MSR); break; 12088 case MSR_IA32_SYSENTER_EIP: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_EIP_MSR); break; 12089 case MSR_IA32_SYSENTER_ESP: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_ESP_MSR); break; 12090 case MSR_K8_FS_BASE: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_FS); break; 12091 case MSR_K8_GS_BASE: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_GS); break; 13900 12092 case MSR_K6_EFER: /* Nothing to do, already handled above. 
*/ break; 13901 12093 default: 13902 12094 { 12095 #ifdef IN_RING0 13903 12096 if (vmxHCIsLazyGuestMsr(pVCpu, idMsr)) 13904 ASMAtomicUoOrU64(& pVCpu->hm.s.fCtxChanged, HM_CHANGED_VMX_GUEST_LAZY_MSRS);12097 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_VMX_GUEST_LAZY_MSRS); 13905 12098 else if (vmxHCIsAutoLoadGuestMsr(pVmcsInfo, idMsr)) 13906 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_VMX_GUEST_AUTO_MSRS); 12099 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_VMX_GUEST_AUTO_MSRS); 12100 #else 12101 AssertMsgFailed(("TODO\n")); 12102 #endif 13907 12103 break; 13908 12104 } 13909 12105 } 13910 12106 } 13911 #if def VBOX_STRICT12107 #if defined(VBOX_STRICT) && defined(IN_RING0) 13912 12108 else 13913 12109 { … … 13957 12153 else if (rcStrict == VINF_IEM_RAISED_XCPT) 13958 12154 { 13959 ASMAtomicUoOrU64(& pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);12155 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK); 13960 12156 rcStrict = VINF_SUCCESS; 13961 12157 } … … 14000 12196 * entry so we can just continue execution here. 14001 12197 */ 14002 STAM_COUNTER_INC(& pVCpu->hm.s.StatExitTprBelowThreshold);12198 STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitTprBelowThreshold); 14003 12199 return VINF_SUCCESS; 14004 12200 } … … 14017 12213 { 14018 12214 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient); 14019 STAM_PROFILE_ADV_START(& pVCpu->hm.s.StatExitMovCRx, y2);12215 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATE(pVCpu).StatExitMovCRx, y2); 14020 12216 14021 12217 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo; … … 14044 12240 14045 12241 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0); 12242 #ifdef IN_RING0 14046 12243 uint32_t const uOldCr0 = pVCpu->cpum.GstCtx.cr0; 12244 #endif 14047 12245 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(uExitQual); 14048 12246 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(uExitQual); … … 14054 12252 * - We are executing in the VM debug loop. 14055 12253 */ 12254 #ifdef IN_RING0 14056 12255 Assert( iCrReg != 3 14057 12256 || !pVM->hmr0.s.fNestedPaging 14058 12257 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx) 14059 12258 || pVCpu->hmr0.s.fUsingDebugLoop); 12259 #else 12260 Assert( iCrReg != 3 12261 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)); 12262 #endif 14060 12263 14061 12264 /* MOV to CR8 writes only cause VM-exits when TPR shadow is not used. */ … … 14067 12270 || rcStrict == VINF_PGM_SYNC_CR3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict))); 14068 12271 12272 #ifdef IN_RING0 14069 12273 /* 14070 12274 * This is a kludge for handling switches back to real mode when we try to use … … 14089 12293 rcStrict = VINF_EM_RESCHEDULE_REM; 14090 12294 } 12295 #endif 12296 14091 12297 break; 14092 12298 } … … 14106 12312 * - We are executing in the VM debug loop. 14107 12313 */ 12314 #ifdef IN_RING0 14108 12315 Assert( iCrReg != 3 14109 12316 || !pVM->hmr0.s.fNestedPaging 14110 12317 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx) 14111 12318 || pVCpu->hmr0.s.fLeaveDone); 12319 #else 12320 Assert( iCrReg != 3 12321 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)); 12322 #endif 14112 12323 14113 12324 /* MOV from CR8 reads only cause a VM-exit when the TPR shadow feature isn't enabled. 
*/ … … 14156 12367 } 14157 12368 14158 Assert(( pVCpu->hm.s.fCtxChanged & (HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS))12369 Assert((VCPU_2_VMXSTATE(pVCpu).fCtxChanged & (HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS)) 14159 12370 == (HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS)); 14160 12371 Assert(rcStrict != VINF_IEM_RAISED_XCPT); 14161 12372 14162 STAM_PROFILE_ADV_STOP(& pVCpu->hm.s.StatExitMovCRx, y2);12373 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATE(pVCpu).StatExitMovCRx, y2); 14163 12374 NOREF(pVM); 14164 12375 return rcStrict; … … 14173 12384 { 14174 12385 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient); 14175 STAM_PROFILE_ADV_START(& pVCpu->hm.s.StatExitIO, y1);12386 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATE(pVCpu).StatExitIO, y1); 14176 12387 14177 12388 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx; … … 14190 12401 bool const fIOString = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQual); 14191 12402 bool const fGstStepping = RT_BOOL(pCtx->eflags.Bits.u1TF); 14192 bool const fDbgStepping = pVCpu->hm.s.fSingleInstruction;12403 bool const fDbgStepping = VCPU_2_VMXSTATE(pVCpu).fSingleInstruction; 14193 12404 AssertReturn(uIOSize <= 3 && uIOSize != 2, VERR_VMX_IPE_1); 14194 12405 … … 14253 12464 rcStrict = IEMExecOne(pVCpu); 14254 12465 14255 ASMAtomicUoOrU64(& pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP);12466 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP); 14256 12467 fUpdateRipAlready = true; 14257 12468 } … … 14267 12478 { 14268 12479 rcStrict = IOMIOPortWrite(pVM, pVCpu, uIOPort, pCtx->eax & uAndVal, cbValue); 14269 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOWrite); 12480 STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitIOWrite); 12481 #ifdef IN_RING0 14270 12482 if ( rcStrict == VINF_IOM_R3_IOPORT_WRITE 14271 12483 && !pCtx->eflags.Bits.u1TF) 14272 12484 rcStrict = EMRZSetPendingIoPortWrite(pVCpu, uIOPort, cbInstr, cbValue, pCtx->eax & uAndVal); 12485 #endif 14273 12486 } 14274 12487 else … … 14281 12494 pCtx->eax = (pCtx->eax & ~uAndVal) | (u32Result & uAndVal); 14282 12495 } 12496 #ifdef IN_RING0 14283 12497 if ( rcStrict == VINF_IOM_R3_IOPORT_READ 14284 12498 && !pCtx->eflags.Bits.u1TF) 14285 12499 rcStrict = EMRZSetPendingIoPortRead(pVCpu, uIOPort, cbInstr, cbValue); 14286 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIORead); 12500 #endif 12501 STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitIORead); 14287 12502 } 14288 12503 } … … 14293 12508 { 14294 12509 vmxHCAdvanceGuestRipBy(pVCpu, cbInstr); 14295 ASMAtomicUoOrU64(& pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP);12510 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP); 14296 12511 } 14297 12512 … … 14303 12518 */ 14304 12519 if (fIOString) 14305 ASMAtomicUoOrU64(& pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RFLAGS);12520 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RFLAGS); 14306 12521 14307 12522 /* … … 14321 12536 || DBGFBpIsHwIoArmed(pVM))) 14322 12537 { 14323 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxIoCheck); 14324 12538 STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatDRxIoCheck); 12539 12540 #ifdef IN_RING0 14325 12541 /* We're playing with the host CPU state here, make sure we don't preempt or longjmp. 
*/ 14326 12542 VMMRZCallRing3Disable(pVCpu); … … 14336 12552 ASMSetDR6(pCtx->dr[6]); 14337 12553 if (pCtx->dr[7] != uDr7) 14338 pVCpu->hm.s.fCtxChanged |= HM_CHANGED_GUEST_DR7;12554 VCPU_2_VMXSTATE(pVCpu).fCtxChanged |= HM_CHANGED_GUEST_DR7; 14339 12555 14340 12556 vmxHCSetPendingXcptDB(pVCpu); … … 14349 12565 HM_RESTORE_PREEMPT(); 14350 12566 VMMRZCallRing3Enable(pVCpu); 12567 #else 12568 /** @todo */ 12569 #endif 14351 12570 } 14352 12571 } … … 14374 12593 } 14375 12594 #endif 14376 STAM_PROFILE_ADV_STOP(& pVCpu->hm.s.StatExitIO, y1);12595 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATE(pVCpu).StatExitIO, y1); 14377 12596 } 14378 12597 else … … 14383 12602 int rc2 = vmxHCImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL); 14384 12603 AssertRCReturn(rc2, rc2); 14385 STAM_COUNTER_INC(!fIOString ? fIOWrite ? & pVCpu->hm.s.StatExitIOWrite : &pVCpu->hm.s.StatExitIORead14386 : fIOWrite ? & pVCpu->hm.s.StatExitIOStringWrite : &pVCpu->hm.s.StatExitIOStringRead);12604 STAM_COUNTER_INC(!fIOString ? fIOWrite ? &VCPU_2_VMXSTATE(pVCpu).StatExitIOWrite : &VCPU_2_VMXSTATE(pVCpu).StatExitIORead 12605 : fIOWrite ? &VCPU_2_VMXSTATE(pVCpu).StatExitIOStringWrite : &VCPU_2_VMXSTATE(pVCpu).StatExitIOStringRead); 14387 12606 Log4(("IOExit/%u: %04x:%08RX64: %s%s%s %#x LB %u -> EMHistoryExec\n", 14388 12607 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, … … 14391 12610 14392 12611 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0); 14393 ASMAtomicUoOrU64(& pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);12612 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST); 14394 12613 14395 12614 Log4(("IOExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n", … … 14438 12657 Log4Func(("Pending event. uIntType=%#x uVector=%#x\n", VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uIdtVectoringInfo), 14439 12658 VMX_IDT_VECTORING_INFO_VECTOR(pVmxTransient->uIdtVectoringInfo))); 14440 STAM_COUNTER_INC(& pVCpu->hm.s.StatExitTaskSwitch);12659 STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitTaskSwitch); 14441 12660 return VINF_EM_RAW_INJECT_TRPM_EVENT; 14442 12661 } … … 14444 12663 14445 12664 /* Fall back to the interpreter to emulate the task-switch. */ 14446 STAM_COUNTER_INC(& pVCpu->hm.s.StatExitTaskSwitch);12665 STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitTaskSwitch); 14447 12666 return VERR_EM_INTERPRETER; 14448 12667 } … … 14470 12689 { 14471 12690 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient); 14472 STAM_COUNTER_INC(& pVCpu->hm.s.StatExitApicAccess);12691 STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitApicAccess); 14473 12692 14474 12693 vmxHCReadExitIntInfoVmcs(pVCpu, pVmxTransient); … … 14485 12704 { 14486 12705 /* For some crazy guest, if an event delivery causes an APIC-access VM-exit, go to instruction emulation. */ 14487 if (RT_UNLIKELY( pVCpu->hm.s.Event.fPending))14488 { 14489 STAM_COUNTER_INC(& pVCpu->hm.s.StatInjectInterpret);12706 if (RT_UNLIKELY(VCPU_2_VMXSTATE(pVCpu).Event.fPending)) 12707 { 12708 STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatInjectInterpret); 14490 12709 return VINF_EM_RAW_INJECT_TRPM_EVENT; 14491 12710 } … … 14507 12726 switch (uAccessType) 14508 12727 { 12728 #ifdef IN_RING0 14509 12729 case VMX_APIC_ACCESS_TYPE_LINEAR_WRITE: 14510 12730 case VMX_APIC_ACCESS_TYPE_LINEAR_READ: … … 14514 12734 ("vmxHCExitApicAccess: can't access TPR offset while using TPR shadowing.\n")); 14515 12735 14516 RTGCPHYS GCPhys = pVCpu->hm.s.vmx.u64GstMsrApicBase; /* Always up-to-date, as it is not part of the VMCS. 
*/12736 RTGCPHYS GCPhys = VCPU_2_VMXSTATE(pVCpu).vmx.u64GstMsrApicBase; /* Always up-to-date, as it is not part of the VMCS. */ 14517 12737 GCPhys &= PAGE_BASE_GC_MASK; 14518 12738 GCPhys += VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual); … … 14527 12747 || rcStrict == VERR_PAGE_NOT_PRESENT) 14528 12748 { 14529 ASMAtomicUoOrU64(& pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS12749 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS 14530 12750 | HM_CHANGED_GUEST_APIC_TPR); 14531 12751 rcStrict = VINF_SUCCESS; … … 14533 12753 break; 14534 12754 } 12755 #else 12756 /** @todo */ 12757 #endif 14535 12758 14536 12759 default: … … 14543 12766 14544 12767 if (rcStrict != VINF_SUCCESS) 14545 STAM_COUNTER_INC(& pVCpu->hm.s.StatSwitchApicAccessToR3);12768 STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatSwitchApicAccessToR3); 14546 12769 return rcStrict; 14547 12770 } … … 14571 12794 } 14572 12795 14573 if ( ! pVCpu->hm.s.fSingleInstruction12796 if ( !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction 14574 12797 && !pVmxTransient->fWasHyperDebugStateActive) 14575 12798 { … … 14582 12805 AssertRC(rc); 14583 12806 12807 #ifdef IN_RING0 14584 12808 /* We're playing with the host CPU state here, make sure we can't preempt or longjmp. */ 14585 12809 VMMRZCallRing3Disable(pVCpu); … … 14592 12816 HM_RESTORE_PREEMPT(); 14593 12817 VMMRZCallRing3Enable(pVCpu); 12818 #else 12819 /** @todo */ 12820 #endif 14594 12821 14595 12822 #ifdef VBOX_WITH_STATISTICS 14596 12823 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient); 14597 12824 if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE) 14598 STAM_COUNTER_INC(& pVCpu->hm.s.StatExitDRxWrite);12825 STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitDRxWrite); 14599 12826 else 14600 STAM_COUNTER_INC(& pVCpu->hm.s.StatExitDRxRead);12827 STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitDRxRead); 14601 12828 #endif 14602 STAM_COUNTER_INC(& pVCpu->hm.s.StatDRxContextSwitch);12829 STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatDRxContextSwitch); 14603 12830 return VINF_SUCCESS; 14604 12831 } … … 14623 12850 VMX_EXIT_QUAL_DRX_GENREG(pVmxTransient->uExitQual)); 14624 12851 if (RT_SUCCESS(rc)) 14625 ASMAtomicUoOrU64(& pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_DR7);14626 STAM_COUNTER_INC(& pVCpu->hm.s.StatExitDRxWrite);12852 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_DR7); 12853 STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitDRxWrite); 14627 12854 } 14628 12855 else … … 14631 12858 VMX_EXIT_QUAL_DRX_GENREG(pVmxTransient->uExitQual), 14632 12859 VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual)); 14633 STAM_COUNTER_INC(& pVCpu->hm.s.StatExitDRxRead);12860 STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitDRxRead); 14634 12861 } 14635 12862 … … 14652 12879 { 14653 12880 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient); 12881 12882 #ifdef IN_RING0 14654 12883 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging); 14655 12884 … … 14671 12900 * using hardware-assisted VMX would trigger the same EPT misconfig VM-exit again. 14672 12901 */ 14673 if (! 
pVCpu->hm.s.Event.fPending)12902 if (!VCPU_2_VMXSTATE(pVCpu).Event.fPending) 14674 12903 { /* likely */ } 14675 12904 else 14676 12905 { 14677 STAM_COUNTER_INC(& pVCpu->hm.s.StatInjectInterpret);12906 STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatInjectInterpret); 14678 12907 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX 14679 12908 /** @todo NSTVMX: Think about how this should be handled. */ … … 14729 12958 { 14730 12959 /* Successfully handled MMIO operation. */ 14731 ASMAtomicUoOrU64(& pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS12960 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS 14732 12961 | HM_CHANGED_GUEST_APIC_TPR); 14733 12962 rcStrict = VINF_SUCCESS; … … 14743 12972 14744 12973 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0); 14745 ASMAtomicUoOrU64(& pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);12974 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST); 14746 12975 14747 12976 Log4(("EptMisscfgExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n", … … 14750 12979 } 14751 12980 return rcStrict; 12981 #else 12982 AssertFailed(); 12983 return VERR_VMX_IPE_3; /* Should never happen with Apple HV in R3. */ 12984 #endif 14752 12985 } 14753 12986 … … 14760 12993 { 14761 12994 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient); 12995 #ifdef IN_RING0 14762 12996 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging); 14763 12997 … … 14779 13013 * we shall resolve the nested #PF and re-inject the original event. 14780 13014 */ 14781 if ( pVCpu->hm.s.Event.fPending)14782 STAM_COUNTER_INC(& pVCpu->hm.s.StatInjectReflectNPF);13015 if (VCPU_2_VMXSTATE(pVCpu).Event.fPending) 13016 STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatInjectReflectNPF); 14783 13017 } 14784 13018 else … … 14805 13039 uErrorCode |= X86_TRAP_PF_P; 14806 13040 14807 PVMCC pVM = pVCpu->CTX_SUFF(pVM);14808 13041 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 14809 13042 Log4Func(("at %#RX64 (%#RX64 errcode=%#x) cs:rip=%#04x:%#RX64\n", GCPhys, uExitQual, uErrorCode, pCtx->cs.Sel, pCtx->rip)); 13043 13044 PVMCC pVM = pVCpu->CTX_SUFF(pVM); 14810 13045 14811 13046 /* … … 14822 13057 { 14823 13058 /* Successfully synced our nested page tables. */ 14824 STAM_COUNTER_INC(& pVCpu->hm.s.StatExitReasonNpf);14825 ASMAtomicUoOrU64(& pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS);13059 STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitReasonNpf); 13060 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS); 14826 13061 return VINF_SUCCESS; 14827 13062 } 13063 #else 13064 PVM pVM = pVCpu->CTX_SUFF(pVM); 13065 uint64_t const uHostTsc = ASMReadTSC(); RT_NOREF(uHostTsc); 13066 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient); 13067 vmxHCReadGuestPhysicalAddrVmcs(pVCpu, pVmxTransient); 13068 vmxHCImportGuestRip(pVCpu); 13069 vmxHCImportGuestSegReg(pVCpu, X86_SREG_CS); 13070 13071 /* 13072 * Ask PGM for information about the given GCPhys. We need to check if we're 13073 * out of sync first. 
13074 */ 13075 NEMHCDARWINHMACPCCSTATE State = { RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE), false, false }; 13076 PGMPHYSNEMPAGEINFO Info; 13077 int rc = PGMPhysNemPageInfoChecker(pVM, pVCpu, pVmxTransient->uGuestPhysicalAddr, State.fWriteAccess, &Info, 13078 nemR3DarwinHandleMemoryAccessPageCheckerCallback, &State); 13079 if (RT_SUCCESS(rc)) 13080 { 13081 if (Info.fNemProt & ( RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE) 13082 ? NEM_PAGE_PROT_WRITE : NEM_PAGE_PROT_READ)) 13083 { 13084 if (State.fCanResume) 13085 { 13086 Log4(("MemExit/%u: %04x:%08RX64: %RGp (=>%RHp) %s fProt=%u%s%s%s; restarting\n", 13087 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, 13088 pVmxTransient->uGuestPhysicalAddr, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt, 13089 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "", 13090 State.fDidSomething ? "" : " no-change")); 13091 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_MEMORY_ACCESS), 13092 pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.rip, uHostTsc); 13093 return VINF_SUCCESS; 13094 } 13095 } 13096 13097 Log4(("MemExit/%u: %04x:%08RX64: %RGp (=>%RHp) %s fProt=%u%s%s%s; emulating\n", 13098 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, 13099 pVmxTransient->uGuestPhysicalAddr, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt, 13100 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "", 13101 State.fDidSomething ? "" : " no-change")); 13102 } 13103 else 13104 Log4(("MemExit/%u: %04x:%08RX64: %RGp rc=%Rrc%s; emulating\n", 13105 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, 13106 pVmxTransient->uGuestPhysicalAddr, rc, State.fDidSomething ? " modified-backing" : "")); 13107 13108 /* 13109 * Emulate the memory access, either access handler or special memory. 13110 */ 13111 PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu, 13112 RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE) 13113 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_WRITE) 13114 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_READ), 13115 pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.rip, uHostTsc); 13116 #if 0 13117 rc = nemR3DarwinCopyStateFromHv(pVM, pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS 13118 | NEM_DARWIN_CPUMCTX_EXTRN_MASK_FOR_IEM | CPUMCTX_EXTRN_DS | CPUMCTX_EXTRN_ES); 13119 AssertRCReturn(rc, rc); 13120 #endif 13121 VBOXSTRICTRC rcStrict; 13122 if (!pExitRec) 13123 { 13124 rcStrict = IEMExecOne(pVCpu); 13125 /** @todo do we need to do anything wrt debugging here? */ 13126 } 13127 else 13128 { 13129 /* Frequent access or probing. 
*/ 13130 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0); 13131 Log4(("MemExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n", 13132 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, 13133 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip)); 13134 } 13135 #endif 14828 13136 14829 13137 Log4Func(("EPT return to ring-3 rcStrict2=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict))); … … 14860 13168 VBOXSTRICTRC rcStrict = IEMExecDecodedVmclear(pVCpu, &ExitInfo); 14861 13169 if (RT_LIKELY(rcStrict == VINF_SUCCESS)) 14862 ASMAtomicUoOrU64(& pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);13170 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT); 14863 13171 else if (rcStrict == VINF_IEM_RAISED_XCPT) 14864 13172 { 14865 ASMAtomicUoOrU64(& pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);13173 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK); 14866 13174 rcStrict = VINF_SUCCESS; 14867 13175 } … … 14885 13193 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason); 14886 13194 14887 STAM_PROFILE_ADV_START(& pVCpu->hm.s.StatExitVmentry, z);13195 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATE(pVCpu).StatExitVmentry, z); 14888 13196 VBOXSTRICTRC rcStrict = IEMExecDecodedVmlaunchVmresume(pVCpu, pVmxTransient->cbExitInstr, VMXINSTRID_VMLAUNCH); 14889 STAM_PROFILE_ADV_STOP(& pVCpu->hm.s.StatExitVmentry, z);13197 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATE(pVCpu).StatExitVmentry, z); 14890 13198 if (RT_LIKELY(rcStrict == VINF_SUCCESS)) 14891 13199 { 14892 ASMAtomicUoOrU64(& pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);13200 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST); 14893 13201 if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx)) 14894 13202 rcStrict = VINF_VMX_VMLAUNCH_VMRESUME; … … 14926 13234 VBOXSTRICTRC rcStrict = IEMExecDecodedVmptrld(pVCpu, &ExitInfo); 14927 13235 if (RT_LIKELY(rcStrict == VINF_SUCCESS)) 14928 ASMAtomicUoOrU64(& pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);13236 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT); 14929 13237 else if (rcStrict == VINF_IEM_RAISED_XCPT) 14930 13238 { 14931 ASMAtomicUoOrU64(& pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);13239 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK); 14932 13240 rcStrict = VINF_SUCCESS; 14933 13241 } … … 14963 13271 VBOXSTRICTRC rcStrict = IEMExecDecodedVmptrst(pVCpu, &ExitInfo); 14964 13272 if (RT_LIKELY(rcStrict == VINF_SUCCESS)) 14965 ASMAtomicUoOrU64(& pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);13273 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS); 14966 13274 else if (rcStrict == VINF_IEM_RAISED_XCPT) 14967 13275 { 14968 ASMAtomicUoOrU64(& pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);13276 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK); 14969 13277 rcStrict = VINF_SUCCESS; 14970 13278 } … … 15006 13314 VBOXSTRICTRC rcStrict = IEMExecDecodedVmread(pVCpu, &ExitInfo); 15007 13315 if (RT_LIKELY(rcStrict == VINF_SUCCESS)) 15008 ASMAtomicUoOrU64(& pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);13316 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, 
HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS); 15009 13317 else if (rcStrict == VINF_IEM_RAISED_XCPT) 15010 13318 { 15011 ASMAtomicUoOrU64(& pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);13319 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK); 15012 13320 rcStrict = VINF_SUCCESS; 15013 13321 } … … 15031 13339 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason); 15032 13340 15033 STAM_PROFILE_ADV_START(& pVCpu->hm.s.StatExitVmentry, z);13341 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATE(pVCpu).StatExitVmentry, z); 15034 13342 VBOXSTRICTRC rcStrict = IEMExecDecodedVmlaunchVmresume(pVCpu, pVmxTransient->cbExitInstr, VMXINSTRID_VMRESUME); 15035 STAM_PROFILE_ADV_STOP(& pVCpu->hm.s.StatExitVmentry, z);13343 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATE(pVCpu).StatExitVmentry, z); 15036 13344 if (RT_LIKELY(rcStrict == VINF_SUCCESS)) 15037 13345 { 15038 ASMAtomicUoOrU64(& pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);13346 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST); 15039 13347 if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx)) 15040 13348 rcStrict = VINF_VMX_VMLAUNCH_VMRESUME; … … 15078 13386 VBOXSTRICTRC rcStrict = IEMExecDecodedVmwrite(pVCpu, &ExitInfo); 15079 13387 if (RT_LIKELY(rcStrict == VINF_SUCCESS)) 15080 ASMAtomicUoOrU64(& pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);13388 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT); 15081 13389 else if (rcStrict == VINF_IEM_RAISED_XCPT) 15082 13390 { 15083 ASMAtomicUoOrU64(& pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);13391 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK); 15084 13392 rcStrict = VINF_SUCCESS; 15085 13393 } … … 15105 13413 VBOXSTRICTRC rcStrict = IEMExecDecodedVmxoff(pVCpu, pVmxTransient->cbExitInstr); 15106 13414 if (RT_LIKELY(rcStrict == VINF_SUCCESS)) 15107 ASMAtomicUoOrU64(& pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_HWVIRT);13415 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_HWVIRT); 15108 13416 else if (rcStrict == VINF_IEM_RAISED_XCPT) 15109 13417 { 15110 ASMAtomicUoOrU64(& pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);13418 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK); 15111 13419 rcStrict = VINF_SUCCESS; 15112 13420 } … … 15142 13450 VBOXSTRICTRC rcStrict = IEMExecDecodedVmxon(pVCpu, &ExitInfo); 15143 13451 if (RT_LIKELY(rcStrict == VINF_SUCCESS)) 15144 ASMAtomicUoOrU64(& pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);13452 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT); 15145 13453 else if (rcStrict == VINF_IEM_RAISED_XCPT) 15146 13454 { 15147 ASMAtomicUoOrU64(& pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);13455 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK); 15148 13456 rcStrict = VINF_SUCCESS; 15149 13457 } … … 15178 13486 VBOXSTRICTRC rcStrict = IEMExecDecodedInvvpid(pVCpu, &ExitInfo); 15179 13487 if (RT_LIKELY(rcStrict == VINF_SUCCESS)) 15180 ASMAtomicUoOrU64(& pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);13488 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS); 15181 13489 else if (rcStrict == 
VINF_IEM_RAISED_XCPT) 15182 13490 { 15183 ASMAtomicUoOrU64(& pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);13491 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK); 15184 13492 rcStrict = VINF_SUCCESS; 15185 13493 } … … 15295 13603 default: 15296 13604 { 15297 pVCpu->hm.s.u32HMError = pVmxTransient->uExitIntInfo;13605 VCPU_2_VMXSTATE(pVCpu).u32HMError = pVmxTransient->uExitIntInfo; 15298 13606 return VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE; 15299 13607 } … … 15634 13942 if (rcStrict == VINF_IEM_RAISED_XCPT) 15635 13943 { 15636 ASMAtomicUoOrU64(& pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);13944 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK); 15637 13945 rcStrict = VINF_SUCCESS; 15638 13946 }
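The pattern repeated throughout these hunks — pVCpu->hm.s.Xxx rewritten as VCPU_2_VMXSTATE(pVCpu).Xxx, ring-0-only code fenced with IN_RING0, and ring-3 fallbacks added — is what lets this one template source file build for both the ring-0 VMM and a ring-3 (NEM) execution context. The following is a minimal standalone sketch of that idea only; the struct names, fields and macro body are illustrative assumptions for this sketch, not the actual VirtualBox definitions.

/* Illustrative sketch: a context-dependent accessor macro lets one template
 * source file compile for two execution contexts.  The types and the
 * IN_RING0 switch mirror the idea in this changeset, not the real
 * VirtualBox structures. */
#include <stdint.h>
#include <stdio.h>

typedef struct VMXSTATESKETCH
{
    uint64_t fCtxChanged;      /* dirty-state flags shared by both contexts */
    uint32_t cExitsHandled;    /* stand-in for the per-exit STAM counters   */
} VMXSTATESKETCH;

typedef struct VCPUSKETCH
{
    VMXSTATESKETCH Ring3State; /* state reachable from ring-3 (NEM) code    */
    VMXSTATESKETCH Ring0State; /* state reachable only from ring-0 code     */
} VCPUSKETCH;

/* The template never dereferences pVCpu->... directly; it always goes
 * through this macro, which resolves to the right substate per build. */
#ifdef IN_RING0
# define VCPU_2_VMXSTATE_SKETCH(a_pVCpu)  ((a_pVCpu)->Ring0State)
#else
# define VCPU_2_VMXSTATE_SKETCH(a_pVCpu)  ((a_pVCpu)->Ring3State)
#endif

/* A template-style exit handler: identical source in both builds. */
static void handleExitSketch(VCPUSKETCH *pVCpu)
{
    VCPU_2_VMXSTATE_SKETCH(pVCpu).cExitsHandled += 1;
    VCPU_2_VMXSTATE_SKETCH(pVCpu).fCtxChanged  |= UINT64_C(0x1); /* e.g. RIP changed */
}

int main(void)
{
    VCPUSKETCH VCpu = { { 0, 0 }, { 0, 0 } };
    handleExitSketch(&VCpu);
    printf("exits handled in this context: %u\n",
           (unsigned)VCPU_2_VMXSTATE_SKETCH(&VCpu).cExitsHandled);
    return 0;
}

Compiled with -DIN_RING0 the handler updates Ring0State, without it Ring3State, which is the same split the changeset introduces for the HM/NEM builds of the template.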