Changeset 78220 in vbox for trunk/src/VBox
- Timestamp: Apr 20, 2019 4:08:44 AM
- Location: trunk/src/VBox/VMM
- Files: 19 edited
trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp
(diff against r77712)

* CPUMApplyNestedGuestTscOffset(): reworded the doxygen comment ("... returns the TSC value for
  the nested-guest" instead of "... returns the new TSC value for the guest (or nested-guest)"),
  changed the @sa reference from HMApplySvmNstGstTscOffset to CPUMRemoveNestedGuestTscOffset, and
  added a review todo in the SVM branch:

      /** @todo r=bird: Bake HMApplySvmNstGstTscOffset into HMHasGuestSvmVmcbCached to save a call. */

* Added CPUMRemoveNestedGuestTscOffset(), the inverse of CPUMApplyNestedGuestTscOffset():

      /**
       * Removes the TSC offset of a nested-guest if any and returns the TSC value for the
       * guest.
       *
       * @returns The TSC offset after removing any nested-guest TSC offset.
       * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
       * @param   uTicks  The nested-guest TSC.
       *
       * @sa CPUMApplyNestedGuestTscOffset.
       */
      VMM_INT_DECL(uint64_t) CPUMRemoveNestedGuestTscOffset(PVMCPU pVCpu, uint64_t uTicks)
      {
      #ifndef IN_RC
          PCCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
          if (CPUMIsGuestInVmxNonRootMode(pCtx))
          {
              PCVMXVVMCS pVmcs = pCtx->hwvirt.vmx.CTX_SUFF(pVmcs);
              if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING)
                  return uTicks - pVmcs->u64TscOffset.u;
              return uTicks;
          }

          if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
          {
              /** @todo r=bird: Bake HMApplySvmNstGstTscOffset into HMRemoveSvmNstGstTscOffset to save a call. */
              if (!HMHasGuestSvmVmcbCached(pVCpu))
              {
                  PCSVMVMCB pVmcb = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
                  return uTicks - pVmcb->ctrl.u64TSCOffset;
              }
              return HMRemoveSvmNstGstTscOffset(pVCpu, uTicks);
          }
      #else
          RT_NOREF(pVCpu);
      #endif
          return uTicks;
      }
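The two CPUM helpers are meant to be exact inverses over the same nested-guest state. A minimal
usage sketch follows; the caller below is invented for illustration, is not part of the changeset,
and assumes the hwvirt state does not change between the two calls:

      /* Convert a guest TSC reading into what the nested-guest observes, and back. */
      static uint64_t exampleNestedGuestTscRoundTrip(PVMCPU pVCpu, uint64_t uGuestTsc)
      {
          /* What a RDTSC inside the nested-guest would return: */
          uint64_t const uNstGstTsc = CPUMApplyNestedGuestTscOffset(pVCpu, uGuestTsc);

          /* Removing the same offset must give back the original guest TSC: */
          Assert(CPUMRemoveNestedGuestTscOffset(pVCpu, uNstGstTsc) == uGuestTsc);
          return uNstGstTsc;
      }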
trunk/src/VBox/VMM/VMMAll/HMAll.cpp
(diff against r77589)

* Where the guest switches to real mode (enmGuestMode == PGMMODE_REAL), the "was in real mode"
  flag is now kept in the active VMCS info. object instead of directly in the VCPU's HM state:

      -    pVCpu->hm.s.vmx.fWasInRealMode = true;
      +    {
      +        PVMXVMCSINFO pVmcsInfo = hmGetVmxActiveVmcsInfo(pVCpu);
      +        pVmcsInfo->fWasInRealMode = true;
      +    }

* The VMX context-changed flags were consolidated: HM_CHANGED_VMX_ENTRY_CTLS and
  HM_CHANGED_VMX_EXIT_CTLS are replaced by the combined HM_CHANGED_VMX_ENTRY_EXIT_CTLS, so the
  non-SVM branch now sets HM_CHANGED_VMX_GUEST_XCPT_INTERCEPTS | HM_CHANGED_VMX_ENTRY_EXIT_CTLS
  before the ASMAtomicUoOrU64() on pVCpu->hm.s.fCtxChanged.
trunk/src/VBox/VMM/VMMAll/HMSVMAll.cpp
(diff against r77902)

* HMApplySvmNstGstTscOffset(): dropped the stale @note about keeping hmR0SvmNstGstUndoTscOffset()
  in sync and now cross-references the new helpers instead (@sa CPUMRemoveNestedGuestTscOffset,
  HMRemoveSvmNstGstTscOffset).

* Added HMRemoveSvmNstGstTscOffset(), the inverse of HMApplySvmNstGstTscOffset():

      /**
       * Removes the TSC offset of an SVM nested-guest if any and returns the new TSC
       * value for the guest.
       *
       * @returns The TSC offset after removing any nested-guest TSC offset.
       * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
       * @param   uTicks  The nested-guest TSC.
       *
       * @remarks This function looks at the VMCB cache rather than directly at the
       *          nested-guest VMCB. The latter may have been modified for executing
       *          using hardware-assisted SVM.
       *
       * @sa CPUMApplyNestedGuestTscOffset, HMApplySvmNstGstTscOffset.
       */
      VMM_INT_DECL(uint64_t) HMRemoveSvmNstGstTscOffset(PVMCPU pVCpu, uint64_t uTicks)
      {
          PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
          Assert(CPUMIsGuestInSvmNestedHwVirtMode(pCtx)); RT_NOREF(pCtx);
          PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
          Assert(pVmcbNstGstCache->fCacheValid);
          return uTicks - pVmcbNstGstCache->u64TSCOffset;
      }
trunk/src/VBox/VMM/VMMAll/HMVMXAll.cpp
(diff against r77745)

* The real-mode segment sanity checks (the code with the "all sorts of RPL & DPL assumptions"
  comment) now consult the active VMCS info. object rather than the VCPU's HM state:

      -    if (pVCpu->hm.s.vmx.fWasInRealMode)
      +    PCVMXVMCSINFO pVmcsInfo = hmGetVmxActiveVmcsInfo(pVCpu);
      +    if (pVmcsInfo->fWasInRealMode)

* HMGetVmxMsrPermission() was rewritten. It used to return a VBox status code and fill optional
  PVMXMSREXITREAD/PVMXMSREXITWRITE output parameters; it now returns the combined read and write
  permissions as VMXMSRPM_XXX flags and cross-references hmR0VmxSetMsrPermission:

      VMM_INT_DECL(uint32_t) HMGetVmxMsrPermission(void const *pvMsrBitmap, uint32_t idMsr)
      {
          AssertPtrReturn(pvMsrBitmap, VMXMSRPM_EXIT_RD | VMXMSRPM_EXIT_WR);

          uint8_t const * const pbMsrBitmap = (uint8_t const * const)pvMsrBitmap;

          /*
           * ... (MSR-bitmap layout description unchanged in this hunk) ...
           *
           * A bit corresponding to an MSR within the above range causes a VM-exit
           * if the bit is 1 on executions of RDMSR/WRMSR.  If an MSR falls out of
           * the MSR range, it always cause a VM-exit.
           *
           * See Intel spec. 24.6.9 "MSR-Bitmap Address".
           */
          uint32_t const offBitmapRead  = 0;
          uint32_t const offBitmapWrite = 0x800;
          uint32_t       offMsr;
          uint32_t       iBit;
          if (idMsr <= UINT32_C(0x00001fff))
          {
              offMsr = 0;
              iBit   = idMsr;
          }
          else if (idMsr - UINT32_C(0xc0000000) <= UINT32_C(0x00001fff))
          {
              offMsr = 0x400;
              iBit   = idMsr - UINT32_C(0xc0000000);
          }
          else
          {
              LogFunc(("Warning! Out of range MSR %#RX32\n", idMsr));
              return VMXMSRPM_EXIT_RD | VMXMSRPM_EXIT_WR;
          }

          /* Get the MSR read permissions. */
          uint32_t fRet;
          uint32_t const offMsrRead = offBitmapRead + offMsr;
          Assert(offMsrRead + (iBit >> 3) < offBitmapWrite);
          if (ASMBitTest(pbMsrBitmap + offMsrRead, iBit))
              fRet = VMXMSRPM_EXIT_RD;
          else
              fRet = VMXMSRPM_ALLOW_RD;

          /* Get the MSR write permissions. */
          uint32_t const offMsrWrite = offBitmapWrite + offMsr;
          Assert(offMsrWrite + (iBit >> 3) < X86_PAGE_4K_SIZE);
          if (ASMBitTest(pbMsrBitmap + offMsrWrite, iBit))
              fRet |= VMXMSRPM_EXIT_WR;
          else
              fRet |= VMXMSRPM_ALLOW_WR;

          Assert(VMXMSRPM_IS_FLAG_VALID(fRet));
          return fRet;
      }

* HMGetVmxIoBitmapPermission(): the parameter list was re-joined onto a single line (cosmetic).

* The hardware-virt state dump renames uVmentryTick to uEntryTick and additionally logs
  fVirtNmiBlocking:

      LogRel(("uEntryTick       = %RX64\n",   pCtx->hwvirt.vmx.uEntryTick));
      LogRel(("fVirtNmiBlocking = %RTbool\n", pCtx->hwvirt.vmx.fVirtNmiBlocking));

* Added hmGetVmxActiveVmcsInfo(): returns the VMCS info. object currently in use for the given
  VCPU. This is either the guest or the nested-guest VMCS and need not pertain to the "current"
  VMCS in the VMX sense; for instance, after a VM-entry failure due to invalid guest state the
  VMCS may have been cleared on the way back to ring-3, yet its info. object is still active and
  is returned so the fields can be dumped for diagnostics. It may be called on the EMT with
  preemption or interrupts disabled.

      VMM_INT_DECL(PVMXVMCSINFO) hmGetVmxActiveVmcsInfo(PVMCPU pVCpu)
      {
          if (!pVCpu->hm.s.vmx.fSwitchedToNstGstVmcs)
              return &pVCpu->hm.s.vmx.VmcsInfo;
          return &pVCpu->hm.s.vmx.VmcsInfoNstGst;
      }

* Added HMVmxEventToTrpmEventType(): converts a VMX event type (VMX_ENTRY_INT_INFO_TYPE_XXX) into
  the corresponding TRPM event type. External interrupts map to TRPM_HARDWARE_INT; NMIs and
  hardware exceptions map to TRPM_TRAP; software interrupts, software exceptions (INT3/INTO,
  asserted to be #BP/#OF) and privileged software exceptions (INT1/ICEBP, asserted to be #DB) map
  to TRPM_SOFTWARE_INT; anything else (including the "other event" type) asserts and returns
  TRPM_32BIT_HACK.

* Added HMNotifyVmxNstGstVmexit() (built with VBOX_WITH_NESTED_HWVIRT_VMX, not in raw-mode): a
  notification callback for when a VM-exit happens outside VMX R0 code (e.g. in IEM); it clears
  pVCpu->hm.s.vmx.fMergedNstGstCtls.
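The byte/bit arithmetic the new HMGetVmxMsrPermission() relies on can be illustrated with a small
self-contained sketch (plain C, no VBox types; the helper name is made up). It mirrors the layout
the function implements: read bits at offset 0 of the 4K bitmap, write bits at 0x800, low MSRs
(0x00000000..0x00001fff) at +0, high MSRs (0xc0000000..0xc0001fff) at +0x400, one bit per MSR:

      #include <stdint.h>
      #include <stdbool.h>

      /* Returns the byte offset into the 4K MSR bitmap and the bit index for an MSR,
         or false if the MSR lies outside the two architected ranges (always exits). */
      static bool exampleMsrBitmapSlot(uint32_t idMsr, bool fWrite, uint32_t *poffByte, uint32_t *piBit)
      {
          uint32_t offMsr, iBit;
          if (idMsr <= UINT32_C(0x00001fff))
          {
              offMsr = 0;
              iBit   = idMsr;
          }
          else if (idMsr - UINT32_C(0xc0000000) <= UINT32_C(0x00001fff))
          {
              offMsr = 0x400;
              iBit   = idMsr - UINT32_C(0xc0000000);
          }
          else
              return false;

          *poffByte = (fWrite ? 0x800 : 0) + offMsr + (iBit >> 3);
          *piBit    = iBit & 7;
          return true;
      }

For example, MSR 0xc0000080 (EFER) lands at read byte offset 0x400 + (0x80 >> 3) = 0x410, bit 0,
and at write byte offset 0xc10, bit 0.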
trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h
(diff against r77899)

* Removed the IEM_VMX_R3_EXECPOLICY_IEM_ALL_ENABLE_RET and IEM_VMX_R3_EXECPOLICY_IEM_ALL_DISABLE_RET
  macros; their bodies are now inlined at the two call sites (see below).

* VMX-preemption timer: the "VmentryTick" naming becomes "EntryTick", both in the worked example in
  the comments and in the code computing the current timer value:

      uint64_t const uCurTick   = TMCpuTickGetNoCheck(pVCpu);
      uint64_t const uEntryTick = pVCpu->cpum.GstCtx.hwvirt.vmx.uEntryTick;
      uint64_t const uDelta     = uCurTick - uEntryTick;

  The same rename applies where the timer is armed at VM-entry (pVCpu->cpum.GstCtx.hwvirt.vmx.uEntryTick
  is set from TMCpuTickGetNoCheck() before VMCPU_FF_VMX_PREEMPT_TIMER is raised).

* VM-exit MSR handling: the doxygen now speaks of the "VM-exit MSR-store area" and "VM-exit MSR-load
  area" (instead of "auto-store"/"auto-load MSRs area"), and the context members and locals were
  renamed accordingly: the guest MSRs are written at VM-exit from pExitMsrStoreArea via
  PGMPhysSimpleWriteGCPhys to GCPhysVmExitMsrStoreArea (= pVmcs->u64AddrExitMsrStore.u), and the host
  MSRs are read at VM-exit into pExitMsrLoadArea via PGMPhysSimpleReadGCPhys from
  GCPhysVmExitMsrLoadArea (= pVmcs->u64AddrExitMsrLoad.u).

* VM-exit completion: the DISABLE_RET macro call was replaced by inline code:

      # if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && defined(IN_RING3)
          /* Revert any IEM-only nested-guest execution policy, otherwise return rcStrict. */
          Log(("vmexit: Disabling IEM-only EM execution policy!\n"));
          int rcSched = EMR3SetExecutionPolicy(pVCpu->CTX_SUFF(pVM)->pUVM, EMEXECPOLICY_IEM_ALL, false);
          if (rcSched != VINF_SUCCESS)
              iemSetPassUpStatus(pVCpu, rcSched);
      # endif
          return VINF_SUCCESS;

* Virtual-APIC page access: iemVmxVirtApicReadRaw32/64 and iemVmxVirtApicWriteRaw32/64 no longer use
  the internal pvVirtApicPage copy. They are now IEM_STATIC (instead of DECLINLINE) and access the
  guest's virtual-APIC page directly at GCPhysVirtApic = pVmcs->u64AddrVirtApic.u using
  PGMPhysSimpleReadGCPhys / PGMPhysSimpleWriteGCPhys, asserting on failure (reads return 0). The
  32-bit reader, for example, is now:

      IEM_STATIC uint32_t iemVmxVirtApicReadRaw32(PVMCPU pVCpu, uint16_t offReg)
      {
          PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
          Assert(pVmcs);

          uint32_t uReg;
          Assert(offReg <= VMX_V_VIRT_APIC_SIZE - sizeof(uReg));
          RTGCPHYS const GCPhysVirtApic = pVmcs->u64AddrVirtApic.u;
          int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &uReg, GCPhysVirtApic + offReg, sizeof(uReg));
          if (RT_FAILURE(rc))
          {
              AssertMsgFailed(("Failed to read %u bytes at offset %#x of the virtual-APIC page at %#RGp\n",
                               sizeof(uReg), offReg, GCPhysVirtApic));
              uReg = 0;
          }
          return uReg;
      }

* Likewise, iemVmxVirtApicSetVector/iemVmxVirtApicClearVector were renamed to
  iemVmxVirtApicSetVectorInReg/iemVmxVirtApicClearVectorInReg and rewritten to read the relevant
  32-bit fragment of the 256-bit register (ISR/TMR/IRR) from the virtual-APIC page, set or clear the
  vector's bit (offVector = (uVector & 0xe0) >> 1, idxVectorBit = uVector & 0x1f) and write it back,
  asserting on read/write failures. The call sites (including self-IPI virtualization and the
  XAPIC_OFF_ISR0 clearing path) were updated to the new names, and an extra Assert on the current
  VMCS pointer was added to the helper that scans for the highest set vector bit.

* VM-entry checks: the code no longer reads the whole virtual-APIC page into the internal copy. When
  neither virtual-APIC accesses nor virtual-interrupt delivery are enabled, only the VTPR byte is
  read from guest memory for the TPR-threshold check:

      /* Read the VTPR from the virtual-APIC page. */
      uint8_t u8VTpr;
      int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &u8VTpr, GCPhysVirtApic + XAPIC_OFF_TPR, sizeof(u8VTpr));
      if (RT_SUCCESS(rc))
      { /* likely */ }
      else
          IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtApicPagePtrReadPhys);

      /* Bits 3:0 of the TPR-threshold must not be greater than bits 7:4 of VTPR. */
      if ((uint8_t)RT_BF_GET(pVmcs->u32TprThreshold, VMX_BF_TPR_THRESHOLD_TPR) <= (u8VTpr & 0xf0))
      { /* likely */ }
      else
          IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_TprThresholdVTpr);

* VM-entry MSR loading: the doxygen now says "VM-entry MSR-load area"; pAutoMsrArea and
  GCPhysAutoMsrArea were renamed to pEntryMsrLoadArea and GCPhysVmEntryMsrLoadArea (read from
  pVmcs->u64AddrEntryMsrLoad.u).

* Event injection via TRPM: the VMX-to-TRPM event type mapping was corrected. NMIs and hardware
  exceptions map to TRPM_TRAP; software interrupts, software exceptions (#BP/#OF, with an assert on
  the vector) and privileged software exceptions (#DB from INT1/ICEBP, with an assert) map to
  TRPM_SOFTWARE_INT. The fault address is set for TRPM_TRAP + #PF and the instruction length for
  TRPM_SOFTWARE_INT, keyed off the computed TRPM type instead of the raw VMX type.

* VMLAUNCH/VMRESUME success path: the ENABLE_RET macro call was replaced by inline code that, when
  VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM is defined in ring-3, switches EM to the IEM-only execution
  policy (EMEXECPOLICY_IEM_ALL), passes up any non-VINF_SUCCESS scheduling status, and then returns
  VINF_SUCCESS.

* The RDMSR/WRMSR intercept check against the MSR bitmap was simplified to use the new
  HMGetVmxMsrPermission():

      uint32_t fMsrpm = HMGetVmxMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvMsrBitmap), idMsr);
      if (uExitReason == VMX_EXIT_RDMSR)
          return RT_BOOL(fMsrpm & VMXMSRPM_EXIT_RD);
      return RT_BOOL(fMsrpm & VMXMSRPM_EXIT_WR);
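The vector-to-offset math used by the new iemVmxVirtApicSetVectorInReg/ClearVectorInReg helpers is
worth spelling out. Each 256-bit APIC "register" (IRR/ISR/TMR) is spread over eight 32-bit APIC
registers spaced 16 bytes apart, so (uVector & 0xe0) >> 1 selects the 16-byte slot and
uVector & 0x1f the bit within it. A standalone sketch (no VBox types, helper name invented):

      #include <stdint.h>

      /* Where does a vector live relative to the start of a 256-bit APIC register (e.g. IRR0)? */
      static void exampleVirtApicVectorSlot(uint8_t uVector, uint16_t *poffVector, uint16_t *pidxBit)
      {
          *poffVector = (uint16_t)((uVector & UINT32_C(0xe0)) >> 1); /* 16-byte stride per 32 vectors */
          *pidxBit    = (uint16_t)(uVector & UINT32_C(0x1f));        /* bit within the 32-bit word    */
      }

Vector 0x41 (decimal 65), for instance, ends up at byte offset 0x20 from IRR0 with bit index 1,
i.e. bit 1 of the IRR1 register.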
trunk/src/VBox/VMM/VMMR0/HMR0.cpp
(diff against r76993)

* Renamed the host EFER field u64HostEfer to u64HostMsrEfer, both in the ring-0 global
  (g_HmR0.hwvirt.u.vmx.u64HostMsrEfer, filled from ASMRdMsr(MSR_K6_EFER) during ring-0 VMX init for
  logging/diagnostics) and in the per-VM copy (pVM->hm.s.vmx.u64HostMsrEfer); the surrounding
  assignments were re-aligned.
trunk/src/VBox/VMM/VMMR0/HMR0A.asm
(diff against r77481)

* Renamed the assembly VMCS helpers: VMXActivateVmcs is now VMXLoadVmcs and VMXGetActivatedVmcs is
  now VMXGetCurrentVmcs (prototype comments, BEGINPROC/ENDPROC labels and bodies):

      ;DECLASM(int) VMXLoadVmcs(RTHCPHYS HCPhysVmcs);
      ;DECLASM(int) VMXGetCurrentVmcs(RTHCPHYS *pVMCS);

* Renamed the batched VMCS access cache structure from VMXVMCSBATCHCACHE to VMXVMCSCACHE. All field
  references in the cached vmread/vmwrite loops (VMXVMCSCACHE.Read.cValidEntries, .Read.aField,
  .Read.aFieldVal, .Write.cValidEntries, .Write.aField, .Write.aFieldVal) in both the 32-bit and
  64-bit guest paths under VMX_USE_CACHED_VMCS_ACCESSES, as well as the @cproto comment for the
  StartVM wrapper, were updated accordingly.
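For context, the renamed assembly helpers are consumed from C through the DECLASM prototypes quoted
above. A hedged sketch of how a caller might load a VMCS and verify it is the current one; the
wrapper function is invented for illustration, only VMXLoadVmcs and VMXGetCurrentVmcs are taken
from the diff:

      /* Make HCPhysVmcs the current VMCS and verify the CPU agrees. */
      static int exampleMakeVmcsCurrent(RTHCPHYS HCPhysVmcs)
      {
          int rc = VMXLoadVmcs(HCPhysVmcs);           /* Load the VMCS pointer. */
          if (RT_SUCCESS(rc))
          {
              RTHCPHYS HCPhysCur = NIL_RTHCPHYS;
              rc = VMXGetCurrentVmcs(&HCPhysCur);     /* Read back the current VMCS pointer. */
              if (RT_SUCCESS(rc) && HCPhysCur != HCPhysVmcs)
                  rc = VERR_VMX_INVALID_VMCS_PTR;
          }
          return rc;
      }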
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
(diff against r77906)

* Removed the local hmR0SvmNstGstUndoTscOffset() helper; the post-run state restoration now uses
  the new shared HMRemoveSvmNstGstTscOffset() instead:

      /* The nested-guest VMCB TSC offset shall eventually be restored on #VMEXIT via HMNotifySvmNstGstVmexit(). */
      uint64_t const uGstTsc = HMRemoveSvmNstGstTscOffset(pVCpu, uHostTsc + pVmcbCtrl->u64TSCOffset);
      TMCpuTickSetLastSeen(pVCpu, uGstTsc);

* Dropped the STAM counter increments in the HLT, MONITOR and MWAIT exit handlers (StatExitHlt,
  StatExitMonitor, StatExitMwait); the HMSVM_CHECK_SINGLE_STEP handling is unchanged.
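The replaced call is plain offset arithmetic: hardware ran the nested-guest with the VMCB's TSC
offset applied on top of the host TSC, so adding that offset to the host TSC gives the value the
nested-guest saw, and removing the cached nested-guest offset gives the outer guest's TSC. A hedged
sketch with made-up numbers, not taken from the changeset:

      /* Say the host TSC after the run is 100000, the current VMCB offset is 500 and the
         cached nested-guest offset is also 500 (nothing modified it for execution): */
      uint64_t const uHostTsc   = 100000;
      uint64_t const uVmcbOff   = 500;   /* pVmcbCtrl->u64TSCOffset       */
      uint64_t const uNstGstOff = 500;   /* NstGstVmcbCache.u64TSCOffset  */
      uint64_t const uGstTsc    = (uHostTsc + uVmcbOff) - uNstGstOff;  /* == 100000 */
      /* TMCpuTickSetLastSeen(pVCpu, uGstTsc) then records 100000 as the last seen guest TSC. */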
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
r77590 r78220 44 44 #include "dtrace/VBoxVMM.h" 45 45 46 # define HMVMX_ALWAYS_SYNC_FULL_GUEST_STATE 46 47 #ifdef DEBUG_ramshankar 47 48 # define HMVMX_ALWAYS_SAVE_GUEST_RFLAGS 48 49 # define HMVMX_ALWAYS_SAVE_FULL_GUEST_STATE 49 # define HMVMX_ALWAYS_SYNC_FULL_GUEST_STATE50 50 # define HMVMX_ALWAYS_CHECK_GUEST_STATE 51 51 # define HMVMX_ALWAYS_TRAP_ALL_XCPTS … … 63 63 64 64 /** Determine which tagged-TLB flush handler to use. */ 65 #define HMVMX_FLUSH_TAGGED_TLB_EPT_VPID 066 #define HMVMX_FLUSH_TAGGED_TLB_EPT 167 #define HMVMX_FLUSH_TAGGED_TLB_VPID 268 #define HMVMX_FLUSH_TAGGED_TLB_NONE 365 #define HMVMX_FLUSH_TAGGED_TLB_EPT_VPID 0 66 #define HMVMX_FLUSH_TAGGED_TLB_EPT 1 67 #define HMVMX_FLUSH_TAGGED_TLB_VPID 2 68 #define HMVMX_FLUSH_TAGGED_TLB_NONE 3 69 69 70 70 /** @name HMVMX_READ_XXX … … 72 72 * the guest-CPU or VCPU state but are needed while handling VM-exits. 73 73 */ 74 #define HMVMX_READ_IDT_VECTORING_INFO RT_BIT_32(0)75 #define HMVMX_READ_IDT_VECTORING_ERROR_CODE RT_BIT_32(1)76 #define HMVMX_READ_EXIT_QUALIFICATION RT_BIT_32(2)77 #define HMVMX_READ_EXIT_INSTR_LEN RT_BIT_32(3)78 #define HMVMX_READ_EXIT_INTERRUPTION_INFO RT_BIT_32(4)79 #define HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE RT_BIT_32(5)80 #define HMVMX_READ_EXIT_INSTR_INFO RT_BIT_32(6)81 #define HMVMX_READ_GUEST_LINEAR_ADDR RT_BIT_32(7)74 #define HMVMX_READ_IDT_VECTORING_INFO RT_BIT_32(0) 75 #define HMVMX_READ_IDT_VECTORING_ERROR_CODE RT_BIT_32(1) 76 #define HMVMX_READ_EXIT_QUALIFICATION RT_BIT_32(2) 77 #define HMVMX_READ_EXIT_INSTR_LEN RT_BIT_32(3) 78 #define HMVMX_READ_EXIT_INTERRUPTION_INFO RT_BIT_32(4) 79 #define HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE RT_BIT_32(5) 80 #define HMVMX_READ_EXIT_INSTR_INFO RT_BIT_32(6) 81 #define HMVMX_READ_GUEST_LINEAR_ADDR RT_BIT_32(7) 82 82 /** @} */ 83 84 /**85 * States of the VMCS.86 *87 * This does not reflect all possible VMCS states but currently only those88 * needed for maintaining the VMCS consistently even when thread-context hooks89 * are used. Maybe later this can be extended (i.e. Nested Virtualization).90 */91 #define HMVMX_VMCS_STATE_CLEAR RT_BIT(0)92 #define HMVMX_VMCS_STATE_ACTIVE RT_BIT(1)93 #define HMVMX_VMCS_STATE_LAUNCHED RT_BIT(2)94 83 95 84 /** … … 123 112 * - \#AC and \#DB are always intercepted to prevent the CPU from deadlocking 124 113 * due to bugs in Intel CPUs. 125 * - \#PF need not be intercepted even in real-mode if we have Nested Paging114 * - \#PF need not be intercepted even in real-mode if we have nested paging 126 115 * support. 127 116 */ … … 139 128 /** Profiling macro. */ 140 129 #ifdef HM_PROFILE_EXIT_DISPATCH 141 # define HMVMX_START_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitDispatch, ed)142 # define HMVMX_STOP_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitDispatch, ed)130 # define HMVMX_START_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitDispatch, ed) 131 # define HMVMX_STOP_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitDispatch, ed) 143 132 #else 144 # define HMVMX_START_EXIT_DISPATCH_PROF() do { } while (0)145 # define HMVMX_STOP_EXIT_DISPATCH_PROF() do { } while (0)133 # define HMVMX_START_EXIT_DISPATCH_PROF() do { } while (0) 134 # define HMVMX_STOP_EXIT_DISPATCH_PROF() do { } while (0) 146 135 #endif 147 136 … … 163 152 (a_pVCpu)->cpum.GstCtx.fExtrn, (a_fExtrnMbz))) 164 153 165 /** Macro for importing guest state from the VMCS back into CPUMCTX (intended to be166 * used only from VM-exit handlers). 
*/167 #define HMVMX_CPUMCTX_IMPORT_STATE(a_pVCpu, a_fWhat) (hmR0VmxImportGuestState((a_pVCpu), (a_fWhat)))168 169 154 /** Helper macro for VM-exit handlers called unexpectedly. */ 170 155 #define HMVMX_UNEXPECTED_EXIT_RET(a_pVCpu, a_pVmxTransient) \ … … 173 158 return VERR_VMX_UNEXPECTED_EXIT; \ 174 159 } while (0) 175 176 /** Macro for importing segment registers to the VMCS from the guest-CPU context. */177 #ifdef VMX_USE_CACHED_VMCS_ACCESSES178 # define HMVMX_IMPORT_SREG(Sel, a_pCtxSelReg) \179 hmR0VmxImportGuestSegmentReg(pVCpu, VMX_VMCS16_GUEST_##Sel##_SEL, VMX_VMCS32_GUEST_##Sel##_LIMIT, \180 VMX_VMCS_GUEST_##Sel##_BASE_CACHE_IDX, VMX_VMCS32_GUEST_##Sel##_ACCESS_RIGHTS, (a_pCtxSelReg))181 #else182 # define HMVMX_IMPORT_SREG(Sel, a_pCtxSelReg) \183 hmR0VmxImportGuestSegmentReg(pVCpu, VMX_VMCS16_GUEST_##Sel##_SEL, VMX_VMCS32_GUEST_##Sel##_LIMIT, \184 VMX_VMCS_GUEST_##Sel##_BASE, VMX_VMCS32_GUEST_##Sel##_ACCESS_RIGHTS, (a_pCtxSelReg))185 #endif186 187 /** Macro for exporting segment registers to the VMCS from the guest-CPU context. */188 #define HMVMX_EXPORT_SREG(Sel, a_pCtxSelReg) \189 hmR0VmxExportGuestSegmentReg(pVCpu, VMX_VMCS16_GUEST_##Sel##_SEL, VMX_VMCS32_GUEST_##Sel##_LIMIT, \190 VMX_VMCS_GUEST_##Sel##_BASE, VMX_VMCS32_GUEST_##Sel##_ACCESS_RIGHTS, (a_pCtxSelReg))191 160 192 161 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX … … 277 246 /** Whether the VM-entry failed or not. */ 278 247 bool fVMEntryFailed; 248 /** Whether we are currently executing a nested-guest. */ 249 bool fIsNestedGuest; 279 250 /** Alignment. */ 280 uint8_t abAlignment1[ 3];251 uint8_t abAlignment1[2]; 281 252 282 253 /** The VM-entry interruption-information field. */ … … 299 270 /** Whether the hyper debug state was active at the time of VM-exit. */ 300 271 bool fWasHyperDebugStateActive; 301 /** Whether TSC-offsetting should be setupbefore VM-entry. */302 bool fUpdate TscOffsettingAndPreemptTimer;272 /** Whether TSC-offsetting and VMX-preemption timer was updated before VM-entry. */ 273 bool fUpdatedTscOffsettingAndPreemptTimer; 303 274 /** Whether the VM-exit was caused by a page-fault during delivery of a 304 275 * contributory exception or a page-fault. */ … … 307 278 * external interrupt or NMI. */ 308 279 bool fVectoringPF; 280 bool afAlignment0[3]; 281 282 /** The VMCS info. object. */ 283 PVMXVMCSINFO pVmcsInfo; 309 284 } VMXTRANSIENT; 310 285 AssertCompileMemberAlignment(VMXTRANSIENT, uExitReason, sizeof(uint64_t)); … … 312 287 AssertCompileMemberAlignment(VMXTRANSIENT, uEntryIntInfo, sizeof(uint64_t)); 313 288 AssertCompileMemberAlignment(VMXTRANSIENT, fWasGuestDebugStateActive, sizeof(uint64_t)); 289 AssertCompileMemberAlignment(VMXTRANSIENT, pVmcsInfo, sizeof(uint64_t)); 314 290 AssertCompileMemberSize(VMXTRANSIENT, ExitInstrInfo, sizeof(uint32_t)); 315 291 /** Pointer to VMX transient state. */ … … 330 306 * @returns Strict VBox status code (i.e. informational status codes too). 331 307 * @param pVCpu The cross context virtual CPU structure. 332 * @param pVmxTransient Pointer to the VMX-transient structure.308 * @param pVmxTransient The VMX-transient structure. 333 309 */ 334 310 #ifndef HMVMX_USE_FUNCTION_TABLE 335 typedef VBOXSTRICTRC 311 typedef VBOXSTRICTRC FNVMXEXITHANDLER(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient); 336 312 #else 337 typedef DECLCALLBACK(VBOXSTRICTRC) 313 typedef DECLCALLBACK(VBOXSTRICTRC) FNVMXEXITHANDLER(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient); 338 314 /** Pointer to VM-exit handler. 
*/ 339 typedef FNVMXEXITHANDLER 315 typedef FNVMXEXITHANDLER *PFNVMXEXITHANDLER; 340 316 #endif 341 317 … … 347 323 * @returns VBox status code, no informational status code returned. 348 324 * @param pVCpu The cross context virtual CPU structure. 349 * @param pVmxTransient Pointer to the VMX-transient structure.325 * @param pVmxTransient The VMX-transient structure. 350 326 * 351 327 * @remarks This is not used on anything returning VERR_EM_INTERPRETER as the … … 354 330 */ 355 331 #ifndef HMVMX_USE_FUNCTION_TABLE 356 typedef int 332 typedef int FNVMXEXITHANDLERNSRC(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient); 357 333 #else 358 typedef FNVMXEXITHANDLER 334 typedef FNVMXEXITHANDLER FNVMXEXITHANDLERNSRC; 359 335 #endif 360 336 … … 363 339 * Internal Functions * 364 340 *********************************************************************************************************************************/ 365 static void hmR0VmxFlushEpt(PVMCPU pVCpu, VMXTLBFLUSHEPT enmTlbFlush); 366 static void hmR0VmxFlushVpid(PVMCPU pVCpu, VMXTLBFLUSHVPID enmTlbFlush, RTGCPTR GCPtr); 367 static void hmR0VmxClearIntNmiWindowsVmcs(PVMCPU pVCpu); 368 static int hmR0VmxImportGuestState(PVMCPU pVCpu, uint64_t fWhat); 369 static VBOXSTRICTRC hmR0VmxInjectEventVmcs(PVMCPU pVCpu, uint64_t u64IntInfo, uint32_t cbInstr, uint32_t u32ErrCode, 370 RTGCUINTREG GCPtrFaultAddress, bool fStepping, uint32_t *pfIntrState); 371 #if HC_ARCH_BITS == 32 372 static int hmR0VmxInitVmcsReadCache(PVMCPU pVCpu); 341 #ifndef HMVMX_USE_FUNCTION_TABLE 342 DECLINLINE(VBOXSTRICTRC) hmR0VmxHandleExit(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient); 343 # define HMVMX_EXIT_DECL DECLINLINE(VBOXSTRICTRC) 344 # define HMVMX_EXIT_NSRC_DECL DECLINLINE(int) 345 #else 346 # define HMVMX_EXIT_DECL static DECLCALLBACK(VBOXSTRICTRC) 347 # define HMVMX_EXIT_NSRC_DECL HMVMX_EXIT_DECL 373 348 #endif 374 #if ndef HMVMX_USE_FUNCTION_TABLE375 DECLINLINE(VBOXSTRICTRC) hmR0VmxHandleExit(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient, uint32_t rcReason);376 # define HMVMX_EXIT_DECL DECLINLINE(VBOXSTRICTRC)377 # define HMVMX_EXIT_NSRC_DECL DECLINLINE(int) 378 #else 379 # define HMVMX_EXIT_DECL static DECLCALLBACK(VBOXSTRICTRC)380 # define HMVMX_EXIT_NSRC_DECL HMVMX_EXIT_DECL 349 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX 350 DECLINLINE(VBOXSTRICTRC) hmR0VmxHandleExitNested(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient); 351 #endif 352 353 static int hmR0VmxImportGuestState(PVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo, uint64_t fWhat); 354 #if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) 355 static int hmR0VmxInitVmcsReadCache(PVMCPU pVCpu); 381 356 #endif 382 357 … … 384 359 * @{ 385 360 */ 386 static FNVMXEXITHANDLER hmR0VmxExitXcptOrNmi;387 static FNVMXEXITHANDLER hmR0VmxExitExtInt;388 static FNVMXEXITHANDLER hmR0VmxExitTripleFault;389 static FNVMXEXITHANDLERNSRC hmR0VmxExitInitSignal;390 static FNVMXEXITHANDLERNSRC hmR0VmxExitSipi;391 static FNVMXEXITHANDLERNSRC hmR0VmxExitIoSmi;392 static FNVMXEXITHANDLERNSRC hmR0VmxExitSmi;393 static FNVMXEXITHANDLERNSRC hmR0VmxExitIntWindow;394 static FNVMXEXITHANDLERNSRC hmR0VmxExitNmiWindow;395 static FNVMXEXITHANDLER hmR0VmxExitTaskSwitch;396 static FNVMXEXITHANDLER hmR0VmxExitCpuid;397 static FNVMXEXITHANDLER hmR0VmxExitGetsec;398 static FNVMXEXITHANDLER hmR0VmxExitHlt;399 static FNVMXEXITHANDLERNSRC hmR0VmxExitInvd;400 static FNVMXEXITHANDLER hmR0VmxExitInvlpg;401 static FNVMXEXITHANDLER hmR0VmxExitRdpmc;402 static FNVMXEXITHANDLER hmR0VmxExitVmcall;361 static FNVMXEXITHANDLER hmR0VmxExitXcptOrNmi; 362 static FNVMXEXITHANDLER 
hmR0VmxExitExtInt; 363 static FNVMXEXITHANDLER hmR0VmxExitTripleFault; 364 static FNVMXEXITHANDLERNSRC hmR0VmxExitInitSignal; 365 static FNVMXEXITHANDLERNSRC hmR0VmxExitSipi; 366 static FNVMXEXITHANDLERNSRC hmR0VmxExitIoSmi; 367 static FNVMXEXITHANDLERNSRC hmR0VmxExitSmi; 368 static FNVMXEXITHANDLERNSRC hmR0VmxExitIntWindow; 369 static FNVMXEXITHANDLERNSRC hmR0VmxExitNmiWindow; 370 static FNVMXEXITHANDLER hmR0VmxExitTaskSwitch; 371 static FNVMXEXITHANDLER hmR0VmxExitCpuid; 372 static FNVMXEXITHANDLER hmR0VmxExitGetsec; 373 static FNVMXEXITHANDLER hmR0VmxExitHlt; 374 static FNVMXEXITHANDLERNSRC hmR0VmxExitInvd; 375 static FNVMXEXITHANDLER hmR0VmxExitInvlpg; 376 static FNVMXEXITHANDLER hmR0VmxExitRdpmc; 377 static FNVMXEXITHANDLER hmR0VmxExitVmcall; 403 378 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX 404 static FNVMXEXITHANDLER hmR0VmxExitVmclear;405 static FNVMXEXITHANDLER hmR0VmxExitVmlaunch;406 static FNVMXEXITHANDLER hmR0VmxExitVmptrld;407 static FNVMXEXITHANDLER hmR0VmxExitVmptrst;408 static FNVMXEXITHANDLER hmR0VmxExitVmread;409 static FNVMXEXITHANDLER hmR0VmxExitVmresume;410 static FNVMXEXITHANDLER hmR0VmxExitVmwrite;411 static FNVMXEXITHANDLER hmR0VmxExitVmxoff;412 static FNVMXEXITHANDLER hmR0VmxExitVmxon;379 static FNVMXEXITHANDLER hmR0VmxExitVmclear; 380 static FNVMXEXITHANDLER hmR0VmxExitVmlaunch; 381 static FNVMXEXITHANDLER hmR0VmxExitVmptrld; 382 static FNVMXEXITHANDLER hmR0VmxExitVmptrst; 383 static FNVMXEXITHANDLER hmR0VmxExitVmread; 384 static FNVMXEXITHANDLER hmR0VmxExitVmresume; 385 static FNVMXEXITHANDLER hmR0VmxExitVmwrite; 386 static FNVMXEXITHANDLER hmR0VmxExitVmxoff; 387 static FNVMXEXITHANDLER hmR0VmxExitVmxon; 413 388 #endif 414 static FNVMXEXITHANDLER hmR0VmxExitRdtsc;415 static FNVMXEXITHANDLERNSRC hmR0VmxExitRsm;416 static FNVMXEXITHANDLERNSRC hmR0VmxExitSetPendingXcptUD;417 static FNVMXEXITHANDLER hmR0VmxExitMovCRx;418 static FNVMXEXITHANDLER hmR0VmxExitMovDRx;419 static FNVMXEXITHANDLER hmR0VmxExitIoInstr;420 static FNVMXEXITHANDLER hmR0VmxExitRdmsr;421 static FNVMXEXITHANDLER hmR0VmxExitWrmsr;422 static FNVMXEXITHANDLERNSRC hmR0VmxExitErrInvalidGuestState;423 static FNVMXEXITHANDLERNSRC hmR0VmxExitErrMsrLoad;424 static FNVMXEXITHANDLERNSRC hmR0VmxExitErrUndefined;425 static FNVMXEXITHANDLER hmR0VmxExitMwait;426 static FNVMXEXITHANDLER hmR0VmxExitMtf;427 static FNVMXEXITHANDLER hmR0VmxExitMonitor;428 static FNVMXEXITHANDLER hmR0VmxExitPause;429 static FNVMXEXITHANDLERNSRC hmR0VmxExitErrMachineCheck;430 static FNVMXEXITHANDLERNSRC hmR0VmxExitTprBelowThreshold;431 static FNVMXEXITHANDLER hmR0VmxExitApicAccess;432 static FNVMXEXITHANDLER hmR0VmxExitXdtrAccess;433 static FNVMXEXITHANDLER hmR0VmxExitEptViolation;434 static FNVMXEXITHANDLER hmR0VmxExitEptMisconfig;435 static FNVMXEXITHANDLER hmR0VmxExitRdtscp;436 static FNVMXEXITHANDLER hmR0VmxExitPreemptTimer;437 static FNVMXEXITHANDLERNSRC hmR0VmxExitWbinvd;438 static FNVMXEXITHANDLER hmR0VmxExitXsetbv;439 static FNVMXEXITHANDLER hmR0VmxExitRdrand;440 static FNVMXEXITHANDLER hmR0VmxExitInvpcid;389 static FNVMXEXITHANDLER hmR0VmxExitRdtsc; 390 static FNVMXEXITHANDLERNSRC hmR0VmxExitRsm; 391 static FNVMXEXITHANDLERNSRC hmR0VmxExitSetPendingXcptUD; 392 static FNVMXEXITHANDLER hmR0VmxExitMovCRx; 393 static FNVMXEXITHANDLER hmR0VmxExitMovDRx; 394 static FNVMXEXITHANDLER hmR0VmxExitIoInstr; 395 static FNVMXEXITHANDLER hmR0VmxExitRdmsr; 396 static FNVMXEXITHANDLER hmR0VmxExitWrmsr; 397 static FNVMXEXITHANDLERNSRC hmR0VmxExitErrInvalidGuestState; 398 static FNVMXEXITHANDLERNSRC hmR0VmxExitErrMsrLoad; 399 static FNVMXEXITHANDLERNSRC 
hmR0VmxExitErrUndefined; 400 static FNVMXEXITHANDLER hmR0VmxExitMwait; 401 static FNVMXEXITHANDLER hmR0VmxExitMtf; 402 static FNVMXEXITHANDLER hmR0VmxExitMonitor; 403 static FNVMXEXITHANDLER hmR0VmxExitPause; 404 static FNVMXEXITHANDLERNSRC hmR0VmxExitErrMachineCheck; 405 static FNVMXEXITHANDLERNSRC hmR0VmxExitTprBelowThreshold; 406 static FNVMXEXITHANDLER hmR0VmxExitApicAccess; 407 static FNVMXEXITHANDLER hmR0VmxExitXdtrAccess; 408 static FNVMXEXITHANDLER hmR0VmxExitEptViolation; 409 static FNVMXEXITHANDLER hmR0VmxExitEptMisconfig; 410 static FNVMXEXITHANDLER hmR0VmxExitRdtscp; 411 static FNVMXEXITHANDLER hmR0VmxExitPreemptTimer; 412 static FNVMXEXITHANDLERNSRC hmR0VmxExitWbinvd; 413 static FNVMXEXITHANDLER hmR0VmxExitXsetbv; 414 static FNVMXEXITHANDLER hmR0VmxExitRdrand; 415 static FNVMXEXITHANDLER hmR0VmxExitInvpcid; 441 416 /** @} */ 442 417 443 static int hmR0VmxExitXcptPF(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient); 444 static int hmR0VmxExitXcptMF(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient); 445 static int hmR0VmxExitXcptDB(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient); 446 static int hmR0VmxExitXcptBP(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient); 447 static int hmR0VmxExitXcptGP(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient); 448 static int hmR0VmxExitXcptAC(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient); 449 static int hmR0VmxExitXcptGeneric(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient); 450 static uint32_t hmR0VmxCheckGuestState(PVMCPU pVCpu); 418 /** @name Helpers for hardware exceptions VM-exit handlers. 419 * @{ 420 */ 421 static int hmR0VmxExitXcptPF(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient); 422 static int hmR0VmxExitXcptMF(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient); 423 static int hmR0VmxExitXcptDB(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient); 424 static int hmR0VmxExitXcptBP(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient); 425 static int hmR0VmxExitXcptGP(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient); 426 static int hmR0VmxExitXcptAC(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient); 427 static int hmR0VmxExitXcptGeneric(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient); 428 /** @} */ 451 429 452 430 … … 454 432 * Global Variables * 455 433 *********************************************************************************************************************************/ 434 #ifdef VMX_USE_CACHED_VMCS_ACCESSES 435 static const uint32_t g_aVmcsCacheSegBase[] = 436 { 437 VMX_VMCS_GUEST_ES_BASE_CACHE_IDX, 438 VMX_VMCS_GUEST_CS_BASE_CACHE_IDX, 439 VMX_VMCS_GUEST_SS_BASE_CACHE_IDX, 440 VMX_VMCS_GUEST_DS_BASE_CACHE_IDX, 441 VMX_VMCS_GUEST_FS_BASE_CACHE_IDX, 442 VMX_VMCS_GUEST_GS_BASE_CACHE_IDX 443 }; 444 AssertCompile(RT_ELEMENTS(g_aVmcsCacheSegBase) == X86_SREG_COUNT); 445 #endif 446 static const uint32_t g_aVmcsSegBase[] = 447 { 448 VMX_VMCS_GUEST_ES_BASE, 449 VMX_VMCS_GUEST_CS_BASE, 450 VMX_VMCS_GUEST_SS_BASE, 451 VMX_VMCS_GUEST_DS_BASE, 452 VMX_VMCS_GUEST_FS_BASE, 453 VMX_VMCS_GUEST_GS_BASE 454 }; 455 static const uint32_t g_aVmcsSegSel[] = 456 { 457 VMX_VMCS16_GUEST_ES_SEL, 458 VMX_VMCS16_GUEST_CS_SEL, 459 VMX_VMCS16_GUEST_SS_SEL, 460 VMX_VMCS16_GUEST_DS_SEL, 461 VMX_VMCS16_GUEST_FS_SEL, 462 VMX_VMCS16_GUEST_GS_SEL 463 }; 464 static const uint32_t g_aVmcsSegLimit[] = 465 { 466 VMX_VMCS32_GUEST_ES_LIMIT, 467 VMX_VMCS32_GUEST_CS_LIMIT, 468 VMX_VMCS32_GUEST_SS_LIMIT, 469 VMX_VMCS32_GUEST_DS_LIMIT, 470 VMX_VMCS32_GUEST_FS_LIMIT, 471 VMX_VMCS32_GUEST_GS_LIMIT 472 }; 473 static const uint32_t g_aVmcsSegAttr[] = 474 { 475 VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS, 476 VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS, 477 
VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS, 478 VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS, 479 VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS, 480 VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS 481 }; 482 AssertCompile(RT_ELEMENTS(g_aVmcsSegSel) == X86_SREG_COUNT); 483 AssertCompile(RT_ELEMENTS(g_aVmcsSegLimit) == X86_SREG_COUNT); 484 AssertCompile(RT_ELEMENTS(g_aVmcsSegBase) == X86_SREG_COUNT); 485 AssertCompile(RT_ELEMENTS(g_aVmcsSegAttr) == X86_SREG_COUNT); 486 456 487 #ifdef HMVMX_USE_FUNCTION_TABLE 457 458 488 /** 459 489 * VMX_EXIT dispatch table. … … 578 608 579 609 /** 610 * Get the CR0 guest/host mask that does not change through the lifetime of a VM. 611 * 612 * Any bit set in this mask is owned by the host/hypervisor and would cause a 613 * VM-exit when modified by the guest. 614 * 615 * @returns The static CR0 guest/host mask. 616 * @param pVCpu The cross context virtual CPU structure. 617 */ 618 DECL_FORCE_INLINE(uint64_t) hmR0VmxGetFixedCr0Mask(PVMCPU pVCpu) 619 { 620 /* 621 * Modifications to CR0 bits that VT-x ignores saving/restoring (CD, ET, NW) and 622 * to CR0 bits that we require for shadow paging (PG) by the guest must cause VM-exits. 623 */ 624 /** @todo Avoid intercepting CR0.PE with unrestricted guest execution. Fix PGM 625 * enmGuestMode to be in-sync with the current mode. See @bugref{6398} 626 * and @bugref{6944}. */ 627 PVM pVM = pVCpu->CTX_SUFF(pVM); 628 return ( X86_CR0_PE 629 | X86_CR0_NE 630 | (pVM->hm.s.fNestedPaging ? 0 : X86_CR0_WP) 631 | X86_CR0_PG 632 | X86_CR0_ET /* Bit ignored on VM-entry and VM-exit. Don't let the guest modify the host CR0.ET */ 633 | X86_CR0_CD /* Bit ignored on VM-entry and VM-exit. Don't let the guest modify the host CR0.CD */ 634 | X86_CR0_NW); /* Bit ignored on VM-entry and VM-exit. Don't let the guest modify the host CR0.NW */ 635 } 636 637 638 /** 639 * Gets the CR4 guest/host mask that does not change through the lifetime of a VM. 640 * 641 * Any bit set in this mask is owned by the host/hypervisor and would cause a 642 * VM-exit when modified by the guest. 643 * 644 * @returns The static CR4 guest/host mask. 645 * @param pVCpu The cross context virtual CPU structure. 646 */ 647 DECL_FORCE_INLINE(uint64_t) hmR0VmxGetFixedCr4Mask(PVMCPU pVCpu) 648 { 649 /* 650 * We need to look at the host features here (for e.g. OSXSAVE, PCID) because 651 * these bits are reserved on hardware that does not support them. Since the 652 * CPU cannot refer to our virtual CPUID, we need to intercept CR4 changes to 653 * these bits and handle it depending on whether we expose them to the guest. 654 */ 655 PVM pVM = pVCpu->CTX_SUFF(pVM); 656 bool const fXSaveRstor = pVM->cpum.ro.HostFeatures.fXSaveRstor; 657 bool const fPcid = pVM->cpum.ro.HostFeatures.fPcid; 658 return ( X86_CR4_VMXE 659 | X86_CR4_VME 660 | X86_CR4_PAE 661 | X86_CR4_PGE 662 | X86_CR4_PSE 663 | (fXSaveRstor ? X86_CR4_OSXSAVE : 0) 664 | (fPcid ? X86_CR4_PCIDE : 0)); 665 } 666 667 668 /** 669 * Returns whether the the VM-exit MSR-store area differs from the VM-exit MSR-load 670 * area. 671 * 672 * @returns @c true if it's different, @c false otherwise. 673 * @param pVmcsInfo The VMCS info. object. 674 */ 675 DECL_FORCE_INLINE(bool) hmR0VmxIsSeparateExitMsrStoreAreaVmcs(PCVMXVMCSINFO pVmcsInfo) 676 { 677 return RT_BOOL( pVmcsInfo->pvGuestMsrStore != pVmcsInfo->pvGuestMsrLoad 678 && pVmcsInfo->pvGuestMsrStore); 679 } 680 681 682 /** 683 * Adds one or more exceptions to the exception bitmap and commits it to the current 684 * VMCS. 685 * 686 * @returns VBox status code. 687 * @param pVmxTransient The VMX-transient structure. 
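The fixed CR0 guest/host mask above determines which CR0 bits are host-owned, i.e. which guest modifications trap. A small, self-contained sketch of how such a mask interacts with the CR0 read shadow is shown below; GetFixedCr0Mask() and WouldCr0WriteExit() are illustrative helpers, only the CR0 bit values are architectural.

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Architectural CR0 bits used in the mask. */
#define CR0_PE  UINT64_C(0x00000001)
#define CR0_ET  UINT64_C(0x00000010)
#define CR0_NE  UINT64_C(0x00000020)
#define CR0_WP  UINT64_C(0x00010000)
#define CR0_NW  UINT64_C(0x20000000)
#define CR0_CD  UINT64_C(0x40000000)
#define CR0_PG  UINT64_C(0x80000000)

/* Builds the host-owned CR0 mask; WP is only host-owned when nested paging is unavailable. */
static uint64_t GetFixedCr0Mask(bool fNestedPaging)
{
    return CR0_PE | CR0_NE | (fNestedPaging ? 0 : CR0_WP) | CR0_PG | CR0_ET | CR0_CD | CR0_NW;
}

/* A guest MOV-to-CR0 exits when it changes any host-owned bit relative to the read shadow. */
static bool WouldCr0WriteExit(uint64_t uMask, uint64_t uReadShadow, uint64_t uNewCr0)
{
    return ((uNewCr0 ^ uReadShadow) & uMask) != 0;
}

int main(void)
{
    uint64_t const uMask = GetFixedCr0Mask(true /* nested paging */);
    printf("mask=%#llx exit=%d\n", (unsigned long long)uMask,
           WouldCr0WriteExit(uMask, CR0_PE | CR0_PG | CR0_NE, CR0_PE | CR0_NE)); /* clearing PG -> exit */
    return 0;
}
```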
688 * @param uXcptMask The exception(s) to add. 689 */ 690 static int hmR0VmxAddXcptInterceptMask(PVMXTRANSIENT pVmxTransient, uint32_t uXcptMask) 691 { 692 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo; 693 uint32_t uXcptBitmap = pVmcsInfo->u32XcptBitmap; 694 if ((uXcptBitmap & uXcptMask) != uXcptMask) 695 { 696 uXcptBitmap |= uXcptMask; 697 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap); 698 AssertRCReturn(rc, rc); 699 pVmcsInfo->u32XcptBitmap = uXcptBitmap; 700 } 701 return VINF_SUCCESS; 702 } 703 704 705 /** 706 * Adds an exception to the exception bitmap and commits it to the current VMCS. 707 * 708 * @returns VBox status code. 709 * @param pVmxTransient The VMX-transient structure. 710 * @param uXcpt The exception to add. 711 */ 712 static int hmR0VmxAddXcptIntercept(PVMXTRANSIENT pVmxTransient, uint8_t uXcpt) 713 { 714 Assert(uXcpt <= X86_XCPT_LAST); 715 return hmR0VmxAddXcptInterceptMask(pVmxTransient, RT_BIT_32(uXcpt)); 716 } 717 718 719 /** 720 * Remove one or more exceptions from the exception bitmap and commits it to the 721 * current VMCS. 722 * 723 * This takes care of not removing the exception intercept if a nested-guest 724 * requires the exception to be intercepted. 725 * 726 * @returns VBox status code. 727 * @param pVCpu The cross context virtual CPU structure. 728 * @param pVmxTransient The VMX-transient structure. 729 * @param uXcptMask The exception(s) to remove. 730 */ 731 static int hmR0VmxRemoveXcptInterceptMask(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient, uint32_t uXcptMask) 732 { 733 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo; 734 uint32_t u32XcptBitmap = pVmcsInfo->u32XcptBitmap; 735 if (u32XcptBitmap & uXcptMask) 736 { 737 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX 738 if (!pVmxTransient->fIsNestedGuest) 739 { /* likely */ } 740 else 741 { 742 PCVMXVVMCS pVmcsNstGst = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs); 743 uXcptMask &= ~pVmcsNstGst->u32XcptBitmap; 744 } 745 #endif 746 #ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS 747 uXcptMask &= ~( RT_BIT(X86_XCPT_BP) 748 | RT_BIT(X86_XCPT_DE) 749 | RT_BIT(X86_XCPT_NM) 750 | RT_BIT(X86_XCPT_TS) 751 | RT_BIT(X86_XCPT_UD) 752 | RT_BIT(X86_XCPT_NP) 753 | RT_BIT(X86_XCPT_SS) 754 | RT_BIT(X86_XCPT_GP) 755 | RT_BIT(X86_XCPT_PF) 756 | RT_BIT(X86_XCPT_MF)); 757 #elif defined(HMVMX_ALWAYS_TRAP_PF) 758 uXcptMask &= ~RT_BIT(X86_XCPT_PF); 759 #endif 760 if (uXcptMask) 761 { 762 /* Validate we are not removing any essential exception intercepts. */ 763 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging || !(uXcptMask & RT_BIT(X86_XCPT_PF))); RT_NOREF(pVCpu); 764 Assert(!(uXcptMask & RT_BIT(X86_XCPT_DB))); 765 Assert(!(uXcptMask & RT_BIT(X86_XCPT_AC))); 766 767 /* Remove it from the exception bitmap. */ 768 u32XcptBitmap &= ~uXcptMask; 769 770 /* Commit and update the cache if necessary. */ 771 if (pVmcsInfo->u32XcptBitmap != u32XcptBitmap) 772 { 773 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, u32XcptBitmap); 774 AssertRCReturn(rc, rc); 775 pVmcsInfo->u32XcptBitmap = u32XcptBitmap; 776 } 777 } 778 } 779 return VINF_SUCCESS; 780 } 781 782 783 /** 784 * Remove an exceptions from the exception bitmap and commits it to the current 785 * VMCS. 786 * 787 * @returns VBox status code. 788 * @param pVCpu The cross context virtual CPU structure. 789 * @param pVmxTransient The VMX-transient structure. 790 * @param uXcpt The exception to remove. 
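For reference, the add/remove exception-intercept helpers above boil down to bit operations on a cached copy of the exception bitmap, with a VMWRITE only when the value actually changes. The sketch below mimics that caching behaviour with a plain struct and a commit counter; the names are made up and the VMWRITE itself is elided.

```c
#include <stdint.h>
#include <stdio.h>

/* Cached copy of the exception bitmap plus a commit counter (stand-in for the VMCS write). */
typedef struct XCPTBITMAPCACHE { uint32_t uBitmap; unsigned cCommits; } XCPTBITMAPCACHE;

static void CommitXcptBitmap(XCPTBITMAPCACHE *pCache, uint32_t uNew)
{
    if (pCache->uBitmap != uNew)    /* Only touch the VMCS when the value actually changes. */
    {
        /* Real code would VMWRITE the exception-bitmap field here. */
        pCache->uBitmap = uNew;
        pCache->cCommits++;
    }
}

static void AddXcptInterceptMask(XCPTBITMAPCACHE *pCache, uint32_t uXcptMask)
{
    CommitXcptBitmap(pCache, pCache->uBitmap | uXcptMask);
}

static void RemoveXcptInterceptMask(XCPTBITMAPCACHE *pCache, uint32_t uXcptMask, uint32_t uMustKeepMask)
{
    uXcptMask &= ~uMustKeepMask;                    /* e.g. bits a nested-guest still wants intercepted */
    CommitXcptBitmap(pCache, pCache->uBitmap & ~uXcptMask);
}

int main(void)
{
    XCPTBITMAPCACHE Cache = { 0, 0 };
    AddXcptInterceptMask(&Cache, (1u << 14) | (1u << 6));   /* #PF and #UD */
    AddXcptInterceptMask(&Cache, 1u << 14);                 /* already set: no commit */
    RemoveXcptInterceptMask(&Cache, 1u << 6, 1u << 14);     /* drop #UD, keep #PF */
    printf("bitmap=%#x commits=%u\n", Cache.uBitmap, Cache.cCommits);
    return 0;
}
```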
791 */ 792 static int hmR0VmxRemoveXcptIntercept(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient, uint8_t uXcpt) 793 { 794 return hmR0VmxRemoveXcptInterceptMask(pVCpu, pVmxTransient, RT_BIT(uXcpt)); 795 } 796 797 798 /** 799 * Loads the VMCS specified by the VMCS info. object. 800 * 801 * @returns VBox status code. 802 * @param pVmcsInfo The VMCS info. object. 803 */ 804 static int hmR0VmxLoadVmcs(PVMXVMCSINFO pVmcsInfo) 805 { 806 Assert(pVmcsInfo); 807 Assert(pVmcsInfo->HCPhysVmcs); 808 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD)); 809 810 if (pVmcsInfo->fVmcsState & VMX_V_VMCS_LAUNCH_STATE_CLEAR) 811 { 812 int rc = VMXLoadVmcs(pVmcsInfo->HCPhysVmcs); 813 if (RT_SUCCESS(rc)) 814 { 815 pVmcsInfo->fVmcsState |= VMX_V_VMCS_LAUNCH_STATE_CURRENT; 816 return VINF_SUCCESS; 817 } 818 return rc; 819 } 820 return VERR_VMX_INVALID_VMCS_LAUNCH_STATE; 821 } 822 823 824 /** 825 * Clears the VMCS specified by the VMCS info. object. 826 * 827 * @returns VBox status code. 828 * @param pVmcsInfo The VMCS info. object. 829 */ 830 static int hmR0VmxClearVmcs(PVMXVMCSINFO pVmcsInfo) 831 { 832 Assert(pVmcsInfo); 833 Assert(pVmcsInfo->HCPhysVmcs); 834 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD)); 835 836 int rc = VMXClearVmcs(pVmcsInfo->HCPhysVmcs); 837 if (RT_SUCCESS(rc)) 838 pVmcsInfo->fVmcsState = VMX_V_VMCS_LAUNCH_STATE_CLEAR; 839 return rc; 840 } 841 842 843 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX 844 /** 845 * Switches the current VMCS to the one specified. 846 * 847 * @returns VBox status code. 848 * @param pVmcsInfoFrom The VMCS info. object we are switching from. 849 * @param pVmcsInfoTo The VMCS info. object we are switching to. 850 * 851 * @remarks Called with interrupts disabled. 852 */ 853 static int hmR0VmxSwitchVmcs(PVMXVMCSINFO pVmcsInfoFrom, PVMXVMCSINFO pVmcsInfoTo) 854 { 855 Assert(pVmcsInfoFrom); 856 Assert(pVmcsInfoTo); 857 858 /* 859 * Clear the VMCS we are switching out if it has not already been cleared. 860 * This will sync any CPU internal data back to the VMCS. 861 */ 862 if (pVmcsInfoFrom->fVmcsState != VMX_V_VMCS_LAUNCH_STATE_CLEAR) 863 { 864 int rc = hmR0VmxClearVmcs(pVmcsInfoFrom); 865 if (RT_SUCCESS(rc)) 866 { /* likely */ } 867 else 868 return rc; 869 } 870 871 /* 872 * Clear the VMCS we are switching to if it has not already been cleared. 873 * This will initialize the VMCS launch state to "clear" required for loading it. 874 * 875 * See Intel spec. 31.6 "Preparation And Launching A Virtual Machine". 876 */ 877 if (pVmcsInfoTo->fVmcsState != VMX_V_VMCS_LAUNCH_STATE_CLEAR) 878 { 879 int rc = hmR0VmxClearVmcs(pVmcsInfoTo); 880 if (RT_SUCCESS(rc)) 881 { /* likely */ } 882 else 883 return rc; 884 } 885 886 /* 887 * Finally, load the VMCS we are switching to. 888 */ 889 return hmR0VmxLoadVmcs(pVmcsInfoTo); 890 } 891 #endif /* VBOX_WITH_NESTED_HWVIRT_VMX */ 892 893 894 /** 580 895 * Updates the VM's last error record. 581 896 * … … 605 920 * 606 921 * @returns VBox status code. 607 * @param pVmxTransient Pointer to the VMXtransient structure.922 * @param pVmxTransient The VMX-transient structure. 608 923 * 609 924 * @remarks No-long-jump zone!!! … … 616 931 } 617 932 933 618 934 #ifdef VBOX_STRICT 619 935 /** … … 622 938 * 623 939 * @returns VBox status code. 624 * @param pVmxTransient Pointer to the VMXtransient structure.940 * @param pVmxTransient The VMX-transient structure. 625 941 * 626 942 * @remarks No-long-jump zone!!! … … 639 955 * 640 956 * @returns VBox status code. 
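The load/clear/switch trio above follows the VMCLEAR-before-VMPTRLD rule from the Intel spec. The following sketch models only the launch-state bookkeeping as a tiny state machine; ClearVmcs()/LoadVmcs()/SwitchVmcs() are simplified stand-ins and do not issue real VMX instructions.

```c
#include <stdio.h>

/* Simplified launch-state flags; the real code keeps equivalent state per VMCS info. object. */
typedef enum { VMCS_STATE_CLEAR = 1, VMCS_STATE_CURRENT = 2, VMCS_STATE_LAUNCHED = 4 } VMCSSTATE;

typedef struct VMCS { unsigned fState; } VMCS;

static int ClearVmcs(VMCS *pVmcs) { pVmcs->fState = VMCS_STATE_CLEAR; return 0; }   /* VMCLEAR */

static int LoadVmcs(VMCS *pVmcs)                                                    /* VMPTRLD */
{
    if (!(pVmcs->fState & VMCS_STATE_CLEAR))
        return -1;                                  /* must be cleared before it can become current */
    pVmcs->fState |= VMCS_STATE_CURRENT;
    return 0;
}

/* Mirrors the switch sequence: clear "from", clear "to" if needed, then load "to". */
static int SwitchVmcs(VMCS *pFrom, VMCS *pTo)
{
    if (pFrom->fState != VMCS_STATE_CLEAR && ClearVmcs(pFrom) != 0) return -1;
    if (pTo->fState   != VMCS_STATE_CLEAR && ClearVmcs(pTo)   != 0) return -1;
    return LoadVmcs(pTo);
}

int main(void)
{
    VMCS GstVmcs = { VMCS_STATE_LAUNCHED | VMCS_STATE_CURRENT };
    VMCS NstVmcs = { 0 };
    printf("switch rc=%d guest=%#x nested=%#x\n", SwitchVmcs(&GstVmcs, &NstVmcs),
           GstVmcs.fState, NstVmcs.fState);
    return 0;
}
```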
641 * @param pVmxTransient Pointer to the VMXtransient structure.957 * @param pVmxTransient The VMX-transient structure. 642 958 * 643 959 * @remarks No-long-jump zone!!! … … 657 973 * 658 974 * @returns VBox status code. 659 * @param pVmxTransient Pointer to the VMXtransient structure.975 * @param pVmxTransient The VMX-transient structure. 660 976 */ 661 977 DECLINLINE(int) hmR0VmxReadExitIntInfoVmcs(PVMXTRANSIENT pVmxTransient) … … 676 992 * 677 993 * @returns VBox status code. 678 * @param pVmxTransient Pointer to the VMXtransient structure.994 * @param pVmxTransient The VMX-transient structure. 679 995 */ 680 996 DECLINLINE(int) hmR0VmxReadExitIntErrorCodeVmcs(PVMXTRANSIENT pVmxTransient) … … 695 1011 * 696 1012 * @returns VBox status code. 697 * @param pVmxTransient Pointer to the VMXtransient structure.1013 * @param pVmxTransient The VMX-transient structure. 698 1014 */ 699 1015 DECLINLINE(int) hmR0VmxReadExitInstrLenVmcs(PVMXTRANSIENT pVmxTransient) … … 714 1030 * 715 1031 * @returns VBox status code. 716 * @param pVmxTransient Pointer to the VMXtransient structure.1032 * @param pVmxTransient The VMX-transient structure. 717 1033 */ 718 1034 DECLINLINE(int) hmR0VmxReadExitInstrInfoVmcs(PVMXTRANSIENT pVmxTransient) … … 734 1050 * @param pVCpu The cross context virtual CPU structure of the 735 1051 * calling EMT. (Required for the VMCS cache case.) 736 * @param pVmxTransient Pointer to the VMXtransient structure.1052 * @param pVmxTransient The VMX-transient structure. 737 1053 */ 738 1054 DECLINLINE(int) hmR0VmxReadExitQualVmcs(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) … … 754 1070 * @param pVCpu The cross context virtual CPU structure of the 755 1071 * calling EMT. (Required for the VMCS cache case.) 756 * @param pVmxTransient Pointer to the VMXtransient structure.1072 * @param pVmxTransient The VMX-transient structure. 757 1073 */ 758 1074 DECLINLINE(int) hmR0VmxReadGuestLinearAddrVmcs(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) … … 773 1089 * 774 1090 * @returns VBox status code. 775 * @param pVmxTransient Pointer to the VMXtransient structure.1091 * @param pVmxTransient The VMX-transient structure. 776 1092 * 777 1093 * @remarks No-long-jump zone!!! … … 794 1110 * 795 1111 * @returns VBox status code. 796 * @param pVmxTransient Pointer to the VMXtransient structure.1112 * @param pVmxTransient The VMX-transient structure. 797 1113 */ 798 1114 DECLINLINE(int) hmR0VmxReadIdtVectoringErrorCodeVmcs(PVMXTRANSIENT pVmxTransient) … … 826 1142 if (pVM) 827 1143 { 828 /* Write the VMCS revision dwordto the VMXON region. */1144 /* Write the VMCS revision identifier to the VMXON region. */ 829 1145 *(uint32_t *)pvCpuPage = RT_BF_GET(pVM->hm.s.vmx.Msrs.u64Basic, VMX_BF_BASIC_VMCS_ID); 830 1146 } … … 886 1202 887 1203 /** 888 * Allocates and maps onephysically contiguous page. The allocated page is889 * zero'd out . (Used by various VT-x structures).1204 * Allocates and maps a physically contiguous page. The allocated page is 1205 * zero'd out (used by various VT-x structures). 890 1206 * 891 1207 * @returns IPRT status code. 
… … 898 1214 static int hmR0VmxPageAllocZ(PRTR0MEMOBJ pMemObj, PRTR0PTR ppVirt, PRTHCPHYS pHCPhys) 899 1215 { 900 AssertPtrReturn(pMemObj, VERR_INVALID_PARAMETER); 901 AssertPtrReturn(ppVirt, VERR_INVALID_PARAMETER); 902 AssertPtrReturn(pHCPhys, VERR_INVALID_PARAMETER); 903 904 int rc = RTR0MemObjAllocCont(pMemObj, PAGE_SIZE, false /* fExecutable */); 1216 AssertPtr(pMemObj); 1217 AssertPtr(ppVirt); 1218 AssertPtr(pHCPhys); 1219 int rc = RTR0MemObjAllocCont(pMemObj, X86_PAGE_4K_SIZE, false /* fExecutable */); 905 1220 if (RT_FAILURE(rc)) 906 1221 return rc; 907 1222 *ppVirt = RTR0MemObjAddress(*pMemObj); 908 1223 *pHCPhys = RTR0MemObjGetPagePhysAddr(*pMemObj, 0 /* iPage */); 909 ASMMemZero32(*ppVirt, PAGE_SIZE);1224 ASMMemZero32(*ppVirt, X86_PAGE_4K_SIZE); 910 1225 return VINF_SUCCESS; 911 1226 } … … 913 1228 914 1229 /** 915 * Frees and unmaps an allocated physical page.1230 * Frees and unmaps an allocated, physical page. 916 1231 * 917 1232 * @param pMemObj Pointer to the ring-0 memory object. … … 926 1241 AssertPtr(ppVirt); 927 1242 AssertPtr(pHCPhys); 928 if (*pMemObj != NIL_RTR0MEMOBJ) 929 { 930 int rc = RTR0MemObjFree(*pMemObj, true /* fFreeMappings */); 931 AssertRC(rc); 932 *pMemObj = NIL_RTR0MEMOBJ; 933 *ppVirt = 0; 934 *pHCPhys = 0; 935 } 936 } 937 938 939 /** 940 * Worker function to free VT-x related structures. 1243 /* NULL is valid, accepted and ignored by the free function below. */ 1244 RTR0MemObjFree(*pMemObj, true /* fFreeMappings */); 1245 *pMemObj = NIL_RTR0MEMOBJ; 1246 *ppVirt = NULL; 1247 *pHCPhys = NIL_RTHCPHYS; 1248 } 1249 1250 1251 /** 1252 * Initializes a VMCS info. object. 1253 * 1254 * @param pVmcsInfo The VMCS info. object. 1255 */ 1256 static void hmR0VmxInitVmcsInfo(PVMXVMCSINFO pVmcsInfo) 1257 { 1258 RT_ZERO(*pVmcsInfo); 1259 1260 Assert(pVmcsInfo->hMemObjVmcs == NIL_RTR0MEMOBJ); 1261 Assert(pVmcsInfo->hMemObjMsrBitmap == NIL_RTR0MEMOBJ); 1262 Assert(pVmcsInfo->hMemObjGuestMsrLoad == NIL_RTR0MEMOBJ); 1263 Assert(pVmcsInfo->hMemObjGuestMsrStore == NIL_RTR0MEMOBJ); 1264 Assert(pVmcsInfo->hMemObjHostMsrLoad == NIL_RTR0MEMOBJ); 1265 pVmcsInfo->HCPhysVmcs = NIL_RTHCPHYS; 1266 pVmcsInfo->HCPhysMsrBitmap = NIL_RTHCPHYS; 1267 pVmcsInfo->HCPhysGuestMsrLoad = NIL_RTHCPHYS; 1268 pVmcsInfo->HCPhysGuestMsrStore = NIL_RTHCPHYS; 1269 pVmcsInfo->HCPhysHostMsrLoad = NIL_RTHCPHYS; 1270 pVmcsInfo->HCPhysVirtApic = NIL_RTHCPHYS; 1271 pVmcsInfo->HCPhysEPTP = NIL_RTHCPHYS; 1272 pVmcsInfo->u64VmcsLinkPtr = NIL_RTHCPHYS; 1273 } 1274 1275 1276 /** 1277 * Frees the VT-x structures for a VMCS info. object. 1278 * 1279 * @param pVM The cross context VM structure. 1280 * @param pVmcsInfo The VMCS info. object. 1281 */ 1282 static void hmR0VmxFreeVmcsInfo(PVM pVM, PVMXVMCSINFO pVmcsInfo) 1283 { 1284 hmR0VmxPageFree(&pVmcsInfo->hMemObjVmcs, &pVmcsInfo->pvVmcs, &pVmcsInfo->HCPhysVmcs); 1285 1286 if (pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_MSR_BITMAPS) 1287 hmR0VmxPageFree(&pVmcsInfo->hMemObjMsrBitmap, &pVmcsInfo->pvMsrBitmap, &pVmcsInfo->HCPhysMsrBitmap); 1288 1289 hmR0VmxPageFree(&pVmcsInfo->hMemObjHostMsrLoad, &pVmcsInfo->pvHostMsrLoad, &pVmcsInfo->HCPhysHostMsrLoad); 1290 hmR0VmxPageFree(&pVmcsInfo->hMemObjGuestMsrLoad, &pVmcsInfo->pvGuestMsrLoad, &pVmcsInfo->HCPhysGuestMsrLoad); 1291 hmR0VmxPageFree(&pVmcsInfo->hMemObjGuestMsrStore, &pVmcsInfo->pvGuestMsrStore, &pVmcsInfo->HCPhysGuestMsrStore); 1292 1293 hmR0VmxInitVmcsInfo(pVmcsInfo); 1294 } 1295 1296 1297 /** 1298 * Allocates the VT-x structures for a VMCS info. object. 1299 * 1300 * @returns VBox status code. 
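The page alloc/free pair above keeps each allocation as a (memory object, virtual address, physical address) triple and makes freeing tolerant of never-allocated entries so cleanup can run unconditionally. A user-mode approximation of that pattern, assuming posix_memalign() as a stand-in for the ring-0 contiguous-page allocator:

```c
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>

#define PAGE_4K 4096u

/* Stand-in for the (hMemObj, pvVirt, HCPhys) triple the VMX code tracks per allocation. */
typedef struct PAGEALLOC { void *pvVirt; uint64_t uPhys; } PAGEALLOC;

/* Allocates one zeroed, page-aligned buffer; in ring-0 this is a physically contiguous page. */
static int PageAllocZ(PAGEALLOC *pAlloc)
{
    if (posix_memalign(&pAlloc->pvVirt, PAGE_4K, PAGE_4K) != 0)
        return -1;
    memset(pAlloc->pvVirt, 0, PAGE_4K);
    pAlloc->uPhys = (uintptr_t)pAlloc->pvVirt;      /* placeholder; real code queries the HC phys addr */
    return 0;
}

/* Free tolerates a never-allocated or already-freed entry so callers can clean up en masse. */
static void PageFree(PAGEALLOC *pAlloc)
{
    free(pAlloc->pvVirt);                           /* free(NULL) is a no-op, mirroring NIL handles */
    pAlloc->pvVirt = NULL;
    pAlloc->uPhys  = 0;
}

int main(void)
{
    PAGEALLOC Vmcs = { NULL, 0 }, MsrBitmap = { NULL, 0 };
    int rc = PageAllocZ(&Vmcs);
    if (rc == 0) rc = PageAllocZ(&MsrBitmap);
    PageFree(&MsrBitmap);                           /* safe even if the allocation above failed */
    PageFree(&Vmcs);
    printf("rc=%d\n", rc);
    return 0;
}
```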
1301 * @param pVCpu The cross context virtual CPU structure. 1302 * @param pVmcsInfo The VMCS info. object. 1303 * @param fIsNstGstVmcs Whether this is a nested-guest VMCS. 1304 */ 1305 static int hmR0VmxAllocVmcsInfo(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNstGstVmcs) 1306 { 1307 PVM pVM = pVCpu->CTX_SUFF(pVM); 1308 1309 /* Allocate the guest VM control structure (VMCS). */ 1310 int rc = hmR0VmxPageAllocZ(&pVmcsInfo->hMemObjVmcs, &pVmcsInfo->pvVmcs, &pVmcsInfo->HCPhysVmcs); 1311 if (RT_SUCCESS(rc)) 1312 { 1313 if (!fIsNstGstVmcs) 1314 { 1315 /* Get the allocated virtual-APIC page from the virtual APIC device. */ 1316 if ( PDMHasApic(pVCpu->CTX_SUFF(pVM)) 1317 && (pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_TPR_SHADOW)) 1318 { 1319 rc = APICGetApicPageForCpu(pVCpu, &pVmcsInfo->HCPhysVirtApic, (PRTR0PTR)&pVmcsInfo->pbVirtApic, 1320 NULL /* pR3Ptr */, NULL /* pRCPtr */); 1321 } 1322 } 1323 else 1324 { 1325 Assert(pVmcsInfo->HCPhysVirtApic == NIL_RTHCPHYS); 1326 Assert(!pVmcsInfo->pbVirtApic); 1327 } 1328 1329 if (RT_SUCCESS(rc)) 1330 { 1331 /* 1332 * Allocate the MSR-bitmap if supported by the CPU. The MSR-bitmap is for 1333 * transparent accesses of specific MSRs. 1334 * 1335 * If the condition for enabling MSR bitmaps changes here, don't forget to 1336 * update HMIsMsrBitmapActive(). 1337 * 1338 * We don't share MSR bitmaps between the guest and nested-guest as we then 1339 * don't need to care about carefully restoring the guest MSR bitmap. 1340 * The guest visible nested-guest MSR bitmap needs to remain unchanged. 1341 * Hence, allocate a separate MSR bitmap for the guest and nested-guest. 1342 */ 1343 if (pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_MSR_BITMAPS) 1344 { 1345 rc = hmR0VmxPageAllocZ(&pVmcsInfo->hMemObjMsrBitmap, &pVmcsInfo->pvMsrBitmap, &pVmcsInfo->HCPhysMsrBitmap); 1346 if (RT_SUCCESS(rc)) 1347 ASMMemFill32(pVmcsInfo->pvMsrBitmap, X86_PAGE_4K_SIZE, UINT32_C(0xffffffff)); 1348 } 1349 1350 if (RT_SUCCESS(rc)) 1351 { 1352 /* 1353 * Allocate the VM-entry MSR-load area for the guest MSRs. 1354 * 1355 * Similar to MSR-bitmaps, we do not share the auto MSR-load/store are between 1356 * the guest and nested-guest. 1357 */ 1358 rc = hmR0VmxPageAllocZ(&pVmcsInfo->hMemObjGuestMsrLoad, &pVmcsInfo->pvGuestMsrLoad, 1359 &pVmcsInfo->HCPhysGuestMsrLoad); 1360 if (RT_SUCCESS(rc)) 1361 { 1362 /* 1363 * We use the same page for VM-entry MSR-load and VM-exit MSR store areas. 1364 * These contain the guest MSRs to load on VM-entry and store on VM-exit. 1365 */ 1366 Assert(pVmcsInfo->hMemObjGuestMsrStore == NIL_RTR0MEMOBJ); 1367 pVmcsInfo->pvGuestMsrStore = pVmcsInfo->pvGuestMsrLoad; 1368 pVmcsInfo->HCPhysGuestMsrStore = pVmcsInfo->HCPhysGuestMsrLoad; 1369 1370 /* Allocate the VM-exit MSR-load page for the host MSRs. */ 1371 rc = hmR0VmxPageAllocZ(&pVmcsInfo->hMemObjHostMsrLoad, &pVmcsInfo->pvHostMsrLoad, 1372 &pVmcsInfo->HCPhysHostMsrLoad); 1373 } 1374 } 1375 } 1376 } 1377 1378 return rc; 1379 } 1380 1381 1382 /** 1383 * Free all VT-x structures for the VM. 941 1384 * 942 1385 * @returns IPRT status code. 
… … 945 1388 static void hmR0VmxStructsFree(PVM pVM) 946 1389 { 947 for (VMCPUID i = 0; i < pVM->cCpus; i++)948 {949 PVMCPU pVCpu = &pVM->aCpus[i];950 AssertPtr(pVCpu);951 952 hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjHostMsr, &pVCpu->hm.s.vmx.pvHostMsr, &pVCpu->hm.s.vmx.HCPhysHostMsr);953 hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjGuestMsr, &pVCpu->hm.s.vmx.pvGuestMsr, &pVCpu->hm.s.vmx.HCPhysGuestMsr);954 955 if (pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_MSR_BITMAPS)956 hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjMsrBitmap, &pVCpu->hm.s.vmx.pvMsrBitmap, &pVCpu->hm.s.vmx.HCPhysMsrBitmap);957 958 hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjVmcs, &pVCpu->hm.s.vmx.pvVmcs, &pVCpu->hm.s.vmx.HCPhysVmcs);959 }960 961 hmR0VmxPageFree(&pVM->hm.s.vmx.hMemObjApicAccess, (PRTR0PTR)&pVM->hm.s.vmx.pbApicAccess, &pVM->hm.s.vmx.HCPhysApicAccess);962 1390 #ifdef VBOX_WITH_CRASHDUMP_MAGIC 963 1391 hmR0VmxPageFree(&pVM->hm.s.vmx.hMemObjScratch, &pVM->hm.s.vmx.pbScratch, &pVM->hm.s.vmx.HCPhysScratch); 964 1392 #endif 965 } 966 967 968 /** 969 * Worker function to allocate VT-x related VM structures. 1393 hmR0VmxPageFree(&pVM->hm.s.vmx.hMemObjApicAccess, (PRTR0PTR)&pVM->hm.s.vmx.pbApicAccess, &pVM->hm.s.vmx.HCPhysApicAccess); 1394 1395 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++) 1396 { 1397 PVMCPU pVCpu = &pVM->aCpus[idCpu]; 1398 PVMXVMCSINFO pVmcsInfo = &pVCpu->hm.s.vmx.VmcsInfo; 1399 hmR0VmxFreeVmcsInfo(pVM, pVmcsInfo); 1400 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX 1401 if (pVM->cpum.ro.GuestFeatures.fVmx) 1402 { 1403 pVmcsInfo = &pVCpu->hm.s.vmx.VmcsInfoNstGst; 1404 hmR0VmxFreeVmcsInfo(pVM, pVmcsInfo); 1405 } 1406 #endif 1407 } 1408 } 1409 1410 1411 /** 1412 * Allocate all VT-x structures for the VM. 970 1413 * 971 1414 * @returns IPRT status code. … … 975 1418 { 976 1419 /* 977 * Initialize members up-front so we can cleanup properly on allocation failure. 978 */ 979 #define VMXLOCAL_INIT_VM_MEMOBJ(a_Name, a_VirtPrefix) \ 980 pVM->hm.s.vmx.hMemObj##a_Name = NIL_RTR0MEMOBJ; \ 981 pVM->hm.s.vmx.a_VirtPrefix##a_Name = 0; \ 982 pVM->hm.s.vmx.HCPhys##a_Name = 0; 983 984 #define VMXLOCAL_INIT_VMCPU_MEMOBJ(a_Name, a_VirtPrefix) \ 985 pVCpu->hm.s.vmx.hMemObj##a_Name = NIL_RTR0MEMOBJ; \ 986 pVCpu->hm.s.vmx.a_VirtPrefix##a_Name = 0; \ 987 pVCpu->hm.s.vmx.HCPhys##a_Name = 0; 988 1420 * Sanity check the VMCS size reported by the CPU as we assume 4KB allocations. 1421 * The VMCS size cannot be more than 4096 bytes. 1422 * 1423 * See Intel spec. Appendix A.1 "Basic VMX Information". 1424 */ 1425 uint32_t const cbVmcs = RT_BF_GET(pVM->hm.s.vmx.Msrs.u64Basic, VMX_BF_BASIC_VMCS_SIZE); 1426 if (cbVmcs <= X86_PAGE_4K_SIZE) 1427 { /* likely */ } 1428 else 1429 { 1430 pVM->aCpus[0].hm.s.u32HMError = VMX_UFC_INVALID_VMCS_SIZE; 1431 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO; 1432 } 1433 1434 /* 1435 * Initialize/check members up-front so we can cleanup en masse on allocation failures. 
1436 */ 989 1437 #ifdef VBOX_WITH_CRASHDUMP_MAGIC 990 VMXLOCAL_INIT_VM_MEMOBJ(Scratch, pv); 1438 Assert(pVM->hm.s.vmx.hMemObjScratch == NIL_RTR0MEMOBJ); 1439 Assert(pVM->hm.s.vmx.pbScratch == NULL); 1440 pVM->hm.s.vmx.HCPhysScratch = NIL_RTHCPHYS; 991 1441 #endif 992 VMXLOCAL_INIT_VM_MEMOBJ(ApicAccess, pb); 993 994 AssertCompile(sizeof(VMCPUID) == sizeof(pVM->cCpus)); 995 for (VMCPUID i = 0; i < pVM->cCpus; i++) 996 { 997 PVMCPU pVCpu = &pVM->aCpus[i]; 998 VMXLOCAL_INIT_VMCPU_MEMOBJ(Vmcs, pv); 999 VMXLOCAL_INIT_VMCPU_MEMOBJ(MsrBitmap, pv); 1000 VMXLOCAL_INIT_VMCPU_MEMOBJ(GuestMsr, pv); 1001 VMXLOCAL_INIT_VMCPU_MEMOBJ(HostMsr, pv); 1002 } 1003 #undef VMXLOCAL_INIT_VMCPU_MEMOBJ 1004 #undef VMXLOCAL_INIT_VM_MEMOBJ 1005 1006 /* The VMCS size cannot be more than 4096 bytes. See Intel spec. Appendix A.1 "Basic VMX Information". */ 1007 AssertReturnStmt(RT_BF_GET(pVM->hm.s.vmx.Msrs.u64Basic, VMX_BF_BASIC_VMCS_SIZE) <= PAGE_SIZE, 1008 (&pVM->aCpus[0])->hm.s.u32HMError = VMX_UFC_INVALID_VMCS_SIZE, 1009 VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO); 1010 1011 /* 1012 * Allocate all the VT-x structures. 1442 1443 Assert(pVM->hm.s.vmx.hMemObjApicAccess == NIL_RTR0MEMOBJ); 1444 Assert(pVM->hm.s.vmx.pbApicAccess == NULL); 1445 pVM->hm.s.vmx.HCPhysApicAccess = NIL_RTHCPHYS; 1446 1447 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++) 1448 { 1449 PVMCPU pVCpu = &pVM->aCpus[idCpu]; 1450 hmR0VmxInitVmcsInfo(&pVCpu->hm.s.vmx.VmcsInfo); 1451 hmR0VmxInitVmcsInfo(&pVCpu->hm.s.vmx.VmcsInfoNstGst); 1452 } 1453 1454 /* 1455 * Allocate per-VM VT-x structures. 1013 1456 */ 1014 1457 int rc = VINF_SUCCESS; 1015 1458 #ifdef VBOX_WITH_CRASHDUMP_MAGIC 1459 /* Allocate crash-dump magic scratch page. */ 1016 1460 rc = hmR0VmxPageAllocZ(&pVM->hm.s.vmx.hMemObjScratch, &pVM->hm.s.vmx.pbScratch, &pVM->hm.s.vmx.HCPhysScratch); 1017 1461 if (RT_FAILURE(rc)) 1018 goto cleanup; 1462 { 1463 hmR0VmxStructsFree(pVM); 1464 return rc; 1465 } 1019 1466 strcpy((char *)pVM->hm.s.vmx.pbScratch, "SCRATCH Magic"); 1020 1467 *(uint64_t *)(pVM->hm.s.vmx.pbScratch + 16) = UINT64_C(0xdeadbeefdeadbeef); … … 1027 1474 &pVM->hm.s.vmx.HCPhysApicAccess); 1028 1475 if (RT_FAILURE(rc)) 1029 goto cleanup; 1476 { 1477 hmR0VmxStructsFree(pVM); 1478 return rc; 1479 } 1030 1480 } 1031 1481 … … 1033 1483 * Initialize per-VCPU VT-x structures. 1034 1484 */ 1035 for (VMCPUID i = 0; i < pVM->cCpus; i++) 1036 { 1037 PVMCPU pVCpu = &pVM->aCpus[i]; 1038 AssertPtr(pVCpu); 1039 1040 /* Allocate the VM control structure (VMCS). */ 1041 rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjVmcs, &pVCpu->hm.s.vmx.pvVmcs, &pVCpu->hm.s.vmx.HCPhysVmcs); 1042 if (RT_FAILURE(rc)) 1043 goto cleanup; 1044 1045 /* Get the allocated virtual-APIC page from the APIC device for transparent TPR accesses. */ 1046 if ( PDMHasApic(pVM) 1047 && (pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_TPR_SHADOW)) 1048 { 1049 rc = APICGetApicPageForCpu(pVCpu, &pVCpu->hm.s.vmx.HCPhysVirtApic, (PRTR0PTR)&pVCpu->hm.s.vmx.pbVirtApic, 1050 NULL /* pR3Ptr */, NULL /* pRCPtr */); 1051 if (RT_FAILURE(rc)) 1052 goto cleanup; 1053 } 1054 1055 /* 1056 * Allocate the MSR-bitmap if supported by the CPU. The MSR-bitmap is for 1057 * transparent accesses of specific MSRs. 1058 * 1059 * If the condition for enabling MSR bitmaps changes here, don't forget to 1060 * update HMAreMsrBitmapsAvailable(). 
1061 */ 1062 if (pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_MSR_BITMAPS) 1063 { 1064 rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjMsrBitmap, &pVCpu->hm.s.vmx.pvMsrBitmap, 1065 &pVCpu->hm.s.vmx.HCPhysMsrBitmap); 1066 if (RT_FAILURE(rc)) 1067 goto cleanup; 1068 ASMMemFill32(pVCpu->hm.s.vmx.pvMsrBitmap, PAGE_SIZE, UINT32_C(0xffffffff)); 1069 } 1070 1071 /* Allocate the VM-entry MSR-load and VM-exit MSR-store page for the guest MSRs. */ 1072 rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjGuestMsr, &pVCpu->hm.s.vmx.pvGuestMsr, &pVCpu->hm.s.vmx.HCPhysGuestMsr); 1073 if (RT_FAILURE(rc)) 1074 goto cleanup; 1075 1076 /* Allocate the VM-exit MSR-load page for the host MSRs. */ 1077 rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjHostMsr, &pVCpu->hm.s.vmx.pvHostMsr, &pVCpu->hm.s.vmx.HCPhysHostMsr); 1078 if (RT_FAILURE(rc)) 1079 goto cleanup; 1485 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++) 1486 { 1487 /* Allocate the guest VMCS structures. */ 1488 PVMCPU pVCpu = &pVM->aCpus[idCpu]; 1489 rc = hmR0VmxAllocVmcsInfo(pVCpu, &pVCpu->hm.s.vmx.VmcsInfo, false /* fIsNstGstVmcs */); 1490 if (RT_SUCCESS(rc)) 1491 { 1492 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX 1493 /* Allocate the nested-guest VMCS structures, when the VMX feature is exposed to the guest. */ 1494 if (pVM->cpum.ro.GuestFeatures.fVmx) 1495 { 1496 rc = hmR0VmxAllocVmcsInfo(pVCpu, &pVCpu->hm.s.vmx.VmcsInfoNstGst, true /* fIsNstGstVmcs */); 1497 if (RT_SUCCESS(rc)) 1498 { /* likely */ } 1499 else 1500 break; 1501 } 1502 #endif 1503 } 1504 else 1505 break; 1506 } 1507 1508 if (RT_FAILURE(rc)) 1509 { 1510 hmR0VmxStructsFree(pVM); 1511 return rc; 1080 1512 } 1081 1513 1082 1514 return VINF_SUCCESS; 1083 1084 cleanup: 1085 hmR0VmxStructsFree(pVM); 1086 return rc; 1087 } 1088 1089 1090 /** 1091 * Does global VT-x initialization (called during module initialization). 1092 * 1093 * @returns VBox status code. 1094 */ 1095 VMMR0DECL(int) VMXR0GlobalInit(void) 1096 { 1097 #ifdef HMVMX_USE_FUNCTION_TABLE 1098 AssertCompile(VMX_EXIT_MAX + 1 == RT_ELEMENTS(g_apfnVMExitHandlers)); 1099 # ifdef VBOX_STRICT 1100 for (unsigned i = 0; i < RT_ELEMENTS(g_apfnVMExitHandlers); i++) 1101 Assert(g_apfnVMExitHandlers[i]); 1102 # endif 1515 } 1516 1517 1518 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX 1519 /** 1520 * Returns whether an MSR at the given MSR-bitmap offset is intercepted or not. 1521 * 1522 * @returns @c true if the MSR is intercepted, @c false otherwise. 1523 * @param pvMsrBitmap The MSR bitmap. 1524 * @param offMsr The MSR byte offset. 1525 * @param iBit The bit offset from the byte offset. 1526 */ 1527 DECLINLINE(bool) hmR0VmxIsMsrBitSet(const void *pvMsrBitmap, uint16_t offMsr, int32_t iBit) 1528 { 1529 uint8_t const * const pbMsrBitmap = (uint8_t const * const)pvMsrBitmap; 1530 Assert(pbMsrBitmap); 1531 Assert(offMsr + (iBit >> 3) <= X86_PAGE_4K_SIZE); 1532 return ASMBitTest(pbMsrBitmap + offMsr, iBit); 1533 } 1103 1534 #endif 1104 return VINF_SUCCESS; 1105 } 1106 1107 1108 /** 1109 * Does global VT-x termination (called during module termination). 1110 */ 1111 VMMR0DECL(void) VMXR0GlobalTerm() 1112 { 1113 /* Nothing to do currently. */ 1114 } 1115 1116 1117 /** 1118 * Sets up and activates VT-x on the current CPU. 1119 * 1120 * @returns VBox status code. 1121 * @param pHostCpu The HM physical-CPU structure. 1122 * @param pVM The cross context VM structure. Can be 1123 * NULL after a host resume operation. 1124 * @param pvCpuPage Pointer to the VMXON region (can be NULL if @a 1125 * fEnabledByHost is @c true). 
1126 * @param HCPhysCpuPage Physical address of the VMXON region (can be 0 if 1127 * @a fEnabledByHost is @c true). 1128 * @param fEnabledByHost Set if SUPR0EnableVTx() or similar was used to 1129 * enable VT-x on the host. 1130 * @param pHwvirtMsrs Pointer to the hardware-virtualization MSRs. 1131 */ 1132 VMMR0DECL(int) VMXR0EnableCpu(PHMPHYSCPU pHostCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage, bool fEnabledByHost, 1133 PCSUPHWVIRTMSRS pHwvirtMsrs) 1134 { 1135 Assert(pHostCpu); 1136 Assert(pHwvirtMsrs); 1137 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD)); 1138 1139 /* Enable VT-x if it's not already enabled by the host. */ 1140 if (!fEnabledByHost) 1141 { 1142 int rc = hmR0VmxEnterRootMode(pVM, HCPhysCpuPage, pvCpuPage); 1143 if (RT_FAILURE(rc)) 1144 return rc; 1145 } 1146 1147 /* 1148 * Flush all EPT tagged-TLB entries (in case VirtualBox or any other hypervisor have been 1149 * using EPTPs) so we don't retain any stale guest-physical mappings which won't get 1150 * invalidated when flushing by VPID. 1151 */ 1152 if (pHwvirtMsrs->u.vmx.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_ALL_CONTEXTS) 1153 { 1154 hmR0VmxFlushEpt(NULL /* pVCpu */, VMXTLBFLUSHEPT_ALL_CONTEXTS); 1155 pHostCpu->fFlushAsidBeforeUse = false; 1156 } 1157 else 1158 pHostCpu->fFlushAsidBeforeUse = true; 1159 1160 /* Ensure each VCPU scheduled on this CPU gets a new VPID on resume. See @bugref{6255}. */ 1161 ++pHostCpu->cTlbFlushes; 1162 1163 return VINF_SUCCESS; 1164 } 1165 1166 1167 /** 1168 * Deactivates VT-x on the current CPU. 1169 * 1170 * @returns VBox status code. 1171 * @param pvCpuPage Pointer to the VMXON region. 1172 * @param HCPhysCpuPage Physical address of the VMXON region. 1173 * 1174 * @remarks This function should never be called when SUPR0EnableVTx() or 1175 * similar was used to enable VT-x on the host. 1176 */ 1177 VMMR0DECL(int) VMXR0DisableCpu(void *pvCpuPage, RTHCPHYS HCPhysCpuPage) 1178 { 1179 RT_NOREF2(pvCpuPage, HCPhysCpuPage); 1180 1181 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD)); 1182 return hmR0VmxLeaveRootMode(); 1183 } 1184 1185 1186 /** 1187 * Sets the permission bits for the specified MSR in the MSR bitmap. 1188 * 1189 * @param pVCpu The cross context virtual CPU structure. 1190 * @param uMsr The MSR value. 1191 * @param enmRead Whether reading this MSR causes a VM-exit. 1192 * @param enmWrite Whether writing this MSR causes a VM-exit. 1193 */ 1194 static void hmR0VmxSetMsrPermission(PVMCPU pVCpu, uint32_t uMsr, VMXMSREXITREAD enmRead, VMXMSREXITWRITE enmWrite) 1195 { 1196 int32_t iBit; 1197 uint8_t *pbMsrBitmap = (uint8_t *)pVCpu->hm.s.vmx.pvMsrBitmap; 1198 1199 /* 1200 * MSR Layout: 1535 1536 1537 /** 1538 * Sets the permission bits for the specified MSR in the given MSR bitmap. 1539 * 1540 * If the passed VMCS is a nested-guest VMCS, this function ensures that the 1541 * read/write intercept is cleared from the MSR bitmap used for hardware-assisted 1542 * VMX execution of the nested-guest, only if nested-guest is also not intercepting 1543 * the read/write access of this MSR. 1544 * 1545 * @param pVmcsInfo The VMCS info. object. 1546 * @param fIsNstGstVmcs Whether this is a nested-guest VMCS. 1547 * @param idMsr The MSR value. 1548 * @param fMsrpm The MSR permissions (see VMXMSRPM_XXX). This must 1549 * include both a read -and- a write permission! 1550 * 1551 * @sa HMGetVmxMsrPermission. 
1552 */ 1553 static void hmR0VmxSetMsrPermission(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNstGstVmcs, uint32_t idMsr, uint32_t fMsrpm) 1554 { 1555 uint8_t *pbMsrBitmap = (uint8_t *)pVmcsInfo->pvMsrBitmap; 1556 Assert(pbMsrBitmap); 1557 Assert(VMXMSRPM_IS_FLAG_VALID(fMsrpm)); 1558 1559 /* 1560 * MSR-bitmap Layout: 1201 1561 * Byte index MSR range Interpreted as 1202 1562 * 0x000 - 0x3ff 0x00000000 - 0x00001fff Low MSR read bits. … … 1206 1566 * 1207 1567 * A bit corresponding to an MSR within the above range causes a VM-exit 1208 * if the bit is 1 on executions of RDMSR/WRMSR. 1209 * 1210 * If an MSR falls out of the MSR range, it always cause a VM-exit. 1568 * if the bit is 1 on executions of RDMSR/WRMSR. If an MSR falls out of 1569 * the MSR range, it always cause a VM-exit. 1211 1570 * 1212 1571 * See Intel spec. 24.6.9 "MSR-Bitmap Address". 1213 1572 */ 1214 if (uMsr <= 0x00001fff) 1215 iBit = uMsr; 1216 else if (uMsr - UINT32_C(0xc0000000) <= UINT32_C(0x00001fff)) 1217 { 1218 iBit = uMsr - UINT32_C(0xc0000000); 1219 pbMsrBitmap += 0x400; 1573 uint16_t const offBitmapRead = 0; 1574 uint16_t const offBitmapWrite = 0x800; 1575 uint16_t offMsr; 1576 int32_t iBit; 1577 if (idMsr <= UINT32_C(0x00001fff)) 1578 { 1579 offMsr = 0; 1580 iBit = idMsr; 1581 } 1582 else if (idMsr - UINT32_C(0xc0000000) <= UINT32_C(0x00001fff)) 1583 { 1584 offMsr = 0x400; 1585 iBit = idMsr - UINT32_C(0xc0000000); 1220 1586 } 1221 1587 else 1222 AssertMsgFailedReturnVoid(("hmR0VmxSetMsrPermission: Invalid MSR %#RX32\n", uMsr)); 1223 1224 Assert(iBit <= 0x1fff); 1225 if (enmRead == VMXMSREXIT_INTERCEPT_READ) 1226 ASMBitSet(pbMsrBitmap, iBit); 1588 AssertMsgFailedReturnVoid(("Invalid MSR %#RX32\n", idMsr)); 1589 1590 /* 1591 * Set the MSR read permission. 1592 */ 1593 uint16_t const offMsrRead = offBitmapRead + offMsr; 1594 Assert(offMsrRead + (iBit >> 3) < offBitmapWrite); 1595 if (fMsrpm & VMXMSRPM_ALLOW_RD) 1596 { 1597 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX 1598 bool const fClear = !fIsNstGstVmcs ? true 1599 : !hmR0VmxIsMsrBitSet(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvMsrBitmap), offMsrRead, iBit); 1600 #else 1601 RT_NOREF2(pVCpu, fIsNstGstVmcs); 1602 bool const fClear = true; 1603 #endif 1604 if (fClear) 1605 ASMBitClear(pbMsrBitmap + offMsrRead, iBit); 1606 } 1227 1607 else 1228 ASMBitClear(pbMsrBitmap, iBit); 1229 1230 if (enmWrite == VMXMSREXIT_INTERCEPT_WRITE) 1231 ASMBitSet(pbMsrBitmap + 0x800, iBit); 1608 ASMBitSet(pbMsrBitmap + offMsrRead, iBit); 1609 1610 /* 1611 * Set the MSR write permission. 1612 */ 1613 uint16_t const offMsrWrite = offBitmapWrite + offMsr; 1614 Assert(offMsrWrite + (iBit >> 3) < X86_PAGE_4K_SIZE); 1615 if (fMsrpm & VMXMSRPM_ALLOW_WR) 1616 { 1617 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX 1618 bool const fClear = !fIsNstGstVmcs ? true 1619 : !hmR0VmxIsMsrBitSet(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvMsrBitmap), offMsrWrite, iBit); 1620 #else 1621 RT_NOREF2(pVCpu, fIsNstGstVmcs); 1622 bool const fClear = true; 1623 #endif 1624 if (fClear) 1625 ASMBitClear(pbMsrBitmap + offMsrWrite, iBit); 1626 } 1232 1627 else 1233 ASMBit Clear(pbMsrBitmap + 0x800, iBit);1628 ASMBitSet(pbMsrBitmap + offMsrWrite, iBit); 1234 1629 } 1235 1630 … … 1241 1636 * @returns VBox status code. 1242 1637 * @param pVCpu The cross context virtual CPU structure. 1638 * @param pVmcsInfo The VMCS info. object. 1243 1639 * @param cMsrs The number of MSRs. 
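To make the MSR-bitmap layout described above concrete, the standalone sketch below computes the (byte offset, bit index) pair for low (0x00000000..0x00001fff) and high (0xc0000000..0xc0001fff) MSRs and flips the read/write bits accordingly. The MSRPM_* flags and helper names are illustrative; only the bitmap layout mirrors the comment in the diff.

```c
#include <stdbool.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define MSRPM_ALLOW_RD  1u
#define MSRPM_ALLOW_WR  2u

/* Maps an MSR to its byte offset and bit index within one half (read or write) of the bitmap. */
static bool MsrToBitmapSlot(uint32_t idMsr, uint16_t *poffMsr, uint32_t *piBit)
{
    if (idMsr <= 0x00001fffu)
    { *poffMsr = 0x000; *piBit = idMsr; return true; }
    if (idMsr - 0xc0000000u <= 0x00001fffu)
    { *poffMsr = 0x400; *piBit = idMsr - 0xc0000000u; return true; }
    return false;                                   /* out-of-range MSRs always cause VM-exits */
}

/* Clears (allow) or sets (intercept) the read/write bits; 0x800 is the write-bitmap base. */
static void SetMsrPermission(uint8_t *pbBitmap, uint32_t idMsr, uint32_t fMsrpm)
{
    uint16_t offMsr; uint32_t iBit;
    if (!MsrToBitmapSlot(idMsr, &offMsr, &iBit))
        return;
    uint8_t *pbRead  = pbBitmap + offMsr         + (iBit >> 3);
    uint8_t *pbWrite = pbBitmap + 0x800 + offMsr + (iBit >> 3);
    uint8_t  bMask   = (uint8_t)(1u << (iBit & 7));
    if (fMsrpm & MSRPM_ALLOW_RD) *pbRead  &= ~bMask; else *pbRead  |= bMask;
    if (fMsrpm & MSRPM_ALLOW_WR) *pbWrite &= ~bMask; else *pbWrite |= bMask;
}

int main(void)
{
    static uint8_t abBitmap[4096];
    memset(abBitmap, 0xff, sizeof(abBitmap));       /* start with everything intercepted */
    SetMsrPermission(abBitmap, 0xc0000080u /* a high MSR */, MSRPM_ALLOW_RD);
    printf("read byte=%#x write byte=%#x\n",
           abBitmap[0x400 + (0x80 >> 3)], abBitmap[0xc00 + (0x80 >> 3)]);
    return 0;
}
```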
1244 1640 */ 1245 static int hmR0VmxSetAutoLoadStoreMsrCount(PVMCPU pVCpu, uint32_t cMsrs)1641 static int hmR0VmxSetAutoLoadStoreMsrCount(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo, uint32_t cMsrs) 1246 1642 { 1247 1643 /* Shouldn't ever happen but there -is- a number. We're well within the recommended 512. */ 1248 uint64_t const uVmxMiscMsr = pVCpu->CTX_SUFF(pVM)->hm.s.vmx.Msrs.u64Misc; 1249 uint32_t const cMaxSupportedMsrs = VMX_MISC_MAX_MSRS(uVmxMiscMsr); 1250 if (RT_UNLIKELY(cMsrs > cMaxSupportedMsrs)) 1251 { 1252 LogRel(("CPU auto-load/store MSR count in VMCS exceeded cMsrs=%u Supported=%u.\n", cMsrs, cMaxSupportedMsrs)); 1644 uint32_t const cMaxSupportedMsrs = VMX_MISC_MAX_MSRS(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.Msrs.u64Misc); 1645 if (RT_UNLIKELY(cMsrs >= cMaxSupportedMsrs)) 1646 { 1647 LogRel(("Auto-load/store MSR count exceeded! cMsrs=%u Supported=%u.\n", cMsrs, cMaxSupportedMsrs)); 1253 1648 pVCpu->hm.s.u32HMError = VMX_UFC_INSUFFICIENT_GUEST_MSR_STORAGE; 1254 1649 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO; 1255 1650 } 1256 1651 1257 /* Update number of guest MSRs to load/store across the world-switch. */ 1258 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, cMsrs); 1259 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, cMsrs); 1260 1261 /* Update number of host MSRs to load after the world-switch. Identical to guest-MSR count as it's always paired. */ 1262 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, cMsrs); 1652 /* Commit the MSR counts to the VMCS and update the cache. */ 1653 int rc = VINF_SUCCESS; 1654 if (pVmcsInfo->cEntryMsrLoad != cMsrs) 1655 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, cMsrs); 1656 if (pVmcsInfo->cExitMsrStore != cMsrs) 1657 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, cMsrs); 1658 if (pVmcsInfo->cExitMsrLoad != cMsrs) 1659 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, cMsrs); 1263 1660 AssertRCReturn(rc, rc); 1264 1661 1265 /* Update the VCPU's copy of the MSR count. */ 1266 pVCpu->hm.s.vmx.cMsrs = cMsrs; 1662 pVmcsInfo->cEntryMsrLoad = cMsrs; 1663 pVmcsInfo->cExitMsrStore = cMsrs; 1664 pVmcsInfo->cExitMsrLoad = cMsrs; 1267 1665 1268 1666 return VINF_SUCCESS; … … 1277 1675 * @returns VBox status code. 1278 1676 * @param pVCpu The cross context virtual CPU structure. 1279 * @param uMsr The MSR. 1677 * @param pVmxTransient The VMX-transient structure. 1678 * @param idMsr The MSR. 1280 1679 * @param uGuestMsrValue Value of the guest MSR. 1680 * @param fSetReadWrite Whether to set the guest read/write access of this 1681 * MSR (thus not causing a VM-exit). 1281 1682 * @param fUpdateHostMsr Whether to update the value of the host MSR if 1282 1683 * necessary. 1283 * @param pfAddedAndUpdated Where to store whether the MSR was added -and- 1284 * its value was updated. Optional, can be NULL. 
1285 */ 1286 static int hmR0VmxAddAutoLoadStoreMsr(PVMCPU pVCpu, uint32_t uMsr, uint64_t uGuestMsrValue, bool fUpdateHostMsr, 1287 bool *pfAddedAndUpdated) 1288 { 1289 PVMXAUTOMSR pGuestMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr; 1290 uint32_t cMsrs = pVCpu->hm.s.vmx.cMsrs; 1291 uint32_t i; 1684 */ 1685 static int hmR0VmxAddAutoLoadStoreMsr(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient, uint32_t idMsr, uint64_t uGuestMsrValue, 1686 bool fSetReadWrite, bool fUpdateHostMsr) 1687 { 1688 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo; 1689 bool const fIsNstGstVmcs = pVmxTransient->fIsNestedGuest; 1690 PVMXAUTOMSR pGuestMsrLoad = (PVMXAUTOMSR)pVmcsInfo->pvGuestMsrLoad; 1691 uint32_t cMsrs = pVmcsInfo->cEntryMsrLoad; 1692 uint32_t i; 1693 1694 /* Paranoia. */ 1695 Assert(pGuestMsrLoad); 1696 1697 /* Check if the MSR already exists in the VM-entry MSR-load area. */ 1292 1698 for (i = 0; i < cMsrs; i++) 1293 1699 { 1294 if (pGuestMsr ->u32Msr == uMsr)1700 if (pGuestMsrLoad->u32Msr == idMsr) 1295 1701 break; 1296 pGuestMsr ++;1702 pGuestMsrLoad++; 1297 1703 } 1298 1704 … … 1300 1706 if (i == cMsrs) 1301 1707 { 1708 /* The MSR does not exist, bump the MSR coun to make room for the new MSR. */ 1302 1709 ++cMsrs; 1303 int rc = hmR0VmxSetAutoLoadStoreMsrCount(pVCpu, cMsrs); 1304 AssertMsgRCReturn(rc, ("hmR0VmxAddAutoLoadStoreMsr: Insufficient space to add MSR %u\n", uMsr), rc); 1305 1306 /* Now that we're swapping MSRs during the world-switch, allow the guest to read/write them without causing VM-exits. */ 1307 if (pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS) 1308 hmR0VmxSetMsrPermission(pVCpu, uMsr, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE); 1710 int rc = hmR0VmxSetAutoLoadStoreMsrCount(pVCpu, pVmcsInfo, cMsrs); 1711 AssertMsgRCReturn(rc, ("Insufficient space to add MSR to VM-entry MSR-load/store area %u\n", idMsr), rc); 1712 1713 /* Set the guest to read/write this MSR without causing VM-exits. */ 1714 if ( fSetReadWrite 1715 && (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)) 1716 hmR0VmxSetMsrPermission(pVCpu, pVmcsInfo, fIsNstGstVmcs, idMsr, VMXMSRPM_ALLOW_RD | VMXMSRPM_ALLOW_WR); 1309 1717 1310 1718 fAdded = true; 1311 1719 } 1312 1720 1313 /* Update the MSR values in the auto-load/store MSR area. */ 1314 pGuestMsr->u32Msr = uMsr; 1315 pGuestMsr->u64Value = uGuestMsrValue; 1316 1317 /* Create/update the MSR slot in the host MSR area. */ 1318 PVMXAUTOMSR pHostMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvHostMsr; 1721 /* Update the MSR value for the newly added or already existing MSR. */ 1722 pGuestMsrLoad->u32Msr = idMsr; 1723 pGuestMsrLoad->u64Value = uGuestMsrValue; 1724 1725 /* Create the corresponding slot in the VM-exit MSR-store area if we use a different page. */ 1726 if (hmR0VmxIsSeparateExitMsrStoreAreaVmcs(pVmcsInfo)) 1727 { 1728 PVMXAUTOMSR pGuestMsrStore = (PVMXAUTOMSR)pVmcsInfo->pvGuestMsrStore; 1729 pGuestMsrStore += i; 1730 pGuestMsrStore->u32Msr = idMsr; 1731 pGuestMsrStore->u64Value = 0; 1732 } 1733 1734 /* Update the corresponding slot in the host MSR area. */ 1735 PVMXAUTOMSR pHostMsr = (PVMXAUTOMSR)pVmcsInfo->pvHostMsrLoad; 1736 Assert(pHostMsr != pVmcsInfo->pvGuestMsrLoad && pHostMsr != pVmcsInfo->pvGuestMsrStore); 1319 1737 pHostMsr += i; 1320 pHostMsr->u32Msr = uMsr; 1321 1322 /* 1323 * Update the host MSR only when requested by the caller AND when we're 1324 * adding it to the auto-load/store area. Otherwise, it would have been 1325 * updated by hmR0VmxExportHostMsrs(). We do this for performance reasons. 
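The add path above is a find-or-append over a densely packed array whose length is mirrored into the VMCS MSR counts. A reduced sketch of that pattern (fixed-size array, no VMCS writes, made-up names):

```c
#include <stdint.h>
#include <stdio.h>

/* One MSR entry, conceptually mirroring a VMX auto-load/store slot. */
typedef struct MSRENTRY { uint32_t idMsr; uint64_t u64Value; } MSRENTRY;

typedef struct MSRAREA { MSRENTRY aEntries[16]; uint32_t cMsrs; } MSRAREA;

/* Find-or-append: returns the slot index, or -1 when the area is full. */
static int AddOrUpdateMsr(MSRAREA *pArea, uint32_t idMsr, uint64_t u64Value)
{
    uint32_t i;
    for (i = 0; i < pArea->cMsrs; i++)
        if (pArea->aEntries[i].idMsr == idMsr)
            break;
    if (i == pArea->cMsrs)
    {
        if (pArea->cMsrs >= 16)
            return -1;                              /* real code fails with an "insufficient storage" status */
        pArea->cMsrs++;                             /* real code also re-writes the VMCS MSR counts here */
    }
    pArea->aEntries[i].idMsr    = idMsr;
    pArea->aEntries[i].u64Value = u64Value;
    return (int)i;
}

int main(void)
{
    MSRAREA Area = { { { 0, 0 } }, 0 };
    AddOrUpdateMsr(&Area, 0xc0000080u, 0x500u);     /* add a new entry */
    AddOrUpdateMsr(&Area, 0xc0000080u, 0xd01u);     /* update in place, count unchanged */
    printf("cMsrs=%u value=%#llx\n", Area.cMsrs, (unsigned long long)Area.aEntries[0].u64Value);
    return 0;
}
```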
1326 */ 1327 bool fUpdatedMsrValue = false; 1738 pHostMsr->u32Msr = idMsr; 1739 1740 /* 1741 * Only if the caller requests to update the host MSR value AND we've newly added the 1742 * MSR to the host MSR area do we actually update the value. Otherwise, it will be 1743 * updated by hmR0VmxUpdateAutoLoadHostMsrs(). 1744 * 1745 * We do this for performance reasons since reading MSRs may be quite expensive. 1746 */ 1328 1747 if ( fAdded 1329 1748 && fUpdateHostMsr) … … 1332 1751 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD)); 1333 1752 pHostMsr->u64Value = ASMRdMsr(pHostMsr->u32Msr); 1334 fUpdatedMsrValue = true; 1335 } 1336 1337 if (pfAddedAndUpdated) 1338 *pfAddedAndUpdated = fUpdatedMsrValue; 1753 } 1339 1754 return VINF_SUCCESS; 1340 1755 } … … 1346 1761 * 1347 1762 * @returns VBox status code. 1348 * @param pVCpu The cross context virtual CPU structure. 1349 * @param uMsr The MSR. 1350 */ 1351 static int hmR0VmxRemoveAutoLoadStoreMsr(PVMCPU pVCpu, uint32_t uMsr) 1352 { 1353 PVMXAUTOMSR pGuestMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr; 1354 uint32_t cMsrs = pVCpu->hm.s.vmx.cMsrs; 1763 * @param pVCpu The cross context virtual CPU structure. 1764 * @param pVmxTransient The VMX-transient structure. 1765 * @param idMsr The MSR. 1766 */ 1767 static int hmR0VmxRemoveAutoLoadStoreMsr(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient, uint32_t idMsr) 1768 { 1769 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo; 1770 bool const fIsNstGstVmcs = pVmxTransient->fIsNestedGuest; 1771 PVMXAUTOMSR pGuestMsrLoad = (PVMXAUTOMSR)pVmcsInfo->pvGuestMsrLoad; 1772 uint32_t cMsrs = pVmcsInfo->cEntryMsrLoad; 1773 1774 bool const fSeparateExitMsrStorePage = hmR0VmxIsSeparateExitMsrStoreAreaVmcs(pVmcsInfo); 1355 1775 for (uint32_t i = 0; i < cMsrs; i++) 1356 1776 { 1357 1777 /* Find the MSR. */ 1358 if (pGuestMsr ->u32Msr == uMsr)1778 if (pGuestMsrLoad->u32Msr == idMsr) 1359 1779 { 1360 1780 /* If it's the last MSR, simply reduce the count. */ … … 1365 1785 } 1366 1786 1367 /* Remove it by swapping the last MSR in place of it, and reducing the count. */ 1368 PVMXAUTOMSR pLastGuestMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr; 1369 pLastGuestMsr += cMsrs - 1; 1370 pGuestMsr->u32Msr = pLastGuestMsr->u32Msr; 1371 pGuestMsr->u64Value = pLastGuestMsr->u64Value; 1372 1373 PVMXAUTOMSR pHostMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvHostMsr; 1374 PVMXAUTOMSR pLastHostMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvHostMsr; 1375 pLastHostMsr += cMsrs - 1; 1376 pHostMsr->u32Msr = pLastHostMsr->u32Msr; 1377 pHostMsr->u64Value = pLastHostMsr->u64Value; 1787 /* Remove it by copying the last MSR in place of it, and reducing the count. */ 1788 PVMXAUTOMSR pLastGuestMsrLoad = (PVMXAUTOMSR)pVmcsInfo->pvGuestMsrLoad; 1789 pLastGuestMsrLoad += cMsrs - 1; 1790 pGuestMsrLoad->u32Msr = pLastGuestMsrLoad->u32Msr; 1791 pGuestMsrLoad->u64Value = pLastGuestMsrLoad->u64Value; 1792 1793 /* Remove it from the VM-exit MSR-store area if we are using a different page. */ 1794 if (fSeparateExitMsrStorePage) 1795 { 1796 PVMXAUTOMSR pGuestMsrStore = (PVMXAUTOMSR)pVmcsInfo->pvGuestMsrStore; 1797 PVMXAUTOMSR pLastGuestMsrStore = (PVMXAUTOMSR)pVmcsInfo->pvGuestMsrStore; 1798 pGuestMsrStore += i; 1799 pLastGuestMsrStore += cMsrs - 1; 1800 Assert(pGuestMsrStore->u32Msr == idMsr); 1801 pGuestMsrStore->u32Msr = pLastGuestMsrStore->u32Msr; 1802 pGuestMsrStore->u64Value = pLastGuestMsrStore->u64Value; 1803 } 1804 1805 /* Remove it from the VM-exit MSR-load area. 
*/ 1806 PVMXAUTOMSR pHostMsr = (PVMXAUTOMSR)pVmcsInfo->pvHostMsrLoad; 1807 PVMXAUTOMSR pLastHostMsr = (PVMXAUTOMSR)pVmcsInfo->pvHostMsrLoad; 1808 pHostMsr += i; 1809 pLastHostMsr += cMsrs - 1; 1810 Assert(pHostMsr->u32Msr == idMsr); 1811 pHostMsr->u32Msr = pLastHostMsr->u32Msr; 1812 pHostMsr->u64Value = pLastHostMsr->u64Value; 1378 1813 --cMsrs; 1379 1814 break; 1380 1815 } 1381 pGuestMsr ++;1816 pGuestMsrLoad++; 1382 1817 } 1383 1818 1384 1819 /* Update the VMCS if the count changed (meaning the MSR was found). */ 1385 if (cMsrs != pV Cpu->hm.s.vmx.cMsrs)1386 { 1387 int rc = hmR0VmxSetAutoLoadStoreMsrCount(pVCpu, cMsrs);1820 if (cMsrs != pVmcsInfo->cEntryMsrLoad) 1821 { 1822 int rc = hmR0VmxSetAutoLoadStoreMsrCount(pVCpu, pVmcsInfo, cMsrs); 1388 1823 AssertRCReturn(rc, rc); 1389 1824 1390 1825 /* We're no longer swapping MSRs during the world-switch, intercept guest read/writes to them. */ 1391 if (pV Cpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)1392 hmR0VmxSetMsrPermission(pVCpu, uMsr, VMXMSREXIT_INTERCEPT_READ, VMXMSREXIT_INTERCEPT_WRITE);1393 1394 Log4Func(("Removed MSR %#RX32 new cMsrs=%u\n", uMsr, pVCpu->hm.s.vmx.cMsrs));1826 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS) 1827 hmR0VmxSetMsrPermission(pVCpu, pVmcsInfo, fIsNstGstVmcs, idMsr, VMXMSRPM_EXIT_RD | VMXMSRPM_EXIT_WR); 1828 1829 Log4Func(("Removed MSR %#RX32, cMsrs=%u\n", idMsr, cMsrs)); 1395 1830 return VINF_SUCCESS; 1396 1831 } … … 1401 1836 1402 1837 /** 1403 * Checks if the specified guest MSR is part of the auto-load/store area in 1404 * the VMCS. 1405 * 1406 * @returns true if found, false otherwise. 1838 * Checks if the specified guest MSR is part of the VM-entry MSR-load area. 1839 * 1840 * @returns @c true if found, @c false otherwise. 1841 * @param pVmcsInfo The VMCS info. object. 1842 * @param idMsr The MSR to find. 1843 */ 1844 static bool hmR0VmxIsAutoLoadGuestMsr(PCVMXVMCSINFO pVmcsInfo, uint32_t idMsr) 1845 { 1846 PCVMXAUTOMSR pGuestMsrLoad = (PCVMXAUTOMSR)pVmcsInfo->pvGuestMsrLoad; 1847 uint32_t const cMsrs = pVmcsInfo->cEntryMsrLoad; 1848 for (uint32_t i = 0; i < cMsrs; i++) 1849 { 1850 if (pGuestMsrLoad->u32Msr == idMsr) 1851 return true; 1852 pGuestMsrLoad++; 1853 } 1854 return false; 1855 } 1856 1857 1858 /** 1859 * Updates the value of all host MSRs in the VM-exit MSR-load area. 1860 * 1407 1861 * @param pVCpu The cross context virtual CPU structure. 1408 * @param uMsr The MSR to find. 1409 */ 1410 static bool hmR0VmxIsAutoLoadStoreGuestMsr(PVMCPU pVCpu, uint32_t uMsr) 1411 { 1412 PVMXAUTOMSR pGuestMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr; 1413 uint32_t const cMsrs = pVCpu->hm.s.vmx.cMsrs; 1414 1415 for (uint32_t i = 0; i < cMsrs; i++, pGuestMsr++) 1416 { 1417 if (pGuestMsr->u32Msr == uMsr) 1418 return true; 1419 } 1420 return false; 1421 } 1422 1423 1424 /** 1425 * Updates the value of all host MSRs in the auto-load/store area in the VMCS. 1426 * 1427 * @param pVCpu The cross context virtual CPU structure. 1862 * @param pVmcsInfo The VMCS info. object. 1428 1863 * 1429 1864 * @remarks No-long-jump zone!!! 
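Removal keeps the array packed by copying the last entry over the vacated slot and decrementing the count, which is why the load, store and host areas must be updated in lockstep. A minimal sketch of just that swap-remove step, with hypothetical names and without the VMCS count or MSR-bitmap updates:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef struct MSRENTRY { uint32_t idMsr; uint64_t u64Value; } MSRENTRY;

/* Swap-remove keeps the array densely packed so the VMCS count can simply be decremented. */
static bool RemoveMsr(MSRENTRY *paEntries, uint32_t *pcMsrs, uint32_t idMsr)
{
    uint32_t const cMsrs = *pcMsrs;
    for (uint32_t i = 0; i < cMsrs; i++)
    {
        if (paEntries[i].idMsr == idMsr)
        {
            paEntries[i] = paEntries[cMsrs - 1];    /* last entry moves into the vacated slot */
            *pcMsrs = cMsrs - 1;                    /* real code re-writes the VMCS counts and MSR bitmap */
            return true;
        }
    }
    return false;
}

int main(void)
{
    MSRENTRY aMsrs[3] = { { 0x174, 1 }, { 0x175, 2 }, { 0x176, 3 } };
    uint32_t cMsrs = 3;
    bool const fFound = RemoveMsr(aMsrs, &cMsrs, 0x174);
    printf("found=%d cMsrs=%u slot0=%#x\n", fFound, cMsrs, aMsrs[0].idMsr);   /* slot0 is now 0x176 */
    return 0;
}
```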
1430 1865 */ 1431 static void hmR0VmxUpdateAutoLoadStoreHostMsrs(PVMCPU pVCpu) 1432 { 1866 static void hmR0VmxUpdateAutoLoadHostMsrs(PVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo) 1867 { 1868 PVMXAUTOMSR pHostMsrLoad = (PVMXAUTOMSR)pVmcsInfo->pvHostMsrLoad; 1869 uint32_t const cMsrs = pVmcsInfo->cExitMsrLoad; 1870 1433 1871 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD)); 1434 PVMXAUTOMSR pHostMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvHostMsr; 1435 PVMXAUTOMSR pGuestMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr; 1436 uint32_t const cMsrs = pVCpu->hm.s.vmx.cMsrs; 1437 1438 for (uint32_t i = 0; i < cMsrs; i++, pHostMsr++, pGuestMsr++) 1439 { 1440 AssertReturnVoid(pHostMsr->u32Msr == pGuestMsr->u32Msr); 1441 1872 Assert(pHostMsrLoad); 1873 1874 for (uint32_t i = 0; i < cMsrs; i++, pHostMsrLoad++) 1875 { 1442 1876 /* 1443 1877 * Performance hack for the host EFER MSR. We use the cached value rather than re-read it. 1444 1878 * Strict builds will catch mismatches in hmR0VmxCheckAutoLoadStoreMsrs(). See @bugref{7368}. 1445 1879 */ 1446 if (pHostMsr ->u32Msr == MSR_K6_EFER)1447 pHostMsr ->u64Value = pVCpu->CTX_SUFF(pVM)->hm.s.vmx.u64HostEfer;1880 if (pHostMsrLoad->u32Msr == MSR_K6_EFER) 1881 pHostMsrLoad->u64Value = pVCpu->CTX_SUFF(pVM)->hm.s.vmx.u64HostMsrEfer; 1448 1882 else 1449 pHostMsr->u64Value = ASMRdMsr(pHostMsr->u32Msr); 1450 } 1451 1452 pVCpu->hm.s.vmx.fUpdatedHostMsrs = true; 1883 pHostMsrLoad->u64Value = ASMRdMsr(pHostMsrLoad->u32Msr); 1884 } 1453 1885 } 1454 1886 … … 1467 1899 1468 1900 /* 1469 * Note: If you're adding MSRs here, make sure to update the MSR-bitmap permissions in hmR0VmxSetupProcCtls().1901 * Note: If you're adding MSRs here, make sure to update the MSR-bitmap accesses in hmR0VmxSetupVmcsProcCtls(). 1470 1902 */ 1471 1903 if (!(pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_SAVED_HOST)) … … 1475 1907 if (pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests) 1476 1908 { 1477 pVCpu->hm.s.vmx.u64Host LStarMsr = ASMRdMsr(MSR_K8_LSTAR);1478 pVCpu->hm.s.vmx.u64Host StarMsr = ASMRdMsr(MSR_K6_STAR);1479 pVCpu->hm.s.vmx.u64Host SFMaskMsr= ASMRdMsr(MSR_K8_SF_MASK);1480 pVCpu->hm.s.vmx.u64Host KernelGSBaseMsr= ASMRdMsr(MSR_K8_KERNEL_GS_BASE);1909 pVCpu->hm.s.vmx.u64HostMsrLStar = ASMRdMsr(MSR_K8_LSTAR); 1910 pVCpu->hm.s.vmx.u64HostMsrStar = ASMRdMsr(MSR_K6_STAR); 1911 pVCpu->hm.s.vmx.u64HostMsrSfMask = ASMRdMsr(MSR_K8_SF_MASK); 1912 pVCpu->hm.s.vmx.u64HostMsrKernelGsBase = ASMRdMsr(MSR_K8_KERNEL_GS_BASE); 1481 1913 } 1482 1914 #endif … … 1492 1924 * @returns true if it does, false otherwise. 1493 1925 * @param pVCpu The cross context virtual CPU structure. 1494 * @param uMsrThe MSR to check.1495 */ 1496 static bool hmR0VmxIsLazyGuestMsr(PVMCPU pVCpu, uint32_t uMsr)1926 * @param idMsr The MSR to check. 
1927 */ 1928 static bool hmR0VmxIsLazyGuestMsr(PVMCPU pVCpu, uint32_t idMsr) 1497 1929 { 1498 1930 NOREF(pVCpu); … … 1500 1932 if (pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests) 1501 1933 { 1502 switch ( uMsr)1934 switch (idMsr) 1503 1935 { 1504 1936 case MSR_K8_LSTAR: … … 1510 1942 } 1511 1943 #else 1512 RT_NOREF(pVCpu, uMsr);1944 RT_NOREF(pVCpu, idMsr); 1513 1945 #endif 1514 1946 return false; … … 1549 1981 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 1550 1982 if ( !(pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST) 1551 && pCtx->msrKERNELGSBASE == pVCpu->hm.s.vmx.u64Host KernelGSBaseMsr1552 && pCtx->msrLSTAR == pVCpu->hm.s.vmx.u64Host LStarMsr1553 && pCtx->msrSTAR == pVCpu->hm.s.vmx.u64Host StarMsr1554 && pCtx->msrSFMASK == pVCpu->hm.s.vmx.u64Host SFMaskMsr)1983 && pCtx->msrKERNELGSBASE == pVCpu->hm.s.vmx.u64HostMsrKernelGsBase 1984 && pCtx->msrLSTAR == pVCpu->hm.s.vmx.u64HostMsrLStar 1985 && pCtx->msrSTAR == pVCpu->hm.s.vmx.u64HostMsrStar 1986 && pCtx->msrSFMASK == pVCpu->hm.s.vmx.u64HostMsrSfMask) 1555 1987 { 1556 1988 #ifdef VBOX_STRICT … … 1595 2027 if (pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests) 1596 2028 { 1597 ASMWrMsr(MSR_K8_LSTAR, pVCpu->hm.s.vmx.u64Host LStarMsr);1598 ASMWrMsr(MSR_K6_STAR, pVCpu->hm.s.vmx.u64Host StarMsr);1599 ASMWrMsr(MSR_K8_SF_MASK, pVCpu->hm.s.vmx.u64Host SFMaskMsr);1600 ASMWrMsr(MSR_K8_KERNEL_GS_BASE, pVCpu->hm.s.vmx.u64Host KernelGSBaseMsr);2029 ASMWrMsr(MSR_K8_LSTAR, pVCpu->hm.s.vmx.u64HostMsrLStar); 2030 ASMWrMsr(MSR_K6_STAR, pVCpu->hm.s.vmx.u64HostMsrStar); 2031 ASMWrMsr(MSR_K8_SF_MASK, pVCpu->hm.s.vmx.u64HostMsrSfMask); 2032 ASMWrMsr(MSR_K8_KERNEL_GS_BASE, pVCpu->hm.s.vmx.u64HostMsrKernelGsBase); 1601 2033 } 1602 2034 #endif … … 1615 2047 * VMCS content. HMCPU error-field is 1616 2048 * updated, see VMX_VCI_XXX. 1617 * @param pVCpu The cross context virtual CPU structure. 1618 */ 1619 static int hmR0VmxCheckVmcsCtls(PVMCPU pVCpu) 2049 * @param pVCpu The cross context virtual CPU structure. 2050 * @param pVmcsInfo The VMCS info. object. 
2051 */ 2052 static int hmR0VmxCheckVmcsCtls(PVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo) 1620 2053 { 1621 2054 uint32_t u32Val; 1622 2055 int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY, &u32Val); 1623 2056 AssertRCReturn(rc, rc); 1624 AssertMsgReturnStmt(pV Cpu->hm.s.vmx.Ctls.u32EntryCtls == u32Val,1625 ("Cache=%#RX32 VMCS=%#RX32\n", pV Cpu->hm.s.vmx.Ctls.u32EntryCtls, u32Val),2057 AssertMsgReturnStmt(pVmcsInfo->u32EntryCtls == u32Val, 2058 ("Cache=%#RX32 VMCS=%#RX32\n", pVmcsInfo->u32EntryCtls, u32Val), 1626 2059 pVCpu->hm.s.u32HMError = VMX_VCI_CTRL_ENTRY, 1627 2060 VERR_VMX_VMCS_FIELD_CACHE_INVALID); … … 1629 2062 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT, &u32Val); 1630 2063 AssertRCReturn(rc, rc); 1631 AssertMsgReturnStmt(pV Cpu->hm.s.vmx.Ctls.u32ExitCtls == u32Val,1632 ("Cache=%#RX32 VMCS=%#RX32\n", pV Cpu->hm.s.vmx.Ctls.u32ExitCtls, u32Val),2064 AssertMsgReturnStmt(pVmcsInfo->u32ExitCtls == u32Val, 2065 ("Cache=%#RX32 VMCS=%#RX32\n", pVmcsInfo->u32ExitCtls, u32Val), 1633 2066 pVCpu->hm.s.u32HMError = VMX_VCI_CTRL_EXIT, 1634 2067 VERR_VMX_VMCS_FIELD_CACHE_INVALID); … … 1636 2069 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PIN_EXEC, &u32Val); 1637 2070 AssertRCReturn(rc, rc); 1638 AssertMsgReturnStmt(pV Cpu->hm.s.vmx.Ctls.u32PinCtls == u32Val,1639 ("Cache=%#RX32 VMCS=%#RX32\n", pV Cpu->hm.s.vmx.Ctls.u32PinCtls, u32Val),2071 AssertMsgReturnStmt(pVmcsInfo->u32PinCtls == u32Val, 2072 ("Cache=%#RX32 VMCS=%#RX32\n", pVmcsInfo->u32PinCtls, u32Val), 1640 2073 pVCpu->hm.s.u32HMError = VMX_VCI_CTRL_PIN_EXEC, 1641 2074 VERR_VMX_VMCS_FIELD_CACHE_INVALID); … … 1643 2076 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, &u32Val); 1644 2077 AssertRCReturn(rc, rc); 1645 AssertMsgReturnStmt(pV Cpu->hm.s.vmx.Ctls.u32ProcCtls == u32Val,1646 ("Cache=%#RX32 VMCS=%#RX32\n", pV Cpu->hm.s.vmx.Ctls.u32ProcCtls, u32Val),2078 AssertMsgReturnStmt(pVmcsInfo->u32ProcCtls == u32Val, 2079 ("Cache=%#RX32 VMCS=%#RX32\n", pVmcsInfo->u32ProcCtls, u32Val), 1647 2080 pVCpu->hm.s.u32HMError = VMX_VCI_CTRL_PROC_EXEC, 1648 2081 VERR_VMX_VMCS_FIELD_CACHE_INVALID); 1649 2082 1650 if (pV Cpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_USE_SECONDARY_CTLS)2083 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_SECONDARY_CTLS) 1651 2084 { 1652 2085 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, &u32Val); 1653 2086 AssertRCReturn(rc, rc); 1654 AssertMsgReturnStmt(pV Cpu->hm.s.vmx.Ctls.u32ProcCtls2 == u32Val,1655 ("Cache=%#RX32 VMCS=%#RX32\n", pV Cpu->hm.s.vmx.Ctls.u32ProcCtls2, u32Val),2087 AssertMsgReturnStmt(pVmcsInfo->u32ProcCtls2 == u32Val, 2088 ("Cache=%#RX32 VMCS=%#RX32\n", pVmcsInfo->u32ProcCtls2, u32Val), 1656 2089 pVCpu->hm.s.u32HMError = VMX_VCI_CTRL_PROC_EXEC2, 1657 2090 VERR_VMX_VMCS_FIELD_CACHE_INVALID); … … 1660 2093 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, &u32Val); 1661 2094 AssertRCReturn(rc, rc); 1662 AssertMsgReturnStmt(pV Cpu->hm.s.vmx.Ctls.u32XcptBitmap == u32Val,1663 ("Cache=%#RX32 VMCS=%#RX32\n", pV Cpu->hm.s.vmx.Ctls.u32XcptBitmap, u32Val),2095 AssertMsgReturnStmt(pVmcsInfo->u32XcptBitmap == u32Val, 2096 ("Cache=%#RX32 VMCS=%#RX32\n", pVmcsInfo->u32XcptBitmap, u32Val), 1664 2097 pVCpu->hm.s.u32HMError = VMX_VCI_CTRL_XCPT_BITMAP, 1665 2098 VERR_VMX_VMCS_FIELD_CACHE_INVALID); … … 1668 2101 rc = VMXReadVmcs64(VMX_VMCS64_CTRL_TSC_OFFSET_FULL, &u64Val); 1669 2102 AssertRCReturn(rc, rc); 1670 AssertMsgReturnStmt(pV Cpu->hm.s.vmx.Ctls.u64TscOffset == u64Val,1671 ("Cache=%#RX64 VMCS=%#RX64\n", pV Cpu->hm.s.vmx.Ctls.u64TscOffset, u64Val),2103 AssertMsgReturnStmt(pVmcsInfo->u64TscOffset == u64Val, 2104 ("Cache=%#RX64 
VMCS=%#RX64\n", pVmcsInfo->u64TscOffset, u64Val), 1672 2105 pVCpu->hm.s.u32HMError = VMX_VCI_CTRL_TSC_OFFSET, 1673 2106 VERR_VMX_VMCS_FIELD_CACHE_INVALID); … … 1679 2112 #ifdef VBOX_STRICT 1680 2113 /** 1681 * Verifies that our cached host EFER value has not changed 1682 * since we cached it. 2114 * Verifies that our cached host EFER MSR value has not changed since we cached it. 1683 2115 * 1684 2116 * @param pVCpu The cross context virtual CPU structure. 1685 */ 1686 static void hmR0VmxCheckHostEferMsr(PVMCPU pVCpu) 2117 * @param pVmcsInfo The VMCS info. object. 2118 */ 2119 static void hmR0VmxCheckHostEferMsr(PVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo) 1687 2120 { 1688 2121 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD)); 1689 2122 1690 if (pVCpu->hm.s.vmx.Ctls.u32ExitCtls & VMX_EXIT_CTLS_LOAD_EFER_MSR) 1691 { 1692 uint64_t u64Val; 1693 int rc = VMXReadVmcs64(VMX_VMCS64_HOST_EFER_FULL, &u64Val); 2123 if (pVmcsInfo->u32ExitCtls & VMX_EXIT_CTLS_LOAD_EFER_MSR) 2124 { 2125 uint64_t const uHostEferMsr = ASMRdMsr(MSR_K6_EFER); 2126 uint64_t const uHostEferMsrCache = pVCpu->CTX_SUFF(pVM)->hm.s.vmx.u64HostMsrEfer; 2127 uint64_t uVmcsEferMsrVmcs; 2128 int rc = VMXReadVmcs64(VMX_VMCS64_HOST_EFER_FULL, &uVmcsEferMsrVmcs); 1694 2129 AssertRC(rc); 1695 2130 1696 uint64_t u64HostEferMsr = ASMRdMsr(MSR_K6_EFER); 1697 AssertMsgReturnVoid(u64HostEferMsr == u64Val, ("u64HostEferMsr=%#RX64 u64Val=%#RX64\n", u64HostEferMsr, u64Val)); 2131 AssertMsgReturnVoid(uHostEferMsr == uVmcsEferMsrVmcs, 2132 ("EFER Host/VMCS mismatch! host=%#RX64 vmcs=%#RX64\n", uHostEferMsr, uVmcsEferMsrVmcs)); 2133 AssertMsgReturnVoid(uHostEferMsr == uHostEferMsrCache, 2134 ("EFER Host/Cache mismatch! host=%#RX64 cache=%#RX64\n", uHostEferMsr, uHostEferMsrCache)); 1698 2135 } 1699 2136 } … … 1706 2143 * @param pVCpu The cross context virtual CPU structure. 1707 2144 */ 1708 static void hmR0VmxCheckAutoLoadStoreMsrs(PVMCPU pVCpu )2145 static void hmR0VmxCheckAutoLoadStoreMsrs(PVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo) 1709 2146 { 1710 2147 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD)); … … 1712 2149 /* Verify MSR counts in the VMCS are what we think it should be. */ 1713 2150 uint32_t cMsrs; 1714 int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, &cMsrs); AssertRC(rc); 1715 Assert(cMsrs == pVCpu->hm.s.vmx.cMsrs); 1716 1717 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, &cMsrs); AssertRC(rc); 1718 Assert(cMsrs == pVCpu->hm.s.vmx.cMsrs); 1719 1720 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, &cMsrs); AssertRC(rc); 1721 Assert(cMsrs == pVCpu->hm.s.vmx.cMsrs); 1722 1723 PCVMXAUTOMSR pHostMsr = (PCVMXAUTOMSR)pVCpu->hm.s.vmx.pvHostMsr; 1724 PCVMXAUTOMSR pGuestMsr = (PCVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr; 1725 for (uint32_t i = 0; i < cMsrs; i++, pHostMsr++, pGuestMsr++) 2151 int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, &cMsrs); 2152 AssertRC(rc); 2153 Assert(cMsrs == pVmcsInfo->cEntryMsrLoad); 2154 2155 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, &cMsrs); 2156 AssertRC(rc); 2157 Assert(cMsrs == pVmcsInfo->cExitMsrStore); 2158 2159 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, &cMsrs); 2160 AssertRC(rc); 2161 Assert(cMsrs == pVmcsInfo->cExitMsrLoad); 2162 2163 /* Verify the MSR counts do not exceed the maximum count supported by the hardware. 
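   The ceiling reported by the CPU comes from IA32_VMX_MISC bits 27:25 (N): the
   recommended maximum number of MSRs per auto-load/store list is 512 * (N + 1),
   see Intel spec. A.6 "Miscellaneous Data". Conceptually (whether the
   VMX_MISC_MAX_MSRS macro encodes exactly this formula is an assumption here):
       cMaxMsrs = 512 * (((u64Misc >> 25) & 7) + 1);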
*/ 2164 Assert(cMsrs < VMX_MISC_MAX_MSRS(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.Msrs.u64Misc)); 2165 2166 PCVMXAUTOMSR pGuestMsrLoad = (PCVMXAUTOMSR)pVmcsInfo->pvGuestMsrLoad; 2167 PCVMXAUTOMSR pGuestMsrStore = (PCVMXAUTOMSR)pVmcsInfo->pvGuestMsrStore; 2168 PCVMXAUTOMSR pHostMsrLoad = (PCVMXAUTOMSR)pVmcsInfo->pvHostMsrLoad; 2169 bool const fSeparateExitMsrStorePage = hmR0VmxIsSeparateExitMsrStoreAreaVmcs(pVmcsInfo); 2170 for (uint32_t i = 0; i < cMsrs; i++) 1726 2171 { 1727 2172 /* Verify that the MSRs are paired properly and that the host MSR has the correct value. */ 1728 AssertMsgReturnVoid(pHostMsr->u32Msr == pGuestMsr->u32Msr, ("HostMsr=%#RX32 GuestMsr=%#RX32 cMsrs=%u\n", pHostMsr->u32Msr, 1729 pGuestMsr->u32Msr, cMsrs)); 1730 1731 uint64_t u64Msr = ASMRdMsr(pHostMsr->u32Msr); 1732 AssertMsgReturnVoid(pHostMsr->u64Value == u64Msr, ("u32Msr=%#RX32 VMCS Value=%#RX64 ASMRdMsr=%#RX64 cMsrs=%u\n", 1733 pHostMsr->u32Msr, pHostMsr->u64Value, u64Msr, cMsrs)); 1734 1735 /* Verify that the permissions are as expected in the MSR bitmap. */ 1736 if (pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS) 1737 { 1738 VMXMSREXITREAD enmRead; 1739 VMXMSREXITWRITE enmWrite; 1740 rc = HMGetVmxMsrPermission(pVCpu->hm.s.vmx.pvMsrBitmap, pGuestMsr->u32Msr, &enmRead, &enmWrite); 1741 AssertMsgReturnVoid(rc == VINF_SUCCESS, ("HMGetVmxMsrPermission! failed. rc=%Rrc\n", rc)); 1742 if (pGuestMsr->u32Msr == MSR_K6_EFER) 2173 if (fSeparateExitMsrStorePage) 2174 { 2175 AssertMsgReturnVoid(pGuestMsrLoad->u32Msr == pGuestMsrStore->u32Msr, 2176 ("GuestMsrLoad=%#RX32 GuestMsrStore=%#RX32 cMsrs=%u\n", 2177 pGuestMsrLoad->u32Msr, pGuestMsrStore->u32Msr, cMsrs)); 2178 } 2179 2180 AssertMsgReturnVoid(pHostMsrLoad->u32Msr == pGuestMsrLoad->u32Msr, 2181 ("HostMsrLoad=%#RX32 GuestMsrLoad=%#RX32 cMsrs=%u\n", 2182 pHostMsrLoad->u32Msr, pGuestMsrLoad->u32Msr, cMsrs)); 2183 2184 uint64_t const u64Msr = ASMRdMsr(pHostMsrLoad->u32Msr); 2185 AssertMsgReturnVoid(pHostMsrLoad->u64Value == u64Msr, 2186 ("u32Msr=%#RX32 VMCS Value=%#RX64 ASMRdMsr=%#RX64 cMsrs=%u\n", 2187 pHostMsrLoad->u32Msr, pHostMsrLoad->u64Value, u64Msr, cMsrs)); 2188 2189 /* Verify that the accesses are as expected in the MSR bitmap for auto-load/store MSRs. */ 2190 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS) 2191 { 2192 uint32_t fMsrpm = HMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, pGuestMsrLoad->u32Msr); 2193 if (pGuestMsrLoad->u32Msr == MSR_K6_EFER) 1743 2194 { 1744 AssertMsgReturnVoid( enmRead == VMXMSREXIT_INTERCEPT_READ, ("Passthru read for EFER!?\n"));1745 AssertMsgReturnVoid( enmWrite == VMXMSREXIT_INTERCEPT_WRITE, ("Passthru write for EFER!?\n"));2195 AssertMsgReturnVoid((fMsrpm & VMXMSRPM_EXIT_RD), ("Passthru read for EFER MSR!?\n")); 2196 AssertMsgReturnVoid((fMsrpm & VMXMSRPM_EXIT_WR), ("Passthru write for EFER MSR!?\n")); 1746 2197 } 1747 2198 else 1748 2199 { 1749 AssertMsgReturnVoid(enmRead == VMXMSREXIT_PASSTHRU_READ, ("u32Msr=%#RX32 cMsrs=%u No passthru read!\n", 1750 pGuestMsr->u32Msr, cMsrs)); 1751 AssertMsgReturnVoid(enmWrite == VMXMSREXIT_PASSTHRU_WRITE, ("u32Msr=%#RX32 cMsrs=%u No passthru write!\n", 1752 pGuestMsr->u32Msr, cMsrs)); 2200 AssertMsgReturnVoid((fMsrpm & (VMXMSRPM_ALLOW_RD | VMXMSRPM_ALLOW_WR)) 2201 == (VMXMSRPM_ALLOW_RD | VMXMSRPM_ALLOW_WR), 2202 ("u32Msr=%#RX32 cMsrs=%u No passthru read/write!\n", pGuestMsrLoad->u32Msr, cMsrs)); 1753 2203 } 1754 2204 } 2205 2206 /* Move to the next MSR. 
*/ 2207 pHostMsrLoad++; 2208 pGuestMsrLoad++; 2209 pGuestMsrStore++; 1755 2210 } 1756 2211 } … … 1764 2219 * @param pVCpu The cross context virtual CPU structure of the calling 1765 2220 * EMT. Can be NULL depending on @a enmTlbFlush. 2221 * @param pVmcsInfo The VMCS info. object. Can be NULL depending on @a 2222 * enmTlbFlush. 1766 2223 * @param enmTlbFlush Type of flush. 1767 2224 * … … 1771 2228 * @remarks Can be called with interrupts disabled. 1772 2229 */ 1773 static void hmR0VmxFlushEpt(PVMCPU pVCpu, VMXTLBFLUSHEPT enmTlbFlush)2230 static void hmR0VmxFlushEpt(PVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo, VMXTLBFLUSHEPT enmTlbFlush) 1774 2231 { 1775 2232 uint64_t au64Descriptor[2]; … … 1779 2236 { 1780 2237 Assert(pVCpu); 1781 au64Descriptor[0] = pVCpu->hm.s.vmx.HCPhysEPTP; 2238 Assert(pVmcsInfo); 2239 au64Descriptor[0] = pVmcsInfo->HCPhysEPTP; 1782 2240 } 1783 2241 au64Descriptor[1] = 0; /* MBZ. Intel spec. 33.3 "VMX Instructions" */ 1784 2242 1785 2243 int rc = VMXR0InvEPT(enmTlbFlush, &au64Descriptor[0]); 1786 AssertMsg(rc == VINF_SUCCESS, 1787 ("VMXR0InvEPT %#x %RGv failed with %Rrc\n", enmTlbFlush, pVCpu ? pVCpu->hm.s.vmx.HCPhysEPTP : 0, rc)); 2244 AssertMsg(rc == VINF_SUCCESS, ("VMXR0InvEPT %#x %#RHp failed. rc=%Rrc\n", enmTlbFlush, au64Descriptor[0], rc)); 1788 2245 1789 2246 if ( RT_SUCCESS(rc) … … 1836 2293 1837 2294 /** 1838 * Invalidates a guest page by guest virtual address. Only relevant for 1839 * EPT/VPID,otherwise there is nothing really to invalidate.2295 * Invalidates a guest page by guest virtual address. Only relevant for EPT/VPID, 2296 * otherwise there is nothing really to invalidate. 1840 2297 * 1841 2298 * @returns VBox status code. … … 1848 2305 LogFlowFunc(("pVCpu=%p GCVirt=%RGv\n", pVCpu, GCVirt)); 1849 2306 1850 bool fFlushPending = VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TLB_FLUSH); 1851 if (!fFlushPending) 2307 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TLB_FLUSH)) 1852 2308 { 1853 2309 /* … … 1906 2362 1907 2363 Assert(pHostCpu->idCpu != NIL_RTCPUID); 1908 pVCpu->hm.s.idLastCpu 1909 pVCpu->hm.s.cTlbFlushes 1910 pVCpu->hm.s.fForceTLBFlush 2364 pVCpu->hm.s.idLastCpu = pHostCpu->idCpu; 2365 pVCpu->hm.s.cTlbFlushes = pHostCpu->cTlbFlushes; 2366 pVCpu->hm.s.fForceTLBFlush = false; 1911 2367 return; 1912 2368 } … … 1918 2374 * @param pHostCpu The HM physical-CPU structure. 1919 2375 * @param pVCpu The cross context virtual CPU structure. 2376 * @param pVmcsInfo The VMCS info. object. 1920 2377 * 1921 2378 * @remarks All references to "ASID" in this function pertains to "VPID" in Intel's … … 1925 2382 * @remarks Called with interrupts disabled. 1926 2383 */ 1927 static void hmR0VmxFlushTaggedTlbBoth(PHMPHYSCPU pHostCpu, PVMCPU pVCpu )2384 static void hmR0VmxFlushTaggedTlbBoth(PHMPHYSCPU pHostCpu, PVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo) 1928 2385 { 1929 2386 #ifdef VBOX_WITH_STATISTICS … … 1973 2430 * invalidated. We don't need to flush-by-VPID here as flushing by EPT covers it. See @bugref{6568}. 1974 2431 */ 1975 hmR0VmxFlushEpt(pVCpu, pV M->hm.s.vmx.enmTlbFlushEpt);2432 hmR0VmxFlushEpt(pVCpu, pVmcsInfo, pVM->hm.s.vmx.enmTlbFlushEpt); 1976 2433 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbWorldSwitch); 1977 2434 HMVMX_SET_TAGGED_TLB_FLUSHED(); … … 1989 2446 * See Intel spec. 28.3.2 "Creating and Using Cached Translation Information". 
1990 2447 */ 1991 hmR0VmxFlushEpt(pVCpu, pV M->hm.s.vmx.enmTlbFlushEpt);2448 hmR0VmxFlushEpt(pVCpu, pVmcsInfo, pVM->hm.s.vmx.enmTlbFlushEpt); 1992 2449 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlb); 1993 2450 HMVMX_SET_TAGGED_TLB_FLUSHED(); … … 2020 2477 * @param pHostCpu The HM physical-CPU structure. 2021 2478 * @param pVCpu The cross context virtual CPU structure. 2479 * @param pVmcsInfo The VMCS info. object. 2022 2480 * 2023 2481 * @remarks Called with interrupts disabled. 2024 2482 */ 2025 static void hmR0VmxFlushTaggedTlbEpt(PHMPHYSCPU pHostCpu, PVMCPU pVCpu )2483 static void hmR0VmxFlushTaggedTlbEpt(PHMPHYSCPU pHostCpu, PVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo) 2026 2484 { 2027 2485 AssertPtr(pVCpu); … … 2054 2512 if (pVCpu->hm.s.fForceTLBFlush) 2055 2513 { 2056 hmR0VmxFlushEpt(pVCpu, pV Cpu->CTX_SUFF(pVM)->hm.s.vmx.enmTlbFlushEpt);2514 hmR0VmxFlushEpt(pVCpu, pVmcsInfo, pVCpu->CTX_SUFF(pVM)->hm.s.vmx.enmTlbFlushEpt); 2057 2515 pVCpu->hm.s.fForceTLBFlush = false; 2058 2516 } … … 2152 2610 * @param pHostCpu The HM physical-CPU structure. 2153 2611 * @param pVCpu The cross context virtual CPU structure. 2612 * @param pVmcsInfo The VMCS info. object. 2154 2613 * 2155 2614 * @remarks Called with interrupts disabled. 2156 2615 */ 2157 DECLINLINE(void) hmR0VmxFlushTaggedTlb(PHMPHYSCPU pHostCpu, PVMCPU pVCpu )2616 DECLINLINE(void) hmR0VmxFlushTaggedTlb(PHMPHYSCPU pHostCpu, PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo) 2158 2617 { 2159 2618 #ifdef HMVMX_ALWAYS_FLUSH_TLB … … 2163 2622 switch (pVM->hm.s.vmx.enmTlbFlushType) 2164 2623 { 2165 case VMXTLBFLUSHTYPE_EPT_VPID: hmR0VmxFlushTaggedTlbBoth(pHostCpu, pVCpu ); break;2166 case VMXTLBFLUSHTYPE_EPT: hmR0VmxFlushTaggedTlbEpt(pHostCpu, pVCpu ); break;2167 case VMXTLBFLUSHTYPE_VPID: hmR0VmxFlushTaggedTlbVpid(pHostCpu, pVCpu); break;2168 case VMXTLBFLUSHTYPE_NONE: hmR0VmxFlushTaggedTlbNone(pHostCpu, pVCpu); break;2624 case VMXTLBFLUSHTYPE_EPT_VPID: hmR0VmxFlushTaggedTlbBoth(pHostCpu, pVCpu, pVmcsInfo); break; 2625 case VMXTLBFLUSHTYPE_EPT: hmR0VmxFlushTaggedTlbEpt(pHostCpu, pVCpu, pVmcsInfo); break; 2626 case VMXTLBFLUSHTYPE_VPID: hmR0VmxFlushTaggedTlbVpid(pHostCpu, pVCpu); break; 2627 case VMXTLBFLUSHTYPE_NONE: hmR0VmxFlushTaggedTlbNone(pHostCpu, pVCpu); break; 2169 2628 default: 2170 2629 AssertMsgFailed(("Invalid flush-tag function identifier\n")); … … 2185 2644 { 2186 2645 /* 2187 * Determine optimal flush type for Nested Paging.2188 * We cannot ignore EPT if no suitable flush-types is supported by the CPU as we've already setup unrestricted2189 * guest execution (see hmR3InitFinalizeR0()).2646 * Determine optimal flush type for nested paging. 2647 * We cannot ignore EPT if no suitable flush-types is supported by the CPU as we've already setup 2648 * unrestricted guest execution (see hmR3InitFinalizeR0()). 2190 2649 */ 2191 2650 if (pVM->hm.s.fNestedPaging) … … 2277 2736 2278 2737 /** 2279 * Sets up pin-based VM-execution controls in the VMCS. 2738 * Sets up the virtual-APIC page address for the VMCS. 2739 * 2740 * @returns VBox status code. 2741 * @param pVmcsInfo The VMCS info. object. 2742 */ 2743 DECLINLINE(int) hmR0VmxSetupVmcsVirtApicAddr(PCVMXVMCSINFO pVmcsInfo) 2744 { 2745 RTHCPHYS const HCPhysVirtApic = pVmcsInfo->HCPhysVirtApic; 2746 Assert(HCPhysVirtApic != NIL_RTHCPHYS); 2747 Assert(!(HCPhysVirtApic & 0xfff)); /* Bits 11:0 MBZ. */ 2748 return VMXWriteVmcs64(VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_FULL, HCPhysVirtApic); 2749 } 2750 2751 2752 /** 2753 * Sets up the MSR-bitmap address for the VMCS. 2754 * 2755 * @returns VBox status code. 
2756 * @param pVmcsInfo The VMCS info. object. 2757 */ 2758 DECLINLINE(int) hmR0VmxSetupVmcsMsrBitmapAddr(PCVMXVMCSINFO pVmcsInfo) 2759 { 2760 RTHCPHYS const HCPhysMsrBitmap = pVmcsInfo->HCPhysMsrBitmap; 2761 Assert(HCPhysMsrBitmap != NIL_RTHCPHYS); 2762 Assert(!(HCPhysMsrBitmap & 0xfff)); /* Bits 11:0 MBZ. */ 2763 return VMXWriteVmcs64(VMX_VMCS64_CTRL_MSR_BITMAP_FULL, HCPhysMsrBitmap); 2764 } 2765 2766 2767 /** 2768 * Sets up the APIC-access page address for the VMCS. 2280 2769 * 2281 2770 * @returns VBox status code. 2282 2771 * @param pVCpu The cross context virtual CPU structure. 2283 * 2284 * @remarks We don't really care about optimizing vmwrites here as it's done only 2285 * once per VM and hence we don't care about VMCS-field cache comparisons. 2286 */ 2287 static int hmR0VmxSetupPinCtls(PVMCPU pVCpu) 2772 */ 2773 DECLINLINE(int) hmR0VmxSetupVmcsApicAccessAddr(PVMCPU pVCpu) 2774 { 2775 RTHCPHYS const HCPhysApicAccess = pVCpu->CTX_SUFF(pVM)->hm.s.vmx.HCPhysApicAccess; 2776 Assert(HCPhysApicAccess != NIL_RTHCPHYS); 2777 Assert(!(HCPhysApicAccess & 0xfff)); /* Bits 11:0 MBZ. */ 2778 return VMXWriteVmcs64(VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL, HCPhysApicAccess); 2779 } 2780 2781 2782 /** 2783 * Sets up the VMCS link pointer for the VMCS. 2784 * 2785 * @returns VBox status code. 2786 * @param pVmcsInfo The VMCS info. object. 2787 */ 2788 DECLINLINE(int) hmR0VmxSetupVmcsLinkPtr(PVMXVMCSINFO pVmcsInfo) 2789 { 2790 uint64_t const u64VmcsLinkPtr = pVmcsInfo->u64VmcsLinkPtr; 2791 Assert(u64VmcsLinkPtr == UINT64_C(0xffffffffffffffff)); /* Bits 63:0 MB1. */ 2792 return VMXWriteVmcs64(VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, u64VmcsLinkPtr); 2793 } 2794 2795 2796 /** 2797 * Sets up the VM-entry MSR load, VM-exit MSR-store and VM-exit MSR-load addresses 2798 * in the VMCS. 2799 * 2800 * @returns VBox status code. 2801 * @param pVmcsInfo The VMCS info. object. 2802 */ 2803 DECLINLINE(int) hmR0VmxSetupVmcsAutoLoadStoreMsrAddrs(PVMXVMCSINFO pVmcsInfo) 2804 { 2805 RTHCPHYS const HCPhysGuestMsrLoad = pVmcsInfo->HCPhysGuestMsrLoad; 2806 Assert(HCPhysGuestMsrLoad != NIL_RTHCPHYS); 2807 Assert(!(HCPhysGuestMsrLoad & 0xf)); /* Bits 3:0 MBZ. */ 2808 2809 RTHCPHYS const HCPhysGuestMsrStore = pVmcsInfo->HCPhysGuestMsrStore; 2810 Assert(HCPhysGuestMsrStore != NIL_RTHCPHYS); 2811 Assert(!(HCPhysGuestMsrStore & 0xf)); /* Bits 3:0 MBZ. */ 2812 2813 RTHCPHYS const HCPhysHostMsrLoad = pVmcsInfo->HCPhysHostMsrLoad; 2814 Assert(HCPhysHostMsrLoad != NIL_RTHCPHYS); 2815 Assert(!(HCPhysHostMsrLoad & 0xf)); /* Bits 3:0 MBZ. */ 2816 2817 int rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL, HCPhysGuestMsrLoad); 2818 rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL, HCPhysGuestMsrStore); 2819 rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL, HCPhysHostMsrLoad); 2820 AssertRCReturn(rc, rc); 2821 return VINF_SUCCESS; 2822 } 2823 2824 2825 /** 2826 * Sets up MSR permissions in the MSR bitmap of a VMCS info. object. 2827 * 2828 * @param pVCpu The cross context virtual CPU structure. 2829 * @param pVmcsInfo The VMCS info. object. 2830 * @param fIsNstGstVmcs Whether this is a nested-guest VMCS. 2831 */ 2832 static void hmR0VmxSetupVmcsMsrPermissions(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNstGstVmcs) 2833 { 2834 Assert(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS); 2835 2836 /* 2837 * The guest can access the following MSRs (read, write) without causing 2838 * VM-exits; they are loaded/stored automatically using fields in the VMCS. 
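 *
 * For background, the MSR bitmap itself is a single 4K page split into four
 * 1K regions (Intel spec. 24.6.9 "MSR-Bitmap Address"): read-low for MSRs
 * 0x00000000..0x00001fff at offset 0x000, read-high for 0xc0000000..0xc0001fff
 * at 0x400, write-low at 0x800 and write-high at 0xc00, one bit per MSR; a
 * clear bit means the access does not cause a VM-exit. A minimal sketch for a
 * low-range MSR (the encoding actually used by hmR0VmxSetMsrPermission() may
 * differ):
 *     uint8_t *pbMsrBitmap = (uint8_t *)pVmcsInfo->pvMsrBitmap;
 *     ASMBitClear(pbMsrBitmap + 0x000, idMsr);    allow RDMSR without exiting
 *     ASMBitClear(pbMsrBitmap + 0x800, idMsr);    allow WRMSR without exiting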
2839 */ 2840 PVM pVM = pVCpu->CTX_SUFF(pVM); 2841 hmR0VmxSetMsrPermission(pVCpu, pVmcsInfo, fIsNstGstVmcs, MSR_IA32_SYSENTER_CS, VMXMSRPM_ALLOW_RD | VMXMSRPM_ALLOW_WR); 2842 hmR0VmxSetMsrPermission(pVCpu, pVmcsInfo, fIsNstGstVmcs, MSR_IA32_SYSENTER_ESP, VMXMSRPM_ALLOW_RD | VMXMSRPM_ALLOW_WR); 2843 hmR0VmxSetMsrPermission(pVCpu, pVmcsInfo, fIsNstGstVmcs, MSR_IA32_SYSENTER_EIP, VMXMSRPM_ALLOW_RD | VMXMSRPM_ALLOW_WR); 2844 hmR0VmxSetMsrPermission(pVCpu, pVmcsInfo, fIsNstGstVmcs, MSR_K8_GS_BASE, VMXMSRPM_ALLOW_RD | VMXMSRPM_ALLOW_WR); 2845 hmR0VmxSetMsrPermission(pVCpu, pVmcsInfo, fIsNstGstVmcs, MSR_K8_FS_BASE, VMXMSRPM_ALLOW_RD | VMXMSRPM_ALLOW_WR); 2846 2847 #ifdef VBOX_STRICT 2848 /** @todo NSTVMX: Remove this later. */ 2849 uint32_t fMsrpm = HMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, MSR_IA32_SYSENTER_CS); 2850 Assert((fMsrpm & (VMXMSRPM_ALLOW_RD | VMXMSRPM_ALLOW_WR)) == (VMXMSRPM_ALLOW_RD | VMXMSRPM_ALLOW_WR)); 2851 2852 fMsrpm = HMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, MSR_K8_GS_BASE); 2853 Assert((fMsrpm & (VMXMSRPM_ALLOW_RD | VMXMSRPM_ALLOW_WR)) == (VMXMSRPM_ALLOW_RD | VMXMSRPM_ALLOW_WR)); 2854 #endif 2855 2856 /* 2857 * The IA32_PRED_CMD and IA32_FLUSH_CMD MSRs are write-only and has no state 2858 * associated with then. We never need to intercept access (writes need to be 2859 * executed without causing a VM-exit, reads will #GP fault anyway). 2860 * 2861 * The IA32_SPEC_CTRL MSR is read/write and has state. We allow the guest to 2862 * read/write them. We swap the the guest/host MSR value using the 2863 * auto-load/store MSR area. 2864 */ 2865 if (pVM->cpum.ro.GuestFeatures.fIbpb) 2866 hmR0VmxSetMsrPermission(pVCpu, pVmcsInfo, fIsNstGstVmcs, MSR_IA32_PRED_CMD, VMXMSRPM_ALLOW_RD | VMXMSRPM_ALLOW_WR); 2867 if (pVM->cpum.ro.GuestFeatures.fFlushCmd) 2868 hmR0VmxSetMsrPermission(pVCpu, pVmcsInfo, fIsNstGstVmcs, MSR_IA32_FLUSH_CMD, VMXMSRPM_ALLOW_RD | VMXMSRPM_ALLOW_WR); 2869 if (pVM->cpum.ro.GuestFeatures.fIbrs) 2870 hmR0VmxSetMsrPermission(pVCpu, pVmcsInfo, fIsNstGstVmcs, MSR_IA32_SPEC_CTRL, VMXMSRPM_ALLOW_RD | VMXMSRPM_ALLOW_WR); 2871 2872 /* 2873 * IA32_EFER MSR is always intercepted, see @bugref{9180#c37}. 2874 */ 2875 2876 #if HC_ARCH_BITS == 64 2877 /* 2878 * Allow full read/write access for the following MSRs (mandatory for VT-x) 2879 * required for 64-bit guests. 2880 */ 2881 if (pVM->hm.s.fAllow64BitGuests) 2882 { 2883 hmR0VmxSetMsrPermission(pVCpu, pVmcsInfo, fIsNstGstVmcs, MSR_K8_LSTAR, VMXMSRPM_ALLOW_RD | VMXMSRPM_ALLOW_WR); 2884 hmR0VmxSetMsrPermission(pVCpu, pVmcsInfo, fIsNstGstVmcs, MSR_K6_STAR, VMXMSRPM_ALLOW_RD | VMXMSRPM_ALLOW_WR); 2885 hmR0VmxSetMsrPermission(pVCpu, pVmcsInfo, fIsNstGstVmcs, MSR_K8_SF_MASK, VMXMSRPM_ALLOW_RD | VMXMSRPM_ALLOW_WR); 2886 hmR0VmxSetMsrPermission(pVCpu, pVmcsInfo, fIsNstGstVmcs, MSR_K8_KERNEL_GS_BASE, VMXMSRPM_ALLOW_RD | VMXMSRPM_ALLOW_WR); 2887 2888 # ifdef VBOX_STRICT 2889 fMsrpm = HMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, MSR_K8_GS_BASE); 2890 Assert((fMsrpm & (VMXMSRPM_ALLOW_RD | VMXMSRPM_ALLOW_WR)) == (VMXMSRPM_ALLOW_RD | VMXMSRPM_ALLOW_WR)); 2891 # endif 2892 } 2893 #endif 2894 } 2895 2896 2897 /** 2898 * Sets up pin-based VM-execution controls in the VMCS. 2899 * 2900 * @returns VBox status code. 2901 * @param pVCpu The cross context virtual CPU structure. 2902 * @param pVmcsInfo The VMCS info. object. 2903 * @param fIsNstGstVmcs Whether this is a nested-guest VMCS. 
2904 */ 2905 static int hmR0VmxSetupVmcsPinCtls(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo) 2288 2906 { 2289 2907 PVM pVM = pVCpu->CTX_SUFF(pVM); … … 2297 2915 fVal |= VMX_PIN_CTLS_VIRT_NMI; /* Use virtual NMIs and virtual-NMI blocking features. */ 2298 2916 2299 /* Enable the VMX 2917 /* Enable the VMX-preemption timer. */ 2300 2918 if (pVM->hm.s.vmx.fUsePreemptTimer) 2301 2919 { … … 2325 2943 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PIN_EXEC, fVal); 2326 2944 AssertRCReturn(rc, rc); 2327 pV Cpu->hm.s.vmx.Ctls.u32PinCtls = fVal;2945 pVmcsInfo->u32PinCtls = fVal; 2328 2946 2329 2947 return VINF_SUCCESS; … … 2335 2953 * 2336 2954 * @returns VBox status code. 2337 * @param pVCpu The cross context virtual CPU structure. 2338 * 2339 * @remarks We don't really care about optimizing vmwrites here as it's done only 2340 * once per VM and hence we don't care about VMCS-field cache comparisons. 2341 */ 2342 static int hmR0VmxSetupProcCtls2(PVMCPU pVCpu) 2955 * @param pVCpu The cross context virtual CPU structure. 2956 * @param pVmcsInfo The VMCS info. object. 2957 * @param fIsNstGstVmcs Whether this is a nested-guest VMCS. 2958 */ 2959 static int hmR0VmxSetupVmcsProcCtls2(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo) 2343 2960 { 2344 2961 PVM pVM = pVCpu->CTX_SUFF(pVM); … … 2354 2971 fVal |= VMX_PROC_CTLS2_EPT; 2355 2972 2356 /* 2357 * Enable the INVPCID instruction if supported by the hardware and we expose 2358 * it to the guest. Without this, guest executing INVPCID would cause a #UD. 2359 */ 2973 /* Enable the INVPCID instruction if supported by the hardware and we expose 2974 it to the guest. Without this, guest executing INVPCID would cause a #UD. */ 2360 2975 if ( (pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_INVPCID) 2361 2976 && pVM->cpum.ro.GuestFeatures.fInvpcid) … … 2366 2981 fVal |= VMX_PROC_CTLS2_VPID; 2367 2982 2368 /* Enable Unrestricted guest execution. */2983 /* Enable unrestricted guest execution. */ 2369 2984 if (pVM->hm.s.vmx.fUnrestrictedGuest) 2370 2985 fVal |= VMX_PROC_CTLS2_UNRESTRICTED_GUEST; … … 2388 3003 if (pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS) 2389 3004 { 2390 Assert(pVM->hm.s.vmx.HCPhysApicAccess); 2391 Assert(!(pVM->hm.s.vmx.HCPhysApicAccess & 0xfff)); /* Bits 11:0 MBZ. */ 2392 fVal |= VMX_PROC_CTLS2_VIRT_APIC_ACCESS; /* Virtualize APIC accesses. */ 2393 int rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL, pVM->hm.s.vmx.HCPhysApicAccess); 3005 fVal |= VMX_PROC_CTLS2_VIRT_APIC_ACCESS; 3006 int rc = hmR0VmxSetupVmcsApicAccessAddr(pVCpu); 2394 3007 AssertRCReturn(rc, rc); 2395 3008 } 2396 3009 2397 /* Enable RDTSCP. */ 2398 if (pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_RDTSCP) 3010 /* Enable the RDTSCP instruction if supported by the hardware and we expose 3011 it to the guest. Without this, guest executing RDTSCP would cause a #UD. */ 3012 if ( (pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_RDTSCP) 3013 && pVM->cpum.ro.GuestFeatures.fRdTscP) 2399 3014 fVal |= VMX_PROC_CTLS2_RDTSCP; 2400 3015 … … 2422 3037 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, fVal); 2423 3038 AssertRCReturn(rc, rc); 2424 pV Cpu->hm.s.vmx.Ctls.u32ProcCtls2 = fVal;3039 pVmcsInfo->u32ProcCtls2 = fVal; 2425 3040 2426 3041 return VINF_SUCCESS; … … 2432 3047 * 2433 3048 * @returns VBox status code. 2434 * @param pVCpu The cross context virtual CPU structure. 2435 * 2436 * @remarks We don't really care about optimizing vmwrites here as it's done only 2437 * once per VM and hence we don't care about VMCS-field cache comparisons. 
2438 */ 2439 static int hmR0VmxSetupProcCtls(PVMCPU pVCpu) 3049 * @param pVCpu The cross context virtual CPU structure. 3050 * @param pVmcsInfo The VMCS info. object. 3051 * @param fIsNstGstVmcs Whether this is a nested-guest VMCS. 3052 */ 3053 static int hmR0VmxSetupVmcsProcCtls(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo) 2440 3054 { 2441 3055 PVM pVM = pVCpu->CTX_SUFF(pVM); 3056 2442 3057 uint32_t fVal = pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed0; /* Bits set here must be set in the VMCS. */ 2443 3058 uint32_t const fZap = pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */ … … 2455 3070 || (pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed0 & VMX_PROC_CTLS_MOV_DR_EXIT)) 2456 3071 { 2457 LogRelFunc(("Unsupported VMX_PROC_CTLS_MOV_DR_EXIT combo!"));2458 3072 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_PROC_MOV_DRX_EXIT; 2459 3073 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO; 2460 3074 } 2461 3075 2462 /* Without Nested Paging, INVLPG (also affects INVPCID) and MOV CR3 instructions should cause VM-exits. */3076 /* Without nested paging, INVLPG (also affects INVPCID) and MOV CR3 instructions should cause VM-exits. */ 2463 3077 if (!pVM->hm.s.fNestedPaging) 2464 3078 { 2465 Assert(!pVM->hm.s.vmx.fUnrestrictedGuest); /* Paranoia. */3079 Assert(!pVM->hm.s.vmx.fUnrestrictedGuest); 2466 3080 fVal |= VMX_PROC_CTLS_INVLPG_EXIT 2467 3081 | VMX_PROC_CTLS_CR3_LOAD_EXIT … … 2473 3087 && pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_TPR_SHADOW) 2474 3088 { 2475 Assert(pVCpu->hm.s.vmx.HCPhysVirtApic); 2476 Assert(!(pVCpu->hm.s.vmx.HCPhysVirtApic & 0xfff)); /* Bits 11:0 MBZ. */ 2477 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_TPR_THRESHOLD, 0); 2478 rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_FULL, pVCpu->hm.s.vmx.HCPhysVirtApic); 2479 AssertRCReturn(rc, rc); 2480 2481 fVal |= VMX_PROC_CTLS_USE_TPR_SHADOW; /* CR8 reads from the Virtual-APIC page. */ 2482 /* CR8 writes cause a VM-exit based on TPR threshold. */ 3089 fVal |= VMX_PROC_CTLS_USE_TPR_SHADOW; /* CR8 reads from the Virtual-APIC page. */ 3090 /* CR8 writes cause a VM-exit based on TPR threshold. */ 2483 3091 Assert(!(fVal & VMX_PROC_CTLS_CR8_STORE_EXIT)); 2484 3092 Assert(!(fVal & VMX_PROC_CTLS_CR8_LOAD_EXIT)); 3093 int rc = hmR0VmxSetupVmcsVirtApicAddr(pVmcsInfo); 3094 AssertRCReturn(rc, rc); 2485 3095 } 2486 3096 else 2487 3097 { 2488 /* 2489 * Some 32-bit CPUs do not support CR8 load/store exiting as MOV CR8 is invalid on 32-bit Intel CPUs. 2490 * Set this control only for 64-bit guests. 2491 */ 3098 /* Some 32-bit CPUs do not support CR8 load/store exiting as MOV CR8 is 3099 invalid on 32-bit Intel CPUs. Set this control only for 64-bit guests. */ 2492 3100 if (pVM->hm.s.fAllow64BitGuests) 2493 3101 { 2494 fVal |= VMX_PROC_CTLS_CR8_STORE_EXIT 2495 | VMX_PROC_CTLS_CR8_LOAD_EXIT; 3102 fVal |= VMX_PROC_CTLS_CR8_STORE_EXIT /* CR8 reads cause a VM-exit. */ 3103 | VMX_PROC_CTLS_CR8_LOAD_EXIT; /* CR8 writes cause a VM-exit. */ 2496 3104 } 2497 3105 } … … 2501 3109 { 2502 3110 fVal |= VMX_PROC_CTLS_USE_MSR_BITMAPS; 2503 2504 Assert(pVCpu->hm.s.vmx.HCPhysMsrBitmap); 2505 Assert(!(pVCpu->hm.s.vmx.HCPhysMsrBitmap & 0xfff)); /* Bits 11:0 MBZ. 
*/ 2506 int rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_MSR_BITMAP_FULL, pVCpu->hm.s.vmx.HCPhysMsrBitmap); 3111 int rc = hmR0VmxSetupVmcsMsrBitmapAddr(pVmcsInfo); 2507 3112 AssertRCReturn(rc, rc); 2508 2509 /*2510 * The guest can access the following MSRs (read, write) without causing VM-exits; they are loaded/stored2511 * automatically using dedicated fields in the VMCS.2512 */2513 hmR0VmxSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_CS, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);2514 hmR0VmxSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_ESP, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);2515 hmR0VmxSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_EIP, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);2516 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_GS_BASE, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);2517 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_FS_BASE, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);2518 #if HC_ARCH_BITS == 642519 /*2520 * Set passthru permissions for the following MSRs (mandatory for VT-x) required for 64-bit guests.2521 */2522 if (pVM->hm.s.fAllow64BitGuests)2523 {2524 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_LSTAR, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);2525 hmR0VmxSetMsrPermission(pVCpu, MSR_K6_STAR, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);2526 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_SF_MASK, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);2527 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_KERNEL_GS_BASE, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);2528 }2529 #endif2530 /*2531 * The IA32_PRED_CMD and IA32_FLUSH_CMD MSRs are write-only and has no state2532 * associated with then. We never need to intercept access (writes need to2533 * be executed without exiting, reads will #GP-fault anyway).2534 */2535 if (pVM->cpum.ro.GuestFeatures.fIbpb)2536 hmR0VmxSetMsrPermission(pVCpu, MSR_IA32_PRED_CMD, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);2537 if (pVM->cpum.ro.GuestFeatures.fFlushCmd)2538 hmR0VmxSetMsrPermission(pVCpu, MSR_IA32_FLUSH_CMD, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);2539 2540 /* Though MSR_IA32_PERF_GLOBAL_CTRL is saved/restored lazily, we want intercept reads/write to it for now. */2541 3113 } 2542 3114 … … 2556 3128 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, fVal); 2557 3129 AssertRCReturn(rc, rc); 2558 pVCpu->hm.s.vmx.Ctls.u32ProcCtls = fVal; 3130 pVmcsInfo->u32ProcCtls = fVal; 3131 3132 /* Set up MSR permissions that don't change through the lifetime of the VM. */ 3133 hmR0VmxSetupVmcsMsrPermissions(pVCpu, pVmcsInfo, false /* fIsNstGstVmcs */); 2559 3134 2560 3135 /* Set up secondary processor-based VM-execution controls if the CPU supports it. */ 2561 if (pV Cpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_USE_SECONDARY_CTLS)2562 return hmR0VmxSetup ProcCtls2(pVCpu);3136 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_SECONDARY_CTLS) 3137 return hmR0VmxSetupVmcsProcCtls2(pVCpu, pVmcsInfo); 2563 3138 2564 3139 /* Sanity check, should not really happen. 
*/ 2565 if (RT_UNLIKELY(pVM->hm.s.vmx.fUnrestrictedGuest)) 2566 { 2567 LogRelFunc(("Unrestricted Guest enabled when secondary processor-based VM-execution controls not available\n")); 3140 if (RT_LIKELY(!pVM->hm.s.vmx.fUnrestrictedGuest)) 3141 { /* likely */ } 3142 else 3143 { 2568 3144 pVCpu->hm.s.u32HMError = VMX_UFC_INVALID_UX_COMBO; 2569 3145 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO; … … 2576 3152 2577 3153 /** 2578 * Sets up miscellaneous (everything other than Pin & Processor-based2579 * VM-execution) control fields in the VMCS.3154 * Sets up miscellaneous (everything other than Pin, Processor and secondary 3155 * Processor-based VM-execution) control fields in the VMCS. 2580 3156 * 2581 3157 * @returns VBox status code. 2582 * @param pVCpu The cross context virtual CPU structure. 2583 */ 2584 static int hmR0VmxSetupMiscCtls(PVMCPU pVCpu) 2585 { 2586 AssertPtr(pVCpu); 2587 2588 int rc = VERR_GENERAL_FAILURE; 2589 2590 /* All fields are zero-initialized during allocation; but don't remove the commented block below. */ 2591 #if 0 2592 /* All CR3 accesses cause VM-exits. Later we optimize CR3 accesses (see hmR0VmxExportGuestCR3AndCR4())*/ 2593 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_CR3_TARGET_COUNT, 0); 2594 rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_TSC_OFFSET_FULL, 0); 2595 2596 /* 2597 * Set MASK & MATCH to 0. VMX checks if GuestPFErrCode & MASK == MATCH. If equal (in our case it always is) 2598 * and if the X86_XCPT_PF bit in the exception bitmap is set it causes a VM-exit, if clear doesn't cause an exit. 2599 * We thus use the exception bitmap to control it rather than use both. 2600 */ 2601 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK, 0); 2602 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH, 0); 2603 2604 /* All IO & IOIO instructions cause VM-exits. */ 2605 rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_IO_BITMAP_A_FULL, 0); 2606 rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_IO_BITMAP_B_FULL, 0); 2607 2608 /* Initialize the MSR-bitmap area. */ 2609 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, 0); 2610 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, 0); 2611 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, 0); 2612 AssertRCReturn(rc, rc); 2613 #endif 2614 2615 /* Setup MSR auto-load/store area. */ 2616 Assert(pVCpu->hm.s.vmx.HCPhysGuestMsr); 2617 Assert(!(pVCpu->hm.s.vmx.HCPhysGuestMsr & 0xf)); /* Lower 4 bits MBZ. */ 2618 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL, pVCpu->hm.s.vmx.HCPhysGuestMsr); 2619 rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL, pVCpu->hm.s.vmx.HCPhysGuestMsr); 2620 AssertRCReturn(rc, rc); 2621 2622 Assert(pVCpu->hm.s.vmx.HCPhysHostMsr); 2623 Assert(!(pVCpu->hm.s.vmx.HCPhysHostMsr & 0xf)); /* Lower 4 bits MBZ. */ 2624 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL, pVCpu->hm.s.vmx.HCPhysHostMsr); 2625 AssertRCReturn(rc, rc); 2626 2627 /* Set VMCS link pointer. Reserved for future use, must be -1. Intel spec. 24.4 "Guest-State Area". */ 2628 rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, UINT64_C(0xffffffffffffffff)); 2629 AssertRCReturn(rc, rc); 2630 2631 /* All fields are zero-initialized during allocation; but don't remove the commented block below. */ 2632 #if 0 2633 /* Setup debug controls */ 2634 rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_DEBUGCTL_FULL, 0); 2635 rc |= VMXWriteVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, 0); 2636 AssertRCReturn(rc, rc); 2637 #endif 2638 3158 * @param pVCpu The cross context virtual CPU structure. 3159 * @param pVmcsInfo The VMCS info. object. 
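 *
 * @remarks The CR0/CR4 guest/host masks set up here follow the usual VT-x
 *          semantics (Intel spec. 24.6.6): mask bits that are 1 are owned by
 *          the host (guest reads of such bits return the read-shadow value and
 *          guest writes that differ from the read shadow cause VM-exits),
 *          while mask bits that are 0 are owned by the guest. Caching the
 *          masks in the VMCS info. object lets later code tell which CR0/CR4
 *          bits can be used directly from the guest context without a VMREAD.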
3160 */ 3161 static int hmR0VmxSetupVmcsMiscCtls(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo) 3162 { 3163 /* Set the auto-load/store MSR area addresses in the VMCS. */ 3164 int rc = hmR0VmxSetupVmcsAutoLoadStoreMsrAddrs(pVmcsInfo); 3165 if (RT_SUCCESS(rc)) 3166 { 3167 /* Set the VMCS link pointer in the VMCS. */ 3168 rc = hmR0VmxSetupVmcsLinkPtr(pVmcsInfo); 3169 if (RT_SUCCESS(rc)) 3170 { 3171 /* Set the CR0/CR4 guest/host mask. */ 3172 uint64_t const u64Cr0Mask = hmR0VmxGetFixedCr0Mask(pVCpu); 3173 uint64_t const u64Cr4Mask = hmR0VmxGetFixedCr4Mask(pVCpu); 3174 rc = VMXWriteVmcsHstN(VMX_VMCS_CTRL_CR0_MASK, u64Cr0Mask); 3175 rc |= VMXWriteVmcsHstN(VMX_VMCS_CTRL_CR4_MASK, u64Cr4Mask); 3176 if (RT_SUCCESS(rc)) 3177 { 3178 pVmcsInfo->u64Cr0Mask = u64Cr0Mask; 3179 pVmcsInfo->u64Cr4Mask = u64Cr4Mask; 3180 return VINF_SUCCESS; 3181 } 3182 LogRelFunc(("Failed to initialize VMCS CR0/CR4 guest/host mask. rc=%Rrc\n", rc)); 3183 } 3184 else 3185 LogRelFunc(("Failed to initialize VMCS link pointer. rc=%Rrc\n", rc)); 3186 } 3187 else 3188 LogRelFunc(("Failed to initialize VMCS auto-load/store MSR addresses. rc=%Rrc\n", rc)); 2639 3189 return rc; 2640 3190 } … … 2649 3199 * 2650 3200 * @returns VBox status code. 2651 * @param pVCpu The cross context virtual CPU structure. 2652 */ 2653 static int hmR0VmxInitXcptBitmap(PVMCPU pVCpu) 2654 { 2655 AssertPtr(pVCpu); 2656 2657 uint32_t uXcptBitmap; 2658 2659 /* Must always intercept #AC to prevent the guest from hanging the CPU. */ 2660 uXcptBitmap = RT_BIT_32(X86_XCPT_AC); 2661 2662 /* Because we need to maintain the DR6 state even when intercepting DRx reads 2663 and writes, and because recursive #DBs can cause the CPU hang, we must always 2664 intercept #DB. */ 2665 uXcptBitmap |= RT_BIT_32(X86_XCPT_DB); 2666 2667 /* Without Nested Paging, #PF must cause a VM-exit so we can sync our shadow page tables. */ 2668 if (!pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging) 2669 uXcptBitmap |= RT_BIT(X86_XCPT_PF); 3201 * @param pVCpu The cross context virtual CPU structure. 3202 * @param pVmcsInfo The VMCS info. object. 3203 */ 3204 static int hmR0VmxSetupVmcsXcptBitmap(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo) 3205 { 3206 /* 3207 * The following exceptions are always intercepted: 3208 * 3209 * #AC - To prevent the guest from hanging the CPU. 3210 * #DB - To maintain the DR6 state even when intercepting DRx reads/writes and 3211 * recursive #DBs can cause a CPU hang. 3212 * #PF - To sync our shadow page tables when nested-paging is not used. 3213 */ 3214 bool const fNestedPaging = pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging; 3215 uint32_t const uXcptBitmap = RT_BIT(X86_XCPT_AC) 3216 | RT_BIT(X86_XCPT_DB) 3217 | (fNestedPaging ? 0 : RT_BIT(X86_XCPT_PF)); 2670 3218 2671 3219 /* Commit it to the VMCS. */ … … 2674 3222 2675 3223 /* Update our cache of the exception bitmap. */ 2676 pV Cpu->hm.s.vmx.Ctls.u32XcptBitmap = uXcptBitmap;3224 pVmcsInfo->u32XcptBitmap = uXcptBitmap; 2677 3225 return VINF_SUCCESS; 3226 } 3227 3228 3229 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX 3230 /** 3231 * Sets up the VMCS for executing a nested-guest using hardware-assisted VMX. 3232 * 3233 * @returns VBox status code. 3234 * @param pVCpu The cross context virtual CPU structure. 3235 * @param pVmcsInfo The VMCS info. object. 
3236 */ 3237 static int hmR0VmxSetupVmcsCtlsNested(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo) 3238 { 3239 PVM pVM = pVCpu->CTX_SUFF(pVM); 3240 int rc = hmR0VmxSetupVmcsLinkPtr(pVmcsInfo); 3241 if (RT_SUCCESS(rc)) 3242 { 3243 rc = hmR0VmxSetupVmcsAutoLoadStoreMsrAddrs(pVmcsInfo); 3244 if (RT_SUCCESS(rc)) 3245 { 3246 if (pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_MSR_BITMAPS) 3247 rc = hmR0VmxSetupVmcsMsrBitmapAddr(pVmcsInfo); 3248 if (RT_SUCCESS(rc)) 3249 { 3250 if (pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS) 3251 rc = hmR0VmxSetupVmcsApicAccessAddr(pVCpu); 3252 if (RT_SUCCESS(rc)) 3253 return VINF_SUCCESS; 3254 3255 LogRelFunc(("Failed to set up the APIC-access address in the nested-guest VMCS. rc=%Rrc\n", rc)); 3256 } 3257 else 3258 LogRelFunc(("Failed to set up the MSR-bitmap address in the nested-guest VMCS. rc=%Rrc\n", rc)); 3259 } 3260 else 3261 LogRelFunc(("Failed to set up the VMCS link pointer in the nested-guest VMCS. rc=%Rrc\n", rc)); 3262 } 3263 else 3264 LogRelFunc(("Failed to set up the auto-load/store MSR addresses in the nested-guest VMCS. rc=%Rrc\n", rc)); 3265 3266 return rc; 3267 } 3268 #endif 3269 3270 3271 /** 3272 * Sets up the VMCS for executing a guest (or nested-guest) using hardware-assisted 3273 * VMX. 3274 * 3275 * @returns VBox status code. 3276 * @param pVCpu The cross context virtual CPU structure. 3277 * @param pVmcsInfo The VMCS info. object. 3278 * @param fIsNstGstVmcs Whether this is a nested-guest VMCS. 3279 */ 3280 static int hmR0VmxSetupVmcs(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNstGstVmcs) 3281 { 3282 Assert(pVmcsInfo); 3283 Assert(pVmcsInfo->pvVmcs); 3284 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD)); 3285 3286 /* Set the CPU specified revision identifier at the beginning of the VMCS structure. */ 3287 PVM pVM = pVCpu->CTX_SUFF(pVM); 3288 *(uint32_t *)pVmcsInfo->pvVmcs = RT_BF_GET(pVM->hm.s.vmx.Msrs.u64Basic, VMX_BF_BASIC_VMCS_ID); 3289 const char * const pszVmcs = fIsNstGstVmcs ? "nested-guest VMCS" : "guest VMCS"; 3290 3291 LogFlowFunc(("\n")); 3292 3293 /* 3294 * Initialize the VMCS using VMCLEAR before loading the VMCS. 3295 * See Intel spec. 31.6 "Preparation And Launching A Virtual Machine". 3296 */ 3297 int rc = hmR0VmxClearVmcs(pVmcsInfo); 3298 if (RT_SUCCESS(rc)) 3299 { 3300 rc = hmR0VmxLoadVmcs(pVmcsInfo); 3301 if (RT_SUCCESS(rc)) 3302 { 3303 if (!fIsNstGstVmcs) 3304 { 3305 rc = hmR0VmxSetupVmcsPinCtls(pVCpu, pVmcsInfo); 3306 if (RT_SUCCESS(rc)) 3307 { 3308 rc = hmR0VmxSetupVmcsProcCtls(pVCpu, pVmcsInfo); 3309 if (RT_SUCCESS(rc)) 3310 { 3311 rc = hmR0VmxSetupVmcsMiscCtls(pVCpu, pVmcsInfo); 3312 if (RT_SUCCESS(rc)) 3313 { 3314 rc = hmR0VmxSetupVmcsXcptBitmap(pVCpu, pVmcsInfo); 3315 if (RT_SUCCESS(rc)) 3316 { /* likely */ } 3317 else 3318 LogRelFunc(("Failed to initialize exception bitmap. rc=%Rrc\n", rc)); 3319 } 3320 else 3321 LogRelFunc(("Failed to setup miscellaneous controls. rc=%Rrc\n", rc)); 3322 } 3323 else 3324 LogRelFunc(("Failed to setup processor-based VM-execution controls. rc=%Rrc\n", rc)); 3325 } 3326 else 3327 LogRelFunc(("Failed to setup pin-based controls. rc=%Rrc\n", rc)); 3328 } 3329 else 3330 { 3331 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX 3332 rc = hmR0VmxSetupVmcsCtlsNested(pVCpu, pVmcsInfo); 3333 if (RT_SUCCESS(rc)) 3334 { /* likely */ } 3335 else 3336 LogRelFunc(("Failed to initialize nested-guest VMCS. rc=%Rrc\n", rc)); 3337 #else 3338 AssertFailed(); 3339 #endif 3340 } 3341 } 3342 else 3343 LogRelFunc(("Failed to load the %s. 
rc=%Rrc\n", rc, pszVmcs)); 3344 } 3345 else 3346 LogRelFunc(("Failed to clear the %s. rc=%Rrc\n", rc, pszVmcs)); 3347 3348 /* Sync any CPU internal VMCS data back into our VMCS in memory. */ 3349 if (RT_SUCCESS(rc)) 3350 { 3351 rc = hmR0VmxClearVmcs(pVmcsInfo); 3352 if (RT_SUCCESS(rc)) 3353 { /* likely */ } 3354 else 3355 LogRelFunc(("Failed to clear the %s post setup. rc=%Rrc\n", rc, pszVmcs)); 3356 } 3357 3358 /* 3359 * Update the last-error record both for failures and success, so we 3360 * can propagate the status code back to ring-3 for diagnostics. 3361 */ 3362 hmR0VmxUpdateErrorRecord(pVCpu, rc); 3363 NOREF(pszVmcs); 3364 return rc; 3365 } 3366 3367 3368 /** 3369 * Does global VT-x initialization (called during module initialization). 3370 * 3371 * @returns VBox status code. 3372 */ 3373 VMMR0DECL(int) VMXR0GlobalInit(void) 3374 { 3375 #ifdef HMVMX_USE_FUNCTION_TABLE 3376 AssertCompile(VMX_EXIT_MAX + 1 == RT_ELEMENTS(g_apfnVMExitHandlers)); 3377 # ifdef VBOX_STRICT 3378 for (unsigned i = 0; i < RT_ELEMENTS(g_apfnVMExitHandlers); i++) 3379 Assert(g_apfnVMExitHandlers[i]); 3380 # endif 3381 #endif 3382 return VINF_SUCCESS; 3383 } 3384 3385 3386 /** 3387 * Does global VT-x termination (called during module termination). 3388 */ 3389 VMMR0DECL(void) VMXR0GlobalTerm() 3390 { 3391 /* Nothing to do currently. */ 3392 } 3393 3394 3395 /** 3396 * Sets up and activates VT-x on the current CPU. 3397 * 3398 * @returns VBox status code. 3399 * @param pHostCpu The HM physical-CPU structure. 3400 * @param pVM The cross context VM structure. Can be 3401 * NULL after a host resume operation. 3402 * @param pvCpuPage Pointer to the VMXON region (can be NULL if @a 3403 * fEnabledByHost is @c true). 3404 * @param HCPhysCpuPage Physical address of the VMXON region (can be 0 if 3405 * @a fEnabledByHost is @c true). 3406 * @param fEnabledByHost Set if SUPR0EnableVTx() or similar was used to 3407 * enable VT-x on the host. 3408 * @param pHwvirtMsrs Pointer to the hardware-virtualization MSRs. 3409 */ 3410 VMMR0DECL(int) VMXR0EnableCpu(PHMPHYSCPU pHostCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage, bool fEnabledByHost, 3411 PCSUPHWVIRTMSRS pHwvirtMsrs) 3412 { 3413 Assert(pHostCpu); 3414 Assert(pHwvirtMsrs); 3415 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD)); 3416 3417 /* Enable VT-x if it's not already enabled by the host. */ 3418 if (!fEnabledByHost) 3419 { 3420 int rc = hmR0VmxEnterRootMode(pVM, HCPhysCpuPage, pvCpuPage); 3421 if (RT_FAILURE(rc)) 3422 return rc; 3423 } 3424 3425 /* 3426 * Flush all EPT tagged-TLB entries (in case VirtualBox or any other hypervisor have been 3427 * using EPTPs) so we don't retain any stale guest-physical mappings which won't get 3428 * invalidated when flushing by VPID. 3429 */ 3430 if (pHwvirtMsrs->u.vmx.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_ALL_CONTEXTS) 3431 { 3432 hmR0VmxFlushEpt(NULL /* pVCpu */, NULL /* pVmcsInfo */, VMXTLBFLUSHEPT_ALL_CONTEXTS); 3433 pHostCpu->fFlushAsidBeforeUse = false; 3434 } 3435 else 3436 pHostCpu->fFlushAsidBeforeUse = true; 3437 3438 /* Ensure each VCPU scheduled on this CPU gets a new VPID on resume. See @bugref{6255}. */ 3439 ++pHostCpu->cTlbFlushes; 3440 3441 return VINF_SUCCESS; 3442 } 3443 3444 3445 /** 3446 * Deactivates VT-x on the current CPU. 3447 * 3448 * @returns VBox status code. 3449 * @param pvCpuPage Pointer to the VMXON region. 3450 * @param HCPhysCpuPage Physical address of the VMXON region. 
3451 * 3452 * @remarks This function should never be called when SUPR0EnableVTx() or 3453 * similar was used to enable VT-x on the host. 3454 */ 3455 VMMR0DECL(int) VMXR0DisableCpu(void *pvCpuPage, RTHCPHYS HCPhysCpuPage) 3456 { 3457 RT_NOREF2(pvCpuPage, HCPhysCpuPage); 3458 3459 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD)); 3460 return hmR0VmxLeaveRootMode(); 2678 3461 } 2679 3462 … … 2692 3475 if (RT_FAILURE(rc)) 2693 3476 { 2694 LogRelFunc((" hmR0VmxStructsAlloc failed!rc=%Rrc\n", rc));3477 LogRelFunc(("Failed to allocated VMX structures. rc=%Rrc\n", rc)); 2695 3478 return rc; 2696 3479 } … … 2712 3495 #ifdef VBOX_WITH_CRASHDUMP_MAGIC 2713 3496 if (pVM->hm.s.vmx.hMemObjScratch != NIL_RTR0MEMOBJ) 2714 ASMMemZero32(pVM->hm.s.vmx.pvScratch, PAGE_SIZE); 3497 { 3498 Assert(pVM->hm.s.vmx.pvScratch); 3499 ASMMemZero32(pVM->hm.s.vmx.pvScratch, X86_PAGE_4K_SIZE); 3500 } 2715 3501 #endif 2716 3502 hmR0VmxStructsFree(pVM); … … 2720 3506 2721 3507 /** 2722 * Sets up the VM for execution u nder VT-x.3508 * Sets up the VM for execution using hardware-assisted VMX. 2723 3509 * This function is only called once per-VM during initialization. 2724 3510 * … … 2734 3520 2735 3521 /* 2736 * Without UnrestrictedGuest, pRealModeTSS and pNonPagingModeEPTPageTable *must* always be 2737 * allocated. We no longer support the highly unlikely case of UnrestrictedGuest without 2738 * pRealModeTSS, see hmR3InitFinalizeR0Intel(). 3522 * At least verify if VMX is enabled, since we can't check if we're in 3523 * VMX root mode or not without causing a #GP. 3524 */ 3525 RTCCUINTREG const uHostCR4 = ASMGetCR4(); 3526 if (RT_LIKELY(uHostCR4 & X86_CR4_VMXE)) 3527 { /* likely */ } 3528 else 3529 return VERR_VMX_NOT_IN_VMX_ROOT_MODE; 3530 3531 /* 3532 * Without unrestricted guest execution, pRealModeTSS and pNonPagingModeEPTPageTable *must* 3533 * always be allocated. We no longer support the highly unlikely case of unrestricted guest 3534 * without pRealModeTSS, see hmR3InitFinalizeR0Intel(). 2739 3535 */ 2740 3536 if ( !pVM->hm.s.vmx.fUnrestrictedGuest … … 2764 3560 && (pVM->hm.s.vmx.Msrs.ExitCtls.n.allowed1 & VMX_EXIT_CTLS_LOAD_EFER_MSR) 2765 3561 && (pVM->hm.s.vmx.Msrs.ExitCtls.n.allowed1 & VMX_EXIT_CTLS_SAVE_EFER_MSR)) 2766 {2767 3562 pVM->hm.s.vmx.fSupportsVmcsEfer = true; 2768 }2769 3563 #endif 2770 3564 2771 /* At least verify VMX is enabled, since we can't check if we're in VMX root mode without #GP'ing. */ 2772 RTCCUINTREG const uHostCR4 = ASMGetCR4(); 2773 if (RT_UNLIKELY(!(uHostCR4 & X86_CR4_VMXE))) 2774 return VERR_VMX_NOT_IN_VMX_ROOT_MODE; 2775 2776 for (VMCPUID i = 0; i < pVM->cCpus; i++) 2777 { 2778 PVMCPU pVCpu = &pVM->aCpus[i]; 2779 AssertPtr(pVCpu); 2780 AssertPtr(pVCpu->hm.s.vmx.pvVmcs); 2781 2782 /* Log the VCPU pointers, useful for debugging SMP VMs. */ 3565 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++) 3566 { 3567 PVMCPU pVCpu = &pVM->aCpus[idCpu]; 2783 3568 Log4Func(("pVCpu=%p idCpu=%RU32\n", pVCpu, pVCpu->idCpu)); 2784 3569 2785 /* Set revision dword at the beginning of the VMCS structure. */ 2786 *(uint32_t *)pVCpu->hm.s.vmx.pvVmcs = RT_BF_GET(pVM->hm.s.vmx.Msrs.u64Basic, VMX_BF_BASIC_VMCS_ID); 2787 2788 /* Set the VMCS launch state to "clear", see Intel spec. 31.6 "Preparation and launch a virtual machine". */ 2789 rc = VMXClearVmcs(pVCpu->hm.s.vmx.HCPhysVmcs); 2790 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: VMXClearVmcs failed! rc=%Rrc\n", rc), 2791 hmR0VmxUpdateErrorRecord(pVCpu, rc), rc); 2792 2793 /* Load this VMCS as the current VMCS. 
*/ 2794 rc = VMXActivateVmcs(pVCpu->hm.s.vmx.HCPhysVmcs); 2795 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: VMXActivateVmcs failed! rc=%Rrc\n", rc), 2796 hmR0VmxUpdateErrorRecord(pVCpu, rc), rc); 2797 2798 rc = hmR0VmxSetupPinCtls(pVCpu); 2799 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxSetupPinCtls failed! rc=%Rrc\n", rc), 2800 hmR0VmxUpdateErrorRecord(pVCpu, rc), rc); 2801 2802 rc = hmR0VmxSetupProcCtls(pVCpu); 2803 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxSetupProcCtls failed! rc=%Rrc\n", rc), 2804 hmR0VmxUpdateErrorRecord(pVCpu, rc), rc); 2805 2806 rc = hmR0VmxSetupMiscCtls(pVCpu); 2807 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxSetupMiscCtls failed! rc=%Rrc\n", rc), 2808 hmR0VmxUpdateErrorRecord(pVCpu, rc), rc); 2809 2810 rc = hmR0VmxInitXcptBitmap(pVCpu); 2811 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxInitXcptBitmap failed! rc=%Rrc\n", rc), 2812 hmR0VmxUpdateErrorRecord(pVCpu, rc), rc); 2813 3570 rc = hmR0VmxSetupVmcs(pVCpu, &pVCpu->hm.s.vmx.VmcsInfo, false /* fIsNstGstVmcs */); 3571 if (RT_SUCCESS(rc)) 3572 { 2814 3573 #if HC_ARCH_BITS == 32 2815 rc = hmR0VmxInitVmcsReadCache(pVCpu); 2816 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxInitVmcsReadCache failed! rc=%Rrc\n", rc), 2817 hmR0VmxUpdateErrorRecord(pVCpu, rc), rc); 3574 hmR0VmxInitVmcsReadCache(pVCpu); 2818 3575 #endif 2819 2820 /* Sync any CPU internal VMCS data back into our VMCS in memory. */ 2821 rc = VMXClearVmcs(pVCpu->hm.s.vmx.HCPhysVmcs); 2822 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: VMXClearVmcs(2) failed! rc=%Rrc\n", rc), 2823 hmR0VmxUpdateErrorRecord(pVCpu, rc), rc); 2824 2825 pVCpu->hm.s.vmx.fVmcsState = HMVMX_VMCS_STATE_CLEAR; 2826 2827 hmR0VmxUpdateErrorRecord(pVCpu, rc); 3576 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX 3577 if (pVM->cpum.ro.GuestFeatures.fVmx) 3578 { 3579 rc = hmR0VmxSetupVmcs(pVCpu, &pVCpu->hm.s.vmx.VmcsInfoNstGst, true /* fIsNstGstVmcs */); 3580 if (RT_SUCCESS(rc)) 3581 { /* likely */ } 3582 else 3583 { 3584 LogRelFunc(("Nested-guest VMCS setup failed. rc=%Rrc\n", rc)); 3585 return rc; 3586 } 3587 } 3588 #endif 3589 } 3590 else 3591 { 3592 LogRelFunc(("VMCS setup failed. rc=%Rrc\n", rc)); 3593 return rc; 3594 } 2828 3595 } 2829 3596 2830 3597 return VINF_SUCCESS; 2831 3598 } 3599 3600 3601 #if HC_ARCH_BITS == 32 3602 # ifdef VBOX_ENABLE_64_BITS_GUESTS 3603 /** 3604 * Check if guest state allows safe use of 32-bit switcher again. 3605 * 3606 * Segment bases and protected mode structures must be 32-bit addressable 3607 * because the 32-bit switcher will ignore high dword when writing these VMCS 3608 * fields. See @bugref{8432} for details. 3609 * 3610 * @returns true if safe, false if must continue to use the 64-bit switcher. 3611 * @param pCtx Pointer to the guest-CPU context. 3612 * 3613 * @remarks No-long-jump zone!!! 
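 *
 * @remarks Intended usage is an assumption here, since the call site is not
 *          part of this hunk: after the guest reloads its descriptor tables
 *          and segment registers, a caller could do something like
 *              if (hmR0VmxIs32BitSwitcherSafe(&pVCpu->cpum.GstCtx))
 *                  ... mark the 32-bit switcher as usable again ...
 *          to undo an earlier forced switch to the 64-bit switcher.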
3614 */ 3615 static bool hmR0VmxIs32BitSwitcherSafe(PCCPUMCTX pCtx) 3616 { 3617 if (pCtx->gdtr.pGdt & UINT64_C(0xffffffff00000000)) return false; 3618 if (pCtx->idtr.pIdt & UINT64_C(0xffffffff00000000)) return false; 3619 if (pCtx->ldtr.u64Base & UINT64_C(0xffffffff00000000)) return false; 3620 if (pCtx->tr.u64Base & UINT64_C(0xffffffff00000000)) return false; 3621 if (pCtx->es.u64Base & UINT64_C(0xffffffff00000000)) return false; 3622 if (pCtx->cs.u64Base & UINT64_C(0xffffffff00000000)) return false; 3623 if (pCtx->ss.u64Base & UINT64_C(0xffffffff00000000)) return false; 3624 if (pCtx->ds.u64Base & UINT64_C(0xffffffff00000000)) return false; 3625 if (pCtx->fs.u64Base & UINT64_C(0xffffffff00000000)) return false; 3626 if (pCtx->gs.u64Base & UINT64_C(0xffffffff00000000)) return false; 3627 3628 /* All good, bases are 32-bit. */ 3629 return true; 3630 } 3631 # endif 3632 3633 /** 3634 * Executes the specified handler in 64-bit mode. 3635 * 3636 * @returns VBox status code (no informational status codes). 3637 * @param pVCpu The cross context virtual CPU structure. 3638 * @param enmOp The operation to perform. 3639 * @param cParams Number of parameters. 3640 * @param paParam Array of 32-bit parameters. 3641 */ 3642 VMMR0DECL(int) VMXR0Execute64BitsHandler(PVMCPU pVCpu, HM64ON32OP enmOp, uint32_t cParams, uint32_t *paParam) 3643 { 3644 PVM pVM = pVCpu->CTX_SUFF(pVM); 3645 AssertReturn(pVM->hm.s.pfnHost32ToGuest64R0, VERR_HM_NO_32_TO_64_SWITCHER); 3646 Assert(enmOp > HM64ON32OP_INVALID && enmOp < HM64ON32OP_END); 3647 Assert(pVCpu->hm.s.vmx.VmcsCache.Write.cValidEntries <= RT_ELEMENTS(pVCpu->hm.s.vmx.VmcsCache.Write.aField)); 3648 Assert(pVCpu->hm.s.vmx.VmcsCache.Read.cValidEntries <= RT_ELEMENTS(pVCpu->hm.s.vmx.VmcsCache.Read.aField)); 3649 3650 #ifdef VBOX_STRICT 3651 for (uint32_t i = 0; i < pVCpu->hm.s.vmx.VmcsCache.Write.cValidEntries; i++) 3652 Assert(hmR0VmxIsValidWriteField(pVCpu->hm.s.vmx.VmcsCache.Write.aField[i])); 3653 3654 for (uint32_t i = 0; i <pVCpu->hm.s.vmx.VmcsCache.Read.cValidEntries; i++) 3655 Assert(hmR0VmxIsValidReadField(pVCpu->hm.s.vmx.VmcsCache.Read.aField[i])); 3656 #endif 3657 3658 /* Disable interrupts. */ 3659 RTCCUINTREG fOldEFlags = ASMIntDisableFlags(); 3660 3661 #ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI 3662 RTCPUID idHostCpu = RTMpCpuId(); 3663 CPUMR0SetLApic(pVCpu, idHostCpu); 3664 #endif 3665 3666 /** @todo replace with hmR0VmxEnterRootMode() and hmR0VmxLeaveRootMode(). */ 3667 3668 PCHMPHYSCPU pHostCpu = hmR0GetCurrentCpu(); 3669 RTHCPHYS const HCPhysCpuPage = pHostCpu->HCPhysMemObj; 3670 3671 /* Clear VMCS. Marking it inactive, clearing implementation-specific data and writing VMCS data back to memory. */ 3672 PVMXVMCSINFO pVmcsInfo = hmGetVmxActiveVmcsInfo(pVCpu); 3673 hmR0VmxClearVmcs(pVmcsInfo); 3674 3675 /* Leave VMX root mode and disable VMX. */ 3676 VMXDisable(); 3677 SUPR0ChangeCR4(0, ~X86_CR4_VMXE); 3678 3679 CPUMSetHyperESP(pVCpu, VMMGetStackRC(pVCpu)); 3680 CPUMSetHyperEIP(pVCpu, enmOp); 3681 for (int i = (int)cParams - 1; i >= 0; i--) 3682 CPUMPushHyper(pVCpu, paParam[i]); 3683 3684 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatWorldSwitch3264, z); 3685 3686 /* Call the switcher. */ 3687 int rc = pVM->hm.s.pfnHost32ToGuest64R0(pVM, RT_UOFFSETOF_DYN(VM, aCpus[pVCpu->idCpu].cpum) - RT_UOFFSETOF(VM, cpum)); 3688 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatWorldSwitch3264, z); 3689 3690 /* Re-enable VMX to make sure the VMX instructions don't cause #UD faults. */ 3691 SUPR0ChangeCR4(X86_CR4_VMXE, RTCCUINTREG_MAX); 3692 3693 /* Re-enter VMX root mode. 
*/ 3694 int rc2 = VMXEnable(HCPhysCpuPage); 3695 if (RT_FAILURE(rc2)) 3696 { 3697 SUPR0ChangeCR4(0, ~X86_CR4_VMXE); 3698 ASMSetFlags(fOldEFlags); 3699 pVM->hm.s.vmx.HCPhysVmxEnableError = HCPhysCpuPage; 3700 return rc2; 3701 } 3702 3703 /* Restore the VMCS as the current VMCS. */ 3704 rc2 = hmR0VmxLoadVmcs(pVmcsInfo); 3705 AssertRC(rc2); 3706 Assert(!(ASMGetFlags() & X86_EFL_IF)); 3707 ASMSetFlags(fOldEFlags); 3708 return rc; 3709 } 3710 3711 3712 /** 3713 * Prepares for and executes VMLAUNCH (64-bit guests) for 32-bit hosts 3714 * supporting 64-bit guests. 3715 * 3716 * @returns VBox status code. 3717 * @param fResume Whether to VMLAUNCH or VMRESUME. 3718 * @param pCtx Pointer to the guest-CPU context. 3719 * @param pCache Pointer to the VMCS batch cache. 3720 * @param pVM The cross context VM structure. 3721 * @param pVCpu The cross context virtual CPU structure. 3722 */ 3723 DECLASM(int) VMXR0SwitcherStartVM64(RTHCUINT fResume, PCPUMCTX pCtx, PVMXVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu) 3724 { 3725 NOREF(fResume); 3726 3727 PCHMPHYSCPU pHostCpu = hmR0GetCurrentCpu(); 3728 RTHCPHYS const HCPhysCpuPage = pHostCpu->HCPhysMemObj; 3729 3730 #ifdef VBOX_WITH_CRASHDUMP_MAGIC 3731 pCache->uPos = 1; 3732 pCache->interPD = PGMGetInterPaeCR3(pVM); 3733 pCache->pSwitcher = (uint64_t)pVM->hm.s.pfnHost32ToGuest64R0; 3734 #endif 3735 3736 #if defined(DEBUG) && defined(VMX_USE_CACHED_VMCS_ACCESSES) 3737 pCache->TestIn.HCPhysCpuPage = 0; 3738 pCache->TestIn.HCPhysVmcs = 0; 3739 pCache->TestIn.pCache = 0; 3740 pCache->TestOut.HCPhysVmcs = 0; 3741 pCache->TestOut.pCache = 0; 3742 pCache->TestOut.pCtx = 0; 3743 pCache->TestOut.eflags = 0; 3744 #else 3745 NOREF(pCache); 3746 #endif 3747 3748 uint32_t aParam[10]; 3749 aParam[0] = RT_LO_U32(HCPhysCpuPage); /* Param 1: VMXON physical address - Lo. */ 3750 aParam[1] = RT_HI_U32(HCPhysCpuPage); /* Param 1: VMXON physical address - Hi. */ 3751 aParam[2] = RT_LO_U32(pVCpu->hm.s.vmx.HCPhysVmcs); /* Param 2: VMCS physical address - Lo. */ 3752 aParam[3] = RT_HI_U32(pVCpu->hm.s.vmx.HCPhysVmcs); /* Param 2: VMCS physical address - Hi. 
*/ 3753 aParam[4] = VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hm.s.vmx.VmcsCache); 3754 aParam[5] = 0; 3755 aParam[6] = VM_RC_ADDR(pVM, pVM); 3756 aParam[7] = 0; 3757 aParam[8] = VM_RC_ADDR(pVM, pVCpu); 3758 aParam[9] = 0; 3759 3760 #ifdef VBOX_WITH_CRASHDUMP_MAGIC 3761 pCtx->dr[4] = pVM->hm.s.vmx.pScratchPhys + 16 + 8; 3762 *(uint32_t *)(pVM->hm.s.vmx.pScratch + 16 + 8) = 1; 3763 #endif 3764 int rc = VMXR0Execute64BitsHandler(pVCpu, HM64ON32OP_VMXRCStartVM64, RT_ELEMENTS(aParam), &aParam[0]); 3765 3766 #ifdef VBOX_WITH_CRASHDUMP_MAGIC 3767 Assert(*(uint32_t *)(pVM->hm.s.vmx.pScratch + 16 + 8) == 5); 3768 Assert(pCtx->dr[4] == 10); 3769 *(uint32_t *)(pVM->hm.s.vmx.pScratch + 16 + 8) = 0xff; 3770 #endif 3771 3772 #if defined(DEBUG) && defined(VMX_USE_CACHED_VMCS_ACCESSES) 3773 AssertMsg(pCache->TestIn.HCPhysCpuPage == HCPhysCpuPage, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysCpuPage, HCPhysCpuPage)); 3774 AssertMsg(pCache->TestIn.HCPhysVmcs == pVCpu->hm.s.vmx.HCPhysVmcs, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysVmcs, 3775 pVCpu->hm.s.vmx.HCPhysVmcs)); 3776 AssertMsg(pCache->TestIn.HCPhysVmcs == pCache->TestOut.HCPhysVmcs, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysVmcs, 3777 pCache->TestOut.HCPhysVmcs)); 3778 AssertMsg(pCache->TestIn.pCache == pCache->TestOut.pCache, ("%RGv vs %RGv\n", pCache->TestIn.pCache, 3779 pCache->TestOut.pCache)); 3780 AssertMsg(pCache->TestIn.pCache == VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hm.s.vmx.VmcsCache), 3781 ("%RGv vs %RGv\n", pCache->TestIn.pCache, VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hm.s.vmx.VmcsCache))); 3782 AssertMsg(pCache->TestIn.pCtx == pCache->TestOut.pCtx, ("%RGv vs %RGv\n", pCache->TestIn.pCtx, 3783 pCache->TestOut.pCtx)); 3784 Assert(!(pCache->TestOut.eflags & X86_EFL_IF)); 3785 #endif 3786 NOREF(pCtx); 3787 return rc; 3788 } 3789 #endif 2832 3790 2833 3791 … … 2887 3845 2888 3846 /* 2889 * If we've executed guest code using VT-x, the host-state bits will be messed up. We2890 * should -not- save the messed up state without restoring the original host-state,2891 * see @bugref{7240}.3847 * If we've executed guest code using hardware-assisted VMX, the host-state bits 3848 * will be messed up. We should -not- save the messed up state without restoring 3849 * the original host-state, see @bugref{7240}. 2892 3850 * 2893 3851 * This apparently can happen (most likely the FPU changes), deal with it rather than … … 2956 3914 Assert(uSelCS); 2957 3915 Assert(uSelTR); 2958 2959 /* Assertion is right but we would not have updated u32ExitCtls yet. */2960 #if 02961 if (!(pVCpu->hm.s.vmx.Ctls.u32ExitCtls & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE))2962 Assert(uSelSS != 0);2963 #endif2964 3916 2965 3917 /* Write these host selector fields into the host-state area in the VMCS. 
*/ … … 3032 3984 PCX86DESCHC pDesc = (PCX86DESCHC)(Gdtr.pGdt + (uSelTR & X86_SEL_MASK)); 3033 3985 #if HC_ARCH_BITS == 64 3034 uintptr_t uTRBase = X86DESC64_BASE(pDesc);3986 uintptr_t const uTRBase = X86DESC64_BASE(pDesc); 3035 3987 3036 3988 /* … … 3073 4025 } 3074 4026 #else 3075 uintptr_t uTRBase = X86DESC_BASE(pDesc);4027 uintptr_t const uTRBase = X86DESC_BASE(pDesc); 3076 4028 #endif 3077 4029 rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_TR_BASE, uTRBase); … … 3082 4034 */ 3083 4035 #if HC_ARCH_BITS == 64 3084 uint64_t u64FSBase = ASMRdMsr(MSR_K8_FS_BASE);3085 uint64_t u64GSBase = ASMRdMsr(MSR_K8_GS_BASE);4036 uint64_t const u64FSBase = ASMRdMsr(MSR_K8_FS_BASE); 4037 uint64_t const u64GSBase = ASMRdMsr(MSR_K8_GS_BASE); 3086 4038 rc = VMXWriteVmcs64(VMX_VMCS_HOST_FS_BASE, u64FSBase); 3087 4039 rc |= VMXWriteVmcs64(VMX_VMCS_HOST_GS_BASE, u64GSBase); … … 3102 4054 * host-state area of the VMCS. 3103 4055 * 3104 * These sMSRs will be automatically restored on the host after every successful4056 * These MSRs will be automatically restored on the host after every successful 3105 4057 * VM-exit. 3106 4058 * … … 3113 4065 { 3114 4066 AssertPtr(pVCpu); 3115 AssertPtr(pVCpu->hm.s.vmx.pvHostMsr);3116 4067 3117 4068 /* … … 3126 4077 int rc = VMXWriteVmcs32(VMX_VMCS32_HOST_SYSENTER_CS, ASMRdMsr_Low(MSR_IA32_SYSENTER_CS)); 3127 4078 #if HC_ARCH_BITS == 32 3128 rc |= VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_ESP,ASMRdMsr_Low(MSR_IA32_SYSENTER_ESP));3129 rc |= VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_EIP,ASMRdMsr_Low(MSR_IA32_SYSENTER_EIP));4079 rc |= VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr_Low(MSR_IA32_SYSENTER_ESP)); 4080 rc |= VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr_Low(MSR_IA32_SYSENTER_EIP)); 3130 4081 #else 3131 rc |= VMXWriteVmcs64(VMX_VMCS_HOST_SYSENTER_ESP,ASMRdMsr(MSR_IA32_SYSENTER_ESP));3132 rc |= VMXWriteVmcs64(VMX_VMCS_HOST_SYSENTER_EIP,ASMRdMsr(MSR_IA32_SYSENTER_EIP));4082 rc |= VMXWriteVmcs64(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr(MSR_IA32_SYSENTER_ESP)); 4083 rc |= VMXWriteVmcs64(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr(MSR_IA32_SYSENTER_EIP)); 3133 4084 #endif 3134 4085 AssertRCReturn(rc, rc); … … 3143 4094 if (pVM->hm.s.vmx.fSupportsVmcsEfer) 3144 4095 { 3145 rc = VMXWriteVmcs64(VMX_VMCS64_HOST_EFER_FULL, pVM->hm.s.vmx.u64Host Efer);4096 rc = VMXWriteVmcs64(VMX_VMCS64_HOST_EFER_FULL, pVM->hm.s.vmx.u64HostMsrEfer); 3146 4097 AssertRCReturn(rc, rc); 3147 4098 } 3148 4099 3149 /** @todo IA32_PERF_GLOBALCTRL, IA32_PAT also see hmR0VmxExportGuestExitCtls(). */ 4100 /** @todo IA32_PERF_GLOBALCTRL, IA32_PAT also see 4101 * hmR0VmxExportGuestEntryExitCtls(). */ 3150 4102 3151 4103 return VINF_SUCCESS; … … 3157 4109 * 3158 4110 * We check all relevant bits. For now, that's everything besides LMA/LME, as 3159 * these two bits are handled by VM-entry, see hmR0VmxExportGuestExitCtls() and 3160 * hmR0VMxExportGuestEntryCtls(). 4111 * these two bits are handled by VM-entry, see hmR0VMxExportGuestEntryExitCtls(). 3161 4112 * 3162 4113 * @returns true if we need to load guest EFER, false otherwise. … … 3172 4123 return true; 3173 4124 #else 3174 3175 4125 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 3176 4126 #if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) 3177 /* For 32-bit hosts running 64-bit guests, we always swap EFER in the world-switcher. Nothing to do here. */4127 /* For 32-bit hosts running 64-bit guests, we always swap EFER MSR in the world-switcher. Nothing to do here. 
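The host TR base computed earlier in this hunk comes from the GDT entry selected by uSelTR, via X86DESC64_BASE() on 64-bit hosts. For readers unfamiliar with the descriptor layout, here is a hedged standalone sketch of that extraction; the function name is invented, and only the byte layout (a 16-byte system descriptor in long mode) reflects the architecture:

#include <stdint.h>

/* Reassemble the base address of a 64-bit system descriptor (TSS/LDT) from
   its scattered base fields, as X86DESC64_BASE() does in the hunk above.
   pau8Desc points at the 16-byte descriptor inside the GDT, i.e. at
   GDT base + (selector & ~7). */
uint64_t SysDescGetBase(const uint8_t *pau8Desc)
{
    uint64_t uBase;
    uBase  = (uint64_t)pau8Desc[2];         /* base  7:0  */
    uBase |= (uint64_t)pau8Desc[3]  << 8;   /* base 15:8  */
    uBase |= (uint64_t)pau8Desc[4]  << 16;  /* base 23:16 */
    uBase |= (uint64_t)pau8Desc[7]  << 24;  /* base 31:24 */
    uBase |= (uint64_t)pau8Desc[8]  << 32;  /* base 39:32 */
    uBase |= (uint64_t)pau8Desc[9]  << 40;  /* base 47:40 */
    uBase |= (uint64_t)pau8Desc[10] << 48;  /* base 55:48 */
    uBase |= (uint64_t)pau8Desc[11] << 56;  /* base 63:56 */
    return uBase;
}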
*/ 3178 4128 if (CPUMIsGuestInLongModeEx(pCtx)) 3179 4129 return false; … … 3181 4131 3182 4132 PVM pVM = pVCpu->CTX_SUFF(pVM); 3183 uint64_t const u64HostEfer = pVM->hm.s.vmx.u64Host Efer;4133 uint64_t const u64HostEfer = pVM->hm.s.vmx.u64HostMsrEfer; 3184 4134 uint64_t const u64GuestEfer = pCtx->msrEFER; 3185 4135 3186 4136 /* 3187 * For 64-bit guests, if EFER.SCE bit differs, we need to swap EFER to ensure that the3188 * guest's SYSCALL behaviour isn't broken, see @bugref{7386}.4137 * For 64-bit guests, if EFER.SCE bit differs, we need to swap the EFER MSR 4138 * to ensure that the guest's SYSCALL behaviour isn't broken, see @bugref{7386}. 3189 4139 */ 3190 4140 if ( CPUMIsGuestInLongModeEx(pCtx) 3191 4141 && (u64GuestEfer & MSR_K6_EFER_SCE) != (u64HostEfer & MSR_K6_EFER_SCE)) 3192 {3193 4142 return true; 3194 } 3195 3196 /* 3197 * If the guest uses PAE and EFER.NXE bit differs, we need to swap EFER as it 3198 * affects guest paging. 64-bit paging implies CR4.PAE as well. 3199 * See Intel spec. 4.5 "IA-32e Paging" and Intel spec. 4.1.1 "Three Paging Modes". 3200 */ 4143 4144 /* 4145 * If the guest uses PAE and EFER.NXE bit differs, we need to swap the EFER MSR 4146 * as it affects guest paging. 64-bit paging implies CR4.PAE as well. 4147 * 4148 * See Intel spec. 4.5 "IA-32e Paging". 4149 * See Intel spec. 4.1.1 "Three Paging Modes". 4150 * 4151 * Verify that we always intercept CR4.PAE and CR0.PG bits, so we don't need to 4152 * import CR4 and CR0 from the VMCS here as those bits are always up to date. 4153 */ 4154 Assert(hmR0VmxGetFixedCr4Mask(pVCpu) & X86_CR4_PAE); 4155 Assert(hmR0VmxGetFixedCr0Mask(pVCpu) & X86_CR0_PG); 3201 4156 if ( (pCtx->cr4 & X86_CR4_PAE) 3202 4157 && (pCtx->cr0 & X86_CR0_PG) … … 3212 4167 } 3213 4168 3214 3215 /** 3216 * Exports the guest state with appropriate VM-entry controls in the VMCS. 3217 * 3218 * These controls can affect things done on VM-exit; e.g. "load debug controls", 3219 * see Intel spec. 24.8.1 "VM-entry controls". 4169 /** 4170 * Exports the guest state with appropriate VM-entry and VM-exit controls in the 4171 * VMCS. 4172 * 4173 * This is typically required when the guest changes paging mode. 3220 4174 * 3221 4175 * @returns VBox status code. 3222 * @param pVCpu The cross context virtual CPU structure. 4176 * @param pVCpu The cross context virtual CPU structure. 4177 * @param pVmxTransient The VMX-transient structure. 3223 4178 * 3224 4179 * @remarks Requires EFER. 3225 4180 * @remarks No-long-jump zone!!! 3226 4181 */ 3227 static int hmR0VmxExportGuestEntryCtls(PVMCPU pVCpu) 3228 { 3229 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_VMX_ENTRY_CTLS) 3230 { 3231 PVM pVM = pVCpu->CTX_SUFF(pVM); 3232 uint32_t fVal = pVM->hm.s.vmx.Msrs.EntryCtls.n.allowed0; /* Bits set here must be set in the VMCS. */ 3233 uint32_t const fZap = pVM->hm.s.vmx.Msrs.EntryCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */ 3234 3235 /* Load debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x capable CPUs only supports the 1-setting of this bit. */ 3236 fVal |= VMX_ENTRY_CTLS_LOAD_DEBUG; 3237 3238 /* Set if the guest is in long mode. This will set/clear the EFER.LMA bit on VM-entry. */ 3239 if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx)) 3240 { 3241 fVal |= VMX_ENTRY_CTLS_IA32E_MODE_GUEST; 3242 Log4Func(("VMX_ENTRY_CTLS_IA32E_MODE_GUEST\n")); 3243 } 3244 else 3245 Assert(!(fVal & VMX_ENTRY_CTLS_IA32E_MODE_GUEST)); 3246 3247 /* If the CPU supports the newer VMCS controls for managing guest/host EFER, use it. 
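The hmR0VmxShouldSwapEferMsr() reasoning above boils down to two comparisons between the guest and host EFER values. A simplified, hedged restatement as a standalone predicate (names and the hand-written constants are for illustration; EFER.SCE is bit 0, EFER.NXE is bit 11):

#include <stdbool.h>
#include <stdint.h>

#define EFER_SCE  (UINT64_C(1) << 0)   /* SYSCALL enable    */
#define EFER_NXE  (UINT64_C(1) << 11)  /* no-execute enable */
#define CR0_PG    (UINT64_C(1) << 31)
#define CR4_PAE   (UINT64_C(1) << 5)

/* Do we need to load the guest EFER on VM-entry (and the host EFER back on
   VM-exit)?  Mirrors the reasoning in the hunk above: SCE matters to 64-bit
   guests (SYSCALL), NXE matters as soon as PAE paging is active. */
bool ShouldSwapEfer(bool fGuestLongMode, uint64_t uGuestCr0, uint64_t uGuestCr4,
                    uint64_t uGuestEfer, uint64_t uHostEfer)
{
    if (fGuestLongMode && (uGuestEfer & EFER_SCE) != (uHostEfer & EFER_SCE))
        return true;
    if (   (uGuestCr4 & CR4_PAE)
        && (uGuestCr0 & CR0_PG)
        && (uGuestEfer & EFER_NXE) != (uHostEfer & EFER_NXE))
        return true;
    return false;   /* EFER agrees where it matters; no swap needed. */
}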
*/ 3248 if ( pVM->hm.s.vmx.fSupportsVmcsEfer 3249 && hmR0VmxShouldSwapEferMsr(pVCpu)) 3250 { 3251 fVal |= VMX_ENTRY_CTLS_LOAD_EFER_MSR; 3252 Log4Func(("VMX_ENTRY_CTLS_LOAD_EFER_MSR\n")); 3253 } 4182 static int hmR0VmxExportGuestEntryExitCtls(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) 4183 { 4184 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_VMX_ENTRY_EXIT_CTLS) 4185 { 4186 PVM pVM = pVCpu->CTX_SUFF(pVM); 4187 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo; 3254 4188 3255 4189 /* 3256 * The following should -not- be set (since we're not in SMM mode): 3257 * - VMX_ENTRY_CTLS_ENTRY_TO_SMM 3258 * - VMX_ENTRY_CTLS_DEACTIVATE_DUAL_MON 4190 * VM-entry controls. 3259 4191 */ 3260 3261 /** @todo VMX_ENTRY_CTLS_LOAD_PERF_MSR, 3262 * VMX_ENTRY_CTLS_LOAD_PAT_MSR. */ 3263 3264 if ((fVal & fZap) != fVal) 3265 { 3266 Log4Func(("Invalid VM-entry controls combo! Cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n", 3267 pVM->hm.s.vmx.Msrs.EntryCtls.n.allowed0, fVal, fZap)); 3268 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_ENTRY; 3269 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO; 3270 } 3271 3272 /* Commit it to the VMCS and update our cache. */ 3273 if (pVCpu->hm.s.vmx.Ctls.u32EntryCtls != fVal) 3274 { 3275 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY, fVal); 3276 AssertRCReturn(rc, rc); 3277 pVCpu->hm.s.vmx.Ctls.u32EntryCtls = fVal; 3278 } 3279 3280 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_VMX_ENTRY_CTLS); 3281 } 3282 return VINF_SUCCESS; 3283 } 3284 3285 3286 /** 3287 * Exports the guest state with appropriate VM-exit controls in the VMCS. 3288 * 3289 * @returns VBox status code. 3290 * @param pVCpu The cross context virtual CPU structure. 3291 * 3292 * @remarks Requires EFER. 3293 */ 3294 static int hmR0VmxExportGuestExitCtls(PVMCPU pVCpu) 3295 { 3296 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_VMX_EXIT_CTLS) 3297 { 3298 PVM pVM = pVCpu->CTX_SUFF(pVM); 3299 uint32_t fVal = pVM->hm.s.vmx.Msrs.ExitCtls.n.allowed0; /* Bits set here must be set in the VMCS. */ 3300 uint32_t const fZap = pVM->hm.s.vmx.Msrs.ExitCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */ 3301 3302 /* Save debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x CPUs only supported the 1-setting of this bit. */ 3303 fVal |= VMX_EXIT_CTLS_SAVE_DEBUG; 4192 { 4193 uint32_t fVal = pVM->hm.s.vmx.Msrs.EntryCtls.n.allowed0; /* Bits set here must be set in the VMCS. */ 4194 uint32_t const fZap = pVM->hm.s.vmx.Msrs.EntryCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */ 4195 4196 /* 4197 * Load the guest debug controls (DR7 and IA32_DEBUGCTL MSR) on VM-entry. 4198 * The first VT-x capable CPUs only supported the 1-setting of this bit. 4199 * 4200 * For nested-guests, this is a mandatory VM-entry control. It's also 4201 * required because we do not want to leak host bits to the nested-guest. 4202 */ 4203 fVal |= VMX_ENTRY_CTLS_LOAD_DEBUG; 4204 4205 /* 4206 * Set if the guest is in long mode. This will set/clear the EFER.LMA bit on VM-entry. 4207 * 4208 * For nested-guests, the "IA-32e mode guest" control we initialize with what is 4209 * required to get the nested-guest working with hardware-assisted VMX execution. 4210 * It depends on the nested-guest's IA32_EFER.LMA bit. Remember, a nested-hypervisor 4211 * can skip intercepting changes to the EFER MSR. This is why it it needs to be done 4212 * here rather than while merging the guest VMCS controls. 
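Both the VM-entry and VM-exit control blocks in this function follow the same pattern dictated by the VMX capability MSRs: start from the allowed-0 bits (which must be 1), OR in what we want, and verify that nothing outside the allowed-1 bits (which may be 1) was requested. A condensed, hedged sketch of that pattern with invented names:

#include <stdbool.h>
#include <stdint.h>

/* Fold the desired control bits into what the CPU permits.
   fAllowed0: bits that must be 1 in the VMCS control field.
   fAllowed1: bits that may be 1 in the VMCS control field.
   Returns false if a requested bit is not supported by this CPU. */
bool AdjustVmxControls(uint32_t fAllowed0, uint32_t fAllowed1, uint32_t fDesired, uint32_t *pfResult)
{
    uint32_t const fVal = fAllowed0 | fDesired;  /* mandatory 1-bits plus what we want */
    uint32_t const fZap = fAllowed1;             /* everything else must stay zero     */
    if ((fVal & fZap) != fVal)
        return false;                            /* a requested bit is unsupported     */
    *pfResult = fVal;
    return true;
}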
4213 */ 4214 if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx)) 4215 fVal |= VMX_ENTRY_CTLS_IA32E_MODE_GUEST; 4216 else 4217 Assert(!(fVal & VMX_ENTRY_CTLS_IA32E_MODE_GUEST)); 4218 4219 /* 4220 * If the CPU supports the newer VMCS controls for managing guest/host EFER, use it. 4221 * 4222 * For nested-guests, we use the "load IA32_EFER" if the hardware supports it, 4223 * regardless of whether the nested-guest VMCS specifies it because we are free to 4224 * load whatever MSRs we require and we do not need to modify the guest visible copy 4225 * of the VM-entry MSR load area. 4226 */ 4227 if ( pVM->hm.s.vmx.fSupportsVmcsEfer 4228 && hmR0VmxShouldSwapEferMsr(pVCpu)) 4229 fVal |= VMX_ENTRY_CTLS_LOAD_EFER_MSR; 4230 else 4231 Assert(!(fVal & VMX_ENTRY_CTLS_LOAD_EFER_MSR)); 4232 4233 /* 4234 * The following should -not- be set (since we're not in SMM mode): 4235 * - VMX_ENTRY_CTLS_ENTRY_TO_SMM 4236 * - VMX_ENTRY_CTLS_DEACTIVATE_DUAL_MON 4237 */ 4238 4239 /** @todo VMX_ENTRY_CTLS_LOAD_PERF_MSR, 4240 * VMX_ENTRY_CTLS_LOAD_PAT_MSR. */ 4241 4242 if ((fVal & fZap) == fVal) 4243 { /* likely */ } 4244 else 4245 { 4246 Log4Func(("Invalid VM-entry controls combo! Cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n", 4247 pVM->hm.s.vmx.Msrs.EntryCtls.n.allowed0, fVal, fZap)); 4248 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_ENTRY; 4249 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO; 4250 } 4251 4252 /* Commit it to the VMCS. */ 4253 if (pVmcsInfo->u32EntryCtls != fVal) 4254 { 4255 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY, fVal); 4256 AssertRCReturn(rc, rc); 4257 pVmcsInfo->u32EntryCtls = fVal; 4258 } 4259 } 3304 4260 3305 4261 /* 3306 * Set the host long mode active (EFER.LMA) bit (which Intel calls "Host address-space size") if necessary. 3307 * On VM-exit, VT-x sets both the host EFER.LMA and EFER.LME bit to this value. See assertion in 3308 * hmR0VmxExportHostMsrs(). 4262 * VM-exit controls. 3309 4263 */ 4264 { 4265 uint32_t fVal = pVM->hm.s.vmx.Msrs.ExitCtls.n.allowed0; /* Bits set here must be set in the VMCS. */ 4266 uint32_t const fZap = pVM->hm.s.vmx.Msrs.ExitCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */ 4267 4268 /* 4269 * Save debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x CPUs only 4270 * supported the 1-setting of this bit. 4271 * 4272 * For nested-guests, we set the "save debug controls" as the converse 4273 * "load debug controls" is mandatory for nested-guests anyway. 4274 */ 4275 fVal |= VMX_EXIT_CTLS_SAVE_DEBUG; 4276 4277 /* 4278 * Set the host long mode active (EFER.LMA) bit (which Intel calls 4279 * "Host address-space size") if necessary. On VM-exit, VT-x sets both the 4280 * host EFER.LMA and EFER.LME bit to this value. See assertion in 4281 * hmR0VmxExportHostMsrs(). 4282 * 4283 * For nested-guests, we always set this bit as we do not support 32-bit 4284 * hosts. 4285 */ 3310 4286 #if HC_ARCH_BITS == 64 3311 fVal |= VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE; 3312 Log4Func(("VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE\n")); 4287 fVal |= VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE; 3313 4288 #else 3314 Assert( pVCpu->hm.s.vmx.pfnStartVM == VMXR0SwitcherStartVM643315 || pVCpu->hm.s.vmx.pfnStartVM == VMXR0StartVM32);3316 /* Set the host address-space size based on the switcher, not guest state. See @bugref{8432}. */3317 if (pVCpu->hm.s.vmx.pfnStartVM == VMXR0SwitcherStartVM64)3318 {3319 /* The switcher returns to long mode, EFER is managed by the switcher. 
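The "commit it to the VMCS" steps above only issue a VMWRITE when the value actually differs from the cached copy kept in the VMCS-info structure, since VMWRITEs are not free. Reduced to a hedged standalone helper (the struct, the stub and the function names are illustrative, not VirtualBox APIs):

#include <stdint.h>

/* Stand-in for the real VMWRITE wrapper; always "succeeds" here. */
int WriteVmcsField32(uint32_t uFieldEnc, uint32_t uValue)
{
    (void)uFieldEnc; (void)uValue;
    return 0;
}

typedef struct CACHEDCTL
{
    uint32_t uFieldEnc;  /* VMCS field encoding      */
    uint32_t uCached;    /* last value written to it */
} CACHEDCTL;

/* Write-through cache: skip the VMWRITE when nothing changed. */
int CommitControl(CACHEDCTL *pCtl, uint32_t uNewValue)
{
    if (pCtl->uCached == uNewValue)
        return 0;                                  /* already up to date */
    int rc = WriteVmcsField32(pCtl->uFieldEnc, uNewValue);
    if (rc == 0)
        pCtl->uCached = uNewValue;                 /* keep the shadow in sync */
    return rc;
}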
*/3320 fVal |= VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE;3321 Log4Func(("VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE\n"));3322 }3323 else3324 Assert(!(fVal & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE));4289 Assert(!pVmxTransient->fIsNestedGuest); 4290 Assert( pVmcsInfo->pfnStartVM == VMXR0SwitcherStartVM64 4291 || pVmcsInfo->pfnStartVM == VMXR0StartVM32); 4292 /* Set the host address-space size based on the switcher, not guest state. See @bugref{8432}. */ 4293 if (pVmcsInfo->pfnStartVM == VMXR0SwitcherStartVM64) 4294 { 4295 /* The switcher returns to long mode, the EFER MSR is managed by the switcher. */ 4296 fVal |= VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE; 4297 } 4298 else 4299 Assert(!(fVal & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE)); 3325 4300 #endif 3326 4301 3327 /* If the newer VMCS fields for managing EFER exists, use it. */ 3328 if ( pVM->hm.s.vmx.fSupportsVmcsEfer 3329 && hmR0VmxShouldSwapEferMsr(pVCpu)) 3330 { 3331 fVal |= VMX_EXIT_CTLS_SAVE_EFER_MSR 3332 | VMX_EXIT_CTLS_LOAD_EFER_MSR; 3333 Log4Func(("VMX_EXIT_CTLS_SAVE_EFER_MSR and VMX_EXIT_CTLS_LOAD_EFER_MSR\n")); 3334 } 3335 3336 /* Don't acknowledge external interrupts on VM-exit. We want to let the host do that. */ 3337 Assert(!(fVal & VMX_EXIT_CTLS_ACK_EXT_INT)); 3338 3339 /** @todo VMX_EXIT_CTLS_LOAD_PERF_MSR, 3340 * VMX_EXIT_CTLS_SAVE_PAT_MSR, 3341 * VMX_EXIT_CTLS_LOAD_PAT_MSR. */ 3342 3343 /* Enable saving of the VMX preemption timer value on VM-exit. */ 3344 if ( pVM->hm.s.vmx.fUsePreemptTimer 3345 && (pVM->hm.s.vmx.Msrs.ExitCtls.n.allowed1 & VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER)) 3346 fVal |= VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER; 3347 3348 if ((fVal & fZap) != fVal) 3349 { 3350 LogRelFunc(("Invalid VM-exit controls combo! cpu=%#RX32 fVal=%#RX32 fZap=%R#X32\n", 3351 pVM->hm.s.vmx.Msrs.ExitCtls.n.allowed0, fVal, fZap)); 3352 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_EXIT; 3353 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO; 3354 } 3355 3356 /* Commit it to the VMCS and update our cache. */ 3357 if (pVCpu->hm.s.vmx.Ctls.u32ExitCtls != fVal) 3358 { 3359 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT, fVal); 3360 AssertRCReturn(rc, rc); 3361 pVCpu->hm.s.vmx.Ctls.u32ExitCtls = fVal; 3362 } 3363 3364 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_VMX_EXIT_CTLS); 4302 /* 4303 * If the VMCS EFER MSR fields are supported by the hardware, we use it. 4304 * 4305 * For nested-guests, we should use the "save IA32_EFER" control if we also 4306 * used the "load IA32_EFER" control while exporting VM-entry controls. 4307 */ 4308 if ( pVM->hm.s.vmx.fSupportsVmcsEfer 4309 && hmR0VmxShouldSwapEferMsr(pVCpu)) 4310 { 4311 fVal |= VMX_EXIT_CTLS_SAVE_EFER_MSR 4312 | VMX_EXIT_CTLS_LOAD_EFER_MSR; 4313 } 4314 4315 /* 4316 * Enable saving of the VMX-preemption timer value on VM-exit. 4317 * For nested-guests, currently not exposed/used. 4318 */ 4319 if ( pVM->hm.s.vmx.fUsePreemptTimer 4320 && (pVM->hm.s.vmx.Msrs.ExitCtls.n.allowed1 & VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER)) 4321 fVal |= VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER; 4322 4323 /* Don't acknowledge external interrupts on VM-exit. We want to let the host do that. */ 4324 Assert(!(fVal & VMX_EXIT_CTLS_ACK_EXT_INT)); 4325 4326 /** @todo VMX_EXIT_CTLS_LOAD_PERF_MSR, 4327 * VMX_EXIT_CTLS_SAVE_PAT_MSR, 4328 * VMX_EXIT_CTLS_LOAD_PAT_MSR. */ 4329 4330 if ((fVal & fZap) == fVal) 4331 { /* likely */ } 4332 else 4333 { 4334 Log4Func(("Invalid VM-exit controls combo! 
cpu=%#RX32 fVal=%#RX32 fZap=%R#X32\n", 4335 pVM->hm.s.vmx.Msrs.ExitCtls.n.allowed0, fVal, fZap)); 4336 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_EXIT; 4337 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO; 4338 } 4339 4340 /* Commit it to the VMCS. */ 4341 if (pVmcsInfo->u32ExitCtls != fVal) 4342 { 4343 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT, fVal); 4344 AssertRCReturn(rc, rc); 4345 pVmcsInfo->u32ExitCtls = fVal; 4346 } 4347 } 4348 4349 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_VMX_ENTRY_EXIT_CTLS); 3365 4350 } 3366 4351 return VINF_SUCCESS; … … 3373 4358 * @returns VBox status code. 3374 4359 * @param pVCpu The cross context virtual CPU structure. 4360 * @param pVmcsInfo The VMCS info. object. 3375 4361 * @param u32TprThreshold The TPR threshold (task-priority class only). 3376 4362 */ 3377 DECLINLINE(int) hmR0VmxApicSetTprThreshold(PVMCPU pVCpu, uint32_t u32TprThreshold)4363 DECLINLINE(int) hmR0VmxApicSetTprThreshold(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo, uint32_t u32TprThreshold) 3378 4364 { 3379 4365 Assert(!(u32TprThreshold & ~VMX_TPR_THRESHOLD_MASK)); /* Bits 31:4 MBZ. */ 3380 Assert(pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW); RT_NOREF_PV(pVCpu); 4366 Assert(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW); 4367 RT_NOREF2(pVCpu, pVmcsInfo); 3381 4368 return VMXWriteVmcs32(VMX_VMCS32_CTRL_TPR_THRESHOLD, u32TprThreshold); 3382 4369 } … … 3387 4374 * 3388 4375 * @returns VBox status code. 3389 * @param pVCpu The cross context virtual CPU structure. 4376 * @param pVCpu The cross context virtual CPU structure. 4377 * @param pVmxTransient The VMX-transient structure. 3390 4378 * 3391 4379 * @remarks No-long-jump zone!!! 3392 4380 */ 3393 static int hmR0VmxExportGuestApicTpr(PVMCPU pVCpu )4381 static int hmR0VmxExportGuestApicTpr(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) 3394 4382 { 3395 4383 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_APIC_TPR) … … 3397 4385 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_APIC_TPR); 3398 4386 3399 if ( PDMHasApic(pVCpu->CTX_SUFF(pVM)) 3400 && APICIsEnabled(pVCpu)) 3401 { 3402 /* 3403 * Setup TPR shadowing. 3404 */ 3405 if (pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW) 4387 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo; 4388 if (!pVmxTransient->fIsNestedGuest) 4389 { 4390 if ( PDMHasApic(pVCpu->CTX_SUFF(pVM)) 4391 && APICIsEnabled(pVCpu)) 3406 4392 { 3407 Assert(pVCpu->hm.s.vmx.HCPhysVirtApic);3408 3409 bool fPendingIntr = false;3410 uint8_t u8Tpr = 0;3411 uint8_t u8PendingIntr = 0;3412 int rc = APICGetTpr(pVCpu, &u8Tpr, &fPendingIntr, &u8PendingIntr);3413 AssertRCReturn(rc, rc);3414 3415 4393 /* 3416 * If there are interrupts pending but masked by the TPR, instruct VT-x to 3417 * cause a TPR-below-threshold VM-exit when the guest lowers its TPR below the 3418 * priority of the pending interrupt so we can deliver the interrupt. If there 3419 * are no interrupts pending, set threshold to 0 to not cause any 3420 * TPR-below-threshold VM-exits. 4394 * Setup TPR shadowing. 3421 4395 */ 3422 pVCpu->hm.s.vmx.pbVirtApic[XAPIC_OFF_TPR] = u8Tpr; 3423 uint32_t u32TprThreshold = 0; 3424 if (fPendingIntr) 4396 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW) 3425 4397 { 3426 /* Bits 3:0 of the TPR threshold field correspond to bits 7:4 of the TPR (which is the Task-Priority Class). 
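The TPR-shadowing logic above programs the TPR-threshold field so that VT-x exits exactly when the guest lowers its TPR below the priority class of the highest pending interrupt. Bits 7:4 of the TPR form the priority class, and only bits 3:0 of the threshold field are significant. A hedged restatement of that computation (function name invented):

#include <stdint.h>
#include <stdbool.h>

/* Compute the value for the VMCS TPR-threshold field (bits 3:0 used).
   u8Tpr         - current virtual-APIC TPR (full 8 bits).
   fPendingIntr  - an interrupt is pending but masked by the TPR.
   u8PendingIntr - vector of that pending interrupt. */
uint32_t ComputeTprThreshold(uint8_t u8Tpr, bool fPendingIntr, uint8_t u8PendingIntr)
{
    uint32_t uThreshold = 0;                               /* nothing pending: never exit      */
    if (fPendingIntr)
    {
        uint8_t const uPendingClass = u8PendingIntr >> 4;  /* priority class of pending vector */
        uint8_t const uTprClass     = u8Tpr >> 4;          /* priority class the guest set     */
        if (uPendingClass <= uTprClass)
            uThreshold = uPendingClass;                    /* exit once TPR drops below this   */
    }
    return uThreshold;
}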
*/ 3427 const uint8_t u8PendingPriority = u8PendingIntr >> 4; 3428 const uint8_t u8TprPriority = u8Tpr >> 4; 3429 if (u8PendingPriority <= u8TprPriority) 3430 u32TprThreshold = u8PendingPriority; 4398 bool fPendingIntr = false; 4399 uint8_t u8Tpr = 0; 4400 uint8_t u8PendingIntr = 0; 4401 int rc = APICGetTpr(pVCpu, &u8Tpr, &fPendingIntr, &u8PendingIntr); 4402 AssertRCReturn(rc, rc); 4403 4404 /* 4405 * If there are interrupts pending but masked by the TPR, instruct VT-x to 4406 * cause a TPR-below-threshold VM-exit when the guest lowers its TPR below the 4407 * priority of the pending interrupt so we can deliver the interrupt. If there 4408 * are no interrupts pending, set threshold to 0 to not cause any 4409 * TPR-below-threshold VM-exits. 4410 */ 4411 Assert(pVmcsInfo->pbVirtApic); 4412 pVmcsInfo->pbVirtApic[XAPIC_OFF_TPR] = u8Tpr; 4413 uint32_t u32TprThreshold = 0; 4414 if (fPendingIntr) 4415 { 4416 /* Bits 3:0 of the TPR threshold field correspond to bits 7:4 of the TPR 4417 (which is the Task-Priority Class). */ 4418 const uint8_t u8PendingPriority = u8PendingIntr >> 4; 4419 const uint8_t u8TprPriority = u8Tpr >> 4; 4420 if (u8PendingPriority <= u8TprPriority) 4421 u32TprThreshold = u8PendingPriority; 4422 } 4423 4424 rc = hmR0VmxApicSetTprThreshold(pVCpu, pVmcsInfo, u32TprThreshold); 4425 AssertRCReturn(rc, rc); 3431 4426 } 3432 3433 rc = hmR0VmxApicSetTprThreshold(pVCpu, u32TprThreshold);3434 AssertRCReturn(rc, rc);3435 4427 } 3436 4428 } 4429 /* else: the TPR threshold has already been updated while merging the nested-guest VMCS. */ 3437 4430 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_APIC_TPR); 3438 4431 } … … 3442 4435 3443 4436 /** 3444 * Gets the guest 's interruptibility-state ("interrupt shadow" as AMD calls it).4437 * Gets the guest interruptibility-state. 3445 4438 * 3446 4439 * @returns Guest's interruptibility-state. 3447 4440 * @param pVCpu The cross context virtual CPU structure. 4441 * @param pVmcsInfo The VMCS info. object. 3448 4442 * 3449 4443 * @remarks No-long-jump zone!!! 3450 4444 */ 3451 static uint32_t hmR0VmxGetGuestIntrState(PVMCPU pVCpu )4445 static uint32_t hmR0VmxGetGuestIntrState(PVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo) 3452 4446 { 3453 4447 /* … … 3457 4451 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)) 3458 4452 { 3459 /* If inhibition is active, RIP & RFLAGS should've been accessed4453 /* If inhibition is active, RIP and RFLAGS should've been updated 3460 4454 (i.e. read previously from the VMCS or from ring-3). */ 3461 4455 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 3462 4456 #ifdef VBOX_STRICT 3463 4457 uint64_t const fExtrn = ASMAtomicUoReadU64(&pCtx->fExtrn); 4458 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE(); 3464 4459 AssertMsg(!(fExtrn & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS)), ("%#x\n", fExtrn)); 3465 4460 #endif … … 3487 4482 * setting this would block host-NMIs and IRET will not clear the blocking. 3488 4483 * 4484 * We always set NMI-exiting so when the host receives an NMI we get a VM-exit. 4485 * 3489 4486 * See Intel spec. 26.6.1 "Interruptibility state". See @bugref{7445}. 3490 4487 */ 3491 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS) 3492 && (pVCpu->hm.s.vmx.Ctls.u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)) 3493 { 4488 if ( (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI) 4489 && CPUMIsGuestNmiBlocking(pVCpu)) 3494 4490 fIntrState |= VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI; 3495 }3496 4491 3497 4492 return fIntrState; … … 3503 4498 * 3504 4499 * @returns VBox status code. 
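hmR0VmxGetGuestIntrState() above folds the interrupt shadow and the NMI-blocking condition into the VMCS guest interruptibility-state field. A simplified sketch using the architectural bit positions (bit 0 blocking-by-STI, bit 1 blocking-by-MOV-SS, bit 3 blocking-by-NMI); whether the shadow came from STI or from MOV SS/POP SS is decided elsewhere in the real code, so here it is simply a parameter, and the booleans stand in for the force-flag and pin-control checks:

#include <stdint.h>
#include <stdbool.h>

#define INT_STATE_BLOCK_STI   UINT32_C(0x1)  /* blocking by STI            */
#define INT_STATE_BLOCK_MOVSS UINT32_C(0x2)  /* blocking by MOV SS/POP SS  */
#define INT_STATE_BLOCK_NMI   UINT32_C(0x8)  /* blocking by NMI            */

uint32_t GetGuestIntrState(bool fInhibitInterrupts, bool fAfterSti, bool fVirtNmi, bool fNmiBlocked)
{
    uint32_t fIntrState = 0;
    if (fInhibitInterrupts)
        fIntrState = fAfterSti ? INT_STATE_BLOCK_STI : INT_STATE_BLOCK_MOVSS;
    if (fVirtNmi && fNmiBlocked)    /* only meaningful when "virtual NMIs" is enabled */
        fIntrState |= INT_STATE_BLOCK_NMI;
    return fIntrState;
}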
3505 * @param pVCpu The cross context virtual CPU structure. 4500 * @param pVCpu The cross context virtual CPU structure. 4501 * @param pVmxTransient The VMX-transient structure. 3506 4502 * 3507 4503 * @remarks No-long-jump zone!!! 3508 4504 */ 3509 static int hmR0VmxExportGuestXcptIntercepts(PVMCPU pVCpu )4505 static int hmR0VmxExportGuestXcptIntercepts(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) 3510 4506 { 3511 4507 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_VMX_GUEST_XCPT_INTERCEPTS) 3512 4508 { 3513 uint32_t uXcptBitmap = pVCpu->hm.s.vmx.Ctls.u32XcptBitmap; 3514 3515 /* The remaining exception intercepts are handled elsewhere, e.g. in hmR0VmxExportGuestCR0(). */ 3516 if (pVCpu->hm.s.fGIMTrapXcptUD) 3517 uXcptBitmap |= RT_BIT(X86_XCPT_UD); 3518 #ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS 4509 /* When executing a nested-guest, we do not need to trap GIM hypercalls by intercepting #UD. */ 4510 if ( !pVmxTransient->fIsNestedGuest 4511 && pVCpu->hm.s.fGIMTrapXcptUD) 4512 hmR0VmxAddXcptIntercept(pVmxTransient, X86_XCPT_UD); 3519 4513 else 3520 uXcptBitmap &= ~RT_BIT(X86_XCPT_UD); 3521 #endif 3522 3523 Assert(uXcptBitmap & RT_BIT_32(X86_XCPT_AC)); 3524 Assert(uXcptBitmap & RT_BIT_32(X86_XCPT_DB)); 3525 3526 if (uXcptBitmap != pVCpu->hm.s.vmx.Ctls.u32XcptBitmap) 3527 { 3528 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap); 3529 AssertRCReturn(rc, rc); 3530 pVCpu->hm.s.vmx.Ctls.u32XcptBitmap = uXcptBitmap; 3531 } 3532 4514 hmR0VmxRemoveXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_UD); 4515 4516 /* Other exception intercepts are handled elsewhere, e.g. while exporting guest CR0. */ 3533 4517 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_VMX_GUEST_XCPT_INTERCEPTS); 3534 Log4Func(("VMX_VMCS32_CTRL_EXCEPTION_BITMAP=%#RX64\n", uXcptBitmap));3535 4518 } 3536 4519 return VINF_SUCCESS; … … 3542 4525 * 3543 4526 * @returns VBox status code. 3544 * @param pVCpu The cross context virtual CPU structure.4527 * @param pVCpu The cross context virtual CPU structure. 3545 4528 * 3546 4529 * @remarks No-long-jump zone!!! … … 3548 4531 static int hmR0VmxExportGuestRip(PVMCPU pVCpu) 3549 4532 { 3550 int rc = VINF_SUCCESS;3551 4533 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_RIP) 3552 4534 { 3553 4535 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP); 3554 4536 3555 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_RIP, pVCpu->cpum.GstCtx.rip);4537 int rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_RIP, pVCpu->cpum.GstCtx.rip); 3556 4538 AssertRCReturn(rc, rc); 3557 4539 3558 4540 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_RIP); 3559 Log4Func((" RIP=%#RX64\n", pVCpu->cpum.GstCtx.rip));3560 } 3561 return rc;4541 Log4Func(("rip=%#RX64\n", pVCpu->cpum.GstCtx.rip)); 4542 } 4543 return VINF_SUCCESS; 3562 4544 } 3563 4545 … … 3567 4549 * 3568 4550 * @returns VBox status code. 3569 * @param pVCpu The cross context virtual CPU structure.4551 * @param pVCpu The cross context virtual CPU structure. 3570 4552 * 3571 4553 * @remarks No-long-jump zone!!! … … 3590 4572 * 3591 4573 * @returns VBox status code. 3592 * @param pVCpu The cross context virtual CPU structure. 4574 * @param pVCpu The cross context virtual CPU structure. 4575 * @param pVmxTransient The VMX-transient structure. 3593 4576 * 3594 4577 * @remarks No-long-jump zone!!! 
3595 4578 */ 3596 static int hmR0VmxExportGuestRflags(PVMCPU pVCpu )4579 static int hmR0VmxExportGuestRflags(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) 3597 4580 { 3598 4581 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_RFLAGS) … … 3612 4595 * can run the real-mode guest code under Virtual 8086 mode. 3613 4596 */ 3614 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active) 4597 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo; 4598 if (pVmcsInfo->RealMode.fRealOnV86Active) 3615 4599 { 3616 4600 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS); 3617 4601 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM))); 3618 pVCpu->hm.s.vmx.RealMode.Eflags.u32 = fEFlags.u32; /* Save the original eflags of the real-mode guest. */ 3619 fEFlags.Bits.u1VM = 1; /* Set the Virtual 8086 mode bit. */ 3620 fEFlags.Bits.u2IOPL = 0; /* Change IOPL to 0, otherwise certain instructions won't fault. */ 4602 Assert(!pVmxTransient->fIsNestedGuest); 4603 pVmcsInfo->RealMode.Eflags.u32 = fEFlags.u32; /* Save the original eflags of the real-mode guest. */ 4604 fEFlags.Bits.u1VM = 1; /* Set the Virtual 8086 mode bit. */ 4605 fEFlags.Bits.u2IOPL = 0; /* Change IOPL to 0, otherwise certain instructions won't fault. */ 3621 4606 } 3622 4607 … … 3630 4615 * through the hypervisor debugger using EFLAGS.TF. 3631 4616 */ 3632 if ( !pVCpu->hm.s.fSingleInstruction 4617 if ( !pVmxTransient->fIsNestedGuest 4618 && !pVCpu->hm.s.fSingleInstruction 3633 4619 && fEFlags.Bits.u1TF) 3634 4620 { 3635 /** @todo r=ramshankar: Warning! We ASSUME EFLAGS.TF will not cleared on4621 /** @todo r=ramshankar: Warning!! We ASSUME EFLAGS.TF will not cleared on 3636 4622 * premature trips to ring-3 esp since IEM does not yet handle it. */ 3637 4623 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, VMX_VMCS_GUEST_PENDING_DEBUG_XCPT_BS); 3638 4624 AssertRCReturn(rc, rc); 3639 4625 } 4626 /** @todo NSTVMX: Handling copying of VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS from 4627 * nested-guest VMCS. */ 3640 4628 3641 4629 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_RFLAGS); … … 3653 4641 * 3654 4642 * @returns VBox status code. 3655 * @param pVCpu The cross context virtual CPU structure. 4643 * @param pVCpu The cross context virtual CPU structure. 4644 * @param pVmxTransient The VMX-transient structure. 3656 4645 * 3657 4646 * @remarks No-long-jump zone!!! 3658 4647 */ 3659 static int hmR0VmxExportGuestCR0(PVMCPU pVCpu )4648 static int hmR0VmxExportGuestCR0(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) 3660 4649 { 3661 4650 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_CR0) 3662 4651 { 3663 PVM pVM = pVCpu->CTX_SUFF(pVM); 3664 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0); 3665 Assert(!RT_HI_U32(pVCpu->cpum.GstCtx.cr0)); 3666 3667 uint32_t const u32ShadowCr0 = pVCpu->cpum.GstCtx.cr0; 3668 uint32_t u32GuestCr0 = pVCpu->cpum.GstCtx.cr0; 4652 PVM pVM = pVCpu->CTX_SUFF(pVM); 4653 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo; 3669 4654 3670 4655 /* 3671 * Setup VT-x's view of the guest CR0. 3672 * Minimize VM-exits due to CR3 changes when we have NestedPaging. 4656 * Figure out fixed CR0 bits in VMX operation. 
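The RFLAGS export above contains the real-on-v86 trick: the guest's original EFLAGS are stashed away, then the VM bit is set and IOPL forced to 0 so the CPU runs the real-mode code as virtual-8086 with the desired faulting behaviour. A hedged standalone illustration (bit positions per the architecture: VM is bit 17, IOPL is bits 13:12; the function name is invented):

#include <stdint.h>

#define EFLAGS_IOPL_MASK  UINT32_C(0x00003000)  /* bits 13:12 */
#define EFLAGS_VM         UINT32_C(0x00020000)  /* bit 17     */

/* Mangle the guest EFLAGS for running real-mode code under virtual-8086 mode.
   *pu32SavedEflags receives the unmodified value so it can be restored later. */
uint32_t MangleEflagsForV86(uint32_t u32GuestEflags, uint32_t *pu32SavedEflags)
{
    *pu32SavedEflags = u32GuestEflags;      /* remember the real-mode guest's own view   */
    u32GuestEflags |= EFLAGS_VM;            /* run as virtual-8086                       */
    u32GuestEflags &= ~EFLAGS_IOPL_MASK;    /* IOPL 0 so privileged instructions fault   */
    return u32GuestEflags;
}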
3673 4657 */ 3674 uint32_t uProcCtls = pVCpu->hm.s.vmx.Ctls.u32ProcCtls; 3675 if (pVM->hm.s.fNestedPaging) 3676 { 3677 if (CPUMIsGuestPagingEnabled(pVCpu)) 4658 uint64_t fSetCr0 = pVM->hm.s.vmx.Msrs.u64Cr0Fixed0 & pVM->hm.s.vmx.Msrs.u64Cr0Fixed1; 4659 uint64_t const fZapCr0 = pVM->hm.s.vmx.Msrs.u64Cr0Fixed0 | pVM->hm.s.vmx.Msrs.u64Cr0Fixed1; 4660 if (pVM->hm.s.vmx.fUnrestrictedGuest) 4661 fSetCr0 &= ~(uint64_t)(X86_CR0_PE | X86_CR0_PG); 4662 else 4663 Assert((fSetCr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG)); 4664 4665 if (!pVmxTransient->fIsNestedGuest) 4666 { 4667 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0); 4668 uint64_t u64GuestCr0 = pVCpu->cpum.GstCtx.cr0; 4669 uint64_t const u64ShadowCr0 = u64GuestCr0; 4670 Assert(!RT_HI_U32(u64GuestCr0)); 4671 4672 /* 4673 * Setup VT-x's view of the guest CR0. 4674 */ 4675 uint32_t uProcCtls = pVmcsInfo->u32ProcCtls; 4676 if (pVM->hm.s.fNestedPaging) 3678 4677 { 3679 /* The guest has paging enabled, let it access CR3 without causing a VM-exit if supported. */ 3680 uProcCtls &= ~( VMX_PROC_CTLS_CR3_LOAD_EXIT 3681 | VMX_PROC_CTLS_CR3_STORE_EXIT); 4678 if (CPUMIsGuestPagingEnabled(pVCpu)) 4679 { 4680 /* The guest has paging enabled, let it access CR3 without causing a VM-exit if supported. */ 4681 uProcCtls &= ~( VMX_PROC_CTLS_CR3_LOAD_EXIT 4682 | VMX_PROC_CTLS_CR3_STORE_EXIT); 4683 } 4684 else 4685 { 4686 /* The guest doesn't have paging enabled, make CR3 access cause a VM-exit to update our shadow. */ 4687 uProcCtls |= VMX_PROC_CTLS_CR3_LOAD_EXIT 4688 | VMX_PROC_CTLS_CR3_STORE_EXIT; 4689 } 4690 4691 /* If we have unrestricted guest execution, we never have to intercept CR3 reads. */ 4692 if (pVM->hm.s.vmx.fUnrestrictedGuest) 4693 uProcCtls &= ~VMX_PROC_CTLS_CR3_STORE_EXIT; 3682 4694 } 3683 4695 else 3684 4696 { 3685 /* The guest doesn't have paging enabled, make CR3 access cause a VM-exit to update our shadow. */ 3686 uProcCtls |= VMX_PROC_CTLS_CR3_LOAD_EXIT 3687 | VMX_PROC_CTLS_CR3_STORE_EXIT; 4697 /* Guest CPL 0 writes to its read-only pages should cause a #PF VM-exit. */ 4698 u64GuestCr0 |= X86_CR0_WP; 3688 4699 } 3689 4700 3690 /* If we have unrestricted guest execution, we never have to intercept CR3 reads. */ 3691 if (pVM->hm.s.vmx.fUnrestrictedGuest) 3692 uProcCtls &= ~VMX_PROC_CTLS_CR3_STORE_EXIT; 4701 /* 4702 * Guest FPU bits. 4703 * 4704 * Since we pre-load the guest FPU always before VM-entry there is no need to track lazy state 4705 * using CR0.TS. 4706 * 4707 * Intel spec. 23.8 "Restrictions on VMX operation" mentions that CR0.NE bit must always be 4708 * set on the first CPUs to support VT-x and no mention of with regards to UX in VM-entry checks. 4709 */ 4710 u64GuestCr0 |= X86_CR0_NE; 4711 4712 /* If CR0.NE isn't set, we need to intercept #MF exceptions and report them to the guest differently. */ 4713 bool const fInterceptMF = !(u64ShadowCr0 & X86_CR0_NE); 4714 4715 /* 4716 * Update exception intercepts. 4717 */ 4718 uint32_t uXcptBitmap = pVmcsInfo->u32XcptBitmap; 4719 if (pVmcsInfo->RealMode.fRealOnV86Active) 4720 { 4721 Assert(PDMVmmDevHeapIsEnabled(pVM)); 4722 Assert(pVM->hm.s.vmx.pRealModeTSS); 4723 uXcptBitmap |= HMVMX_REAL_MODE_XCPT_MASK; 4724 } 4725 else 4726 { 4727 /* For now, cleared here as mode-switches can happen outside HM/VT-x. See @bugref{7626#c11}. */ 4728 uXcptBitmap &= ~HMVMX_REAL_MODE_XCPT_MASK; 4729 if (fInterceptMF) 4730 uXcptBitmap |= RT_BIT(X86_XCPT_MF); 4731 } 4732 4733 /* Additional intercepts for debugging, define these yourself explicitly. 
*/ 4734 #ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS 4735 uXcptBitmap |= 0 4736 | RT_BIT(X86_XCPT_BP) 4737 | RT_BIT(X86_XCPT_DE) 4738 | RT_BIT(X86_XCPT_NM) 4739 | RT_BIT(X86_XCPT_TS) 4740 | RT_BIT(X86_XCPT_UD) 4741 | RT_BIT(X86_XCPT_NP) 4742 | RT_BIT(X86_XCPT_SS) 4743 | RT_BIT(X86_XCPT_GP) 4744 | RT_BIT(X86_XCPT_PF) 4745 | RT_BIT(X86_XCPT_MF) 4746 ; 4747 #elif defined(HMVMX_ALWAYS_TRAP_PF) 4748 uXcptBitmap |= RT_BIT(X86_XCPT_PF); 4749 #endif 4750 if (pVCpu->hm.s.fTrapXcptGpForLovelyMesaDrv) 4751 uXcptBitmap |= RT_BIT(X86_XCPT_GP); 4752 Assert(pVM->hm.s.fNestedPaging || (uXcptBitmap & RT_BIT(X86_XCPT_PF))); 4753 4754 /* Apply the fixed CR0 bits and enable caching. */ 4755 u64GuestCr0 |= fSetCr0; 4756 u64GuestCr0 &= fZapCr0; 4757 u64GuestCr0 &= ~(uint64_t)(X86_CR0_CD | X86_CR0_NW); 4758 4759 /* Commit the CR0 and related fields to the guest VMCS. */ 4760 int rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_CR0, u64GuestCr0); 4761 rc |= VMXWriteVmcsHstN(VMX_VMCS_CTRL_CR0_READ_SHADOW, u64ShadowCr0); 4762 if (uProcCtls != pVmcsInfo->u32ProcCtls) 4763 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, uProcCtls); 4764 if (uXcptBitmap != pVmcsInfo->u32XcptBitmap) 4765 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap); 4766 AssertRCReturn(rc, rc); 4767 4768 /* Update our caches. */ 4769 pVmcsInfo->u32ProcCtls = uProcCtls; 4770 pVmcsInfo->u32XcptBitmap = uXcptBitmap; 4771 4772 Log4Func(("cr0=%#RX64 shadow=%#RX64 set=%#RX64 zap=%#RX64\n", u64GuestCr0, u64ShadowCr0, fSetCr0, fZapCr0)); 3693 4773 } 3694 4774 else 3695 4775 { 3696 /* Guest CPL 0 writes to its read-only pages should cause a #PF VM-exit. */ 3697 u32GuestCr0 |= X86_CR0_WP; 3698 } 3699 3700 /* 3701 * Guest FPU bits. 3702 * 3703 * Since we pre-load the guest FPU always before VM-entry there is no need to track lazy state 3704 * using CR0.TS. 3705 * 3706 * Intel spec. 23.8 "Restrictions on VMX operation" mentions that CR0.NE bit must always be 3707 * set on the first CPUs to support VT-x and no mention of with regards to UX in VM-entry checks. 3708 */ 3709 u32GuestCr0 |= X86_CR0_NE; 3710 3711 /* If CR0.NE isn't set, we need to intercept #MF exceptions and report them to the guest differently. */ 3712 bool const fInterceptMF = !(u32ShadowCr0 & X86_CR0_NE); 3713 3714 /* 3715 * Update exception intercepts. 3716 */ 3717 uint32_t uXcptBitmap = pVCpu->hm.s.vmx.Ctls.u32XcptBitmap; 3718 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active) 3719 { 3720 Assert(PDMVmmDevHeapIsEnabled(pVM)); 3721 Assert(pVM->hm.s.vmx.pRealModeTSS); 3722 uXcptBitmap |= HMVMX_REAL_MODE_XCPT_MASK; 3723 } 3724 else 3725 { 3726 /* For now, cleared here as mode-switches can happen outside HM/VT-x. See @bugref{7626#c11}. */ 3727 uXcptBitmap &= ~HMVMX_REAL_MODE_XCPT_MASK; 3728 if (fInterceptMF) 3729 uXcptBitmap |= RT_BIT(X86_XCPT_MF); 3730 } 3731 3732 /* Additional intercepts for debugging, define these yourself explicitly. 
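The fixed-CR0 handling in this function comes straight from the IA32_VMX_CR0_FIXED0/FIXED1 MSRs: FIXED0 lists the bits that must be 1 while in VMX operation, FIXED1 the bits that may be 1, and with unrestricted guest execution PE and PG are exempted. A hedged sketch of the fSetCr0/fZapCr0 computation (names invented; the CD/NW clearing mirrors the "always enable caching" step above):

#include <stdint.h>
#include <stdbool.h>

#define CR0_PE  UINT64_C(0x00000001)
#define CR0_NW  UINT64_C(0x20000000)
#define CR0_CD  UINT64_C(0x40000000)
#define CR0_PG  UINT64_C(0x80000000)

/* Apply the CR0 bits the CPU demands while in VMX operation.
   uFixed0/uFixed1 are the raw IA32_VMX_CR0_FIXED0/FIXED1 MSR values. */
uint64_t ApplyFixedCr0(uint64_t uGuestCr0, uint64_t uFixed0, uint64_t uFixed1, bool fUnrestrictedGuest)
{
    uint64_t       fSetCr0 = uFixed0 & uFixed1;     /* bits that must be 1 */
    uint64_t const fZapCr0 = uFixed0 | uFixed1;     /* bits that may be 1  */
    if (fUnrestrictedGuest)
        fSetCr0 &= ~(CR0_PE | CR0_PG);              /* unrestricted guests may clear PE/PG */

    uGuestCr0 |= fSetCr0;
    uGuestCr0 &= fZapCr0;
    uGuestCr0 &= ~(CR0_CD | CR0_NW);                /* always run with caching enabled */
    return uGuestCr0;
}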
*/ 3733 #ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS 3734 uXcptBitmap |= 0 3735 | RT_BIT(X86_XCPT_BP) 3736 | RT_BIT(X86_XCPT_DE) 3737 | RT_BIT(X86_XCPT_NM) 3738 | RT_BIT(X86_XCPT_TS) 3739 | RT_BIT(X86_XCPT_UD) 3740 | RT_BIT(X86_XCPT_NP) 3741 | RT_BIT(X86_XCPT_SS) 3742 | RT_BIT(X86_XCPT_GP) 3743 | RT_BIT(X86_XCPT_PF) 3744 | RT_BIT(X86_XCPT_MF) 3745 ; 3746 #elif defined(HMVMX_ALWAYS_TRAP_PF) 3747 uXcptBitmap |= RT_BIT(X86_XCPT_PF); 3748 #endif 3749 if (pVCpu->hm.s.fTrapXcptGpForLovelyMesaDrv) 3750 uXcptBitmap |= RT_BIT(X86_XCPT_GP); 3751 Assert(pVM->hm.s.fNestedPaging || (uXcptBitmap & RT_BIT(X86_XCPT_PF))); 3752 3753 /* 3754 * Set/clear the CR0 specific bits along with their exceptions (PE, PG, CD, NW). 3755 */ 3756 uint32_t fSetCr0 = (uint32_t)(pVM->hm.s.vmx.Msrs.u64Cr0Fixed0 & pVM->hm.s.vmx.Msrs.u64Cr0Fixed1); 3757 uint32_t fZapCr0 = (uint32_t)(pVM->hm.s.vmx.Msrs.u64Cr0Fixed0 | pVM->hm.s.vmx.Msrs.u64Cr0Fixed1); 3758 if (pVM->hm.s.vmx.fUnrestrictedGuest) /* Exceptions for unrestricted-guests for fixed CR0 bits (PE, PG). */ 3759 fSetCr0 &= ~(X86_CR0_PE | X86_CR0_PG); 3760 else 3761 Assert((fSetCr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG)); 3762 3763 u32GuestCr0 |= fSetCr0; 3764 u32GuestCr0 &= fZapCr0; 3765 u32GuestCr0 &= ~(X86_CR0_CD | X86_CR0_NW); /* Always enable caching. */ 3766 3767 /* 3768 * CR0 is shared between host and guest along with a CR0 read shadow. Therefore, certain bits must not be changed 3769 * by the guest because VT-x ignores saving/restoring them (namely CD, ET, NW) and for certain other bits 3770 * we want to be notified immediately of guest CR0 changes (e.g. PG to update our shadow page tables). 3771 */ 3772 uint32_t u32Cr0Mask = X86_CR0_PE 3773 | X86_CR0_NE 3774 | (pVM->hm.s.fNestedPaging ? 0 : X86_CR0_WP) 3775 | X86_CR0_PG 3776 | X86_CR0_ET /* Bit ignored on VM-entry and VM-exit. Don't let the guest modify the host CR0.ET */ 3777 | X86_CR0_CD /* Bit ignored on VM-entry and VM-exit. Don't let the guest modify the host CR0.CD */ 3778 | X86_CR0_NW; /* Bit ignored on VM-entry and VM-exit. Don't let the guest modify the host CR0.NW */ 3779 3780 /** @todo Avoid intercepting CR0.PE with unrestricted guests. Fix PGM 3781 * enmGuestMode to be in-sync with the current mode. See @bugref{6398} 3782 * and @bugref{6944}. */ 3783 #if 0 3784 if (pVM->hm.s.vmx.fUnrestrictedGuest) 3785 u32Cr0Mask &= ~X86_CR0_PE; 3786 #endif 3787 /* 3788 * Finally, update VMCS fields with the CR0 values and the exception bitmap. 3789 */ 3790 int rc = VMXWriteVmcs32(VMX_VMCS_GUEST_CR0, u32GuestCr0); 3791 rc |= VMXWriteVmcs32(VMX_VMCS_CTRL_CR0_READ_SHADOW, u32ShadowCr0); 3792 if (u32Cr0Mask != pVCpu->hm.s.vmx.Ctls.u32Cr0Mask) 3793 rc |= VMXWriteVmcs32(VMX_VMCS_CTRL_CR0_MASK, u32Cr0Mask); 3794 if (uProcCtls != pVCpu->hm.s.vmx.Ctls.u32ProcCtls) 3795 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, uProcCtls); 3796 if (uXcptBitmap != pVCpu->hm.s.vmx.Ctls.u32XcptBitmap) 3797 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap); 3798 AssertRCReturn(rc, rc); 3799 3800 /* Update our caches. 
*/ 3801 pVCpu->hm.s.vmx.Ctls.u32Cr0Mask = u32Cr0Mask; 3802 pVCpu->hm.s.vmx.Ctls.u32ProcCtls = uProcCtls; 3803 pVCpu->hm.s.vmx.Ctls.u32XcptBitmap = uXcptBitmap; 4776 PCVMXVVMCS pVmcsNstGst = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs); 4777 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0); 4778 uint64_t u64GuestCr0 = pVCpu->cpum.GstCtx.cr0; 4779 uint64_t const u64ShadowCr0 = pVmcsNstGst->u64Cr0ReadShadow.u; 4780 Assert(!RT_HI_U32(u64GuestCr0)); 4781 Assert(u64GuestCr0 & X86_CR0_NE); 4782 4783 /* Apply the fixed CR0 bits and enable caching. */ 4784 u64GuestCr0 |= fSetCr0; 4785 u64GuestCr0 &= fZapCr0; 4786 u64GuestCr0 &= ~(uint64_t)(X86_CR0_CD | X86_CR0_NW); 4787 4788 /* Commit the CR0 and CR0 read shadow to the nested-guest VMCS. */ 4789 int rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_CR0, u64GuestCr0); 4790 rc |= VMXWriteVmcsHstN(VMX_VMCS_CTRL_CR0_READ_SHADOW, u64ShadowCr0); 4791 AssertRCReturn(rc, rc); 4792 4793 Log4Func(("cr0=%#RX64 shadow=%#RX64 set=%#RX64 zap=%#RX64\n", u64GuestCr0, u64ShadowCr0, fSetCr0, fZapCr0)); 4794 } 3804 4795 3805 4796 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_CR0); 3806 3807 Log4Func(("u32Cr0Mask=%#RX32 u32ShadowCr0=%#RX32 u32GuestCr0=%#RX32 (fSetCr0=%#RX32 fZapCr0=%#RX32\n", u32Cr0Mask,3808 u32ShadowCr0, u32GuestCr0, fSetCr0, fZapCr0));3809 4797 } 3810 4798 … … 3822 4810 * mapped (e.g. EFI32). 3823 4811 * 3824 * @param pVCpu The cross context virtual CPU structure. 4812 * @param pVCpu The cross context virtual CPU structure. 4813 * @param pVmxTransient The VMX-transient structure. 3825 4814 * 3826 4815 * @remarks No-long-jump zone!!! 3827 4816 */ 3828 static VBOXSTRICTRC hmR0VmxExportGuestCR3AndCR4(PVMCPU pVCpu )4817 static VBOXSTRICTRC hmR0VmxExportGuestCR3AndCR4(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) 3829 4818 { 3830 4819 int rc = VINF_SUCCESS; … … 3846 4835 if (pVM->hm.s.fNestedPaging) 3847 4836 { 3848 pVCpu->hm.s.vmx.HCPhysEPTP = PGMGetHyperCR3(pVCpu); 4837 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo; 4838 pVmcsInfo->HCPhysEPTP = PGMGetHyperCR3(pVCpu); 3849 4839 3850 4840 /* Validate. See Intel spec. 28.2.2 "EPT Translation Mechanism" and 24.6.11 "Extended-Page-Table Pointer (EPTP)" */ 3851 Assert(pV Cpu->hm.s.vmx.HCPhysEPTP);3852 Assert(!(pV Cpu->hm.s.vmx.HCPhysEPTP & UINT64_C(0xfff0000000000000)));3853 Assert(!(pV Cpu->hm.s.vmx.HCPhysEPTP & 0xfff));4841 Assert(pVmcsInfo->HCPhysEPTP != NIL_RTHCPHYS); 4842 Assert(!(pVmcsInfo->HCPhysEPTP & UINT64_C(0xfff0000000000000))); 4843 Assert(!(pVmcsInfo->HCPhysEPTP & 0xfff)); 3854 4844 3855 4845 /* VMX_EPT_MEMTYPE_WB support is already checked in hmR0VmxSetupTaggedTlb(). */ 3856 pV Cpu->hm.s.vmx.HCPhysEPTP |= VMX_EPT_MEMTYPE_WB3857 4846 pVmcsInfo->HCPhysEPTP |= VMX_EPT_MEMTYPE_WB 4847 | (VMX_EPT_PAGE_WALK_LENGTH_DEFAULT << VMX_EPT_PAGE_WALK_LENGTH_SHIFT); 3858 4848 3859 4849 /* Validate. See Intel spec. 26.2.1 "Checks on VMX Controls" */ 3860 AssertMsg( ((pV Cpu->hm.s.vmx.HCPhysEPTP >> 3) & 0x07) == 3 /* Bits 3:5 (EPT page walk length - 1) must be 3. */3861 && ((pV Cpu->hm.s.vmx.HCPhysEPTP >> 7) & 0x1f) == 0, /* Bits 7:11 MBZ. */3862 ("EPTP %#RX64\n", pV Cpu->hm.s.vmx.HCPhysEPTP));3863 AssertMsg( !((pV Cpu->hm.s.vmx.HCPhysEPTP >> 6) & 0x01) /* Bit 6 (EPT accessed & dirty bit). */4850 AssertMsg( ((pVmcsInfo->HCPhysEPTP >> 3) & 0x07) == 3 /* Bits 3:5 (EPT page walk length - 1) must be 3. */ 4851 && ((pVmcsInfo->HCPhysEPTP >> 7) & 0x1f) == 0, /* Bits 7:11 MBZ. 
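The EPTP being set up and validated here packs three things into one 64-bit value: the memory type used for accessing the EPT paging structures (write-back), the page-walk length minus one, and the physical address of the EPT PML4 table. A hedged sketch of that composition using the architectural field positions (memory type in bits 2:0, walk length minus 1 in bits 5:3, accessed/dirty enable in bit 6; names invented):

#include <stdint.h>
#include <stdbool.h>

#define EPT_MEMTYPE_WB           UINT64_C(6)        /* write-back, bits 2:0        */
#define EPT_PAGE_WALK_LEN_4      UINT64_C(3)        /* 4-level walk - 1, bits 5:3  */
#define EPT_ACCESS_DIRTY_ENABLE  (UINT64_C(1) << 6)

/* Build an EPT pointer from the physical address of the PML4 table.  The
   address must be 4K aligned; pass fAccessDirty only if the CPU advertises
   A/D bit support in IA32_VMX_EPT_VPID_CAP. */
uint64_t MakeEptp(uint64_t HCPhysPml4, bool fAccessDirty)
{
    uint64_t uEptp = HCPhysPml4 & ~UINT64_C(0xfff);  /* bits 12 and up: PML4 address */
    uEptp |= EPT_MEMTYPE_WB;
    uEptp |= EPT_PAGE_WALK_LEN_4 << 3;
    if (fAccessDirty)
        uEptp |= EPT_ACCESS_DIRTY_ENABLE;
    return uEptp;
}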
*/ 4852 ("EPTP %#RX64\n", pVmcsInfo->HCPhysEPTP)); 4853 AssertMsg( !((pVmcsInfo->HCPhysEPTP >> 6) & 0x01) /* Bit 6 (EPT accessed & dirty bit). */ 3864 4854 || (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_EPT_ACCESS_DIRTY), 3865 ("EPTP accessed/dirty bit not supported by CPU but set %#RX64\n", pV Cpu->hm.s.vmx.HCPhysEPTP));3866 3867 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_EPTP_FULL, pV Cpu->hm.s.vmx.HCPhysEPTP);4855 ("EPTP accessed/dirty bit not supported by CPU but set %#RX64\n", pVmcsInfo->HCPhysEPTP)); 4856 4857 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_EPTP_FULL, pVmcsInfo->HCPhysEPTP); 3868 4858 AssertRCReturn(rc, rc); 3869 4859 … … 3885 4875 3886 4876 /* 3887 * The guest's view of its CR3 is unblemished with Nested Paging when the4877 * The guest's view of its CR3 is unblemished with nested paging when the 3888 4878 * guest is using paging or we have unrestricted guest execution to handle 3889 4879 * the guest when it's not using paging. 3890 4880 */ 4881 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR3); 3891 4882 GCPhysGuestCR3 = pCtx->cr3; 3892 4883 } … … 3924 4915 { 3925 4916 /* Non-nested paging case, just use the hypervisor's CR3. */ 3926 RTHCPHYS HCPhysGuestCR3 = PGMGetHyperCR3(pVCpu);4917 RTHCPHYS const HCPhysGuestCR3 = PGMGetHyperCR3(pVCpu); 3927 4918 3928 4919 Log4Func(("u32GuestCr3=%#RHv (HstN)\n", HCPhysGuestCR3)); … … 3940 4931 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_CR4) 3941 4932 { 3942 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 4933 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 4934 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo; 4935 PCVMXVVMCS pVmcsNstGst = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs); 4936 4937 /* 4938 * Figure out fixed CR4 bits in VMX operation. 4939 */ 4940 uint64_t const fSetCr4 = pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 & pVM->hm.s.vmx.Msrs.u64Cr4Fixed1; 4941 uint64_t const fZapCr4 = pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 | pVM->hm.s.vmx.Msrs.u64Cr4Fixed1; 4942 3943 4943 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4); 3944 Assert(!RT_HI_U32(pCtx->cr4)); 3945 3946 uint32_t u32GuestCr4 = pCtx->cr4; 3947 uint32_t const u32ShadowCr4 = pCtx->cr4; 4944 uint64_t u64GuestCr4 = pCtx->cr4; 4945 uint64_t const u64ShadowCr4 = !pVmxTransient->fIsNestedGuest ? pCtx->cr4 : pVmcsNstGst->u64Cr4ReadShadow.u; 4946 Assert(!RT_HI_U32(u64GuestCr4)); 3948 4947 3949 4948 /* … … 3956 4955 * See Intel spec. 20.2 "Software Interrupt Handling Methods While in Virtual-8086 Mode". 3957 4956 */ 3958 if (pV Cpu->hm.s.vmx.RealMode.fRealOnV86Active)4957 if (pVmcsInfo->RealMode.fRealOnV86Active) 3959 4958 { 3960 4959 Assert(pVM->hm.s.vmx.pRealModeTSS); 3961 4960 Assert(PDMVmmDevHeapIsEnabled(pVM)); 3962 u 32GuestCr4 &= ~X86_CR4_VME;4961 u64GuestCr4 &= ~(uint64_t)X86_CR4_VME; 3963 4962 } 3964 4963 … … 3969 4968 { 3970 4969 /* We use 4 MB pages in our identity mapping page table when the guest doesn't have paging. */ 3971 u 32GuestCr4 |= X86_CR4_PSE;4970 u64GuestCr4 |= X86_CR4_PSE; 3972 4971 /* Our identity mapping is a 32-bit page directory. */ 3973 u 32GuestCr4 &= ~X86_CR4_PAE;4972 u64GuestCr4 &= ~(uint64_t)X86_CR4_PAE; 3974 4973 } 3975 4974 /* else use guest CR4.*/ … … 3977 4976 else 3978 4977 { 4978 Assert(!pVmxTransient->fIsNestedGuest); 4979 3979 4980 /* 3980 4981 * The shadow paging modes and guest paging modes are different, the shadow is in accordance with the host … … 3987 4988 case PGMMODE_32_BIT: /* 32-bit paging. 
*/ 3988 4989 { 3989 u 32GuestCr4 &= ~X86_CR4_PAE;4990 u64GuestCr4 &= ~(uint64_t)X86_CR4_PAE; 3990 4991 break; 3991 4992 } … … 3994 4995 case PGMMODE_PAE_NX: /* PAE paging with NX. */ 3995 4996 { 3996 u 32GuestCr4 |= X86_CR4_PAE;4997 u64GuestCr4 |= X86_CR4_PAE; 3997 4998 break; 3998 4999 } … … 4009 5010 } 4010 5011 4011 /* We need to set and clear the CR4 specific bits here (mainly the X86_CR4_VMXE bit). */ 4012 uint64_t const fSetCr4 = (pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 & pVM->hm.s.vmx.Msrs.u64Cr4Fixed1); 4013 uint64_t const fZapCr4 = (pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 | pVM->hm.s.vmx.Msrs.u64Cr4Fixed1); 4014 u32GuestCr4 |= fSetCr4; 4015 u32GuestCr4 &= fZapCr4; 4016 4017 /* Setup CR4 mask. CR4 flags owned by the host, if the guest attempts to change them, 4018 that would cause a VM-exit. */ 4019 uint32_t u32Cr4Mask = X86_CR4_VME 4020 | X86_CR4_PAE 4021 | X86_CR4_PGE 4022 | X86_CR4_PSE 4023 | X86_CR4_VMXE; 4024 if (pVM->cpum.ro.HostFeatures.fXSaveRstor) 4025 u32Cr4Mask |= X86_CR4_OSXSAVE; 4026 if (pVM->cpum.ro.GuestFeatures.fPcid) 4027 u32Cr4Mask |= X86_CR4_PCIDE; 4028 4029 /* Write VT-x's view of the guest CR4, the CR4 modify mask and the read-only CR4 shadow 4030 into the VMCS and update our cache. */ 4031 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_CR4, u32GuestCr4); 4032 rc |= VMXWriteVmcs32(VMX_VMCS_CTRL_CR4_READ_SHADOW, u32ShadowCr4); 4033 if (pVCpu->hm.s.vmx.Ctls.u32Cr4Mask != u32Cr4Mask) 4034 rc |= VMXWriteVmcs32(VMX_VMCS_CTRL_CR4_MASK, u32Cr4Mask); 5012 /* Apply the fixed CR4 bits (mainly CR4.VMXE). */ 5013 u64GuestCr4 |= fSetCr4; 5014 u64GuestCr4 &= fZapCr4; 5015 5016 /* Commit the CR4 and CR4 read shadow to the guest VMCS. */ 5017 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_CR4, u64GuestCr4); 5018 rc |= VMXWriteVmcsHstN(VMX_VMCS_CTRL_CR4_READ_SHADOW, u64ShadowCr4); 4035 5019 AssertRCReturn(rc, rc); 4036 pVCpu->hm.s.vmx.Ctls.u32Cr4Mask = u32Cr4Mask;4037 5020 4038 5021 /* Whether to save/load/restore XCR0 during world switch depends on CR4.OSXSAVE and host+guest XCR0. */ … … 4041 5024 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_CR4); 4042 5025 4043 Log4Func(("u32GuestCr4=%#RX32 u32ShadowCr4=%#RX32 (fSetCr4=%#RX32 fZapCr4=%#RX32)\n", u32GuestCr4, u32ShadowCr4, fSetCr4, 4044 fZapCr4)); 5026 Log4Func(("cr4=%#RX64 shadow=%#RX64 set=%#RX64 zap=%#RX64)\n", u64GuestCr4, u64ShadowCr4, fSetCr4, fZapCr4)); 4045 5027 } 4046 5028 return rc; … … 4055 5037 * 4056 5038 * @returns VBox status code. 4057 * @param pVCpu The cross context virtual CPU structure. 5039 * @param pVCpu The cross context virtual CPU structure. 5040 * @param pVmxTransient The VMX-transient structure. 4058 5041 * 4059 5042 * @remarks No-long-jump zone!!! 4060 5043 */ 4061 static int hmR0VmxExportSharedDebugState(PVMCPU pVCpu )5044 static int hmR0VmxExportSharedDebugState(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) 4062 5045 { 4063 5046 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD)); 5047 5048 /** @todo NSTVMX: Figure out what we want to do with nested-guest instruction 5049 * stepping. */ 5050 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo; 5051 if (pVmxTransient->fIsNestedGuest) 5052 { 5053 int rc = VMXWriteVmcs32(VMX_VMCS_GUEST_DR7, CPUMGetGuestDR7(pVCpu)); 5054 AssertRCReturn(rc, rc); 5055 return VINF_SUCCESS; 5056 } 4064 5057 4065 5058 #ifdef VBOX_STRICT 4066 5059 /* Validate. Intel spec. 
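The CR4 export above writes both the real guest CR4 value (with the fixed bits applied, notably CR4.VMXE) and a CR4 read shadow. The shadow is what a guest read of CR4 returns for the bits covered by the guest/host mask, which is how the guest is kept from noticing bits that VT-x forces on. A hedged illustration of the architectural read behaviour (same scheme applies to CR0):

#include <stdint.h>

/* What MOV reg, CR4 returns to the guest: bits owned by the host (set in the
   guest/host mask) come from the read shadow, the rest from the real CR4. */
uint64_t GuestReadCr4(uint64_t uRealCr4, uint64_t uCr4Mask, uint64_t uCr4ReadShadow)
{
    return (uRealCr4 & ~uCr4Mask) | (uCr4ReadShadow & uCr4Mask);
}

For example, with VMXE included in the mask and clear in the shadow, the guest reads CR4.VMXE as 0 even though the value actually loaded into CR4 has it set.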
26.3.1.1 "Checks on Guest Controls Registers, Debug Registers, MSRs" */ 4067 if (pV Cpu->hm.s.vmx.Ctls.u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)5060 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG) 4068 5061 { 4069 5062 /* Validate. Intel spec. 17.2 "Debug Registers", recompiler paranoia checks. */ … … 4075 5068 bool fSteppingDB = false; 4076 5069 bool fInterceptMovDRx = false; 4077 uint32_t uProcCtls = pV Cpu->hm.s.vmx.Ctls.u32ProcCtls;5070 uint32_t uProcCtls = pVmcsInfo->u32ProcCtls; 4078 5071 if (pVCpu->hm.s.fSingleInstruction) 4079 5072 { … … 4100 5093 /* 4101 5094 * Use the combined guest and host DRx values found in the hypervisor register set 4102 * because the debugger has breakpoints active or someone is single stepping on the4103 * host side without a monitor trap flag.5095 * because the hypervisor debugger has breakpoints active or someone is single stepping 5096 * on the host side without a monitor trap flag. 4104 5097 * 4105 5098 * Note! DBGF expects a clean DR6 state before executing guest code. … … 4159 5152 * must intercept #DB in order to maintain a correct DR6 guest value, and 4160 5153 * because we need to intercept it to prevent nested #DBs from hanging the 4161 * CPU, we end up always having to intercept it. See hmR0VmxInitXcptBitmap.5154 * CPU, we end up always having to intercept it. See hmR0VmxSetupVmcsXcptBitmap(). 4162 5155 */ 4163 5156 #if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) … … 4185 5178 * monitor-trap flag and update our cache. 4186 5179 */ 4187 if (uProcCtls != pV Cpu->hm.s.vmx.Ctls.u32ProcCtls)5180 if (uProcCtls != pVmcsInfo->u32ProcCtls) 4188 5181 { 4189 5182 int rc2 = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, uProcCtls); 4190 5183 AssertRCReturn(rc2, rc2); 4191 pV Cpu->hm.s.vmx.Ctls.u32ProcCtls = uProcCtls;5184 pVmcsInfo->u32ProcCtls = uProcCtls; 4192 5185 } 4193 5186 … … 4230 5223 * 4231 5224 * @param pVCpu The cross context virtual CPU structure. 5225 * @param pVmcsInfo The VMCS info. object. 4232 5226 * 4233 5227 * @remarks Will import guest CR0 on strict builds during validation of 4234 5228 * segments. 4235 5229 */ 4236 static void hmR0VmxValidateSegmentRegs(PVMCPU pVCpu )5230 static void hmR0VmxValidateSegmentRegs(PVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo) 4237 5231 { 4238 5232 /* … … 4240 5234 * 4241 5235 * The reason we check for attribute value 0 in this function and not just the unusable bit is 4242 * because hmR0VmxExportGuestSeg mentReg() only updates the VMCS' copy of the value with the unusable bit4243 * and doesn't change the guest-context value.5236 * because hmR0VmxExportGuestSegReg() only updates the VMCS' copy of the value with the 5237 * unusable bit and doesn't change the guest-context value. 4244 5238 */ 4245 5239 PVM pVM = pVCpu->CTX_SUFF(pVM); 4246 5240 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 4247 hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_CR0);5241 hmR0VmxImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_CR0); 4248 5242 if ( !pVM->hm.s.vmx.fUnrestrictedGuest 4249 5243 && ( !CPUMIsGuestInRealModeEx(pCtx) … … 4287 5281 || (pCtx->ss.Attr.n.u1Granularity)); 4288 5282 } 4289 /* DS, ES, FS, GS - only check for usable selectors, see hmR0VmxExportGuestSeg mentReg(). */5283 /* DS, ES, FS, GS - only check for usable selectors, see hmR0VmxExportGuestSegReg(). */ 4290 5284 if (pCtx->ds.Attr.u && !(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE)) 4291 5285 { … … 4357 5351 { 4358 5352 /* Real and v86 mode checks. */ 4359 /* hmR0VmxExportGuestSeg mentReg() writes the modified in VMCS. We want what we're feeding to VT-x. 
*/5353 /* hmR0VmxExportGuestSegReg() writes the modified in VMCS. We want what we're feeding to VT-x. */ 4360 5354 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr; 4361 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active) 4362 { 4363 u32CSAttr = 0xf3; u32SSAttr = 0xf3; u32DSAttr = 0xf3; u32ESAttr = 0xf3; u32FSAttr = 0xf3; u32GSAttr = 0xf3; 5355 if (pVmcsInfo->RealMode.fRealOnV86Active) 5356 { 5357 u32CSAttr = 0xf3; u32SSAttr = 0xf3; u32DSAttr = 0xf3; 5358 u32ESAttr = 0xf3; u32FSAttr = 0xf3; u32GSAttr = 0xf3; 4364 5359 } 4365 5360 else … … 4409 5404 * 4410 5405 * @returns VBox status code. 4411 * @param pVCpu The cross context virtual CPU structure. 4412 * @param idxSel Index of the selector in the VMCS. 4413 * @param idxLimit Index of the segment limit in the VMCS. 4414 * @param idxBase Index of the segment base in the VMCS. 4415 * @param idxAccess Index of the access rights of the segment in the VMCS. 4416 * @param pSelReg Pointer to the segment selector. 5406 * @param pVCpu The cross context virtual CPU structure. 5407 * @param pVmcsInfo The VMCS info. object. 5408 * @param iSegReg The segment register number (X86_SREG_XXX). 5409 * @param pSelReg Pointer to the segment selector. 4417 5410 * 4418 5411 * @remarks No-long-jump zone!!! 4419 5412 */ 4420 static int hmR0VmxExportGuestSeg mentReg(PVMCPU pVCpu, uint32_t idxSel, uint32_t idxLimit, uint32_t idxBase, uint32_t idxAccess,4421 PCCPUMSELREG pSelReg) 4422 { 4423 int rc = VMXWriteVmcs32(idxSel, pSelReg->Sel); /* 16-bit guest selector field. */4424 rc |= VMXWriteVmcs32(idxLimit, pSelReg->u32Limit); /* 32-bit guest segment limit field. */4425 rc |= VMXWriteVmcsGstN(idxBase, pSelReg->u64Base); /* Natural width guest segment base field.*/4426 AssertRCReturn(rc, rc);5413 static int hmR0VmxExportGuestSegReg(PVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo, uint8_t iSegReg, PCCPUMSELREG pSelReg) 5414 { 5415 Assert(iSegReg < X86_SREG_COUNT); 5416 uint32_t const idxSel = g_aVmcsSegSel[iSegReg]; 5417 uint32_t const idxLimit = g_aVmcsSegLimit[iSegReg]; 5418 uint32_t const idxBase = g_aVmcsSegBase[iSegReg]; 5419 uint32_t const idxAttr = g_aVmcsSegAttr[iSegReg]; 4427 5420 4428 5421 uint32_t u32Access = pSelReg->Attr.u; 4429 if (pV Cpu->hm.s.vmx.RealMode.fRealOnV86Active)5422 if (pVmcsInfo->RealMode.fRealOnV86Active) 4430 5423 { 4431 5424 /* VT-x requires our real-using-v86 mode hack to override the segment access-right bits. */ … … 4433 5426 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS); 4434 5427 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM))); 5428 RT_NOREF_PV(pVCpu); 4435 5429 } 4436 5430 else … … 4451 5445 ("Access bit not set for usable segment. idx=%#x sel=%#x attr %#x\n", idxBase, pSelReg, pSelReg->Attr.u)); 4452 5446 4453 rc = VMXWriteVmcs32(idxAccess, u32Access); /* 32-bit guest segment access-rights field. */ 5447 /* 5448 * Commit it to the VMCS. 5449 */ 5450 int rc = VMXWriteVmcs32(idxSel, pSelReg->Sel); 5451 rc |= VMXWriteVmcs32(idxLimit, pSelReg->u32Limit); 5452 rc |= VMXWriteVmcsGstN(idxBase, pSelReg->u64Base); 5453 rc |= VMXWriteVmcs32(idxAttr, u32Access); 4454 5454 AssertRCReturn(rc, rc); 4455 5455 return rc; … … 4458 5458 4459 5459 /** 4460 * Exports the guest segment registers, GDTR, IDTR, LDTR, (TR, FS and GS bases)4461 * into the guest-statearea in the VMCS.5460 * Exports the guest segment registers, GDTR, IDTR, LDTR, TR into the guest-state 5461 * area in the VMCS. 4462 5462 * 4463 5463 * @returns VBox status code. 4464 * @param pVCpu The cross context virtual CPU structure. 
5464 * @param pVCpu The cross context virtual CPU structure. 5465 * @param pVmxTransient The VMX-transient structure. 4465 5466 * 4466 5467 * @remarks Will import guest CR0 on strict builds during validation of … … 4468 5469 * @remarks No-long-jump zone!!! 4469 5470 */ 4470 static int hmR0VmxExportGuestSegmentRegs(PVMCPU pVCpu) 4471 { 4472 int rc = VERR_INTERNAL_ERROR_5; 4473 PVM pVM = pVCpu->CTX_SUFF(pVM); 4474 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 5471 static int hmR0VmxExportGuestSegRegsXdtr(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) 5472 { 5473 int rc = VERR_INTERNAL_ERROR_5; 5474 PVM pVM = pVCpu->CTX_SUFF(pVM); 5475 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 5476 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo; 4475 5477 4476 5478 /* … … 4482 5484 if (!pVM->hm.s.vmx.fUnrestrictedGuest) 4483 5485 { 5486 Assert(!pVmxTransient->fIsNestedGuest); 4484 5487 Assert(pVM->hm.s.vmx.pRealModeTSS); 4485 5488 AssertCompile(PGMMODE_REAL < PGMMODE_PROTECTED); 4486 if ( pV Cpu->hm.s.vmx.fWasInRealMode5489 if ( pVmcsInfo->fWasInRealMode 4487 5490 && PGMGetGuestMode(pVCpu) >= PGMMODE_PROTECTED) 4488 5491 { … … 4491 5494 REMFlushTBs(pVM); 4492 5495 Log4Func(("Switch to protected mode detected!\n")); 4493 pV Cpu->hm.s.vmx.fWasInRealMode = false;5496 pVmcsInfo->fWasInRealMode = false; 4494 5497 } 4495 5498 } … … 4498 5501 { 4499 5502 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CS); 4500 if (pV Cpu->hm.s.vmx.RealMode.fRealOnV86Active)4501 pV Cpu->hm.s.vmx.RealMode.AttrCS.u = pCtx->cs.Attr.u;4502 rc = HMVMX_EXPORT_SREG(CS, &pCtx->cs);5503 if (pVmcsInfo->RealMode.fRealOnV86Active) 5504 pVmcsInfo->RealMode.AttrCS.u = pCtx->cs.Attr.u; 5505 rc = hmR0VmxExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_CS, &pCtx->cs); 4503 5506 AssertRCReturn(rc, rc); 4504 5507 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_CS); … … 4508 5511 { 4509 5512 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SS); 4510 if (pV Cpu->hm.s.vmx.RealMode.fRealOnV86Active)4511 pV Cpu->hm.s.vmx.RealMode.AttrSS.u = pCtx->ss.Attr.u;4512 rc = HMVMX_EXPORT_SREG(SS, &pCtx->ss);5513 if (pVmcsInfo->RealMode.fRealOnV86Active) 5514 pVmcsInfo->RealMode.AttrSS.u = pCtx->ss.Attr.u; 5515 rc = hmR0VmxExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_SS, &pCtx->ss); 4513 5516 AssertRCReturn(rc, rc); 4514 5517 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_SS); … … 4518 5521 { 4519 5522 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DS); 4520 if (pV Cpu->hm.s.vmx.RealMode.fRealOnV86Active)4521 pV Cpu->hm.s.vmx.RealMode.AttrDS.u = pCtx->ds.Attr.u;4522 rc = HMVMX_EXPORT_SREG(DS, &pCtx->ds);5523 if (pVmcsInfo->RealMode.fRealOnV86Active) 5524 pVmcsInfo->RealMode.AttrDS.u = pCtx->ds.Attr.u; 5525 rc = hmR0VmxExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_DS, &pCtx->ds); 4523 5526 AssertRCReturn(rc, rc); 4524 5527 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_DS); … … 4528 5531 { 4529 5532 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_ES); 4530 if (pV Cpu->hm.s.vmx.RealMode.fRealOnV86Active)4531 pV Cpu->hm.s.vmx.RealMode.AttrES.u = pCtx->es.Attr.u;4532 rc = HMVMX_EXPORT_SREG(ES, &pCtx->es);5533 if (pVmcsInfo->RealMode.fRealOnV86Active) 5534 pVmcsInfo->RealMode.AttrES.u = pCtx->es.Attr.u; 5535 rc = hmR0VmxExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_ES, &pCtx->es); 4533 5536 AssertRCReturn(rc, rc); 4534 5537 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_ES); … … 4538 5541 { 4539 5542 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_FS); 4540 if (pV Cpu->hm.s.vmx.RealMode.fRealOnV86Active)4541 pV Cpu->hm.s.vmx.RealMode.AttrFS.u = 
pCtx->fs.Attr.u;4542 rc = HMVMX_EXPORT_SREG(FS, &pCtx->fs);5543 if (pVmcsInfo->RealMode.fRealOnV86Active) 5544 pVmcsInfo->RealMode.AttrFS.u = pCtx->fs.Attr.u; 5545 rc = hmR0VmxExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_FS, &pCtx->fs); 4543 5546 AssertRCReturn(rc, rc); 4544 5547 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_FS); … … 4548 5551 { 4549 5552 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GS); 4550 if (pV Cpu->hm.s.vmx.RealMode.fRealOnV86Active)4551 pV Cpu->hm.s.vmx.RealMode.AttrGS.u = pCtx->gs.Attr.u;4552 rc = HMVMX_EXPORT_SREG(GS, &pCtx->gs);5553 if (pVmcsInfo->RealMode.fRealOnV86Active) 5554 pVmcsInfo->RealMode.AttrGS.u = pCtx->gs.Attr.u; 5555 rc = hmR0VmxExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_GS, &pCtx->gs); 4553 5556 AssertRCReturn(rc, rc); 4554 5557 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_GS); … … 4556 5559 4557 5560 #ifdef VBOX_STRICT 4558 hmR0VmxValidateSegmentRegs(pVCpu );5561 hmR0VmxValidateSegmentRegs(pVCpu, pVmcsInfo); 4559 5562 #endif 4560 4561 Log4Func(("CS=%#RX16 Base=%#RX64 Limit=%#RX32 Attr=%#RX32\n", pCtx->cs.Sel, pCtx->cs.u64Base, 4562 pCtx->cs.u32Limit, pCtx->cs.Attr.u)); 5563 Log4Func(("cs={%#04x base=%#RX64 limit=%#RX32 attr=%#RX32}\n", pCtx->cs.Sel, pCtx->cs.u64Base, pCtx->cs.u32Limit, 5564 pCtx->cs.Attr.u)); 4563 5565 } 4564 5566 … … 4575 5577 * handle INT-n's) in the TSS. See hmR3InitFinalizeR0() to see how pRealModeTSS is setup. 4576 5578 */ 4577 uint16_t u16Sel = 0; 4578 uint32_t u32Limit = 0; 4579 uint64_t u64Base = 0; 4580 uint32_t u32AccessRights = 0; 4581 4582 if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active) 5579 uint16_t u16Sel; 5580 uint32_t u32Limit; 5581 uint64_t u64Base; 5582 uint32_t u32AccessRights; 5583 if (!pVmcsInfo->RealMode.fRealOnV86Active) 4583 5584 { 4584 5585 u16Sel = pCtx->tr.Sel; … … 4589 5590 else 4590 5591 { 5592 Assert(!pVmxTransient->fIsNestedGuest); 4591 5593 Assert(pVM->hm.s.vmx.pRealModeTSS); 4592 5594 Assert(PDMVmmDevHeapIsEnabled(pVM)); /* Guaranteed by HMCanExecuteGuest() -XXX- what about inner loop changes? */ … … 4604 5606 u16Sel = 0; 4605 5607 u32Limit = HM_VTX_TSS_SIZE; 4606 u64Base = GCPhys; /* in real-mode phys = virt. */5608 u64Base = GCPhys; 4607 5609 u32AccessRights = DescAttr.u; 4608 5610 } … … 4629 5631 4630 5632 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_TR); 4631 Log4Func((" TR base=%#RX64\n", pCtx->tr.u64Base));5633 Log4Func(("tr base=%#RX64 limit=%#RX32\n", pCtx->tr.u64Base, pCtx->tr.u32Limit)); 4632 5634 } 4633 5635 … … 4647 5649 4648 5650 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_GDTR); 4649 Log4Func((" GDTR base=%#RX64\n", pCtx->gdtr.pGdt));5651 Log4Func(("gdtr base=%#RX64 limit=%#RX32\n", pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt)); 4650 5652 } 4651 5653 … … 4658 5660 4659 5661 /* The unusable bit is specific to VT-x, if it's a null selector mark it as an unusable segment. 
*/ 4660 uint32_t u32Access = 0; 4661 if (!pCtx->ldtr.Attr.u) 5662 uint32_t u32Access; 5663 if ( !pVmxTransient->fIsNestedGuest 5664 && !pCtx->ldtr.Attr.u) 4662 5665 u32Access = X86DESCATTR_UNUSABLE; 4663 5666 else … … 4686 5689 4687 5690 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_LDTR); 4688 Log4Func((" LDTR base=%#RX64\n", pCtx->ldtr.u64Base));5691 Log4Func(("ldtr base=%#RX64 limit=%#RX32\n", pCtx->ldtr.u64Base, pCtx->ldtr.u32Limit)); 4689 5692 } 4690 5693 … … 4704 5707 4705 5708 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_IDTR); 4706 Log4Func((" IDTR base=%#RX64\n", pCtx->idtr.pIdt));5709 Log4Func(("idtr base=%#RX64 limit=%#RX32\n", pCtx->idtr.pIdt, pCtx->idtr.cbIdt)); 4707 5710 } 4708 5711 … … 4716 5719 * 4717 5720 * These MSRs will automatically be loaded to the host CPU on every successful 4718 * VM-entry and stored from the host CPU on every successful VM-exit. This also 4719 * creates/updates MSR slots for the host MSRs. The actual host MSR values are 4720 * -not- updated here for performance reasons. See hmR0VmxExportHostMsrs(). 4721 * 4722 * Also exports the guest sysenter MSRs into the guest-state area in the VMCS. 5721 * VM-entry and stored from the host CPU on every successful VM-exit. 5722 * 5723 * We creates/updates MSR slots for the host MSRs in the VM-exit MSR-load area. The 5724 * actual host MSR values are not- updated here for performance reasons. See 5725 * hmR0VmxExportHostMsrs(). 5726 * 5727 * We also exports the guest sysenter MSRs into the guest-state area in the VMCS. 4723 5728 * 4724 5729 * @returns VBox status code. 4725 * @param pVCpu The cross context virtual CPU structure. 5730 * @param pVCpu The cross context virtual CPU structure. 5731 * @param pVmxTransient The VMX-transient structure. 4726 5732 * 4727 5733 * @remarks No-long-jump zone!!! 4728 5734 */ 4729 static int hmR0VmxExportGuestMsrs(PVMCPU pVCpu )5735 static int hmR0VmxExportGuestMsrs(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) 4730 5736 { 4731 5737 AssertPtr(pVCpu); 4732 AssertPtr(pVCpu->hm.s.vmx.pvGuestMsr); 5738 AssertPtr(pVmxTransient); 5739 5740 PVM pVM = pVCpu->CTX_SUFF(pVM); 5741 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 4733 5742 4734 5743 /* 4735 5744 * MSRs that we use the auto-load/store MSR area in the VMCS. 4736 5745 * For 64-bit hosts, we load/restore them lazily, see hmR0VmxLazyLoadGuestMsrs(). 4737 */ 4738 PVM pVM = pVCpu->CTX_SUFF(pVM); 4739 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 5746 * The host MSR values are updated when it's safe in hmR0VmxLazySaveHostMsrs(). 5747 * 5748 * For nested-guests, the guests MSRs from the VM-entry MSR-load area are already 5749 * loaded (into the guest-CPU context) by the VMLAUNCH/VMRESUME instruction 5750 * emulation, nothing to do here. 
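The VM-entry MSR-load / VM-exit MSR-store areas referred to here are arrays of 16-byte entries (32-bit MSR index, reserved dword, 64-bit value). A standalone sketch of the add-or-update bookkeeping, with illustrative names and a fixed capacity in place of the CPU-reported limit:

#include <stdbool.h>
#include <stdint.h>

typedef struct MSRENTRY
{
    uint32_t u32Msr;
    uint32_t u32Reserved;
    uint64_t u64Value;
} MSRENTRY;

typedef struct MSRAREA
{
    MSRENTRY aEntries[32];      /* illustrative capacity */
    uint32_t cEntries;
} MSRAREA;

/* Add an MSR to the area, or update its value in place if already present. */
static bool MsrAreaAddOrUpdate(MSRAREA *pArea, uint32_t u32Msr, uint64_t u64Value)
{
    for (uint32_t i = 0; i < pArea->cEntries; i++)
        if (pArea->aEntries[i].u32Msr == u32Msr)
        {
            pArea->aEntries[i].u64Value = u64Value;
            return true;
        }
    if (pArea->cEntries >= sizeof(pArea->aEntries) / sizeof(pArea->aEntries[0]))
        return false;           /* area full; real code also honors the CPU's MSR count limit */
    pArea->aEntries[pArea->cEntries].u32Msr      = u32Msr;
    pArea->aEntries[pArea->cEntries].u32Reserved = 0;
    pArea->aEntries[pArea->cEntries].u64Value    = u64Value;
    pArea->cEntries++;
    return true;
}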
5751 */ 4740 5752 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_VMX_GUEST_AUTO_MSRS) 4741 5753 { 4742 if (pVM->hm.s.fAllow64BitGuests) 5754 if ( !pVmxTransient->fIsNestedGuest 5755 && pVM->hm.s.fAllow64BitGuests) 4743 5756 { 4744 5757 #if HC_ARCH_BITS == 32 4745 5758 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SYSCALL_MSRS | CPUMCTX_EXTRN_KERNEL_GS_BASE); 4746 4747 int rc = hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_LSTAR, pCtx->msrLSTAR, false, NULL); 4748 rc |= hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K6_STAR, pCtx->msrSTAR, false, NULL); 4749 rc |= hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_SF_MASK, pCtx->msrSFMASK, false, NULL); 4750 rc |= hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_KERNEL_GS_BASE, pCtx->msrKERNELGSBASE, false, NULL); 5759 Assert(!pVmxTransient->fIsNestedGuest); 5760 5761 int rc = hmR0VmxAddAutoLoadStoreMsr(pVCpu, pVmxTransient, MSR_K8_LSTAR, pCtx->msrLSTAR, true, false); 5762 rc |= hmR0VmxAddAutoLoadStoreMsr(pVCpu, pVmxTransient, MSR_K6_STAR, pCtx->msrSTAR, true, false); 5763 rc |= hmR0VmxAddAutoLoadStoreMsr(pVCpu, pVmxTransient, MSR_K8_SF_MASK, pCtx->msrSFMASK, true, false); 5764 rc |= hmR0VmxAddAutoLoadStoreMsr(pVCpu, pVmxTransient, MSR_K8_KERNEL_GS_BASE, pCtx->msrKERNELGSBASE, true, false); 4751 5765 AssertRCReturn(rc, rc); 4752 # ifdef LOG_ENABLED4753 PCVMXAUTOMSR pMsr = (PCVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;4754 for (uint32_t i = 0; i < pVCpu->hm.s.vmx.cMsrs; i++, pMsr++)4755 Log4Func(("MSR[%RU32]: u32Msr=%#RX32 u64Value=%#RX64\n", i, pMsr->u32Msr, pMsr->u64Value));4756 # endif4757 5766 #endif 4758 5767 } … … 4762 5771 /* 4763 5772 * Guest Sysenter MSRs. 4764 * These flags are only set when MSR-bitmaps are not supported by the CPU and we cause4765 * VM-exits on WRMSRs for these MSRs.4766 5773 */ 4767 5774 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_SYSENTER_MSR_MASK) … … 4791 5798 } 4792 5799 5800 /* 5801 * Guest/host EFER MSR. 5802 */ 4793 5803 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_EFER_MSR) 4794 5804 { 5805 /* Whether we are using the VMCS to swap the EFER MSR must have been 5806 determined earlier while exporting VM-entry/VM-exit controls. */ 5807 Assert(!(ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_VMX_ENTRY_EXIT_CTLS)); 4795 5808 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_EFER); 4796 5809 … … 4804 5817 { 4805 5818 int rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_EFER_FULL, pCtx->msrEFER); 4806 AssertRCReturn(rc,rc); 4807 Log4Func(("EFER=%#RX64\n", pCtx->msrEFER)); 5819 AssertRCReturn(rc, rc); 4808 5820 } 4809 5821 else 4810 5822 { 4811 int rc = hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K6_EFER, pCtx->msrEFER, false /* fUpdateHostMsr */, 4812 NULL /* pfAddedAndUpdated */); 5823 /* 5824 * We shall use the auto-load/store MSR area only for loading the EFER MSR but we must 5825 * continue to intercept guest read and write accesses to it, see @bugref{7386#c16}. 5826 */ 5827 int rc = hmR0VmxAddAutoLoadStoreMsr(pVCpu, pVmxTransient, MSR_K6_EFER, pCtx->msrEFER, 5828 false /* fSetReadWrite */, false /* fUpdateHostMsr */); 4813 5829 AssertRCReturn(rc, rc); 4814 4815 /* We need to intercept reads too, see @bugref{7386#c16}. 
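The read intercept referred to here is configured through the MSR bitmap when the CPU supports it. Per the Intel SDM the 4KB bitmap is split into four 1KB parts: reads of MSRs 0h-1FFFh, reads of C0000000h-C0001FFFh, then the matching write bitmaps; a set bit forces a VM-exit. A standalone sketch with a hypothetical helper name:

#include <stdbool.h>
#include <stdint.h>

/* Sets the read and/or write intercept bit for an MSR in a 4KB VT-x MSR bitmap.
   Layout: 0x000 read-low, 0x400 read-high, 0x800 write-low, 0xC00 write-high. */
static bool MsrBitmapSetIntercept(uint8_t *pbBitmap, uint32_t idMsr, bool fRead, bool fWrite)
{
    uint32_t offBase;
    if (idMsr <= UINT32_C(0x00001fff))
        offBase = 0;
    else if (idMsr - UINT32_C(0xc0000000) <= UINT32_C(0x1fff))
        offBase = 0x400;
    else
        return false;                            /* MSRs outside both ranges always exit */
    uint32_t const iBit = idMsr & UINT32_C(0x1fff);
    if (fRead)
        pbBitmap[offBase + (iBit >> 3)]         |= (uint8_t)(1u << (iBit & 7));
    if (fWrite)
        pbBitmap[offBase + 0x800 + (iBit >> 3)] |= (uint8_t)(1u << (iBit & 7));
    return true;
}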
*/4816 if (pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_MSR_BITMAPS)4817 hmR0VmxSetMsrPermission(pVCpu, MSR_K6_EFER, VMXMSREXIT_INTERCEPT_READ, VMXMSREXIT_INTERCEPT_WRITE);4818 Log4Func(("MSR[--]: u32Msr=%#RX32 u64Value=%#RX64 cMsrs=%u\n", MSR_K6_EFER, pCtx->msrEFER,4819 pVCpu->hm.s.vmx.cMsrs));4820 5830 } 4821 5831 } 4822 5832 else if (!pVM->hm.s.vmx.fSupportsVmcsEfer) 4823 hmR0VmxRemoveAutoLoadStoreMsr(pVCpu, MSR_K6_EFER); 5833 hmR0VmxRemoveAutoLoadStoreMsr(pVCpu, pVmxTransient, MSR_K6_EFER); 5834 4824 5835 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_EFER_MSR); 4825 5836 } 4826 5837 5838 /* 5839 * Other MSRs. 5840 * Speculation Control (R/W). 5841 */ 5842 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_OTHER_MSRS) 5843 { 5844 HMVMX_CPUMCTX_ASSERT(pVCpu, HM_CHANGED_GUEST_OTHER_MSRS); 5845 if (pVM->cpum.ro.GuestFeatures.fIbrs) 5846 { 5847 int rc = hmR0VmxAddAutoLoadStoreMsr(pVCpu, pVmxTransient, MSR_IA32_SPEC_CTRL, CPUMGetGuestSpecCtrl(pVCpu), 5848 false /* fSetReadWrite */, false /* fUpdateHostMsr */); 5849 AssertRCReturn(rc, rc); 5850 } 5851 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_OTHER_MSRS); 5852 } 5853 4827 5854 return VINF_SUCCESS; 4828 5855 } 4829 4830 4831 #if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)4832 /**4833 * Check if guest state allows safe use of 32-bit switcher again.4834 *4835 * Segment bases and protected mode structures must be 32-bit addressable4836 * because the 32-bit switcher will ignore high dword when writing these VMCS4837 * fields. See @bugref{8432} for details.4838 *4839 * @returns true if safe, false if must continue to use the 64-bit switcher.4840 * @param pCtx Pointer to the guest-CPU context.4841 *4842 * @remarks No-long-jump zone!!!4843 */4844 static bool hmR0VmxIs32BitSwitcherSafe(PCCPUMCTX pCtx)4845 {4846 if (pCtx->gdtr.pGdt & UINT64_C(0xffffffff00000000)) return false;4847 if (pCtx->idtr.pIdt & UINT64_C(0xffffffff00000000)) return false;4848 if (pCtx->ldtr.u64Base & UINT64_C(0xffffffff00000000)) return false;4849 if (pCtx->tr.u64Base & UINT64_C(0xffffffff00000000)) return false;4850 if (pCtx->es.u64Base & UINT64_C(0xffffffff00000000)) return false;4851 if (pCtx->cs.u64Base & UINT64_C(0xffffffff00000000)) return false;4852 if (pCtx->ss.u64Base & UINT64_C(0xffffffff00000000)) return false;4853 if (pCtx->ds.u64Base & UINT64_C(0xffffffff00000000)) return false;4854 if (pCtx->fs.u64Base & UINT64_C(0xffffffff00000000)) return false;4855 if (pCtx->gs.u64Base & UINT64_C(0xffffffff00000000)) return false;4856 4857 /* All good, bases are 32-bit. */4858 return true;4859 }4860 #endif4861 5856 4862 5857 … … 4866 5861 * @returns VBox status code. 4867 5862 * @param pVCpu The cross context virtual CPU structure. 5863 * @param pVmcsInfo The VMCS info. object. 4868 5864 * 4869 5865 * @remarks No-long-jump zone!!! 4870 5866 */ 4871 static int hmR0VmxSelectVMRunHandler(PVMCPU pVCpu) 4872 { 4873 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 5867 static int hmR0VmxSelectVMRunHandler(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) 5868 { 5869 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 5870 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo; 5871 4874 5872 if (CPUMIsGuestInLongModeEx(pCtx)) 4875 5873 { … … 4880 5878 #if HC_ARCH_BITS == 32 4881 5879 /* 32-bit host. We need to switch to 64-bit before running the 64-bit guest. 
*/ 4882 if (pV Cpu->hm.s.vmx.pfnStartVM != VMXR0SwitcherStartVM64)5880 if (pVmcsInfo->pfnStartVM != VMXR0SwitcherStartVM64) 4883 5881 { 4884 5882 #ifdef VBOX_STRICT 4885 if (pV Cpu->hm.s.vmx.pfnStartVM != NULL) /* Very firstentry would have saved host-state already, ignore it. */5883 if (pVmcsInfo->pfnStartVM != NULL) /* Very first VM-entry would have saved host-state already, ignore it. */ 4886 5884 { 4887 5885 /* Currently, all mode changes sends us back to ring-3, so these should be set. See @bugref{6944}. */ 4888 5886 uint64_t const fCtxChanged = ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged); 4889 5887 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE(); 4890 AssertMsg(fCtxChanged & ( HM_CHANGED_VMX_EXIT_CTLS 4891 | HM_CHANGED_VMX_ENTRY_CTLS 4892 | HM_CHANGED_GUEST_EFER_MSR), ("fCtxChanged=%#RX64\n", fCtxChanged)); 5888 AssertMsg(fCtxChanged & (HM_CHANGED_VMX_ENTRY_EXIT_CTLS | HM_CHANGED_GUEST_EFER_MSR), 5889 ("fCtxChanged=%#RX64\n", fCtxChanged)); 4893 5890 } 4894 5891 #endif 4895 pV Cpu->hm.s.vmx.pfnStartVM = VMXR0SwitcherStartVM64;5892 pVmcsInfo->pfnStartVM = VMXR0SwitcherStartVM64; 4896 5893 4897 5894 /* Mark that we've switched to 64-bit handler, we can't safely switch back to 32-bit for 4898 5895 the rest of the VM run (until VM reset). See @bugref{8432#c7}. */ 4899 pV Cpu->hm.s.vmx.fSwitchedTo64on32 = true;5896 pVmcsInfo->fSwitchedTo64on32 = true; 4900 5897 Log4Func(("Selected 64-bit switcher\n")); 4901 5898 } 4902 5899 #else 4903 5900 /* 64-bit host. */ 4904 pV Cpu->hm.s.vmx.pfnStartVM = VMXR0StartVM64;5901 pVmcsInfo->pfnStartVM = VMXR0StartVM64; 4905 5902 #endif 4906 5903 } … … 4909 5906 /* Guest is not in long mode, use the 32-bit handler. */ 4910 5907 #if HC_ARCH_BITS == 32 4911 if ( pV Cpu->hm.s.vmx.pfnStartVM != VMXR0StartVM324912 && !pV Cpu->hm.s.vmx.fSwitchedTo64on32/* If set, guest mode change does not imply switcher change. */4913 && pV Cpu->hm.s.vmx.pfnStartVM != NULL) /* Very firstentry would have saved host-state already, ignore it. */5908 if ( pVmcsInfo->pfnStartVM != VMXR0StartVM32 5909 && !pVmcsInfo->fSwitchedTo64on32 /* If set, guest mode change does not imply switcher change. */ 5910 && pVmcsInfo->pfnStartVM != NULL) /* Very first VM-entry would have saved host-state already, ignore it. */ 4914 5911 { 4915 5912 # ifdef VBOX_STRICT … … 4917 5914 uint64_t const fCtxChanged = ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged); 4918 5915 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE(); 4919 AssertMsg(fCtxChanged & ( HM_CHANGED_VMX_EXIT_CTLS 4920 | HM_CHANGED_VMX_ENTRY_CTLS 4921 | HM_CHANGED_GUEST_EFER_MSR), ("fCtxChanged=%#RX64\n", fCtxChanged)); 5916 AssertMsg(fCtxChanged & (HM_CHANGED_VMX_ENTRY_EXIT_CTLS | HM_CHANGED_GUEST_EFER_MSR), 5917 ("fCtxChanged=%#RX64\n", fCtxChanged)); 4922 5918 # endif 4923 5919 } … … 4926 5922 * Keep using the 64-bit switcher even though we're in 32-bit because of bad Intel 4927 5923 * design, see @bugref{8432#c7}. If real-on-v86 mode is active, clear the 64-bit 4928 * switcher flag because nowwe know the guest is in a sane state where it's safe4929 * to use the 32-bit switcher. Otherwise check the guest state if it's safe to use5924 * switcher flag now because we know the guest is in a sane state where it's safe 5925 * to use the 32-bit switcher. Otherwise, check the guest state if it's safe to use 4930 5926 * the much faster 32-bit switcher again. 
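hmR0VmxIs32BitSwitcherSafe(), whose body appears in the removed hunk further up, simply verifies that every descriptor-table and segment base is 32-bit addressable. A generic standalone version of that predicate (plain C, hypothetical name):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Returns true if none of the given base addresses has bits set above bit 31,
   i.e. all of them are reachable by a 32-bit switcher that ignores the high dword. */
static bool AreBasesBelow4G(const uint64_t *pau64Bases, size_t cBases)
{
    for (size_t i = 0; i < cBases; i++)
        if (pau64Bases[i] & UINT64_C(0xffffffff00000000))
            return false;
    return true;
}

/* Usage sketch: collect GDTR, IDTR, LDTR, TR and the six segment bases into an
   array and feed it to AreBasesBelow4G() before reverting to the 32-bit switcher. */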
4931 5927 */ 4932 if (!pV Cpu->hm.s.vmx.fSwitchedTo64on32)4933 { 4934 if (pV Cpu->hm.s.vmx.pfnStartVM != VMXR0StartVM32)5928 if (!pVmcsInfo->fSwitchedTo64on32) 5929 { 5930 if (pVmcsInfo->pfnStartVM != VMXR0StartVM32) 4935 5931 Log4Func(("Selected 32-bit switcher\n")); 4936 5932 pVCpu->hm.s.vmx.pfnStartVM = VMXR0StartVM32; … … 4938 5934 else 4939 5935 { 4940 Assert(pV Cpu->hm.s.vmx.pfnStartVM == VMXR0SwitcherStartVM64);4941 if ( pV Cpu->hm.s.vmx.RealMode.fRealOnV86Active5936 Assert(pVmcsInfo->pfnStartVM == VMXR0SwitcherStartVM64); 5937 if ( pVmcsInfo->RealMode.fRealOnV86Active 4942 5938 || hmR0VmxIs32BitSwitcherSafe(pCtx)) 4943 5939 { 4944 pV Cpu->hm.s.vmx.fSwitchedTo64on32 = false;4945 pV Cpu->hm.s.vmx.pfnStartVM = VMXR0StartVM32;5940 pVmcsInfo->fSwitchedTo64on32 = false; 5941 pVmcsInfo->pfnStartVM = VMXR0StartVM32; 4946 5942 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_EFER_MSR 4947 | HM_CHANGED_VMX_ENTRY_CTLS 4948 | HM_CHANGED_VMX_EXIT_CTLS 5943 | HM_CHANGED_VMX_ENTRY_EXIT_CTLS 4949 5944 | HM_CHANGED_HOST_CONTEXT); 4950 5945 Log4Func(("Selected 32-bit switcher (safe)\n")); … … 4952 5947 } 4953 5948 # else 4954 pV Cpu->hm.s.vmx.pfnStartVM = VMXR0StartVM32;5949 pVmcsInfo->pfnStartVM = VMXR0StartVM32; 4955 5950 # endif 4956 5951 #else 4957 pV Cpu->hm.s.vmx.pfnStartVM = VMXR0StartVM32;5952 pVmcsInfo->pfnStartVM = VMXR0StartVM32; 4958 5953 #endif 4959 5954 } 4960 Assert(pV Cpu->hm.s.vmx.pfnStartVM);5955 Assert(pVmcsInfo->pfnStartVM); 4961 5956 return VINF_SUCCESS; 4962 5957 } … … 4967 5962 * 4968 5963 * @returns VBox status code, no informational status codes. 4969 * @param pVCpu The cross context virtual CPU structure. 5964 * @param pVCpu The cross context virtual CPU structure. 5965 * @param pVmxTransient The VMX-transient structure. 4970 5966 * 4971 5967 * @remarks No-long-jump zone!!! 4972 5968 */ 4973 DECLINLINE(int) hmR0VmxRunGuest(PVMCPU pVCpu )5969 DECLINLINE(int) hmR0VmxRunGuest(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) 4974 5970 { 4975 5971 /* Mark that HM is the keeper of all guest-CPU registers now that we're going to execute guest code. */ 4976 5972 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 4977 5973 pCtx->fExtrn |= HMVMX_CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_KEEPER_HM; 5974 5975 /** @todo Add stats for VMRESUME vs VMLAUNCH. */ 4978 5976 4979 5977 /* … … 4984 5982 * See MSDN "Configuring Programs for 64-bit/x64 Software Conventions / Register Usage". 4985 5983 */ 4986 bool const fResumeVM = RT_BOOL(pVCpu->hm.s.vmx.fVmcsState & HMVMX_VMCS_STATE_LAUNCHED);4987 /** @todo Add stats for resume vs launch. */5984 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo; 5985 bool const fResumeVM = RT_BOOL(pVmcsInfo->fVmcsState & VMX_V_VMCS_LAUNCH_STATE_LAUNCHED); 4988 5986 PVM pVM = pVCpu->CTX_SUFF(pVM); 4989 5987 #ifdef VBOX_WITH_KERNEL_USING_XMM 4990 int rc = hmR0VMXStartVMWrapXMM(fResumeVM, pCtx, &pVCpu->hm.s.vmx.Vmcs BatchCache, pVM, pVCpu, pVCpu->hm.s.vmx.pfnStartVM);5988 int rc = hmR0VMXStartVMWrapXMM(fResumeVM, pCtx, &pVCpu->hm.s.vmx.VmcsCache, pVM, pVCpu, pVmcsInfo->pfnStartVM); 4991 5989 #else 4992 int rc = pV Cpu->hm.s.vmx.pfnStartVM(fResumeVM, pCtx, &pVCpu->hm.s.vmx.VmcsBatchCache, pVM, pVCpu);5990 int rc = pVmcsInfo->pfnStartVM(fResumeVM, pCtx, &pVCpu->hm.s.vmx.VmcsCache, pVM, pVCpu); 4993 5991 #endif 4994 5992 AssertMsg(rc <= VINF_SUCCESS, ("%Rrc\n", rc)); … … 5002 6000 * @param pVCpu The cross context virtual CPU structure. 5003 6001 * @param rcVMRun The return code from VMLAUNCH/VMRESUME. 
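The fResumeVM flag in hmR0VmxRunGuest() follows the usual VMCS launch-state rule: the first entry on a cleared VMCS must use VMLAUNCH, later entries on the same current VMCS use VMRESUME. A hedged standalone sketch of that selection, with stub instruction wrappers in place of the real assembly helpers:

#include <stdbool.h>

typedef enum { VMCS_STATE_CLEAR = 0, VMCS_STATE_LAUNCHED = 1 } VMCSSTATE;

/* Stubs standing in for the VMLAUNCH/VMRESUME instruction wrappers. */
static int DoVmlaunch(void) { return 0; }
static int DoVmresume(void) { return 0; }

/* Enter the guest: VMLAUNCH on a freshly cleared VMCS, VMRESUME afterwards.
   On success the VMCS transitions to (or stays in) the launched state. */
static int RunGuest(VMCSSTATE *penmState)
{
    int rc;
    if (*penmState == VMCS_STATE_LAUNCHED)
        rc = DoVmresume();
    else
        rc = DoVmlaunch();
    if (rc == 0)
        *penmState = VMCS_STATE_LAUNCHED;
    return rc;
}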
5004 * @param pVmxTransient Pointer to the VMXtransient structure (only6002 * @param pVmxTransient The VMX-transient structure (only 5005 6003 * exitReason updated). 5006 6004 */ … … 5030 6028 5031 6029 #ifdef VBOX_STRICT 6030 PVMXVMCSINFO pVmcsInfo = hmGetVmxActiveVmcsInfo(pVCpu); 5032 6031 Log4(("uExitReason %#RX32 (VmxTransient %#RX16)\n", pVCpu->hm.s.vmx.LastError.u32ExitReason, 5033 6032 pVmxTransient->uExitReason)); … … 5049 6048 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, &u32Val); AssertRC(rc); 5050 6049 Log4(("VMX_VMCS32_CTRL_PROC_EXEC %#RX32\n", u32Val)); 5051 if (pV Cpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_USE_SECONDARY_CTLS)6050 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_SECONDARY_CTLS) 5052 6051 { 5053 6052 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, &u32Val); AssertRC(rc); … … 5254 6253 5255 6254 /** 5256 * Executes the specified handler in 64-bit mode.5257 *5258 * @returns VBox status code (no informational status codes).5259 * @param pVCpu The cross context virtual CPU structure.5260 * @param enmOp The operation to perform.5261 * @param cParams Number of parameters.5262 * @param paParam Array of 32-bit parameters.5263 */5264 VMMR0DECL(int) VMXR0Execute64BitsHandler(PVMCPU pVCpu, HM64ON32OP enmOp, uint32_t cParams, uint32_t *paParam)5265 {5266 PVM pVM = pVCpu->CTX_SUFF(pVM);5267 AssertReturn(pVM->hm.s.pfnHost32ToGuest64R0, VERR_HM_NO_32_TO_64_SWITCHER);5268 Assert(enmOp > HM64ON32OP_INVALID && enmOp < HM64ON32OP_END);5269 Assert(pVCpu->hm.s.vmx.VmcsBatchCache.Write.cValidEntries <= RT_ELEMENTS(pVCpu->hm.s.vmx.VmcsBatchCache.Write.aField));5270 Assert(pVCpu->hm.s.vmx.VmcsBatchCache.Read.cValidEntries <= RT_ELEMENTS(pVCpu->hm.s.vmx.VmcsBatchCache.Read.aField));5271 5272 #ifdef VBOX_STRICT5273 for (uint32_t i = 0; i < pVCpu->hm.s.vmx.VmcsBatchCache.Write.cValidEntries; i++)5274 Assert(hmR0VmxIsValidWriteField(pVCpu->hm.s.vmx.VmcsBatchCache.Write.aField[i]));5275 5276 for (uint32_t i = 0; i <pVCpu->hm.s.vmx.VmcsBatchCache.Read.cValidEntries; i++)5277 Assert(hmR0VmxIsValidReadField(pVCpu->hm.s.vmx.VmcsBatchCache.Read.aField[i]));5278 #endif5279 5280 /* Disable interrupts. */5281 RTCCUINTREG fOldEFlags = ASMIntDisableFlags();5282 5283 #ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI5284 RTCPUID idHostCpu = RTMpCpuId();5285 CPUMR0SetLApic(pVCpu, idHostCpu);5286 #endif5287 5288 PCHMPHYSCPU pHostCpu = hmR0GetCurrentCpu();5289 RTHCPHYS HCPhysCpuPage = pHostCpu->HCPhysMemObj;5290 5291 /* Clear VMCS. Marking it inactive, clearing implementation-specific data and writing VMCS data back to memory. */5292 VMXClearVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);5293 pVCpu->hm.s.vmx.fVmcsState = HMVMX_VMCS_STATE_CLEAR;5294 5295 /* Leave VMX Root Mode. */5296 VMXDisable();5297 5298 SUPR0ChangeCR4(0, ~X86_CR4_VMXE);5299 5300 CPUMSetHyperESP(pVCpu, VMMGetStackRC(pVCpu));5301 CPUMSetHyperEIP(pVCpu, enmOp);5302 for (int i = (int)cParams - 1; i >= 0; i--)5303 CPUMPushHyper(pVCpu, paParam[i]);5304 5305 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatWorldSwitch3264, z);5306 5307 /* Call the switcher. */5308 int rc = pVM->hm.s.pfnHost32ToGuest64R0(pVM, RT_UOFFSETOF_DYN(VM, aCpus[pVCpu->idCpu].cpum) - RT_UOFFSETOF(VM, cpum));5309 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatWorldSwitch3264, z);5310 5311 /** @todo replace with hmR0VmxEnterRootMode() and hmR0VmxLeaveRootMode(). */5312 /* Make sure the VMX instructions don't cause #UD faults. 
*/5313 SUPR0ChangeCR4(X86_CR4_VMXE, RTCCUINTREG_MAX);5314 5315 /* Re-enter VMX Root Mode */5316 int rc2 = VMXEnable(HCPhysCpuPage);5317 if (RT_FAILURE(rc2))5318 {5319 SUPR0ChangeCR4(0, ~X86_CR4_VMXE);5320 ASMSetFlags(fOldEFlags);5321 pVM->hm.s.vmx.HCPhysVmxEnableError = HCPhysCpuPage;5322 return rc2;5323 }5324 5325 rc2 = VMXActivateVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);5326 AssertRC(rc2);5327 pVCpu->hm.s.vmx.fVmcsState = HMVMX_VMCS_STATE_ACTIVE;5328 Assert(!(ASMGetFlags() & X86_EFL_IF));5329 ASMSetFlags(fOldEFlags);5330 return rc;5331 }5332 5333 5334 /**5335 * Prepares for and executes VMLAUNCH (64-bit guests) for 32-bit hosts5336 * supporting 64-bit guests.5337 *5338 * @returns VBox status code.5339 * @param fResume Whether to VMLAUNCH or VMRESUME.5340 * @param pCtx Pointer to the guest-CPU context.5341 * @param pCache Pointer to the VMCS cache.5342 * @param pVM The cross context VM structure.5343 * @param pVCpu The cross context virtual CPU structure.5344 */5345 DECLASM(int) VMXR0SwitcherStartVM64(RTHCUINT fResume, PCPUMCTX pCtx, PVMXVMCSBATCHCACHE pCache, PVM pVM, PVMCPU pVCpu)5346 {5347 NOREF(fResume);5348 5349 PCHMPHYSCPU pHostCpu = hmR0GetCurrentCpu();5350 RTHCPHYS const HCPhysCpuPage = pHostCpu->HCPhysMemObj;5351 5352 #ifdef VBOX_WITH_CRASHDUMP_MAGIC5353 pCache->uPos = 1;5354 pCache->interPD = PGMGetInterPaeCR3(pVM);5355 pCache->pSwitcher = (uint64_t)pVM->hm.s.pfnHost32ToGuest64R0;5356 #endif5357 5358 #if defined(DEBUG) && defined(VMX_USE_CACHED_VMCS_ACCESSES)5359 pCache->TestIn.HCPhysCpuPage = 0;5360 pCache->TestIn.HCPhysVmcs = 0;5361 pCache->TestIn.pCache = 0;5362 pCache->TestOut.HCPhysVmcs = 0;5363 pCache->TestOut.pCache = 0;5364 pCache->TestOut.pCtx = 0;5365 pCache->TestOut.eflags = 0;5366 #else5367 NOREF(pCache);5368 #endif5369 5370 uint32_t aParam[10];5371 aParam[0] = RT_LO_U32(HCPhysCpuPage); /* Param 1: VMXON physical address - Lo. */5372 aParam[1] = RT_HI_U32(HCPhysCpuPage); /* Param 1: VMXON physical address - Hi. */5373 aParam[2] = RT_LO_U32(pVCpu->hm.s.vmx.HCPhysVmcs); /* Param 2: VMCS physical address - Lo. */5374 aParam[3] = RT_HI_U32(pVCpu->hm.s.vmx.HCPhysVmcs); /* Param 2: VMCS physical address - Hi. 
*/5375 aParam[4] = VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hm.s.vmx.VmcsBatchCache);5376 aParam[5] = 0;5377 aParam[6] = VM_RC_ADDR(pVM, pVM);5378 aParam[7] = 0;5379 aParam[8] = VM_RC_ADDR(pVM, pVCpu);5380 aParam[9] = 0;5381 5382 #ifdef VBOX_WITH_CRASHDUMP_MAGIC5383 pCtx->dr[4] = pVM->hm.s.vmx.pScratchPhys + 16 + 8;5384 *(uint32_t *)(pVM->hm.s.vmx.pScratch + 16 + 8) = 1;5385 #endif5386 int rc = VMXR0Execute64BitsHandler(pVCpu, HM64ON32OP_VMXRCStartVM64, RT_ELEMENTS(aParam), &aParam[0]);5387 5388 #ifdef VBOX_WITH_CRASHDUMP_MAGIC5389 Assert(*(uint32_t *)(pVM->hm.s.vmx.pScratch + 16 + 8) == 5);5390 Assert(pCtx->dr[4] == 10);5391 *(uint32_t *)(pVM->hm.s.vmx.pScratch + 16 + 8) = 0xff;5392 #endif5393 5394 #if defined(DEBUG) && defined(VMX_USE_CACHED_VMCS_ACCESSES)5395 AssertMsg(pCache->TestIn.HCPhysCpuPage == HCPhysCpuPage, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysCpuPage, HCPhysCpuPage));5396 AssertMsg(pCache->TestIn.HCPhysVmcs == pVCpu->hm.s.vmx.HCPhysVmcs, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysVmcs,5397 pVCpu->hm.s.vmx.HCPhysVmcs));5398 AssertMsg(pCache->TestIn.HCPhysVmcs == pCache->TestOut.HCPhysVmcs, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysVmcs,5399 pCache->TestOut.HCPhysVmcs));5400 AssertMsg(pCache->TestIn.pCache == pCache->TestOut.pCache, ("%RGv vs %RGv\n", pCache->TestIn.pCache,5401 pCache->TestOut.pCache));5402 AssertMsg(pCache->TestIn.pCache == VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hm.s.vmx.VmcsBatchCache),5403 ("%RGv vs %RGv\n", pCache->TestIn.pCache, VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hm.s.vmx.VmcsBatchCache)));5404 AssertMsg(pCache->TestIn.pCtx == pCache->TestOut.pCtx, ("%RGv vs %RGv\n", pCache->TestIn.pCtx,5405 pCache->TestOut.pCtx));5406 Assert(!(pCache->TestOut.eflags & X86_EFL_IF));5407 #endif5408 NOREF(pCtx);5409 return rc;5410 }5411 5412 5413 /**5414 6255 * Initialize the VMCS-Read cache. 5415 6256 * … … 5419 6260 * (those that have a 32-bit FULL & HIGH part). 5420 6261 * 5421 * @returns VBox status code.5422 6262 * @param pVCpu The cross context virtual CPU structure. 5423 6263 */ 5424 static inthmR0VmxInitVmcsReadCache(PVMCPU pVCpu)6264 static void hmR0VmxInitVmcsReadCache(PVMCPU pVCpu) 5425 6265 { 5426 6266 #define VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, idxField) \ … … 5432 6272 } while (0) 5433 6273 5434 PVMXVMCS BATCHCACHE pCache = &pVCpu->hm.s.vmx.VmcsBatchCache;6274 PVMXVMCSCACHE pCache = &pVCpu->hm.s.vmx.VmcsCache; 5435 6275 uint32_t cReadFields = 0; 5436 6276 … … 5461 6301 /* Unused natural width guest-state fields. */ 5462 6302 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS); 5463 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CR3); /* Handled in Nested Paging case */6303 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CR3); /* Handled in nested paging case */ 5464 6304 #endif 5465 6305 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_SYSENTER_ESP); … … 5498 6338 5499 6339 #undef VMXLOCAL_INIT_READ_CACHE_FIELD 5500 return VINF_SUCCESS;5501 6340 } 5502 6341 … … 5612 6451 { 5613 6452 AssertPtr(pVCpu); 5614 PVMXVMCS BATCHCACHE pCache = &pVCpu->hm.s.vmx.VmcsBatchCache;5615 5616 AssertMsgReturn(pCache->Write.cValidEntries < VMX_VMCS_ BATCH_CACHE_MAX_ENTRY - 1,6453 PVMXVMCSCACHE pCache = &pVCpu->hm.s.vmx.VmcsCache; 6454 6455 AssertMsgReturn(pCache->Write.cValidEntries < VMX_VMCS_CACHE_MAX_ENTRY - 1, 5617 6456 ("entries=%u\n", pCache->Write.cValidEntries), VERR_ACCESS_DENIED); 5618 6457 … … 5639 6478 * 5640 6479 * If offsetting is not possible, cause VM-exits on RDTSC(P)s. 
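A standalone sketch of the arithmetic this path performs: scale the TSC ticks to the next deadline down by the CPU's preemption-timer rate shift, clamp to what the 32-bit timer field can hold (with a little headroom), and fall back to intercepting RDTSC(P) when offsetting is not possible. The names and the exact margin are illustrative:

#include <stdbool.h>
#include <stdint.h>

/* Convert TSC ticks-to-deadline into a 32-bit VMX-preemption timer value; the
   timer counts down at the TSC rate divided by 2^cShift. */
static uint32_t CalcPreemptTimer(uint64_t cTscTicksToDeadline, uint8_t cShift)
{
    uint64_t const cTimerTicks = cTscTicksToDeadline >> cShift;
    uint64_t const cMax        = UINT32_MAX - 16;   /* small headroom, as above */
    return (uint32_t)(cTimerTicks < cMax ? cTimerTicks : cMax);
}

typedef struct TSCPOLICY { bool fInterceptRdtsc; uint64_t u64TscOffset; } TSCPOLICY;

/* Decide between programming TSC_OFFSET and taking VM-exits on RDTSC(P). */
static TSCPOLICY ChooseTscPolicy(bool fCanOffset, uint64_t u64Offset)
{
    TSCPOLICY Policy;
    Policy.fInterceptRdtsc = !fCanOffset;
    Policy.u64TscOffset    = fCanOffset ? u64Offset : 0;
    return Policy;
}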
Also sets up the 5641 * VMX 6480 * VMX-preemption timer. 5642 6481 * 5643 6482 * @returns VBox status code. 5644 6483 * @param pVCpu The cross context virtual CPU structure. 6484 * @param pVmxTransient The VMX-transient structure. 5645 6485 * 5646 6486 * @remarks No-long-jump zone!!! 5647 6487 */ 5648 static void hmR0VmxUpdateTscOffsettingAndPreemptTimer(PVMCPU pVCpu) 5649 { 5650 bool fOffsettedTsc; 5651 bool fParavirtTsc; 5652 PVM pVM = pVCpu->CTX_SUFF(pVM); 5653 uint64_t uTscOffset; 6488 static void hmR0VmxUpdateTscOffsettingAndPreemptTimer(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) 6489 { 6490 bool fOffsettedTsc; 6491 bool fParavirtTsc; 6492 uint64_t uTscOffset; 6493 PVM pVM = pVCpu->CTX_SUFF(pVM); 6494 PVMXVMCSINFO pVmcsInfo = hmGetVmxActiveVmcsInfo(pVCpu);; 6495 5654 6496 if (pVM->hm.s.vmx.fUsePreemptTimer) 5655 6497 { … … 5662 6504 cTicksToDeadline >>= pVM->hm.s.vmx.cPreemptTimerShift; 5663 6505 6506 /** @todo r=ramshankar: We need to find a way to integrate nested-guest 6507 * preemption timers here. We probably need to clamp the preemption timer, 6508 * after converting the timer value to the host. */ 5664 6509 uint32_t cPreemptionTickCount = (uint32_t)RT_MIN(cTicksToDeadline, UINT32_MAX - 16); 5665 6510 int rc = VMXWriteVmcs32(VMX_VMCS32_PREEMPT_TIMER_VALUE, cPreemptionTickCount); … … 5680 6525 } 5681 6526 5682 uint32_t uProcCtls = pV Cpu->hm.s.vmx.Ctls.u32ProcCtls;6527 uint32_t uProcCtls = pVmcsInfo->u32ProcCtls; 5683 6528 if ( fOffsettedTsc 5684 6529 && RT_LIKELY(!pVCpu->hm.s.fDebugWantRdTscExit)) 5685 6530 { 5686 if (pVCpu->hm.s.vmx.Ctls.u64TscOffset != uTscOffset) 6531 if (pVmxTransient->fIsNestedGuest) 6532 uTscOffset = CPUMApplyNestedGuestTscOffset(pVCpu, uTscOffset); 6533 if (pVmcsInfo->u64TscOffset != uTscOffset) 5687 6534 { 5688 6535 int rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_TSC_OFFSET_FULL, uTscOffset); 5689 6536 AssertRC(rc); 5690 pV Cpu->hm.s.vmx.Ctls.u64TscOffset = uTscOffset;6537 pVmcsInfo->u64TscOffset = uTscOffset; 5691 6538 } 5692 6539 … … 5696 6543 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, uProcCtls); 5697 6544 AssertRC(rc); 5698 pV Cpu->hm.s.vmx.Ctls.u32ProcCtls = uProcCtls;6545 pVmcsInfo->u32ProcCtls = uProcCtls; 5699 6546 } 5700 6547 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscOffset); … … 5708 6555 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, uProcCtls); 5709 6556 AssertRC(rc); 5710 pV Cpu->hm.s.vmx.Ctls.u32ProcCtls = uProcCtls;6557 pVmcsInfo->u32ProcCtls = uProcCtls; 5711 6558 } 5712 6559 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscIntercept); … … 5721 6568 * @returns The IEM exception flags. 5722 6569 * @param uVector The event vector. 5723 * @param uVmx VectorTypeThe VMX event type.6570 * @param uVmxEventType The VMX event type. 5724 6571 * 5725 6572 * @remarks This function currently only constructs flags required for … … 5727 6574 * and CR2 aspects of an exception are not included). 5728 6575 */ 5729 static uint32_t hmR0VmxGetIemXcptFlags(uint8_t uVector, uint32_t uVmx VectorType)6576 static uint32_t hmR0VmxGetIemXcptFlags(uint8_t uVector, uint32_t uVmxEventType) 5730 6577 { 5731 6578 uint32_t fIemXcptFlags; 5732 switch (uVmx VectorType)6579 switch (uVmxEventType) 5733 6580 { 5734 6581 case VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT: … … 5755 6602 { 5756 6603 fIemXcptFlags = 0; 5757 AssertMsgFailed(("Unexpected vector for software int. uVector=%#x", uVector));6604 AssertMsgFailed(("Unexpected vector for software exception. 
uVector=%#x", uVector)); 5758 6605 } 5759 6606 break; … … 5766 6613 default: 5767 6614 fIemXcptFlags = 0; 5768 AssertMsgFailed(("Unexpected vector type! uVmx VectorType=%#x uVector=%#x", uVmxVectorType, uVector));6615 AssertMsgFailed(("Unexpected vector type! uVmxEventType=%#x uVector=%#x", uVmxEventType, uVector)); 5769 6616 break; 5770 6617 } … … 5784 6631 * @param GCPtrFaultAddress The fault-address (CR2) in case it's a 5785 6632 * page-fault. 5786 *5787 * @remarks Statistics counter assumes this is a guest event being injected or5788 * re-injected into the guest, i.e. 'StatInjectPendingReflect' is5789 * always incremented.5790 6633 */ 5791 6634 DECLINLINE(void) hmR0VmxSetPendingEvent(PVMCPU pVCpu, uint32_t u32IntInfo, uint32_t cbInstr, uint32_t u32ErrCode, … … 6110 6953 * @param uExitReason The VM-exit reason. 6111 6954 * 6112 * @todo N stVmx: Document other error codes when VM-exit is implemented.6955 * @todo NSTVMX: Document other error codes when VM-exit is implemented. 6113 6956 * @remarks No-long-jump zone!!! 6114 6957 */ … … 6180 7023 6181 7024 6182 /** 6183 * Handle a condition that occurred while delivering an event through the guest 6184 * IDT. 6185 * 6186 * @returns Strict VBox status code (i.e. informational status codes too). 6187 * @retval VINF_SUCCESS if we should continue handling the VM-exit. 6188 * @retval VINF_HM_DOUBLE_FAULT if a \#DF condition was detected and we ought 6189 * to continue execution of the guest which will delivery the \#DF. 6190 * @retval VINF_EM_RESET if we detected a triple-fault condition. 6191 * @retval VERR_EM_GUEST_CPU_HANG if we detected a guest CPU hang. 6192 * 6193 * @param pVCpu The cross context virtual CPU structure. 6194 * @param pVmxTransient Pointer to the VMX transient structure. 6195 * 6196 * @remarks No-long-jump zone!!! 6197 */ 6198 static VBOXSTRICTRC hmR0VmxCheckExitDueToEventDelivery(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) 6199 { 6200 uint32_t const uExitVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo); 6201 6202 int rc2 = hmR0VmxReadIdtVectoringInfoVmcs(pVmxTransient); 6203 rc2 |= hmR0VmxReadExitIntInfoVmcs(pVmxTransient); 6204 AssertRCReturn(rc2, rc2); 6205 6206 VBOXSTRICTRC rcStrict = VINF_SUCCESS; 6207 if (VMX_IDT_VECTORING_INFO_IS_VALID(pVmxTransient->uIdtVectoringInfo)) 6208 { 6209 uint32_t const uIdtVectorType = VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uIdtVectoringInfo); 6210 uint32_t const uIdtVector = VMX_IDT_VECTORING_INFO_VECTOR(pVmxTransient->uIdtVectoringInfo); 6211 6212 /* 6213 * If the event was a software interrupt (generated with INT n) or a software exception 6214 * (generated by INT3/INTO) or a privileged software exception (generated by INT1), we 6215 * can handle the VM-exit and continue guest execution which will re-execute the 6216 * instruction rather than re-injecting the exception, as that can cause premature 6217 * trips to ring-3 before injection and involve TRPM which currently has no way of 6218 * storing that these exceptions were caused by these instructions (ICEBP's #DB poses 6219 * the problem). 
6220 */ 6221 IEMXCPTRAISE enmRaise; 6222 IEMXCPTRAISEINFO fRaiseInfo; 6223 if ( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT 6224 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT 6225 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT) 6226 { 6227 enmRaise = IEMXCPTRAISE_REEXEC_INSTR; 6228 fRaiseInfo = IEMXCPTRAISEINFO_NONE; 6229 } 6230 else if (VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo)) 6231 { 6232 uint32_t const uExitVectorType = VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uExitIntInfo); 6233 uint32_t const fIdtVectorFlags = hmR0VmxGetIemXcptFlags(uIdtVector, uIdtVectorType); 6234 uint32_t const fExitVectorFlags = hmR0VmxGetIemXcptFlags(uExitVector, uExitVectorType); 6235 /** @todo Make AssertMsgReturn as just AssertMsg later. */ 6236 AssertMsgReturn(uExitVectorType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT, 6237 ("hmR0VmxCheckExitDueToEventDelivery: Unexpected VM-exit interruption info. %#x!\n", 6238 uExitVectorType), VERR_VMX_IPE_5); 6239 6240 enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fIdtVectorFlags, uIdtVector, fExitVectorFlags, uExitVector, &fRaiseInfo); 6241 6242 /* Determine a vectoring #PF condition, see comment in hmR0VmxExitXcptPF(). */ 6243 if (fRaiseInfo & (IEMXCPTRAISEINFO_EXT_INT_PF | IEMXCPTRAISEINFO_NMI_PF)) 6244 { 6245 pVmxTransient->fVectoringPF = true; 6246 enmRaise = IEMXCPTRAISE_PREV_EVENT; 6247 } 6248 } 6249 else 6250 { 6251 /* 6252 * If an exception or hardware interrupt delivery caused an EPT violation/misconfig or APIC access 6253 * VM-exit, then the VM-exit interruption-information will not be valid and we end up here. 6254 * It is sufficient to reflect the original event to the guest after handling the VM-exit. 6255 */ 6256 Assert( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT 6257 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI 6258 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_EXT_INT); 6259 enmRaise = IEMXCPTRAISE_PREV_EVENT; 6260 fRaiseInfo = IEMXCPTRAISEINFO_NONE; 6261 } 6262 6263 /* 6264 * On CPUs that support Virtual NMIs, if this VM-exit (be it an exception or EPT violation/misconfig 6265 * etc.) occurred while delivering the NMI, we need to clear the block-by-NMI field in the guest 6266 * interruptibility-state before re-delivering the NMI after handling the VM-exit. Otherwise the 6267 * subsequent VM-entry would fail. 6268 * 6269 * See Intel spec. 30.7.1.2 "Resuming Guest Software after Handling an Exception". See @bugref{7445}. 
6270 */ 6271 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS) 6272 && uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI 6273 && ( enmRaise == IEMXCPTRAISE_PREV_EVENT 6274 || (fRaiseInfo & IEMXCPTRAISEINFO_NMI_PF)) 6275 && (pVCpu->hm.s.vmx.Ctls.u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)) 6276 { 6277 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS); 6278 } 6279 6280 switch (enmRaise) 6281 { 6282 case IEMXCPTRAISE_CURRENT_XCPT: 6283 { 6284 Log4Func(("IDT: Pending secondary Xcpt: uIdtVectoringInfo=%#RX64 uExitIntInfo=%#RX64\n", 6285 pVmxTransient->uIdtVectoringInfo, pVmxTransient->uExitIntInfo)); 6286 Assert(rcStrict == VINF_SUCCESS); 6287 break; 6288 } 6289 6290 case IEMXCPTRAISE_PREV_EVENT: 6291 { 6292 uint32_t u32ErrCode; 6293 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(pVmxTransient->uIdtVectoringInfo)) 6294 { 6295 rc2 = hmR0VmxReadIdtVectoringErrorCodeVmcs(pVmxTransient); 6296 AssertRCReturn(rc2, rc2); 6297 u32ErrCode = pVmxTransient->uIdtVectoringErrorCode; 6298 } 6299 else 6300 u32ErrCode = 0; 6301 6302 /* If uExitVector is #PF, CR2 value will be updated from the VMCS if it's a guest #PF, see hmR0VmxExitXcptPF(). */ 6303 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectPendingReflect); 6304 hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(pVmxTransient->uIdtVectoringInfo), 6305 0 /* cbInstr */, u32ErrCode, pVCpu->cpum.GstCtx.cr2); 6306 6307 Log4Func(("IDT: Pending vectoring event %#RX64 Err=%#RX32\n", pVCpu->hm.s.Event.u64IntInfo, 6308 pVCpu->hm.s.Event.u32ErrCode)); 6309 Assert(rcStrict == VINF_SUCCESS); 6310 break; 6311 } 6312 6313 case IEMXCPTRAISE_REEXEC_INSTR: 6314 Assert(rcStrict == VINF_SUCCESS); 6315 break; 6316 6317 case IEMXCPTRAISE_DOUBLE_FAULT: 6318 { 6319 /* 6320 * Determing a vectoring double #PF condition. Used later, when PGM evaluates the 6321 * second #PF as a guest #PF (and not a shadow #PF) and needs to be converted into a #DF. 6322 */ 6323 if (fRaiseInfo & IEMXCPTRAISEINFO_PF_PF) 6324 { 6325 pVmxTransient->fVectoringDoublePF = true; 6326 Log4Func(("IDT: Vectoring double #PF %#RX64 cr2=%#RX64\n", pVCpu->hm.s.Event.u64IntInfo, 6327 pVCpu->cpum.GstCtx.cr2)); 6328 rcStrict = VINF_SUCCESS; 6329 } 6330 else 6331 { 6332 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectPendingReflect); 6333 hmR0VmxSetPendingXcptDF(pVCpu); 6334 Log4Func(("IDT: Pending vectoring #DF %#RX64 uIdtVector=%#x uExitVector=%#x\n", pVCpu->hm.s.Event.u64IntInfo, 6335 uIdtVector, uExitVector)); 6336 rcStrict = VINF_HM_DOUBLE_FAULT; 6337 } 6338 break; 6339 } 6340 6341 case IEMXCPTRAISE_TRIPLE_FAULT: 6342 { 6343 Log4Func(("IDT: Pending vectoring triple-fault uIdt=%#x uExit=%#x\n", uIdtVector, uExitVector)); 6344 rcStrict = VINF_EM_RESET; 6345 break; 6346 } 6347 6348 case IEMXCPTRAISE_CPU_HANG: 6349 { 6350 Log4Func(("IDT: Bad guest! Entering CPU hang. fRaiseInfo=%#x\n", fRaiseInfo)); 6351 rcStrict = VERR_EM_GUEST_CPU_HANG; 6352 break; 6353 } 6354 6355 default: 6356 { 6357 AssertMsgFailed(("IDT: vcpu[%RU32] Unexpected/invalid value! enmRaise=%#x\n", pVCpu->idCpu, enmRaise)); 6358 rcStrict = VERR_VMX_IPE_2; 6359 break; 6360 } 6361 } 6362 } 6363 else if ( VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo) 6364 && VMX_EXIT_INT_INFO_IS_NMI_UNBLOCK_IRET(pVmxTransient->uExitIntInfo) 6365 && uExitVector != X86_XCPT_DF 6366 && (pVCpu->hm.s.vmx.Ctls.u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)) 6367 { 6368 /* 6369 * Execution of IRET caused this fault when NMI blocking was in effect (i.e we're in the guest NMI handler). 
6370 * We need to set the block-by-NMI field so that NMIs remain blocked until the IRET execution is restarted. 6371 * See Intel spec. 30.7.1.2 "Resuming guest software after handling an exception". 6372 */ 6373 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS)) 6374 { 6375 Log4Func(("Setting VMCPU_FF_BLOCK_NMIS. fValid=%RTbool uExitReason=%u\n", 6376 VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo), pVmxTransient->uExitReason)); 6377 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS); 6378 } 6379 } 6380 6381 Assert( rcStrict == VINF_SUCCESS || rcStrict == VINF_HM_DOUBLE_FAULT 6382 || rcStrict == VINF_EM_RESET || rcStrict == VERR_EM_GUEST_CPU_HANG); 6383 return rcStrict; 6384 } 6385 6386 6387 /** 6388 * Imports a guest segment register from the current VMCS into 6389 * the guest-CPU context. 6390 * 6391 * @returns VBox status code. 6392 * @param pVCpu The cross context virtual CPU structure. 6393 * @param idxSel Index of the selector in the VMCS. 6394 * @param idxLimit Index of the segment limit in the VMCS. 6395 * @param idxBase Index of the segment base in the VMCS. 6396 * @param idxAccess Index of the access rights of the segment in the VMCS. 6397 * @param pSelReg Pointer to the segment selector. 6398 * 6399 * @remarks Called with interrupts and/or preemption disabled, try not to assert and 6400 * do not log! 6401 * 6402 * @remarks Never call this function directly!!! Use the 6403 * HMVMX_IMPORT_SREG() macro as that takes care 6404 * of whether to read from the VMCS cache or not. 6405 */ 6406 static int hmR0VmxImportGuestSegmentReg(PVMCPU pVCpu, uint32_t idxSel, uint32_t idxLimit, uint32_t idxBase, uint32_t idxAccess, 6407 PCPUMSELREG pSelReg) 6408 { 6409 NOREF(pVCpu); 6410 6411 uint32_t u32Sel; 6412 uint32_t u32Limit; 6413 uint32_t u32Attr; 6414 uint64_t u64Base; 6415 int rc = VMXReadVmcs32(idxSel, &u32Sel); 6416 rc |= VMXReadVmcs32(idxLimit, &u32Limit); 6417 rc |= VMXReadVmcs32(idxAccess, &u32Attr); 6418 rc |= VMXReadVmcsGstNByIdxVal(idxBase, &u64Base); 6419 AssertRCReturn(rc, rc); 6420 6421 pSelReg->Sel = (uint16_t)u32Sel; 6422 pSelReg->ValidSel = (uint16_t)u32Sel; 6423 pSelReg->fFlags = CPUMSELREG_FLAGS_VALID; 6424 pSelReg->u32Limit = u32Limit; 6425 pSelReg->u64Base = u64Base; 6426 pSelReg->Attr.u = u32Attr; 7025 static void hmR0VmxFixUnusableSegRegAttr(PVMCPU pVCpu, PCPUMSELREG pSelReg, uint32_t idxSel) 7026 { 7027 Assert(pSelReg->Attr.u & X86DESCATTR_UNUSABLE); 6427 7028 6428 7029 /* … … 6444 7045 * See Intel spec. 27.3.2 "Saving Segment Registers and Descriptor-Table Registers". 6445 7046 */ 6446 if (pSelReg->Attr.u & X86DESCATTR_UNUSABLE)6447 {6448 Assert(idxSel != VMX_VMCS16_GUEST_TR_SEL); /* TR is the only selector that can never be unusable. */6449 6450 /* Masking off: X86DESCATTR_P, X86DESCATTR_LIMIT_HIGH, and X86DESCATTR_AVL. The latter two are really irrelevant. */6451 pSelReg->Attr.u &= X86DESCATTR_UNUSABLE | X86DESCATTR_L | X86DESCATTR_D | X86DESCATTR_G6452 | X86DESCATTR_DPL | X86DESCATTR_TYPE | X86DESCATTR_DT;6453 7047 #ifdef VBOX_STRICT 6454 VMMRZCallRing3Disable(pVCpu); 6455 Log4Func(("Unusable idxSel=%#x attr=%#x -> %#x\n", idxSel, u32Sel, pSelReg->Attr.u)); 7048 uint32_t const uAttr = pSelReg->Attr.u; 7049 #endif 7050 7051 /* Masking off: X86DESCATTR_P, X86DESCATTR_LIMIT_HIGH, and X86DESCATTR_AVL. The latter two are really irrelevant. 
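The attribute masking in hmR0VmxFixUnusableSegRegAttr() below operates on the VMCS access-rights encoding, which packs the descriptor attribute bits plus VT-x's "unusable" flag into one 32-bit field (bits 3:0 type, 4 S, 6:5 DPL, 7 P, 12 AVL, 13 L, 14 D/B, 15 G, 16 unusable per the Intel SDM). A hypothetical standalone decoder, handy when reasoning about which bits survive the masking:

#include <stdbool.h>
#include <stdint.h>

typedef struct SEGATTR
{
    uint8_t uType;          /* bits 3:0  */
    bool    fSystem;        /* bit 4 clear -> system descriptor */
    uint8_t uDpl;           /* bits 6:5  */
    bool    fPresent;       /* bit 7     */
    bool    fAvl;           /* bit 12    */
    bool    fLong;          /* bit 13    */
    bool    fDefBig;        /* bit 14    */
    bool    fGranularity;   /* bit 15    */
    bool    fUnusable;      /* bit 16    */
} SEGATTR;

static SEGATTR DecodeSegAttr(uint32_t u32Attr)
{
    SEGATTR Attr;
    Attr.uType        = (uint8_t)(u32Attr & 0xf);
    Attr.fSystem      = !(u32Attr & (1u << 4));
    Attr.uDpl         = (uint8_t)((u32Attr >> 5) & 3);
    Attr.fPresent     = (u32Attr >> 7) & 1;
    Attr.fAvl         = (u32Attr >> 12) & 1;
    Attr.fLong        = (u32Attr >> 13) & 1;
    Attr.fDefBig      = (u32Attr >> 14) & 1;
    Attr.fGranularity = (u32Attr >> 15) & 1;
    Attr.fUnusable    = (u32Attr >> 16) & 1;
    return Attr;
}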
*/ 7052 pSelReg->Attr.u &= X86DESCATTR_UNUSABLE | X86DESCATTR_L | X86DESCATTR_D | X86DESCATTR_G 7053 | X86DESCATTR_DPL | X86DESCATTR_TYPE | X86DESCATTR_DT; 7054 7055 #ifdef VBOX_STRICT 7056 VMMRZCallRing3Disable(pVCpu); 7057 Log4Func(("Unusable %#x: sel=%#x attr=%#x -> %#x\n", idxSel, pSelReg->Sel, uAttr, pSelReg->Attr.u)); 6456 7058 # ifdef DEBUG_bird 6457 AssertMsg((u32Attr & ~X86DESCATTR_P) == pSelReg->Attr.u,6458 6459 idxSel, u32Sel, pSelReg->Attr.u, pSelReg->Sel, pSelReg->u64Base, pSelReg->u32Limit));7059 AssertMsg((uAttr & ~X86DESCATTR_P) == pSelReg->Attr.u, 7060 ("%#x: %#x != %#x (sel=%#x base=%#llx limit=%#x)\n", 7061 idxSel, uAttr, pSelReg->Attr.u, pSelReg->Sel, pSelReg->u64Base, pSelReg->u32Limit)); 6460 7062 # endif 6461 VMMRZCallRing3Enable(pVCpu); 7063 VMMRZCallRing3Enable(pVCpu); 7064 #else 7065 RT_NOREF2(pVCpu, idxSel); 6462 7066 #endif 6463 } 7067 } 7068 7069 7070 /** 7071 * Imports a guest segment register from the current VMCS into the guest-CPU 7072 * context. 7073 * 7074 * @returns VBox status code. 7075 * @param pVCpu The cross context virtual CPU structure. 7076 * @param iSegReg The segment register number (X86_SREG_XXX). 7077 * 7078 * @remarks Called with interrupts and/or preemption disabled, try not to assert and 7079 * do not log! 7080 */ 7081 static int hmR0VmxImportGuestSegReg(PVMCPU pVCpu, uint8_t iSegReg) 7082 { 7083 Assert(iSegReg < X86_SREG_COUNT); 7084 7085 uint32_t const idxSel = g_aVmcsSegSel[iSegReg]; 7086 uint32_t const idxLimit = g_aVmcsSegLimit[iSegReg]; 7087 uint32_t const idxAttr = g_aVmcsSegAttr[iSegReg]; 7088 #ifdef VMX_USE_CACHED_VMCS_ACCESSES 7089 uint32_t const idxBase = g_aVmcsCacheSegBase[iSegReg]; 7090 #else 7091 uint32_t const idxBase = g_aVmcsSegBase[iSegReg]; 7092 #endif 7093 uint64_t u64Base; 7094 uint32_t u32Sel, u32Limit, u32Attr; 7095 int rc = VMXReadVmcs32(idxSel, &u32Sel); 7096 rc |= VMXReadVmcs32(idxLimit, &u32Limit); 7097 rc |= VMXReadVmcs32(idxAttr, &u32Attr); 7098 rc |= VMXReadVmcsGstNByIdxVal(idxBase, &u64Base); 7099 if (RT_SUCCESS(rc)) 7100 { 7101 PCPUMSELREG pSelReg = &pVCpu->cpum.GstCtx.aSRegs[iSegReg]; 7102 pSelReg->Sel = u32Sel; 7103 pSelReg->ValidSel = u32Sel; 7104 pSelReg->fFlags = CPUMSELREG_FLAGS_VALID; 7105 pSelReg->u32Limit = u32Limit; 7106 pSelReg->u64Base = u64Base; 7107 pSelReg->Attr.u = u32Attr; 7108 if (u32Attr & X86DESCATTR_UNUSABLE) 7109 hmR0VmxFixUnusableSegRegAttr(pVCpu, pSelReg, idxSel); 7110 } 7111 return rc; 7112 } 7113 7114 7115 /** 7116 * Imports the guest LDTR from the current VMCS into the guest-CPU context. 7117 * 7118 * @returns VBox status code. 7119 * @param pVCpu The cross context virtual CPU structure. 7120 * @param pSelReg Pointer to the segment selector. 7121 * 7122 * @remarks Called with interrupts and/or preemption disabled, try not to assert and 7123 * do not log! 
7124 */ 7125 static int hmR0VmxImportGuestLdtr(PVMCPU pVCpu) 7126 { 7127 uint64_t u64Base; 7128 uint32_t u32Sel, u32Limit, u32Attr; 7129 int rc = VMXReadVmcs32(VMX_VMCS16_GUEST_LDTR_SEL, &u32Sel); 7130 rc |= VMXReadVmcs32(VMX_VMCS32_GUEST_LDTR_LIMIT, &u32Limit); 7131 rc |= VMXReadVmcs32(VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, &u32Attr); 7132 rc |= VMXReadVmcsGstN(VMX_VMCS_GUEST_LDTR_BASE, &u64Base); 7133 7134 if (RT_SUCCESS(rc)) 7135 { 7136 pVCpu->cpum.GstCtx.ldtr.Sel = u32Sel; 7137 pVCpu->cpum.GstCtx.ldtr.ValidSel = u32Sel; 7138 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID; 7139 pVCpu->cpum.GstCtx.ldtr.u32Limit = u32Limit; 7140 pVCpu->cpum.GstCtx.ldtr.u64Base = u64Base; 7141 pVCpu->cpum.GstCtx.ldtr.Attr.u = u32Attr; 7142 if (u32Attr & X86DESCATTR_UNUSABLE) 7143 hmR0VmxFixUnusableSegRegAttr(pVCpu, &pVCpu->cpum.GstCtx.ldtr, VMX_VMCS16_GUEST_LDTR_SEL); 7144 } 7145 return rc; 7146 } 7147 7148 7149 /** 7150 * Imports the guest TR from the current VMCS into the guest-CPU context. 7151 * 7152 * @returns VBox status code. 7153 * @param pVCpu The cross context virtual CPU structure. 7154 * @param pSelReg Pointer to the segment selector. 7155 * 7156 * @remarks Called with interrupts and/or preemption disabled, try not to assert and 7157 * do not log! 7158 */ 7159 static int hmR0VmxImportGuestTr(PVMCPU pVCpu) 7160 { 7161 uint32_t u32Sel, u32Limit, u32Attr; 7162 uint64_t u64Base; 7163 int rc = VMXReadVmcs32(VMX_VMCS16_GUEST_TR_SEL, &u32Sel); 7164 rc |= VMXReadVmcs32(VMX_VMCS32_GUEST_TR_LIMIT, &u32Limit); 7165 rc |= VMXReadVmcs32(VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, &u32Attr); 7166 rc |= VMXReadVmcsGstN(VMX_VMCS_GUEST_TR_BASE, &u64Base); 7167 AssertRCReturn(rc, rc); 7168 7169 pVCpu->cpum.GstCtx.tr.Sel = u32Sel; 7170 pVCpu->cpum.GstCtx.tr.ValidSel = u32Sel; 7171 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID; 7172 pVCpu->cpum.GstCtx.tr.u32Limit = u32Limit; 7173 pVCpu->cpum.GstCtx.tr.u64Base = u64Base; 7174 pVCpu->cpum.GstCtx.tr.Attr.u = u32Attr; 7175 /* TR is the only selector that can never be unusable. */ 7176 Assert(!(u32Attr & X86DESCATTR_UNUSABLE)); 6464 7177 return VINF_SUCCESS; 6465 7178 } … … 6476 7189 * instead!!! 6477 7190 */ 6478 DECLINLINE(int)hmR0VmxImportGuestRip(PVMCPU pVCpu)7191 static int hmR0VmxImportGuestRip(PVMCPU pVCpu) 6479 7192 { 6480 7193 uint64_t u64Val; … … 6499 7212 * 6500 7213 * @returns VBox status code. 6501 * @param pVCpu The cross context virtual CPU structure. 7214 * @param pVCpu The cross context virtual CPU structure. 7215 * @param pVmcsInfo The VMCS info. object. 6502 7216 * 6503 7217 * @remarks Called with interrupts and/or preemption disabled, should not assert! … … 6505 7219 * instead!!! 6506 7220 */ 6507 DECLINLINE(int) hmR0VmxImportGuestRFlags(PVMCPU pVCpu)7221 static int hmR0VmxImportGuestRFlags(PVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo) 6508 7222 { 6509 7223 uint32_t u32Val; … … 6517 7231 6518 7232 /* Restore eflags for real-on-v86-mode hack. */ 6519 if (pV Cpu->hm.s.vmx.RealMode.fRealOnV86Active)7233 if (pVmcsInfo->RealMode.fRealOnV86Active) 6520 7234 { 6521 7235 pCtx->eflags.Bits.u1VM = 0; 6522 pCtx->eflags.Bits.u2IOPL = pV Cpu->hm.s.vmx.RealMode.Eflags.Bits.u2IOPL;7236 pCtx->eflags.Bits.u2IOPL = pVmcsInfo->RealMode.Eflags.Bits.u2IOPL; 6523 7237 } 6524 7238 } … … 6535 7249 * 6536 7250 * @returns VBox status code. 6537 * @param pVCpu The cross context virtual CPU structure. 7251 * @param pVCpu The cross context virtual CPU structure. 7252 * @param pVmcsInfo The VMCS info. object. 
6538 7253 * 6539 7254 * @remarks Called with interrupts and/or preemption disabled, try not to assert and … … 6542 7257 * instead!!! 6543 7258 */ 6544 DECLINLINE(int) hmR0VmxImportGuestIntrState(PVMCPU pVCpu)7259 static int hmR0VmxImportGuestIntrState(PVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo) 6545 7260 { 6546 7261 uint32_t u32Val; 6547 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;6548 7262 int rc = VMXReadVmcs32(VMX_VMCS32_GUEST_INT_STATE, &u32Val); 6549 AssertRCReturn(rc, rc); 6550 6551 /* 6552 * We additionally have a requirement to import RIP, RFLAGS depending on whether we 6553 * might need them in while evaluating pending events before VM-entry. 6554 */ 6555 if (!u32Val) 6556 { 6557 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)) 6558 { 7263 if (RT_SUCCESS(rc)) 7264 { 7265 if (!u32Val) 7266 { 7267 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)) 7268 { 7269 rc = hmR0VmxImportGuestRip(pVCpu); 7270 rc |= hmR0VmxImportGuestRFlags(pVCpu, pVmcsInfo); 7271 AssertRCReturn(rc, rc); 7272 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS); 7273 } 7274 7275 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS)) 7276 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS); 7277 } 7278 else 7279 { 7280 /* 7281 * We must import RIP here to set our EM interrupt-inhibited state. 7282 * We also import RFLAGS as our code that evaluates pending interrupts 7283 * before VM-entry requires it. 7284 */ 6559 7285 rc = hmR0VmxImportGuestRip(pVCpu); 6560 rc |= hmR0VmxImportGuestRFlags(pVCpu); 6561 AssertRCReturn(rc, rc); 6562 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS); 6563 } 6564 6565 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS)) 6566 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS); 6567 } 6568 else 6569 { 6570 rc = hmR0VmxImportGuestRip(pVCpu); 6571 rc |= hmR0VmxImportGuestRFlags(pVCpu); 6572 AssertRCReturn(rc, rc); 6573 6574 if (u32Val & ( VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS 6575 | VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)) 6576 { 6577 EMSetInhibitInterruptsPC(pVCpu, pCtx->rip); 6578 } 6579 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)) 6580 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS); 6581 6582 if (u32Val & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI) 6583 { 6584 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS)) 6585 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS); 6586 } 6587 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS)) 6588 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS); 6589 } 6590 6591 return VINF_SUCCESS; 7286 rc |= hmR0VmxImportGuestRFlags(pVCpu, pVmcsInfo); 7287 if (RT_SUCCESS(rc)) 7288 { 7289 if (u32Val & ( VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS 7290 | VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)) 7291 { 7292 EMSetInhibitInterruptsPC(pVCpu, pVCpu->cpum.GstCtx.rip); 7293 } 7294 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)) 7295 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS); 7296 7297 if (u32Val & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI) 7298 { 7299 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS)) 7300 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS); 7301 } 7302 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS)) 7303 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS); 7304 } 7305 } 7306 } 7307 return rc; 6592 7308 } 6593 7309 … … 6597 7313 * 6598 7314 * @returns VBox status code. 6599 * @param pVCpu The cross context virtual CPU structure. 6600 * @param fWhat What to import, CPUMCTX_EXTRN_XXX. 6601 */ 6602 static int hmR0VmxImportGuestState(PVMCPU pVCpu, uint64_t fWhat) 7315 * @param pVCpu The cross context virtual CPU structure. 7316 * @param pVmcsInfo The VMCS info. object. 
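hmR0VmxImportGuestIntrState() above maps the VMCS guest interruptibility-state field onto the VMM's force flags. Per the Intel SDM the low bits of that field are: bit 0 blocking by STI, bit 1 blocking by MOV SS, bit 2 blocking by SMI, bit 3 blocking by NMI. A standalone sketch of the mapping, with illustrative names:

#include <stdbool.h>
#include <stdint.h>

#define INT_STATE_BLOCK_STI     (1u << 0)
#define INT_STATE_BLOCK_MOVSS   (1u << 1)
#define INT_STATE_BLOCK_SMI     (1u << 2)
#define INT_STATE_BLOCK_NMI     (1u << 3)

typedef struct INHIBITSTATE
{
    bool fInhibitInterrupts;    /* interrupt shadow: STI or MOV SS blocking */
    bool fBlockNmis;            /* NMI blocking (virtual NMIs)              */
} INHIBITSTATE;

/* Translate the raw interruptibility-state value into the two flags the VMM
   cares about when evaluating pending events before the next VM-entry. */
static INHIBITSTATE DecodeIntrState(uint32_t u32IntrState)
{
    INHIBITSTATE State;
    State.fInhibitInterrupts = (u32IntrState & (INT_STATE_BLOCK_STI | INT_STATE_BLOCK_MOVSS)) != 0;
    State.fBlockNmis         = (u32IntrState & INT_STATE_BLOCK_NMI) != 0;
    return State;
}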
7317 * @param fWhat What to import, CPUMCTX_EXTRN_XXX. 7318 */ 7319 static int hmR0VmxImportGuestState(PVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo, uint64_t fWhat) 6603 7320 { 6604 7321 #define VMXLOCAL_BREAK_RC(a_rc) \ 6605 if (RT_FAILURE(a_rc)) \ 7322 if (RT_SUCCESS(a_rc)) \ 7323 { } \ 7324 else \ 6606 7325 break 6607 7326 … … 6612 7331 uint32_t u32Val; 6613 7332 6614 Log4Func(("fExtrn=%#RX64 fWhat=%#RX64\n", pCtx->fExtrn, fWhat)); 6615 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatImportGuestState, x); 7333 STAM_PROFILE_ADV_START(& pVCpu->hm.s.StatImportGuestState, x); 6616 7334 6617 7335 /* … … 6634 7352 if (fWhat & CPUMCTX_EXTRN_RFLAGS) 6635 7353 { 6636 rc = hmR0VmxImportGuestRFlags(pVCpu );7354 rc = hmR0VmxImportGuestRFlags(pVCpu, pVmcsInfo); 6637 7355 VMXLOCAL_BREAK_RC(rc); 6638 7356 } … … 6640 7358 if (fWhat & CPUMCTX_EXTRN_HM_VMX_INT_STATE) 6641 7359 { 6642 rc = hmR0VmxImportGuestIntrState(pVCpu );7360 rc = hmR0VmxImportGuestIntrState(pVCpu, pVmcsInfo); 6643 7361 VMXLOCAL_BREAK_RC(rc); 6644 7362 } … … 6653 7371 if (fWhat & CPUMCTX_EXTRN_SREG_MASK) 6654 7372 { 7373 bool const fRealOnV86Active = pVmcsInfo->RealMode.fRealOnV86Active; 6655 7374 if (fWhat & CPUMCTX_EXTRN_CS) 6656 7375 { 6657 rc = HMVMX_IMPORT_SREG(CS, &pCtx->cs);7376 rc |= hmR0VmxImportGuestSegReg(pVCpu, X86_SREG_CS); 6658 7377 rc |= hmR0VmxImportGuestRip(pVCpu); 6659 VMXLOCAL_BREAK_RC(rc); 6660 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active) 6661 pCtx->cs.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrCS.u; 6662 EMR0HistoryUpdatePC(pVCpu, pCtx->cs.u64Base + pCtx->rip, true); 7378 if (fRealOnV86Active) 7379 pCtx->cs.Attr.u = pVmcsInfo->RealMode.AttrCS.u; 7380 EMR0HistoryUpdatePC(pVCpu, pCtx->cs.u64Base + pCtx->rip, true /* fFlattened */); 6663 7381 } 6664 7382 if (fWhat & CPUMCTX_EXTRN_SS) 6665 7383 { 6666 rc = HMVMX_IMPORT_SREG(SS, &pCtx->ss); 6667 VMXLOCAL_BREAK_RC(rc); 6668 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active) 6669 pCtx->ss.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrSS.u; 7384 rc |= hmR0VmxImportGuestSegReg(pVCpu, X86_SREG_SS); 7385 if (fRealOnV86Active) 7386 pCtx->ss.Attr.u = pVmcsInfo->RealMode.AttrSS.u; 6670 7387 } 6671 7388 if (fWhat & CPUMCTX_EXTRN_DS) 6672 7389 { 6673 rc = HMVMX_IMPORT_SREG(DS, &pCtx->ds); 6674 VMXLOCAL_BREAK_RC(rc); 6675 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active) 6676 pCtx->ds.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrDS.u; 7390 rc |= hmR0VmxImportGuestSegReg(pVCpu, X86_SREG_DS); 7391 if (fRealOnV86Active) 7392 pCtx->ds.Attr.u = pVmcsInfo->RealMode.AttrDS.u; 6677 7393 } 6678 7394 if (fWhat & CPUMCTX_EXTRN_ES) 6679 7395 { 6680 rc = HMVMX_IMPORT_SREG(ES, &pCtx->es); 6681 VMXLOCAL_BREAK_RC(rc); 6682 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active) 6683 pCtx->es.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrES.u; 7396 rc |= hmR0VmxImportGuestSegReg(pVCpu, X86_SREG_ES); 7397 if (fRealOnV86Active) 7398 pCtx->es.Attr.u = pVmcsInfo->RealMode.AttrES.u; 6684 7399 } 6685 if (fWhat & CPUMCTX_EXTRN_FS) 6686 { 6687 rc = HMVMX_IMPORT_SREG(FS, &pCtx->fs); 6688 VMXLOCAL_BREAK_RC(rc); 6689 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active) 6690 pCtx->fs.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrFS.u; 6691 } 7400 if (fWhat & CPUMCTX_EXTRN_FS) 7401 { 7402 rc |= hmR0VmxImportGuestSegReg(pVCpu, X86_SREG_FS); 7403 if (fRealOnV86Active) 7404 pCtx->fs.Attr.u = pVmcsInfo->RealMode.AttrFS.u; 7405 } 6692 7406 if (fWhat & CPUMCTX_EXTRN_GS) 6693 7407 { 6694 rc = HMVMX_IMPORT_SREG(GS, &pCtx->gs); 6695 VMXLOCAL_BREAK_RC(rc); 6696 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active) 6697 pCtx->gs.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrGS.u; 7408 rc |= 
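hmR0VmxImportGuestState above only fetches the pieces selected by the fWhat mask and clears the corresponding bits in pCtx->fExtrn. A rough standalone model of that on-demand import pattern is sketched below; the flag names and values are invented for illustration and only mirror the idea, not the actual CPUMCTX_EXTRN_XXX definitions.

#include <stdint.h>
#include <stdio.h>

/* Illustrative flags modelling the CPUMCTX_EXTRN_XXX idea; values made up for the sketch. */
#define SKETCH_EXTRN_RIP     UINT64_C(0x0001)
#define SKETCH_EXTRN_RFLAGS  UINT64_C(0x0002)
#define SKETCH_EXTRN_SREGS   UINT64_C(0x0004)

typedef struct SKETCHCTX
{
    uint64_t fExtrn;    /* Bits still left in the VMCS (not yet imported). */
    uint64_t uRip;      /* Valid only once SKETCH_EXTRN_RIP has been cleared. */
} SKETCHCTX;

/* Imports only the requested pieces and clears their "still external" bits. */
static void sketchImportOnDemand(SKETCHCTX *pCtx, uint64_t fWhat)
{
    fWhat &= pCtx->fExtrn;              /* Only fetch what is still external. */
    if (fWhat & SKETCH_EXTRN_RIP)
        pCtx->uRip = 0;                 /* A VMREAD of the guest RIP would go here. */
    /* ...other registers as needed... */
    pCtx->fExtrn &= ~fWhat;             /* Mark the imported pieces as up to date. */
}

int main(void)
{
    SKETCHCTX Ctx = { SKETCH_EXTRN_RIP | SKETCH_EXTRN_RFLAGS | SKETCH_EXTRN_SREGS, 0 };
    sketchImportOnDemand(&Ctx, SKETCH_EXTRN_RIP);
    printf("fExtrn after RIP import: %#llx\n", (unsigned long long)Ctx.fExtrn);
    return 0;
}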
hmR0VmxImportGuestSegReg(pVCpu, X86_SREG_GS); 7409 if (fRealOnV86Active) 7410 pCtx->gs.Attr.u = pVmcsInfo->RealMode.AttrGS.u; 6698 7411 } 7412 VMXLOCAL_BREAK_RC(rc); 6699 7413 } 6700 7414 … … 6702 7416 { 6703 7417 if (fWhat & CPUMCTX_EXTRN_LDTR) 6704 { 6705 rc = HMVMX_IMPORT_SREG(LDTR, &pCtx->ldtr); 6706 VMXLOCAL_BREAK_RC(rc); 6707 } 7418 rc |= hmR0VmxImportGuestLdtr(pVCpu); 6708 7419 6709 7420 if (fWhat & CPUMCTX_EXTRN_GDTR) 6710 7421 { 6711 rc 7422 rc |= VMXReadVmcsGstN(VMX_VMCS_GUEST_GDTR_BASE, &u64Val); 6712 7423 rc |= VMXReadVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val); 6713 VMXLOCAL_BREAK_RC(rc);6714 7424 pCtx->gdtr.pGdt = u64Val; 6715 7425 pCtx->gdtr.cbGdt = u32Val; … … 6719 7429 if (fWhat & CPUMCTX_EXTRN_IDTR) 6720 7430 { 6721 rc 7431 rc |= VMXReadVmcsGstN(VMX_VMCS_GUEST_IDTR_BASE, &u64Val); 6722 7432 rc |= VMXReadVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val); 6723 VMXLOCAL_BREAK_RC(rc);6724 7433 pCtx->idtr.pIdt = u64Val; 6725 7434 pCtx->idtr.cbIdt = u32Val; … … 6729 7438 if (fWhat & CPUMCTX_EXTRN_TR) 6730 7439 { 6731 /* Real-mode emulation using virtual-8086 mode has the fake TSS (pRealModeTSS) in TR, don't save that one. */ 6732 if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active) 6733 { 6734 rc = HMVMX_IMPORT_SREG(TR, &pCtx->tr); 6735 VMXLOCAL_BREAK_RC(rc); 6736 } 7440 /* Real-mode emulation using virtual-8086 mode has the fake TSS (pRealModeTSS) in TR, 7441 don't need to import that one. */ 7442 if (!pVmcsInfo->RealMode.fRealOnV86Active) 7443 rc |= hmR0VmxImportGuestTr(pVCpu); 7444 } 7445 VMXLOCAL_BREAK_RC(rc); 7446 } 7447 7448 if (fWhat & CPUMCTX_EXTRN_DR7) 7449 { 7450 if (!pVCpu->hm.s.fUsingHyperDR7) 7451 { 7452 /* Upper 32-bits are always zero. See Intel spec. 2.7.3 "Loading and Storing Debug Registers". */ 7453 rc = VMXReadVmcs32(VMX_VMCS_GUEST_DR7, &u32Val); 7454 VMXLOCAL_BREAK_RC(rc); 7455 pCtx->dr[7] = u32Val; 6737 7456 } 6738 7457 } … … 6773 7492 ) 6774 7493 { 6775 PCVMXAUTOMSR pMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr; 6776 uint32_t const cMsrs = pVCpu->hm.s.vmx.cMsrs; 7494 PCVMXAUTOMSR pMsr = (PCVMXAUTOMSR)pVmcsInfo->pvGuestMsrStore; 7495 uint32_t const cMsrs = pVmcsInfo->cExitMsrStore; 7496 Assert(cMsrs == 0 || pMsr != NULL); 6777 7497 for (uint32_t i = 0; i < cMsrs; i++, pMsr++) 6778 7498 { … … 6780 7500 { 6781 7501 #if HC_ARCH_BITS == 32 6782 case MSR_K8_LSTAR: pCtx->msrLSTAR = pMsr->u64Value; break;6783 case MSR_K6_STAR: pCtx->msrSTAR = pMsr->u64Value; break;6784 case MSR_K8_SF_MASK: pCtx->msrSFMASK = pMsr->u64Value; break;6785 case MSR_K8_KERNEL_GS_BASE: pCtx->msrKERNELGSBASE = pMsr->u64Value; break;7502 case MSR_K8_LSTAR: pCtx->msrLSTAR = pMsr->u64Value; break; 7503 case MSR_K6_STAR: pCtx->msrSTAR = pMsr->u64Value; break; 7504 case MSR_K8_SF_MASK: pCtx->msrSFMASK = pMsr->u64Value; break; 7505 case MSR_K8_KERNEL_GS_BASE: pCtx->msrKERNELGSBASE = pMsr->u64Value; break; 6786 7506 #endif 6787 case MSR_IA32_SPEC_CTRL: CPUMSetGuestSpecCtrl(pVCpu, pMsr->u64Value); break; 6788 case MSR_K8_TSC_AUX: CPUMSetGuestTscAux(pVCpu, pMsr->u64Value); break; 6789 case MSR_K6_EFER: /* EFER can't be changed without causing a VM-exit */ break; 7507 case MSR_IA32_SPEC_CTRL: CPUMSetGuestSpecCtrl(pVCpu, pMsr->u64Value); break; 7508 case MSR_K8_TSC_AUX: CPUMSetGuestTscAux(pVCpu, pMsr->u64Value); break; 7509 case MSR_K6_EFER: /* Can't be changed without causing a VM-exit */ break; 7510 6790 7511 default: 6791 7512 { 6792 7513 pVCpu->hm.s.u32HMError = pMsr->u32Msr; 6793 7514 ASMSetFlags(fEFlags); 6794 AssertMsgFailed(("Unexpected MSR in auto-load/store area. 
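The VM-exit MSR-store walk above iterates an array of MSR index/value pairs. A simplified standalone sketch of such a lookup follows; the entry layout is declared locally and only approximates VMXAUTOMSR.

#include <stdint.h>

/* Simplified stand-in for a VMX auto-load/store MSR entry (index, reserved, 64-bit value). */
typedef struct SKETCHAUTOMSR
{
    uint32_t u32Msr;
    uint32_t u32Reserved;
    uint64_t u64Value;
} SKETCHAUTOMSR;

/* Returns the stored value of idMsr from a VM-exit MSR-store area, or uDefault if absent. */
static uint64_t sketchFindStoredMsr(const SKETCHAUTOMSR *paMsrs, uint32_t cMsrs,
                                    uint32_t idMsr, uint64_t uDefault)
{
    for (uint32_t i = 0; i < cMsrs; i++)
        if (paMsrs[i].u32Msr == idMsr)
            return paMsrs[i].u64Value;
    return uDefault;
}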
uMsr=%#RX32 cMsrs=%u\n", pMsr->u32Msr,7515 AssertMsgFailed(("Unexpected MSR in auto-load/store area. idMsr=%#RX32 cMsrs=%u\n", pMsr->u32Msr, 6795 7516 cMsrs)); 6796 7517 return VERR_HM_UNEXPECTED_LD_ST_MSR; … … 6800 7521 } 6801 7522 6802 if (fWhat & CPUMCTX_EXTRN_DR7)6803 {6804 if (!pVCpu->hm.s.fUsingHyperDR7)6805 {6806 /* Upper 32-bits are always zero. See Intel spec. 2.7.3 "Loading and Storing Debug Registers". */6807 rc = VMXReadVmcs32(VMX_VMCS_GUEST_DR7, &u32Val);6808 VMXLOCAL_BREAK_RC(rc);6809 pCtx->dr[7] = u32Val;6810 }6811 }6812 6813 7523 if (fWhat & CPUMCTX_EXTRN_CR_MASK) 6814 7524 { 6815 uint 32_t u32Shadow;7525 uint64_t u64Shadow; 6816 7526 if (fWhat & CPUMCTX_EXTRN_CR0) 6817 7527 { 6818 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR0, &u32Val); 6819 rc |= VMXReadVmcs32(VMX_VMCS_CTRL_CR0_READ_SHADOW, &u32Shadow); 7528 /** @todo r=ramshankar: We only read 32-bits here for legacy/convenience reasons, 7529 * remove when we drop 32-bit host w/ 64-bit host support, see 7530 * @bugref{9180#c39}. */ 7531 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR0, &u32Val); 7532 rc |= VMXReadVmcsHstN(VMX_VMCS_CTRL_CR0_READ_SHADOW, &u64Shadow); 6820 7533 VMXLOCAL_BREAK_RC(rc); 6821 u32Val = (u32Val & ~pVCpu->hm.s.vmx.Ctls.u32Cr0Mask) 6822 | (u32Shadow & pVCpu->hm.s.vmx.Ctls.u32Cr0Mask); 6823 VMMRZCallRing3Disable(pVCpu); /* Calls into PGM which has Log statements. */ 6824 CPUMSetGuestCR0(pVCpu, u32Val); 7534 u64Val = u32Val; 7535 u64Val = (u64Val & ~pVmcsInfo->u64Cr0Mask) 7536 | (u64Shadow & pVmcsInfo->u64Cr0Mask); 7537 VMMRZCallRing3Disable(pVCpu); /* May call into PGM which has Log statements. */ 7538 CPUMSetGuestCR0(pVCpu, u64Val); 6825 7539 VMMRZCallRing3Enable(pVCpu); 6826 7540 } … … 6828 7542 if (fWhat & CPUMCTX_EXTRN_CR4) 6829 7543 { 6830 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR4, &u32Val); 6831 rc |= VMXReadVmcs32(VMX_VMCS_CTRL_CR4_READ_SHADOW, &u32Shadow); 7544 /** @todo r=ramshankar: We only read 32-bits here for legacy/convenience reasons, 7545 * remove when we drop 32-bit host w/ 64-bit host support, see 7546 * @bugref{9180#c39}. */ 7547 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR4, &u32Val); 7548 rc |= VMXReadVmcsHstN(VMX_VMCS_CTRL_CR4_READ_SHADOW, &u64Shadow); 6832 7549 VMXLOCAL_BREAK_RC(rc); 6833 u32Val = (u32Val & ~pVCpu->hm.s.vmx.Ctls.u32Cr4Mask) 6834 | (u32Shadow & pVCpu->hm.s.vmx.Ctls.u32Cr4Mask); 6835 CPUMSetGuestCR4(pVCpu, u32Val); 7550 u64Val = u32Val; 7551 u64Val = (u64Val & ~pVmcsInfo->u64Cr4Mask) 7552 | (u64Shadow & pVmcsInfo->u64Cr4Mask); 7553 pCtx->cr4 = u64Val; 6836 7554 } 6837 7555 … … 6846 7564 if (pCtx->cr3 != u64Val) 6847 7565 { 6848 CPUMSetGuestCR3(pVCpu, u64Val);7566 pCtx->cr3 = u64Val; 6849 7567 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3); 6850 7568 } 6851 7569 6852 7570 /* If the guest is in PAE mode, sync back the PDPE's into the guest state. 6853 Note: CR4.PAE, CR0.PG, EFER bitchanges are always intercepted, so they're up to date. */7571 Note: CR4.PAE, CR0.PG, EFER MSR changes are always intercepted, so they're up to date. 
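The CR0/CR4 import above reconstructs the guest-visible control register from the VMCS guest value, the read shadow and the guest/host mask. The merge is just the following bit expression; the function name is illustrative.

#include <stdint.h>

/* Bits set in fMask are host-owned: the guest sees the read-shadow value for those bits,
   and the real VMCS guest value for all other bits. */
static uint64_t sketchMergeCrWithShadow(uint64_t uGuestCr, uint64_t uReadShadow, uint64_t fMask)
{
    return (uGuestCr & ~fMask) | (uReadShadow & fMask);
}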
*/ 6854 7572 if (CPUMIsGuestInPAEModeEx(pCtx)) 6855 7573 { … … 6884 7602 ASMSetFlags(fEFlags); 6885 7603 6886 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatImportGuestState, x); 7604 STAM_PROFILE_ADV_STOP(& pVCpu->hm.s.StatImportGuestState, x); 7605 7606 if (RT_SUCCESS(rc)) 7607 { /* likely */ } 7608 else 7609 return rc; 6887 7610 6888 7611 /* … … 6929 7652 VMMR0DECL(int) VMXR0ImportStateOnDemand(PVMCPU pVCpu, uint64_t fWhat) 6930 7653 { 6931 return hmR0VmxImportGuestState(pVCpu, fWhat); 7654 PCVMXVMCSINFO pVmcsInfo = hmGetVmxActiveVmcsInfo(pVCpu); 7655 return hmR0VmxImportGuestState(pVCpu, pVmcsInfo, fWhat); 6932 7656 } 6933 7657 … … 6950 7674 * 6951 7675 * @param pVCpu The cross context virtual CPU structure. 6952 * @param fStepping Running in hmR0VmxRunGuestCodeStep(). 7676 * @param fStepping Whether we are single-stepping the guest using the 7677 * hypervisor debugger. 6953 7678 */ 6954 7679 static VBOXSTRICTRC hmR0VmxCheckForceFlags(PVMCPU pVCpu, bool fStepping) … … 7048 7773 if (enmTrpmEvent == TRPM_TRAP) 7049 7774 { 7775 /** @todo r=ramshankar: TRPM currently offers no way to determine a #DB that was 7776 * generated using INT1 (ICEBP). */ 7050 7777 switch (uVector) 7051 7778 { … … 7076 7803 u32IntInfo |= (VMX_EXIT_INT_INFO_TYPE_EXT_INT << VMX_EXIT_INT_INFO_TYPE_SHIFT); 7077 7804 else if (enmTrpmEvent == TRPM_SOFTWARE_INT) 7078 u32IntInfo |= (VMX_EXIT_INT_INFO_TYPE_SW_INT << VMX_EXIT_INT_INFO_TYPE_SHIFT); 7805 { 7806 switch (uVector) 7807 { 7808 case X86_XCPT_BP: 7809 case X86_XCPT_OF: 7810 u32IntInfo |= (VMX_EXIT_INT_INFO_TYPE_SW_XCPT << VMX_EXIT_INT_INFO_TYPE_SHIFT); 7811 break; 7812 7813 default: 7814 Assert(uVector == X86_XCPT_DB); 7815 u32IntInfo |= (VMX_EXIT_INT_INFO_TYPE_SW_INT << VMX_EXIT_INT_INFO_TYPE_SHIFT); 7816 break; 7817 } 7818 } 7079 7819 else 7080 7820 AssertMsgFailed(("Invalid TRPM event type %d\n", enmTrpmEvent)); … … 7092 7832 * Converts the pending HM event into a TRPM trap. 7093 7833 * 7094 * @param pVCpu 7834 * @param pVCpu The cross context virtual CPU structure. 7095 7835 */ 7096 7836 static void hmR0VmxPendingEventToTrpmTrap(PVMCPU pVCpu) … … 7106 7846 Assert(TRPMQueryTrap(pVCpu, NULL /* pu8TrapNo */, NULL /* pEnmType */) == VERR_TRPM_NO_ACTIVE_TRAP); 7107 7847 7848 /** @todo Use HMVmxEventToTrpmEventType() later. */ 7108 7849 TRPMEVENT enmTrapType; 7109 7850 switch (uVectorType) … … 7113 7854 break; 7114 7855 7856 case VMX_IDT_VECTORING_INFO_TYPE_NMI: 7857 case VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT: 7858 enmTrapType = TRPM_TRAP; 7859 break; 7860 7861 case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT: /* #DB (INT1/ICEBP). 
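hmR0VmxTrpmTrapToPendingEvent above composes the VM-entry interruption-information word from a vector and an event type. A standalone sketch of that composition follows, with local constants that follow the Intel SDM field layout (bits 7:0 vector, bits 10:8 type, bit 11 deliver-error-code, bit 31 valid) rather than the VMX_ENTRY_INT_INFO_* macros.

#include <stdbool.h>
#include <stdint.h>

/* Local stand-ins for the VM-entry interruption-information layout (Intel SDM). */
#define SKETCH_INT_INFO_TYPE_SHIFT      8
#define SKETCH_INT_INFO_ERR_CODE_VALID  UINT32_C(0x00000800)
#define SKETCH_INT_INFO_VALID           UINT32_C(0x80000000)

#define SKETCH_INT_INFO_TYPE_EXT_INT    0  /* External interrupt. */
#define SKETCH_INT_INFO_TYPE_NMI        2  /* NMI. */
#define SKETCH_INT_INFO_TYPE_HW_XCPT    3  /* Hardware exception. */
#define SKETCH_INT_INFO_TYPE_SW_INT     4  /* Software interrupt (INT n). */
#define SKETCH_INT_INFO_TYPE_SW_XCPT    6  /* Software exception (INT3/INTO). */

/* Composes an interruption-information field for the given vector and event type. */
static uint32_t sketchMakeIntInfo(uint8_t uVector, uint32_t uType, bool fErrCodeValid)
{
    uint32_t u32IntInfo = uVector
                        | (uType << SKETCH_INT_INFO_TYPE_SHIFT)
                        | SKETCH_INT_INFO_VALID;
    if (fErrCodeValid)
        u32IntInfo |= SKETCH_INT_INFO_ERR_CODE_VALID;
    return u32IntInfo;
}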
*/ 7862 Assert(uVector == X86_XCPT_DB); 7863 enmTrapType = TRPM_SOFTWARE_INT; 7864 break; 7865 7866 case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT: /* #BP (INT3) and #OF (INTO) */ 7867 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF); 7868 enmTrapType = TRPM_SOFTWARE_INT; 7869 break; 7870 7115 7871 case VMX_IDT_VECTORING_INFO_TYPE_SW_INT: 7116 7872 enmTrapType = TRPM_SOFTWARE_INT; 7117 break;7118 7119 case VMX_IDT_VECTORING_INFO_TYPE_NMI:7120 case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:7121 case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT: /* #BP and #OF */7122 case VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT:7123 enmTrapType = TRPM_TRAP;7124 7873 break; 7125 7874 … … 7140 7889 if ( uVectorType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT 7141 7890 && uVector == X86_XCPT_PF) 7142 {7143 7891 TRPMSetFaultAddress(pVCpu, pVCpu->hm.s.Event.GCPtrFaultAddress); 7144 } 7145 else if ( uVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT 7146 || uVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT 7147 || uVectorType == VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT) 7148 { 7149 AssertMsg( uVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT 7150 || (uVector == X86_XCPT_BP || uVector == X86_XCPT_OF), 7151 ("Invalid vector: uVector=%#x uVectorType=%#x\n", uVector, uVectorType)); 7892 else if (enmTrapType == TRPM_SOFTWARE_INT) 7152 7893 TRPMSetInstrLength(pVCpu, pVCpu->hm.s.Event.cbInstr); 7153 }7154 7155 /* Clear the events from the VMCS. */7156 VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, 0);7157 7894 7158 7895 /* We're now done converting the pending event. */ 7159 7896 pVCpu->hm.s.Event.fPending = false; 7897 } 7898 7899 7900 /** 7901 * Sets the interrupt-window exiting control in the VMCS which instructs VT-x to 7902 * cause a VM-exit as soon as the guest is in a state to receive interrupts. 7903 * 7904 * @param pVCpu The cross context virtual CPU structure. 7905 * @param pVmcsInfo The VMCS info. object. 7906 */ 7907 static void hmR0VmxSetIntWindowExitVmcs(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo) 7908 { 7909 if (pVCpu->CTX_SUFF(pVM)->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_PROC_CTLS_INT_WINDOW_EXIT) 7910 { 7911 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT)) 7912 { 7913 pVmcsInfo->u32ProcCtls |= VMX_PROC_CTLS_INT_WINDOW_EXIT; 7914 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls); 7915 AssertRC(rc); 7916 } 7917 } /* else we will deliver interrupts whenever the guest Vm-exits next and is in a state to receive the interrupt. */ 7918 } 7919 7920 7921 /** 7922 * Clears the interrupt-window exiting control in the VMCS. 7923 * 7924 * @param pVmcsInfo The VMCS info. object. 7925 */ 7926 DECLINLINE(int) hmR0VmxClearIntWindowExitVmcs(PVMXVMCSINFO pVmcsInfo) 7927 { 7928 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT) 7929 { 7930 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_INT_WINDOW_EXIT; 7931 return VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls); 7932 } 7933 return VINF_SUCCESS; 7934 } 7935 7936 7937 /** 7938 * Sets the NMI-window exiting control in the VMCS which instructs VT-x to 7939 * cause a VM-exit as soon as the guest is in a state to receive NMIs. 7940 * 7941 * @param pVCpu The cross context virtual CPU structure. 7942 * @param pVmcsInfo The VMCS info. object. 
7943 */ 7944 static void hmR0VmxSetNmiWindowExitVmcs(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo) 7945 { 7946 if (pVCpu->CTX_SUFF(pVM)->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_PROC_CTLS_NMI_WINDOW_EXIT) 7947 { 7948 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT)) 7949 { 7950 pVmcsInfo->u32ProcCtls |= VMX_PROC_CTLS_NMI_WINDOW_EXIT; 7951 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls); 7952 AssertRC(rc); 7953 Log4Func(("Setup NMI-window exiting\n")); 7954 } 7955 } /* else we will deliver NMIs whenever we VM-exit next, even possibly nesting NMIs. Can't be helped on ancient CPUs. */ 7956 } 7957 7958 7959 /** 7960 * Clears the NMI-window exiting control in the VMCS. 7961 * 7962 * @param pVmcsInfo The VMCS info. object. 7963 */ 7964 DECLINLINE(int) hmR0VmxClearNmiWindowExitVmcs(PVMXVMCSINFO pVmcsInfo) 7965 { 7966 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT) 7967 { 7968 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_NMI_WINDOW_EXIT; 7969 return VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls); 7970 } 7971 return VINF_SUCCESS; 7160 7972 } 7161 7973 … … 7186 7998 7187 7999 /* Save the guest state if necessary. */ 8000 PVMXVMCSINFO pVmcsInfo = hmGetVmxActiveVmcsInfo(pVCpu); 7188 8001 if (fImportState) 7189 8002 { 7190 int rc = hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);8003 int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL); 7191 8004 AssertRCReturn(rc, rc); 7192 8005 } … … 7199 8012 #ifdef VBOX_STRICT 7200 8013 if (CPUMIsHyperDebugStateActive(pVCpu)) 7201 Assert(pV Cpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_MOV_DR_EXIT);8014 Assert(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_MOV_DR_EXIT); 7202 8015 #endif 7203 8016 CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, true /* save DR6 */); … … 7222 8035 if (!fImportState) 7223 8036 { 7224 int rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_KERNEL_GS_BASE | CPUMCTX_EXTRN_SYSCALL_MSRS);8037 int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_KERNEL_GS_BASE | CPUMCTX_EXTRN_SYSCALL_MSRS); 7225 8038 AssertRCReturn(rc, rc); 7226 8039 } … … 7232 8045 7233 8046 /* Update auto-load/store host MSRs values when we re-enter VT-x (as we could be on a different CPU). */ 7234 pVCpu->hm.s.vmx.fUpdatedHost Msrs = false;8047 pVCpu->hm.s.vmx.fUpdatedHostAutoMsrs = false; 7235 8048 7236 8049 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatEntry); … … 7251 8064 * context. 7252 8065 */ 7253 if (pVCpu->hm.s.vmx.fVmcsState & HMVMX_VMCS_STATE_ACTIVE) 7254 { 7255 int rc = VMXClearVmcs(pVCpu->hm.s.vmx.HCPhysVmcs); 7256 AssertRCReturn(rc, rc); 7257 7258 pVCpu->hm.s.vmx.fVmcsState = HMVMX_VMCS_STATE_CLEAR; 7259 Log4Func(("Cleared Vmcs. HostCpuId=%u\n", idCpu)); 7260 } 7261 Assert(!(pVCpu->hm.s.vmx.fVmcsState & HMVMX_VMCS_STATE_LAUNCHED)); 8066 int rc = hmR0VmxClearVmcs(pVmcsInfo); 8067 AssertRCReturn(rc, rc); 8068 8069 Log4Func(("Cleared Vmcs. 
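The interrupt-window and NMI-window helpers above follow the same pattern: flip a bit in the cached processor-based VM-execution controls and only perform the VMWRITE when the cached value actually changes. A generic sketch of that pattern, with the VMWRITE stubbed out:

#include <stdint.h>

/* Placeholder for the real VMWRITE of the processor-based VM-execution controls. */
static void sketchWriteProcCtls(uint32_t uProcCtls) { (void)uProcCtls; }

/* Sets a window-exiting control bit, writing the VMCS only if the cached value changes.
   fCtlBit would be e.g. the interrupt-window or NMI-window exiting bit. */
static void sketchSetWindowExit(uint32_t *puCachedProcCtls, uint32_t fCtlBit)
{
    if (!(*puCachedProcCtls & fCtlBit))
    {
        *puCachedProcCtls |= fCtlBit;
        sketchWriteProcCtls(*puCachedProcCtls);
    }
}

/* Clears a window-exiting control bit using the same cached read-modify-write scheme. */
static void sketchClearWindowExit(uint32_t *puCachedProcCtls, uint32_t fCtlBit)
{
    if (*puCachedProcCtls & fCtlBit)
    {
        *puCachedProcCtls &= ~fCtlBit;
        sketchWriteProcCtls(*puCachedProcCtls);
    }
}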
HostCpuId=%u\n", idCpu)); 7262 8070 NOREF(idCpu); 7263 7264 8071 return VINF_SUCCESS; 7265 8072 } … … 7342 8149 HMVMX_ASSERT_PREEMPT_SAFE(pVCpu); 7343 8150 8151 PVMXVMCSINFO pVmcsInfo = hmGetVmxActiveVmcsInfo(pVCpu); 7344 8152 if (RT_UNLIKELY(rcExit == VERR_VMX_INVALID_VMCS_PTR)) 7345 8153 { 7346 VMXGet ActivatedVmcs(&pVCpu->hm.s.vmx.LastError.u64VmcsPhys);7347 pVCpu->hm.s.vmx.LastError.u32VmcsRev = *(uint32_t *)pV Cpu->hm.s.vmx.pvVmcs;8154 VMXGetCurrentVmcs(&pVCpu->hm.s.vmx.LastError.HCPhysCurrentVmcs); 8155 pVCpu->hm.s.vmx.LastError.u32VmcsRev = *(uint32_t *)pVmcsInfo->pvVmcs; 7348 8156 pVCpu->hm.s.vmx.LastError.idEnteredCpu = pVCpu->hm.s.idEnteredCpu; 7349 8157 /* LastError.idCurrentCpu was updated in hmR0VmxPreRunGuestCommitted(). */ … … 7354 8162 Log4Func(("rcExit=%d\n", VBOXSTRICTRC_VAL(rcExit))); 7355 8163 7356 /* We need to do this only while truly exiting the "inner loop" back to ring-3 and -not- for any longjmp to ring3. */ 8164 /* 8165 * Convert any pending HM events back to TRPM due to premature exits to ring-3. 8166 * We need to do this only on returns to ring-3 and not for longjmps to ring3. 8167 * 8168 * This is because execution may continue from ring-3 and we would need to inject 8169 * the event from there (hence place it back in TRPM). 8170 */ 7357 8171 if (pVCpu->hm.s.Event.fPending) 7358 8172 { 7359 8173 hmR0VmxPendingEventToTrpmTrap(pVCpu); 7360 8174 Assert(!pVCpu->hm.s.Event.fPending); 7361 } 7362 7363 /* Clear interrupt-window and NMI-window controls as we re-evaluate it when we return from ring-3. */ 7364 hmR0VmxClearIntNmiWindowsVmcs(pVCpu); 8175 8176 /* Clear the events from the VMCS. */ 8177 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, 0); 8178 AssertRCReturn(rc, rc); 8179 } 8180 #ifdef VBOX_STRICT 8181 else 8182 { 8183 /* 8184 * Ensure we don't accidentally clear a pending HM event without clearing the VMCS. 8185 * This can be pretty hard to debug otherwise, interrupts might get injected twice 8186 * occasionally, see @bugref{9180#c42}. 8187 */ 8188 uint32_t uEntryIntInfo; 8189 int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &uEntryIntInfo); 8190 AssertRC(rc); 8191 Assert(!VMX_ENTRY_INT_INFO_IS_VALID(uEntryIntInfo)); 8192 } 8193 #endif 8194 8195 /* 8196 * Clear the interrupt-window and NMI-window VMCS controls as we could have got 8197 * a VM-exit with higher priority than interrupt-window or NMI-window VM-exits 8198 * (e.g. TPR below threshold). 8199 */ 8200 int rc = hmR0VmxClearIntWindowExitVmcs(pVmcsInfo); 8201 rc |= hmR0VmxClearNmiWindowExitVmcs(pVmcsInfo); 8202 AssertRCReturn(rc, rc); 7365 8203 7366 8204 /* If we're emulating an instruction, we shouldn't have any TRPM traps pending … … 7372 8210 7373 8211 /* Save guest state and restore host state bits. */ 7374 intrc = hmR0VmxLeaveSession(pVCpu);8212 rc = hmR0VmxLeaveSession(pVCpu); 7375 8213 AssertRCReturn(rc, rc); 7376 8214 STAM_COUNTER_DEC(&pVCpu->hm.s.StatSwitchLongJmpToR3); 8215 7377 8216 /* Thread-context hooks are unregistered at this point!!! */ 7378 8217 … … 7387 8226 if ( pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging 7388 8227 && CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)) 7389 {7390 8228 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_GLOBAL_TLB_FLUSH); 7391 }7392 8229 7393 8230 Assert(!pVCpu->hm.s.fClearTrapFlag); … … 7397 8234 7398 8235 /* On our way back from ring-3 reload the guest state if there is a possibility of it being changed. 
*/ 7399 if (rcExit != VINF_EM_RAW_INTERRUPT) 8236 if ( rcExit != VINF_EM_RAW_INTERRUPT 8237 || CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx)) 8238 { 8239 Assert(!(pVCpu->cpum.GstCtx.fExtrn & HMVMX_CPUMCTX_EXTRN_ALL)); 7400 8240 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST); 8241 } 7401 8242 7402 8243 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchExitToR3); … … 7434 8275 RTThreadPreemptDisable(&PreemptState); 7435 8276 7436 hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL); 8277 PVMXVMCSINFO pVmcsInfo = hmGetVmxActiveVmcsInfo(pVCpu); 8278 hmR0VmxImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL); 7437 8279 CPUMR0FpuStateMaybeSaveGuestAndRestoreHost(pVCpu); 7438 8280 CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, true /* save DR6 */); … … 7440 8282 #if HC_ARCH_BITS == 64 7441 8283 /* Restore host-state bits that VT-x only restores partially. */ 7442 if ( (pVCpu->hm.s.vmx.fRestoreHostFlags & VMX_RESTORE_HOST_REQUIRED)8284 if ( (pVCpu->hm.s.vmx.fRestoreHostFlags & VMX_RESTORE_HOST_REQUIRED) 7443 8285 && (pVCpu->hm.s.vmx.fRestoreHostFlags & ~VMX_RESTORE_HOST_REQUIRED)) 7444 8286 VMXRestoreHostState(pVCpu->hm.s.vmx.fRestoreHostFlags, &pVCpu->hm.s.vmx.RestoreHost); … … 7451 8293 7452 8294 /* Update auto-load/store host MSRs values when we re-enter VT-x (as we could be on a different CPU). */ 7453 pVCpu->hm.s.vmx.fUpdatedHost Msrs = false;8295 pVCpu->hm.s.vmx.fUpdatedHostAutoMsrs = false; 7454 8296 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HM, VMCPUSTATE_STARTED_EXEC); 7455 if (pVCpu->hm.s.vmx.fVmcsState & HMVMX_VMCS_STATE_ACTIVE) 7456 { 7457 VMXClearVmcs(pVCpu->hm.s.vmx.HCPhysVmcs); 7458 pVCpu->hm.s.vmx.fVmcsState = HMVMX_VMCS_STATE_CLEAR; 7459 } 8297 8298 /* Clear the current VMCS data back to memory. */ 8299 hmR0VmxClearVmcs(pVmcsInfo); 7460 8300 7461 8301 /** @todo eliminate the need for calling VMMR0ThreadCtxHookDisable here! */ … … 7481 8321 VMMRZCallRing3Enable(pVCpu); 7482 8322 return VINF_SUCCESS; 7483 }7484 7485 7486 /**7487 * Sets the interrupt-window exiting control in the VMCS which instructs VT-x to7488 * cause a VM-exit as soon as the guest is in a state to receive interrupts.7489 *7490 * @param pVCpu The cross context virtual CPU structure.7491 */7492 DECLINLINE(void) hmR0VmxSetIntWindowExitVmcs(PVMCPU pVCpu)7493 {7494 if (RT_LIKELY(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_PROC_CTLS_INT_WINDOW_EXIT))7495 {7496 if (!(pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT))7497 {7498 pVCpu->hm.s.vmx.Ctls.u32ProcCtls |= VMX_PROC_CTLS_INT_WINDOW_EXIT;7499 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.Ctls.u32ProcCtls);7500 AssertRC(rc);7501 Log4Func(("Setup interrupt-window exiting\n"));7502 }7503 } /* else we will deliver interrupts whenever the guest exits next and is in a state to receive events. 
*/7504 }7505 7506 7507 /**7508 * Clears the interrupt-window exiting control in the VMCS.7509 *7510 * @param pVCpu The cross context virtual CPU structure.7511 */7512 DECLINLINE(void) hmR0VmxClearIntWindowExitVmcs(PVMCPU pVCpu)7513 {7514 Assert(pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT);7515 pVCpu->hm.s.vmx.Ctls.u32ProcCtls &= ~VMX_PROC_CTLS_INT_WINDOW_EXIT;7516 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.Ctls.u32ProcCtls);7517 AssertRC(rc);7518 Log4Func(("Cleared interrupt-window exiting\n"));7519 }7520 7521 7522 /**7523 * Sets the NMI-window exiting control in the VMCS which instructs VT-x to7524 * cause a VM-exit as soon as the guest is in a state to receive NMIs.7525 *7526 * @param pVCpu The cross context virtual CPU structure.7527 */7528 DECLINLINE(void) hmR0VmxSetNmiWindowExitVmcs(PVMCPU pVCpu)7529 {7530 if (RT_LIKELY(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_PROC_CTLS_NMI_WINDOW_EXIT))7531 {7532 if (!(pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))7533 {7534 pVCpu->hm.s.vmx.Ctls.u32ProcCtls |= VMX_PROC_CTLS_NMI_WINDOW_EXIT;7535 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.Ctls.u32ProcCtls);7536 AssertRC(rc);7537 Log4Func(("Setup NMI-window exiting\n"));7538 }7539 } /* else we will deliver NMIs whenever we VM-exit next, even possibly nesting NMIs. Can't be helped on ancient CPUs. */7540 }7541 7542 7543 /**7544 * Clears the NMI-window exiting control in the VMCS.7545 *7546 * @param pVCpu The cross context virtual CPU structure.7547 */7548 DECLINLINE(void) hmR0VmxClearNmiWindowExitVmcs(PVMCPU pVCpu)7549 {7550 Assert(pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT);7551 pVCpu->hm.s.vmx.Ctls.u32ProcCtls &= ~VMX_PROC_CTLS_NMI_WINDOW_EXIT;7552 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.Ctls.u32ProcCtls);7553 AssertRC(rc);7554 Log4Func(("Cleared NMI-window exiting\n"));7555 }7556 7557 7558 /**7559 * Evaluates the event to be delivered to the guest and sets it as the pending7560 * event.7561 *7562 * @returns The VT-x guest-interruptibility state.7563 * @param pVCpu The cross context virtual CPU structure.7564 */7565 static uint32_t hmR0VmxEvaluatePendingEvent(PVMCPU pVCpu)7566 {7567 /* Get the current interruptibility-state of the guest and then figure out what can be injected. */7568 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;7569 uint32_t const fIntrState = hmR0VmxGetGuestIntrState(pVCpu);7570 bool const fBlockMovSS = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS);7571 bool const fBlockSti = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI);7572 bool const fBlockNmi = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI);7573 7574 Assert(!fBlockSti || !(ASMAtomicUoReadU64(&pCtx->fExtrn) & CPUMCTX_EXTRN_RFLAGS));7575 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI)); /* We don't support block-by-SMI yet.*/7576 Assert(!fBlockSti || pCtx->eflags.Bits.u1IF); /* Cannot set block-by-STI when interrupts are disabled. */7577 Assert(!TRPMHasTrap(pVCpu));7578 7579 /*7580 * Toggling of interrupt force-flags here is safe since we update TRPM on premature exits7581 * to ring-3 before executing guest code, see hmR0VmxExitToRing3(). We must NOT restore these force-flags.7582 */7583 /** @todo SMI. SMIs take priority over NMIs. */7584 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI)) /* NMI. NMIs take priority over regular interrupts. */7585 {7586 /* On some CPUs block-by-STI also blocks NMIs. See Intel spec. 
26.3.1.5 "Checks On Guest Non-Register State". */7587 if ( !pVCpu->hm.s.Event.fPending7588 && !fBlockNmi7589 && !fBlockSti7590 && !fBlockMovSS)7591 {7592 hmR0VmxSetPendingXcptNmi(pVCpu);7593 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);7594 Log4Func(("Pending NMI\n"));7595 }7596 else7597 hmR0VmxSetNmiWindowExitVmcs(pVCpu);7598 }7599 /*7600 * Check if the guest can receive external interrupts (PIC/APIC). Once PDMGetInterrupt() returns7601 * a valid interrupt we must- deliver the interrupt. We can no longer re-request it from the APIC.7602 */7603 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)7604 && !pVCpu->hm.s.fSingleInstruction)7605 {7606 Assert(!DBGFIsStepping(pVCpu));7607 int rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_RFLAGS);7608 AssertRCReturn(rc, 0);7609 bool const fBlockInt = !(pCtx->eflags.u32 & X86_EFL_IF);7610 if ( !pVCpu->hm.s.Event.fPending7611 && !fBlockInt7612 && !fBlockSti7613 && !fBlockMovSS)7614 {7615 uint8_t u8Interrupt;7616 rc = PDMGetInterrupt(pVCpu, &u8Interrupt);7617 if (RT_SUCCESS(rc))7618 {7619 hmR0VmxSetPendingExtInt(pVCpu, u8Interrupt);7620 Log4Func(("Pending external interrupt vector %#x\n", u8Interrupt));7621 }7622 else if (rc == VERR_APIC_INTR_MASKED_BY_TPR)7623 {7624 if (pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)7625 hmR0VmxApicSetTprThreshold(pVCpu, u8Interrupt >> 4);7626 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchTprMaskedIrq);7627 7628 /*7629 * If the CPU doesn't have TPR shadowing, we will always get a VM-exit on TPR changes and7630 * APICSetTpr() will end up setting the VMCPU_FF_INTERRUPT_APIC if required, so there is no7631 * need to re-set this force-flag here.7632 */7633 }7634 else7635 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchGuestIrq);7636 }7637 else7638 hmR0VmxSetIntWindowExitVmcs(pVCpu);7639 }7640 7641 return fIntrState;7642 }7643 7644 7645 /**7646 * Injects any pending events into the guest if the guest is in a state to7647 * receive them.7648 *7649 * @returns Strict VBox status code (i.e. informational status codes too).7650 * @param pVCpu The cross context virtual CPU structure.7651 * @param fIntrState The VT-x guest-interruptibility state.7652 * @param fStepping Running in hmR0VmxRunGuestCodeStep() and we should7653 * return VINF_EM_DBG_STEPPED if the event was7654 * dispatched directly.7655 */7656 static VBOXSTRICTRC hmR0VmxInjectPendingEvent(PVMCPU pVCpu, uint32_t fIntrState, bool fStepping)7657 {7658 HMVMX_ASSERT_PREEMPT_SAFE(pVCpu);7659 Assert(VMMRZCallRing3IsEnabled(pVCpu));7660 7661 bool const fBlockMovSS = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS);7662 bool const fBlockSti = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI);7663 7664 Assert(!fBlockSti || !(ASMAtomicUoReadU64(&pVCpu->cpum.GstCtx.fExtrn) & CPUMCTX_EXTRN_RFLAGS));7665 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI)); /* We don't support block-by-SMI yet.*/7666 Assert(!fBlockSti || pVCpu->cpum.GstCtx.eflags.Bits.u1IF); /* Cannot set block-by-STI when interrupts are disabled. */7667 Assert(!TRPMHasTrap(pVCpu));7668 7669 VBOXSTRICTRC rcStrict = VINF_SUCCESS;7670 if (pVCpu->hm.s.Event.fPending)7671 {7672 /*7673 * Do -not- clear any interrupt-window exiting control here. We might have an interrupt7674 * pending even while injecting an event and in this case, we want a VM-exit as soon as7675 * the guest is ready for the next interrupt, see @bugref{6208#c45}.7676 *7677 * See Intel spec. 
26.6.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".7678 */7679 uint32_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(pVCpu->hm.s.Event.u64IntInfo);7680 #ifdef VBOX_STRICT7681 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)7682 {7683 bool const fBlockInt = !(pVCpu->cpum.GstCtx.eflags.u32 & X86_EFL_IF);7684 Assert(!fBlockInt);7685 Assert(!fBlockSti);7686 Assert(!fBlockMovSS);7687 }7688 else if (uIntType == VMX_ENTRY_INT_INFO_TYPE_NMI)7689 {7690 bool const fBlockNmi = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI);7691 Assert(!fBlockSti);7692 Assert(!fBlockMovSS);7693 Assert(!fBlockNmi);7694 }7695 #endif7696 Log4(("Injecting pending event vcpu[%RU32] u64IntInfo=%#RX64 Type=%#RX32\n", pVCpu->idCpu, pVCpu->hm.s.Event.u64IntInfo,7697 uIntType));7698 7699 /*7700 * Inject the event and get any changes to the guest-interruptibility state.7701 *7702 * The guest-interruptibility state may need to be updated if we inject the event7703 * into the guest IDT ourselves (for real-on-v86 guest injecting software interrupts).7704 */7705 rcStrict = hmR0VmxInjectEventVmcs(pVCpu, pVCpu->hm.s.Event.u64IntInfo, pVCpu->hm.s.Event.cbInstr,7706 pVCpu->hm.s.Event.u32ErrCode, pVCpu->hm.s.Event.GCPtrFaultAddress, fStepping,7707 &fIntrState);7708 AssertRCReturn(VBOXSTRICTRC_VAL(rcStrict), rcStrict);7709 7710 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)7711 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectInterrupt);7712 else7713 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectXcpt);7714 }7715 7716 /*7717 * Update the guest-interruptibility state.7718 *7719 * This is required for the real-on-v86 software interrupt injection case above, as well as7720 * updates to the guest state from ring-3 or IEM/REM.7721 */7722 int rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_INT_STATE, fIntrState);7723 AssertRCReturn(rc, rc);7724 7725 /*7726 * There's no need to clear the VM-entry interruption-information field here if we're not7727 * injecting anything. VT-x clears the valid bit on every VM-exit.7728 *7729 * See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".7730 */7731 7732 Assert(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping));7733 NOREF(fBlockMovSS); NOREF(fBlockSti);7734 return rcStrict;7735 }7736 7737 7738 /**7739 * Injects a double-fault (\#DF) exception into the VM.7740 *7741 * @returns Strict VBox status code (i.e. informational status codes too).7742 * @param pVCpu The cross context virtual CPU structure.7743 * @param fStepping Whether we're running in hmR0VmxRunGuestCodeStep()7744 * and should return VINF_EM_DBG_STEPPED if the event7745 * is injected directly (register modified by us, not7746 * by hardware on VM-entry).7747 * @param pfIntrState Pointer to the current guest interruptibility-state.7748 * This interruptibility-state will be updated if7749 * necessary. This cannot not be NULL.7750 */7751 DECLINLINE(VBOXSTRICTRC) hmR0VmxInjectXcptDF(PVMCPU pVCpu, bool fStepping, uint32_t *pfIntrState)7752 {7753 uint32_t const u32IntInfo = X86_XCPT_DF | VMX_EXIT_INT_INFO_VALID7754 | (VMX_EXIT_INT_INFO_TYPE_HW_XCPT << VMX_EXIT_INT_INFO_TYPE_SHIFT)7755 | VMX_EXIT_INT_INFO_ERROR_CODE_VALID;7756 return hmR0VmxInjectEventVmcs(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */, fStepping,7757 pfIntrState);7758 }7759 7760 7761 /**7762 * Injects a general-protection (\#GP) fault into the VM.7763 *7764 * @returns Strict VBox status code (i.e. 
informational status codes too).7765 * @param pVCpu The cross context virtual CPU structure.7766 * @param fErrorCodeValid Whether the error code is valid (depends on the CPU7767 * mode, i.e. in real-mode it's not valid).7768 * @param u32ErrorCode The error code associated with the \#GP.7769 * @param fStepping Whether we're running in7770 * hmR0VmxRunGuestCodeStep() and should return7771 * VINF_EM_DBG_STEPPED if the event is injected7772 * directly (register modified by us, not by7773 * hardware on VM-entry).7774 * @param pfIntrState Pointer to the current guest interruptibility-state.7775 * This interruptibility-state will be updated if7776 * necessary. This cannot not be NULL.7777 */7778 DECLINLINE(VBOXSTRICTRC) hmR0VmxInjectXcptGP(PVMCPU pVCpu, bool fErrorCodeValid, uint32_t u32ErrorCode, bool fStepping,7779 uint32_t *pfIntrState)7780 {7781 uint32_t const u32IntInfo = X86_XCPT_GP | VMX_EXIT_INT_INFO_VALID7782 | (VMX_EXIT_INT_INFO_TYPE_HW_XCPT << VMX_EXIT_INT_INFO_TYPE_SHIFT)7783 | (fErrorCodeValid ? VMX_EXIT_INT_INFO_ERROR_CODE_VALID : 0);7784 return hmR0VmxInjectEventVmcs(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrorCode, 0 /* GCPtrFaultAddress */, fStepping,7785 pfIntrState);7786 8323 } 7787 8324 … … 7822 8359 * 7823 8360 * @param pVCpu The cross context virtual CPU structure. 7824 * @param u64IntInfo The VM-entry interruption-information field. 7825 * @param cbInstr The VM-entry instruction length in bytes (for 7826 * software interrupts, exceptions and privileged 7827 * software exceptions). 7828 * @param u32ErrCode The VM-entry exception error code. 7829 * @param GCPtrFaultAddress The page-fault address for \#PF exceptions. 7830 * @param pfIntrState Pointer to the current guest interruptibility-state. 7831 * This interruptibility-state will be updated if 7832 * necessary. This cannot not be NULL. 7833 * @param fStepping Whether we're running in 7834 * hmR0VmxRunGuestCodeStep() and should return 7835 * VINF_EM_DBG_STEPPED if the event is injected 7836 * directly (register modified by us, not by 8361 * @param pVmxTransient The VMX-transient structure. 8362 * @param pEvent The event being injected. 8363 * @param pfIntrState Pointer to the VT-x guest-interruptibility-state. 8364 * This will be updated if necessary. This cannot not 8365 * be NULL. 8366 * @param fStepping Whether we're single-stepping guest execution and 8367 * should return VINF_EM_DBG_STEPPED if the event is 8368 * injected directly (registers modified by us, not by 7837 8369 * hardware on VM-entry). 7838 8370 */ 7839 static VBOXSTRICTRC hmR0VmxInjectEventVmcs(PVMCPU pVCpu, uint64_t u64IntInfo, uint32_t cbInstr, uint32_t u32ErrCode,7840 RTGCUINTREG GCPtrFaultAddress, bool fStepping,uint32_t *pfIntrState)8371 static VBOXSTRICTRC hmR0VmxInjectEventVmcs(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient, PCHMEVENT pEvent, bool fStepping, 8372 uint32_t *pfIntrState) 7841 8373 { 7842 8374 /* Intel spec. 24.8.3 "VM-Entry Controls for Event Injection" specifies the interruption-information field to be 32-bits. 
*/ 7843 AssertMsg(!RT_HI_U32( u64IntInfo), ("%#RX64\n",u64IntInfo));8375 AssertMsg(!RT_HI_U32(pEvent->u64IntInfo), ("%#RX64\n", pEvent->u64IntInfo)); 7844 8376 Assert(pfIntrState); 7845 8377 7846 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 7847 uint32_t u32IntInfo = (uint32_t)u64IntInfo; 7848 uint32_t const uVector = VMX_ENTRY_INT_INFO_VECTOR(u32IntInfo); 7849 uint32_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(u32IntInfo); 8378 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 8379 uint32_t u32IntInfo = pEvent->u64IntInfo; 8380 uint32_t const u32ErrCode = pEvent->u32ErrCode; 8381 uint32_t const cbInstr = pEvent->cbInstr; 8382 RTGCUINTPTR const GCPtrFault = pEvent->GCPtrFaultAddress; 8383 uint32_t const uVector = VMX_ENTRY_INT_INFO_VECTOR(u32IntInfo); 8384 uint32_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(u32IntInfo); 7850 8385 7851 8386 #ifdef VBOX_STRICT … … 7875 8410 } 7876 8411 } 7877 #endif7878 8412 7879 8413 /* Cannot inject an NMI when block-by-MOV SS is in effect. */ 7880 8414 Assert( uIntType != VMX_EXIT_INT_INFO_TYPE_NMI 7881 8415 || !(*pfIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS)); 8416 #endif 7882 8417 7883 8418 STAM_COUNTER_INC(&pVCpu->hm.s.paStatInjectedIrqsR0[uVector & MASK_INJECT_IRQ_STAT]); … … 7896 8431 { 7897 8432 /* 7898 * For unrestricted execution enabled CPUs running real-mode guests, we must not7899 * set the deliver-error-code bit.8433 * For CPUs with unrestricted guest execution enabled and with the guest 8434 * in real-mode, we must not set the deliver-error-code bit. 7900 8435 * 7901 8436 * See Intel spec. 26.2.1.3 "VM-Entry Control Fields". … … 7908 8443 Assert(PDMVmmDevHeapIsEnabled(pVM)); 7909 8444 Assert(pVM->hm.s.vmx.pRealModeTSS); 8445 Assert(!CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx)); 7910 8446 7911 8447 /* We require RIP, RSP, RFLAGS, CS, IDTR, import them. */ 7912 int rc2 = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_TABLE_MASK | CPUMCTX_EXTRN_RIP 7913 | CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_RFLAGS); 8448 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo; 8449 int rc2 = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_TABLE_MASK 8450 | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_RFLAGS); 7914 8451 AssertRCReturn(rc2, rc2); 7915 8452 … … 7922 8459 return VINF_EM_RESET; 7923 8460 7924 /* If we're injecting a #GP with no valid IDT entry, inject a double-fault. */ 8461 /* If we're injecting a #GP with no valid IDT entry, inject a double-fault. 8462 No error codes for exceptions in real-mode. */ 7925 8463 if (uVector == X86_XCPT_GP) 7926 return hmR0VmxInjectXcptDF(pVCpu, fStepping, pfIntrState); 8464 { 8465 uint32_t const uXcptDfInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DF) 8466 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_HW_XCPT) 8467 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0) 8468 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1); 8469 HMEVENT EventXcptDf; 8470 RT_ZERO(EventXcptDf); 8471 EventXcptDf.u64IntInfo = uXcptDfInfo; 8472 return hmR0VmxInjectEventVmcs(pVCpu, pVmxTransient, &EventXcptDf, fStepping, pfIntrState); 8473 } 7927 8474 7928 8475 /* … … 7932 8479 * See Intel spec. 
20.1.4 "Interrupt and Exception Handling" 7933 8480 */ 7934 return hmR0VmxInjectXcptGP(pVCpu, false /* fErrCodeValid */, 0 /* u32ErrCode */, fStepping, pfIntrState); 8481 uint32_t const uXcptGpInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_GP) 8482 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_HW_XCPT) 8483 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0) 8484 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1); 8485 HMEVENT EventXcptGp; 8486 RT_ZERO(EventXcptGp); 8487 EventXcptGp.u64IntInfo = uXcptGpInfo; 8488 return hmR0VmxInjectEventVmcs(pVCpu, pVmxTransient, &EventXcptGp, fStepping, pfIntrState); 7935 8489 } 7936 8490 … … 7948 8502 /* Get the code segment selector and offset from the IDT entry for the interrupt handler. */ 7949 8503 X86IDTR16 IdtEntry; 7950 RTGCPHYS GCPhysIdtEntry = (RTGCPHYS)pCtx->idtr.pIdt + uVector * cbIdtEntry;8504 RTGCPHYS const GCPhysIdtEntry = (RTGCPHYS)pCtx->idtr.pIdt + uVector * cbIdtEntry; 7951 8505 rc2 = PGMPhysSimpleReadGCPhys(pVM, &IdtEntry, GCPhysIdtEntry, cbIdtEntry); 7952 8506 AssertRCReturn(rc2, rc2); … … 7956 8510 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, pCtx->eflags.u32); 7957 8511 if (rcStrict == VINF_SUCCESS) 8512 { 7958 8513 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, pCtx->cs.Sel); 7959 if (rcStrict == VINF_SUCCESS) 7960 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, uGuestIp); 8514 if (rcStrict == VINF_SUCCESS) 8515 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, uGuestIp); 8516 } 7961 8517 7962 8518 /* Clear the required eflag bits and jump to the interrupt/exception handler. */ … … 7970 8526 if ( uIntType == VMX_ENTRY_INT_INFO_TYPE_HW_XCPT 7971 8527 && uVector == X86_XCPT_PF) 7972 pCtx->cr2 = GCPtrFaultAddress; 7973 7974 /* If any other guest-state bits are changed here, make sure to update 7975 hmR0VmxPreRunGuestCommitted() when thread-context hooks are used. */ 8528 pCtx->cr2 = GCPtrFault; 8529 7976 8530 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_CS | HM_CHANGED_GUEST_CR2 7977 8531 | HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS 7978 8532 | HM_CHANGED_GUEST_RSP); 7979 8533 7980 /* We're clearing interrupts, which means no block-by-STI interrupt-inhibition. */ 8534 /* 8535 * If we delivered a hardware exception (other than an NMI) and if there was 8536 * block-by-STI in effect, we should clear it. 8537 */ 7981 8538 if (*pfIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI) 7982 8539 { … … 7986 8543 *pfIntrState &= ~VMX_VMCS_GUEST_INT_STATE_BLOCK_STI; 7987 8544 } 7988 Log4(("Injecting real-mode: u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x Eflags=%#x CS:EIP=%04x:%04x\n", 8545 8546 Log4(("Injected real-mode: u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x Eflags=%#x CS:EIP=%04x:%04x\n", 7989 8547 u32IntInfo, u32ErrCode, cbInstr, pCtx->eflags.u, pCtx->cs.Sel, pCtx->eip)); 7990 8548 7991 /* The event has been truly dispatched. Mark it as no longer pending so we don't attempt to 'undo' 7992 it, if we are returning to ring-3 before executing guest code. */ 8549 /* 8550 * The event has been truly dispatched to the guest. Mark it as no longer pending so 8551 * we don't attempt to undo it if we are returning to ring-3 before executing guest code. 8552 */ 7993 8553 pVCpu->hm.s.Event.fPending = false; 7994 8554 7995 /* Make hmR0VmxPreRunGuest() return if we're stepping since we've changed cs:rip. */8555 /* If we're stepping and we've changed cs:rip above, bail out of the VMX R0 execution loop. */ 7996 8556 if (fStepping) 7997 8557 rcStrict = VINF_EM_DBG_STEPPED; … … 8003 8563 } 8004 8564 8005 /* Validate. 
*/ 8565 /* 8566 * Validate. 8567 */ 8006 8568 Assert(VMX_ENTRY_INT_INFO_IS_VALID(u32IntInfo)); /* Bit 31 (Valid bit) must be set by caller. */ 8007 8569 Assert(!(u32IntInfo & VMX_BF_ENTRY_INT_INFO_RSVD_12_30_MASK)); /* Bits 30:12 MBZ. */ 8008 8570 8009 /* Inject. */ 8571 /* 8572 * Inject the event into the VMCS. 8573 */ 8010 8574 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, u32IntInfo); 8011 8575 if (VMX_ENTRY_INT_INFO_IS_ERROR_CODE_VALID(u32IntInfo)) … … 8014 8578 AssertRCReturn(rc, rc); 8015 8579 8016 /* Update CR2. */ 8580 /* 8581 * Update guest CR2 if this is a page-fault. 8582 */ 8017 8583 if ( VMX_ENTRY_INT_INFO_TYPE(u32IntInfo) == VMX_EXIT_INT_INFO_TYPE_HW_XCPT 8018 8584 && uVector == X86_XCPT_PF) 8019 pCtx->cr2 = GCPtrFault Address;8585 pCtx->cr2 = GCPtrFault; 8020 8586 8021 8587 Log4(("Injecting u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x CR2=%#RX64\n", u32IntInfo, u32ErrCode, cbInstr, pCtx->cr2)); 8022 8023 8588 return VINF_SUCCESS; 8024 8589 } … … 8026 8591 8027 8592 /** 8028 * Clears the interrupt-window exiting control in the VMCS and if necessary 8029 * clears the current event in the VMCS as well. 8030 * 8031 * @returns VBox status code. 8032 * @param pVCpu The cross context virtual CPU structure. 8033 * 8034 * @remarks Use this function only to clear events that have not yet been 8035 * delivered to the guest but are injected in the VMCS! 8036 * @remarks No-long-jump zone!!! 8037 */ 8038 static void hmR0VmxClearIntNmiWindowsVmcs(PVMCPU pVCpu) 8039 { 8040 if (pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT) 8041 { 8042 hmR0VmxClearIntWindowExitVmcs(pVCpu); 8043 Log4Func(("Cleared interrupt window\n")); 8044 } 8045 8046 if (pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT) 8047 { 8048 hmR0VmxClearNmiWindowExitVmcs(pVCpu); 8049 Log4Func(("Cleared NMI window\n")); 8050 } 8593 * Evaluates the event to be delivered to the guest and sets it as the pending 8594 * event. 8595 * 8596 * @returns Strict VBox status code (i.e. informational status codes too). 8597 * @param pVCpu The cross context virtual CPU structure. 8598 * @param pVmcsInfo The VMCS info. object. 8599 * @param pVmxTransient The VMX-transient structure. 8600 * @param pfIntrState Where to store the VT-x guest-interruptibility state. 8601 */ 8602 static VBOXSTRICTRC hmR0VmxEvaluatePendingEvent(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient, uint32_t *pfIntrState) 8603 { 8604 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 8605 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo; 8606 8607 /* Get the current interruptibility-state of the guest and then figure out what can be injected. */ 8608 uint32_t const fIntrState = hmR0VmxGetGuestIntrState(pVCpu, pVmcsInfo); 8609 bool const fBlockMovSS = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS); 8610 bool const fBlockSti = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI); 8611 bool const fBlockNmi = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI); 8612 8613 Assert(!fBlockSti || !(ASMAtomicUoReadU64(&pCtx->fExtrn) & CPUMCTX_EXTRN_RFLAGS)); 8614 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI)); /* We don't support block-by-SMI yet.*/ 8615 Assert(!fBlockSti || pCtx->eflags.Bits.u1IF); /* Cannot set block-by-STI when interrupts are disabled. 
*/ 8616 Assert(!TRPMHasTrap(pVCpu)); 8617 Assert(pfIntrState); 8618 8619 *pfIntrState = fIntrState; 8620 8621 /* 8622 * Toggling of interrupt force-flags here is safe since we update TRPM on premature exits 8623 * to ring-3 before executing guest code, see hmR0VmxExitToRing3(). We must NOT restore these force-flags. 8624 */ 8625 /** @todo SMI. SMIs take priority over NMIs. */ 8626 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI)) /* NMI. NMIs take priority over regular interrupts. */ 8627 { 8628 /* On some CPUs block-by-STI also blocks NMIs. See Intel spec. 26.3.1.5 "Checks On Guest Non-Register State". */ 8629 if ( !pVCpu->hm.s.Event.fPending 8630 && !fBlockNmi 8631 && !fBlockSti 8632 && !fBlockMovSS) 8633 { 8634 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX 8635 if ( pVmxTransient->fIsNestedGuest 8636 && CPUMIsGuestVmxPinCtlsSet(pVCpu, pCtx, VMX_PIN_CTLS_NMI_EXIT)) 8637 return IEMExecVmxVmexitNmi(pVCpu); 8638 #endif 8639 hmR0VmxSetPendingXcptNmi(pVCpu); 8640 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI); 8641 Log4Func(("Pending NMI\n")); 8642 } 8643 else 8644 hmR0VmxSetNmiWindowExitVmcs(pVCpu, pVmcsInfo); 8645 } 8646 /* 8647 * Check if the guest can receive external interrupts (PIC/APIC). Once PDMGetInterrupt() returns 8648 * a valid interrupt we -must- deliver the interrupt. We can no longer re-request it from the APIC. 8649 */ 8650 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC) 8651 && !pVCpu->hm.s.fSingleInstruction) 8652 { 8653 Assert(!DBGFIsStepping(pVCpu)); 8654 int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_RFLAGS); 8655 AssertRCReturn(rc, rc); 8656 bool const fBlockInt = !(pCtx->eflags.u32 & X86_EFL_IF); 8657 if ( !pVCpu->hm.s.Event.fPending 8658 && !fBlockInt 8659 && !fBlockSti 8660 && !fBlockMovSS) 8661 { 8662 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX 8663 if ( pVmxTransient->fIsNestedGuest 8664 && CPUMIsGuestVmxPinCtlsSet(pVCpu, pCtx, VMX_PIN_CTLS_EXT_INT_EXIT)) 8665 { 8666 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, 0/* uVector */, true /* fIntPending */); 8667 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE) 8668 return rcStrict; 8669 } 8670 #endif 8671 uint8_t u8Interrupt; 8672 rc = PDMGetInterrupt(pVCpu, &u8Interrupt); 8673 if (RT_SUCCESS(rc)) 8674 { 8675 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX 8676 if ( pVmxTransient->fIsNestedGuest 8677 && CPUMIsGuestVmxPinCtlsSet(pVCpu, pCtx, VMX_PIN_CTLS_EXT_INT_EXIT) 8678 && CPUMIsGuestVmxExitCtlsSet(pVCpu, pCtx, VMX_EXIT_CTLS_ACK_EXT_INT)) 8679 { 8680 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, u8Interrupt, false /* fIntPending */); 8681 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE) 8682 return rcStrict; 8683 } 8684 #endif 8685 hmR0VmxSetPendingExtInt(pVCpu, u8Interrupt); 8686 Log4Func(("Pending external interrupt vector %#x\n", u8Interrupt)); 8687 } 8688 else if (rc == VERR_APIC_INTR_MASKED_BY_TPR) 8689 { 8690 if ( !pVmxTransient->fIsNestedGuest 8691 && (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)) 8692 hmR0VmxApicSetTprThreshold(pVCpu, pVmcsInfo, u8Interrupt >> 4); 8693 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchTprMaskedIrq); 8694 8695 /* 8696 * If the CPU doesn't have TPR shadowing, we will always get a VM-exit on TPR changes and 8697 * APICSetTpr() will end up setting the VMCPU_FF_INTERRUPT_APIC if required, so there is no 8698 * need to re-set this force-flag here. 
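The VERR_APIC_INTR_MASKED_BY_TPR branch above programs the TPR threshold with u8Interrupt >> 4, i.e. the vector's priority class. A rough sketch of that relationship; this is simplified, since the real APIC compares against the processor priority (which also accounts for in-service interrupts), not the TPR alone.

#include <stdbool.h>
#include <stdint.h>

/* The priority class of a vector is its upper nibble. */
static uint8_t sketchVectorPriorityClass(uint8_t uVector)
{
    return uVector >> 4;
}

/* Approximation: an interrupt whose class is not above the TPR class is held pending. */
static bool sketchIsMaskedByTpr(uint8_t uVector, uint8_t uTpr)
{
    return sketchVectorPriorityClass(uVector) <= (uint8_t)(uTpr >> 4);
}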
8699 */ 8700 } 8701 else 8702 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchGuestIrq); 8703 } 8704 else 8705 hmR0VmxSetIntWindowExitVmcs(pVCpu, pVmcsInfo); 8706 } 8707 8708 return VINF_SUCCESS; 8709 } 8710 8711 8712 /** 8713 * Injects any pending events into the guest if the guest is in a state to 8714 * receive them. 8715 * 8716 * @returns Strict VBox status code (i.e. informational status codes too). 8717 * @param pVCpu The cross context virtual CPU structure. 8718 * @param pVmxTransient The VMX-transient structure. 8719 * @param fIntrState The VT-x guest-interruptibility state. 8720 * @param fStepping Whether we are single-stepping the guest using the 8721 * hypervisor debugger and should return 8722 * VINF_EM_DBG_STEPPED if the event was dispatched 8723 * directly. 8724 */ 8725 static VBOXSTRICTRC hmR0VmxInjectPendingEvent(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient, uint32_t fIntrState, bool fStepping) 8726 { 8727 HMVMX_ASSERT_PREEMPT_SAFE(pVCpu); 8728 Assert(VMMRZCallRing3IsEnabled(pVCpu)); 8729 8730 bool const fBlockMovSS = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS); 8731 bool const fBlockSti = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI); 8732 8733 Assert(!fBlockSti || !(ASMAtomicUoReadU64(&pVCpu->cpum.GstCtx.fExtrn) & CPUMCTX_EXTRN_RFLAGS)); 8734 Assert(!fBlockSti || pVCpu->cpum.GstCtx.eflags.Bits.u1IF); /* Cannot set block-by-STI when interrupts are disabled. */ 8735 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI)); /* We don't support block-by-SMI yet.*/ 8736 Assert(!TRPMHasTrap(pVCpu)); 8737 8738 VBOXSTRICTRC rcStrict = VINF_SUCCESS; 8739 if (pVCpu->hm.s.Event.fPending) 8740 { 8741 /* 8742 * Do -not- clear any interrupt-window exiting control here. We might have an interrupt 8743 * pending even while injecting an event and in this case, we want a VM-exit as soon as 8744 * the guest is ready for the next interrupt, see @bugref{6208#c45}. 8745 * 8746 * See Intel spec. 26.6.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery". 8747 */ 8748 uint32_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(pVCpu->hm.s.Event.u64IntInfo); 8749 #ifdef VBOX_STRICT 8750 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT) 8751 { 8752 bool const fBlockInt = !(pVCpu->cpum.GstCtx.eflags.u32 & X86_EFL_IF); 8753 Assert(!fBlockInt); 8754 Assert(!fBlockSti); 8755 Assert(!fBlockMovSS); 8756 } 8757 else if (uIntType == VMX_ENTRY_INT_INFO_TYPE_NMI) 8758 { 8759 bool const fBlockNmi = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI); 8760 Assert(!fBlockSti); 8761 Assert(!fBlockMovSS); 8762 Assert(!fBlockNmi); 8763 } 8764 #endif 8765 Log4(("Injecting pending event vcpu[%RU32] u64IntInfo=%#RX64 Type=%#RX32\n", pVCpu->idCpu, pVCpu->hm.s.Event.u64IntInfo, 8766 uIntType)); 8767 8768 /* 8769 * Inject the event and get any changes to the guest-interruptibility state. 8770 * 8771 * The guest-interruptibility state may need to be updated if we inject the event 8772 * into the guest IDT ourselves (for real-on-v86 guest injecting software interrupts). 8773 */ 8774 rcStrict = hmR0VmxInjectEventVmcs(pVCpu, pVmxTransient, &pVCpu->hm.s.Event, fStepping, &fIntrState); 8775 AssertRCReturn(VBOXSTRICTRC_VAL(rcStrict), rcStrict); 8776 8777 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT) 8778 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectInterrupt); 8779 else 8780 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectXcpt); 8781 } 8782 8783 /* 8784 * Update the guest-interruptibility state. 
8785 * 8786 * This is required for the real-on-v86 software interrupt injection case above, as well as 8787 * updates to the guest state from ring-3 or IEM/REM. 8788 */ 8789 int rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_INT_STATE, fIntrState); 8790 AssertRCReturn(rc, rc); 8791 8792 /* 8793 * There's no need to clear the VM-entry interruption-information field here if we're not 8794 * injecting anything. VT-x clears the valid bit on every VM-exit. 8795 * 8796 * See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection". 8797 */ 8798 8799 Assert(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping)); 8800 NOREF(fBlockMovSS); NOREF(fBlockSti); 8801 return rcStrict; 8051 8802 } 8052 8803 … … 8079 8830 8080 8831 /* 8081 * Load the VCPU's VMCS as the current (and active) one. 8082 */ 8083 Assert(pVCpu->hm.s.vmx.fVmcsState & HMVMX_VMCS_STATE_CLEAR); 8084 int rc = VMXActivateVmcs(pVCpu->hm.s.vmx.HCPhysVmcs); 8832 * Load the appropriate VMCS as the current and active one. 8833 */ 8834 PVMXVMCSINFO pVmcsInfo; 8835 bool const fInNestedGuestMode = CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx); 8836 if (!fInNestedGuestMode) 8837 pVmcsInfo = &pVCpu->hm.s.vmx.VmcsInfo; 8838 else 8839 pVmcsInfo = &pVCpu->hm.s.vmx.VmcsInfoNstGst; 8840 int rc = hmR0VmxLoadVmcs(pVmcsInfo); 8085 8841 if (RT_SUCCESS(rc)) 8086 8842 { 8087 pVCpu->hm.s.vmx.f VmcsState = HMVMX_VMCS_STATE_ACTIVE;8843 pVCpu->hm.s.vmx.fSwitchedToNstGstVmcs = fInNestedGuestMode; 8088 8844 pVCpu->hm.s.fLeaveDone = false; 8089 Log4Func((" Activated Vmcs. HostCpuId=%u\n", RTMpCpuId()));8845 Log4Func(("Loaded Vmcs. HostCpuId=%u\n", RTMpCpuId())); 8090 8846 8091 8847 /* … … 8123 8879 Log4Func(("Preempting: HostCpuId=%u\n", RTMpCpuId())); 8124 8880 8125 /* 8126 * Restore host-state (FPU, debug etc.) 8127 */ 8881 /* Restore host-state (FPU, debug etc.) */ 8128 8882 if (!pVCpu->hm.s.fLeaveDone) 8129 8883 { … … 8138 8892 /* Leave HM context, takes care of local init (term). */ 8139 8893 int rc = HMR0LeaveCpu(pVCpu); 8140 AssertRC(rc); NOREF(rc);8894 AssertRC(rc); 8141 8895 8142 8896 /* Restore longjmp state. */ … … 8160 8914 int rc = hmR0EnterCpu(pVCpu); 8161 8915 AssertRC(rc); 8162 Assert( (pVCpu->hm.s.fCtxChanged &(HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE))8163 ==(HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE));8916 Assert( (pVCpu->hm.s.fCtxChanged & (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE)) 8917 == (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE)); 8164 8918 8165 8919 /* Load the active VMCS as the current one. */ 8166 if (pVCpu->hm.s.vmx.fVmcsState & HMVMX_VMCS_STATE_CLEAR) 8167 { 8168 rc = VMXActivateVmcs(pVCpu->hm.s.vmx.HCPhysVmcs); 8169 AssertRC(rc); NOREF(rc); 8170 pVCpu->hm.s.vmx.fVmcsState = HMVMX_VMCS_STATE_ACTIVE; 8171 Log4Func(("Resumed: Activated Vmcs. HostCpuId=%u\n", RTMpCpuId())); 8172 } 8920 PVMXVMCSINFO pVmcsInfo = hmGetVmxActiveVmcsInfo(pVCpu); 8921 rc = hmR0VmxLoadVmcs(pVmcsInfo); 8922 AssertRC(rc); 8923 Log4Func(("Resumed: Loaded Vmcs. HostCpuId=%u\n", RTMpCpuId())); 8173 8924 pVCpu->hm.s.fLeaveDone = false; 8174 8925 … … 8256 9007 * @returns VBox strict status code. 8257 9008 * @retval VINF_EM_RESCHEDULE_REM if we try to emulate non-paged guest code 8258 * without unrestricted guest accessand the VMMDev is not presently9009 * without unrestricted guest execution and the VMMDev is not presently 8259 9010 * mapped (e.g. EFI32). 8260 9011 * 8261 * @param pVCpu The cross context virtual CPU structure. 
9012 * @param pVCpu The cross context virtual CPU structure. 9013 * @param pVmxTransient The VMX-transient structure. 8262 9014 * 8263 9015 * @remarks No-long-jump zone!!! 8264 9016 */ 8265 static VBOXSTRICTRC hmR0VmxExportGuestState(PVMCPU pVCpu )9017 static VBOXSTRICTRC hmR0VmxExportGuestState(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) 8266 9018 { 8267 9019 AssertPtr(pVCpu); 8268 9020 HMVMX_ASSERT_PREEMPT_SAFE(pVCpu); 8269 8270 9021 LogFlowFunc(("pVCpu=%p\n", pVCpu)); 8271 9022 8272 9023 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExportGuestState, x); 8273 9024 8274 /* Determine real-on-v86 mode. */ 8275 pVCpu->hm.s.vmx.RealMode.fRealOnV86Active = false; 8276 if ( !pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fUnrestrictedGuest 8277 && CPUMIsGuestInRealModeEx(&pVCpu->cpum.GstCtx)) 8278 pVCpu->hm.s.vmx.RealMode.fRealOnV86Active = true; 9025 /* 9026 * Determine real-on-v86 mode. 9027 * Used when the guest is in real-mode and unrestricted guest execution is not used. 9028 */ 9029 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo; 9030 if ( pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fUnrestrictedGuest 9031 || !CPUMIsGuestInRealModeEx(&pVCpu->cpum.GstCtx)) 9032 pVmcsInfo->RealMode. fRealOnV86Active = false; 9033 else 9034 { 9035 Assert(!pVmxTransient->fIsNestedGuest); 9036 pVmcsInfo->RealMode.fRealOnV86Active = true; 9037 } 8279 9038 8280 9039 /* … … 8282 9041 * Ideally, assert that the cross-dependent bits are up-to-date at the point of using it. 8283 9042 */ 8284 int rc = hmR0VmxSelectVMRunHandler(pVCpu); 9043 /** @todo r=ramshankar: Move hmR0VmxSelectVMRunHandler inside 9044 * hmR0VmxExportGuestEntryExitCtls and do it conditionally. There shouldn't 9045 * be a need to evaluate this everytime since I'm pretty sure we intercept 9046 * all guest paging mode changes. */ 9047 int rc = hmR0VmxSelectVMRunHandler(pVCpu, pVmxTransient); 8285 9048 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc); 8286 9049 8287 /* This needs to be done after hmR0VmxSelectVMRunHandler() as changing pfnStartVM may require VM-entry control updates. */ 8288 rc = hmR0VmxExportGuestEntryCtls(pVCpu); 9050 rc = hmR0VmxExportGuestEntryExitCtls(pVCpu, pVmxTransient); 8289 9051 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc); 8290 9052 8291 /* This needs to be done after hmR0VmxSelectVMRunHandler() as changing pfnStartVM may require VM-exit control updates. */ 8292 rc = hmR0VmxExportGuestExitCtls(pVCpu); 9053 rc = hmR0VmxExportGuestCR0(pVCpu, pVmxTransient); 8293 9054 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc); 8294 9055 8295 rc = hmR0VmxExportGuestCR0(pVCpu); 8296 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc); 8297 8298 VBOXSTRICTRC rcStrict = hmR0VmxExportGuestCR3AndCR4(pVCpu); 9056 VBOXSTRICTRC rcStrict = hmR0VmxExportGuestCR3AndCR4(pVCpu, pVmxTransient); 8299 9057 if (rcStrict == VINF_SUCCESS) 8300 9058 { /* likely */ } … … 8305 9063 } 8306 9064 8307 rc = hmR0VmxExportGuestSeg mentRegs(pVCpu);9065 rc = hmR0VmxExportGuestSegRegsXdtr(pVCpu, pVmxTransient); 8308 9066 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc); 8309 9067 8310 /* This needs to be done after hmR0VmxExportGuestEntryCtls() and hmR0VmxExportGuestExitCtls() as it 8311 may alter controls if we determine we don't have to swap EFER after all. 
*/ 8312 rc = hmR0VmxExportGuestMsrs(pVCpu); 9068 rc = hmR0VmxExportGuestMsrs(pVCpu, pVmxTransient); 8313 9069 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc); 8314 9070 8315 rc = hmR0VmxExportGuestApicTpr(pVCpu );9071 rc = hmR0VmxExportGuestApicTpr(pVCpu, pVmxTransient); 8316 9072 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc); 8317 9073 8318 rc = hmR0VmxExportGuestXcptIntercepts(pVCpu );9074 rc = hmR0VmxExportGuestXcptIntercepts(pVCpu, pVmxTransient); 8319 9075 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc); 8320 9076 8321 9077 rc = hmR0VmxExportGuestRip(pVCpu); 8322 9078 rc |= hmR0VmxExportGuestRsp(pVCpu); 8323 rc |= hmR0VmxExportGuestRflags(pVCpu );9079 rc |= hmR0VmxExportGuestRflags(pVCpu, pVmxTransient); 8324 9080 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc); 8325 9081 … … 8336 9092 | HM_CHANGED_GUEST_TSC_AUX 8337 9093 | HM_CHANGED_GUEST_OTHER_MSRS 8338 | HM_CHANGED_GUEST_HWVIRT 9094 | HM_CHANGED_GUEST_HWVIRT /* More accurate PLE handling someday? */ 8339 9095 | (HM_CHANGED_KEEPER_STATE_MASK & ~HM_CHANGED_VMX_MASK))); 8340 9096 … … 8347 9103 * Exports the state shared between the host and guest into the VMCS. 8348 9104 * 8349 * @param pVCpu The cross context virtual CPU structure. 9105 * @param pVCpu The cross context virtual CPU structure. 9106 * @param pVmxTransient The VMX-transient structure. 8350 9107 * 8351 9108 * @remarks No-long-jump zone!!! 8352 9109 */ 8353 static void hmR0VmxExportSharedState(PVMCPU pVCpu )9110 static void hmR0VmxExportSharedState(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) 8354 9111 { 8355 9112 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD)); … … 8358 9115 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_DR_MASK) 8359 9116 { 8360 int rc = hmR0VmxExportSharedDebugState(pVCpu );9117 int rc = hmR0VmxExportSharedDebugState(pVCpu, pVmxTransient); 8361 9118 AssertRC(rc); 8362 9119 pVCpu->hm.s.fCtxChanged &= ~HM_CHANGED_GUEST_DR_MASK; … … 8365 9122 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_RFLAGS) 8366 9123 { 8367 rc = hmR0VmxExportGuestRflags(pVCpu );9124 rc = hmR0VmxExportGuestRflags(pVCpu, pVmxTransient); 8368 9125 AssertRC(rc); 8369 9126 } … … 8386 9143 * @returns Strict VBox status code (i.e. informational status codes too). 8387 9144 * @retval VINF_EM_RESCHEDULE_REM if we try to emulate non-paged guest code 8388 * without unrestricted guest accessand the VMMDev is not presently9145 * without unrestricted guest execution and the VMMDev is not presently 8389 9146 * mapped (e.g. EFI32). 8390 9147 * 8391 9148 * @param pVCpu The cross context virtual CPU structure. 9149 * @param pVmxTransient The VMX-transient structure. 8392 9150 * 8393 9151 * @remarks No-long-jump zone!!! 8394 9152 */ 8395 static VBOXSTRICTRC hmR0VmxExportGuestStateOptimal(PVMCPU pVCpu )9153 static VBOXSTRICTRC hmR0VmxExportGuestStateOptimal(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) 8396 9154 { 8397 9155 HMVMX_ASSERT_PREEMPT_SAFE(pVCpu); … … 8416 9174 { /* likely */} 8417 9175 else 8418 AssertMsgFailedReturn((" hmR0VmxExportGuestRip failed! rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)), rcStrict);9176 AssertMsgFailedReturn(("Failed to export guest RIP! 
rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)), rcStrict); 8419 9177 STAM_COUNTER_INC(&pVCpu->hm.s.StatExportMinimal); 8420 9178 } 8421 9179 else if (fCtxChanged & (HM_CHANGED_ALL_GUEST & ~HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE)) 8422 9180 { 8423 rcStrict = hmR0VmxExportGuestState(pVCpu );9181 rcStrict = hmR0VmxExportGuestState(pVCpu, pVmxTransient); 8424 9182 if (RT_LIKELY(rcStrict == VINF_SUCCESS)) 8425 9183 { /* likely */} 8426 9184 else 8427 9185 { 8428 AssertMsg(rcStrict == VINF_EM_RESCHEDULE_REM, (" hmR0VmxExportGuestState failed! rc=%Rrc\n",9186 AssertMsg(rcStrict == VINF_EM_RESCHEDULE_REM, ("Failed to export guest state! rc=%Rrc\n", 8429 9187 VBOXSTRICTRC_VAL(rcStrict))); 8430 9188 Assert(!VMMRZCallRing3IsEnabled(pVCpu)); … … 8448 9206 8449 9207 /** 9208 * Tries to determine what part of the guest-state VT-x has deemed as invalid 9209 * and update error record fields accordingly. 9210 * 9211 * @return VMX_IGS_* return codes. 9212 * @retval VMX_IGS_REASON_NOT_FOUND if this function could not find anything 9213 * wrong with the guest state. 9214 * 9215 * @param pVCpu The cross context virtual CPU structure. 9216 * @param pVmcsInfo The VMCS info. object. 9217 * 9218 * @remarks This function assumes our cache of the VMCS controls 9219 * are valid, i.e. hmR0VmxCheckVmcsCtls() succeeded. 9220 */ 9221 static uint32_t hmR0VmxCheckGuestState(PVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo) 9222 { 9223 #define HMVMX_ERROR_BREAK(err) { uError = (err); break; } 9224 #define HMVMX_CHECK_BREAK(expr, err) if (!(expr)) { \ 9225 uError = (err); \ 9226 break; \ 9227 } else do { } while (0) 9228 9229 int rc; 9230 PVM pVM = pVCpu->CTX_SUFF(pVM); 9231 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 9232 uint32_t uError = VMX_IGS_ERROR; 9233 uint32_t u32Val; 9234 bool const fUnrestrictedGuest = pVM->hm.s.vmx.fUnrestrictedGuest; 9235 9236 do 9237 { 9238 /* 9239 * CR0. 9240 */ 9241 uint32_t fSetCr0 = (uint32_t)(pVM->hm.s.vmx.Msrs.u64Cr0Fixed0 & pVM->hm.s.vmx.Msrs.u64Cr0Fixed1); 9242 uint32_t const fZapCr0 = (uint32_t)(pVM->hm.s.vmx.Msrs.u64Cr0Fixed0 | pVM->hm.s.vmx.Msrs.u64Cr0Fixed1); 9243 /* Exceptions for unrestricted guest execution for fixed CR0 bits (PE, PG). 9244 See Intel spec. 26.3.1 "Checks on Guest Control Registers, Debug Registers and MSRs." */ 9245 if (fUnrestrictedGuest) 9246 fSetCr0 &= ~(X86_CR0_PE | X86_CR0_PG); 9247 9248 uint32_t u32GuestCr0; 9249 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR0, &u32GuestCr0); 9250 AssertRCBreak(rc); 9251 HMVMX_CHECK_BREAK((u32GuestCr0 & fSetCr0) == fSetCr0, VMX_IGS_CR0_FIXED1); 9252 HMVMX_CHECK_BREAK(!(u32GuestCr0 & ~fZapCr0), VMX_IGS_CR0_FIXED0); 9253 if ( !fUnrestrictedGuest 9254 && (u32GuestCr0 & X86_CR0_PG) 9255 && !(u32GuestCr0 & X86_CR0_PE)) 9256 { 9257 HMVMX_ERROR_BREAK(VMX_IGS_CR0_PG_PE_COMBO); 9258 } 9259 9260 /* 9261 * CR4. 9262 */ 9263 uint64_t const fSetCr4 = (pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 & pVM->hm.s.vmx.Msrs.u64Cr4Fixed1); 9264 uint64_t const fZapCr4 = (pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 | pVM->hm.s.vmx.Msrs.u64Cr4Fixed1); 9265 9266 uint32_t u32GuestCr4; 9267 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR4, &u32GuestCr4); 9268 AssertRCBreak(rc); 9269 HMVMX_CHECK_BREAK((u32GuestCr4 & fSetCr4) == fSetCr4, VMX_IGS_CR4_FIXED1); 9270 HMVMX_CHECK_BREAK(!(u32GuestCr4 & ~fZapCr4), VMX_IGS_CR4_FIXED0); 9271 9272 /* 9273 * IA32_DEBUGCTL MSR. 9274 */ 9275 uint64_t u64Val; 9276 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_DEBUGCTL_FULL, &u64Val); 9277 AssertRCBreak(rc); 9278 if ( (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG) 9279 && (u64Val & 0xfffffe3c)) /* Bits 31:9, bits 5:2 MBZ. 
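The fSetCr0/fZapCr0 arithmetic above implements the usual VMX fixed-bits rule for CR0 and CR4: every bit set in the FIXED0 MSR must be 1 in the register, and every bit clear in the FIXED1 MSR must be 0. A small stand-alone sketch of that rule (hypothetical helper, not the HMVMX_CHECK_BREAK flow used above):

    #include <stdbool.h>
    #include <stdint.h>

    /* Hypothetical helper: validate a CR0/CR4 value against the VMX
       FIXED0/FIXED1 MSR pair. */
    static bool IsCrValidForVmx(uint64_t uCr, uint64_t uFixed0, uint64_t uFixed1)
    {
        uint64_t const fMustBeSet = uFixed0 & uFixed1;   /* corresponds to fSetCr0/fSetCr4 */
        uint64_t const fMayBeSet  = uFixed0 | uFixed1;   /* corresponds to fZapCr0/fZapCr4 */
        return (uCr & fMustBeSet) == fMustBeSet
            && (uCr & ~fMayBeSet) == 0;
    }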
*/ 9280 { 9281 HMVMX_ERROR_BREAK(VMX_IGS_DEBUGCTL_MSR_RESERVED); 9282 } 9283 uint64_t u64DebugCtlMsr = u64Val; 9284 9285 #ifdef VBOX_STRICT 9286 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY, &u32Val); 9287 AssertRCBreak(rc); 9288 Assert(u32Val == pVmcsInfo->u32EntryCtls); 9289 #endif 9290 bool const fLongModeGuest = RT_BOOL(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST); 9291 9292 /* 9293 * RIP and RFLAGS. 9294 */ 9295 uint32_t u32Eflags; 9296 #if HC_ARCH_BITS == 64 9297 rc = VMXReadVmcs64(VMX_VMCS_GUEST_RIP, &u64Val); 9298 AssertRCBreak(rc); 9299 /* pCtx->rip can be different than the one in the VMCS (e.g. run guest code and VM-exits that don't update it). */ 9300 if ( !fLongModeGuest 9301 || !pCtx->cs.Attr.n.u1Long) 9302 { 9303 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffff00000000)), VMX_IGS_LONGMODE_RIP_INVALID); 9304 } 9305 /** @todo If the processor supports N < 64 linear-address bits, bits 63:N 9306 * must be identical if the "IA-32e mode guest" VM-entry 9307 * control is 1 and CS.L is 1. No check applies if the 9308 * CPU supports 64 linear-address bits. */ 9309 9310 /* Flags in pCtx can be different (real-on-v86 for instance). We are only concerned about the VMCS contents here. */ 9311 rc = VMXReadVmcs64(VMX_VMCS_GUEST_RFLAGS, &u64Val); 9312 AssertRCBreak(rc); 9313 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffc08028)), /* Bit 63:22, Bit 15, 5, 3 MBZ. */ 9314 VMX_IGS_RFLAGS_RESERVED); 9315 HMVMX_CHECK_BREAK((u64Val & X86_EFL_RA1_MASK), VMX_IGS_RFLAGS_RESERVED1); /* Bit 1 MB1. */ 9316 u32Eflags = u64Val; 9317 #else 9318 rc = VMXReadVmcs32(VMX_VMCS_GUEST_RFLAGS, &u32Eflags); 9319 AssertRCBreak(rc); 9320 HMVMX_CHECK_BREAK(!(u32Eflags & 0xffc08028), VMX_IGS_RFLAGS_RESERVED); /* Bit 31:22, Bit 15, 5, 3 MBZ. */ 9321 HMVMX_CHECK_BREAK((u32Eflags & X86_EFL_RA1_MASK), VMX_IGS_RFLAGS_RESERVED1); /* Bit 1 MB1. */ 9322 #endif 9323 9324 if ( fLongModeGuest 9325 || ( fUnrestrictedGuest 9326 && !(u32GuestCr0 & X86_CR0_PE))) 9327 { 9328 HMVMX_CHECK_BREAK(!(u32Eflags & X86_EFL_VM), VMX_IGS_RFLAGS_VM_INVALID); 9329 } 9330 9331 uint32_t u32EntryInfo; 9332 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &u32EntryInfo); 9333 AssertRCBreak(rc); 9334 if ( VMX_ENTRY_INT_INFO_IS_VALID(u32EntryInfo) 9335 && VMX_ENTRY_INT_INFO_TYPE(u32EntryInfo) == VMX_EXIT_INT_INFO_TYPE_EXT_INT) 9336 { 9337 HMVMX_CHECK_BREAK(u32Eflags & X86_EFL_IF, VMX_IGS_RFLAGS_IF_INVALID); 9338 } 9339 9340 /* 9341 * 64-bit checks. 9342 */ 9343 #if HC_ARCH_BITS == 64 9344 if (fLongModeGuest) 9345 { 9346 HMVMX_CHECK_BREAK(u32GuestCr0 & X86_CR0_PG, VMX_IGS_CR0_PG_LONGMODE); 9347 HMVMX_CHECK_BREAK(u32GuestCr4 & X86_CR4_PAE, VMX_IGS_CR4_PAE_LONGMODE); 9348 } 9349 9350 if ( !fLongModeGuest 9351 && (u32GuestCr4 & X86_CR4_PCIDE)) 9352 { 9353 HMVMX_ERROR_BREAK(VMX_IGS_CR4_PCIDE); 9354 } 9355 9356 /** @todo CR3 field must be such that bits 63:52 and bits in the range 9357 * 51:32 beyond the processor's physical-address width are 0. */ 9358 9359 if ( (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG) 9360 && (pCtx->dr[7] & X86_DR7_MBZ_MASK)) 9361 { 9362 HMVMX_ERROR_BREAK(VMX_IGS_DR7_RESERVED); 9363 } 9364 9365 rc = VMXReadVmcs64(VMX_VMCS_HOST_SYSENTER_ESP, &u64Val); 9366 AssertRCBreak(rc); 9367 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_ESP_NOT_CANONICAL); 9368 9369 rc = VMXReadVmcs64(VMX_VMCS_HOST_SYSENTER_EIP, &u64Val); 9370 AssertRCBreak(rc); 9371 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_EIP_NOT_CANONICAL); 9372 #endif 9373 9374 /* 9375 * PERF_GLOBAL MSR. 
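Several of the checks above rely on X86_IS_CANONICAL for segment bases and the SYSENTER ESP/EIP values. Assuming the common case of 48 implemented linear-address bits, the canonical test can be sketched as follows (illustrative helper name):

    #include <stdbool.h>
    #include <stdint.h>

    /* Hypothetical helper: canonical-address test assuming 48 implemented
       linear-address bits - bits 63:47 must all equal bit 47. */
    static bool IsCanonical48(uint64_t uAddr)
    {
        uint64_t const uTop = uAddr >> 47;          /* bits 63:47 */
        return uTop == 0 || uTop == UINT64_C(0x1ffff);
    }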
9376 */ 9377 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PERF_MSR) 9378 { 9379 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL, &u64Val); 9380 AssertRCBreak(rc); 9381 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffff8fffffffc)), 9382 VMX_IGS_PERF_GLOBAL_MSR_RESERVED); /* Bits 63:35, bits 31:2 MBZ. */ 9383 } 9384 9385 /* 9386 * PAT MSR. 9387 */ 9388 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PAT_MSR) 9389 { 9390 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PAT_FULL, &u64Val); 9391 AssertRCBreak(rc); 9392 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0x707070707070707)), VMX_IGS_PAT_MSR_RESERVED); 9393 for (unsigned i = 0; i < 8; i++) 9394 { 9395 uint8_t u8Val = (u64Val & 0xff); 9396 if ( u8Val != 0 /* UC */ 9397 && u8Val != 1 /* WC */ 9398 && u8Val != 4 /* WT */ 9399 && u8Val != 5 /* WP */ 9400 && u8Val != 6 /* WB */ 9401 && u8Val != 7 /* UC- */) 9402 { 9403 HMVMX_ERROR_BREAK(VMX_IGS_PAT_MSR_INVALID); 9404 } 9405 u64Val >>= 8; 9406 } 9407 } 9408 9409 /* 9410 * EFER MSR. 9411 */ 9412 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_EFER_MSR) 9413 { 9414 Assert(pVM->hm.s.vmx.fSupportsVmcsEfer); 9415 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_EFER_FULL, &u64Val); 9416 AssertRCBreak(rc); 9417 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffffffffff2fe)), 9418 VMX_IGS_EFER_MSR_RESERVED); /* Bits 63:12, bit 9, bits 7:1 MBZ. */ 9419 HMVMX_CHECK_BREAK(RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL( pVmcsInfo->u32EntryCtls 9420 & VMX_ENTRY_CTLS_IA32E_MODE_GUEST), 9421 VMX_IGS_EFER_LMA_GUEST_MODE_MISMATCH); 9422 /** @todo r=ramshankar: Unrestricted check here is probably wrong, see 9423 * iemVmxVmentryCheckGuestState(). */ 9424 HMVMX_CHECK_BREAK( fUnrestrictedGuest 9425 || !(u32GuestCr0 & X86_CR0_PG) 9426 || RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL(u64Val & MSR_K6_EFER_LME), 9427 VMX_IGS_EFER_LMA_LME_MISMATCH); 9428 } 9429 9430 /* 9431 * Segment registers. 9432 */ 9433 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE) 9434 || !(pCtx->ldtr.Sel & X86_SEL_LDT), VMX_IGS_LDTR_TI_INVALID); 9435 if (!(u32Eflags & X86_EFL_VM)) 9436 { 9437 /* CS */ 9438 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1Present, VMX_IGS_CS_ATTR_P_INVALID); 9439 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xf00), VMX_IGS_CS_ATTR_RESERVED); 9440 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xfffe0000), VMX_IGS_CS_ATTR_RESERVED); 9441 HMVMX_CHECK_BREAK( (pCtx->cs.u32Limit & 0xfff) == 0xfff 9442 || !(pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID); 9443 HMVMX_CHECK_BREAK( !(pCtx->cs.u32Limit & 0xfff00000) 9444 || (pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID); 9445 /* CS cannot be loaded with NULL in protected mode. 
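The PAT MSR loop above enforces that each of the eight PAT entries encodes a defined memory type. A compact sketch of the same rule, using a hypothetical helper rather than the macro-driven flow above:

    #include <stdbool.h>
    #include <stdint.h>

    /* Hypothetical helper: each of the eight PAT entries must encode a defined
       memory type - UC (0), WC (1), WT (4), WP (5), WB (6) or UC- (7). */
    static bool IsPatMsrValid(uint64_t uPatMsr)
    {
        for (unsigned i = 0; i < 8; i++)
        {
            uint8_t const uEntry = (uint8_t)(uPatMsr >> (i * 8));
            if (   uEntry != 0 && uEntry != 1 && uEntry != 4
                && uEntry != 5 && uEntry != 6 && uEntry != 7)
                return false;
        }
        return true;
    }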
*/ 9446 HMVMX_CHECK_BREAK(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_CS_ATTR_UNUSABLE); 9447 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1DescType, VMX_IGS_CS_ATTR_S_INVALID); 9448 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11) 9449 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_UNEQUAL); 9450 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15) 9451 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_MISMATCH); 9452 else if (pVM->hm.s.vmx.fUnrestrictedGuest && pCtx->cs.Attr.n.u4Type == 3) 9453 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == 0, VMX_IGS_CS_ATTR_DPL_INVALID); 9454 else 9455 HMVMX_ERROR_BREAK(VMX_IGS_CS_ATTR_TYPE_INVALID); 9456 9457 /* SS */ 9458 HMVMX_CHECK_BREAK( pVM->hm.s.vmx.fUnrestrictedGuest 9459 || (pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL), VMX_IGS_SS_CS_RPL_UNEQUAL); 9460 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL), VMX_IGS_SS_ATTR_DPL_RPL_UNEQUAL); 9461 if ( !(pCtx->cr0 & X86_CR0_PE) 9462 || pCtx->cs.Attr.n.u4Type == 3) 9463 { 9464 HMVMX_CHECK_BREAK(!pCtx->ss.Attr.n.u2Dpl, VMX_IGS_SS_ATTR_DPL_INVALID); 9465 } 9466 if (!(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE)) 9467 { 9468 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7, VMX_IGS_SS_ATTR_TYPE_INVALID); 9469 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u1Present, VMX_IGS_SS_ATTR_P_INVALID); 9470 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xf00), VMX_IGS_SS_ATTR_RESERVED); 9471 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xfffe0000), VMX_IGS_SS_ATTR_RESERVED); 9472 HMVMX_CHECK_BREAK( (pCtx->ss.u32Limit & 0xfff) == 0xfff 9473 || !(pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID); 9474 HMVMX_CHECK_BREAK( !(pCtx->ss.u32Limit & 0xfff00000) 9475 || (pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID); 9476 } 9477 9478 /* DS, ES, FS, GS - only check for usable selectors, see hmR0VmxExportGuestSReg(). 
*/ 9479 if (!(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE)) 9480 { 9481 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_DS_ATTR_A_INVALID); 9482 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u1Present, VMX_IGS_DS_ATTR_P_INVALID); 9483 HMVMX_CHECK_BREAK( pVM->hm.s.vmx.fUnrestrictedGuest 9484 || pCtx->ds.Attr.n.u4Type > 11 9485 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL); 9486 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xf00), VMX_IGS_DS_ATTR_RESERVED); 9487 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xfffe0000), VMX_IGS_DS_ATTR_RESERVED); 9488 HMVMX_CHECK_BREAK( (pCtx->ds.u32Limit & 0xfff) == 0xfff 9489 || !(pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID); 9490 HMVMX_CHECK_BREAK( !(pCtx->ds.u32Limit & 0xfff00000) 9491 || (pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID); 9492 HMVMX_CHECK_BREAK( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE) 9493 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_DS_ATTR_TYPE_INVALID); 9494 } 9495 if (!(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE)) 9496 { 9497 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_ES_ATTR_A_INVALID); 9498 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u1Present, VMX_IGS_ES_ATTR_P_INVALID); 9499 HMVMX_CHECK_BREAK( pVM->hm.s.vmx.fUnrestrictedGuest 9500 || pCtx->es.Attr.n.u4Type > 11 9501 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL); 9502 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xf00), VMX_IGS_ES_ATTR_RESERVED); 9503 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xfffe0000), VMX_IGS_ES_ATTR_RESERVED); 9504 HMVMX_CHECK_BREAK( (pCtx->es.u32Limit & 0xfff) == 0xfff 9505 || !(pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID); 9506 HMVMX_CHECK_BREAK( !(pCtx->es.u32Limit & 0xfff00000) 9507 || (pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID); 9508 HMVMX_CHECK_BREAK( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE) 9509 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_ES_ATTR_TYPE_INVALID); 9510 } 9511 if (!(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE)) 9512 { 9513 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_FS_ATTR_A_INVALID); 9514 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u1Present, VMX_IGS_FS_ATTR_P_INVALID); 9515 HMVMX_CHECK_BREAK( pVM->hm.s.vmx.fUnrestrictedGuest 9516 || pCtx->fs.Attr.n.u4Type > 11 9517 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL), VMX_IGS_FS_ATTR_DPL_RPL_UNEQUAL); 9518 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xf00), VMX_IGS_FS_ATTR_RESERVED); 9519 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xfffe0000), VMX_IGS_FS_ATTR_RESERVED); 9520 HMVMX_CHECK_BREAK( (pCtx->fs.u32Limit & 0xfff) == 0xfff 9521 || !(pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID); 9522 HMVMX_CHECK_BREAK( !(pCtx->fs.u32Limit & 0xfff00000) 9523 || (pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID); 9524 HMVMX_CHECK_BREAK( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE) 9525 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_FS_ATTR_TYPE_INVALID); 9526 } 9527 if (!(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE)) 9528 { 9529 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_GS_ATTR_A_INVALID); 9530 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u1Present, VMX_IGS_GS_ATTR_P_INVALID); 9531 HMVMX_CHECK_BREAK( pVM->hm.s.vmx.fUnrestrictedGuest 9532 || pCtx->gs.Attr.n.u4Type > 11 9533 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL), VMX_IGS_GS_ATTR_DPL_RPL_UNEQUAL); 9534 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xf00), VMX_IGS_GS_ATTR_RESERVED); 9535 
HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xfffe0000), VMX_IGS_GS_ATTR_RESERVED); 9536 HMVMX_CHECK_BREAK( (pCtx->gs.u32Limit & 0xfff) == 0xfff 9537 || !(pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID); 9538 HMVMX_CHECK_BREAK( !(pCtx->gs.u32Limit & 0xfff00000) 9539 || (pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID); 9540 HMVMX_CHECK_BREAK( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE) 9541 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_GS_ATTR_TYPE_INVALID); 9542 } 9543 /* 64-bit capable CPUs. */ 9544 #if HC_ARCH_BITS == 64 9545 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL); 9546 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL); 9547 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE) 9548 || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL); 9549 HMVMX_CHECK_BREAK(!RT_HI_U32(pCtx->cs.u64Base), VMX_IGS_LONGMODE_CS_BASE_INVALID); 9550 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ss.u64Base), 9551 VMX_IGS_LONGMODE_SS_BASE_INVALID); 9552 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ds.u64Base), 9553 VMX_IGS_LONGMODE_DS_BASE_INVALID); 9554 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->es.u64Base), 9555 VMX_IGS_LONGMODE_ES_BASE_INVALID); 9556 #endif 9557 } 9558 else 9559 { 9560 /* V86 mode checks. */ 9561 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr; 9562 if (pVmcsInfo->RealMode.fRealOnV86Active) 9563 { 9564 u32CSAttr = 0xf3; u32SSAttr = 0xf3; 9565 u32DSAttr = 0xf3; u32ESAttr = 0xf3; 9566 u32FSAttr = 0xf3; u32GSAttr = 0xf3; 9567 } 9568 else 9569 { 9570 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u; 9571 u32DSAttr = pCtx->ds.Attr.u; u32ESAttr = pCtx->es.Attr.u; 9572 u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u; 9573 } 9574 9575 /* CS */ 9576 HMVMX_CHECK_BREAK((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), VMX_IGS_V86_CS_BASE_INVALID); 9577 HMVMX_CHECK_BREAK(pCtx->cs.u32Limit == 0xffff, VMX_IGS_V86_CS_LIMIT_INVALID); 9578 HMVMX_CHECK_BREAK(u32CSAttr == 0xf3, VMX_IGS_V86_CS_ATTR_INVALID); 9579 /* SS */ 9580 HMVMX_CHECK_BREAK((pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4), VMX_IGS_V86_SS_BASE_INVALID); 9581 HMVMX_CHECK_BREAK(pCtx->ss.u32Limit == 0xffff, VMX_IGS_V86_SS_LIMIT_INVALID); 9582 HMVMX_CHECK_BREAK(u32SSAttr == 0xf3, VMX_IGS_V86_SS_ATTR_INVALID); 9583 /* DS */ 9584 HMVMX_CHECK_BREAK((pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4), VMX_IGS_V86_DS_BASE_INVALID); 9585 HMVMX_CHECK_BREAK(pCtx->ds.u32Limit == 0xffff, VMX_IGS_V86_DS_LIMIT_INVALID); 9586 HMVMX_CHECK_BREAK(u32DSAttr == 0xf3, VMX_IGS_V86_DS_ATTR_INVALID); 9587 /* ES */ 9588 HMVMX_CHECK_BREAK((pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4), VMX_IGS_V86_ES_BASE_INVALID); 9589 HMVMX_CHECK_BREAK(pCtx->es.u32Limit == 0xffff, VMX_IGS_V86_ES_LIMIT_INVALID); 9590 HMVMX_CHECK_BREAK(u32ESAttr == 0xf3, VMX_IGS_V86_ES_ATTR_INVALID); 9591 /* FS */ 9592 HMVMX_CHECK_BREAK((pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4), VMX_IGS_V86_FS_BASE_INVALID); 9593 HMVMX_CHECK_BREAK(pCtx->fs.u32Limit == 0xffff, VMX_IGS_V86_FS_LIMIT_INVALID); 9594 HMVMX_CHECK_BREAK(u32FSAttr == 0xf3, VMX_IGS_V86_FS_ATTR_INVALID); 9595 /* GS */ 9596 HMVMX_CHECK_BREAK((pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4), VMX_IGS_V86_GS_BASE_INVALID); 9597 HMVMX_CHECK_BREAK(pCtx->gs.u32Limit == 0xffff, VMX_IGS_V86_GS_LIMIT_INVALID); 9598 HMVMX_CHECK_BREAK(u32GSAttr == 0xf3, 
VMX_IGS_V86_GS_ATTR_INVALID); 9599 /* 64-bit capable CPUs. */ 9600 #if HC_ARCH_BITS == 64 9601 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL); 9602 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL); 9603 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE) 9604 || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL); 9605 HMVMX_CHECK_BREAK(!RT_HI_U32(pCtx->cs.u64Base), VMX_IGS_LONGMODE_CS_BASE_INVALID); 9606 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ss.u64Base), 9607 VMX_IGS_LONGMODE_SS_BASE_INVALID); 9608 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ds.u64Base), 9609 VMX_IGS_LONGMODE_DS_BASE_INVALID); 9610 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->es.u64Base), 9611 VMX_IGS_LONGMODE_ES_BASE_INVALID); 9612 #endif 9613 } 9614 9615 /* 9616 * TR. 9617 */ 9618 HMVMX_CHECK_BREAK(!(pCtx->tr.Sel & X86_SEL_LDT), VMX_IGS_TR_TI_INVALID); 9619 /* 64-bit capable CPUs. */ 9620 #if HC_ARCH_BITS == 64 9621 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->tr.u64Base), VMX_IGS_TR_BASE_NOT_CANONICAL); 9622 #endif 9623 if (fLongModeGuest) 9624 { 9625 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u4Type == 11, /* 64-bit busy TSS. */ 9626 VMX_IGS_LONGMODE_TR_ATTR_TYPE_INVALID); 9627 } 9628 else 9629 { 9630 HMVMX_CHECK_BREAK( pCtx->tr.Attr.n.u4Type == 3 /* 16-bit busy TSS. */ 9631 || pCtx->tr.Attr.n.u4Type == 11, /* 32-bit busy TSS.*/ 9632 VMX_IGS_TR_ATTR_TYPE_INVALID); 9633 } 9634 HMVMX_CHECK_BREAK(!pCtx->tr.Attr.n.u1DescType, VMX_IGS_TR_ATTR_S_INVALID); 9635 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u1Present, VMX_IGS_TR_ATTR_P_INVALID); 9636 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & 0xf00), VMX_IGS_TR_ATTR_RESERVED); /* Bits 11:8 MBZ. */ 9637 HMVMX_CHECK_BREAK( (pCtx->tr.u32Limit & 0xfff) == 0xfff 9638 || !(pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID); 9639 HMVMX_CHECK_BREAK( !(pCtx->tr.u32Limit & 0xfff00000) 9640 || (pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID); 9641 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_TR_ATTR_UNUSABLE); 9642 9643 /* 9644 * GDTR and IDTR. 9645 */ 9646 #if HC_ARCH_BITS == 64 9647 rc = VMXReadVmcs64(VMX_VMCS_GUEST_GDTR_BASE, &u64Val); 9648 AssertRCBreak(rc); 9649 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_GDTR_BASE_NOT_CANONICAL); 9650 9651 rc = VMXReadVmcs64(VMX_VMCS_GUEST_IDTR_BASE, &u64Val); 9652 AssertRCBreak(rc); 9653 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_IDTR_BASE_NOT_CANONICAL); 9654 #endif 9655 9656 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val); 9657 AssertRCBreak(rc); 9658 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_GDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */ 9659 9660 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val); 9661 AssertRCBreak(rc); 9662 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_IDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */ 9663 9664 /* 9665 * Guest Non-Register State. 9666 */ 9667 /* Activity State. 
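Many of the segment checks above repeat the same limit/granularity consistency rule: if the low 12 limit bits are not all set, the granularity bit must be clear, and if any of the top 12 limit bits are set, it must be set. A stand-alone sketch of that rule (hypothetical helper):

    #include <stdbool.h>
    #include <stdint.h>

    /* Hypothetical helper: segment limit vs. granularity-bit consistency. */
    static bool IsSegLimitGranularityOk(uint32_t uLimit, bool fGranularity)
    {
        if ((uLimit & 0xfff) != 0xfff && fGranularity)
            return false;                           /* byte granularity required */
        if ((uLimit & 0xfff00000) != 0 && !fGranularity)
            return false;                           /* 4K granularity required */
        return true;
    }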
*/ 9668 uint32_t u32ActivityState; 9669 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_ACTIVITY_STATE, &u32ActivityState); 9670 AssertRCBreak(rc); 9671 HMVMX_CHECK_BREAK( !u32ActivityState 9672 || (u32ActivityState & RT_BF_GET(pVM->hm.s.vmx.Msrs.u64Misc, VMX_BF_MISC_ACTIVITY_STATES)), 9673 VMX_IGS_ACTIVITY_STATE_INVALID); 9674 HMVMX_CHECK_BREAK( !(pCtx->ss.Attr.n.u2Dpl) 9675 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_HLT, VMX_IGS_ACTIVITY_STATE_HLT_INVALID); 9676 uint32_t u32IntrState; 9677 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_INT_STATE, &u32IntrState); 9678 AssertRCBreak(rc); 9679 if ( u32IntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS 9680 || u32IntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_STI) 9681 { 9682 HMVMX_CHECK_BREAK(u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_ACTIVE, VMX_IGS_ACTIVITY_STATE_ACTIVE_INVALID); 9683 } 9684 9685 /** @todo Activity state and injecting interrupts. Left as a todo since we 9686 * currently don't use activity states but ACTIVE. */ 9687 9688 HMVMX_CHECK_BREAK( !(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM) 9689 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_SIPI_WAIT, VMX_IGS_ACTIVITY_STATE_SIPI_WAIT_INVALID); 9690 9691 /* Guest interruptibility-state. */ 9692 HMVMX_CHECK_BREAK(!(u32IntrState & 0xffffffe0), VMX_IGS_INTERRUPTIBILITY_STATE_RESERVED); 9693 HMVMX_CHECK_BREAK((u32IntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS)) 9694 != (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS), 9695 VMX_IGS_INTERRUPTIBILITY_STATE_STI_MOVSS_INVALID); 9696 HMVMX_CHECK_BREAK( (u32Eflags & X86_EFL_IF) 9697 || !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI), 9698 VMX_IGS_INTERRUPTIBILITY_STATE_STI_EFL_INVALID); 9699 if (VMX_ENTRY_INT_INFO_IS_VALID(u32EntryInfo)) 9700 { 9701 if (VMX_ENTRY_INT_INFO_TYPE(u32EntryInfo) == VMX_EXIT_INT_INFO_TYPE_EXT_INT) 9702 { 9703 HMVMX_CHECK_BREAK( !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI) 9704 && !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS), 9705 VMX_IGS_INTERRUPTIBILITY_STATE_EXT_INT_INVALID); 9706 } 9707 else if (VMX_ENTRY_INT_INFO_TYPE(u32EntryInfo) == VMX_EXIT_INT_INFO_TYPE_NMI) 9708 { 9709 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS), 9710 VMX_IGS_INTERRUPTIBILITY_STATE_MOVSS_INVALID); 9711 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI), 9712 VMX_IGS_INTERRUPTIBILITY_STATE_STI_INVALID); 9713 } 9714 } 9715 /** @todo Assumes the processor is not in SMM. */ 9716 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI), 9717 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_INVALID); 9718 HMVMX_CHECK_BREAK( !(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM) 9719 || (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI), 9720 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_SMM_INVALID); 9721 if ( (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI) 9722 && VMX_ENTRY_INT_INFO_IS_VALID(u32EntryInfo) 9723 && VMX_ENTRY_INT_INFO_TYPE(u32EntryInfo) == VMX_EXIT_INT_INFO_TYPE_NMI) 9724 { 9725 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI), 9726 VMX_IGS_INTERRUPTIBILITY_STATE_NMI_INVALID); 9727 } 9728 9729 /* Pending debug exceptions. */ 9730 #if HC_ARCH_BITS == 64 9731 rc = VMXReadVmcs64(VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &u64Val); 9732 AssertRCBreak(rc); 9733 /* Bits 63:15, Bit 13, Bits 11:4 MBZ. */ 9734 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffffaff0)), VMX_IGS_LONGMODE_PENDING_DEBUG_RESERVED); 9735 u32Val = u64Val; /* For pending debug exceptions checks below. 
*/ 9736 #else 9737 rc = VMXReadVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &u32Val); 9738 AssertRCBreak(rc); 9739 /* Bits 31:15, Bit 13, Bits 11:4 MBZ. */ 9740 HMVMX_CHECK_BREAK(!(u32Val & 0xffffaff0), VMX_IGS_PENDING_DEBUG_RESERVED); 9741 #endif 9742 9743 if ( (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI) 9744 || (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS) 9745 || u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_HLT) 9746 { 9747 if ( (u32Eflags & X86_EFL_TF) 9748 && !(u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */ 9749 { 9750 /* Bit 14 is PendingDebug.BS. */ 9751 HMVMX_CHECK_BREAK(u32Val & RT_BIT(14), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_SET); 9752 } 9753 if ( !(u32Eflags & X86_EFL_TF) 9754 || (u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */ 9755 { 9756 /* Bit 14 is PendingDebug.BS. */ 9757 HMVMX_CHECK_BREAK(!(u32Val & RT_BIT(14)), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_CLEAR); 9758 } 9759 } 9760 9761 /* VMCS link pointer. */ 9762 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, &u64Val); 9763 AssertRCBreak(rc); 9764 if (u64Val != UINT64_C(0xffffffffffffffff)) 9765 { 9766 HMVMX_CHECK_BREAK(!(u64Val & 0xfff), VMX_IGS_VMCS_LINK_PTR_RESERVED); 9767 /** @todo Bits beyond the processor's physical-address width MBZ. */ 9768 /** @todo 32-bit located in memory referenced by value of this field (as a 9769 * physical address) must contain the processor's VMCS revision ID. */ 9770 /** @todo SMM checks. */ 9771 } 9772 9773 /** @todo Checks on Guest Page-Directory-Pointer-Table Entries when guest is 9774 * not using nested paging? */ 9775 if ( pVM->hm.s.fNestedPaging 9776 && !fLongModeGuest 9777 && CPUMIsGuestInPAEModeEx(pCtx)) 9778 { 9779 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE0_FULL, &u64Val); 9780 AssertRCBreak(rc); 9781 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED); 9782 9783 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE1_FULL, &u64Val); 9784 AssertRCBreak(rc); 9785 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED); 9786 9787 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE2_FULL, &u64Val); 9788 AssertRCBreak(rc); 9789 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED); 9790 9791 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE3_FULL, &u64Val); 9792 AssertRCBreak(rc); 9793 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED); 9794 } 9795 9796 /* Shouldn't happen but distinguish it from AssertRCBreak() errors. */ 9797 if (uError == VMX_IGS_ERROR) 9798 uError = VMX_IGS_REASON_NOT_FOUND; 9799 } while (0); 9800 9801 pVCpu->hm.s.u32HMError = uError; 9802 return uError; 9803 9804 #undef HMVMX_ERROR_BREAK 9805 #undef HMVMX_CHECK_BREAK 9806 } 9807 9808 9809 /** 8450 9810 * Setup the APIC-access page for virtualizing APIC access. 8451 9811 * … … 8472 9832 8473 9833 /* Map the HC APIC-access page in place of the MMIO page, also updates the shadow page tables if necessary. */ 8474 Assert(pVM->hm.s.vmx.HCPhysApicAccess );9834 Assert(pVM->hm.s.vmx.HCPhysApicAccess != NIL_RTHCPHYS); 8475 9835 rc = IOMMMIOMapMMIOHCPage(pVM, pVCpu, GCPhysApicBase, pVM->hm.s.vmx.HCPhysApicAccess, X86_PTE_RW | X86_PTE_P); 8476 9836 AssertRCReturn(rc, rc); 8477 9837 8478 9838 /* Update the per-VCPU cache of the APIC base MSR. 
*/ 8479 pVCpu->hm.s.vmx.u64 MsrApicBase = u64MsrApicBase;9839 pVCpu->hm.s.vmx.u64GstMsrApicBase = u64MsrApicBase; 8480 9840 return VINF_SUCCESS; 8481 9841 } 9842 9843 9844 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX 9845 /** 9846 * Merges the guest with the nested-guest MSR bitmap in preparation of executing the 9847 * nested-guest using hardware-assisted VMX. 9848 * 9849 * @param pVCpu The cross context virtual CPU structure. 9850 * @param pVmcsInfoNstGst The nested-guest VMCS info. object. 9851 * @param pVmcsInfoGst The guest VMCS info. object. 9852 */ 9853 static void hmR0VmxMergeMsrBitmapNested(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfoNstGst, PCVMXVMCSINFO pVmcsInfoGst) 9854 { 9855 uint64_t const *pu64MsrBitmapNstGst = (uint64_t const *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvMsrBitmap); 9856 uint64_t const *pu64MsrBitmapGst = (uint64_t const *)pVmcsInfoGst->pvMsrBitmap; 9857 uint64_t *pu64MsrBitmap = (uint64_t *)pVmcsInfoNstGst->pvMsrBitmap; 9858 Assert(pu64MsrBitmapNstGst); 9859 Assert(pu64MsrBitmapGst); 9860 Assert(pu64MsrBitmap); 9861 9862 /* 9863 * We merge the guest MSR bitmap with the nested-guest MSR bitmap such that any 9864 * MSR that is intercepted by the guest is also intercepted while executing the 9865 * nested-guest using hardware-assisted VMX. 9866 */ 9867 uint32_t const cbFrag = sizeof(uint64_t); 9868 uint32_t const cFrags = X86_PAGE_4K_SIZE / cbFrag; 9869 for (uint32_t i = 0; i <= cFrags; i++) 9870 pu64MsrBitmap[i] = pu64MsrBitmapNstGst[i] | pu64MsrBitmapGst[i]; 9871 } 9872 9873 9874 /** 9875 * Merges the guest VMCS in to the nested-guest VMCS controls in preparation of 9876 * hardware-assisted VMX execution of the nested-guest. 9877 * 9878 * For a guest, we don't modify these controls once we set up the VMCS. 9879 * 9880 * For nested-guests since the guest hypervisor provides these controls on every 9881 * nested-guest VM-entry and could potentially change them everytime we need to 9882 * merge them before every nested-guest VM-entry. 9883 * 9884 * @returns VBox status code. 9885 * @param pVCpu The cross context virtual CPU structure. 9886 */ 9887 static int hmR0VmxMergeVmcsNested(PVMCPU pVCpu) 9888 { 9889 PVM pVM = pVCpu->CTX_SUFF(pVM); 9890 PCVMXVMCSINFO pVmcsInfoGst = &pVCpu->hm.s.vmx.VmcsInfo; 9891 PCVMXVVMCS pVmcsNstGst = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs); 9892 Assert(pVmcsNstGst); 9893 9894 /* 9895 * Merge the controls with the requirements of the guest VMCS. 9896 * 9897 * We do not need to validate the nested-guest VMX features specified in the 9898 * nested-guest VMCS with the features supported by the physical CPU as it's 9899 * already done by the VMLAUNCH/VMRESUME instruction emulation. 9900 * 9901 * This is because the VMX features exposed by CPUM (through CPUID/MSRs) to the 9902 * guest are derived from the VMX features supported by the physical CPU. 9903 */ 9904 9905 /* Pin-based VM-execution controls. */ 9906 uint32_t const u32PinCtls = pVmcsNstGst->u32PinCtls | pVmcsInfoGst->u32PinCtls; 9907 9908 /* Processor-based VM-execution controls. */ 9909 uint32_t u32ProcCtls = (pVmcsNstGst->u32ProcCtls & ~VMX_PROC_CTLS_USE_IO_BITMAPS) 9910 | (pVmcsInfoGst->u32ProcCtls & ~( VMX_PROC_CTLS_INT_WINDOW_EXIT 9911 | VMX_PROC_CTLS_NMI_WINDOW_EXIT 9912 | VMX_PROC_CTLS_USE_TPR_SHADOW 9913 | VMX_PROC_CTLS_MONITOR_TRAP_FLAG)); 9914 9915 /* Secondary processor-based VM-execution controls. 
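hmR0VmxMergeMsrBitmapNested above ORs the guest and nested-guest MSR bitmaps so that an MSR intercepted by either level stays intercepted while the nested-guest runs. A minimal sketch of that merge over one 4K bitmap page (hypothetical names; the loop covers exactly one page worth of qwords):

    #include <stddef.h>
    #include <stdint.h>

    /* Hypothetical helper: merge two 4K MSR bitmaps by OR-ing them together. */
    static void MergeMsrBitmaps(uint64_t *pau64Dst, const uint64_t *pau64Guest,
                                const uint64_t *pau64NstGst)
    {
        size_t const cQwords = 4096 / sizeof(uint64_t);  /* one page of qwords */
        for (size_t i = 0; i < cQwords; i++)
            pau64Dst[i] = pau64Guest[i] | pau64NstGst[i];
    }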
*/ 9916 uint32_t const u32ProcCtls2 = (pVmcsNstGst->u32ProcCtls2 & ~VMX_PROC_CTLS2_VPID) 9917 | (pVmcsInfoGst->u32ProcCtls2 & ~( VMX_PROC_CTLS2_VIRT_APIC_ACCESS 9918 | VMX_PROC_CTLS2_INVPCID 9919 | VMX_PROC_CTLS2_RDTSCP 9920 | VMX_PROC_CTLS2_XSAVES_XRSTORS 9921 | VMX_PROC_CTLS2_APIC_REG_VIRT 9922 | VMX_PROC_CTLS2_VIRT_INT_DELIVERY 9923 | VMX_PROC_CTLS2_VMFUNC)); 9924 9925 /* 9926 * VM-entry controls: 9927 * These controls contains state that depends on the nested-guest state (primarily 9928 * EFER MSR) and is thus not constant through VMLAUNCH/VMRESUME and the nested-guest 9929 * VM-exit. Although the nested-hypervisor cannot change it, we need to in order to 9930 * properly continue executing the nested-guest if the EFER MSR changes but does not 9931 * cause a nested-guest VM-exits. 9932 * 9933 * VM-exit controls: 9934 * These controls specify the host state on return. We cannot use the controls from 9935 * the nested-hypervisor state as is as it would contain the guest state rather than 9936 * the host state. Since the host state is subject to change (e.g. preemption, trips 9937 * to ring-3, longjmp and rescheduling to a different host CPU) they are not constant 9938 * through VMLAUNCH/VMRESUME and the nested-guest VM-exit. 9939 * 9940 * VM-entry MSR-load: 9941 * The guest MSRs from the VM-entry MSR-load area are already loaded into the 9942 * guest-CPU context by the VMLAUNCH/VMRESUME instruction emulation. 9943 * 9944 * VM-exit MSR-store: 9945 * The VM-exit emulation will take care of populating the MSRs from the guest-CPU 9946 * context back into the VM-exit MSR-store area. 9947 * 9948 * VM-exit MSR-load areas: 9949 * This must contain the real host MSRs with hardware-assisted VMX execution. Hence, 9950 * we can entirely ignore what the nested-hypervisor wants to load here. 9951 */ 9952 9953 /* 9954 * Exception bitmap. 9955 * 9956 * We could remove #UD from the guest bitmap and merge it with the nested-guest 9957 * bitmap here (and avoid doing anything while exporting nested-guest state), but to 9958 * keep the code more flexible if intercepting exceptions become more dynamic in 9959 * the future we do it as part of exporting the nested-guest state. 9960 */ 9961 uint32_t const u32XcptBitmap = pVmcsNstGst->u32XcptBitmap | pVmcsInfoGst->u32XcptBitmap; 9962 9963 /* 9964 * CR0/CR4 guest/host mask. 9965 * 9966 * Modifications by the nested-guest to CR0/CR4 bits owned by the host and the guest 9967 * must cause VM-exits, so we need to merge them here. 9968 */ 9969 uint64_t const u64Cr0Mask = pVmcsNstGst->u64Cr0Mask.u | pVmcsInfoGst->u64Cr0Mask; 9970 uint64_t const u64Cr4Mask = pVmcsNstGst->u64Cr4Mask.u | pVmcsInfoGst->u64Cr4Mask; 9971 9972 /* 9973 * Page-fault error-code mask and match. 9974 * 9975 * Although we require unrestricted guest execution (and thereby nested-paging) for 9976 * hardware-assisted VMX execution of nested-guests and thus the outer guest doesn't 9977 * normally intercept #PFs, it might intercept them for debugging purposes. 9978 * 9979 * If the outer guest is not intercepting #PFs, we can use the nested-guest #PF 9980 * filters. If the outer guest is intercepting #PFs we must intercept all #PFs. 9981 */ 9982 uint32_t u32XcptPFMask; 9983 uint32_t u32XcptPFMatch; 9984 if (!(pVmcsInfoGst->u32XcptBitmap & RT_BIT(X86_XCPT_PF))) 9985 { 9986 u32XcptPFMask = pVmcsNstGst->u32XcptPFMask; 9987 u32XcptPFMatch = pVmcsNstGst->u32XcptPFMatch; 9988 } 9989 else 9990 { 9991 u32XcptPFMask = 0; 9992 u32XcptPFMatch = 0; 9993 } 9994 9995 /* 9996 * Pause-Loop exiting. 
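The #PF error-code mask/match merge described above uses the nested-guest filter only when the outer guest does not intercept #PF; otherwise it falls back to 0/0 so that, together with the #PF bit set in the exception bitmap, every #PF causes a VM-exit. A sketch of that decision (hypothetical helper):

    #include <stdbool.h>
    #include <stdint.h>

    /* Hypothetical helper: choose the #PF error-code mask/match for the merged VMCS. */
    static void MergePageFaultFilter(bool fGuestInterceptsPF,
                                     uint32_t uNstGstMask, uint32_t uNstGstMatch,
                                     uint32_t *puMask, uint32_t *puMatch)
    {
        if (!fGuestInterceptsPF)
        {
            *puMask  = uNstGstMask;     /* outer guest doesn't care, honour the nested filter */
            *puMatch = uNstGstMatch;
        }
        else
        {
            *puMask  = 0;               /* 0/0 matches every error code */
            *puMatch = 0;
        }
    }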
9997 */ 9998 uint32_t const cPleGapTicks = RT_MIN(pVM->hm.s.vmx.cPleGapTicks, pVmcsNstGst->u32PleGap); 9999 uint32_t const cPleWindowTicks = RT_MIN(pVM->hm.s.vmx.cPleWindowTicks, pVmcsNstGst->u32PleWindow); 10000 10001 /* 10002 * I/O Bitmap. 10003 * 10004 * We do not use the I/O bitmap that may be provided by the guest hypervisor as we 10005 * always intercept all I/O port accesses. 10006 */ 10007 Assert(u32ProcCtls & VMX_PROC_CTLS_UNCOND_IO_EXIT); 10008 10009 /* 10010 * APIC-access page. 10011 * 10012 * The APIC-access page address has already been initialized while setting up the 10013 * nested-guest VMCS. In theory, even if the guest-physical address is invalid, it 10014 * should not be of any consequence to the host or to the guest for that matter, but 10015 * we only accept valid addresses verified by the VMLAUNCH/VMRESUME instruction 10016 * emulation to keep it simple. 10017 */ 10018 10019 /* 10020 * Virtual-APIC page and TPR threshold. 10021 * 10022 * We shall use the host-physical address of the virtual-APIC page in guest memory directly. 10023 * For this reason, we can access the virtual-APIC page of the nested-guest only using 10024 * PGM physical handlers as we must not assume a kernel virtual-address mapping exists and 10025 * requesting PGM for a mapping could be expensive/resource intensive (PGM mapping cache). 10026 */ 10027 RTHCPHYS HCPhysVirtApic = NIL_RTHCPHYS; 10028 uint32_t const u32TprThreshold = pVmcsNstGst->u32TprThreshold; 10029 if (u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW) 10030 { 10031 int rc = PGMPhysGCPhys2HCPhys(pVM, pVmcsNstGst->u64AddrVirtApic.u, &HCPhysVirtApic); 10032 10033 /* 10034 * If the guest hypervisor has loaded crap into the virtual-APIC page field 10035 * we would fail to obtain a valid host-physical address for its guest-physical 10036 * address. 10037 * 10038 * We currently do not support this scenario. Maybe in the future if there is a 10039 * pressing need we can explore making this particular set of conditions work. 10040 * Right now we just cause a VM-entry failure. 10041 * 10042 * This has already been checked by VMLAUNCH/VMRESUME instruction emulation, 10043 * so it should not really fail at the moment. 10044 */ 10045 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc); 10046 } 10047 else 10048 { 10049 /* 10050 * We must make sure CR8 reads/writes cause VM-exits when TPR shadowing is not 10051 * used by the guest hypervisor. Preventing MMIO accesses to the physical APIC will 10052 * be taken care of by EPT/shadow paging. 10053 */ 10054 if (pVM->hm.s.fAllow64BitGuests) 10055 { 10056 u32ProcCtls |= VMX_PROC_CTLS_CR8_STORE_EXIT 10057 | VMX_PROC_CTLS_CR8_LOAD_EXIT; 10058 } 10059 } 10060 10061 /* 10062 * Validate basic assumptions. 10063 */ 10064 PVMXVMCSINFO pVmcsInfoNstGst = &pVCpu->hm.s.vmx.VmcsInfoNstGst; 10065 Assert(pVM->hm.s.vmx.fAllowUnrestricted); 10066 Assert(pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_SECONDARY_CTLS); 10067 Assert(hmGetVmxActiveVmcsInfo(pVCpu) == pVmcsInfoNstGst); 10068 10069 /* 10070 * Commit it to the nested-guest VMCS.
10071 */ 10072 int rc = VINF_SUCCESS; 10073 if (pVmcsInfoNstGst->u32PinCtls != u32PinCtls) 10074 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_PIN_EXEC, u32PinCtls); 10075 if (pVmcsInfoNstGst->u32ProcCtls != u32ProcCtls) 10076 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, u32ProcCtls); 10077 if (pVmcsInfoNstGst->u32ProcCtls2 != u32ProcCtls2) 10078 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, u32ProcCtls2); 10079 if (pVmcsInfoNstGst->u32XcptBitmap != u32XcptBitmap) 10080 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, u32XcptBitmap); 10081 if (pVmcsInfoNstGst->u64Cr0Mask != u64Cr0Mask) 10082 rc |= VMXWriteVmcsHstN(VMX_VMCS_CTRL_CR0_MASK, u64Cr0Mask); 10083 if (pVmcsInfoNstGst->u64Cr4Mask != u64Cr4Mask) 10084 rc |= VMXWriteVmcsHstN(VMX_VMCS_CTRL_CR4_MASK, u64Cr4Mask); 10085 if (pVmcsInfoNstGst->u32XcptPFMask != u32XcptPFMask) 10086 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK, u32XcptPFMask); 10087 if (pVmcsInfoNstGst->u32XcptPFMatch != u32XcptPFMatch) 10088 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH, u32XcptPFMatch); 10089 if ( !(u32ProcCtls & VMX_PROC_CTLS_PAUSE_EXIT) 10090 && (u32ProcCtls2 & VMX_PROC_CTLS2_PAUSE_LOOP_EXIT)) 10091 { 10092 Assert(pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_PAUSE_LOOP_EXIT); 10093 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_PLE_GAP, cPleGapTicks); 10094 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_PLE_WINDOW, cPleWindowTicks); 10095 } 10096 if (u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW) 10097 { 10098 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_TPR_THRESHOLD, u32TprThreshold); 10099 rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_FULL, HCPhysVirtApic); 10100 } 10101 AssertRCReturn(rc, rc); 10102 10103 /* 10104 * Update the nested-guest VMCS cache. 10105 */ 10106 pVmcsInfoNstGst->u32PinCtls = u32PinCtls; 10107 pVmcsInfoNstGst->u32ProcCtls = u32ProcCtls; 10108 pVmcsInfoNstGst->u32ProcCtls2 = u32ProcCtls2; 10109 pVmcsInfoNstGst->u32XcptBitmap = u32XcptBitmap; 10110 pVmcsInfoNstGst->u64Cr0Mask = u64Cr0Mask; 10111 pVmcsInfoNstGst->u64Cr4Mask = u64Cr4Mask; 10112 pVmcsInfoNstGst->u32XcptPFMask = u32XcptPFMask; 10113 pVmcsInfoNstGst->u32XcptPFMatch = u32XcptPFMatch; 10114 pVmcsInfoNstGst->HCPhysVirtApic = HCPhysVirtApic; 10115 10116 /* 10117 * MSR bitmap. 10118 * 10119 * The MSR bitmap address has already been initialized while setting up the 10120 * nested-guest VMCS, here we need to merge the MSR bitmaps. 10121 */ 10122 if (u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS) 10123 hmR0VmxMergeMsrBitmapNested(pVCpu, pVmcsInfoNstGst, pVmcsInfoGst); 10124 10125 return VINF_SUCCESS; 10126 } 10127 #endif /* VBOX_WITH_NESTED_HWVIRT_VMX */ 8482 10128 8483 10129 … … 8505 10151 * 8506 10152 * @param pVCpu The cross context virtual CPU structure. 8507 * @param pVmxTransient Pointer to the VMXtransient structure.8508 * @param fStepping Set if called from hmR0VmxRunGuestCodeStep(). Makes8509 * us ignore some of the reasons for returning to8510 * ring-3, and return VINF_EM_DBG_STEPPED if event8511 * dispatching took place.10153 * @param pVmxTransient The VMX-transient structure. 10154 * @param fStepping Whether we are single-stepping the guest in the 10155 * hypervisor debugger. Makes us ignore some of the reasons 10156 * for returning to ring-3, and return VINF_EM_DBG_STEPPED 10157 * if event dispatching took place. 
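The commit sequence above only issues a VMWRITE when the cached control value differs from the newly merged one. Reduced to a sketch, the write-if-changed pattern looks like this (WriteVmcsField32 is an assumed stand-in for the real VMWRITE wrapper, not a VirtualBox API):

    #include <stdint.h>

    /* Assumed stand-in for the real 32-bit VMWRITE wrapper. */
    extern int WriteVmcsField32(uint32_t uFieldEnc, uint32_t uValue);

    /* Hypothetical helper: write a VMCS control only when the cached value
       differs, keeping the cache in sync on success. */
    static int CommitControl32(uint32_t uFieldEnc, uint32_t *puCache, uint32_t uNewValue)
    {
        if (*puCache == uNewValue)
            return 0;                       /* nothing changed, skip the VMWRITE */
        int rc = WriteVmcsField32(uFieldEnc, uNewValue);
        if (rc == 0)
            *puCache = uNewValue;
        return rc;
    }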
8512 10158 */ 8513 10159 static VBOXSTRICTRC hmR0VmxPreRunGuest(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient, bool fStepping) … … 8516 10162 8517 10163 #ifdef VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM 8518 if ( CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))8519 { 8520 Log2(("hmR0VmxPreRunGuest: Rescheduling to IEM due to nested-hwvirt or forced IEM exec -> VINF_EM_RESCHEDULE_REM\n"));8521 RT_NOREF3(pVCpu, pVmxTransient, fStepping);10164 if (pVmxTransient->fIsNestedGuest) 10165 { 10166 RT_NOREF2(pVCpu, fStepping); 10167 Log2Func(("Rescheduling to IEM due to nested-hwvirt or forced IEM exec -> VINF_EM_RESCHEDULE_REM\n")); 8522 10168 return VINF_EM_RESCHEDULE_REM; 8523 10169 } … … 8533 10179 VBOXSTRICTRC rcStrict = hmR0VmxCheckForceFlags(pVCpu, fStepping); 8534 10180 if (rcStrict == VINF_SUCCESS) 8535 { /* FFs do esn't get set all the time. */ }10181 { /* FFs don't get set all the time. */ } 8536 10182 else 8537 10183 return rcStrict; 8538 10184 10185 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX 10186 /* 10187 * Switch to the nested-guest VMCS as we may have transitioned into executing 10188 * the nested-guest without leaving ring-0. Otherwise, if we came from ring-3 10189 * we would load the nested-guest VMCS while entering the VMX ring-0 session. 10190 * 10191 * We do this as late as possible to minimize (though not completely remove) 10192 * clearing/loading VMCS again due to premature trips to ring-3 above. 10193 */ 10194 if (pVmxTransient->fIsNestedGuest) 10195 { 10196 if (!pVCpu->hm.s.vmx.fSwitchedToNstGstVmcs) 10197 { 10198 /* 10199 * Ensure we have synced everything from the guest VMCS and also flag that 10200 * that we need to export the full (nested) guest-CPU context to the 10201 * nested-guest VMCS. 10202 */ 10203 HMVMX_CPUMCTX_ASSERT(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL); 10204 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_ALL_GUEST); 10205 10206 RTCCUINTREG const fEFlags = ASMIntDisableFlags(); 10207 int rc = hmR0VmxSwitchVmcs(&pVCpu->hm.s.vmx.VmcsInfo, &pVCpu->hm.s.vmx.VmcsInfoNstGst); 10208 if (RT_LIKELY(rc == VINF_SUCCESS)) 10209 { 10210 pVCpu->hm.s.vmx.fSwitchedToNstGstVmcs = true; 10211 ASMSetFlags(fEFlags); 10212 pVmxTransient->pVmcsInfo = &pVCpu->hm.s.vmx.VmcsInfoNstGst; 10213 10214 /* 10215 * We use a different VM-exit MSR-store area for the nested-guest. Hence, 10216 * flag that we need to update the host MSR values there. Even if we decide 10217 * in the future to share the VM-exit MSR-store area page with the guest, 10218 * if its content differs, we would have to update the host MSRs anyway. 10219 */ 10220 pVCpu->hm.s.vmx.fUpdatedHostAutoMsrs = false; 10221 Assert(!pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer); /** @todo NSTVMX: Paranoia remove later. */ 10222 } 10223 else 10224 { 10225 ASMSetFlags(fEFlags); 10226 return rc; 10227 } 10228 } 10229 10230 /* 10231 * Merge guest VMCS controls with the nested-guest VMCS controls. 10232 * 10233 * Even if we have not executed the guest prior to this (e.g. when resuming 10234 * from a saved state), we should be okay with merging controls as we 10235 * initialize the guest VMCS controls as part of VM setup phase. 10236 */ 10237 if (!pVCpu->hm.s.vmx.fMergedNstGstCtls) 10238 { 10239 int rc = hmR0VmxMergeVmcsNested(pVCpu); 10240 AssertRCReturn(rc, rc); 10241 pVCpu->hm.s.vmx.fMergedNstGstCtls = true; 10242 } 10243 } 10244 #endif 10245 8539 10246 /* 8540 10247 * Virtualize memory-mapped accesses to the physical APIC (may take locks). 
10248 * We look at the guest VMCS control here as we always set it when supported by 10249 * the physical CPU. Looking at the nested-guest control here would not be 10250 * possible because they are not merged yet. 8541 10251 */ 8542 10252 PVM pVM = pVCpu->CTX_SUFF(pVM); 8543 if ( !pVCpu->hm.s.vmx.u64MsrApicBase 8544 && (pVCpu->hm.s.vmx.Ctls.u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS) 10253 PVMXVMCSINFO pVmcsInfo = hmGetVmxActiveVmcsInfo(pVCpu); 10254 if ( !pVCpu->hm.s.vmx.u64GstMsrApicBase 10255 && (pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS) 8545 10256 && PDMHasApic(pVM)) 8546 10257 { … … 8549 10260 } 8550 10261 10262 /* 10263 * Evaluate events to be injected into the guest. 10264 * 10265 * Events in TRPM can be injected without inspecting the guest state. 10266 * If any new events (interrupts/NMI) are pending currently, we try to set up the 10267 * guest to cause a VM-exit the next time they are ready to receive the event. 10268 */ 8551 10269 if (TRPMHasTrap(pVCpu)) 8552 10270 hmR0VmxTrpmTrapToPendingEvent(pVCpu); 8553 uint32_t fIntrState = hmR0VmxEvaluatePendingEvent(pVCpu); 10271 10272 uint32_t fIntrState; 10273 rcStrict = hmR0VmxEvaluatePendingEvent(pVCpu, pVmxTransient, &fIntrState); 10274 10275 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX 10276 /* 10277 * While evaluating pending events if something failed (unlikely) or if we were 10278 * preparing to run a nested-guest but performed a nested-guest VM-exit, we should bail. 10279 */ 10280 if ( rcStrict != VINF_SUCCESS 10281 || ( pVmxTransient->fIsNestedGuest 10282 && !CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))) 10283 return rcStrict; 10284 #endif 8554 10285 8555 10286 /* … … 8557 10288 * needs to be done with longjmps or interrupts + preemption enabled. Event injection might 8558 10289 * also result in triple-faulting the VM. 8559 */ 8560 rcStrict = hmR0VmxInjectPendingEvent(pVCpu, fIntrState, fStepping); 10290 * 10291 * The above does not apply when executing a nested-guest (since unrestricted guest execution 10292 * is a requirement) regardless doing it avoid duplicating code elsewhere. 10293 */ 10294 rcStrict = hmR0VmxInjectPendingEvent(pVCpu, pVmxTransient, fIntrState, fStepping); 8561 10295 if (RT_LIKELY(rcStrict == VINF_SUCCESS)) 8562 10296 { /* likely */ } … … 8587 10321 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES)); 8588 10322 } 10323 10324 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX 10325 /* Paranoia. */ 10326 Assert(!pVmxTransient->fIsNestedGuest || CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx)); 10327 #endif 8589 10328 8590 10329 /* … … 8602 10341 * CPU migration. 8603 10342 * 8604 * If we are injecting events to a real-on-v86 mode guest, we will have to update 8605 * RIP and some segment registers, i.e. hmR0VmxInjectPendingEvent()->hmR0VmxInjectEventVmcs(). 8606 * Hence, loading of the guest state needs to be done -after- injection of events. 8607 */ 8608 rcStrict = hmR0VmxExportGuestStateOptimal(pVCpu); 10343 * If we are injecting events to a real-on-v86 mode guest, we would have updated RIP and some segment 10344 * registers. Hence, loading of the guest state needs to be done -after- injection of events. 10345 */ 10346 rcStrict = hmR0VmxExportGuestStateOptimal(pVCpu, pVmxTransient); 8609 10347 if (RT_LIKELY(rcStrict == VINF_SUCCESS)) 8610 10348 { /* likely */ } … … 8618 10356 * We disable interrupts so that we don't miss any interrupts that would flag preemption 8619 10357 * (IPI/timers etc.) 
when thread-context hooks aren't used and we've been running with 8620 * preemption disabled for a while. Since this is pur ly to aid the10358 * preemption disabled for a while. Since this is purely to aid the 8621 10359 * RTThreadPreemptIsPending() code, it doesn't matter that it may temporarily reenable and 8622 10360 * disable interrupt on NT. … … 8666 10404 8667 10405 /** 8668 * Prepares to run guest code in VT-x and we've committed to doing so. This 8669 * means there is no backing out to ring-3 or anywhere else at this 8670 * point. 10406 * Final preparations before executing guest code using hardware-assisted VMX. 10407 * 10408 * We can no longer get preempted to a different host CPU and there are no returns 10409 * to ring-3. We ignore any errors that may happen from this point (e.g. VMWRITE 10410 * failures), this function is not intended to fail sans unrecoverable hardware 10411 * errors. 8671 10412 * 8672 10413 * @param pVCpu The cross context virtual CPU structure. 8673 * @param pVmxTransient Pointer to the VMXtransient structure.10414 * @param pVmxTransient The VMX-transient structure. 8674 10415 * 8675 10416 * @remarks Called with preemption disabled. … … 8681 10422 Assert(VMMR0IsLogFlushDisabled(pVCpu)); 8682 10423 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD)); 10424 Assert(!pVCpu->hm.s.Event.fPending); 8683 10425 8684 10426 /* … … 8688 10430 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC); 8689 10431 8690 PVM pVM = pVCpu->CTX_SUFF(pVM); 10432 PVM pVM = pVCpu->CTX_SUFF(pVM); 10433 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo; 10434 8691 10435 if (!CPUMIsGuestFPUStateActive(pVCpu)) 8692 10436 { … … 8699 10443 8700 10444 /* 8701 * Lazy-update of the host MSRs values in the auto-load/store MSR area.8702 */8703 if ( !pVCpu->hm.s.vmx.fUpdatedHostMsrs8704 && pVCpu->hm.s.vmx.cMsrs > 0)8705 hmR0VmxUpdateAutoLoadStoreHostMsrs(pVCpu);8706 8707 /*8708 10445 * Re-save the host state bits as we may've been preempted (only happens when 8709 * thread-context hooks are used or when hmR0VmxSetupVMRunHandler() changes pfnStartVM). 8710 * Note that the 64-on-32 switcher saves the (64-bit) host state into the VMCS and 8711 * if we change the switcher back to 32-bit, we *must* save the 32-bit host state here. 8712 * See @bugref{8432}. 10446 * thread-context hooks are used or when the VM start function changes). 10447 * The 64-on-32 switcher saves the (64-bit) host state into the VMCS and if we 10448 * changed the switcher back to 32-bit, we *must* save the 32-bit host state here, 10449 * see @bugref{8432}. 10450 * 10451 * This may also happen when switching to/from a nested-guest VMCS without leaving 10452 * ring-0. 8713 10453 */ 8714 10454 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_HOST_CONTEXT) … … 8724 10464 */ 8725 10465 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE) 8726 hmR0VmxExportSharedState(pVCpu );10466 hmR0VmxExportSharedState(pVCpu, pVmxTransient); 8727 10467 AssertMsg(!pVCpu->hm.s.fCtxChanged, ("fCtxChanged=%#RX64\n", pVCpu->hm.s.fCtxChanged)); 8728 10468 8729 /* Store status of the shared guest-host state at the time of VM-entry. */ 10469 /* 10470 * Store status of the shared guest/host debug state at the time of VM-entry. 10471 */ 8730 10472 #if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) 8731 10473 if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx)) … … 8742 10484 8743 10485 /* 8744 * Cache the TPR-shadow for checking on every VM-exit if it might have changed. 
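The TPR-shadow caching mentioned here records the virtual-APIC TPR before the run so the post-run path can cheaply detect whether the guest changed it. A sketch of the idea (hypothetical types and helpers; 0x80 is the xAPIC TPR register offset within the page):

    #include <stdbool.h>
    #include <stdint.h>

    #define XAPIC_TPR_OFFSET 0x80           /* xAPIC TPR register offset */

    typedef struct TPRCACHE
    {
        uint8_t u8TprBeforeRun;             /* TPR sampled before running the guest */
    } TPRCACHE;

    /* Hypothetical helpers: sample the virtual-APIC TPR before the run and
       detect a change afterwards, so the post-run path only syncs when needed. */
    static void CacheTprBeforeRun(TPRCACHE *pCache, const uint8_t *pbVirtApicPage)
    {
        pCache->u8TprBeforeRun = pbVirtApicPage[XAPIC_TPR_OFFSET];
    }

    static bool TprChangedAfterRun(const TPRCACHE *pCache, const uint8_t *pbVirtApicPage)
    {
        return pbVirtApicPage[XAPIC_TPR_OFFSET] != pCache->u8TprBeforeRun;
    }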
8745 */ 8746 if (pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW) 8747 pVmxTransient->u8GuestTpr = pVCpu->hm.s.vmx.pbVirtApic[XAPIC_OFF_TPR]; 8748 8749 PHMPHYSCPU pHostCpu = hmR0GetCurrentCpu(); 8750 RTCPUID idCurrentCpu = pHostCpu->idCpu; 8751 if ( pVmxTransient->fUpdateTscOffsettingAndPreemptTimer 10486 * Always cache the TPR-shadow if the virtual-APIC page exists, thereby skipping 10487 * more than one conditional check. The post-run side of our code shall determine 10488 * if it needs to sync. the virtual APIC TPR with the TPR-shadow. 10489 */ 10490 if (pVmcsInfo->pbVirtApic) 10491 pVmxTransient->u8GuestTpr = pVmcsInfo->pbVirtApic[XAPIC_OFF_TPR]; 10492 10493 /* 10494 * Update the host MSRs values in the VM-exit MSR-load area. 10495 */ 10496 if (!pVCpu->hm.s.vmx.fUpdatedHostAutoMsrs) 10497 { 10498 if (pVmcsInfo->cExitMsrLoad > 0) 10499 hmR0VmxUpdateAutoLoadHostMsrs(pVCpu, pVmcsInfo); 10500 pVCpu->hm.s.vmx.fUpdatedHostAutoMsrs = true; 10501 } 10502 10503 /* 10504 * Evaluate if we need to intercept guest RDTSC/P accesses. Set up the 10505 * VMX-preemption timer based on the next virtual sync clock deadline. 10506 */ 10507 PHMPHYSCPU pHostCpu = hmR0GetCurrentCpu(); 10508 RTCPUID const idCurrentCpu = pHostCpu->idCpu; 10509 if ( !pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer 8752 10510 || idCurrentCpu != pVCpu->hm.s.idLastCpu) 8753 10511 { 8754 hmR0VmxUpdateTscOffsettingAndPreemptTimer(pVCpu );8755 pVmxTransient->fUpdate TscOffsettingAndPreemptTimer = false;10512 hmR0VmxUpdateTscOffsettingAndPreemptTimer(pVCpu, pVmxTransient); 10513 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = true; 8756 10514 } 8757 10515 8758 10516 ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, true); /* Used for TLB flushing, set this across the world switch. */ 8759 hmR0VmxFlushTaggedTlb(pHostCpu, pVCpu );/* Invalidate the appropriate guest entries from the TLB. */10517 hmR0VmxFlushTaggedTlb(pHostCpu, pVCpu, pVmcsInfo); /* Invalidate the appropriate guest entries from the TLB. */ 8760 10518 Assert(idCurrentCpu == pVCpu->hm.s.idLastCpu); 8761 10519 pVCpu->hm.s.vmx.LastError.idCurrentCpu = idCurrentCpu; /* Update the error reporting info. with the current host CPU. */ … … 8763 10521 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatEntry, &pVCpu->hm.s.StatInGC, x); 8764 10522 8765 TMNotifyStartOfExecution(pVCpu); /* Finally, notify TM to resume its clocks as we're about8766 to start executing. */8767 8768 /* 8769 * Load the TSC_AUX MSR when we are not intercepting RDTSCP.8770 * /8771 if (pVCpu->hm.s.vmx.Ctls.u32ProcCtls2 & VMX_PROC_CTLS2_RDTSCP)8772 {8773 if (!(pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_RDTSC_EXIT))8774 {8775 bool fMsrUpdated;8776 hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_TSC_AUX);8777 int rc2 = hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_TSC_AUX, CPUMGetGuestTscAux(pVCpu), true /* fUpdateHostMsr */,8778 &fMsrUpdated);8779 AssertRC(rc2);8780 Assert(fMsrUpdated || pVCpu->hm.s.vmx.fUpdatedHostMsrs);8781 /* Finally, mark that all host MSR values are updated so we don't redo it without leaving VT-x. See @bugref{6956}. */8782 pVCpu->hm.s.vmx.fUpdatedHostMsrs = true;10523 TMNotifyStartOfExecution(pVCpu); /* Notify TM to resume its clocks when TSC is tied to execution, 10524 as we're about to start executing the guest . */ 10525 10526 /* 10527 * Load the guest TSC_AUX MSR when we are not intercepting RDTSCP. 
10528 * 10529 * This is done this late as updating the TSC offsetting/preemption timer above 10530 * figures out if we can skip intercepting RDTSCP by calculating the number of 10531 * host CPU ticks till the next virtual sync deadline (for the dynamic case). 10532 */ 10533 if (pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_RDTSCP) 10534 { 10535 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_RDTSC_EXIT)) 10536 { 10537 hmR0VmxImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_TSC_AUX); 10538 int rc = hmR0VmxAddAutoLoadStoreMsr(pVCpu, pVmxTransient, MSR_K8_TSC_AUX, CPUMGetGuestTscAux(pVCpu), 10539 true /* fSetReadWrite */, true /* fUpdateHostMsr */); 10540 AssertRC(rc); 8783 10541 } 8784 10542 else 8785 { 8786 hmR0VmxRemoveAutoLoadStoreMsr(pVCpu, MSR_K8_TSC_AUX); 8787 Assert(!pVCpu->hm.s.vmx.cMsrs || pVCpu->hm.s.vmx.fUpdatedHostMsrs); 8788 } 8789 } 8790 8791 if (pVM->cpum.ro.GuestFeatures.fIbrs) 8792 { 8793 bool fMsrUpdated; 8794 hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_OTHER_MSRS); 8795 int rc2 = hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_IA32_SPEC_CTRL, CPUMGetGuestSpecCtrl(pVCpu), true /* fUpdateHostMsr */, 8796 &fMsrUpdated); 8797 AssertRC(rc2); 8798 Assert(fMsrUpdated || pVCpu->hm.s.vmx.fUpdatedHostMsrs); 8799 /* Finally, mark that all host MSR values are updated so we don't redo it without leaving VT-x. See @bugref{6956}. */ 8800 pVCpu->hm.s.vmx.fUpdatedHostMsrs = true; 10543 hmR0VmxRemoveAutoLoadStoreMsr(pVCpu, pVmxTransient, MSR_K8_TSC_AUX); 8801 10544 } 8802 10545 8803 10546 #ifdef VBOX_STRICT 8804 hmR0VmxCheckAutoLoadStoreMsrs(pVCpu );8805 hmR0VmxCheckHostEferMsr(pVCpu );8806 AssertRC(hmR0VmxCheckVmcsCtls(pVCpu ));10547 hmR0VmxCheckAutoLoadStoreMsrs(pVCpu, pVmcsInfo); 10548 hmR0VmxCheckHostEferMsr(pVCpu, pVmcsInfo); 10549 AssertRC(hmR0VmxCheckVmcsCtls(pVCpu, pVmcsInfo)); 8807 10550 #endif 10551 8808 10552 #ifdef HMVMX_ALWAYS_CHECK_GUEST_STATE 8809 if (!(pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)) 8810 { 8811 uint32_t uInvalidReason = hmR0VmxCheckGuestState(pVCpu); 8812 if (uInvalidReason != VMX_IGS_REASON_NOT_FOUND) 8813 Log4(("hmR0VmxCheckGuestState returned %#x\n", uInvalidReason)); 8814 } 10553 /** @todo r=ramshankar: We can now probably use iemVmxVmentryCheckGuestState here. 10554 * Add a PVMXMSRS parameter to it, so that IEM can look at the host MSRs. */ 10555 uint32_t const uInvalidReason = hmR0VmxCheckGuestState(pVCpu, pVmcsInfo); 10556 if (uInvalidReason != VMX_IGS_REASON_NOT_FOUND) 10557 Log4(("hmR0VmxCheckGuestState returned %#x\n", uInvalidReason)); 8815 10558 #endif 8816 10559 } … … 8818 10561 8819 10562 /** 8820 * Performs some essential restoration of state after running guest code in 8821 * VT-x. 10563 * First C routine invoked after running guest code using hardware-assisted VMX. 8822 10564 * 8823 10565 * @param pVCpu The cross context virtual CPU structure. 8824 * @param pVmxTransient Pointer to the VMXtransient structure.10566 * @param pVmxTransient The VMX-transient structure. 8825 10567 * @param rcVMRun Return code of VMLAUNCH/VMRESUME. 8826 10568 * … … 8832 10574 static void hmR0VmxPostRunGuest(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient, int rcVMRun) 8833 10575 { 8834 uint64_t const uHostTsc = ASMReadTSC(); 8835 Assert(!VMMRZCallRing3IsEnabled(pVCpu)); 10576 uint64_t const uHostTsc = ASMReadTSC(); /** @todo We can do a lot better here, see @bugref{9180#c38}. */ 8836 10577 8837 10578 ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, false); /* See HMInvalidatePageOnAllVCpus(): used for TLB flushing. 
*/ … … 8842 10583 pVmxTransient->fVectoringDoublePF = false; /* Vectoring double page-fault needs to be determined later. */ 8843 10584 8844 if (!(pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_RDTSC_EXIT)) 8845 TMCpuTickSetLastSeen(pVCpu, uHostTsc + pVCpu->hm.s.vmx.Ctls.u64TscOffset); 10585 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo; 10586 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_RDTSC_EXIT)) 10587 { 10588 uint64_t uGstTsc; 10589 if (!pVmxTransient->fIsNestedGuest) 10590 uGstTsc = uHostTsc + pVmcsInfo->u64TscOffset; 10591 else 10592 { 10593 uint64_t const uNstGstTsc = uHostTsc + pVmcsInfo->u64TscOffset; 10594 uGstTsc = CPUMRemoveNestedGuestTscOffset(pVCpu, uNstGstTsc); 10595 } 10596 TMCpuTickSetLastSeen(pVCpu, uGstTsc); /* Update TM with the guest TSC. */ 10597 } 8846 10598 8847 10599 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatInGC, &pVCpu->hm.s.StatPreExit, x); 8848 TMNotifyEndOfExecution(pVCpu); /* Notify TM that the guest is no longer running. */ 8849 Assert(!ASMIntAreEnabled()); 10600 TMNotifyEndOfExecution(pVCpu); /* Notify TM that the guest is no longer running. */ 8850 10601 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HM); 8851 10602 8852 10603 #if HC_ARCH_BITS == 64 8853 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_REQUIRED; /* Host state messed up by VT-x, we must restore. */10604 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_REQUIRED; /* Some host state messed up by VMX needs restoring. */ 8854 10605 #endif 8855 10606 #if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) 8856 /* The 64-on-32 switcher maintains fVmcsState on its own and we need to leave it alone here. */ 8857 if (pVCpu->hm.s.vmx.pfnStartVM != VMXR0SwitcherStartVM64) 8858 pVCpu->hm.s.vmx.fVmcsState |= HMVMX_VMCS_STATE_LAUNCHED; /* Use VMRESUME instead of VMLAUNCH in the next run. */ 10607 /* The 64-on-32 switcher maintains VMCS-launch state on its own 10608 and we need to leave it alone here. */ 10609 if (pVmcsInfo->pfnStartVM != VMXR0SwitcherStartVM64) 10610 pVmcsInfo->fVmcsState |= VMX_V_VMCS_LAUNCH_STATE_LAUNCHED; /* Use VMRESUME instead of VMLAUNCH in the next run. */ 8859 10611 #else 8860 pV Cpu->hm.s.vmx.fVmcsState |= HMVMX_VMCS_STATE_LAUNCHED; /* Use VMRESUME instead of VMLAUNCH in the next run. */10612 pVmcsInfo->fVmcsState |= VMX_V_VMCS_LAUNCH_STATE_LAUNCHED; /* Use VMRESUME instead of VMLAUNCH in the next run. */ 8861 10613 #endif 8862 10614 #ifdef VBOX_STRICT 8863 hmR0VmxCheckHostEferMsr(pVCpu ); /* Verify that VMRUN/VMLAUNCH didn't modify host EFER. */10615 hmR0VmxCheckHostEferMsr(pVCpu, pVmcsInfo); /* Verify that the host EFER MSR wasn't modified. */ 8864 10616 #endif 8865 ASMSetFlags(pVmxTransient->fEFlags); /* Enable interrupts. */ 8866 8867 /* Save the basic VM-exit reason. Refer Intel spec. 24.9.1 "Basic VM-exit Information". */ 10617 Assert(!ASMIntAreEnabled()); 10618 ASMSetFlags(pVmxTransient->fEFlags); /* Enable interrupts. */ 10619 Assert(!VMMRZCallRing3IsEnabled(pVCpu)); 10620 10621 /* 10622 * Save the basic VM-exit reason and check if the VM-entry failed. 10623 * See Intel spec. 24.9.1 "Basic VM-exit Information". 
10624 */ 8868 10625 uint32_t uExitReason; 8869 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_REASON, &uExitReason); 8870 rc |= hmR0VmxReadEntryIntInfoVmcs(pVmxTransient); 10626 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_REASON, &uExitReason); 8871 10627 AssertRC(rc); 8872 10628 pVmxTransient->uExitReason = VMX_EXIT_REASON_BASIC(uExitReason); 8873 10629 pVmxTransient->fVMEntryFailed = VMX_EXIT_REASON_HAS_ENTRY_FAILED(uExitReason); 8874 10630 8875 if (rcVMRun == VINF_SUCCESS) 10631 /* 10632 * Check if VMLAUNCH/VMRESUME succeeded. 10633 * If this failed, we cause a guru meditation and cease further execution. 10634 */ 10635 if (RT_LIKELY(rcVMRun == VINF_SUCCESS)) 8876 10636 { 8877 10637 /* … … 8892 10652 UINT64_MAX, uHostTsc); 8893 10653 8894 if ( !pVmxTransient->fVMEntryFailed)10654 if (RT_LIKELY(!pVmxTransient->fVMEntryFailed)) 8895 10655 { 8896 10656 VMMRZCallRing3Enable(pVCpu); … … 8900 10660 8901 10661 #if defined(HMVMX_ALWAYS_SYNC_FULL_GUEST_STATE) || defined(HMVMX_ALWAYS_SAVE_FULL_GUEST_STATE) 8902 rc = hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);10662 rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL); 8903 10663 AssertRC(rc); 8904 10664 #elif defined(HMVMX_ALWAYS_SAVE_GUEST_RFLAGS) 8905 rc = hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_RFLAGS);10665 rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_RFLAGS); 8906 10666 AssertRC(rc); 8907 10667 #else … … 8910 10670 * injecting events on re-entry. 8911 10671 * 8912 * We don't import CR0 (when Unrestricted guest execution is unavailable) despite10672 * We don't import CR0 (when unrestricted guest execution is unavailable) despite 8913 10673 * checking for real-mode while exporting the state because all bits that cause 8914 10674 * mode changes wrt CR0 are intercepted. 8915 10675 */ 8916 rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_HM_VMX_INT_STATE);10676 rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_HM_VMX_INT_STATE); 8917 10677 AssertRC(rc); 8918 10678 #endif … … 8921 10681 * Sync the TPR shadow with our APIC state. 8922 10682 */ 8923 if ( (pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)8924 && pVmxTransient->u8GuestTpr != pVCpu->hm.s.vmx.pbVirtApic[XAPIC_OFF_TPR])10683 if ( !pVmxTransient->fIsNestedGuest 10684 && (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)) 8925 10685 { 8926 rc = APICSetTpr(pVCpu, pVCpu->hm.s.vmx.pbVirtApic[XAPIC_OFF_TPR]); 8927 AssertRC(rc); 8928 ASMAtomicOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_APIC_TPR); 10686 Assert(pVmcsInfo->pbVirtApic); 10687 if (pVmxTransient->u8GuestTpr != pVmcsInfo->pbVirtApic[XAPIC_OFF_TPR]) 10688 { 10689 rc = APICSetTpr(pVCpu, pVmcsInfo->pbVirtApic[XAPIC_OFF_TPR]); 10690 AssertRC(rc); 10691 ASMAtomicOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_APIC_TPR); 10692 } 8929 10693 } 8930 10694 … … 8941 10705 8942 10706 /** 8943 * Runs the guest code using VT-xthe normal way.10707 * Runs the guest code using hardware-assisted VMX the normal way. 8944 10708 * 8945 10709 * @returns VBox status code. 8946 10710 * @param pVCpu The cross context virtual CPU structure. 8947 * 8948 * @note Mostly the same as hmR0VmxRunGuestCodeStep(). 8949 */ 8950 static VBOXSTRICTRC hmR0VmxRunGuestCodeNormal(PVMCPU pVCpu) 8951 { 10711 * @param pcLoops Pointer to the number of executed loops. 
10712 */ 10713 static VBOXSTRICTRC hmR0VmxRunGuestCodeNormal(PVMCPU pVCpu, uint32_t *pcLoops) 10714 { 10715 uint32_t const cMaxResumeLoops = pVCpu->CTX_SUFF(pVM)->hm.s.cMaxResumeLoops; 10716 Assert(pcLoops); 10717 Assert(*pcLoops <= cMaxResumeLoops); 10718 8952 10719 VMXTRANSIENT VmxTransient; 8953 VmxTransient.fUpdateTscOffsettingAndPreemptTimer = true; 10720 RT_ZERO(VmxTransient); 10721 VmxTransient.pVmcsInfo = hmGetVmxActiveVmcsInfo(pVCpu); 10722 10723 /* Paranoia. */ 10724 Assert(VmxTransient.pVmcsInfo == &pVCpu->hm.s.vmx.VmcsInfo); 10725 Assert(!CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx)); 10726 8954 10727 VBOXSTRICTRC rcStrict = VERR_INTERNAL_ERROR_5; 8955 uint32_t cLoops = 0; 8956 8957 for (;; cLoops++) 10728 for (;;) 8958 10729 { 8959 10730 Assert(!HMR0SuspendPending()); 8960 10731 HMVMX_ASSERT_CPU_SAFE(pVCpu); 8961 8962 /* Preparatory work for running guest code, this may force us to return8963 to ring-3. This bugger disables interrupts on VINF_SUCCESS! */8964 10732 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x); 10733 10734 /* 10735 * Preparatory work for running nested-guest code, this may force us to 10736 * return to ring-3. 10737 * 10738 * Warning! This bugger disables interrupts on VINF_SUCCESS! 10739 */ 8965 10740 rcStrict = hmR0VmxPreRunGuest(pVCpu, &VmxTransient, false /* fStepping */); 8966 10741 if (rcStrict != VINF_SUCCESS) 8967 10742 break; 8968 10743 10744 /* Interrupts are disabled at this point! */ 8969 10745 hmR0VmxPreRunGuestCommitted(pVCpu, &VmxTransient); 8970 int rcRun = hmR0VmxRunGuest(pVCpu); 8971 8972 /* Restore any residual host-state and save any bits shared between host 8973 and guest into the guest-CPU state. Re-enables interrupts! */ 10746 int rcRun = hmR0VmxRunGuest(pVCpu, &VmxTransient); 8974 10747 hmR0VmxPostRunGuest(pVCpu, &VmxTransient, rcRun); 8975 8976 /* Check for errors with running the VM (VMLAUNCH/VMRESUME). */ 10748 /* Interrupts are re-enabled at this point! */ 10749 10750 /* 10751 * Check for errors with running the VM (VMLAUNCH/VMRESUME). 10752 */ 8977 10753 if (RT_SUCCESS(rcRun)) 8978 10754 { /* very likely */ } … … 8984 10760 } 8985 10761 8986 /* Profile the VM-exit. */ 10762 /* 10763 * Profile the VM-exit. 10764 */ 8987 10765 AssertMsg(VmxTransient.uExitReason <= VMX_EXIT_MAX, ("%#x\n", VmxTransient.uExitReason)); 8988 10766 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitAll); … … 8993 10771 VBOXVMM_R0_HMVMX_VMEXIT_NOCTX(pVCpu, &pVCpu->cpum.GstCtx, VmxTransient.uExitReason); 8994 10772 8995 /* Handle the VM-exit. */ 10773 /* 10774 * Handle the VM-exit. 10775 */ 8996 10776 #ifdef HMVMX_USE_FUNCTION_TABLE 8997 10777 rcStrict = g_apfnVMExitHandlers[VmxTransient.uExitReason](pVCpu, &VmxTransient); 8998 10778 #else 8999 rcStrict = hmR0VmxHandleExit(pVCpu, &VmxTransient , VmxTransient.uExitReason);10779 rcStrict = hmR0VmxHandleExit(pVCpu, &VmxTransient); 9000 10780 #endif 9001 10781 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitHandling, x); 9002 10782 if (rcStrict == VINF_SUCCESS) 9003 10783 { 9004 if ( cLoops <= pVCpu->CTX_SUFF(pVM)->hm.s.cMaxResumeLoops)9005 continue; /* likely */10784 if (++(*pcLoops) <= cMaxResumeLoops) 10785 continue; 9006 10786 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchMaxResumeLoops); 9007 10787 rcStrict = VINF_EM_RAW_INTERRUPT; … … 9016 10796 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX 9017 10797 /** 9018 * Runs the nested-guest code using VT-x the normal way.10798 * Runs the nested-guest code using hardware-assisted VMX. 9019 10799 * 9020 10800 * @returns VBox status code. 
9021 10801 * @param pVCpu The cross context virtual CPU structure. 9022 * @sa hmR0VmxRunGuestCodeNormal. 9023 */ 9024 static VBOXSTRICTRC hmR0VmxRunGuestCodeNested(PVMCPU pVCpu) 9025 { 9026 RT_NOREF(pVCpu); 9027 return VERR_NOT_IMPLEMENTED; 10802 * @param pcLoops Pointer to the number of executed loops. 10803 * 10804 * @sa hmR0VmxRunGuestCodeNormal(). 10805 */ 10806 static VBOXSTRICTRC hmR0VmxRunGuestCodeNested(PVMCPU pVCpu, uint32_t *pcLoops) 10807 { 10808 uint32_t const cMaxResumeLoops = pVCpu->CTX_SUFF(pVM)->hm.s.cMaxResumeLoops; 10809 Assert(pcLoops); 10810 Assert(*pcLoops <= cMaxResumeLoops); 10811 10812 VMXTRANSIENT VmxTransient; 10813 RT_ZERO(VmxTransient); 10814 VmxTransient.pVmcsInfo = hmGetVmxActiveVmcsInfo(pVCpu); 10815 VmxTransient.fIsNestedGuest = true; 10816 10817 VBOXSTRICTRC rcStrict = VERR_INTERNAL_ERROR_5; 10818 for (;;) 10819 { 10820 Assert(!HMR0SuspendPending()); 10821 HMVMX_ASSERT_CPU_SAFE(pVCpu); 10822 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x); 10823 10824 /* 10825 * Preparatory work for running guest code, this may force us to 10826 * return to ring-3. 10827 * 10828 * Warning! This bugger disables interrupts on VINF_SUCCESS! 10829 */ 10830 rcStrict = hmR0VmxPreRunGuest(pVCpu, &VmxTransient, false /* fStepping */); 10831 if (rcStrict != VINF_SUCCESS) 10832 break; 10833 10834 /* Interrupts are disabled at this point! */ 10835 hmR0VmxPreRunGuestCommitted(pVCpu, &VmxTransient); 10836 int rcRun = hmR0VmxRunGuest(pVCpu, &VmxTransient); 10837 hmR0VmxPostRunGuest(pVCpu, &VmxTransient, rcRun); 10838 /* Interrupts are re-enabled at this point! */ 10839 10840 /* 10841 * Check for errors with running the VM (VMLAUNCH/VMRESUME). 10842 */ 10843 if (RT_SUCCESS(rcRun)) 10844 { /* very likely */ } 10845 else 10846 { 10847 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatPreExit, x); 10848 hmR0VmxReportWorldSwitchError(pVCpu, rcRun, &VmxTransient); 10849 return rcRun; 10850 } 10851 10852 /* 10853 * Profile the VM-exit. 10854 */ 10855 AssertMsg(VmxTransient.uExitReason <= VMX_EXIT_MAX, ("%#x\n", VmxTransient.uExitReason)); 10856 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitAll); 10857 STAM_COUNTER_INC(&pVCpu->hm.s.paStatNestedExitReasonR0[VmxTransient.uExitReason & MASK_EXITREASON_STAT]); 10858 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatPreExit, &pVCpu->hm.s.StatExitHandling, x); 10859 HMVMX_START_EXIT_DISPATCH_PROF(); 10860 10861 VBOXVMM_R0_HMVMX_VMEXIT_NOCTX(pVCpu, &pVCpu->cpum.GstCtx, VmxTransient.uExitReason); 10862 10863 /* 10864 * Handle the VM-exit. 10865 */ 10866 rcStrict = hmR0VmxHandleExitNested(pVCpu, &VmxTransient); 10867 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitHandling, x); 10868 if ( rcStrict == VINF_SUCCESS 10869 && CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx)) 10870 { 10871 if (++(*pcLoops) <= cMaxResumeLoops) 10872 continue; 10873 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchMaxResumeLoops); 10874 rcStrict = VINF_EM_RAW_INTERRUPT; 10875 } 10876 break; 10877 } 10878 10879 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatEntry, x); 10880 return rcStrict; 9028 10881 } 9029 10882 #endif /* VBOX_WITH_NESTED_HWVIRT_VMX */ … … 9098 10951 * @param pVCpu The cross context virtual CPU structure of the 9099 10952 * calling EMT. 9100 * @param pDbgState The structure to initialize. 9101 */ 9102 static void hmR0VmxRunDebugStateInit(PVMCPU pVCpu, PVMXRUNDBGSTATE pDbgState) 10953 * @param pVmxTransient The VMX-transient structure. 10954 * @param pDbgState The debug state to initialize. 
10955 */ 10956 static void hmR0VmxRunDebugStateInit(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState) 9103 10957 { 9104 10958 pDbgState->uRipStart = pVCpu->cpum.GstCtx.rip; … … 9114 10968 pDbgState->fCpe2Extra = 0; 9115 10969 pDbgState->bmXcptExtra = 0; 9116 pDbgState->fProcCtlsInitial = pV Cpu->hm.s.vmx.Ctls.u32ProcCtls;9117 pDbgState->fProcCtls2Initial = pV Cpu->hm.s.vmx.Ctls.u32ProcCtls2;9118 pDbgState->bmXcptInitial = pV Cpu->hm.s.vmx.Ctls.u32XcptBitmap;10970 pDbgState->fProcCtlsInitial = pVmxTransient->pVmcsInfo->u32ProcCtls; 10971 pDbgState->fProcCtls2Initial = pVmxTransient->pVmcsInfo->u32ProcCtls2; 10972 pDbgState->bmXcptInitial = pVmxTransient->pVmcsInfo->u32XcptBitmap; 9119 10973 } 9120 10974 … … 9128 10982 * latter case. 9129 10983 * 9130 * @param pVCpu The cross context virtual CPU structure. 9131 * @param pDbgState The debug state. 9132 */ 9133 static void hmR0VmxPreRunGuestDebugStateApply(PVMCPU pVCpu, PVMXRUNDBGSTATE pDbgState) 10984 * @param pVCpu The cross context virtual CPU structure. 10985 * @param pVmxTransient The VMX-transient structure. 10986 * @param pDbgState The debug state. 10987 */ 10988 static void hmR0VmxPreRunGuestDebugStateApply(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState) 9134 10989 { 9135 10990 /* … … 9140 10995 * there should be no stale data in pCtx at this point. 9141 10996 */ 9142 if ( (pVCpu->hm.s.vmx.Ctls.u32ProcCtls & pDbgState->fCpe1Extra) != pDbgState->fCpe1Extra 9143 || (pVCpu->hm.s.vmx.Ctls.u32ProcCtls & pDbgState->fCpe1Unwanted)) 9144 { 9145 pVCpu->hm.s.vmx.Ctls.u32ProcCtls |= pDbgState->fCpe1Extra; 9146 pVCpu->hm.s.vmx.Ctls.u32ProcCtls &= ~pDbgState->fCpe1Unwanted; 9147 VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.Ctls.u32ProcCtls); 9148 Log6Func(("VMX_VMCS32_CTRL_PROC_EXEC: %#RX32\n", pVCpu->hm.s.vmx.Ctls.u32ProcCtls)); 10997 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo; 10998 if ( (pVmcsInfo->u32ProcCtls & pDbgState->fCpe1Extra) != pDbgState->fCpe1Extra 10999 || (pVmcsInfo->u32ProcCtls & pDbgState->fCpe1Unwanted)) 11000 { 11001 pVmcsInfo->u32ProcCtls |= pDbgState->fCpe1Extra; 11002 pVmcsInfo->u32ProcCtls &= ~pDbgState->fCpe1Unwanted; 11003 VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls); 11004 Log6Func(("VMX_VMCS32_CTRL_PROC_EXEC: %#RX32\n", pVmcsInfo->u32ProcCtls)); 9149 11005 pDbgState->fModifiedProcCtls = true; 9150 11006 } 9151 11007 9152 if ((pV Cpu->hm.s.vmx.Ctls.u32ProcCtls2 & pDbgState->fCpe2Extra) != pDbgState->fCpe2Extra)9153 { 9154 pV Cpu->hm.s.vmx.Ctls.u32ProcCtls2 |= pDbgState->fCpe2Extra;9155 VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, pV Cpu->hm.s.vmx.Ctls.u32ProcCtls2);9156 Log6Func(("VMX_VMCS32_CTRL_PROC_EXEC2: %#RX32\n", pV Cpu->hm.s.vmx.Ctls.u32ProcCtls2));11008 if ((pVmcsInfo->u32ProcCtls2 & pDbgState->fCpe2Extra) != pDbgState->fCpe2Extra) 11009 { 11010 pVmcsInfo->u32ProcCtls2 |= pDbgState->fCpe2Extra; 11011 VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, pVmcsInfo->u32ProcCtls2); 11012 Log6Func(("VMX_VMCS32_CTRL_PROC_EXEC2: %#RX32\n", pVmcsInfo->u32ProcCtls2)); 9157 11013 pDbgState->fModifiedProcCtls2 = true; 9158 11014 } 9159 11015 9160 if ((pV Cpu->hm.s.vmx.Ctls.u32XcptBitmap & pDbgState->bmXcptExtra) != pDbgState->bmXcptExtra)9161 { 9162 pV Cpu->hm.s.vmx.Ctls.u32XcptBitmap |= pDbgState->bmXcptExtra;9163 VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pV Cpu->hm.s.vmx.Ctls.u32XcptBitmap);9164 Log6Func(("VMX_VMCS32_CTRL_EXCEPTION_BITMAP: %#RX32\n", pV Cpu->hm.s.vmx.Ctls.u32XcptBitmap));11016 if ((pVmcsInfo->u32XcptBitmap & 
pDbgState->bmXcptExtra) != pDbgState->bmXcptExtra) 11017 { 11018 pVmcsInfo->u32XcptBitmap |= pDbgState->bmXcptExtra; 11019 VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVmcsInfo->u32XcptBitmap); 11020 Log6Func(("VMX_VMCS32_CTRL_EXCEPTION_BITMAP: %#RX32\n", pVmcsInfo->u32XcptBitmap)); 9165 11021 pDbgState->fModifiedXcptBitmap = true; 9166 11022 } 9167 11023 9168 if (pDbgState->fClearCr0Mask && pV Cpu->hm.s.vmx.Ctls.u32Cr0Mask != 0)9169 { 9170 pV Cpu->hm.s.vmx.Ctls.u32Cr0Mask = 0;9171 VMXWriteVmcs 32(VMX_VMCS_CTRL_CR0_MASK, 0);11024 if (pDbgState->fClearCr0Mask && pVmcsInfo->u64Cr0Mask != 0) 11025 { 11026 pVmcsInfo->u64Cr0Mask = 0; 11027 VMXWriteVmcsHstN(VMX_VMCS_CTRL_CR0_MASK, 0); 9172 11028 Log6Func(("VMX_VMCS_CTRL_CR0_MASK: 0\n")); 9173 11029 } 9174 11030 9175 if (pDbgState->fClearCr4Mask && pV Cpu->hm.s.vmx.Ctls.u32Cr4Mask != 0)9176 { 9177 pV Cpu->hm.s.vmx.Ctls.u32Cr4Mask = 0;9178 VMXWriteVmcs 32(VMX_VMCS_CTRL_CR4_MASK, 0);11031 if (pDbgState->fClearCr4Mask && pVmcsInfo->u64Cr4Mask != 0) 11032 { 11033 pVmcsInfo->u64Cr4Mask = 0; 11034 VMXWriteVmcsHstN(VMX_VMCS_CTRL_CR4_MASK, 0); 9179 11035 Log6Func(("VMX_VMCS_CTRL_CR4_MASK: 0\n")); 9180 11036 } 11037 11038 NOREF(pVCpu); 9181 11039 } 9182 11040 … … 9187 11045 * 9188 11046 * @returns Strict VBox status code (i.e. informational status codes too). 9189 * @param pVCpu The cross context virtual CPU structure. 9190 * @param pDbgState The debug state. 9191 * @param rcStrict The return code from executing the guest using single 9192 * stepping. 9193 */ 9194 static VBOXSTRICTRC hmR0VmxRunDebugStateRevert(PVMCPU pVCpu, PVMXRUNDBGSTATE pDbgState, VBOXSTRICTRC rcStrict) 11047 * @param pVCpu The cross context virtual CPU structure. 11048 * @param pVmxTransient The VMX-transient structure. 11049 * @param pDbgState The debug state. 11050 * @param rcStrict The return code from executing the guest using single 11051 * stepping. 11052 */ 11053 static VBOXSTRICTRC hmR0VmxRunDebugStateRevert(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState, 11054 VBOXSTRICTRC rcStrict) 9195 11055 { 9196 11056 /* … … 9198 11058 * next time around. 9199 11059 */ 11060 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo; 11061 9200 11062 /* We reload the initial value, trigger what we can of recalculations the 9201 11063 next time around. From the looks of things, that's all that's required atm. */ … … 9206 11068 int rc2 = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pDbgState->fProcCtlsInitial); 9207 11069 AssertRCReturn(rc2, rc2); 9208 pV Cpu->hm.s.vmx.Ctls.u32ProcCtls = pDbgState->fProcCtlsInitial;11070 pVmcsInfo->u32ProcCtls = pDbgState->fProcCtlsInitial; 9209 11071 } 9210 11072 … … 9212 11074 cached value and reload the field. */ 9213 11075 if ( pDbgState->fModifiedProcCtls2 9214 && pV Cpu->hm.s.vmx.Ctls.u32ProcCtls2 != pDbgState->fProcCtls2Initial)11076 && pVmcsInfo->u32ProcCtls2 != pDbgState->fProcCtls2Initial) 9215 11077 { 9216 11078 int rc2 = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, pDbgState->fProcCtls2Initial); 9217 11079 AssertRCReturn(rc2, rc2); 9218 pV Cpu->hm.s.vmx.Ctls.u32ProcCtls2 = pDbgState->fProcCtls2Initial;11080 pVmcsInfo->u32ProcCtls2 = pDbgState->fProcCtls2Initial; 9219 11081 } 9220 11082 … … 9222 11084 reloading and partial recalculation the next time around. 
*/ 9223 11085 if (pDbgState->fModifiedXcptBitmap) 9224 pV Cpu->hm.s.vmx.Ctls.u32XcptBitmap = pDbgState->bmXcptInitial;11086 pVmcsInfo->u32XcptBitmap = pDbgState->bmXcptInitial; 9225 11087 9226 11088 return rcStrict; … … 9235 11097 * 9236 11098 * @param pVCpu The cross context virtual CPU structure. 11099 * @param pVmxTransient The VMX-transient structure. May update 11100 * fUpdatedTscOffsettingAndPreemptTimer. 9237 11101 * @param pDbgState The debug state. 9238 * @param pVmxTransient Pointer to the VMX transient structure. May update 9239 * fUpdateTscOffsettingAndPreemptTimer. 9240 */ 9241 static void hmR0VmxPreRunGuestDebugStateUpdate(PVMCPU pVCpu, PVMXRUNDBGSTATE pDbgState, PVMXTRANSIENT pVmxTransient) 11102 */ 11103 static void hmR0VmxPreRunGuestDebugStateUpdate(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState) 9242 11104 { 9243 11105 /* … … 9388 11250 || IS_EITHER_ENABLED(pVM, INSTR_CRX_WRITE)) 9389 11251 { 9390 int rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_APIC_TPR); 11252 int rc = hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR4 11253 | CPUMCTX_EXTRN_APIC_TPR); 9391 11254 AssertRC(rc); 9392 11255 … … 9508 11371 { 9509 11372 pVCpu->hm.s.fDebugWantRdTscExit ^= true; 9510 pVmxTransient->fUpdate TscOffsettingAndPreemptTimer = true;11373 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false; 9511 11374 } 9512 11375 … … 9528 11391 * @returns Strict VBox status code (i.e. informational status codes too). 9529 11392 * @param pVCpu The cross context virtual CPU structure. 9530 * @param pVmxTransient Pointer to the VMX-transient structure.11393 * @param pVmxTransient The VMX-transient structure. 9531 11394 * @param uExitReason The VM-exit reason. 9532 11395 * … … 9740 11603 { 9741 11604 hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient); 9742 hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);11605 hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL); 9743 11606 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 9744 11607 switch (enmEvent1) … … 9886 11749 && DBGF_IS_EVENT_ENABLED(pVM, enmEvent1)) 9887 11750 { 9888 HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);11751 hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP); 9889 11752 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, enmEvent1, DBGFEVENTCTX_HM, 1, uEventArg); 9890 11753 if (rcStrict != VINF_SUCCESS) … … 9894 11757 && DBGF_IS_EVENT_ENABLED(pVM, enmEvent2)) 9895 11758 { 9896 HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);11759 hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP); 9897 11760 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, enmEvent2, DBGFEVENTCTX_HM, 1, uEventArg); 9898 11761 if (rcStrict != VINF_SUCCESS) … … 9913 11776 * @returns Strict VBox status code (i.e. informational status codes too). 9914 11777 * @param pVCpu The cross context virtual CPU structure of the calling EMT. 9915 * @param pVmxTransient Pointer to the VMX-transient structure.11778 * @param pVmxTransient The VMX-transient structure. 9916 11779 * @param pDbgState The debug state. 
9917 11780 */ … … 9927 11790 { 9928 11791 hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient); 9929 int rc = hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);11792 int rc = hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL); 9930 11793 AssertRC(rc); 9931 11794 VBOXVMM_R0_HMVMX_VMEXIT(pVCpu, &pVCpu->cpum.GstCtx, pVmxTransient->uExitReason, pVmxTransient->uExitQual); … … 10010 11873 case VMX_EXIT_XRSTORS: 10011 11874 { 10012 int rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);11875 int rc = hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP); 10013 11876 AssertRCReturn(rc, rc); 10014 11877 if ( pVCpu->cpum.GstCtx.rip != pDbgState->uRipStart … … 10058 11921 10059 11922 /** 10060 * Single steps guest code using VT-x. 11923 * Single steps guest code using hardware-assisted VMX. 11924 * 11925 * This is -not- the same as the guest single-stepping itself (say using EFLAGS.TF) 11926 * but single-stepping through the hypervisor debugger. 10061 11927 * 10062 11928 * @returns Strict VBox status code (i.e. informational status codes too). 10063 11929 * @param pVCpu The cross context virtual CPU structure. 11930 * @param pcLoops Pointer to the number of executed loops. 10064 11931 * 10065 11932 * @note Mostly the same as hmR0VmxRunGuestCodeNormal(). 10066 11933 */ 10067 static VBOXSTRICTRC hmR0VmxRunGuestCodeDebug(PVMCPU pVCpu) 10068 { 11934 static VBOXSTRICTRC hmR0VmxRunGuestCodeDebug(PVMCPU pVCpu, uint32_t *pcLoops) 11935 { 11936 uint32_t const cMaxResumeLoops = pVCpu->CTX_SUFF(pVM)->hm.s.cMaxResumeLoops; 11937 Assert(pcLoops); 11938 Assert(*pcLoops <= cMaxResumeLoops); 11939 10069 11940 VMXTRANSIENT VmxTransient; 10070 VmxTransient.fUpdateTscOffsettingAndPreemptTimer = true; 11941 RT_ZERO(VmxTransient); 11942 VmxTransient.pVmcsInfo = hmGetVmxActiveVmcsInfo(pVCpu); 10071 11943 10072 11944 /* Set HMCPU indicators. */ … … 10078 11950 /* State we keep to help modify and later restore the VMCS fields we alter, and for detecting steps. */ 10079 11951 VMXRUNDBGSTATE DbgState; 10080 hmR0VmxRunDebugStateInit(pVCpu, & DbgState);10081 hmR0VmxPreRunGuestDebugStateUpdate(pVCpu, & DbgState, &VmxTransient);11952 hmR0VmxRunDebugStateInit(pVCpu, &VmxTransient, &DbgState); 11953 hmR0VmxPreRunGuestDebugStateUpdate(pVCpu, &VmxTransient, &DbgState); 10082 11954 10083 11955 /* … … 10085 11957 */ 10086 11958 VBOXSTRICTRC rcStrict = VERR_INTERNAL_ERROR_5; 10087 for ( uint32_t cLoops = 0; ; cLoops++)11959 for (;;) 10088 11960 { 10089 11961 Assert(!HMR0SuspendPending()); 10090 11962 HMVMX_ASSERT_CPU_SAFE(pVCpu); 11963 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x); 10091 11964 bool fStepping = pVCpu->hm.s.fSingleInstruction; 10092 11965 11966 /* Set up VM-execution controls the next two can respond to. */ 11967 hmR0VmxPreRunGuestDebugStateApply(pVCpu, &VmxTransient, &DbgState); 11968 10093 11969 /* 10094 * Preparatory work for running guest code, this may force us to return 10095 * to ring-3. This bugger disables interrupts on VINF_SUCCESS! 11970 * Preparatory work for running guest code, this may force us to 11971 * return to ring-3. 11972 * 11973 * Warning! This bugger disables interrupts on VINF_SUCCESS! 10096 11974 */ 10097 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);10098 hmR0VmxPreRunGuestDebugStateApply(pVCpu, &DbgState); /* Set up execute controls the next to can respond to. 
*/10099 11975 rcStrict = hmR0VmxPreRunGuest(pVCpu, &VmxTransient, fStepping); 10100 11976 if (rcStrict != VINF_SUCCESS) 10101 11977 break; 10102 11978 11979 /* Interrupts are disabled at this point! */ 10103 11980 hmR0VmxPreRunGuestCommitted(pVCpu, &VmxTransient); 10104 hmR0VmxPreRunGuestDebugStateApply(pVCpu, &DbgState); /* Override any obnoxious code in the above two calls. */ 11981 11982 /* Override any obnoxious code in the above two calls. */ 11983 hmR0VmxPreRunGuestDebugStateApply(pVCpu, &VmxTransient, &DbgState); 10105 11984 10106 11985 /* 10107 * Now we can run the guest code.11986 * Finally execute the guest. 10108 11987 */ 10109 int rcRun = hmR0VmxRunGuest(pVCpu); 10110 10111 /* 10112 * Restore any residual host-state and save any bits shared between host 10113 * and guest into the guest-CPU state. Re-enables interrupts! 10114 */ 11988 int rcRun = hmR0VmxRunGuest(pVCpu, &VmxTransient); 11989 10115 11990 hmR0VmxPostRunGuest(pVCpu, &VmxTransient, rcRun); 11991 /* Interrupts are re-enabled at this point! */ 10116 11992 10117 11993 /* Check for errors with running the VM (VMLAUNCH/VMRESUME). */ … … 10141 12017 if (rcStrict != VINF_SUCCESS) 10142 12018 break; 10143 if ( cLoops > pVCpu->CTX_SUFF(pVM)->hm.s.cMaxResumeLoops)12019 if (++(*pcLoops) > cMaxResumeLoops) 10144 12020 { 10145 12021 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchMaxResumeLoops); … … 10154 12030 if (fStepping) 10155 12031 { 10156 int rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);12032 int rc = hmR0VmxImportGuestState(pVCpu, VmxTransient.pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP); 10157 12033 AssertRC(rc); 10158 12034 if ( pVCpu->cpum.GstCtx.rip != DbgState.uRipStart … … 10169 12045 */ 10170 12046 if (VBOXVMM_GET_SETTINGS_SEQ_NO() != DbgState.uDtraceSettingsSeqNo) 10171 hmR0VmxPreRunGuestDebugStateUpdate(pVCpu, & DbgState, &VmxTransient);12047 hmR0VmxPreRunGuestDebugStateUpdate(pVCpu, &VmxTransient, &DbgState); 10172 12048 } 10173 12049 … … 10177 12053 if (pVCpu->hm.s.fClearTrapFlag) 10178 12054 { 10179 int rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_RFLAGS);12055 int rc = hmR0VmxImportGuestState(pVCpu, VmxTransient.pVmcsInfo, CPUMCTX_EXTRN_RFLAGS); 10180 12056 AssertRC(rc); 10181 12057 pVCpu->hm.s.fClearTrapFlag = false; … … 10187 12063 10188 12064 /* 10189 * Restore VM-exit control settings as we may not re enter this function the12065 * Restore VM-exit control settings as we may not re-enter this function the 10190 12066 * next time around. 10191 12067 */ 10192 rcStrict = hmR0VmxRunDebugStateRevert(pVCpu, & DbgState, rcStrict);12068 rcStrict = hmR0VmxRunDebugStateRevert(pVCpu, &VmxTransient, &DbgState, rcStrict); 10193 12069 10194 12070 /* Restore HMCPU indicators. */ … … 10343 12219 10344 12220 /** 10345 * Runs the guest code using VT-x.12221 * Runs the guest using hardware-assisted VMX. 10346 12222 * 10347 12223 * @returns Strict VBox status code (i.e. informational status codes too). 
… … 10358 12234 10359 12235 VBOXSTRICTRC rcStrict; 12236 uint32_t cLoops = 0; 10360 12237 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX 10361 12238 bool const fInNestedGuestMode = CPUMIsGuestInVmxNonRootMode(pCtx); … … 10369 12246 && !DBGFIsStepping(pVCpu) 10370 12247 && !pVCpu->CTX_SUFF(pVM)->dbgf.ro.cEnabledInt3Breakpoints) 10371 rcStrict = hmR0VmxRunGuestCodeNormal(pVCpu );12248 rcStrict = hmR0VmxRunGuestCodeNormal(pVCpu, &cLoops); 10372 12249 else 10373 rcStrict = hmR0VmxRunGuestCodeDebug(pVCpu );12250 rcStrict = hmR0VmxRunGuestCodeDebug(pVCpu, &cLoops); 10374 12251 } 10375 12252 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX … … 10378 12255 10379 12256 if (rcStrict == VINF_VMX_VMLAUNCH_VMRESUME) 10380 rcStrict = hmR0VmxRunGuestCodeNested(pVCpu );12257 rcStrict = hmR0VmxRunGuestCodeNested(pVCpu, &cLoops); 10381 12258 #endif 10382 12259 … … 10399 12276 10400 12277 #ifndef HMVMX_USE_FUNCTION_TABLE 10401 DECLINLINE(VBOXSTRICTRC) hmR0VmxHandleExit(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient, uint32_t rcReason) 12278 /** 12279 * Handles a guest VM-exit from hardware-assisted VMX execution. 12280 * 12281 * @returns Strict VBox status code (i.e. informational status codes too). 12282 * @param pVCpu The cross context virtual CPU structure. 12283 * @param pVmxTransient The VMX-transient structure. 12284 */ 12285 DECLINLINE(VBOXSTRICTRC) hmR0VmxHandleExit(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) 10402 12286 { 10403 12287 #ifdef DEBUG_ramshankar … … 10414 12298 # define VMEXIT_CALL_RET(a_fSave, a_CallExpr) return a_CallExpr 10415 12299 #endif 12300 uint32_t const rcReason = pVmxTransient->uExitReason; 10416 12301 switch (rcReason) 10417 12302 { … … 10491 12376 10492 12377 case VMX_EXIT_ENCLS: 10493 case VMX_EXIT_RDSEED: /* only spurious VM-exits, so undefined */12378 case VMX_EXIT_RDSEED: 10494 12379 case VMX_EXIT_PML_FULL: 10495 12380 default: … … 10499 12384 } 10500 12385 #endif /* !HMVMX_USE_FUNCTION_TABLE */ 12386 12387 12388 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX 12389 /** 12390 * Handles a nested-guest VM-exit from hardware-assisted VMX execution. 12391 * 12392 * @returns Strict VBox status code (i.e. informational status codes too). 12393 * @param pVCpu The cross context virtual CPU structure. 12394 * @param pVmxTransient The VMX-transient structure. 
12395 */ 12396 DECLINLINE(VBOXSTRICTRC) hmR0VmxHandleExitNested(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) 12397 { 12398 uint32_t const rcReason = pVmxTransient->uExitReason; 12399 switch (rcReason) 12400 { 12401 case VMX_EXIT_EPT_MISCONFIG: 12402 case VMX_EXIT_EPT_VIOLATION: 12403 case VMX_EXIT_IO_INSTR: 12404 case VMX_EXIT_CPUID: 12405 case VMX_EXIT_RDTSC: 12406 case VMX_EXIT_RDTSCP: 12407 case VMX_EXIT_APIC_ACCESS: 12408 case VMX_EXIT_XCPT_OR_NMI: 12409 case VMX_EXIT_MOV_CRX: 12410 case VMX_EXIT_EXT_INT: 12411 case VMX_EXIT_INT_WINDOW: 12412 case VMX_EXIT_TPR_BELOW_THRESHOLD: 12413 case VMX_EXIT_MWAIT: 12414 case VMX_EXIT_MONITOR: 12415 case VMX_EXIT_TASK_SWITCH: 12416 case VMX_EXIT_PREEMPT_TIMER: 12417 case VMX_EXIT_RDMSR: 12418 case VMX_EXIT_WRMSR: 12419 case VMX_EXIT_VMCALL: 12420 case VMX_EXIT_MOV_DRX: 12421 case VMX_EXIT_HLT: 12422 case VMX_EXIT_INVD: 12423 case VMX_EXIT_INVLPG: 12424 case VMX_EXIT_RSM: 12425 case VMX_EXIT_MTF: 12426 case VMX_EXIT_PAUSE: 12427 case VMX_EXIT_GDTR_IDTR_ACCESS: 12428 case VMX_EXIT_LDTR_TR_ACCESS: 12429 case VMX_EXIT_WBINVD: 12430 case VMX_EXIT_XSETBV: 12431 case VMX_EXIT_RDRAND: 12432 case VMX_EXIT_INVPCID: 12433 case VMX_EXIT_GETSEC: 12434 case VMX_EXIT_RDPMC: 12435 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX 12436 case VMX_EXIT_VMCLEAR: 12437 case VMX_EXIT_VMLAUNCH: 12438 case VMX_EXIT_VMPTRLD: 12439 case VMX_EXIT_VMPTRST: 12440 case VMX_EXIT_VMREAD: 12441 case VMX_EXIT_VMRESUME: 12442 case VMX_EXIT_VMWRITE: 12443 case VMX_EXIT_VMXOFF: 12444 case VMX_EXIT_VMXON: 12445 #endif 12446 case VMX_EXIT_TRIPLE_FAULT: 12447 case VMX_EXIT_NMI_WINDOW: 12448 case VMX_EXIT_INIT_SIGNAL: 12449 case VMX_EXIT_SIPI: 12450 case VMX_EXIT_IO_SMI: 12451 case VMX_EXIT_SMI: 12452 case VMX_EXIT_ERR_MSR_LOAD: 12453 case VMX_EXIT_ERR_INVALID_GUEST_STATE: 12454 case VMX_EXIT_ERR_MACHINE_CHECK: 12455 12456 case VMX_EXIT_INVEPT: 12457 case VMX_EXIT_INVVPID: 12458 case VMX_EXIT_VMFUNC: 12459 case VMX_EXIT_XSAVES: 12460 case VMX_EXIT_XRSTORS: 12461 12462 case VMX_EXIT_ENCLS: 12463 case VMX_EXIT_RDSEED: 12464 case VMX_EXIT_PML_FULL: 12465 default: 12466 return hmR0VmxExitErrUndefined(pVCpu, pVmxTransient); 12467 } 12468 #undef VMEXIT_CALL_RET 12469 } 12470 #endif /* VBOX_WITH_NESTED_HWVIRT_VMX */ 10501 12471 10502 12472 … … 10517 12487 AssertPtr((a_pVmxTransient)); \ 10518 12488 Assert((a_pVmxTransient)->fVMEntryFailed == false); \ 12489 Assert((a_pVmxTransient)->pVmcsInfo); \ 10519 12490 Assert(ASMIntAreEnabled()); \ 10520 12491 HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu); \ … … 10567 12538 * @returns VBox status code, no informational status codes. 10568 12539 * @param pVCpu The cross context virtual CPU structure. 10569 * @param pVmxTransient Pointer to the VMXtransient structure.12540 * @param pVmxTransient The VMX-transient structure. 10570 12541 * 10571 12542 * @remarks No-long-jump zone!!! … … 10574 12545 { 10575 12546 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient); 10576 rc |= hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);12547 rc |= hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS); 10577 12548 AssertRCReturn(rc, rc); 10578 12549 … … 10583 12554 10584 12555 /** 10585 * Tries to determine what part of the guest-state VT-x has deemed as invalid 10586 * and update error record fields accordingly. 10587 * 10588 * @return VMX_IGS_* return codes. 10589 * @retval VMX_IGS_REASON_NOT_FOUND if this function could not find anything 10590 * wrong with the guest state. 
10591 * 10592 * @param pVCpu The cross context virtual CPU structure. 10593 * 10594 * @remarks This function assumes our cache of the VMCS controls 10595 * are valid, i.e. hmR0VmxCheckVmcsCtls() succeeded. 10596 */ 10597 static uint32_t hmR0VmxCheckGuestState(PVMCPU pVCpu) 10598 { 10599 #define HMVMX_ERROR_BREAK(err) { uError = (err); break; } 10600 #define HMVMX_CHECK_BREAK(expr, err) if (!(expr)) { \ 10601 uError = (err); \ 10602 break; \ 10603 } else do { } while (0) 10604 10605 int rc; 10606 PVM pVM = pVCpu->CTX_SUFF(pVM); 10607 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 10608 uint32_t uError = VMX_IGS_ERROR; 10609 uint32_t u32Val; 10610 bool const fUnrestrictedGuest = pVM->hm.s.vmx.fUnrestrictedGuest; 10611 10612 do 10613 { 12556 * Handle a condition that occurred while delivering an event through the guest 12557 * IDT. 12558 * 12559 * @returns Strict VBox status code (i.e. informational status codes too). 12560 * @retval VINF_SUCCESS if we should continue handling the VM-exit. 12561 * @retval VINF_HM_DOUBLE_FAULT if a \#DF condition was detected and we ought 12562 * to continue execution of the guest which will delivery the \#DF. 12563 * @retval VINF_EM_RESET if we detected a triple-fault condition. 12564 * @retval VERR_EM_GUEST_CPU_HANG if we detected a guest CPU hang. 12565 * 12566 * @param pVCpu The cross context virtual CPU structure. 12567 * @param pVmxTransient The VMX-transient structure. 12568 * 12569 * @remarks No-long-jump zone!!! 12570 */ 12571 static VBOXSTRICTRC hmR0VmxCheckExitDueToEventDelivery(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) 12572 { 12573 uint32_t const uExitVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo); 12574 12575 int rc2 = hmR0VmxReadIdtVectoringInfoVmcs(pVmxTransient); 12576 rc2 |= hmR0VmxReadExitIntInfoVmcs(pVmxTransient); 12577 AssertRCReturn(rc2, rc2); 12578 12579 VBOXSTRICTRC rcStrict = VINF_SUCCESS; 12580 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo; 12581 if (VMX_IDT_VECTORING_INFO_IS_VALID(pVmxTransient->uIdtVectoringInfo)) 12582 { 12583 uint32_t const uIdtVectorType = VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uIdtVectoringInfo); 12584 uint32_t const uIdtVector = VMX_IDT_VECTORING_INFO_VECTOR(pVmxTransient->uIdtVectoringInfo); 12585 10614 12586 /* 10615 * CR0. 12587 * If the event was a software interrupt (generated with INT n) or a software exception 12588 * (generated by INT3/INTO) or a privileged software exception (generated by INT1), we 12589 * can handle the VM-exit and continue guest execution which will re-execute the 12590 * instruction rather than re-injecting the exception, as that can cause premature 12591 * trips to ring-3 before injection and involve TRPM which currently has no way of 12592 * storing that these exceptions were caused by these instructions (ICEBP's #DB poses 12593 * the problem). 10616 12594 */ 10617 uint32_t fSetCr0 = (uint32_t)(pVM->hm.s.vmx.Msrs.u64Cr0Fixed0 & pVM->hm.s.vmx.Msrs.u64Cr0Fixed1); 10618 uint32_t const fZapCr0 = (uint32_t)(pVM->hm.s.vmx.Msrs.u64Cr0Fixed0 | pVM->hm.s.vmx.Msrs.u64Cr0Fixed1); 10619 /* Exceptions for unrestricted-guests for fixed CR0 bits (PE, PG). 10620 See Intel spec. 26.3.1 "Checks on Guest Control Registers, Debug Registers and MSRs." 
*/ 10621 if (fUnrestrictedGuest) 10622 fSetCr0 &= ~(X86_CR0_PE | X86_CR0_PG); 10623 10624 uint32_t u32GuestCr0; 10625 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR0, &u32GuestCr0); 10626 AssertRCBreak(rc); 10627 HMVMX_CHECK_BREAK((u32GuestCr0 & fSetCr0) == fSetCr0, VMX_IGS_CR0_FIXED1); 10628 HMVMX_CHECK_BREAK(!(u32GuestCr0 & ~fZapCr0), VMX_IGS_CR0_FIXED0); 10629 if ( !fUnrestrictedGuest 10630 && (u32GuestCr0 & X86_CR0_PG) 10631 && !(u32GuestCr0 & X86_CR0_PE)) 10632 { 10633 HMVMX_ERROR_BREAK(VMX_IGS_CR0_PG_PE_COMBO); 12595 IEMXCPTRAISE enmRaise; 12596 IEMXCPTRAISEINFO fRaiseInfo; 12597 if ( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT 12598 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT 12599 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT) 12600 { 12601 enmRaise = IEMXCPTRAISE_REEXEC_INSTR; 12602 fRaiseInfo = IEMXCPTRAISEINFO_NONE; 12603 } 12604 else if (VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo)) 12605 { 12606 uint32_t const uExitVectorType = VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo); 12607 uint32_t const fIdtVectorFlags = hmR0VmxGetIemXcptFlags(uIdtVector, uIdtVectorType); 12608 uint32_t const fExitVectorFlags = hmR0VmxGetIemXcptFlags(uExitVector, uExitVectorType); 12609 /** @todo Make AssertMsgReturn as just AssertMsg later. */ 12610 AssertMsgReturn(uExitVectorType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT, 12611 ("Unexpected VM-exit interruption vector type %#x!\n", uExitVectorType), VERR_VMX_IPE_5); 12612 12613 enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fIdtVectorFlags, uIdtVector, fExitVectorFlags, uExitVector, &fRaiseInfo); 12614 12615 /* Determine a vectoring #PF condition, see comment in hmR0VmxExitXcptPF(). */ 12616 if (fRaiseInfo & (IEMXCPTRAISEINFO_EXT_INT_PF | IEMXCPTRAISEINFO_NMI_PF)) 12617 { 12618 pVmxTransient->fVectoringPF = true; 12619 enmRaise = IEMXCPTRAISE_PREV_EVENT; 12620 } 12621 } 12622 else 12623 { 12624 /* 12625 * If an exception or hardware interrupt delivery caused an EPT violation/misconfig or APIC access 12626 * VM-exit, then the VM-exit interruption-information will not be valid and we end up here. 12627 * It is sufficient to reflect the original event to the guest after handling the VM-exit. 12628 */ 12629 Assert( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT 12630 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI 12631 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_EXT_INT); 12632 enmRaise = IEMXCPTRAISE_PREV_EVENT; 12633 fRaiseInfo = IEMXCPTRAISEINFO_NONE; 10634 12634 } 10635 12635 10636 12636 /* 10637 * CR4. 12637 * On CPUs that support Virtual NMIs, if this VM-exit (be it an exception or EPT violation/misconfig 12638 * etc.) occurred while delivering the NMI, we need to clear the block-by-NMI field in the guest 12639 * interruptibility-state before re-delivering the NMI after handling the VM-exit. Otherwise the 12640 * subsequent VM-entry would fail. 12641 * 12642 * See Intel spec. 30.7.1.2 "Resuming Guest Software after Handling an Exception". See @bugref{7445}. 
10638 12643 */ 10639 uint64_t const fSetCr4 = (pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 & pVM->hm.s.vmx.Msrs.u64Cr4Fixed1); 10640 uint64_t const fZapCr4 = (pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 | pVM->hm.s.vmx.Msrs.u64Cr4Fixed1); 10641 10642 uint32_t u32GuestCr4; 10643 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR4, &u32GuestCr4); 10644 AssertRCBreak(rc); 10645 HMVMX_CHECK_BREAK((u32GuestCr4 & fSetCr4) == fSetCr4, VMX_IGS_CR4_FIXED1); 10646 HMVMX_CHECK_BREAK(!(u32GuestCr4 & ~fZapCr4), VMX_IGS_CR4_FIXED0); 10647 12644 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS) 12645 && uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI 12646 && ( enmRaise == IEMXCPTRAISE_PREV_EVENT 12647 || (fRaiseInfo & IEMXCPTRAISEINFO_NMI_PF)) 12648 && (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)) 12649 { 12650 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS); 12651 } 12652 12653 switch (enmRaise) 12654 { 12655 case IEMXCPTRAISE_CURRENT_XCPT: 12656 { 12657 Log4Func(("IDT: Pending secondary Xcpt: uIdtVectoringInfo=%#RX64 uExitIntInfo=%#RX64\n", 12658 pVmxTransient->uIdtVectoringInfo, pVmxTransient->uExitIntInfo)); 12659 Assert(rcStrict == VINF_SUCCESS); 12660 break; 12661 } 12662 12663 case IEMXCPTRAISE_PREV_EVENT: 12664 { 12665 uint32_t u32ErrCode; 12666 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(pVmxTransient->uIdtVectoringInfo)) 12667 { 12668 rc2 = hmR0VmxReadIdtVectoringErrorCodeVmcs(pVmxTransient); 12669 AssertRCReturn(rc2, rc2); 12670 u32ErrCode = pVmxTransient->uIdtVectoringErrorCode; 12671 } 12672 else 12673 u32ErrCode = 0; 12674 12675 /* If uExitVector is #PF, CR2 value will be updated from the VMCS if it's a guest #PF, see hmR0VmxExitXcptPF(). */ 12676 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectPendingReflect); 12677 hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(pVmxTransient->uIdtVectoringInfo), 12678 0 /* cbInstr */, u32ErrCode, pVCpu->cpum.GstCtx.cr2); 12679 12680 Log4Func(("IDT: Pending vectoring event %#RX64 Err=%#RX32\n", pVCpu->hm.s.Event.u64IntInfo, 12681 pVCpu->hm.s.Event.u32ErrCode)); 12682 Assert(rcStrict == VINF_SUCCESS); 12683 break; 12684 } 12685 12686 case IEMXCPTRAISE_REEXEC_INSTR: 12687 Assert(rcStrict == VINF_SUCCESS); 12688 break; 12689 12690 case IEMXCPTRAISE_DOUBLE_FAULT: 12691 { 12692 /* 12693 * Determing a vectoring double #PF condition. Used later, when PGM evaluates the 12694 * second #PF as a guest #PF (and not a shadow #PF) and needs to be converted into a #DF. 12695 */ 12696 if (fRaiseInfo & IEMXCPTRAISEINFO_PF_PF) 12697 { 12698 pVmxTransient->fVectoringDoublePF = true; 12699 Log4Func(("IDT: Vectoring double #PF %#RX64 cr2=%#RX64\n", pVCpu->hm.s.Event.u64IntInfo, 12700 pVCpu->cpum.GstCtx.cr2)); 12701 rcStrict = VINF_SUCCESS; 12702 } 12703 else 12704 { 12705 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectPendingReflect); 12706 hmR0VmxSetPendingXcptDF(pVCpu); 12707 Log4Func(("IDT: Pending vectoring #DF %#RX64 uIdtVector=%#x uExitVector=%#x\n", pVCpu->hm.s.Event.u64IntInfo, 12708 uIdtVector, uExitVector)); 12709 rcStrict = VINF_HM_DOUBLE_FAULT; 12710 } 12711 break; 12712 } 12713 12714 case IEMXCPTRAISE_TRIPLE_FAULT: 12715 { 12716 Log4Func(("IDT: Pending vectoring triple-fault uIdt=%#x uExit=%#x\n", uIdtVector, uExitVector)); 12717 rcStrict = VINF_EM_RESET; 12718 break; 12719 } 12720 12721 case IEMXCPTRAISE_CPU_HANG: 12722 { 12723 Log4Func(("IDT: Bad guest! Entering CPU hang. fRaiseInfo=%#x\n", fRaiseInfo)); 12724 rcStrict = VERR_EM_GUEST_CPU_HANG; 12725 break; 12726 } 12727 12728 default: 12729 { 12730 AssertMsgFailed(("IDT: vcpu[%RU32] Unexpected/invalid value! 
enmRaise=%#x\n", pVCpu->idCpu, enmRaise)); 12731 rcStrict = VERR_VMX_IPE_2; 12732 break; 12733 } 12734 } 12735 } 12736 else if ( VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo) 12737 && VMX_EXIT_INT_INFO_IS_NMI_UNBLOCK_IRET(pVmxTransient->uExitIntInfo) 12738 && uExitVector != X86_XCPT_DF 12739 && (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)) 12740 { 10648 12741 /* 10649 * IA32_DEBUGCTL MSR. 12742 * Execution of IRET caused this fault when NMI blocking was in effect (i.e we're in the guest NMI handler). 12743 * We need to set the block-by-NMI field so that NMIs remain blocked until the IRET execution is restarted. 12744 * See Intel spec. 30.7.1.2 "Resuming guest software after handling an exception". 10650 12745 */ 10651 uint64_t u64Val; 10652 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_DEBUGCTL_FULL, &u64Val); 10653 AssertRCBreak(rc); 10654 if ( (pVCpu->hm.s.vmx.Ctls.u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG) 10655 && (u64Val & 0xfffffe3c)) /* Bits 31:9, bits 5:2 MBZ. */ 10656 { 10657 HMVMX_ERROR_BREAK(VMX_IGS_DEBUGCTL_MSR_RESERVED); 10658 } 10659 uint64_t u64DebugCtlMsr = u64Val; 10660 10661 #ifdef VBOX_STRICT 10662 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY, &u32Val); 10663 AssertRCBreak(rc); 10664 Assert(u32Val == pVCpu->hm.s.vmx.Ctls.u32EntryCtls); 10665 #endif 10666 bool const fLongModeGuest = RT_BOOL(pVCpu->hm.s.vmx.Ctls.u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST); 10667 10668 /* 10669 * RIP and RFLAGS. 10670 */ 10671 uint32_t u32Eflags; 10672 #if HC_ARCH_BITS == 64 10673 rc = VMXReadVmcs64(VMX_VMCS_GUEST_RIP, &u64Val); 10674 AssertRCBreak(rc); 10675 /* pCtx->rip can be different than the one in the VMCS (e.g. run guest code and VM-exits that don't update it). */ 10676 if ( !fLongModeGuest 10677 || !pCtx->cs.Attr.n.u1Long) 10678 { 10679 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffff00000000)), VMX_IGS_LONGMODE_RIP_INVALID); 10680 } 10681 /** @todo If the processor supports N < 64 linear-address bits, bits 63:N 10682 * must be identical if the "IA-32e mode guest" VM-entry 10683 * control is 1 and CS.L is 1. No check applies if the 10684 * CPU supports 64 linear-address bits. */ 10685 10686 /* Flags in pCtx can be different (real-on-v86 for instance). We are only concerned about the VMCS contents here. */ 10687 rc = VMXReadVmcs64(VMX_VMCS_GUEST_RFLAGS, &u64Val); 10688 AssertRCBreak(rc); 10689 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffc08028)), /* Bit 63:22, Bit 15, 5, 3 MBZ. */ 10690 VMX_IGS_RFLAGS_RESERVED); 10691 HMVMX_CHECK_BREAK((u64Val & X86_EFL_RA1_MASK), VMX_IGS_RFLAGS_RESERVED1); /* Bit 1 MB1. */ 10692 u32Eflags = u64Val; 10693 #else 10694 rc = VMXReadVmcs32(VMX_VMCS_GUEST_RFLAGS, &u32Eflags); 10695 AssertRCBreak(rc); 10696 HMVMX_CHECK_BREAK(!(u32Eflags & 0xffc08028), VMX_IGS_RFLAGS_RESERVED); /* Bit 31:22, Bit 15, 5, 3 MBZ. */ 10697 HMVMX_CHECK_BREAK((u32Eflags & X86_EFL_RA1_MASK), VMX_IGS_RFLAGS_RESERVED1); /* Bit 1 MB1. */ 10698 #endif 10699 10700 if ( fLongModeGuest 10701 || ( fUnrestrictedGuest 10702 && !(u32GuestCr0 & X86_CR0_PE))) 10703 { 10704 HMVMX_CHECK_BREAK(!(u32Eflags & X86_EFL_VM), VMX_IGS_RFLAGS_VM_INVALID); 10705 } 10706 10707 uint32_t u32EntryInfo; 10708 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &u32EntryInfo); 10709 AssertRCBreak(rc); 10710 if ( VMX_ENTRY_INT_INFO_IS_VALID(u32EntryInfo) 10711 && VMX_ENTRY_INT_INFO_TYPE(u32EntryInfo) == VMX_EXIT_INT_INFO_TYPE_EXT_INT) 10712 { 10713 HMVMX_CHECK_BREAK(u32Eflags & X86_EFL_IF, VMX_IGS_RFLAGS_IF_INVALID); 10714 } 10715 10716 /* 10717 * 64-bit checks. 
10718 */ 10719 #if HC_ARCH_BITS == 64 10720 if (fLongModeGuest) 10721 { 10722 HMVMX_CHECK_BREAK(u32GuestCr0 & X86_CR0_PG, VMX_IGS_CR0_PG_LONGMODE); 10723 HMVMX_CHECK_BREAK(u32GuestCr4 & X86_CR4_PAE, VMX_IGS_CR4_PAE_LONGMODE); 10724 } 10725 10726 if ( !fLongModeGuest 10727 && (u32GuestCr4 & X86_CR4_PCIDE)) 10728 { 10729 HMVMX_ERROR_BREAK(VMX_IGS_CR4_PCIDE); 10730 } 10731 10732 /** @todo CR3 field must be such that bits 63:52 and bits in the range 10733 * 51:32 beyond the processor's physical-address width are 0. */ 10734 10735 if ( (pVCpu->hm.s.vmx.Ctls.u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG) 10736 && (pCtx->dr[7] & X86_DR7_MBZ_MASK)) 10737 { 10738 HMVMX_ERROR_BREAK(VMX_IGS_DR7_RESERVED); 10739 } 10740 10741 rc = VMXReadVmcs64(VMX_VMCS_HOST_SYSENTER_ESP, &u64Val); 10742 AssertRCBreak(rc); 10743 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_ESP_NOT_CANONICAL); 10744 10745 rc = VMXReadVmcs64(VMX_VMCS_HOST_SYSENTER_EIP, &u64Val); 10746 AssertRCBreak(rc); 10747 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_EIP_NOT_CANONICAL); 10748 #endif 10749 10750 /* 10751 * PERF_GLOBAL MSR. 10752 */ 10753 if (pVCpu->hm.s.vmx.Ctls.u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PERF_MSR) 10754 { 10755 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL, &u64Val); 10756 AssertRCBreak(rc); 10757 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffff8fffffffc)), 10758 VMX_IGS_PERF_GLOBAL_MSR_RESERVED); /* Bits 63:35, bits 31:2 MBZ. */ 10759 } 10760 10761 /* 10762 * PAT MSR. 10763 */ 10764 if (pVCpu->hm.s.vmx.Ctls.u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PAT_MSR) 10765 { 10766 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PAT_FULL, &u64Val); 10767 AssertRCBreak(rc); 10768 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0x707070707070707)), VMX_IGS_PAT_MSR_RESERVED); 10769 for (unsigned i = 0; i < 8; i++) 10770 { 10771 uint8_t u8Val = (u64Val & 0xff); 10772 if ( u8Val != 0 /* UC */ 10773 && u8Val != 1 /* WC */ 10774 && u8Val != 4 /* WT */ 10775 && u8Val != 5 /* WP */ 10776 && u8Val != 6 /* WB */ 10777 && u8Val != 7 /* UC- */) 10778 { 10779 HMVMX_ERROR_BREAK(VMX_IGS_PAT_MSR_INVALID); 10780 } 10781 u64Val >>= 8; 10782 } 10783 } 10784 10785 /* 10786 * EFER MSR. 10787 */ 10788 if (pVCpu->hm.s.vmx.Ctls.u32EntryCtls & VMX_ENTRY_CTLS_LOAD_EFER_MSR) 10789 { 10790 Assert(pVM->hm.s.vmx.fSupportsVmcsEfer); 10791 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_EFER_FULL, &u64Val); 10792 AssertRCBreak(rc); 10793 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffffffffff2fe)), 10794 VMX_IGS_EFER_MSR_RESERVED); /* Bits 63:12, bit 9, bits 7:1 MBZ. */ 10795 HMVMX_CHECK_BREAK(RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL( pVCpu->hm.s.vmx.Ctls.u32EntryCtls 10796 & VMX_ENTRY_CTLS_IA32E_MODE_GUEST), 10797 VMX_IGS_EFER_LMA_GUEST_MODE_MISMATCH); 10798 /** @todo r=ramshankar: Unrestricted check here is probably wrong, see 10799 * iemVmxVmentryCheckGuestState(). */ 10800 HMVMX_CHECK_BREAK( fUnrestrictedGuest 10801 || !(u32GuestCr0 & X86_CR0_PG) 10802 || RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL(u64Val & MSR_K6_EFER_LME), 10803 VMX_IGS_EFER_LMA_LME_MISMATCH); 10804 } 10805 10806 /* 10807 * Segment registers. 
10808 */ 10809 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE) 10810 || !(pCtx->ldtr.Sel & X86_SEL_LDT), VMX_IGS_LDTR_TI_INVALID); 10811 if (!(u32Eflags & X86_EFL_VM)) 10812 { 10813 /* CS */ 10814 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1Present, VMX_IGS_CS_ATTR_P_INVALID); 10815 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xf00), VMX_IGS_CS_ATTR_RESERVED); 10816 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xfffe0000), VMX_IGS_CS_ATTR_RESERVED); 10817 HMVMX_CHECK_BREAK( (pCtx->cs.u32Limit & 0xfff) == 0xfff 10818 || !(pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID); 10819 HMVMX_CHECK_BREAK( !(pCtx->cs.u32Limit & 0xfff00000) 10820 || (pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID); 10821 /* CS cannot be loaded with NULL in protected mode. */ 10822 HMVMX_CHECK_BREAK(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_CS_ATTR_UNUSABLE); 10823 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1DescType, VMX_IGS_CS_ATTR_S_INVALID); 10824 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11) 10825 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_UNEQUAL); 10826 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15) 10827 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_MISMATCH); 10828 else if (pVM->hm.s.vmx.fUnrestrictedGuest && pCtx->cs.Attr.n.u4Type == 3) 10829 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == 0, VMX_IGS_CS_ATTR_DPL_INVALID); 10830 else 10831 HMVMX_ERROR_BREAK(VMX_IGS_CS_ATTR_TYPE_INVALID); 10832 10833 /* SS */ 10834 HMVMX_CHECK_BREAK( pVM->hm.s.vmx.fUnrestrictedGuest 10835 || (pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL), VMX_IGS_SS_CS_RPL_UNEQUAL); 10836 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL), VMX_IGS_SS_ATTR_DPL_RPL_UNEQUAL); 10837 if ( !(pCtx->cr0 & X86_CR0_PE) 10838 || pCtx->cs.Attr.n.u4Type == 3) 10839 { 10840 HMVMX_CHECK_BREAK(!pCtx->ss.Attr.n.u2Dpl, VMX_IGS_SS_ATTR_DPL_INVALID); 10841 } 10842 if (!(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE)) 10843 { 10844 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7, VMX_IGS_SS_ATTR_TYPE_INVALID); 10845 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u1Present, VMX_IGS_SS_ATTR_P_INVALID); 10846 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xf00), VMX_IGS_SS_ATTR_RESERVED); 10847 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xfffe0000), VMX_IGS_SS_ATTR_RESERVED); 10848 HMVMX_CHECK_BREAK( (pCtx->ss.u32Limit & 0xfff) == 0xfff 10849 || !(pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID); 10850 HMVMX_CHECK_BREAK( !(pCtx->ss.u32Limit & 0xfff00000) 10851 || (pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID); 10852 } 10853 10854 /* DS, ES, FS, GS - only check for usable selectors, see hmR0VmxExportGuestSegmenReg(). 
*/ 10855 if (!(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE)) 10856 { 10857 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_DS_ATTR_A_INVALID); 10858 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u1Present, VMX_IGS_DS_ATTR_P_INVALID); 10859 HMVMX_CHECK_BREAK( pVM->hm.s.vmx.fUnrestrictedGuest 10860 || pCtx->ds.Attr.n.u4Type > 11 10861 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL); 10862 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xf00), VMX_IGS_DS_ATTR_RESERVED); 10863 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xfffe0000), VMX_IGS_DS_ATTR_RESERVED); 10864 HMVMX_CHECK_BREAK( (pCtx->ds.u32Limit & 0xfff) == 0xfff 10865 || !(pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID); 10866 HMVMX_CHECK_BREAK( !(pCtx->ds.u32Limit & 0xfff00000) 10867 || (pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID); 10868 HMVMX_CHECK_BREAK( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE) 10869 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_DS_ATTR_TYPE_INVALID); 10870 } 10871 if (!(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE)) 10872 { 10873 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_ES_ATTR_A_INVALID); 10874 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u1Present, VMX_IGS_ES_ATTR_P_INVALID); 10875 HMVMX_CHECK_BREAK( pVM->hm.s.vmx.fUnrestrictedGuest 10876 || pCtx->es.Attr.n.u4Type > 11 10877 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL); 10878 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xf00), VMX_IGS_ES_ATTR_RESERVED); 10879 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xfffe0000), VMX_IGS_ES_ATTR_RESERVED); 10880 HMVMX_CHECK_BREAK( (pCtx->es.u32Limit & 0xfff) == 0xfff 10881 || !(pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID); 10882 HMVMX_CHECK_BREAK( !(pCtx->es.u32Limit & 0xfff00000) 10883 || (pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID); 10884 HMVMX_CHECK_BREAK( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE) 10885 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_ES_ATTR_TYPE_INVALID); 10886 } 10887 if (!(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE)) 10888 { 10889 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_FS_ATTR_A_INVALID); 10890 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u1Present, VMX_IGS_FS_ATTR_P_INVALID); 10891 HMVMX_CHECK_BREAK( pVM->hm.s.vmx.fUnrestrictedGuest 10892 || pCtx->fs.Attr.n.u4Type > 11 10893 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL), VMX_IGS_FS_ATTR_DPL_RPL_UNEQUAL); 10894 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xf00), VMX_IGS_FS_ATTR_RESERVED); 10895 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xfffe0000), VMX_IGS_FS_ATTR_RESERVED); 10896 HMVMX_CHECK_BREAK( (pCtx->fs.u32Limit & 0xfff) == 0xfff 10897 || !(pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID); 10898 HMVMX_CHECK_BREAK( !(pCtx->fs.u32Limit & 0xfff00000) 10899 || (pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID); 10900 HMVMX_CHECK_BREAK( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE) 10901 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_FS_ATTR_TYPE_INVALID); 10902 } 10903 if (!(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE)) 10904 { 10905 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_GS_ATTR_A_INVALID); 10906 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u1Present, VMX_IGS_GS_ATTR_P_INVALID); 10907 HMVMX_CHECK_BREAK( pVM->hm.s.vmx.fUnrestrictedGuest 10908 || pCtx->gs.Attr.n.u4Type > 11 10909 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL), VMX_IGS_GS_ATTR_DPL_RPL_UNEQUAL); 10910 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 
0xf00), VMX_IGS_GS_ATTR_RESERVED); 10911 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xfffe0000), VMX_IGS_GS_ATTR_RESERVED); 10912 HMVMX_CHECK_BREAK( (pCtx->gs.u32Limit & 0xfff) == 0xfff 10913 || !(pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID); 10914 HMVMX_CHECK_BREAK( !(pCtx->gs.u32Limit & 0xfff00000) 10915 || (pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID); 10916 HMVMX_CHECK_BREAK( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE) 10917 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_GS_ATTR_TYPE_INVALID); 10918 } 10919 /* 64-bit capable CPUs. */ 10920 #if HC_ARCH_BITS == 64 10921 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL); 10922 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL); 10923 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE) 10924 || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL); 10925 HMVMX_CHECK_BREAK(!RT_HI_U32(pCtx->cs.u64Base), VMX_IGS_LONGMODE_CS_BASE_INVALID); 10926 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ss.u64Base), 10927 VMX_IGS_LONGMODE_SS_BASE_INVALID); 10928 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ds.u64Base), 10929 VMX_IGS_LONGMODE_DS_BASE_INVALID); 10930 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->es.u64Base), 10931 VMX_IGS_LONGMODE_ES_BASE_INVALID); 10932 #endif 10933 } 10934 else 10935 { 10936 /* V86 mode checks. */ 10937 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr; 10938 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active) 10939 { 10940 u32CSAttr = 0xf3; u32SSAttr = 0xf3; 10941 u32DSAttr = 0xf3; u32ESAttr = 0xf3; 10942 u32FSAttr = 0xf3; u32GSAttr = 0xf3; 10943 } 10944 else 10945 { 10946 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u; 10947 u32DSAttr = pCtx->ds.Attr.u; u32ESAttr = pCtx->es.Attr.u; 10948 u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u; 10949 } 10950 10951 /* CS */ 10952 HMVMX_CHECK_BREAK((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), VMX_IGS_V86_CS_BASE_INVALID); 10953 HMVMX_CHECK_BREAK(pCtx->cs.u32Limit == 0xffff, VMX_IGS_V86_CS_LIMIT_INVALID); 10954 HMVMX_CHECK_BREAK(u32CSAttr == 0xf3, VMX_IGS_V86_CS_ATTR_INVALID); 10955 /* SS */ 10956 HMVMX_CHECK_BREAK((pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4), VMX_IGS_V86_SS_BASE_INVALID); 10957 HMVMX_CHECK_BREAK(pCtx->ss.u32Limit == 0xffff, VMX_IGS_V86_SS_LIMIT_INVALID); 10958 HMVMX_CHECK_BREAK(u32SSAttr == 0xf3, VMX_IGS_V86_SS_ATTR_INVALID); 10959 /* DS */ 10960 HMVMX_CHECK_BREAK((pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4), VMX_IGS_V86_DS_BASE_INVALID); 10961 HMVMX_CHECK_BREAK(pCtx->ds.u32Limit == 0xffff, VMX_IGS_V86_DS_LIMIT_INVALID); 10962 HMVMX_CHECK_BREAK(u32DSAttr == 0xf3, VMX_IGS_V86_DS_ATTR_INVALID); 10963 /* ES */ 10964 HMVMX_CHECK_BREAK((pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4), VMX_IGS_V86_ES_BASE_INVALID); 10965 HMVMX_CHECK_BREAK(pCtx->es.u32Limit == 0xffff, VMX_IGS_V86_ES_LIMIT_INVALID); 10966 HMVMX_CHECK_BREAK(u32ESAttr == 0xf3, VMX_IGS_V86_ES_ATTR_INVALID); 10967 /* FS */ 10968 HMVMX_CHECK_BREAK((pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4), VMX_IGS_V86_FS_BASE_INVALID); 10969 HMVMX_CHECK_BREAK(pCtx->fs.u32Limit == 0xffff, VMX_IGS_V86_FS_LIMIT_INVALID); 10970 HMVMX_CHECK_BREAK(u32FSAttr == 0xf3, VMX_IGS_V86_FS_ATTR_INVALID); 10971 /* GS */ 10972 HMVMX_CHECK_BREAK((pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4), VMX_IGS_V86_GS_BASE_INVALID); 10973 
HMVMX_CHECK_BREAK(pCtx->gs.u32Limit == 0xffff, VMX_IGS_V86_GS_LIMIT_INVALID); 10974 HMVMX_CHECK_BREAK(u32GSAttr == 0xf3, VMX_IGS_V86_GS_ATTR_INVALID); 10975 /* 64-bit capable CPUs. */ 10976 #if HC_ARCH_BITS == 64 10977 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL); 10978 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL); 10979 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE) 10980 || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL); 10981 HMVMX_CHECK_BREAK(!RT_HI_U32(pCtx->cs.u64Base), VMX_IGS_LONGMODE_CS_BASE_INVALID); 10982 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ss.u64Base), 10983 VMX_IGS_LONGMODE_SS_BASE_INVALID); 10984 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ds.u64Base), 10985 VMX_IGS_LONGMODE_DS_BASE_INVALID); 10986 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->es.u64Base), 10987 VMX_IGS_LONGMODE_ES_BASE_INVALID); 10988 #endif 10989 } 10990 10991 /* 10992 * TR. 10993 */ 10994 HMVMX_CHECK_BREAK(!(pCtx->tr.Sel & X86_SEL_LDT), VMX_IGS_TR_TI_INVALID); 10995 /* 64-bit capable CPUs. */ 10996 #if HC_ARCH_BITS == 64 10997 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->tr.u64Base), VMX_IGS_TR_BASE_NOT_CANONICAL); 10998 #endif 10999 if (fLongModeGuest) 11000 { 11001 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u4Type == 11, /* 64-bit busy TSS. */ 11002 VMX_IGS_LONGMODE_TR_ATTR_TYPE_INVALID); 11003 } 11004 else 11005 { 11006 HMVMX_CHECK_BREAK( pCtx->tr.Attr.n.u4Type == 3 /* 16-bit busy TSS. */ 11007 || pCtx->tr.Attr.n.u4Type == 11, /* 32-bit busy TSS.*/ 11008 VMX_IGS_TR_ATTR_TYPE_INVALID); 11009 } 11010 HMVMX_CHECK_BREAK(!pCtx->tr.Attr.n.u1DescType, VMX_IGS_TR_ATTR_S_INVALID); 11011 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u1Present, VMX_IGS_TR_ATTR_P_INVALID); 11012 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & 0xf00), VMX_IGS_TR_ATTR_RESERVED); /* Bits 11:8 MBZ. */ 11013 HMVMX_CHECK_BREAK( (pCtx->tr.u32Limit & 0xfff) == 0xfff 11014 || !(pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID); 11015 HMVMX_CHECK_BREAK( !(pCtx->tr.u32Limit & 0xfff00000) 11016 || (pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID); 11017 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_TR_ATTR_UNUSABLE); 11018 11019 /* 11020 * GDTR and IDTR. 11021 */ 11022 #if HC_ARCH_BITS == 64 11023 rc = VMXReadVmcs64(VMX_VMCS_GUEST_GDTR_BASE, &u64Val); 11024 AssertRCBreak(rc); 11025 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_GDTR_BASE_NOT_CANONICAL); 11026 11027 rc = VMXReadVmcs64(VMX_VMCS_GUEST_IDTR_BASE, &u64Val); 11028 AssertRCBreak(rc); 11029 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_IDTR_BASE_NOT_CANONICAL); 11030 #endif 11031 11032 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val); 11033 AssertRCBreak(rc); 11034 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_GDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */ 11035 11036 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val); 11037 AssertRCBreak(rc); 11038 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_IDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */ 11039 11040 /* 11041 * Guest Non-Register State. 11042 */ 11043 /* Activity State. 
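[Editor's note] Several of the checks above (FS/GS/LDTR/TR bases, GDTR/IDTR bases, SYSENTER ESP/EIP) use X86_IS_CANONICAL. On a CPU with 48 linear-address bits that amounts to requiring bits 63:47 to be a sign extension of bit 47. A small sketch under that assumption (real code would derive the implemented width from CPUID):

    #include <stdbool.h>
    #include <stdint.h>

    static bool IsCanonical48(uint64_t uAddr)
    {
        uint64_t const uHighBits = uAddr >> 47;                  /* bits 63:47, i.e. 17 bits */
        return uHighBits == 0 || uHighBits == UINT64_C(0x1ffff); /* all clear or all set */
    }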
*/ 11044 uint32_t u32ActivityState; 11045 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_ACTIVITY_STATE, &u32ActivityState); 11046 AssertRCBreak(rc); 11047 HMVMX_CHECK_BREAK( !u32ActivityState 11048 || (u32ActivityState & RT_BF_GET(pVM->hm.s.vmx.Msrs.u64Misc, VMX_BF_MISC_ACTIVITY_STATES)), 11049 VMX_IGS_ACTIVITY_STATE_INVALID); 11050 HMVMX_CHECK_BREAK( !(pCtx->ss.Attr.n.u2Dpl) 11051 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_HLT, VMX_IGS_ACTIVITY_STATE_HLT_INVALID); 11052 uint32_t u32IntrState; 11053 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_INT_STATE, &u32IntrState); 11054 AssertRCBreak(rc); 11055 if ( u32IntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS 11056 || u32IntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_STI) 11057 { 11058 HMVMX_CHECK_BREAK(u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_ACTIVE, VMX_IGS_ACTIVITY_STATE_ACTIVE_INVALID); 11059 } 11060 11061 /** @todo Activity state and injecting interrupts. Left as a todo since we 11062 * currently don't use activity states but ACTIVE. */ 11063 11064 HMVMX_CHECK_BREAK( !(pVCpu->hm.s.vmx.Ctls.u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM) 11065 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_SIPI_WAIT, VMX_IGS_ACTIVITY_STATE_SIPI_WAIT_INVALID); 11066 11067 /* Guest interruptibility-state. */ 11068 HMVMX_CHECK_BREAK(!(u32IntrState & 0xffffffe0), VMX_IGS_INTERRUPTIBILITY_STATE_RESERVED); 11069 HMVMX_CHECK_BREAK((u32IntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS)) 11070 != (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS), 11071 VMX_IGS_INTERRUPTIBILITY_STATE_STI_MOVSS_INVALID); 11072 HMVMX_CHECK_BREAK( (u32Eflags & X86_EFL_IF) 11073 || !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI), 11074 VMX_IGS_INTERRUPTIBILITY_STATE_STI_EFL_INVALID); 11075 if (VMX_ENTRY_INT_INFO_IS_VALID(u32EntryInfo)) 11076 { 11077 if (VMX_ENTRY_INT_INFO_TYPE(u32EntryInfo) == VMX_EXIT_INT_INFO_TYPE_EXT_INT) 11078 { 11079 HMVMX_CHECK_BREAK( !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI) 11080 && !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS), 11081 VMX_IGS_INTERRUPTIBILITY_STATE_EXT_INT_INVALID); 11082 } 11083 else if (VMX_ENTRY_INT_INFO_TYPE(u32EntryInfo) == VMX_EXIT_INT_INFO_TYPE_NMI) 11084 { 11085 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS), 11086 VMX_IGS_INTERRUPTIBILITY_STATE_MOVSS_INVALID); 11087 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI), 11088 VMX_IGS_INTERRUPTIBILITY_STATE_STI_INVALID); 11089 } 11090 } 11091 /** @todo Assumes the processor is not in SMM. */ 11092 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI), 11093 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_INVALID); 11094 HMVMX_CHECK_BREAK( !(pVCpu->hm.s.vmx.Ctls.u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM) 11095 || (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI), 11096 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_SMM_INVALID); 11097 if ( (pVCpu->hm.s.vmx.Ctls.u32PinCtls & VMX_PIN_CTLS_VIRT_NMI) 11098 && VMX_ENTRY_INT_INFO_IS_VALID(u32EntryInfo) 11099 && VMX_ENTRY_INT_INFO_TYPE(u32EntryInfo) == VMX_EXIT_INT_INFO_TYPE_NMI) 11100 { 11101 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI), 11102 VMX_IGS_INTERRUPTIBILITY_STATE_NMI_INVALID); 11103 } 11104 11105 /* Pending debug exceptions. */ 11106 #if HC_ARCH_BITS == 64 11107 rc = VMXReadVmcs64(VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &u64Val); 11108 AssertRCBreak(rc); 11109 /* Bits 63:15, Bit 13, Bits 11:4 MBZ. 
*/ 11110 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffffaff0)), VMX_IGS_LONGMODE_PENDING_DEBUG_RESERVED); 11111 u32Val = u64Val; /* For pending debug exceptions checks below. */ 11112 #else 11113 rc = VMXReadVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &u32Val); 11114 AssertRCBreak(rc); 11115 /* Bits 31:15, Bit 13, Bits 11:4 MBZ. */ 11116 HMVMX_CHECK_BREAK(!(u32Val & 0xffffaff0), VMX_IGS_PENDING_DEBUG_RESERVED); 11117 #endif 11118 11119 if ( (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI) 11120 || (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS) 11121 || u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_HLT) 11122 { 11123 if ( (u32Eflags & X86_EFL_TF) 11124 && !(u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */ 11125 { 11126 /* Bit 14 is PendingDebug.BS. */ 11127 HMVMX_CHECK_BREAK(u32Val & RT_BIT(14), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_SET); 11128 } 11129 if ( !(u32Eflags & X86_EFL_TF) 11130 || (u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */ 11131 { 11132 /* Bit 14 is PendingDebug.BS. */ 11133 HMVMX_CHECK_BREAK(!(u32Val & RT_BIT(14)), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_CLEAR); 11134 } 11135 } 11136 11137 /* VMCS link pointer. */ 11138 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, &u64Val); 11139 AssertRCBreak(rc); 11140 if (u64Val != UINT64_C(0xffffffffffffffff)) 11141 { 11142 HMVMX_CHECK_BREAK(!(u64Val & 0xfff), VMX_IGS_VMCS_LINK_PTR_RESERVED); 11143 /** @todo Bits beyond the processor's physical-address width MBZ. */ 11144 /** @todo 32-bit located in memory referenced by value of this field (as a 11145 * physical address) must contain the processor's VMCS revision ID. */ 11146 /** @todo SMM checks. */ 11147 } 11148 11149 /** @todo Checks on Guest Page-Directory-Pointer-Table Entries when guest is 11150 * not using Nested Paging? */ 11151 if ( pVM->hm.s.fNestedPaging 11152 && !fLongModeGuest 11153 && CPUMIsGuestInPAEModeEx(pCtx)) 11154 { 11155 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE0_FULL, &u64Val); 11156 AssertRCBreak(rc); 11157 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED); 11158 11159 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE1_FULL, &u64Val); 11160 AssertRCBreak(rc); 11161 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED); 11162 11163 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE2_FULL, &u64Val); 11164 AssertRCBreak(rc); 11165 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED); 11166 11167 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE3_FULL, &u64Val); 11168 AssertRCBreak(rc); 11169 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED); 11170 } 11171 11172 /* Shouldn't happen but distinguish it from AssertRCBreak() errors. */ 11173 if (uError == VMX_IGS_ERROR) 11174 uError = VMX_IGS_REASON_NOT_FOUND; 11175 } while (0); 11176 11177 pVCpu->hm.s.u32HMError = uError; 11178 return uError; 11179 11180 #undef HMVMX_ERROR_BREAK 11181 #undef HMVMX_CHECK_BREAK 12746 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS)) 12747 { 12748 Log4Func(("Setting VMCPU_FF_BLOCK_NMIS. 
fValid=%RTbool uExitReason=%u\n", 12749 VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo), pVmxTransient->uExitReason)); 12750 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS); 12751 } 12752 } 12753 12754 Assert( rcStrict == VINF_SUCCESS || rcStrict == VINF_HM_DOUBLE_FAULT 12755 || rcStrict == VINF_EM_RESET || rcStrict == VERR_EM_GUEST_CPU_HANG); 12756 return rcStrict; 11182 12757 } 11183 12758 … … 11212 12787 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitXcptNmi, y3); 11213 12788 12789 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo; 11214 12790 int rc = hmR0VmxReadExitIntInfoVmcs(pVmxTransient); 11215 12791 AssertRCReturn(rc, rc); 11216 12792 11217 12793 uint32_t uIntType = VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo); 11218 Assert( !(pV Cpu->hm.s.vmx.Ctls.u32ExitCtls & VMX_EXIT_CTLS_ACK_EXT_INT)12794 Assert( !(pVmcsInfo->u32ExitCtls & VMX_EXIT_CTLS_ACK_EXT_INT) 11219 12795 && uIntType != VMX_EXIT_INT_INFO_TYPE_EXT_INT); 11220 12796 Assert(VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo)); … … 11300 12876 { 11301 12877 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestXcpUnk); 11302 if (pV Cpu->hm.s.vmx.RealMode.fRealOnV86Active)12878 if (pVmcsInfo->RealMode.fRealOnV86Active) 11303 12879 { 11304 12880 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS); … … 11306 12882 Assert(CPUMIsGuestInRealModeEx(&pVCpu->cpum.GstCtx)); 11307 12883 11308 rc = HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_CR0);12884 rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_CR0); 11309 12885 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient); 11310 12886 rc |= hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient); … … 11347 12923 11348 12924 /* Indicate that we no longer need to VM-exit when the guest is ready to receive interrupts, it is now ready. */ 11349 hmR0VmxClearIntWindowExitVmcs(pVCpu); 12925 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo; 12926 int rc = hmR0VmxClearIntWindowExitVmcs(pVmcsInfo); 12927 AssertRCReturn(rc, rc); 11350 12928 11351 12929 /* Evaluate and deliver pending events and resume guest execution. */ … … 11361 12939 { 11362 12940 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient); 11363 if (RT_UNLIKELY(!(pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))) 12941 12942 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo; 12943 if (RT_UNLIKELY(!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))) /** @todo NSTVMX: Turn this into an assertion. */ 11364 12944 { 11365 12945 AssertMsgFailed(("Unexpected NMI-window exit.\n")); … … 11373 12953 * It is therefore safe to unblock STI and deliver the NMI ourselves. See @bugref{7445}. 11374 12954 */ 11375 uint32_t fIntrState = 0;12955 uint32_t fIntrState; 11376 12956 int rc = VMXReadVmcs32(VMX_VMCS32_GUEST_INT_STATE, &fIntrState); 11377 12957 AssertRCReturn(rc, rc); … … 11388 12968 11389 12969 /* Indicate that we no longer need to VM-exit when the guest is ready to receive NMIs, it is now ready */ 11390 hmR0VmxClearNmiWindowExitVmcs(pVCpu); 12970 rc = hmR0VmxClearNmiWindowExitVmcs(pVmcsInfo); 12971 AssertRCReturn(rc, rc); 11391 12972 11392 12973 /* Evaluate and deliver pending events and resume guest execution. */ … … 11425 13006 * Get the state we need and update the exit history entry. 
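[Editor's note] On the NMI-unblocking-due-to-IRET handling above: when a fault interrupts the guest's IRET inside its NMI handler, the CPU has already cleared virtual-NMI blocking, so blocking must be re-established before the IRET is restarted, unless the fault is #DF. A hedged sketch of just that decision (names are invented; the real code sets VMCPU_FF_BLOCK_NMIS):

    #include <stdbool.h>
    #include <stdint.h>

    #define XCPT_DF 8 /* double-fault vector */

    static bool MustReblockNmis(bool fExitIntInfoValid, bool fNmiUnblockedDueToIret,
                                uint8_t uExitVector, bool fVirtNmisEnabled)
    {
        return fExitIntInfoValid       /* VM-exit interruption info is valid */
            && fNmiUnblockedDueToIret  /* "NMI unblocking due to IRET" bit of that field */
            && uExitVector != XCPT_DF  /* #DF is excluded, matching the handling above */
            && fVirtNmisEnabled;       /* only meaningful when virtual NMIs are in use */
    }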
11426 13007 */ 13008 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo; 11427 13009 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient); 11428 rc |= HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);13010 rc |= hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK); 11429 13011 AssertRCReturn(rc, rc); 11430 13012 … … 11452 13034 * Frequent exit or something needing probing. Get state and call EMHistoryExec. 11453 13035 */ 11454 int rc2 = HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);13036 int rc2 = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL); 11455 13037 AssertRCReturn(rc2, rc2); 11456 13038 … … 11475 13057 { 11476 13058 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient); 11477 int rc = HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_CR4); 13059 13060 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo; 13061 int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_CR4); 11478 13062 AssertRCReturn(rc, rc); 11479 13063 … … 11492 13076 { 11493 13077 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient); 11494 int rc = HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK); 13078 13079 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo; 13080 int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK); 11495 13081 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient); 11496 13082 AssertRCReturn(rc, rc); … … 11499 13085 if (RT_LIKELY(rcStrict == VINF_SUCCESS)) 11500 13086 { 11501 /* If we get a spurious VM-exit when offsetting is enabled,11502 we must reset offsetting on VM- reentry. See @bugref{6634}. */11503 if (pV Cpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING)11504 pVmxTransient->fUpdate TscOffsettingAndPreemptTimer = true;13087 /* If we get a spurious VM-exit when TSC offsetting is enabled, 13088 we must reset offsetting on VM-entry. See @bugref{6634}. */ 13089 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING) 13090 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false; 11505 13091 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS); 11506 13092 } … … 11520 13106 { 11521 13107 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient); 11522 int rc = HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_TSC_AUX); 13108 13109 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo; 13110 int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_TSC_AUX); 11523 13111 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient); 11524 13112 AssertRCReturn(rc, rc); … … 11527 13115 if (RT_LIKELY(rcStrict == VINF_SUCCESS)) 11528 13116 { 11529 /* If we get a spurious VM-exit when offsetting is enabled,13117 /* If we get a spurious VM-exit when TSC offsetting is enabled, 11530 13118 we must reset offsetting on VM-reentry. See @bugref{6634}. 
*/ 11531 if (pV Cpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING)11532 pVmxTransient->fUpdate TscOffsettingAndPreemptTimer = true;13119 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING) 13120 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false; 11533 13121 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS); 11534 13122 } … … 11548 13136 { 11549 13137 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient); 11550 int rc = HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS); 13138 13139 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo; 13140 int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_CR0 13141 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS); 11551 13142 AssertRCReturn(rc, rc); 11552 13143 11553 PVM pVM = pVCpu->CTX_SUFF(pVM);11554 13144 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 11555 rc = EMInterpretRdpmc(pV M, pVCpu, CPUMCTX2CORE(pCtx));13145 rc = EMInterpretRdpmc(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx)); 11556 13146 if (RT_LIKELY(rc == VINF_SUCCESS)) 11557 13147 { … … 11578 13168 if (EMAreHypercallInstructionsEnabled(pVCpu)) 11579 13169 { 11580 int rc = HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CR0 11581 | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_EFER); 13170 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo; 13171 int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CR0 13172 | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_EFER); 11582 13173 AssertRCReturn(rc, rc); 11583 13174 … … 11619 13210 Assert(!pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging || pVCpu->hm.s.fUsingDebugLoop); 11620 13211 13212 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo; 11621 13213 int rc = hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient); 11622 13214 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient); 11623 rc |= HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);13215 rc |= hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK); 11624 13216 AssertRCReturn(rc, rc); 11625 13217 … … 11646 13238 { 11647 13239 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient); 11648 int rc = HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS); 13240 13241 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo; 13242 int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS); 11649 13243 AssertRCReturn(rc, rc); 11650 13244 11651 PVM pVM = pVCpu->CTX_SUFF(pVM);11652 13245 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 11653 rc = EMInterpretMonitor(pV M, pVCpu, CPUMCTX2CORE(pCtx));13246 rc = EMInterpretMonitor(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx)); 11654 13247 if (RT_LIKELY(rc == VINF_SUCCESS)) 11655 13248 rc = hmR0VmxAdvanceGuestRip(pVCpu, pVmxTransient); … … 11659 13252 rc = VERR_EM_INTERPRETER; 11660 13253 } 11661 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMonitor);11662 13254 return rc; 11663 13255 } … … 11670 13262 { 11671 13263 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient); 11672 int rc = HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS); 13264 13265 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo; 13266 int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS); 11673 13267 
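[Editor's note] On the TSC-offsetting notes in the RDTSC/RDTSCP handlers above: with the "use TSC offsetting" processor-based control set, a non-exiting RDTSC returns the host TSC plus the VMCS TSC offset, so host-side emulation of the exit must produce the same value and the cached offset/preemption-timer state has to be recomputed before the next VM-entry. A trivial sketch of the relationship, illustrative only:

    #include <stdint.h>

    /* Guest-visible TSC under TSC offsetting; wrap-around is modulo 2^64, as on the CPU. */
    static uint64_t GuestTscFromHostTsc(uint64_t uHostTsc, uint64_t uTscOffset)
    {
        return uHostTsc + uTscOffset;
    }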
AssertRCReturn(rc, rc); 11674 13268 11675 PVM pVM = pVCpu->CTX_SUFF(pVM);11676 13269 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 11677 VBOXSTRICTRC rc2 = EMInterpretMWait(pV M, pVCpu, CPUMCTX2CORE(pCtx));13270 VBOXSTRICTRC rc2 = EMInterpretMWait(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx)); 11678 13271 rc = VBOXSTRICTRC_VAL(rc2); 11679 13272 if (RT_LIKELY( rc == VINF_SUCCESS … … 11694 13287 AssertMsg(rc == VINF_SUCCESS || rc == VINF_EM_HALT || rc == VERR_EM_INTERPRETER, 11695 13288 ("hmR0VmxExitMwait: failed, invalid error code %Rrc\n", rc)); 11696 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMwait);11697 13289 return rc; 11698 13290 } … … 11801 13393 { 11802 13394 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient); 11803 Assert(pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_HLT_EXIT);11804 13395 11805 13396 int rc = hmR0VmxAdvanceGuestRip(pVCpu, pVmxTransient); 11806 rc |= HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_RFLAGS);11807 13397 AssertRCReturn(rc, rc); 11808 13398 13399 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS); /* Advancing the RIP above should've imported eflags. */ 11809 13400 if (EMShouldContinueAfterHalt(pVCpu, &pVCpu->cpum.GstCtx)) /* Requires eflags. */ 11810 13401 rc = VINF_SUCCESS; … … 11812 13403 rc = VINF_EM_HALT; 11813 13404 11814 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitHlt);11815 13405 if (rc != VINF_SUCCESS) 11816 13406 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHltToR3); … … 11832 13422 11833 13423 /** 11834 * VM-exit handler for expiry of the VMX 13424 * VM-exit handler for expiry of the VMX-preemption timer. 11835 13425 */ 11836 13426 HMVMX_EXIT_DECL hmR0VmxExitPreemptTimer(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) … … 11838 13428 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient); 11839 13429 11840 /* If the preemption-timer has expired, reinitialize the preemption timer on next VM-entry. */11841 pVmxTransient->fUpdate TscOffsettingAndPreemptTimer = true;13430 /* If the VMX-preemption timer has expired, reinitialize the preemption timer on next VM-entry. */ 13431 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false; 11842 13432 11843 13433 /* If there are any timer events pending, fall back to ring-3, otherwise resume guest execution. 
*/ … … 11856 13446 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient); 11857 13447 13448 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo; 11858 13449 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient); 11859 rc |= HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_CR4);13450 rc |= hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_CR4); 11860 13451 AssertRCReturn(rc, rc); 11861 13452 … … 11888 13479 HMVMX_EXIT_NSRC_DECL hmR0VmxExitErrInvalidGuestState(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) 11889 13480 { 11890 int rc = HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL); 13481 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo; 13482 int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL); 11891 13483 AssertRCReturn(rc, rc); 11892 rc = hmR0VmxCheckVmcsCtls(pVCpu); 13484 13485 rc = hmR0VmxCheckVmcsCtls(pVCpu, pVmcsInfo); 11893 13486 if (RT_FAILURE(rc)) 11894 13487 return rc; 11895 13488 11896 uint32_t uInvalidReason = hmR0VmxCheckGuestState(pVCpu);13489 uint32_t const uInvalidReason = hmR0VmxCheckGuestState(pVCpu, pVmcsInfo); 11897 13490 NOREF(uInvalidReason); 11898 13491 11899 13492 #ifdef VBOX_STRICT 11900 uint32_t fIntrState; 11901 RTHCUINTREG uHCReg; 11902 uint64_t u64Val; 11903 uint32_t u32Val; 11904 13493 uint32_t fIntrState; 13494 RTHCUINTREG uHCReg; 13495 uint64_t u64Val; 13496 uint32_t u32Val; 11905 13497 rc = hmR0VmxReadEntryIntInfoVmcs(pVmxTransient); 11906 13498 rc |= hmR0VmxReadEntryXcptErrorCodeVmcs(pVmxTransient); … … 11909 13501 AssertRCReturn(rc, rc); 11910 13502 11911 Log4(("uInvalidReason %u\n", uInvalidReason));13503 Log4(("uInvalidReason %u\n", uInvalidReason)); 11912 13504 Log4(("VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO %#RX32\n", pVmxTransient->uEntryIntInfo)); 11913 13505 Log4(("VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE %#RX32\n", pVmxTransient->uEntryXcptErrorCode)); … … 11929 13521 11930 13522 hmR0DumpRegs(pVCpu); 11931 #else11932 NOREF(pVmxTransient);11933 13523 #endif 11934 13524 … … 11982 13572 /* By default, we don't enable VMX_PROC_CTLS2_DESCRIPTOR_TABLE_EXIT. */ 11983 13573 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitXdtrAccess); 11984 if (pVCpu->hm.s.vmx.Ctls.u32ProcCtls2 & VMX_PROC_CTLS2_DESC_TABLE_EXIT) 13574 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo; 13575 if (pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_DESC_TABLE_EXIT) 11985 13576 return VERR_EM_INTERPRETER; 11986 13577 AssertMsgFailed(("Unexpected XDTR access\n")); … … 11997 13588 11998 13589 /* By default, we don't enable VMX_PROC_CTLS2_RDRAND_EXIT. */ 11999 if (pVCpu->hm.s.vmx.Ctls.u32ProcCtls2 & VMX_PROC_CTLS2_RDRAND_EXIT) 13590 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo; 13591 if (pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_RDRAND_EXIT) 12000 13592 return VERR_EM_INTERPRETER; 12001 13593 AssertMsgFailed(("Unexpected RDRAND exit\n")); … … 12015 13607 * MSRs required. That would require changes to IEM and possibly CPUM too. 12016 13608 * (Should probably do it lazy fashion from CPUMAllMsrs.cpp). 
*/ 12017 uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx; 13609 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo; 13610 uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx; 13611 uint64_t fImport = IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS; 13612 switch (idMsr) 13613 { 13614 case MSR_K8_FS_BASE: fImport |= CPUMCTX_EXTRN_FS; break; 13615 case MSR_K8_GS_BASE: fImport |= CPUMCTX_EXTRN_GS; break; 13616 } 13617 12018 13618 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient); 12019 rc |= HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS); 12020 switch (idMsr) 12021 { 12022 /* The FS and GS base MSRs are not part of the above all-MSRs mask. */ 12023 case MSR_K8_FS_BASE: rc |= HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_FS); break; 12024 case MSR_K8_GS_BASE: rc |= HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_GS); break; 12025 } 13619 rc |= hmR0VmxImportGuestState(pVCpu, pVmcsInfo, fImport); 12026 13620 AssertRCReturn(rc, rc); 12027 13621 … … 12029 13623 12030 13624 #ifdef VBOX_STRICT 12031 if (pV Cpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)12032 { 12033 if ( hmR0VmxIsAutoLoad StoreGuestMsr(pVCpu, idMsr)13625 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS) 13626 { 13627 if ( hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr) 12034 13628 && idMsr != MSR_K6_EFER) 12035 13629 { … … 12039 13633 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr)) 12040 13634 { 12041 VMXMSREXITREAD enmRead; 12042 VMXMSREXITWRITE enmWrite; 12043 int rc2 = HMGetVmxMsrPermission(pVCpu->hm.s.vmx.pvMsrBitmap, idMsr, &enmRead, &enmWrite); 12044 AssertRCReturn(rc2, rc2); 12045 if (enmRead == VMXMSREXIT_PASSTHRU_READ) 13635 Assert(pVmcsInfo->pvMsrBitmap); 13636 uint32_t fMsrpm = HMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, idMsr); 13637 if (fMsrpm & VMXMSRPM_ALLOW_RD) 12046 13638 { 12047 13639 AssertMsgFailed(("Unexpected RDMSR for a passthru lazy-restore MSR. ecx=%#RX32\n", idMsr)); … … 12080 13672 * MSRs required. That would require changes to IEM and possibly CPUM too. 12081 13673 * (Should probably do it lazy fashion from CPUMAllMsrs.cpp). */ 12082 uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx; 13674 uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx; 13675 uint64_t fImport = IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS; 13676 13677 /* 13678 * The FS and GS base MSRs are not part of the above all-MSRs mask. 13679 * Although we don't need to fetch the base as it will be overwritten shortly, while 13680 * loading guest-state we would also load the entire segment register including limit 13681 * and attributes and thus we need to load them here. 13682 */ 13683 switch (idMsr) 13684 { 13685 case MSR_K8_FS_BASE: fImport |= CPUMCTX_EXTRN_FS; break; 13686 case MSR_K8_GS_BASE: fImport |= CPUMCTX_EXTRN_GS; break; 13687 } 13688 13689 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo; 12083 13690 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient); 12084 rc |= HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK 12085 | CPUMCTX_EXTRN_ALL_MSRS); 12086 switch (idMsr) 12087 { 12088 /* 12089 * The FS and GS base MSRs are not part of the above all-MSRs mask. 12090 * 12091 * Although we don't need to fetch the base as it will be overwritten shortly, while 12092 * loading guest-state we would also load the entire segment register including limit 12093 * and attributes and thus we need to load them here. 
12094 */ 12095 case MSR_K8_FS_BASE: rc |= HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_FS); break; 12096 case MSR_K8_GS_BASE: rc |= HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_GS); break; 12097 } 13691 rc |= hmR0VmxImportGuestState(pVCpu, pVmcsInfo, fImport); 12098 13692 AssertRCReturn(rc, rc); 12099 13693 … … 12113 13707 { 12114 13708 /* 12115 * We've already saved the APIC related guest-state (TPR) in hmR0VmxPostRunGuest(). When full APIC register 12116 * virtualization is implemented we'll have to make sure APIC state is saved from the VMCS before IEM changes it. 13709 * We've already saved the APIC related guest-state (TPR) in post-run phase. 13710 * When full APIC register virtualization is implemented we'll have to make 13711 * sure APIC state is saved from the VMCS before IEM changes it. 12117 13712 */ 12118 13713 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_APIC_TPR); 12119 13714 } 12120 13715 else if (idMsr == MSR_IA32_TSC) /* Windows 7 does this during bootup. See @bugref{6398}. */ 12121 pVmxTransient->fUpdate TscOffsettingAndPreemptTimer = true;13716 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false; 12122 13717 else if (idMsr == MSR_K6_EFER) 12123 13718 { 12124 13719 /* 12125 * If the guest touches EFER we need to update the VM-Entry and VM-Exit controls as well,12126 * even if it is -not- touching bits that cause paging mode changes (LMA/LME). We care about12127 * the other bits as well, SCE and NXE. See @bugref{7368}.13720 * If the guest touches the EFER MSR we need to update the VM-Entry and VM-Exit controls 13721 * as well, even if it is -not- touching bits that cause paging mode changes (LMA/LME). 13722 * We care about the other bits as well, SCE and NXE. See @bugref{7368}. 12128 13723 */ 12129 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_CTLS 12130 | HM_CHANGED_VMX_EXIT_CTLS); 13724 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_EXIT_CTLS); 12131 13725 } 12132 13726 12133 13727 /* Update MSRs that are part of the VMCS and auto-load/store area when MSR-bitmaps are not supported. */ 12134 if (!(pV Cpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS))13728 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)) 12135 13729 { 12136 13730 switch (idMsr) … … 12144 13738 default: 12145 13739 { 12146 if (hmR0VmxIsAutoLoad StoreGuestMsr(pVCpu, idMsr))13740 if (hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr)) 12147 13741 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_VMX_GUEST_AUTO_MSRS); 12148 13742 else if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr)) … … 12164 13758 case MSR_K8_GS_BASE: 12165 13759 { 12166 AssertMsgFailed(("Unexpected WRMSR for an MSR in the VMCS. ecx=%#RX32\n", idMsr)); 13760 uint32_t fMsrpm = HMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, idMsr); 13761 Assert(fMsrpm == (VMXMSRPM_ALLOW_RD | VMXMSRPM_ALLOW_WR)); 13762 13763 uint32_t u32Proc; 13764 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, &u32Proc); 13765 AssertRC(rc); 13766 Assert(u32Proc == pVmcsInfo->u32ProcCtls); 13767 Assert(u32Proc & VMX_PROC_CTLS_USE_MSR_BITMAPS); 13768 13769 AssertMsgFailed(("Unexpected WRMSR for an MSR in the VMCS. 
ecx=%#RX32 fMsrpm=%#RX32\n", idMsr, fMsrpm)); 12167 13770 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient); 12168 13771 } … … 12171 13774 default: 12172 13775 { 12173 if (hmR0VmxIsAutoLoad StoreGuestMsr(pVCpu, idMsr))13776 if (hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr)) 12174 13777 { 12175 /* EFER writes are always intercepted, see hmR0VmxExportGuestMsrs(). */13778 /* EFER MSR writes are always intercepted. */ 12176 13779 if (idMsr != MSR_K6_EFER) 12177 13780 { … … 12184 13787 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr)) 12185 13788 { 12186 VMXMSREXITREAD enmRead; 12187 VMXMSREXITWRITE enmWrite; 12188 int rc2 = HMGetVmxMsrPermission(pVCpu->hm.s.vmx.pvMsrBitmap, idMsr, &enmRead, &enmWrite); 12189 AssertRCReturn(rc2, rc2); 12190 if (enmWrite == VMXMSREXIT_PASSTHRU_WRITE) 13789 Assert(pVmcsInfo->pvMsrBitmap); 13790 uint32_t fMsrpm = HMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, idMsr); 13791 if (fMsrpm & VMXMSRPM_ALLOW_WR) 12191 13792 { 12192 13793 AssertMsgFailed(("Unexpected WRMSR for passthru, lazy-restore MSR. ecx=%#RX32\n", idMsr)); … … 12231 13832 { 12232 13833 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient); 12233 Assert(pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW); 12234 12235 /* 12236 * The TPR shadow would've been synced with the APIC TPR in hmR0VmxPostRunGuest(). We'll re-evaluate 12237 * pending interrupts and inject them before the next VM-entry so we can just continue execution here. 13834 Assert(pVmxTransient->pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW); 13835 13836 /* 13837 * The TPR shadow would've been synced with the APIC TPR in the post-run phase. 13838 * We'll re-evaluate pending interrupts and inject them before the next VM 13839 * entry so we can just continue execution here. 12238 13840 */ 12239 13841 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitTprBelowThreshold); … … 12256 13858 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitMovCRx, y2); 12257 13859 13860 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo; 12258 13861 int rc = hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient); 12259 13862 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient); 12260 rc |= HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);13863 rc |= hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK); 12261 13864 AssertRCReturn(rc, rc); 12262 13865 12263 13866 VBOXSTRICTRC rcStrict; 12264 PVM pVM 13867 PVM pVM = pVCpu->CTX_SUFF(pVM); 12265 13868 RTGCUINTPTR const uExitQual = pVmxTransient->uExitQual; 12266 13869 uint32_t const uAccessType = VMX_EXIT_QUAL_CRX_ACCESS(uExitQual); … … 12340 13943 { 12341 13944 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR8Write); 12342 Assert(!(pV Cpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW));13945 Assert(!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)); 12343 13946 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, 12344 13947 HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_APIC_TPR); … … 12360 13963 /* CR8 reads only cause a VM-exit when the TPR shadow feature isn't enabled. 
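[Editor's note] The RDMSR/WRMSR strictness checks above consult the VMCS MSR bitmap via HMGetVmxMsrPermission. As background, the bitmap is a single 4 KB page laid out per the Intel SDM: read bitmaps in the first 2 KB, write bitmaps in the second, each half split between low MSRs (00000000h-00001FFFh) and high MSRs (C0000000h-C0001FFFh), with a set bit meaning the access causes a VM-exit. A hedged sketch of that lookup (not the VirtualBox helper; names invented):

    #include <stdbool.h>
    #include <stdint.h>

    static bool VmxMsrAccessCausesExit(uint8_t const *pbMsrBitmap, uint32_t idMsr, bool fWrite)
    {
        uint32_t offBitmap;
        if (idMsr <= UINT32_C(0x00001fff))
            offBitmap = 0;                            /* read bitmap, low MSRs */
        else if (idMsr - UINT32_C(0xc0000000) <= UINT32_C(0x1fff))
        {
            offBitmap = 1024;                         /* read bitmap, high MSRs */
            idMsr &= UINT32_C(0x1fff);
        }
        else
            return true;                              /* MSRs outside both ranges always exit */
        if (fWrite)
            offBitmap += 2048;                        /* write bitmaps follow the read bitmaps */
        return (pbMsrBitmap[offBitmap + idMsr / 8] >> (idMsr % 8)) & 1;
    }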
*/ 12361 13964 Assert( VMX_EXIT_QUAL_CRX_REGISTER(uExitQual) != 8 12362 || !(pV Cpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW));13965 || !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)); 12363 13966 12364 13967 rcStrict = IEMExecDecodedMovCRxRead(pVCpu, pVmxTransient->cbInstr, VMX_EXIT_QUAL_CRX_GENREG(uExitQual), … … 12443 14046 12444 14047 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 14048 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo; 12445 14049 int rc = hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient); 12446 14050 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient); 12447 rc |= HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_EFER); 12448 /* EFER also required for longmode checks in EMInterpretDisasCurrent(), but it's always up-to-date. */ 14051 rc |= hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_SREG_MASK 14052 | CPUMCTX_EXTRN_EFER); 14053 /* EFER MSR also required for longmode checks in EMInterpretDisasCurrent(), but it's always up-to-date. */ 12449 14054 AssertRCReturn(rc, rc); 12450 14055 … … 12491 14096 * interpreting the instruction. 12492 14097 */ 12493 Log4Func((" CS:RIP=%04x:%08RX64 %#06x/%u %c str\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));14098 Log4Func(("cs:rip=%#04x:%#RX64 %#06x/%u %c str\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r')); 12494 14099 AssertReturn(pCtx->dx == uIOPort, VERR_VMX_IPE_2); 12495 14100 bool const fInsOutsInfo = RT_BF_GET(pVM->hm.s.vmx.Msrs.u64Basic, VMX_BF_BASIC_VMCS_INS_OUTS); … … 12527 14132 * IN/OUT - I/O instruction. 12528 14133 */ 12529 Log4Func((" CS:RIP=%04x:%08RX64 %#06x/%u %c\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));14134 Log4Func(("cs:rip=%04x:%08RX64 %#06x/%u %c\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r')); 12530 14135 uint32_t const uAndVal = s_aIOOpAnd[uIOWidth]; 12531 14136 Assert(!VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual)); … … 12576 14181 * Note that the I/O breakpoint type is undefined if CR4.DE is 0. 12577 14182 */ 12578 rc = HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_DR7);14183 rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_DR7); 12579 14184 AssertRCReturn(rc, rc); 12580 14185 … … 12648 14253 * Frequent exit or something needing probing. Get state and call EMHistoryExec. 12649 14254 */ 12650 int rc2 = HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);14255 int rc2 = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL); 12651 14256 AssertRCReturn(rc2, rc2); 12652 14257 STAM_COUNTER_INC(!fIOString ? fIOWrite ? 
&pVCpu->hm.s.StatExitIOWrite : &pVCpu->hm.s.StatExitIORead … … 12729 14334 { 12730 14335 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient); 12731 Assert(pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_MONITOR_TRAP_FLAG); 12732 pVCpu->hm.s.vmx.Ctls.u32ProcCtls &= ~VMX_PROC_CTLS_MONITOR_TRAP_FLAG; 12733 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.Ctls.u32ProcCtls); 14336 14337 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo; 14338 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_MONITOR_TRAP_FLAG; 14339 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls); 12734 14340 AssertRCReturn(rc, rc); 12735 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMtf);12736 14341 return VINF_EM_DBG_STEPPED; 12737 14342 } … … 12744 14349 { 12745 14350 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient); 12746 12747 14351 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitApicAccess); 12748 14352 … … 12766 14370 12767 14371 /* IOMMIOPhysHandler() below may call into IEM, save the necessary state. */ 12768 int rc = HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK); 14372 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo; 14373 int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK); 12769 14374 rc |= hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient); 12770 14375 AssertRCReturn(rc, rc); … … 12778 14383 case VMX_APIC_ACCESS_TYPE_LINEAR_READ: 12779 14384 { 12780 AssertMsg( !(pV Cpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)14385 AssertMsg( !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW) 12781 14386 || VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual) != XAPIC_OFF_TPR, 12782 14387 ("hmR0VmxExitApicAccess: can't access TPR offset while using TPR shadowing.\n")); 12783 14388 12784 RTGCPHYS GCPhys = pVCpu->hm.s.vmx.u64 MsrApicBase; /* Always up-to-date, u64MsrApicBaseis not part of the VMCS. */14389 RTGCPHYS GCPhys = pVCpu->hm.s.vmx.u64GstMsrApicBase; /* Always up-to-date, as it is not part of the VMCS. */ 12785 14390 GCPhys &= PAGE_BASE_GC_MASK; 12786 14391 GCPhys += VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual); … … 12832 14437 } 12833 14438 14439 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo; 12834 14440 if ( !pVCpu->hm.s.fSingleInstruction 12835 14441 && !pVmxTransient->fWasHyperDebugStateActive) 12836 14442 { 12837 14443 Assert(!DBGFIsStepping(pVCpu)); 12838 Assert(pV Cpu->hm.s.vmx.Ctls.u32XcptBitmap & RT_BIT_32(X86_XCPT_DB));14444 Assert(pVmcsInfo->u32XcptBitmap & RT_BIT(X86_XCPT_DB)); 12839 14445 12840 14446 /* Don't intercept MOV DRx any more. */ 12841 pV Cpu->hm.s.vmx.Ctls.u32ProcCtls &= ~VMX_PROC_CTLS_MOV_DR_EXIT;12842 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pV Cpu->hm.s.vmx.Ctls.u32ProcCtls);14447 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_MOV_DR_EXIT; 14448 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls); 12843 14449 AssertRCReturn(rc, rc); 12844 14450 … … 12867 14473 12868 14474 /* 12869 * EMInterpretDRx[Write|Read]() calls CPUMIsGuestIn64BitCode() which requires EFER, CS. EFER is always up-to-date. 14475 * EMInterpretDRx[Write|Read]() calls CPUMIsGuestIn64BitCode() which requires EFER MSR, CS. 14476 * The EFER MSR is always up-to-date. 12870 14477 * Update the segment registers and DR7 from the CPU. 
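[Editor's note] The I/O-instruction exit handler above pulls the port, access width, direction and string/REP flags out of the exit qualification. A hedged sketch of that decoding, following the bit layout given in the Intel SDM (the structure and names are invented for illustration and are not the VMX_EXIT_QUAL_IO_* accessors):

    #include <stdbool.h>
    #include <stdint.h>

    typedef struct IOEXITINFO
    {
        uint16_t uPort;     /* bits 31:16: port number */
        uint8_t  cbAccess;  /* bits 2:0 encode the access size minus one (1, 2 or 4 bytes) */
        bool     fIn;       /* bit 3: 1 = IN/INS, 0 = OUT/OUTS */
        bool     fString;   /* bit 4: string instruction (INS/OUTS) */
        bool     fRep;      /* bit 5: REP prefixed */
    } IOEXITINFO;

    static IOEXITINFO DecodeIoExitQual(uint64_t uExitQual)
    {
        IOEXITINFO Info;
        Info.cbAccess = (uint8_t)((uExitQual & 0x7) + 1);
        Info.fIn      = (uExitQual >> 3) & 1;
        Info.fString  = (uExitQual >> 4) & 1;
        Info.fRep     = (uExitQual >> 5) & 1;
        Info.uPort    = (uint16_t)(uExitQual >> 16);
        return Info;
    }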
12871 14478 */ 12872 14479 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 12873 14480 int rc = hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient); 12874 rc |= HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_DR7);14481 rc |= hmR0VmxImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_DR7); 12875 14482 AssertRCReturn(rc, rc); 12876 Log4Func((" CS:RIP=%04x:%08RX64\n", pCtx->cs.Sel, pCtx->rip));14483 Log4Func(("cs:rip=%#04x:%#RX64\n", pCtx->cs.Sel, pCtx->rip)); 12877 14484 12878 14485 PVM pVM = pVCpu->CTX_SUFF(pVM); … … 12937 14544 */ 12938 14545 RTGCPHYS GCPhys; 14546 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo; 12939 14547 int rc = VMXReadVmcs64(VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &GCPhys); 12940 rc |= HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);14548 rc |= hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK); 12941 14549 AssertRCReturn(rc, rc); 12942 14550 … … 12973 14581 * Frequent exit or something needing probing. Get state and call EMHistoryExec. 12974 14582 */ 12975 int rc2 = HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);14583 int rc2 = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK); 12976 14584 AssertRCReturn(rc2, rc2); 12977 14585 … … 13015 14623 13016 14624 RTGCPHYS GCPhys; 14625 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo; 13017 14626 int rc = VMXReadVmcs64(VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &GCPhys); 13018 14627 rc |= hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient); 13019 rc |= HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);14628 rc |= hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK); 13020 14629 AssertRCReturn(rc, rc); 13021 14630 … … 13038 14647 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 13039 14648 13040 Log4Func(("EPT violation %#x at %#RX64 ErrorCode %#x CS:RIP=%04x:%08RX64\n", pVmxTransient->uExitQual, GCPhys, uErrorCode,14649 Log4Func(("EPT violation %#x at %#RX64 ErrorCode %#x cs:rip=%#04x:%#RX64\n", pVmxTransient->uExitQual, GCPhys, uErrorCode, 13041 14650 pCtx->cs.Sel, pCtx->rip)); 13042 14651 … … 13076 14685 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestMF); 13077 14686 13078 int rc = HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_CR0);14687 int rc = hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CR0); 13079 14688 AssertRCReturn(rc, rc); 13080 14689 … … 13106 14715 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestBP); 13107 14716 13108 int rc = HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);14717 int rc = hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL); 13109 14718 AssertRCReturn(rc, rc); 13110 14719 … … 13187 14796 VMMRZCallRing3Enable(pVCpu); 13188 14797 13189 rc = HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_DR7);14798 rc = hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_DR7); 13190 14799 AssertRCReturn(rc, rc); 13191 14800 … … 13239 14848 static int hmR0VmxHandleMesaDrvGp(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient, PCPUMCTX pCtx) 13240 14849 { 13241 Log (("hmR0VmxHandleMesaDrvGp: at %04x:%08RX64 rcx=%RX64 rbx=%RX64\n", pCtx->cs.Sel, pCtx->rip, pCtx->rcx, pCtx->rbx));14850 LogFunc(("cs:rip=%#04x:%#RX64 rcx=%#RX64 rbx=%#RX64\n", pCtx->cs.Sel, pCtx->rip, pCtx->rcx, pCtx->rbx)); 13242 14851 RT_NOREF(pCtx); 13243 14852 … … 13310 14919 13311 14920 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 13312 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active) 14921 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo; 14922 if 
(pVmcsInfo->RealMode.fRealOnV86Active) 13313 14923 { /* likely */ } 13314 14924 else … … 13317 14927 Assert(pVCpu->hm.s.fUsingDebugLoop || pVCpu->hm.s.fTrapXcptGpForLovelyMesaDrv); 13318 14928 #endif 13319 /* If the guest is not in real-mode or we have unrestricted execution support, reflect #GP to the guest. */14929 /* If the guest is not in real-mode or we have unrestricted guest execution support, reflect #GP to the guest. */ 13320 14930 int rc = hmR0VmxReadExitIntInfoVmcs(pVmxTransient); 13321 14931 rc |= hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient); 13322 14932 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient); 13323 rc |= HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);14933 rc |= hmR0VmxImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL); 13324 14934 AssertRCReturn(rc, rc); 13325 Log4Func(("Gst: CS:RIP %04x:%08RX64 ErrorCode=%#x CR0=%#RX64 CPL=%u TR=%#04x\n", pCtx->cs.Sel, pCtx->rip,14935 Log4Func(("Gst: cs:rip=%#04x:%#RX64 ErrorCode=%#x cr0=%#RX64 cpl=%u tr=%#04x\n", pCtx->cs.Sel, pCtx->rip, 13326 14936 pVmxTransient->uExitIntErrorCode, pCtx->cr0, CPUMGetGuestCPL(pVCpu), pCtx->tr.Sel)); 13327 14937 … … 13338 14948 Assert(!pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fUnrestrictedGuest); 13339 14949 13340 int rc = HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);14950 int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL); 13341 14951 AssertRCReturn(rc, rc); 13342 14952 … … 13350 14960 * guest using hardware-assisted VMX. Otherwise, fall back to emulation. 13351 14961 */ 13352 pV Cpu->hm.s.vmx.RealMode.fRealOnV86Active = false;14962 pVmcsInfo->RealMode.fRealOnV86Active = false; 13353 14963 if (HMCanExecuteVmxGuest(pVCpu, pCtx)) 13354 14964 { 13355 Log4Func(("Mode changed but guest still suitable for executing using VT-x\n"));14965 Log4Func(("Mode changed but guest still suitable for executing using hardware-assisted VMX\n")); 13356 14966 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST); 13357 14967 } … … 13385 14995 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient); 13386 14996 #ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS 13387 AssertMsg(pVCpu->hm.s.fUsingDebugLoop || pVCpu->hm.s.vmx.RealMode.fRealOnV86Active, 14997 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo; 14998 AssertMsg(pVCpu->hm.s.fUsingDebugLoop || pVmcsInfo->RealMode.fRealOnV86Active, 13388 14999 ("uVector=%#x u32XcptBitmap=%#X32\n", 13389 VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo), pVCpu->hm.s.vmx.Ctls.u32XcptBitmap)); 15000 VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo), pVmcsInfo->u32XcptBitmap)); 15001 NOREF(pVmcsInfo); 13390 15002 #endif 13391 15003 … … 13398 15010 13399 15011 #ifdef DEBUG_ramshankar 13400 rc |= HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);13401 uint8_t uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);13402 Log(("hmR0VmxExitXcptGeneric: Reinjecting Xcpt. uVector=%#x cs:rip=%#04x:%#RX64\n", uVector, pCtx->cs.Sel, pCtx->rip));15012 rc |= hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP); 15013 Log(("hmR0VmxExitXcptGeneric: Reinjecting Xcpt. 
uVector=%#x cs:rip=%#04x:%#RX64\n", 15014 VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo), pCtx->cs.Sel, pCtx->rip)); 13403 15015 #endif 13404 15016 … … 13453 15065 13454 15066 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 13455 rc = HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);15067 rc = hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL); 13456 15068 AssertRCReturn(rc, rc); 13457 15069 … … 13507 15119 13508 15120 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX 13509 /** @name Nested-guest VM-exithandlers.15121 /** @name VMX instruction handlers. 13510 15122 * @{ 13511 15123 */ 13512 15124 /* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */ 13513 /* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- =-= Nested-guest VM-exit handlers =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */15125 /* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- VMX instructions VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */ 13514 15126 /* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */ 13515 15127 … … 13522 15134 13523 15135 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient); 13524 rc |= HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK13525 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);15136 rc |= hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK 15137 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK); 13526 15138 rc |= hmR0VmxReadExitInstrInfoVmcs(pVmxTransient); 13527 15139 rc |= hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient); … … 13557 15169 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient); 13558 15170 15171 /* Import the entire VMCS state for now as we would be switching VMCS on successful VMLAUNCH, 15172 otherwise we could import just IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK. 
*/ 13559 15173 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient); 13560 rc |= HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK);15174 rc |= hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL); 13561 15175 AssertRCReturn(rc, rc); 13562 15176 … … 13582 15196 13583 15197 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient); 13584 rc |= HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK13585 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);15198 rc |= hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK 15199 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK); 13586 15200 rc |= hmR0VmxReadExitInstrInfoVmcs(pVmxTransient); 13587 15201 rc |= hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient); … … 13618 15232 13619 15233 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient); 13620 rc |= HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK13621 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);15234 rc |= hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK 15235 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK); 13622 15236 rc |= hmR0VmxReadExitInstrInfoVmcs(pVmxTransient); 13623 15237 rc |= hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient); … … 13636 15250 VBOXSTRICTRC rcStrict = IEMExecDecodedVmptrst(pVCpu, &ExitInfo); 13637 15251 if (RT_LIKELY(rcStrict == VINF_SUCCESS)) 13638 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);15252 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS); 13639 15253 else if (rcStrict == VINF_IEM_RAISED_XCPT) 13640 15254 { … … 13654 15268 13655 15269 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient); 13656 rc |= HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK13657 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);15270 rc |= hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK 15271 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK); 13658 15272 rc |= hmR0VmxReadExitInstrInfoVmcs(pVmxTransient); 13659 15273 rc |= hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient); … … 13673 15287 VBOXSTRICTRC rcStrict = IEMExecDecodedVmread(pVCpu, &ExitInfo); 13674 15288 if (RT_LIKELY(rcStrict == VINF_SUCCESS)) 13675 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);15289 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS); 13676 15290 else if (rcStrict == VINF_IEM_RAISED_XCPT) 13677 15291 { … … 13690 15304 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient); 13691 15305 15306 /* Import the entire VMCS state for now as we would be switching VMCS on successful VMRESUME, 15307 otherwise we could import just IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK. 
*/ 13692 15308 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient); 13693 rc |= HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK);15309 rc |= hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL); 13694 15310 AssertRCReturn(rc, rc); 13695 15311 … … 13715 15331 13716 15332 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient); 13717 rc |= HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK13718 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);15333 rc |= hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK 15334 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK); 13719 15335 rc |= hmR0VmxReadExitInstrInfoVmcs(pVmxTransient); 13720 15336 rc |= hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient); … … 13752 15368 13753 15369 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient); 13754 rc |= HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_CR4 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK); 15370 rc |= hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CR4 15371 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK); 13755 15372 AssertRCReturn(rc, rc); 13756 15373 … … 13759 15376 VBOXSTRICTRC rcStrict = IEMExecDecodedVmxoff(pVCpu, pVmxTransient->cbInstr); 13760 15377 if (RT_LIKELY(rcStrict == VINF_SUCCESS)) 13761 {13762 /* VMXOFF changes the internal hwvirt. state but not anything that's visible to the guest other than RIP. */13763 15378 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_HWVIRT); 13764 }13765 15379 else if (rcStrict == VINF_IEM_RAISED_XCPT) 13766 15380 { … … 13780 15394 13781 15395 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient); 13782 rc |= HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK13783 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);15396 rc |= hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK 15397 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK); 13784 15398 rc |= hmR0VmxReadExitInstrInfoVmcs(pVmxTransient); 13785 15399 rc |= hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient); -
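The HMVMXR0.cpp hunks above convert the VM-exit and exception handlers from the old HMVMX_CPUMCTX_IMPORT_STATE macro to explicit hmR0VmxImportGuestState() calls against the VMCS-info structure carried in the transient data. A minimal sketch of the resulting handler shape, assuming the helpers behave as their uses above suggest; IEMExecDecodedSomething is a placeholder callee, the parameter-validation macro and most error paths are trimmed, and this is not one of the real handlers:

/* Sketch of the common handler pattern; all names except IEMExecDecodedSomething
   and the function name itself are taken from the hunks above. */
static VBOXSTRICTRC hmR0VmxExitSomethingSketch(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
{
    /* Read the exit-specific VMCS fields needed for decoding. */
    int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    rc |= hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);

    /* Import only the guest state the emulation below will touch, using the
       VMCS info cached in the transient structure (this is what replaces the
       old HMVMX_CPUMCTX_IMPORT_STATE macro). */
    rc |= hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo,
                                  CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
                                  | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
    AssertRCReturn(rc, rc);

    /* Hand the decoded exit to IEM (placeholder callee). */
    VBOXSTRICTRC rcStrict = IEMExecDecodedSomething(pVCpu, pVmxTransient->cbInstr);

    /* Tell HM which guest state the emulation may have dirtied. */
    if (RT_LIKELY(rcStrict == VINF_SUCCESS))
        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
    else if (rcStrict == VINF_IEM_RAISED_XCPT)
    {
        /* The real handlers additionally mark the exception-related state as
           changed here (the exact mask is elided in the hunks above). */
        rcStrict = VINF_SUCCESS;
    }
    return rcStrict;
}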
trunk/src/VBox/VMM/VMMR0/HMVMXR0.h
r77481 r78220 46 46 VMMR0DECL(int) VMXR0ImportStateOnDemand(PVMCPU pVCpu, uint64_t fWhat); 47 47 VMMR0DECL(VBOXSTRICTRC) VMXR0RunGuestCode(PVMCPU pVCpu); 48 DECLASM(int) VMXR0StartVM32(RTHCUINT fResume, PCPUMCTX pCtx, PVMXVMCS BATCHCACHE pVmcsCache, PVM pVM, PVMCPU pVCpu);49 DECLASM(int) VMXR0StartVM64(RTHCUINT fResume, PCPUMCTX pCtx, PVMXVMCS BATCHCACHE pVmcsCache, PVM pVM, PVMCPU pVCpu);48 DECLASM(int) VMXR0StartVM32(RTHCUINT fResume, PCPUMCTX pCtx, PVMXVMCSCACHE pVmcsCache, PVM pVM, PVMCPU pVCpu); 49 DECLASM(int) VMXR0StartVM64(RTHCUINT fResume, PCPUMCTX pCtx, PVMXVMCSCACHE pVmcsCache, PVM pVM, PVMCPU pVCpu); 50 50 51 51 # if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) 52 DECLASM(int) VMXR0SwitcherStartVM64(RTHCUINT fResume, PCPUMCTX pCtx, PVMXVMCS BATCHCACHE pVmcsCache, PVM pVM, PVMCPU pVCpu);52 DECLASM(int) VMXR0SwitcherStartVM64(RTHCUINT fResume, PCPUMCTX pCtx, PVMXVMCSCACHE pVmcsCache, PVM pVM, PVMCPU pVCpu); 53 53 VMMR0DECL(int) VMXR0Execute64BitsHandler(PVMCPU pVCpu, HM64ON32OP enmOp, uint32_t cbParam, uint32_t *paParam); 54 54 # endif … … 61 61 { 62 62 Assert(idxCache <= VMX_VMCS_MAX_NESTED_PAGING_CACHE_IDX); 63 *pVal = pVCpu->hm.s.vmx.Vmcs BatchCache.Read.aFieldVal[idxCache];63 *pVal = pVCpu->hm.s.vmx.VmcsCache.Read.aFieldVal[idxCache]; 64 64 return VINF_SUCCESS; 65 65 } -
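The header changes above rename the batch read cache from VMXVMCSBATCHCACHE to VMXVMCSCACHE; VMXReadCachedVmcsEx() simply returns a value that an earlier batch of VMREADs stored at a known cache index. A standalone illustration of that idea, using placeholder names and sizes rather than the VirtualBox definitions:

#include <assert.h>
#include <stdint.h>

/* Parallel arrays: field encodings and the values fetched for them in the
   last batch of VMREADs.  Illustrative stand-in for VMXVMCSCACHE::Read. */
#define EXAMPLE_CACHE_MAX_ENTRY 128

typedef struct EXAMPLEVMCSREADCACHE
{
    uint32_t cValidEntries;
    uint32_t aField[EXAMPLE_CACHE_MAX_ENTRY];
    uint64_t aFieldVal[EXAMPLE_CACHE_MAX_ENTRY];
} EXAMPLEVMCSREADCACHE;

/* Read a cached value by index, avoiding another VMREAD. */
static int exampleReadCachedVmcs(EXAMPLEVMCSREADCACHE const *pCache, uint32_t idxCache, uint64_t *pVal)
{
    assert(idxCache < pCache->cValidEntries);
    *pVal = pCache->aFieldVal[idxCache];
    return 0; /* VINF_SUCCESS in the real code. */
}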
trunk/src/VBox/VMM/VMMR3/CPUM.cpp
r76993 r78220 326 326 }; 327 327 328 /** Saved state field descriptors for VMX nested hardware-virtualization 329 * VMCS. */ 330 static const SSMFIELD g_aVmxHwvirtVmcs[] = 331 { 332 SSMFIELD_ENTRY( VMXVVMCS, u32VmcsRevId), 333 SSMFIELD_ENTRY( VMXVVMCS, enmVmxAbort), 334 SSMFIELD_ENTRY( VMXVVMCS, fVmcsState), 335 SSMFIELD_ENTRY_IGNORE(VMXVVMCS, au8Padding0), 336 SSMFIELD_ENTRY_IGNORE(VMXVVMCS, au32Reserved0), 337 338 SSMFIELD_ENTRY( VMXVVMCS, u16Vpid), 339 SSMFIELD_ENTRY( VMXVVMCS, u16PostIntNotifyVector), 340 SSMFIELD_ENTRY( VMXVVMCS, u16EptpIndex), 341 SSMFIELD_ENTRY_IGNORE(VMXVVMCS, au16Reserved0), 342 343 SSMFIELD_ENTRY( VMXVVMCS, GuestEs), 344 SSMFIELD_ENTRY( VMXVVMCS, GuestCs), 345 SSMFIELD_ENTRY( VMXVVMCS, GuestSs), 346 SSMFIELD_ENTRY( VMXVVMCS, GuestDs), 347 SSMFIELD_ENTRY( VMXVVMCS, GuestFs), 348 SSMFIELD_ENTRY( VMXVVMCS, GuestGs), 349 SSMFIELD_ENTRY( VMXVVMCS, GuestLdtr), 350 SSMFIELD_ENTRY( VMXVVMCS, GuestTr), 351 SSMFIELD_ENTRY( VMXVVMCS, u16GuestIntStatus), 352 SSMFIELD_ENTRY( VMXVVMCS, u16PmlIndex), 353 SSMFIELD_ENTRY_IGNORE(VMXVVMCS, au16Reserved1[8]), 354 355 SSMFIELD_ENTRY( VMXVVMCS, HostEs), 356 SSMFIELD_ENTRY( VMXVVMCS, HostCs), 357 SSMFIELD_ENTRY( VMXVVMCS, HostSs), 358 SSMFIELD_ENTRY( VMXVVMCS, HostDs), 359 SSMFIELD_ENTRY( VMXVVMCS, HostFs), 360 SSMFIELD_ENTRY( VMXVVMCS, HostGs), 361 SSMFIELD_ENTRY( VMXVVMCS, HostTr), 362 SSMFIELD_ENTRY_IGNORE(VMXVVMCS, au16Reserved2), 363 364 SSMFIELD_ENTRY( VMXVVMCS, u32PinCtls), 365 SSMFIELD_ENTRY( VMXVVMCS, u32ProcCtls), 366 SSMFIELD_ENTRY( VMXVVMCS, u32XcptBitmap), 367 SSMFIELD_ENTRY( VMXVVMCS, u32XcptPFMask), 368 SSMFIELD_ENTRY( VMXVVMCS, u32XcptPFMatch), 369 SSMFIELD_ENTRY( VMXVVMCS, u32Cr3TargetCount), 370 SSMFIELD_ENTRY( VMXVVMCS, u32ExitCtls), 371 SSMFIELD_ENTRY( VMXVVMCS, u32ExitMsrStoreCount), 372 SSMFIELD_ENTRY( VMXVVMCS, u32ExitMsrLoadCount), 373 SSMFIELD_ENTRY( VMXVVMCS, u32EntryCtls), 374 SSMFIELD_ENTRY( VMXVVMCS, u32EntryMsrLoadCount), 375 SSMFIELD_ENTRY( VMXVVMCS, u32EntryIntInfo), 376 SSMFIELD_ENTRY( VMXVVMCS, u32EntryXcptErrCode), 377 SSMFIELD_ENTRY( VMXVVMCS, u32EntryInstrLen), 378 SSMFIELD_ENTRY( VMXVVMCS, u32TprThreshold), 379 SSMFIELD_ENTRY( VMXVVMCS, u32ProcCtls2), 380 SSMFIELD_ENTRY( VMXVVMCS, u32PleGap), 381 SSMFIELD_ENTRY( VMXVVMCS, u32PleWindow), 382 SSMFIELD_ENTRY_IGNORE(VMXVVMCS, au32Reserved1), 383 384 SSMFIELD_ENTRY( VMXVVMCS, u32RoVmInstrError), 385 SSMFIELD_ENTRY( VMXVVMCS, u32RoExitReason), 386 SSMFIELD_ENTRY( VMXVVMCS, u32RoExitIntInfo), 387 SSMFIELD_ENTRY( VMXVVMCS, u32RoExitIntErrCode), 388 SSMFIELD_ENTRY( VMXVVMCS, u32RoIdtVectoringInfo), 389 SSMFIELD_ENTRY( VMXVVMCS, u32RoIdtVectoringErrCode), 390 SSMFIELD_ENTRY( VMXVVMCS, u32RoExitInstrLen), 391 SSMFIELD_ENTRY( VMXVVMCS, u32RoExitInstrInfo), 392 SSMFIELD_ENTRY_IGNORE(VMXVVMCS, au32RoReserved2), 393 394 SSMFIELD_ENTRY( VMXVVMCS, u32GuestEsLimit), 395 SSMFIELD_ENTRY( VMXVVMCS, u32GuestCsLimit), 396 SSMFIELD_ENTRY( VMXVVMCS, u32GuestSsLimit), 397 SSMFIELD_ENTRY( VMXVVMCS, u32GuestDsLimit), 398 SSMFIELD_ENTRY( VMXVVMCS, u32GuestFsLimit), 399 SSMFIELD_ENTRY( VMXVVMCS, u32GuestGsLimit), 400 SSMFIELD_ENTRY( VMXVVMCS, u32GuestLdtrLimit), 401 SSMFIELD_ENTRY( VMXVVMCS, u32GuestTrLimit), 402 SSMFIELD_ENTRY( VMXVVMCS, u32GuestGdtrLimit), 403 SSMFIELD_ENTRY( VMXVVMCS, u32GuestIdtrLimit), 404 SSMFIELD_ENTRY( VMXVVMCS, u32GuestEsAttr), 405 SSMFIELD_ENTRY( VMXVVMCS, u32GuestCsAttr), 406 SSMFIELD_ENTRY( VMXVVMCS, u32GuestSsAttr), 407 SSMFIELD_ENTRY( VMXVVMCS, u32GuestDsAttr), 408 SSMFIELD_ENTRY( VMXVVMCS, u32GuestFsAttr), 409 SSMFIELD_ENTRY( VMXVVMCS, 
u32GuestGsAttr), 410 SSMFIELD_ENTRY( VMXVVMCS, u32GuestLdtrAttr), 411 SSMFIELD_ENTRY( VMXVVMCS, u32GuestTrAttr), 412 SSMFIELD_ENTRY( VMXVVMCS, u32GuestIntrState), 413 SSMFIELD_ENTRY( VMXVVMCS, u32GuestActivityState), 414 SSMFIELD_ENTRY( VMXVVMCS, u32GuestSmBase), 415 SSMFIELD_ENTRY( VMXVVMCS, u32GuestSysenterCS), 416 SSMFIELD_ENTRY( VMXVVMCS, u32PreemptTimer), 417 SSMFIELD_ENTRY_IGNORE(VMXVVMCS, au32Reserved3), 418 419 SSMFIELD_ENTRY( VMXVVMCS, u32HostSysenterCs), 420 SSMFIELD_ENTRY_IGNORE(VMXVVMCS, au32Reserved4), 421 422 SSMFIELD_ENTRY( VMXVVMCS, u64AddrIoBitmapA), 423 SSMFIELD_ENTRY( VMXVVMCS, u64AddrIoBitmapB), 424 SSMFIELD_ENTRY( VMXVVMCS, u64AddrMsrBitmap), 425 SSMFIELD_ENTRY( VMXVVMCS, u64AddrExitMsrStore), 426 SSMFIELD_ENTRY( VMXVVMCS, u64AddrExitMsrLoad), 427 SSMFIELD_ENTRY( VMXVVMCS, u64AddrEntryMsrLoad), 428 SSMFIELD_ENTRY( VMXVVMCS, u64ExecVmcsPtr), 429 SSMFIELD_ENTRY( VMXVVMCS, u64AddrPml), 430 SSMFIELD_ENTRY( VMXVVMCS, u64TscOffset), 431 SSMFIELD_ENTRY( VMXVVMCS, u64AddrVirtApic), 432 SSMFIELD_ENTRY( VMXVVMCS, u64AddrApicAccess), 433 SSMFIELD_ENTRY( VMXVVMCS, u64AddrPostedIntDesc), 434 SSMFIELD_ENTRY( VMXVVMCS, u64VmFuncCtls), 435 SSMFIELD_ENTRY( VMXVVMCS, u64EptpPtr), 436 SSMFIELD_ENTRY( VMXVVMCS, u64EoiExitBitmap0), 437 SSMFIELD_ENTRY( VMXVVMCS, u64EoiExitBitmap1), 438 SSMFIELD_ENTRY( VMXVVMCS, u64EoiExitBitmap2), 439 SSMFIELD_ENTRY( VMXVVMCS, u64EoiExitBitmap3), 440 SSMFIELD_ENTRY( VMXVVMCS, u64AddrEptpList), 441 SSMFIELD_ENTRY( VMXVVMCS, u64AddrVmreadBitmap), 442 SSMFIELD_ENTRY( VMXVVMCS, u64AddrVmwriteBitmap), 443 SSMFIELD_ENTRY( VMXVVMCS, u64AddrXcptVeInfo), 444 SSMFIELD_ENTRY( VMXVVMCS, u64XssBitmap), 445 SSMFIELD_ENTRY( VMXVVMCS, u64AddrEnclsBitmap), 446 SSMFIELD_ENTRY( VMXVVMCS, u64TscMultiplier), 447 SSMFIELD_ENTRY_IGNORE(VMXVVMCS, au64Reserved0), 448 449 SSMFIELD_ENTRY( VMXVVMCS, u64RoGuestPhysAddr), 450 SSMFIELD_ENTRY_IGNORE(VMXVVMCS, au64Reserved1), 451 452 SSMFIELD_ENTRY( VMXVVMCS, u64VmcsLinkPtr), 453 SSMFIELD_ENTRY( VMXVVMCS, u64GuestDebugCtlMsr), 454 SSMFIELD_ENTRY( VMXVVMCS, u64GuestPatMsr), 455 SSMFIELD_ENTRY( VMXVVMCS, u64GuestEferMsr), 456 SSMFIELD_ENTRY( VMXVVMCS, u64GuestPerfGlobalCtlMsr), 457 SSMFIELD_ENTRY( VMXVVMCS, u64GuestPdpte0), 458 SSMFIELD_ENTRY( VMXVVMCS, u64GuestPdpte1), 459 SSMFIELD_ENTRY( VMXVVMCS, u64GuestPdpte2), 460 SSMFIELD_ENTRY( VMXVVMCS, u64GuestPdpte3), 461 SSMFIELD_ENTRY( VMXVVMCS, u64GuestBndcfgsMsr), 462 SSMFIELD_ENTRY_IGNORE(VMXVVMCS, au64Reserved2), 463 464 SSMFIELD_ENTRY( VMXVVMCS, u64HostPatMsr), 465 SSMFIELD_ENTRY( VMXVVMCS, u64HostEferMsr), 466 SSMFIELD_ENTRY( VMXVVMCS, u64HostPerfGlobalCtlMsr), 467 SSMFIELD_ENTRY_IGNORE(VMXVVMCS, au64Reserved3), 468 469 SSMFIELD_ENTRY( VMXVVMCS, u64Cr0Mask), 470 SSMFIELD_ENTRY( VMXVVMCS, u64Cr4Mask), 471 SSMFIELD_ENTRY( VMXVVMCS, u64Cr0ReadShadow), 472 SSMFIELD_ENTRY( VMXVVMCS, u64Cr4ReadShadow), 473 SSMFIELD_ENTRY( VMXVVMCS, u64Cr3Target0), 474 SSMFIELD_ENTRY( VMXVVMCS, u64Cr3Target1), 475 SSMFIELD_ENTRY( VMXVVMCS, u64Cr3Target2), 476 SSMFIELD_ENTRY( VMXVVMCS, u64Cr3Target3), 477 SSMFIELD_ENTRY( VMXVVMCS, au64Reserved4), 478 479 SSMFIELD_ENTRY( VMXVVMCS, u64RoExitQual), 480 SSMFIELD_ENTRY( VMXVVMCS, u64RoIoRcx), 481 SSMFIELD_ENTRY( VMXVVMCS, u64RoIoRsi), 482 SSMFIELD_ENTRY( VMXVVMCS, u64RoIoRdi), 483 SSMFIELD_ENTRY( VMXVVMCS, u64RoIoRip), 484 SSMFIELD_ENTRY( VMXVVMCS, u64RoGuestLinearAddr), 485 SSMFIELD_ENTRY( VMXVVMCS, au64Reserved5), 486 487 SSMFIELD_ENTRY( VMXVVMCS, u64GuestCr0), 488 SSMFIELD_ENTRY( VMXVVMCS, u64GuestCr3), 489 SSMFIELD_ENTRY( VMXVVMCS, u64GuestCr4), 490 SSMFIELD_ENTRY( 
VMXVVMCS, u64GuestEsBase), 491 SSMFIELD_ENTRY( VMXVVMCS, u64GuestCsBase), 492 SSMFIELD_ENTRY( VMXVVMCS, u64GuestSsBase), 493 SSMFIELD_ENTRY( VMXVVMCS, u64GuestDsBase), 494 SSMFIELD_ENTRY( VMXVVMCS, u64GuestFsBase), 495 SSMFIELD_ENTRY( VMXVVMCS, u64GuestGsBase), 496 SSMFIELD_ENTRY( VMXVVMCS, u64GuestLdtrBase), 497 SSMFIELD_ENTRY( VMXVVMCS, u64GuestTrBase), 498 SSMFIELD_ENTRY( VMXVVMCS, u64GuestGdtrBase), 499 SSMFIELD_ENTRY( VMXVVMCS, u64GuestIdtrBase), 500 SSMFIELD_ENTRY( VMXVVMCS, u64GuestDr7), 501 SSMFIELD_ENTRY( VMXVVMCS, u64GuestRsp), 502 SSMFIELD_ENTRY( VMXVVMCS, u64GuestRip), 503 SSMFIELD_ENTRY( VMXVVMCS, u64GuestRFlags), 504 SSMFIELD_ENTRY( VMXVVMCS, u64GuestPendingDbgXcpt), 505 SSMFIELD_ENTRY( VMXVVMCS, u64GuestSysenterEsp), 506 SSMFIELD_ENTRY( VMXVVMCS, u64GuestSysenterEip), 507 SSMFIELD_ENTRY( VMXVVMCS, au64Reserved6), 508 509 SSMFIELD_ENTRY( VMXVVMCS, u64HostCr0), 510 SSMFIELD_ENTRY( VMXVVMCS, u64HostCr3), 511 SSMFIELD_ENTRY( VMXVVMCS, u64HostCr4), 512 SSMFIELD_ENTRY( VMXVVMCS, u64HostFsBase), 513 SSMFIELD_ENTRY( VMXVVMCS, u64HostGsBase), 514 SSMFIELD_ENTRY( VMXVVMCS, u64HostTrBase), 515 SSMFIELD_ENTRY( VMXVVMCS, u64HostGdtrBase), 516 SSMFIELD_ENTRY( VMXVVMCS, u64HostIdtrBase), 517 SSMFIELD_ENTRY( VMXVVMCS, u64HostSysenterEsp), 518 SSMFIELD_ENTRY( VMXVVMCS, u64HostSysenterEip), 519 SSMFIELD_ENTRY( VMXVVMCS, u64HostRsp), 520 SSMFIELD_ENTRY( VMXVVMCS, u64HostRip), 521 SSMFIELD_ENTRY( VMXVVMCS, au64Reserved7), 522 SSMFIELD_ENTRY_TERM() 523 }; 524 328 525 /** Saved state field descriptors for CPUMCTX. */ 329 526 static const SSMFIELD g_aCpumX87Fields[] = … … 928 1125 for (VMCPUID i = 0; i < pVM->cCpus; i++) 929 1126 { 930 PVMCPU pVCpu = &pVM->aCpus[i]; 931 if (pVCpu->cpum.s.Guest.hwvirt.vmx.pVmcsR3) 932 { 933 SUPR3PageFreeEx(pVCpu->cpum.s.Guest.hwvirt.vmx.pVmcsR3, VMX_V_VMCS_PAGES); 934 pVCpu->cpum.s.Guest.hwvirt.vmx.pVmcsR3 = NULL; 935 } 936 if (pVCpu->cpum.s.Guest.hwvirt.vmx.pShadowVmcsR3) 937 { 938 SUPR3PageFreeEx(pVCpu->cpum.s.Guest.hwvirt.vmx.pShadowVmcsR3, VMX_V_VMCS_PAGES); 939 pVCpu->cpum.s.Guest.hwvirt.vmx.pShadowVmcsR3 = NULL; 940 } 941 if (pVCpu->cpum.s.Guest.hwvirt.vmx.pvVirtApicPageR3) 942 { 943 SUPR3PageFreeEx(pVCpu->cpum.s.Guest.hwvirt.vmx.pvVirtApicPageR3, VMX_V_VIRT_APIC_PAGES); 944 pVCpu->cpum.s.Guest.hwvirt.vmx.pvVirtApicPageR3 = NULL; 945 } 946 if (pVCpu->cpum.s.Guest.hwvirt.vmx.pvVmreadBitmapR3) 947 { 948 SUPR3PageFreeEx(pVCpu->cpum.s.Guest.hwvirt.vmx.pvVmreadBitmapR3, VMX_V_VMREAD_VMWRITE_BITMAP_PAGES); 949 pVCpu->cpum.s.Guest.hwvirt.vmx.pvVmreadBitmapR3 = NULL; 950 } 951 if (pVCpu->cpum.s.Guest.hwvirt.vmx.pvVmwriteBitmapR3) 952 { 953 SUPR3PageFreeEx(pVCpu->cpum.s.Guest.hwvirt.vmx.pvVmwriteBitmapR3, VMX_V_VMREAD_VMWRITE_BITMAP_PAGES); 954 pVCpu->cpum.s.Guest.hwvirt.vmx.pvVmwriteBitmapR3 = NULL; 955 } 956 if (pVCpu->cpum.s.Guest.hwvirt.vmx.pAutoMsrAreaR3) 957 { 958 SUPR3PageFreeEx(pVCpu->cpum.s.Guest.hwvirt.vmx.pAutoMsrAreaR3, VMX_V_AUTOMSR_AREA_PAGES); 959 pVCpu->cpum.s.Guest.hwvirt.vmx.pAutoMsrAreaR3 = NULL; 960 } 961 if (pVCpu->cpum.s.Guest.hwvirt.vmx.pvMsrBitmapR3) 962 { 963 SUPR3PageFreeEx(pVCpu->cpum.s.Guest.hwvirt.vmx.pvMsrBitmapR3, VMX_V_MSR_BITMAP_PAGES); 964 pVCpu->cpum.s.Guest.hwvirt.vmx.pvMsrBitmapR3 = NULL; 965 } 966 if (pVCpu->cpum.s.Guest.hwvirt.vmx.pvIoBitmapR3) 967 { 968 SUPR3PageFreeEx(pVCpu->cpum.s.Guest.hwvirt.vmx.pvIoBitmapR3, VMX_V_IO_BITMAP_A_PAGES + VMX_V_IO_BITMAP_B_PAGES); 969 pVCpu->cpum.s.Guest.hwvirt.vmx.pvIoBitmapR3 = NULL; 1127 PVMCPU pVCpu = &pVM->aCpus[i]; 1128 PCPUMCTX pCtx = &pVCpu->cpum.s.Guest; 1129 1130 if 
(pCtx->hwvirt.vmx.pVmcsR3) 1131 { 1132 SUPR3ContFree(pCtx->hwvirt.vmx.pVmcsR3, VMX_V_VMCS_PAGES); 1133 pCtx->hwvirt.vmx.pVmcsR3 = NULL; 1134 } 1135 if (pCtx->hwvirt.vmx.pShadowVmcsR3) 1136 { 1137 SUPR3ContFree(pCtx->hwvirt.vmx.pShadowVmcsR3, VMX_V_VMCS_PAGES); 1138 pCtx->hwvirt.vmx.pShadowVmcsR3 = NULL; 1139 } 1140 if (pCtx->hwvirt.vmx.pvVmreadBitmapR3) 1141 { 1142 SUPR3ContFree(pCtx->hwvirt.vmx.pvVmreadBitmapR3, VMX_V_VMREAD_VMWRITE_BITMAP_PAGES); 1143 pCtx->hwvirt.vmx.pvVmreadBitmapR3 = NULL; 1144 } 1145 if (pCtx->hwvirt.vmx.pvVmwriteBitmapR3) 1146 { 1147 SUPR3ContFree(pCtx->hwvirt.vmx.pvVmwriteBitmapR3, VMX_V_VMREAD_VMWRITE_BITMAP_PAGES); 1148 pCtx->hwvirt.vmx.pvVmwriteBitmapR3 = NULL; 1149 } 1150 if (pCtx->hwvirt.vmx.pEntryMsrLoadAreaR3) 1151 { 1152 SUPR3ContFree(pCtx->hwvirt.vmx.pEntryMsrLoadAreaR3, VMX_V_AUTOMSR_AREA_PAGES); 1153 pCtx->hwvirt.vmx.pEntryMsrLoadAreaR3 = NULL; 1154 } 1155 if (pCtx->hwvirt.vmx.pExitMsrStoreAreaR3) 1156 { 1157 SUPR3ContFree(pCtx->hwvirt.vmx.pExitMsrStoreAreaR3, VMX_V_AUTOMSR_AREA_PAGES); 1158 pCtx->hwvirt.vmx.pExitMsrStoreAreaR3 = NULL; 1159 } 1160 if (pCtx->hwvirt.vmx.pExitMsrLoadAreaR3) 1161 { 1162 SUPR3ContFree(pCtx->hwvirt.vmx.pExitMsrLoadAreaR3, VMX_V_AUTOMSR_AREA_PAGES); 1163 pCtx->hwvirt.vmx.pExitMsrLoadAreaR3 = NULL; 1164 } 1165 if (pCtx->hwvirt.vmx.pvMsrBitmapR3) 1166 { 1167 SUPR3ContFree(pCtx->hwvirt.vmx.pvMsrBitmapR3, VMX_V_MSR_BITMAP_PAGES); 1168 pCtx->hwvirt.vmx.pvMsrBitmapR3 = NULL; 1169 } 1170 if (pCtx->hwvirt.vmx.pvIoBitmapR3) 1171 { 1172 SUPR3ContFree(pCtx->hwvirt.vmx.pvIoBitmapR3, VMX_V_IO_BITMAP_A_PAGES + VMX_V_IO_BITMAP_B_PAGES); 1173 pCtx->hwvirt.vmx.pvIoBitmapR3 = NULL; 970 1174 } 971 1175 } … … 982 1186 { 983 1187 int rc = VINF_SUCCESS; 984 LogRel(("CPUM: Allocating %u pages for the nested-guest VMCS and related structures\n", 985 pVM->cCpus * ( VMX_V_VMCS_PAGES + VMX_V_VIRT_APIC_PAGES + VMX_V_VMREAD_VMWRITE_BITMAP_PAGES * 2 986 + VMX_V_AUTOMSR_AREA_PAGES))); 1188 uint32_t const cPages = (2 * VMX_V_VMCS_PAGES) 1189 + VMX_V_VIRT_APIC_PAGES 1190 + (2 * VMX_V_VMREAD_VMWRITE_BITMAP_SIZE) 1191 + (3 * VMX_V_AUTOMSR_AREA_SIZE) 1192 + VMX_V_MSR_BITMAP_SIZE 1193 + (VMX_V_IO_BITMAP_A_SIZE + VMX_V_IO_BITMAP_B_SIZE); 1194 LogRel(("CPUM: Allocating %u pages for the nested-guest VMCS and related structures\n", pVM->cCpus * cPages)); 987 1195 for (VMCPUID i = 0; i < pVM->cCpus; i++) 988 1196 { 989 PVMCPU pVCpu = &pVM->aCpus[i]; 990 pVCpu->cpum.s.Guest.hwvirt.enmHwvirt = CPUMHWVIRT_VMX; 1197 PVMCPU pVCpu = &pVM->aCpus[i]; 1198 PCPUMCTX pCtx = &pVCpu->cpum.s.Guest; 1199 pCtx->hwvirt.enmHwvirt = CPUMHWVIRT_VMX; 991 1200 992 1201 /* … … 994 1203 */ 995 1204 Assert(VMX_V_VMCS_PAGES == 1); 996 Assert(!pVCpu->cpum.s.Guest.hwvirt.vmx.pVmcsR3); 997 rc = SUPR3PageAllocEx(VMX_V_VMCS_PAGES, 0 /* fFlags */, (void **)&pVCpu->cpum.s.Guest.hwvirt.vmx.pVmcsR3, 998 &pVCpu->cpum.s.Guest.hwvirt.vmx.pVmcsR0, NULL /* paPages */); 999 if (RT_FAILURE(rc)) 1000 { 1001 Assert(!pVCpu->cpum.s.Guest.hwvirt.vmx.pVmcsR3); 1205 pCtx->hwvirt.vmx.pVmcsR3 = (PVMXVVMCS)SUPR3ContAlloc(VMX_V_VMCS_PAGES, 1206 &pCtx->hwvirt.vmx.pVmcsR0, 1207 &pCtx->hwvirt.vmx.HCPhysVmcs); 1208 if (pCtx->hwvirt.vmx.pVmcsR3) 1209 { /* likely */ } 1210 else 1211 { 1002 1212 LogRel(("CPUM%u: Failed to alloc %u pages for the nested-guest's VMCS\n", pVCpu->idCpu, VMX_V_VMCS_PAGES)); 1003 1213 break; … … 1008 1218 */ 1009 1219 Assert(VMX_V_VMCS_PAGES == 1); 1010 Assert(!pVCpu->cpum.s.Guest.hwvirt.vmx.pShadowVmcsR3); 1011 rc = SUPR3PageAllocEx(VMX_V_VMCS_PAGES, 0 /* fFlags */, (void 
**)&pVCpu->cpum.s.Guest.hwvirt.vmx.pShadowVmcsR3, 1012 &pVCpu->cpum.s.Guest.hwvirt.vmx.pShadowVmcsR0, NULL /* paPages */); 1013 if (RT_FAILURE(rc)) 1014 { 1015 Assert(!pVCpu->cpum.s.Guest.hwvirt.vmx.pShadowVmcsR3); 1220 pCtx->hwvirt.vmx.pShadowVmcsR3 = (PVMXVVMCS)SUPR3ContAlloc(VMX_V_VMCS_PAGES, 1221 &pCtx->hwvirt.vmx.pShadowVmcsR0, 1222 &pCtx->hwvirt.vmx.HCPhysShadowVmcs); 1223 if (pCtx->hwvirt.vmx.pShadowVmcsR3) 1224 { /* likely */ } 1225 else 1226 { 1016 1227 LogRel(("CPUM%u: Failed to alloc %u pages for the nested-guest's shadow VMCS\n", pVCpu->idCpu, VMX_V_VMCS_PAGES)); 1017 break;1018 }1019 1020 /*1021 * Allocate the Virtual-APIC page.1022 */1023 Assert(!pVCpu->cpum.s.Guest.hwvirt.vmx.pvVirtApicPageR3);1024 rc = SUPR3PageAllocEx(VMX_V_VIRT_APIC_PAGES, 0 /* fFlags */, &pVCpu->cpum.s.Guest.hwvirt.vmx.pvVirtApicPageR3,1025 &pVCpu->cpum.s.Guest.hwvirt.vmx.pvVirtApicPageR0, NULL /* paPages */);1026 if (RT_FAILURE(rc))1027 {1028 Assert(!pVCpu->cpum.s.Guest.hwvirt.vmx.pvVirtApicPageR3);1029 LogRel(("CPUM%u: Failed to alloc %u pages for the nested-guest's Virtual-APIC page\n", pVCpu->idCpu,1030 VMX_V_VIRT_APIC_PAGES));1031 1228 break; 1032 1229 } … … 1035 1232 * Allocate the VMREAD-bitmap. 1036 1233 */ 1037 Assert(!pVCpu->cpum.s.Guest.hwvirt.vmx.pvVmreadBitmapR3); 1038 rc = SUPR3PageAllocEx(VMX_V_VMREAD_VMWRITE_BITMAP_PAGES, 0 /* fFlags */, &pVCpu->cpum.s.Guest.hwvirt.vmx.pvVmreadBitmapR3, 1039 &pVCpu->cpum.s.Guest.hwvirt.vmx.pvVmreadBitmapR0, NULL /* paPages */); 1040 if (RT_FAILURE(rc)) 1041 { 1042 Assert(!pVCpu->cpum.s.Guest.hwvirt.vmx.pvVmreadBitmapR3); 1234 pCtx->hwvirt.vmx.pvVmreadBitmapR3 = SUPR3ContAlloc(VMX_V_VMREAD_VMWRITE_BITMAP_PAGES, 1235 &pCtx->hwvirt.vmx.pvVmreadBitmapR0, 1236 &pCtx->hwvirt.vmx.HCPhysVmreadBitmap); 1237 if (pCtx->hwvirt.vmx.pvVmreadBitmapR3) 1238 { /* likely */ } 1239 else 1240 { 1043 1241 LogRel(("CPUM%u: Failed to alloc %u pages for the nested-guest's VMREAD-bitmap\n", pVCpu->idCpu, 1044 1242 VMX_V_VMREAD_VMWRITE_BITMAP_PAGES)); … … 1049 1247 * Allocatge the VMWRITE-bitmap. 1050 1248 */ 1051 Assert(!pVCpu->cpum.s.Guest.hwvirt.vmx.pvVmwriteBitmapR3);1052 rc = SUPR3PageAllocEx(VMX_V_VMREAD_VMWRITE_BITMAP_PAGES, 0 /* fFlags */,1053 &pVCpu->cpum.s.Guest.hwvirt.vmx.pvVmwriteBitmapR3,1054 &pVCpu->cpum.s.Guest.hwvirt.vmx.pvVmwriteBitmapR0, NULL /* paPages */);1055 if (RT_FAILURE(rc))1056 {1057 Assert(!pVCpu->cpum.s.Guest.hwvirt.vmx.pvVmwriteBitmapR3);1249 pCtx->hwvirt.vmx.pvVmwriteBitmapR3 = SUPR3ContAlloc(VMX_V_VMREAD_VMWRITE_BITMAP_PAGES, 1250 &pCtx->hwvirt.vmx.pvVmwriteBitmapR0, 1251 &pCtx->hwvirt.vmx.HCPhysVmwriteBitmap); 1252 if (pCtx->hwvirt.vmx.pvVmwriteBitmapR3) 1253 { /* likely */ } 1254 else 1255 { 1058 1256 LogRel(("CPUM%u: Failed to alloc %u pages for the nested-guest's VMWRITE-bitmap\n", pVCpu->idCpu, 1059 1257 VMX_V_VMREAD_VMWRITE_BITMAP_PAGES)); … … 1062 1260 1063 1261 /* 1064 * Allocate the MSR auto-load/storearea.1262 * Allocate the VM-entry MSR-load area. 
1065 1263 */ 1066 Assert(!pVCpu->cpum.s.Guest.hwvirt.vmx.pAutoMsrAreaR3); 1067 rc = SUPR3PageAllocEx(VMX_V_AUTOMSR_AREA_PAGES, 0 /* fFlags */, (void **)&pVCpu->cpum.s.Guest.hwvirt.vmx.pAutoMsrAreaR3, 1068 &pVCpu->cpum.s.Guest.hwvirt.vmx.pAutoMsrAreaR0, NULL /* paPages */); 1069 if (RT_FAILURE(rc)) 1070 { 1071 Assert(!pVCpu->cpum.s.Guest.hwvirt.vmx.pAutoMsrAreaR3); 1072 LogRel(("CPUM%u: Failed to alloc %u pages for the nested-guest's auto-load/store MSR area\n", pVCpu->idCpu, 1264 pCtx->hwvirt.vmx.pEntryMsrLoadAreaR3 = (PVMXAUTOMSR)SUPR3ContAlloc(VMX_V_AUTOMSR_AREA_PAGES, 1265 &pCtx->hwvirt.vmx.pEntryMsrLoadAreaR0, 1266 &pCtx->hwvirt.vmx.HCPhysEntryMsrLoadArea); 1267 if (pCtx->hwvirt.vmx.pEntryMsrLoadAreaR3) 1268 { /* likely */ } 1269 else 1270 { 1271 LogRel(("CPUM%u: Failed to alloc %u pages for the nested-guest's VM-entry MSR-load area\n", pVCpu->idCpu, 1272 VMX_V_AUTOMSR_AREA_PAGES)); 1273 break; 1274 } 1275 1276 /* 1277 * Allocate the VM-exit MSR-store area. 1278 */ 1279 pCtx->hwvirt.vmx.pExitMsrStoreAreaR3 = (PVMXAUTOMSR)SUPR3ContAlloc(VMX_V_AUTOMSR_AREA_PAGES, 1280 &pCtx->hwvirt.vmx.pExitMsrStoreAreaR0, 1281 &pCtx->hwvirt.vmx.HCPhysExitMsrStoreArea); 1282 if (pCtx->hwvirt.vmx.pExitMsrStoreAreaR3) 1283 { /* likely */ } 1284 else 1285 { 1286 LogRel(("CPUM%u: Failed to alloc %u pages for the nested-guest's VM-exit MSR-store area\n", pVCpu->idCpu, 1287 VMX_V_AUTOMSR_AREA_PAGES)); 1288 break; 1289 } 1290 1291 /* 1292 * Allocate the VM-exit MSR-load area. 1293 */ 1294 pCtx->hwvirt.vmx.pExitMsrLoadAreaR3 = (PVMXAUTOMSR)SUPR3ContAlloc(VMX_V_AUTOMSR_AREA_PAGES, 1295 &pCtx->hwvirt.vmx.pExitMsrLoadAreaR0, 1296 &pCtx->hwvirt.vmx.HCPhysExitMsrLoadArea); 1297 if (pCtx->hwvirt.vmx.pExitMsrLoadAreaR3) 1298 { /* likely */ } 1299 else 1300 { 1301 LogRel(("CPUM%u: Failed to alloc %u pages for the nested-guest's VM-exit MSR-load area\n", pVCpu->idCpu, 1073 1302 VMX_V_AUTOMSR_AREA_PAGES)); 1074 1303 break; … … 1078 1307 * Allocate the MSR bitmap. 1079 1308 */ 1080 Assert(!pVCpu->cpum.s.Guest.hwvirt.vmx.pvMsrBitmapR3); 1081 rc = SUPR3PageAllocEx(VMX_V_MSR_BITMAP_PAGES, 0 /* fFlags */, (void **)&pVCpu->cpum.s.Guest.hwvirt.vmx.pvMsrBitmapR3, 1082 &pVCpu->cpum.s.Guest.hwvirt.vmx.pvMsrBitmapR0, NULL /* paPages */); 1083 if (RT_FAILURE(rc)) 1084 { 1085 Assert(!pVCpu->cpum.s.Guest.hwvirt.vmx.pvMsrBitmapR3); 1309 pCtx->hwvirt.vmx.pvMsrBitmapR3 = SUPR3ContAlloc(VMX_V_MSR_BITMAP_PAGES, 1310 &pCtx->hwvirt.vmx.pvMsrBitmapR0, 1311 &pCtx->hwvirt.vmx.HCPhysMsrBitmap); 1312 if (pCtx->hwvirt.vmx.pvMsrBitmapR3) 1313 { /* likely */ } 1314 else 1315 { 1086 1316 LogRel(("CPUM%u: Failed to alloc %u pages for the nested-guest's MSR bitmap\n", pVCpu->idCpu, 1087 1317 VMX_V_MSR_BITMAP_PAGES)); … … 1092 1322 * Allocate the I/O bitmaps (A and B). 
1093 1323 */ 1094 Assert(!pVCpu->cpum.s.Guest.hwvirt.vmx.pvIoBitmapR3);1095 rc = SUPR3PageAllocEx(VMX_V_IO_BITMAP_A_PAGES + VMX_V_IO_BITMAP_B_PAGES, 0 /* fFlags */,1096 (void **)&pVCpu->cpum.s.Guest.hwvirt.vmx.pvIoBitmapR3,1097 &pVCpu->cpum.s.Guest.hwvirt.vmx.pvIoBitmapR0, NULL /* paPages */);1098 if (RT_FAILURE(rc))1099 {1100 Assert(!pVCpu->cpum.s.Guest.hwvirt.vmx.pvIoBitmapR3);1324 pCtx->hwvirt.vmx.pvIoBitmapR3 = SUPR3ContAlloc(VMX_V_IO_BITMAP_A_PAGES + VMX_V_IO_BITMAP_B_PAGES, 1325 &pCtx->hwvirt.vmx.pvIoBitmapR0, 1326 &pCtx->hwvirt.vmx.HCPhysIoBitmap); 1327 if (pCtx->hwvirt.vmx.pvIoBitmapR3) 1328 { /* likely */ } 1329 else 1330 { 1101 1331 LogRel(("CPUM%u: Failed to alloc %u pages for the nested-guest's I/O bitmaps\n", pVCpu->idCpu, 1102 1332 VMX_V_IO_BITMAP_A_PAGES + VMX_V_IO_BITMAP_B_PAGES)); 1103 1333 break; 1104 1334 } 1335 1336 /* 1337 * Zero out all allocated pages (should compress well for saved-state). 1338 */ 1339 memset(pCtx->hwvirt.vmx.CTX_SUFF(pVmcs), 0, VMX_V_VMCS_SIZE); 1340 memset(pCtx->hwvirt.vmx.CTX_SUFF(pShadowVmcs), 0, VMX_V_VMCS_SIZE); 1341 memset(pCtx->hwvirt.vmx.CTX_SUFF(pvVmreadBitmap), 0, VMX_V_VMREAD_VMWRITE_BITMAP_SIZE); 1342 memset(pCtx->hwvirt.vmx.CTX_SUFF(pvVmwriteBitmap), 0, VMX_V_VMREAD_VMWRITE_BITMAP_SIZE); 1343 memset(pCtx->hwvirt.vmx.CTX_SUFF(pEntryMsrLoadArea), 0, VMX_V_AUTOMSR_AREA_SIZE); 1344 memset(pCtx->hwvirt.vmx.CTX_SUFF(pExitMsrStoreArea), 0, VMX_V_AUTOMSR_AREA_SIZE); 1345 memset(pCtx->hwvirt.vmx.CTX_SUFF(pExitMsrLoadArea), 0, VMX_V_AUTOMSR_AREA_SIZE); 1346 memset(pCtx->hwvirt.vmx.CTX_SUFF(pvMsrBitmap), 0, VMX_V_MSR_BITMAP_SIZE); 1347 memset(pCtx->hwvirt.vmx.CTX_SUFF(pvIoBitmap), 0, VMX_V_IO_BITMAP_A_SIZE + VMX_V_IO_BITMAP_B_SIZE); 1105 1348 } 1106 1349 … … 1454 1697 1455 1698 1456 #if 01457 1699 /** 1458 1700 * Checks whether the given guest CPU VMX features are compatible with the provided … … 1468 1710 static bool cpumR3AreVmxCpuFeaturesCompatible(PVM pVM, PCCPUMFEATURES pBase, PCCPUMFEATURES pGst) 1469 1711 { 1470 if (cpumR3IsHwAssist VmxNstGstExecAllowed(pVM))1712 if (cpumR3IsHwAssistNstGstExecAllowed(pVM)) 1471 1713 { 1472 1714 uint64_t const fBase = ((uint64_t)pBase->fVmxInsOutInfo << 0) | ((uint64_t)pBase->fVmxExtIntExit << 1) … … 1537 1779 1538 1780 if ((fBase | fGst) != fBase) 1781 { 1782 LogRel(("CPUM: Host VMX features are incompatible with those from the saved state. 
fBase=%#RX64 fGst=%#RX64\n", 1783 fBase, fGst)); 1539 1784 return false; 1785 } 1540 1786 return true; 1541 1787 } 1542 1788 return true; 1543 1789 } 1544 #endif1545 1790 1546 1791 … … 2336 2581 SSMR3PutBool(pSSM, pGstCtx->hwvirt.fGif); 2337 2582 } 2583 if (pVM->cpum.s.GuestFeatures.fVmx) 2584 { 2585 Assert(pGstCtx->hwvirt.vmx.CTX_SUFF(pVmcs)); 2586 SSMR3PutGCPhys(pSSM, pGstCtx->hwvirt.vmx.GCPhysVmxon); 2587 SSMR3PutGCPhys(pSSM, pGstCtx->hwvirt.vmx.GCPhysVmcs); 2588 SSMR3PutGCPhys(pSSM, pGstCtx->hwvirt.vmx.GCPhysShadowVmcs); 2589 SSMR3PutU32(pSSM, (uint32_t)pGstCtx->hwvirt.vmx.enmDiag); 2590 SSMR3PutU32(pSSM, (uint32_t)pGstCtx->hwvirt.vmx.enmAbort); 2591 SSMR3PutU32(pSSM, pGstCtx->hwvirt.vmx.uAbortAux); 2592 SSMR3PutBool(pSSM, pGstCtx->hwvirt.vmx.fInVmxRootMode); 2593 SSMR3PutBool(pSSM, pGstCtx->hwvirt.vmx.fInVmxNonRootMode); 2594 SSMR3PutBool(pSSM, pGstCtx->hwvirt.vmx.fInterceptEvents); 2595 SSMR3PutBool(pSSM, pGstCtx->hwvirt.vmx.fNmiUnblockingIret); 2596 SSMR3PutStructEx(pSSM, pGstCtx->hwvirt.vmx.pVmcsR3, sizeof(VMXVVMCS), 0, g_aVmxHwvirtVmcs, NULL); 2597 SSMR3PutStructEx(pSSM, pGstCtx->hwvirt.vmx.pShadowVmcsR3, sizeof(VMXVVMCS), 0, g_aVmxHwvirtVmcs, NULL); 2598 SSMR3PutMem(pSSM, pGstCtx->hwvirt.vmx.pvVmreadBitmapR3, VMX_V_VMREAD_VMWRITE_BITMAP_SIZE); 2599 SSMR3PutMem(pSSM, pGstCtx->hwvirt.vmx.pvVmwriteBitmapR3, VMX_V_VMREAD_VMWRITE_BITMAP_SIZE); 2600 SSMR3PutMem(pSSM, pGstCtx->hwvirt.vmx.pEntryMsrLoadAreaR3, VMX_V_AUTOMSR_AREA_SIZE); 2601 SSMR3PutMem(pSSM, pGstCtx->hwvirt.vmx.pExitMsrStoreAreaR3, VMX_V_AUTOMSR_AREA_SIZE); 2602 SSMR3PutMem(pSSM, pGstCtx->hwvirt.vmx.pExitMsrLoadAreaR3, VMX_V_AUTOMSR_AREA_SIZE); 2603 SSMR3PutMem(pSSM, pGstCtx->hwvirt.vmx.pvMsrBitmapR3, VMX_V_MSR_BITMAP_SIZE); 2604 SSMR3PutMem(pSSM, pGstCtx->hwvirt.vmx.pvIoBitmapR3, VMX_V_IO_BITMAP_A_SIZE + VMX_V_IO_BITMAP_B_SIZE); 2605 SSMR3PutU64(pSSM, pGstCtx->hwvirt.vmx.uFirstPauseLoopTick); 2606 SSMR3PutU64(pSSM, pGstCtx->hwvirt.vmx.uPrevPauseTick); 2607 SSMR3PutU64(pSSM, pGstCtx->hwvirt.vmx.uEntryTick); 2608 SSMR3PutU16(pSSM, pGstCtx->hwvirt.vmx.offVirtApicWrite); 2609 SSMR3PutBool(pSSM, pGstCtx->hwvirt.vmx.fVirtNmiBlocking); 2610 SSMR3PutU64(pSSM, pGstCtx->hwvirt.vmx.Msrs.u64FeatCtrl); 2611 SSMR3PutU64(pSSM, pGstCtx->hwvirt.vmx.Msrs.u64Basic); 2612 SSMR3PutU64(pSSM, pGstCtx->hwvirt.vmx.Msrs.PinCtls.u); 2613 SSMR3PutU64(pSSM, pGstCtx->hwvirt.vmx.Msrs.ProcCtls.u); 2614 SSMR3PutU64(pSSM, pGstCtx->hwvirt.vmx.Msrs.ProcCtls2.u); 2615 SSMR3PutU64(pSSM, pGstCtx->hwvirt.vmx.Msrs.ExitCtls.u); 2616 SSMR3PutU64(pSSM, pGstCtx->hwvirt.vmx.Msrs.EntryCtls.u); 2617 SSMR3PutU64(pSSM, pGstCtx->hwvirt.vmx.Msrs.TruePinCtls.u); 2618 SSMR3PutU64(pSSM, pGstCtx->hwvirt.vmx.Msrs.TrueProcCtls.u); 2619 SSMR3PutU64(pSSM, pGstCtx->hwvirt.vmx.Msrs.TrueEntryCtls.u); 2620 SSMR3PutU64(pSSM, pGstCtx->hwvirt.vmx.Msrs.TrueExitCtls.u); 2621 SSMR3PutU64(pSSM, pGstCtx->hwvirt.vmx.Msrs.u64Misc); 2622 SSMR3PutU64(pSSM, pGstCtx->hwvirt.vmx.Msrs.u64Cr0Fixed0); 2623 SSMR3PutU64(pSSM, pGstCtx->hwvirt.vmx.Msrs.u64Cr0Fixed1); 2624 SSMR3PutU64(pSSM, pGstCtx->hwvirt.vmx.Msrs.u64Cr4Fixed0); 2625 SSMR3PutU64(pSSM, pGstCtx->hwvirt.vmx.Msrs.u64Cr4Fixed1); 2626 SSMR3PutU64(pSSM, pGstCtx->hwvirt.vmx.Msrs.u64VmcsEnum); 2627 SSMR3PutU64(pSSM, pGstCtx->hwvirt.vmx.Msrs.u64VmFunc); 2628 SSMR3PutU64(pSSM, pGstCtx->hwvirt.vmx.Msrs.u64EptVpidCaps); 2629 } 2338 2630 SSMR3PutU32(pSSM, pVCpu->cpum.s.fUseFlags); 2339 2631 SSMR3PutU32(pSSM, pVCpu->cpum.s.fChanged); … … 2368 2660 * Validate version. 
2369 2661 */ 2370 if ( uVersion != CPUM_SAVED_STATE_VERSION_HWVIRT_SVM 2662 if ( uVersion != CPUM_SAVED_STATE_VERSION_HWVIRT_VMX_IEM 2663 && uVersion != CPUM_SAVED_STATE_VERSION_HWVIRT_SVM 2371 2664 && uVersion != CPUM_SAVED_STATE_VERSION_XSAVE 2372 2665 && uVersion != CPUM_SAVED_STATE_VERSION_GOOD_CPUID_COUNT … … 2577 2870 } 2578 2871 } 2579 /** @todo NSTVMX: Load VMX state. */ 2872 if (uVersion >= CPUM_SAVED_STATE_VERSION_HWVIRT_VMX_IEM) 2873 { 2874 if (pVM->cpum.s.GuestFeatures.fVmx) 2875 { 2876 Assert(pGstCtx->hwvirt.vmx.CTX_SUFF(pVmcs)); 2877 SSMR3GetGCPhys(pSSM, &pGstCtx->hwvirt.vmx.GCPhysVmxon); 2878 SSMR3GetGCPhys(pSSM, &pGstCtx->hwvirt.vmx.GCPhysVmcs); 2879 SSMR3GetGCPhys(pSSM, &pGstCtx->hwvirt.vmx.GCPhysShadowVmcs); 2880 SSMR3GetU32(pSSM, (uint32_t *)&pGstCtx->hwvirt.vmx.enmDiag); 2881 SSMR3GetU32(pSSM, (uint32_t *)&pGstCtx->hwvirt.vmx.enmAbort); 2882 SSMR3GetU32(pSSM, &pGstCtx->hwvirt.vmx.uAbortAux); 2883 SSMR3GetBool(pSSM, &pGstCtx->hwvirt.vmx.fInVmxRootMode); 2884 SSMR3GetBool(pSSM, &pGstCtx->hwvirt.vmx.fInVmxNonRootMode); 2885 SSMR3GetBool(pSSM, &pGstCtx->hwvirt.vmx.fInterceptEvents); 2886 SSMR3GetBool(pSSM, &pGstCtx->hwvirt.vmx.fNmiUnblockingIret); 2887 SSMR3GetStructEx(pSSM, pGstCtx->hwvirt.vmx.pVmcsR3, sizeof(VMXVVMCS), 0, g_aVmxHwvirtVmcs, NULL); 2888 SSMR3GetStructEx(pSSM, pGstCtx->hwvirt.vmx.pShadowVmcsR3, sizeof(VMXVVMCS), 0, g_aVmxHwvirtVmcs, NULL); 2889 SSMR3GetMem(pSSM, pGstCtx->hwvirt.vmx.pvVmreadBitmapR3, VMX_V_VMREAD_VMWRITE_BITMAP_SIZE); 2890 SSMR3GetMem(pSSM, pGstCtx->hwvirt.vmx.pvVmwriteBitmapR3, VMX_V_VMREAD_VMWRITE_BITMAP_SIZE); 2891 SSMR3GetMem(pSSM, pGstCtx->hwvirt.vmx.pEntryMsrLoadAreaR3, VMX_V_AUTOMSR_AREA_SIZE); 2892 SSMR3GetMem(pSSM, pGstCtx->hwvirt.vmx.pExitMsrStoreAreaR3, VMX_V_AUTOMSR_AREA_SIZE); 2893 SSMR3GetMem(pSSM, pGstCtx->hwvirt.vmx.pExitMsrLoadAreaR3, VMX_V_AUTOMSR_AREA_SIZE); 2894 SSMR3GetMem(pSSM, pGstCtx->hwvirt.vmx.pvMsrBitmapR3, VMX_V_MSR_BITMAP_SIZE); 2895 SSMR3GetMem(pSSM, pGstCtx->hwvirt.vmx.pvIoBitmapR3, VMX_V_IO_BITMAP_A_SIZE + VMX_V_IO_BITMAP_B_SIZE); 2896 SSMR3GetU64(pSSM, &pGstCtx->hwvirt.vmx.uFirstPauseLoopTick); 2897 SSMR3GetU64(pSSM, &pGstCtx->hwvirt.vmx.uPrevPauseTick); 2898 SSMR3GetU64(pSSM, &pGstCtx->hwvirt.vmx.uEntryTick); 2899 SSMR3GetU16(pSSM, &pGstCtx->hwvirt.vmx.offVirtApicWrite); 2900 SSMR3GetBool(pSSM, &pGstCtx->hwvirt.vmx.fVirtNmiBlocking); 2901 SSMR3GetU64(pSSM, &pGstCtx->hwvirt.vmx.Msrs.u64FeatCtrl); 2902 SSMR3GetU64(pSSM, &pGstCtx->hwvirt.vmx.Msrs.u64Basic); 2903 SSMR3GetU64(pSSM, &pGstCtx->hwvirt.vmx.Msrs.PinCtls.u); 2904 SSMR3GetU64(pSSM, &pGstCtx->hwvirt.vmx.Msrs.ProcCtls.u); 2905 SSMR3GetU64(pSSM, &pGstCtx->hwvirt.vmx.Msrs.ProcCtls2.u); 2906 SSMR3GetU64(pSSM, &pGstCtx->hwvirt.vmx.Msrs.ExitCtls.u); 2907 SSMR3GetU64(pSSM, &pGstCtx->hwvirt.vmx.Msrs.EntryCtls.u); 2908 SSMR3GetU64(pSSM, &pGstCtx->hwvirt.vmx.Msrs.TruePinCtls.u); 2909 SSMR3GetU64(pSSM, &pGstCtx->hwvirt.vmx.Msrs.TrueProcCtls.u); 2910 SSMR3GetU64(pSSM, &pGstCtx->hwvirt.vmx.Msrs.TrueEntryCtls.u); 2911 SSMR3GetU64(pSSM, &pGstCtx->hwvirt.vmx.Msrs.TrueExitCtls.u); 2912 SSMR3GetU64(pSSM, &pGstCtx->hwvirt.vmx.Msrs.u64Misc); 2913 SSMR3GetU64(pSSM, &pGstCtx->hwvirt.vmx.Msrs.u64Cr0Fixed0); 2914 SSMR3GetU64(pSSM, &pGstCtx->hwvirt.vmx.Msrs.u64Cr0Fixed1); 2915 SSMR3GetU64(pSSM, &pGstCtx->hwvirt.vmx.Msrs.u64Cr4Fixed0); 2916 SSMR3GetU64(pSSM, &pGstCtx->hwvirt.vmx.Msrs.u64Cr4Fixed1); 2917 SSMR3GetU64(pSSM, &pGstCtx->hwvirt.vmx.Msrs.u64VmcsEnum); 2918 SSMR3GetU64(pSSM, &pGstCtx->hwvirt.vmx.Msrs.u64VmFunc); 2919 SSMR3GetU64(pSSM, 
&pGstCtx->hwvirt.vmx.Msrs.u64EptVpidCaps); 2920 } 2921 } 2580 2922 } 2581 2923 else … … 2678 3020 2679 3021 /* 2680 * Guest CPUIDs .3022 * Guest CPUIDs (and VMX MSR features). 2681 3023 */ 2682 3024 if (uVersion >= CPUM_SAVED_STATE_VERSION_VER3_2) … … 2684 3026 CPUMMSRS GuestMsrs; 2685 3027 RT_ZERO(GuestMsrs); 2686 if (pVM->cpum.s.GuestFeatures.fVmx) 3028 3029 CPUMFEATURES BaseFeatures; 3030 bool const fVmxGstFeat = pVM->cpum.s.GuestFeatures.fVmx; 3031 if (fVmxGstFeat) 3032 { 3033 /* 3034 * At this point the MSRs in the guest CPU-context are loaded with the guest VMX MSRs from the saved state. 3035 * However the VMX sub-features have not been exploded yet. So cache the base (host derived) VMX features 3036 * here so we can compare them for compatibility after exploding guest features. 3037 */ 3038 BaseFeatures = pVM->cpum.s.GuestFeatures; 3039 3040 /* Use the VMX MSR features from the saved state while exploding guest features. */ 2687 3041 GuestMsrs.hwvirt.vmx = pVM->aCpus[0].cpum.s.Guest.hwvirt.vmx.Msrs; 2688 return cpumR3LoadCpuId(pVM, pSSM, uVersion, &GuestMsrs); 3042 } 3043 3044 /* Load CPUID and explode guest features. */ 3045 rc = cpumR3LoadCpuId(pVM, pSSM, uVersion, &GuestMsrs); 3046 if (fVmxGstFeat) 3047 { 3048 /* 3049 * Check if the exploded VMX features from the saved state are compatible with the host-derived features 3050 * we cached earlier (above). The is required if we use hardware-assisted nested-guest execution with 3051 * VMX features presented to the guest. 3052 */ 3053 bool const fIsCompat = cpumR3AreVmxCpuFeaturesCompatible(pVM, &BaseFeatures, &pVM->cpum.s.GuestFeatures); 3054 if (!fIsCompat) 3055 return VERR_CPUM_INVALID_HWVIRT_FEAT_COMBO; 3056 } 3057 return rc; 2689 3058 } 2690 3059 return cpumR3LoadCpuIdPre32(pVM, pSSM, uVersion); … … 3716 4085 pHlp->pfnPrintf(pHlp, " uFirstPauseLoopTick = %RX64\n", pCtx->hwvirt.vmx.uFirstPauseLoopTick); 3717 4086 pHlp->pfnPrintf(pHlp, " uPrevPauseTick = %RX64\n", pCtx->hwvirt.vmx.uPrevPauseTick); 3718 pHlp->pfnPrintf(pHlp, " u VmentryTick = %RX64\n", pCtx->hwvirt.vmx.uVmentryTick);4087 pHlp->pfnPrintf(pHlp, " uEntryTick = %RX64\n", pCtx->hwvirt.vmx.uEntryTick); 3719 4088 pHlp->pfnPrintf(pHlp, " offVirtApicWrite = %#RX16\n", pCtx->hwvirt.vmx.offVirtApicWrite); 4089 pHlp->pfnPrintf(pHlp, " fVirtNmiBlocking = %RTbool\n", pCtx->hwvirt.vmx.fVirtNmiBlocking); 3720 4090 pHlp->pfnPrintf(pHlp, " VMCS cache:\n"); 3721 4091 cpumR3InfoVmxVmcs(pHlp, pCtx->hwvirt.vmx.pVmcsR3, " " /* pszPrefix */); -
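Of the CPUM.cpp changes above, the re-enabled cpumR3AreVmxCpuFeaturesCompatible() is the piece that decides whether a saved state can be accepted: the guest (saved) VMX feature set must be a subset of the host-derived base set. A stripped-down, self-contained sketch of that subset test; the structure and function names here are placeholders, and the real function packs a few dozen fVmx* flags the same way:

#include <stdbool.h>
#include <stdint.h>

/* Two illustrative VMX feature flags; the real feature structure has many more. */
typedef struct EXAMPLEFEATURES
{
    uint32_t fVmxInsOutInfo : 1;
    uint32_t fVmxExtIntExit : 1;
} EXAMPLEFEATURES;

static bool exampleAreVmxFeaturesCompatible(EXAMPLEFEATURES const *pBase, EXAMPLEFEATURES const *pGst)
{
    /* Pack the booleans into bitmasks, one bit per feature. */
    uint64_t const fBase = ((uint64_t)pBase->fVmxInsOutInfo << 0) | ((uint64_t)pBase->fVmxExtIntExit << 1);
    uint64_t const fGst  = ((uint64_t)pGst->fVmxInsOutInfo  << 0) | ((uint64_t)pGst->fVmxExtIntExit  << 1);

    /* Any bit set in fGst but clear in fBase means the saved state exposes a
       VMX feature the current host cannot provide, so reject it. */
    return (fBase | fGst) == fBase;
}

The load path above uses the same idea: it caches the host-derived base features before exploding the guest features from the saved state, then fails the load with VERR_CPUM_INVALID_HWVIRT_FEAT_COMBO when the check returns false.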
trunk/src/VBox/VMM/VMMR3/EM.cpp
r77611 r78220 2146 2146 { 2147 2147 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexitApicWrite(pVCpu)); 2148 Assert(rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE);2149 UPDATE_RC();2148 if (rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE) 2149 UPDATE_RC(); 2150 2150 } 2151 2151 … … 2169 2169 { 2170 2170 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexitPreemptTimer(pVCpu)); 2171 if (rc2 == VINF_VMX_INTERCEPT_NOT_ACTIVE) 2172 rc2 = VINF_SUCCESS; 2173 UPDATE_RC(); 2171 if (rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE) 2172 UPDATE_RC(); 2174 2173 } 2175 2174 … … 2198 2197 * NMIs (take priority over external interrupts). 2199 2198 */ 2200 Assert(!HMR3IsEventPending(pVCpu));2201 2199 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI) 2202 2200 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS)) -
trunk/src/VBox/VMM/VMMR3/HM.cpp
r77591 r78220 734 734 HM_REG_COUNTER(&pVCpu->hm.s.StatExitGuestXF, "/HM/CPU%d/Exit/Trap/Gst/#XF", "Guest #XF (extended math fault, SIMD FPU) exception."); 735 735 HM_REG_COUNTER(&pVCpu->hm.s.StatExitGuestXcpUnk, "/HM/CPU%d/Exit/Trap/Gst/Other", "Other guest exceptions."); 736 HM_REG_COUNTER(&pVCpu->hm.s.StatExitHlt, "/HM/CPU%d/Exit/Instr/Hlt", "HLT instruction."); 737 HM_REG_COUNTER(&pVCpu->hm.s.StatExitRdmsr, "/HM/CPU%d/Exit/Instr/Rdmsr", "RDMSR instruction."); 738 HM_REG_COUNTER(&pVCpu->hm.s.StatExitWrmsr, "/HM/CPU%d/Exit/Instr/Wrmsr", "WRMSR instruction."); 739 HM_REG_COUNTER(&pVCpu->hm.s.StatExitMwait, "/HM/CPU%d/Exit/Instr/Mwait", "MWAIT instruction."); 740 HM_REG_COUNTER(&pVCpu->hm.s.StatExitMonitor, "/HM/CPU%d/Exit/Instr/Monitor", "MONITOR instruction."); 736 HM_REG_COUNTER(&pVCpu->hm.s.StatExitRdmsr, "/HM/CPU%d/Exit/Instr/Rdmsr", "MSR read."); 737 HM_REG_COUNTER(&pVCpu->hm.s.StatExitWrmsr, "/HM/CPU%d/Exit/Instr/Wrmsr", "MSR write."); 741 738 HM_REG_COUNTER(&pVCpu->hm.s.StatExitDRxWrite, "/HM/CPU%d/Exit/Instr/DR-Write", "Debug register write."); 742 739 HM_REG_COUNTER(&pVCpu->hm.s.StatExitDRxRead, "/HM/CPU%d/Exit/Instr/DR-Read", "Debug register read."); … … 753 750 HM_REG_COUNTER(&pVCpu->hm.s.StatExitClts, "/HM/CPU%d/Exit/Instr/CLTS", "CLTS instruction."); 754 751 HM_REG_COUNTER(&pVCpu->hm.s.StatExitLmsw, "/HM/CPU%d/Exit/Instr/LMSW", "LMSW instruction."); 755 HM_REG_COUNTER(&pVCpu->hm.s.StatExitCli, "/HM/CPU%d/Exit/Instr/Cli", "CLI instruction.");756 HM_REG_COUNTER(&pVCpu->hm.s.StatExitSti, "/HM/CPU%d/Exit/Instr/Sti", "STI instruction.");757 HM_REG_COUNTER(&pVCpu->hm.s.StatExitPushf, "/HM/CPU%d/Exit/Instr/Pushf", "PUSHF instruction.");758 HM_REG_COUNTER(&pVCpu->hm.s.StatExitPopf, "/HM/CPU%d/Exit/Instr/Popf", "POPF instruction.");759 HM_REG_COUNTER(&pVCpu->hm.s.StatExitIret, "/HM/CPU%d/Exit/Instr/Iret", "IRET instruction.");760 HM_REG_COUNTER(&pVCpu->hm.s.StatExitInt, "/HM/CPU%d/Exit/Instr/Int", "INT instruction.");761 752 HM_REG_COUNTER(&pVCpu->hm.s.StatExitXdtrAccess, "/HM/CPU%d/Exit/Instr/XdtrAccess", "GDTR, IDTR, LDTR access."); 762 HM_REG_COUNTER(&pVCpu->hm.s.StatExitIOWrite, "/HM/CPU%d/Exit/I O/Write", "I/O write.");763 HM_REG_COUNTER(&pVCpu->hm.s.StatExitIORead, "/HM/CPU%d/Exit/I O/Read", "I/O read.");764 HM_REG_COUNTER(&pVCpu->hm.s.StatExitIOStringWrite, "/HM/CPU%d/Exit/I O/WriteString", "String I/O write.");765 HM_REG_COUNTER(&pVCpu->hm.s.StatExitIOStringRead, "/HM/CPU%d/Exit/I O/ReadString", "String I/O read.");766 HM_REG_COUNTER(&pVCpu->hm.s.StatExitIntWindow, "/HM/CPU%d/Exit/IntWindow", "Interrupt-window exit. Guest is ready to receive interrupts again.");753 HM_REG_COUNTER(&pVCpu->hm.s.StatExitIOWrite, "/HM/CPU%d/Exit/Instr/IO/Write", "I/O write."); 754 HM_REG_COUNTER(&pVCpu->hm.s.StatExitIORead, "/HM/CPU%d/Exit/Instr/IO/Read", "I/O read."); 755 HM_REG_COUNTER(&pVCpu->hm.s.StatExitIOStringWrite, "/HM/CPU%d/Exit/Instr/IO/WriteString", "String I/O write."); 756 HM_REG_COUNTER(&pVCpu->hm.s.StatExitIOStringRead, "/HM/CPU%d/Exit/Instr/IO/ReadString", "String I/O read."); 757 HM_REG_COUNTER(&pVCpu->hm.s.StatExitIntWindow, "/HM/CPU%d/Exit/IntWindow", "Interrupt-window exit. 
Guest is ready to receive interrupts."); 767 758 HM_REG_COUNTER(&pVCpu->hm.s.StatExitExtInt, "/HM/CPU%d/Exit/ExtInt", "Physical maskable interrupt (host)."); 768 759 #endif … … 772 763 HM_REG_COUNTER(&pVCpu->hm.s.StatExitTprBelowThreshold, "/HM/CPU%d/Exit/TprBelowThreshold", "TPR lowered below threshold by the guest."); 773 764 HM_REG_COUNTER(&pVCpu->hm.s.StatExitTaskSwitch, "/HM/CPU%d/Exit/TaskSwitch", "Task switch."); 774 HM_REG_COUNTER(&pVCpu->hm.s.StatExitMtf, "/HM/CPU%d/Exit/MonitorTrapFlag", "Monitor Trap Flag.");775 765 HM_REG_COUNTER(&pVCpu->hm.s.StatExitApicAccess, "/HM/CPU%d/Exit/ApicAccess", "APIC access. Guest attempted to access memory at a physical address on the APIC-access page."); 776 766 … … 961 951 PVMCPU pVCpu = &pVM->aCpus[i]; 962 952 963 PVMXVMCS BATCHCACHE pVmcsCache = &pVCpu->hm.s.vmx.VmcsBatchCache;953 PVMXVMCSCACHE pVmcsCache = &pVCpu->hm.s.vmx.VmcsCache; 964 954 strcpy((char *)pVmcsCache->aMagic, "VMCSCACHE Magic"); 965 955 pVmcsCache->uMagic = UINT64_C(0xdeadbeefdeadbeef); … … 1493 1483 LogRel(("HM: Max resume loops = %u\n", pVM->hm.s.cMaxResumeLoops)); 1494 1484 LogRel(("HM: Host CR4 = %#RX64\n", pVM->hm.s.vmx.u64HostCr4)); 1495 LogRel(("HM: Host EFER = %#RX64\n", pVM->hm.s.vmx.u64Host Efer));1485 LogRel(("HM: Host EFER = %#RX64\n", pVM->hm.s.vmx.u64HostMsrEfer)); 1496 1486 LogRel(("HM: MSR_IA32_SMM_MONITOR_CTL = %#RX64\n", pVM->hm.s.vmx.u64HostSmmMonitorCtl)); 1497 1487 … … 1527 1517 for (VMCPUID i = 0; i < pVM->cCpus; i++) 1528 1518 { 1529 LogRel(("HM: VCPU%3d: MSR bitmap physaddr = %#RHp\n", i, pVM->aCpus[i].hm.s.vmx.HCPhysMsrBitmap)); 1530 LogRel(("HM: VCPU%3d: VMCS physaddr = %#RHp\n", i, pVM->aCpus[i].hm.s.vmx.HCPhysVmcs)); 1531 } 1519 PCVMXVMCSINFO pVmcsInfo = &pVM->aCpus[i].hm.s.vmx.VmcsInfo; 1520 LogRel(("HM: VCPU%3d: MSR bitmap physaddr = %#RHp\n", i, pVmcsInfo->HCPhysMsrBitmap)); 1521 LogRel(("HM: VCPU%3d: VMCS physaddr = %#RHp\n", i, pVmcsInfo->HCPhysVmcs)); 1522 } 1523 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX 1524 if (pVM->cpum.ro.GuestFeatures.fVmx) 1525 { 1526 LogRel(("HM: Nested-guest:\n")); 1527 for (VMCPUID i = 0; i < pVM->cCpus; i++) 1528 { 1529 PCVMXVMCSINFO pVmcsInfoNstGst = &pVM->aCpus[i].hm.s.vmx.VmcsInfoNstGst; 1530 LogRel(("HM: VCPU%3d: MSR bitmap physaddr = %#RHp\n", i, pVmcsInfoNstGst->HCPhysMsrBitmap)); 1531 LogRel(("HM: VCPU%3d: VMCS physaddr = %#RHp\n", i, pVmcsInfoNstGst->HCPhysVmcs)); 1532 } 1533 } 1534 #endif 1532 1535 1533 1536 /* … … 1681 1684 else if (CPUMR3GetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE)) 1682 1685 { 1683 if (pVM->hm.s.vmx.u64Host Efer & MSR_K6_EFER_NXE)1686 if (pVM->hm.s.vmx.u64HostMsrEfer & MSR_K6_EFER_NXE) 1684 1687 CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX); 1685 1688 else … … 1974 1977 pVCpu->hm.s.paStatInjectedIrqsR0 = NIL_RTR0PTR; 1975 1978 } 1979 # if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX) 1980 if (pVCpu->hm.s.paStatNestedExitReason) 1981 { 1982 MMHyperFree(pVM, pVCpu->hm.s.paStatNestedExitReason); 1983 pVCpu->hm.s.paStatNestedExitReason = NULL; 1984 pVCpu->hm.s.paStatNestedExitReasonR0 = NIL_RTR0PTR; 1985 } 1986 # endif 1976 1987 #endif 1977 1988 1978 1989 #ifdef VBOX_WITH_CRASHDUMP_MAGIC 1979 memset(pVCpu->hm.s.vmx.Vmcs BatchCache.aMagic, 0, sizeof(pVCpu->hm.s.vmx.VmcsBatchCache.aMagic));1980 pVCpu->hm.s.vmx.Vmcs BatchCache.uMagic = 0;1981 pVCpu->hm.s.vmx.Vmcs BatchCache.uPos = 0xffffffff;1990 memset(pVCpu->hm.s.vmx.VmcsCache.aMagic, 0, sizeof(pVCpu->hm.s.vmx.VmcsCache.aMagic)); 1991 pVCpu->hm.s.vmx.VmcsCache.uMagic = 0; 1992 pVCpu->hm.s.vmx.VmcsCache.uPos = 
0xffffffff; 1982 1993 #endif 1983 1994 } … … 1995 2006 VMMR3_INT_DECL(void) HMR3ResetCpu(PVMCPU pVCpu) 1996 2007 { 1997 /* Sync. entire state on VM reset R0-reentry. It's safe to reset2008 /* Sync. entire state on VM reset ring-0 re-entry. It's safe to reset 1998 2009 the HM flags here, all other EMTs are in ring-3. See VMR3Reset(). */ 1999 2010 pVCpu->hm.s.fCtxChanged |= HM_CHANGED_HOST_CONTEXT | HM_CHANGED_ALL_GUEST; 2000 2011 2001 pVCpu->hm.s.fActive = false; 2002 pVCpu->hm.s.Event.fPending = false; 2003 pVCpu->hm.s.vmx.fWasInRealMode = true; 2004 pVCpu->hm.s.vmx.u64MsrApicBase = 0; 2005 pVCpu->hm.s.vmx.fSwitchedTo64on32 = false; 2012 pVCpu->hm.s.fActive = false; 2013 pVCpu->hm.s.Event.fPending = false; 2014 pVCpu->hm.s.vmx.u64GstMsrApicBase = 0; 2015 pVCpu->hm.s.vmx.VmcsInfo.fSwitchedTo64on32 = false; 2016 pVCpu->hm.s.vmx.VmcsInfo.fWasInRealMode = true; 2017 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX 2018 if (pVCpu->CTX_SUFF(pVM)->cpum.ro.GuestFeatures.fVmx) 2019 { 2020 pVCpu->hm.s.vmx.VmcsInfoNstGst.fSwitchedTo64on32 = false; 2021 pVCpu->hm.s.vmx.VmcsInfoNstGst.fWasInRealMode = true; 2022 } 2023 #endif 2006 2024 2007 2025 /* Reset the contents of the read cache. */ 2008 PVMXVMCS BATCHCACHE pVmcsCache = &pVCpu->hm.s.vmx.VmcsBatchCache;2026 PVMXVMCSCACHE pVmcsCache = &pVCpu->hm.s.vmx.VmcsCache; 2009 2027 for (unsigned j = 0; j < pVmcsCache->Read.cValidEntries; j++) 2010 2028 pVmcsCache->Read.aFieldVal[j] = 0; … … 2847 2865 2848 2866 /** 2849 * Checks if internal events are pending. In that case we are not allowed to dispatch interrupts.2850 *2851 * @returns true if an internal event is pending, otherwise false.2852 * @param pVCpu The cross context virtual CPU structure.2853 */2854 VMMR3_INT_DECL(bool) HMR3IsEventPending(PVMCPU pVCpu)2855 {2856 return HMIsEnabled(pVCpu->pVMR3)2857 && pVCpu->hm.s.Event.fPending;2858 }2859 2860 2861 /**2862 2867 * Checks if the VMX-preemption timer is being used. 2863 2868 * … … 2884 2889 for (VMCPUID i = 0; i < pVM->cCpus; i++) 2885 2890 { 2886 PVMCPU pVCpu = &pVM->aCpus[i]; 2891 PVMCPU pVCpu = &pVM->aCpus[i]; 2892 PCVMXVMCSINFO pVmcsInfo = hmGetVmxActiveVmcsInfo(pVCpu); 2893 bool const fNstGstVmcsActive = pVCpu->hm.s.vmx.fSwitchedToNstGstVmcs; 2887 2894 switch (iStatusCode) 2888 2895 { … … 2893 2900 2894 2901 case VERR_VMX_INVALID_VMCS_PTR: 2902 { 2895 2903 LogRel(("HM: VERR_VMX_INVALID_VMCS_PTR:\n")); 2896 LogRel(("HM: CPU[%u] Current pointer %#RGp vs %#RGp\n", i, pVCpu->hm.s.vmx.LastError.u64VmcsPhys, 2897 pVCpu->hm.s.vmx.HCPhysVmcs)); 2904 LogRel(("HM: CPU[%u] %s VMCS active\n", i, fNstGstVmcsActive ? "Nested-guest" : "Guest")); 2905 LogRel(("HM: CPU[%u] Current pointer %#RHp vs %#RHp\n", i, pVCpu->hm.s.vmx.LastError.HCPhysCurrentVmcs, 2906 pVmcsInfo->HCPhysVmcs)); 2898 2907 LogRel(("HM: CPU[%u] Current VMCS version %#x\n", i, pVCpu->hm.s.vmx.LastError.u32VmcsRev)); 2899 2908 LogRel(("HM: CPU[%u] Entered Host Cpu %u\n", i, pVCpu->hm.s.vmx.LastError.idEnteredCpu)); 2900 2909 LogRel(("HM: CPU[%u] Current Host Cpu %u\n", i, pVCpu->hm.s.vmx.LastError.idCurrentCpu)); 2901 2910 break; 2911 } 2902 2912 2903 2913 case VERR_VMX_UNABLE_TO_START_VM: 2904 2914 LogRel(("HM: VERR_VMX_UNABLE_TO_START_VM:\n")); 2915 LogRel(("HM: CPU[%u] %s VMCS active\n", i, fNstGstVmcsActive ? 
"Nested-guest" : "Guest")); 2905 2916 LogRel(("HM: CPU[%u] Instruction error %#x\n", i, pVCpu->hm.s.vmx.LastError.u32InstrError)); 2906 2917 LogRel(("HM: CPU[%u] Exit reason %#x\n", i, pVCpu->hm.s.vmx.LastError.u32ExitReason)); … … 2914 2925 else if (pVCpu->hm.s.vmx.LastError.u32InstrError == VMXINSTRERR_VMENTRY_INVALID_CTLS) 2915 2926 { 2916 LogRel(("HM: CPU[%u] PinCtls %#RX32\n", i, pVCpu->hm.s.vmx.Ctls.u32PinCtls));2927 LogRel(("HM: CPU[%u] PinCtls %#RX32\n", i, pVmcsInfo->u32PinCtls)); 2917 2928 { 2918 uint32_t const u32Val = pV Cpu->hm.s.vmx.Ctls.u32PinCtls;2929 uint32_t const u32Val = pVmcsInfo->u32PinCtls; 2919 2930 HMVMX_LOGREL_FEAT(u32Val, VMX_PIN_CTLS_EXT_INT_EXIT ); 2920 2931 HMVMX_LOGREL_FEAT(u32Val, VMX_PIN_CTLS_NMI_EXIT ); … … 2923 2934 HMVMX_LOGREL_FEAT(u32Val, VMX_PIN_CTLS_POSTED_INT ); 2924 2935 } 2925 LogRel(("HM: CPU[%u] ProcCtls %#RX32\n", i, pVCpu->hm.s.vmx.Ctls.u32ProcCtls));2936 LogRel(("HM: CPU[%u] ProcCtls %#RX32\n", i, pVmcsInfo->u32ProcCtls)); 2926 2937 { 2927 uint32_t const u32Val = pV Cpu->hm.s.vmx.Ctls.u32ProcCtls;2938 uint32_t const u32Val = pVmcsInfo->u32ProcCtls; 2928 2939 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_INT_WINDOW_EXIT ); 2929 2940 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_USE_TSC_OFFSETTING); … … 2948 2959 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_USE_SECONDARY_CTLS); 2949 2960 } 2950 LogRel(("HM: CPU[%u] ProcCtls2 %#RX32\n", i, pVCpu->hm.s.vmx.Ctls.u32ProcCtls2));2961 LogRel(("HM: CPU[%u] ProcCtls2 %#RX32\n", i, pVmcsInfo->u32ProcCtls2)); 2951 2962 { 2952 uint32_t const u32Val = pV Cpu->hm.s.vmx.Ctls.u32ProcCtls2;2963 uint32_t const u32Val = pVmcsInfo->u32ProcCtls2; 2953 2964 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_VIRT_APIC_ACCESS ); 2954 2965 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_EPT ); … … 2974 2985 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_TSC_SCALING ); 2975 2986 } 2976 LogRel(("HM: CPU[%u] EntryCtls %#RX32\n", i, pVCpu->hm.s.vmx.Ctls.u32EntryCtls));2987 LogRel(("HM: CPU[%u] EntryCtls %#RX32\n", i, pVmcsInfo->u32EntryCtls)); 2977 2988 { 2978 uint32_t const u32Val = pV Cpu->hm.s.vmx.Ctls.u32EntryCtls;2989 uint32_t const u32Val = pVmcsInfo->u32EntryCtls; 2979 2990 HMVMX_LOGREL_FEAT(u32Val, VMX_ENTRY_CTLS_LOAD_DEBUG ); 2980 2991 HMVMX_LOGREL_FEAT(u32Val, VMX_ENTRY_CTLS_IA32E_MODE_GUEST ); … … 2985 2996 HMVMX_LOGREL_FEAT(u32Val, VMX_ENTRY_CTLS_LOAD_EFER_MSR ); 2986 2997 } 2987 LogRel(("HM: CPU[%u] ExitCtls %#RX32\n", i, pVCpu->hm.s.vmx.Ctls.u32ExitCtls));2998 LogRel(("HM: CPU[%u] ExitCtls %#RX32\n", i, pVmcsInfo->u32ExitCtls)); 2988 2999 { 2989 uint32_t const u32Val = pV Cpu->hm.s.vmx.Ctls.u32ExitCtls;3000 uint32_t const u32Val = pVmcsInfo->u32ExitCtls; 2990 3001 HMVMX_LOGREL_FEAT(u32Val, VMX_EXIT_CTLS_SAVE_DEBUG ); 2991 3002 HMVMX_LOGREL_FEAT(u32Val, VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE ); … … 2998 3009 HMVMX_LOGREL_FEAT(u32Val, VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER ); 2999 3010 } 3000 LogRel(("HM: CPU[%u] HCPhysMsrBitmap %#RHp\n", i, pVCpu->hm.s.vmx.HCPhysMsrBitmap)); 3001 LogRel(("HM: CPU[%u] HCPhysGuestMsr %#RHp\n", i, pVCpu->hm.s.vmx.HCPhysGuestMsr)); 3002 LogRel(("HM: CPU[%u] HCPhysHostMsr %#RHp\n", i, pVCpu->hm.s.vmx.HCPhysHostMsr)); 3003 LogRel(("HM: CPU[%u] cMsrs %u\n", i, pVCpu->hm.s.vmx.cMsrs)); 3011 LogRel(("HM: CPU[%u] HCPhysMsrBitmap %#RHp\n", i, pVmcsInfo->HCPhysMsrBitmap)); 3012 LogRel(("HM: CPU[%u] HCPhysGuestMsrLoad %#RHp\n", i, pVmcsInfo->HCPhysGuestMsrLoad)); 3013 LogRel(("HM: CPU[%u] HCPhysGuestMsrStore %#RHp\n", i, pVmcsInfo->HCPhysGuestMsrStore)); 3014 LogRel(("HM: CPU[%u] HCPhysHostMsrLoad %#RHp\n", i, 
pVmcsInfo->HCPhysHostMsrLoad)); 3015 LogRel(("HM: CPU[%u] cEntryMsrLoad %u\n", i, pVmcsInfo->cEntryMsrLoad)); 3016 LogRel(("HM: CPU[%u] cExitMsrStore %u\n", i, pVmcsInfo->cExitMsrStore)); 3017 LogRel(("HM: CPU[%u] cExitMsrLoad %u\n", i, pVmcsInfo->cExitMsrLoad)); 3004 3018 } 3005 3019 /** @todo Log VM-entry event injection control fields … … 3249 3263 if (pVM->hm.s.vmx.fSupported) 3250 3264 { 3251 bool const fRealOnV86Active = pVCpu->hm.s.vmx.RealMode.fRealOnV86Active; 3265 PCVMXVMCSINFO pVmcsInfo = hmGetVmxActiveVmcsInfo(pVCpu); 3266 bool const fRealOnV86Active = pVmcsInfo->RealMode.fRealOnV86Active; 3267 bool const fNstGstVmcsActive = pVCpu->hm.s.vmx.fSwitchedToNstGstVmcs; 3268 3269 pHlp->pfnPrintf(pHlp, " %s VMCS active\n", fNstGstVmcsActive ? "Nested-guest" :" Guest"); 3252 3270 pHlp->pfnPrintf(pHlp, " Real-on-v86 active = %RTbool\n", fRealOnV86Active); 3253 3271 if (fRealOnV86Active) 3254 3272 { 3255 pHlp->pfnPrintf(pHlp, " EFlags = %#x\n", pV Cpu->hm.s.vmx.RealMode.Eflags.u32);3256 pHlp->pfnPrintf(pHlp, " Attr CS = %#x\n", pV Cpu->hm.s.vmx.RealMode.AttrCS.u);3257 pHlp->pfnPrintf(pHlp, " Attr SS = %#x\n", pV Cpu->hm.s.vmx.RealMode.AttrSS.u);3258 pHlp->pfnPrintf(pHlp, " Attr DS = %#x\n", pV Cpu->hm.s.vmx.RealMode.AttrDS.u);3259 pHlp->pfnPrintf(pHlp, " Attr ES = %#x\n", pV Cpu->hm.s.vmx.RealMode.AttrES.u);3260 pHlp->pfnPrintf(pHlp, " Attr FS = %#x\n", pV Cpu->hm.s.vmx.RealMode.AttrFS.u);3261 pHlp->pfnPrintf(pHlp, " Attr GS = %#x\n", pV Cpu->hm.s.vmx.RealMode.AttrGS.u);3273 pHlp->pfnPrintf(pHlp, " EFlags = %#x\n", pVmcsInfo->RealMode.Eflags.u32); 3274 pHlp->pfnPrintf(pHlp, " Attr CS = %#x\n", pVmcsInfo->RealMode.AttrCS.u); 3275 pHlp->pfnPrintf(pHlp, " Attr SS = %#x\n", pVmcsInfo->RealMode.AttrSS.u); 3276 pHlp->pfnPrintf(pHlp, " Attr DS = %#x\n", pVmcsInfo->RealMode.AttrDS.u); 3277 pHlp->pfnPrintf(pHlp, " Attr ES = %#x\n", pVmcsInfo->RealMode.AttrES.u); 3278 pHlp->pfnPrintf(pHlp, " Attr FS = %#x\n", pVmcsInfo->RealMode.AttrFS.u); 3279 pHlp->pfnPrintf(pHlp, " Attr GS = %#x\n", pVmcsInfo->RealMode.AttrGS.u); 3262 3280 } 3263 3281 } -
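Several of the HM.cpp hunks above stop reading pVCpu->hm.s.vmx fields directly and instead go through a VMCS-info structure returned by hmGetVmxActiveVmcsInfo(), so the same logging, reset and info code covers both the guest and the nested-guest VMCS. A hedged sketch of that selection pattern; the selector body shown is an assumption inferred from its call sites in the diff, not the actual implementation:

/* Assumed shape of the selector: hand back the nested-guest VMCS info when it
   is the one currently in use, otherwise the ordinary guest VMCS info. */
static PCVMXVMCSINFO exampleGetActiveVmcsInfo(PVMCPU pVCpu)
{
    if (pVCpu->hm.s.vmx.fSwitchedToNstGstVmcs)
        return &pVCpu->hm.s.vmx.VmcsInfoNstGst;
    return &pVCpu->hm.s.vmx.VmcsInfo;
}

/* Usage mirroring the diagnostic code above. */
static void exampleLogActiveVmcs(PVMCPU pVCpu, unsigned iCpu)
{
    PCVMXVMCSINFO pVmcsInfo         = exampleGetActiveVmcsInfo(pVCpu);
    bool const    fNstGstVmcsActive = pVCpu->hm.s.vmx.fSwitchedToNstGstVmcs;
    LogRel(("HM: CPU[%u] %s VMCS active\n", iCpu, fNstGstVmcsActive ? "Nested-guest" : "Guest"));
    LogRel(("HM: CPU[%u] VMCS physaddr = %#RHp\n", iCpu, pVmcsInfo->HCPhysVmcs));
    LogRel(("HM: CPU[%u] MSR bitmap physaddr = %#RHp\n", iCpu, pVmcsInfo->HCPhysMsrBitmap));
}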
trunk/src/VBox/VMM/include/CPUMInternal.h
r76678 r78220 121 121 * @{ */ 122 122 /** The current saved state version. */ 123 #define CPUM_SAVED_STATE_VERSION CPUM_SAVED_STATE_VERSION_HWVIRT_SVM 123 #define CPUM_SAVED_STATE_VERSION CPUM_SAVED_STATE_VERSION_HWVIRT_VMX_IEM 124 /** The saved state version including VMX hardware virtualization state (IEM only 125 * execution). */ 126 #define CPUM_SAVED_STATE_VERSION_HWVIRT_VMX_IEM 19 124 127 /** The saved state version including SVM hardware virtualization state. */ 125 128 #define CPUM_SAVED_STATE_VERSION_HWVIRT_SVM 18 -
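The bumped version constant above is what the CPUM.cpp load path in this same changeset keys on: the VMX hardware-virtualization block is only read from the stream when the saved-state version is at least CPUM_SAVED_STATE_VERSION_HWVIRT_VMX_IEM (19), so older saved states continue to load unchanged. A heavily trimmed sketch of that gating, using the SSM getters as they appear in the diff:

/* Inside the per-VCPU load path (trimmed sketch, not the full loader). */
if (uVersion >= CPUM_SAVED_STATE_VERSION_HWVIRT_VMX_IEM)    /* version 19 and later */
{
    if (pVM->cpum.s.GuestFeatures.fVmx)
    {
        SSMR3GetGCPhys(pSSM, &pGstCtx->hwvirt.vmx.GCPhysVmxon);
        SSMR3GetGCPhys(pSSM, &pGstCtx->hwvirt.vmx.GCPhysVmcs);
        SSMR3GetGCPhys(pSSM, &pGstCtx->hwvirt.vmx.GCPhysShadowVmcs);
        /* ... the remaining VMX hwvirt fields, read in exactly the order the
           save path wrote them ... */
    }
}
/* Saved states older than version 19 carry no VMX hwvirt data, so nothing is read. */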
trunk/src/VBox/VMM/include/CPUMInternal.mac
r76553 r78220
253 253  alignb 8
254 254  .Guest.hwvirt.svm.HCPhysVmcb RTHCPHYS_RES 1
255      .Guest.hwvirt.svm.u64Padding0 resq 19
    255  .Guest.hwvirt.svm.au64Padding0 resq 33
256 256  .Guest.hwvirt.enmHwvirt resd 1
257 257  .Guest.hwvirt.fGif resb 1
…
543 543  alignb 8
544 544  .Hyper.hwvirt.svm.HCPhysVmcb RTHCPHYS_RES 1
545      .Hyper.hwvirt.svm.u64Padding0 resq 19
    545  .Hyper.hwvirt.svm.au64Padding0 resq 33
546 546  .Hyper.hwvirt.enmHwvirt resd 1
547 547  .Hyper.hwvirt.fGif resb 1
-
trunk/src/VBox/VMM/include/HMInternal.h
r77716 r78220 142 142 #define HM_CHANGED_KEEPER_STATE_MASK UINT64_C(0xffff000000000000) 143 143 144 /** @todo r=ramshankar: Remove "GUEST" from XCPT_INTERCEPTS. */ 144 145 #define HM_CHANGED_VMX_GUEST_XCPT_INTERCEPTS UINT64_C(0x0001000000000000) 145 146 #define HM_CHANGED_VMX_GUEST_AUTO_MSRS UINT64_C(0x0002000000000000) 146 147 #define HM_CHANGED_VMX_GUEST_LAZY_MSRS UINT64_C(0x0004000000000000) 147 #define HM_CHANGED_VMX_ENTRY_CTLS UINT64_C(0x0008000000000000) 148 #define HM_CHANGED_VMX_EXIT_CTLS UINT64_C(0x0010000000000000) 149 #define HM_CHANGED_VMX_MASK UINT64_C(0x001f000000000000) 148 #define HM_CHANGED_VMX_ENTRY_EXIT_CTLS UINT64_C(0x0008000000000000) 149 #define HM_CHANGED_VMX_MASK UINT64_C(0x000f000000000000) 150 150 #define HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE ( HM_CHANGED_GUEST_DR_MASK \ 151 151 | HM_CHANGED_VMX_GUEST_LAZY_MSRS) 152 152 153 /** @todo r=ramshankar: Remove "GUEST" from XCPT_INTERCEPTS. */ 153 154 #define HM_CHANGED_SVM_GUEST_XCPT_INTERCEPTS UINT64_C(0x0001000000000000) 154 155 #define HM_CHANGED_SVM_MASK UINT64_C(0x0001000000000000) … … 405 406 406 407 /** 408 * HM event. 409 * 410 * VT-x and AMD-V common event injection structure. 411 */ 412 typedef struct HMEVENT 413 { 414 /** Whether the event is pending. */ 415 uint32_t fPending; 416 /** The error-code associated with the event. */ 417 uint32_t u32ErrCode; 418 /** The length of the instruction in bytes (only relevant for software 419 * interrupts or software exceptions). */ 420 uint32_t cbInstr; 421 /** Alignment. */ 422 uint32_t u32Padding; 423 /** The encoded event (VM-entry interruption-information for VT-x or EVENTINJ 424 * for SVM). */ 425 uint64_t u64IntInfo; 426 /** Guest virtual address if this is a page-fault event. */ 427 RTGCUINTPTR GCPtrFaultAddress; 428 } HMEVENT; 429 /** Pointer to a HMEVENT struct. */ 430 typedef HMEVENT *PHMEVENT; 431 /** Pointer to a const HMEVENT struct. */ 432 typedef const HMEVENT *PCHMEVENT; 433 AssertCompileSizeAlignment(HMEVENT, 8); 434 435 /** 407 436 * HM VM Instance data. 408 437 * Changes to this must checked against the padding of the hm union in VM! … … 522 551 uint64_t u64HostSmmMonitorCtl; 523 552 /** Host EFER value (set by ring-0 VMX init) */ 524 uint64_t u64Host Efer;553 uint64_t u64HostMsrEfer; 525 554 /** Whether the CPU supports VMCS fields for swapping EFER. */ 526 555 bool fSupportsVmcsEfer; … … 600 629 601 630 /* Maximum number of cached entries. */ 602 #define VMX_VMCS_ BATCH_CACHE_MAX_ENTRY128631 #define VMX_VMCS_CACHE_MAX_ENTRY 128 603 632 604 633 /** 605 634 * Cache of a VMCS for batch reads or writes. 606 635 */ 607 typedef struct VMXVMCS BATCHCACHE636 typedef struct VMXVMCSCACHE 608 637 { 609 638 #ifdef VBOX_WITH_CRASHDUMP_MAGIC … … 625 654 uint32_t cValidEntries; 626 655 uint32_t uAlignment; 627 uint32_t aField[VMX_VMCS_ BATCH_CACHE_MAX_ENTRY];628 uint64_t aFieldVal[VMX_VMCS_ BATCH_CACHE_MAX_ENTRY];656 uint32_t aField[VMX_VMCS_CACHE_MAX_ENTRY]; 657 uint64_t aFieldVal[VMX_VMCS_CACHE_MAX_ENTRY]; 629 658 } Write; 630 659 struct … … 632 661 uint32_t cValidEntries; 633 662 uint32_t uAlignment; 634 uint32_t aField[VMX_VMCS_ BATCH_CACHE_MAX_ENTRY];635 uint64_t aFieldVal[VMX_VMCS_ BATCH_CACHE_MAX_ENTRY];663 uint32_t aField[VMX_VMCS_CACHE_MAX_ENTRY]; 664 uint64_t aFieldVal[VMX_VMCS_CACHE_MAX_ENTRY]; 636 665 } Read; 637 666 #ifdef VBOX_STRICT … … 659 688 } ScratchPad; 660 689 #endif 661 } VMXVMCS BATCHCACHE;662 /** Pointer to VMXVMCS BATCHCACHE. 
*/663 typedef VMXVMCS BATCHCACHE *PVMXVMCSBATCHCACHE;664 AssertCompileSizeAlignment(VMXVMCS BATCHCACHE, 8);690 } VMXVMCSCACHE; 691 /** Pointer to VMXVMCSCACHE. */ 692 typedef VMXVMCSCACHE *PVMXVMCSCACHE; 693 AssertCompileSizeAlignment(VMXVMCSCACHE, 8); 665 694 666 695 /** … … 674 703 * @param pVCpu Pointer to the cross context per-CPU structure. 675 704 */ 676 typedef DECLCALLBACK(int) FNHMVMXSTARTVM(RTHCUINT fResume, PCPUMCTX pCtx, PVMXVMCS BATCHCACHE pVmcsCache, PVM pVM, PVMCPU pVCpu);705 typedef DECLCALLBACK(int) FNHMVMXSTARTVM(RTHCUINT fResume, PCPUMCTX pCtx, PVMXVMCSCACHE pVmcsCache, PVM pVM, PVMCPU pVCpu); 677 706 /** Pointer to a VMX StartVM function. */ 678 707 typedef R0PTRTYPE(FNHMVMXSTARTVM *) PFNHMVMXSTARTVM; … … 682 711 /** Pointer to a SVM VMRun function. */ 683 712 typedef R0PTRTYPE(FNHMSVMVMRUN *) PFNHMSVMVMRUN; 684 685 /**686 * Cache of certain VMCS fields during execution of a guest or nested-guest.687 */688 typedef struct VMXVMCSCTLSCACHE689 {690 /** Cache of pin-based VM-execution controls. */691 uint32_t u32PinCtls;692 /** Cache of processor-based VM-execution controls. */693 uint32_t u32ProcCtls;694 /** Cache of secondary processor-based VM-execution controls. */695 uint32_t u32ProcCtls2;696 /** Cache of VM-entry controls. */697 uint32_t u32EntryCtls;698 /** Cache of VM-exit controls. */699 uint32_t u32ExitCtls;700 /** Cache of CR0 mask. */701 uint32_t u32Cr0Mask;702 /** Cache of CR4 mask. */703 uint32_t u32Cr4Mask;704 /** Cache of exception bitmap. */705 uint32_t u32XcptBitmap;706 /** Cache of TSC offset. */707 uint64_t u64TscOffset;708 } VMXVMCSCTLSCACHE;709 /** Pointer to a VMXVMCSCTLSCACHE struct. */710 typedef VMXVMCSCTLSCACHE *PVMXVMCSCTLSCACHE;711 /** Pointer to a VMXVMCSCTLSCACHE struct. */712 typedef const VMXVMCSCTLSCACHE *PCVMXVMCSCTLSCACHE;713 AssertCompileSizeAlignment(VMXVMCSCTLSCACHE, 8);714 713 715 714 /** … … 721 720 typedef struct VMXVMCSINFO 722 721 { 722 /** @name VMLAUNCH/VMRESUME information. 723 * @{ */ 724 /** Ring-0 pointer to the hardware-assisted VMX execution function. */ 725 PFNHMVMXSTARTVM pfnStartVM; 726 #if HC_ARCH_BITS == 32 727 uint32_t u32Alignment0; 728 #endif 729 /** @} */ 730 723 731 /** @name VMCS and related data structures. 724 732 * @{ */ … … 732 740 /** Host-physical address of the virtual APIC page. */ 733 741 RTHCPHYS HCPhysVirtApic; 734 /** Padding. */742 /** Alignment. */ 735 743 R0PTRTYPE(void *) pvAlignment0; 736 744 /** Host-virtual address of the virtual-APIC page. */ … … 744 752 R0PTRTYPE(void *) pvMsrBitmap; 745 753 746 /** Host-physical address of the VM-entry MSR-load and VM-exit MSR-store area. */ 747 RTHCPHYS HCPhysGuestMsr; 748 /** R0 memory object of the VM-entry MSR-load and VM-exit MSR-store area. */ 749 RTR0MEMOBJ hMemObjGuestMsr; 750 /** Host-virtual address of the VM-entry MSR-load and VM-exit MSR-store area. */ 751 R0PTRTYPE(void *) pvGuestMsr; 754 /** Host-physical address of the VM-entry MSR-load area. */ 755 RTHCPHYS HCPhysGuestMsrLoad; 756 /** R0 memory object of the VM-entry MSR-load area. */ 757 RTR0MEMOBJ hMemObjGuestMsrLoad; 758 /** Host-virtual address of the VM-entry MSR-load area. */ 759 R0PTRTYPE(void *) pvGuestMsrLoad; 760 761 /** Host-physical address of the VM-exit MSR-store area. */ 762 RTHCPHYS HCPhysGuestMsrStore; 763 /** R0 memory object of the VM-exit MSR-store area. */ 764 RTR0MEMOBJ hMemObjGuestMsrStore; 765 /** Host-virtual address of the VM-exit MSR-store area. */ 766 R0PTRTYPE(void *) pvGuestMsrStore; 752 767 753 768 /** Host-physical address of the VM-exit MSR-load area. 
*/ 754 RTHCPHYS HCPhysHostMsr ;769 RTHCPHYS HCPhysHostMsrLoad; 755 770 /** R0 memory object for the VM-exit MSR-load area. */ 756 RTR0MEMOBJ hMemObjHostMsr ;771 RTR0MEMOBJ hMemObjHostMsrLoad; 757 772 /** Host-virtual address of the VM-exit MSR-load area. */ 758 R0PTRTYPE(void *) pvHostMsr ;773 R0PTRTYPE(void *) pvHostMsrLoad; 759 774 760 775 /** Host-physical address of the EPTP. */ 761 776 RTHCPHYS HCPhysEPTP; 777 /** Number of guest MSRs in the VM-entry MSR-load area. */ 778 uint32_t cEntryMsrLoad; 779 /** Number of guest MSRs in the VM-exit MSR-store area. */ 780 uint32_t cExitMsrStore; 781 /** Number of host MSRs in the VM-exit MSR-load area. */ 782 uint32_t cExitMsrLoad; 783 /** Padding. */ 784 uint32_t u32Padding0; 762 785 /** @} */ 763 786 764 787 /** @name Auxiliary information. 765 788 * @{ */ 766 /** Number of guest/host MSR pairs in the auto-load/store area. */ 767 uint32_t cMsrs; 768 /** The VMCS state, see VMX_V_VMCS_STATE_XXX. */ 789 /** The VMCS launch state, see VMX_V_VMCS_LAUNCH_STATE_XXX. */ 769 790 uint32_t fVmcsState; 791 /** Set if guest was executing in real mode (extra checks). */ 792 bool fWasInRealMode; 793 /** Set if the guest switched to 64-bit mode on a 32-bit host. */ 794 bool fSwitchedTo64on32; 795 /** Padding. */ 796 bool afPadding0[2]; 770 797 /** @} */ 771 798 … … 782 809 /** VM-exit controls. */ 783 810 uint32_t u32ExitCtls; 784 /** CR0 guest/host mask. */785 uint32_t u32Cr0Mask;786 /** CR4 guset/host mask. */787 uint32_t u32Cr4Mask;788 811 /** Exception bitmap. */ 789 812 uint32_t u32XcptBitmap; 813 /** CR0 guest/host mask. */ 814 uint64_t u64Cr0Mask; 815 /** CR4 guest/host mask. */ 816 uint64_t u64Cr4Mask; 817 /** Page-fault exception error-code mask. */ 818 uint32_t u32XcptPFMask; 819 /** Page-fault exception error-code match. */ 820 uint32_t u32XcptPFMatch; 790 821 /** TSC offset. */ 791 822 uint64_t u64TscOffset; 823 /** VMCS link pointer. */ 824 uint64_t u64VmcsLinkPtr; 792 825 /** @} */ 793 826 827 /** @name Real-mode emulation state. 828 * @{ */ 829 struct 830 { 831 X86DESCATTR AttrCS; 832 X86DESCATTR AttrDS; 833 X86DESCATTR AttrES; 834 X86DESCATTR AttrFS; 835 X86DESCATTR AttrGS; 836 X86DESCATTR AttrSS; 837 X86EFLAGS Eflags; 838 bool fRealOnV86Active; 839 bool afPadding1[3]; 840 } RealMode; 841 /** @} */ 842 794 843 /** Padding. */ 795 uint64_t u64Padding[4];844 uint64_t au64Padding[2]; 796 845 } VMXVMCSINFO; 797 846 /** Pointer to a VMXVMCSINFO struct. */ 798 847 typedef VMXVMCSINFO *PVMXVMCSINFO; 799 /** Pointer to a VMXVMCSINFO struct. */848 /** Pointer to a const VMXVMCSINFO struct. */ 800 849 typedef const VMXVMCSINFO *PCVMXVMCSINFO; 801 850 AssertCompileSizeAlignment(VMXVMCSINFO, 8); 851 AssertCompileMemberAlignment(VMXVMCSINFO, fVmcsState, 8); 852 AssertCompileMemberAlignment(VMXVMCSINFO, u32PinCtls, 8); 853 AssertCompileMemberAlignment(VMXVMCSINFO, u64VmcsLinkPtr, 8); 854 AssertCompileMemberAlignment(VMXVMCSINFO, pvMsrBitmap, 8); 802 855 803 856 /** … … 857 910 uint64_t fCtxChanged; 858 911 /** Host's TSC_AUX MSR (used when RDTSCP doesn't cause VM-exits). */ 859 uint64_t u64HostTscAux; 912 uint64_t u64HostTscAux; /** @todo r=ramshankar: Can be removed and put in SVMTRANSIENT instead! */ 860 913 861 914 union /* no tag! */ … … 864 917 struct 865 918 { 866 /** Ring 0 handlers for VT-x. */ 867 PFNHMVMXSTARTVM pfnStartVM; 868 #if HC_ARCH_BITS == 32 919 /** @name Guest information. 920 * @{ */ 921 /** Guest VMCS information. */ 922 VMXVMCSINFO VmcsInfo; 923 /** Nested-guest VMCS information. 
*/ 924 VMXVMCSINFO VmcsInfoNstGst; 925 /** Whether the nested-guest VMCS was the last current VMCS. */ 926 bool fSwitchedToNstGstVmcs; 927 /** Whether the static guest VMCS controls has been merged with the 928 * nested-guest VMCS controls. */ 929 bool fMergedNstGstCtls; 930 /** Alignment. */ 931 bool afAlignment0[6]; 932 /** Cached guest APIC-base MSR for identifying when to map the APIC-access page. */ 933 uint64_t u64GstMsrApicBase; 934 /** VMCS cache for batched vmread/vmwrites. */ 935 VMXVMCSCACHE VmcsCache; 936 /** @} */ 937 938 /** @name Host information. 939 * @{ */ 940 /** Host LSTAR MSR to restore lazily while leaving VT-x. */ 941 uint64_t u64HostMsrLStar; 942 /** Host STAR MSR to restore lazily while leaving VT-x. */ 943 uint64_t u64HostMsrStar; 944 /** Host SF_MASK MSR to restore lazily while leaving VT-x. */ 945 uint64_t u64HostMsrSfMask; 946 /** Host KernelGS-Base MSR to restore lazily while leaving VT-x. */ 947 uint64_t u64HostMsrKernelGsBase; 948 /** The mask of lazy MSRs swap/restore state, see VMX_LAZY_MSRS_XXX. */ 949 uint32_t fLazyMsrs; 950 /** Whether the host MSR values are up-to-date in the auto-load/store MSR area. */ 951 bool fUpdatedHostAutoMsrs; 952 /** Alignment. */ 953 uint8_t au8Alignment0[3]; 954 /** Which host-state bits to restore before being preempted. */ 955 uint32_t fRestoreHostFlags; 956 /** Alignment. */ 869 957 uint32_t u32Alignment0; 870 #endif 871 872 /** Cache of the executing guest (or nested-guest) VMCS control fields. */ 873 VMXVMCSCTLSCACHE Ctls; 874 /** Cache of guest (level 1) VMCS control fields when executing a nested-guest 875 * (level 2). */ 876 VMXVMCSCTLSCACHE Level1Ctls; 877 878 /** Physical address of the VM control structure (VMCS). */ 879 RTHCPHYS HCPhysVmcs; 880 /** R0 memory object for the VM control structure (VMCS). */ 881 RTR0MEMOBJ hMemObjVmcs; 882 /** Virtual address of the VM control structure (VMCS). */ 883 R0PTRTYPE(void *) pvVmcs; 884 885 /** Physical address of the virtual APIC page for TPR caching. */ 886 RTHCPHYS HCPhysVirtApic; 887 /** Padding. */ 888 R0PTRTYPE(void *) pvAlignment0; 889 /** Virtual address of the virtual APIC page for TPR caching. */ 890 R0PTRTYPE(uint8_t *) pbVirtApic; 891 892 /** Physical address of the MSR bitmap. */ 893 RTHCPHYS HCPhysMsrBitmap; 894 /** R0 memory object for the MSR bitmap. */ 895 RTR0MEMOBJ hMemObjMsrBitmap; 896 /** Virtual address of the MSR bitmap. */ 897 R0PTRTYPE(void *) pvMsrBitmap; 898 899 /** Physical address of the VM-entry MSR-load and VM-exit MSR-store area (used 900 * for guest MSRs). */ 901 RTHCPHYS HCPhysGuestMsr; 902 /** R0 memory object of the VM-entry MSR-load and VM-exit MSR-store area 903 * (used for guest MSRs). */ 904 RTR0MEMOBJ hMemObjGuestMsr; 905 /** Virtual address of the VM-entry MSR-load and VM-exit MSR-store area (used 906 * for guest MSRs). */ 907 R0PTRTYPE(void *) pvGuestMsr; 908 909 /** Physical address of the VM-exit MSR-load area (used for host MSRs). */ 910 RTHCPHYS HCPhysHostMsr; 911 /** R0 memory object for the VM-exit MSR-load area (used for host MSRs). */ 912 RTR0MEMOBJ hMemObjHostMsr; 913 /** Virtual address of the VM-exit MSR-load area (used for host MSRs). */ 914 R0PTRTYPE(void *) pvHostMsr; 915 916 /** Physical address of the current EPTP. */ 917 RTHCPHYS HCPhysEPTP; 918 919 /** Number of guest/host MSR pairs in the auto-load/store area. */ 920 uint32_t cMsrs; 921 /** Whether the host MSR values are up-to-date in the auto-load/store area. 
*/ 922 bool fUpdatedHostMsrs; 923 uint8_t au8Alignment0[3]; 924 925 /** Host LSTAR MSR value to restore lazily while leaving VT-x. */ 926 uint64_t u64HostLStarMsr; 927 /** Host STAR MSR value to restore lazily while leaving VT-x. */ 928 uint64_t u64HostStarMsr; 929 /** Host SF_MASK MSR value to restore lazily while leaving VT-x. */ 930 uint64_t u64HostSFMaskMsr; 931 /** Host KernelGS-Base MSR value to restore lazily while leaving VT-x. */ 932 uint64_t u64HostKernelGSBaseMsr; 933 /** A mask of which MSRs have been swapped and need restoration. */ 934 uint32_t fLazyMsrs; 935 uint32_t u32Alignment1; 936 937 /** The cached APIC-base MSR used for identifying when to map the HC physical APIC-access page. */ 938 uint64_t u64MsrApicBase; 939 940 /** VMCS cache for batched vmread/vmwrites. */ 941 VMXVMCSBATCHCACHE VmcsBatchCache; 942 943 /** Real-mode emulation state. */ 944 struct 945 { 946 X86DESCATTR AttrCS; 947 X86DESCATTR AttrDS; 948 X86DESCATTR AttrES; 949 X86DESCATTR AttrFS; 950 X86DESCATTR AttrGS; 951 X86DESCATTR AttrSS; 952 X86EFLAGS Eflags; 953 bool fRealOnV86Active; 954 } RealMode; 955 958 /** The host-state restoration structure. */ 959 VMXRESTOREHOST RestoreHost; 960 /** @} */ 961 962 /** @name Error reporting and diagnostics. 963 * @{ */ 956 964 /** VT-x error-reporting (mainly for ring-3 propagation). */ 957 965 struct 958 966 { 959 uint64_t u64VmcsPhys;967 RTHCPHYS HCPhysCurrentVmcs; 960 968 uint32_t u32VmcsRev; 961 969 uint32_t u32InstrError; … … 965 973 RTCPUID idCurrentCpu; 966 974 } LastError; 967 968 /** Current state of the VMCS. */ 969 uint32_t fVmcsState; 970 /** Which host-state bits to restore before being preempted. */ 971 uint32_t fRestoreHostFlags; 972 /** The host-state restoration structure. */ 973 VMXRESTOREHOST RestoreHost; 974 975 /** Set if guest was executing in real mode (extra checks). */ 976 bool fWasInRealMode; 977 /** Set if guest switched to 64-bit mode on a 32-bit host. */ 978 bool fSwitchedTo64on32; 979 /** Padding. */ 980 uint8_t au8Alignment1[6]; 975 /** @} */ 981 976 } vmx; 982 977 … … 1023 1018 1024 1019 /** Event injection state. */ 1025 struct 1026 { 1027 uint32_t fPending; 1028 uint32_t u32ErrCode; 1029 uint32_t cbInstr; 1030 uint32_t u32Padding; /**< Explicit alignment padding. 
*/ 1031 uint64_t u64IntInfo; 1032 RTGCUINTPTR GCPtrFaultAddress; 1033 } Event; 1020 HMEVENT Event; 1034 1021 1035 1022 /** The PAE PDPEs used with Nested Paging (only valid when … … 1087 1074 STAMCOUNTER StatExitGuestXF; 1088 1075 STAMCOUNTER StatExitGuestXcpUnk; 1089 STAMCOUNTER StatExitCli;1090 STAMCOUNTER StatExitSti;1091 STAMCOUNTER StatExitPushf;1092 STAMCOUNTER StatExitPopf;1093 STAMCOUNTER StatExitIret;1094 STAMCOUNTER StatExitInt;1095 STAMCOUNTER StatExitHlt;1096 1076 STAMCOUNTER StatExitDRxWrite; 1097 1077 STAMCOUNTER StatExitDRxRead; … … 1110 1090 STAMCOUNTER StatExitClts; 1111 1091 STAMCOUNTER StatExitXdtrAccess; 1112 STAMCOUNTER StatExitMwait;1113 STAMCOUNTER StatExitMonitor;1114 1092 STAMCOUNTER StatExitLmsw; 1115 1093 STAMCOUNTER StatExitIOWrite; … … 1123 1101 STAMCOUNTER StatExitTprBelowThreshold; 1124 1102 STAMCOUNTER StatExitTaskSwitch; 1125 STAMCOUNTER StatExitMtf;1126 1103 STAMCOUNTER StatExitApicAccess; 1127 1104 STAMCOUNTER StatExitReasonNpf; … … 1204 1181 1205 1182 #ifdef IN_RING0 1206 VMMR0_INT_DECL(PHMPHYSCPU) hmR0GetCurrentCpu(void);1207 VMMR0_INT_DECL(int) hmR0EnterCpu(PVMCPU pVCpu);1183 VMMR0_INT_DECL(PHMPHYSCPU) hmR0GetCurrentCpu(void); 1184 VMMR0_INT_DECL(int) hmR0EnterCpu(PVMCPU pVCpu); 1208 1185 1209 1186 # ifdef VBOX_STRICT 1210 VMMR0_INT_DECL(void) hmR0DumpRegs(PVMCPU pVCpu);1211 VMMR0_INT_DECL(void) hmR0DumpDescriptor(PCX86DESCHC pDesc, RTSEL Sel, const char *pszMsg);1187 VMMR0_INT_DECL(void) hmR0DumpRegs(PVMCPU pVCpu); 1188 VMMR0_INT_DECL(void) hmR0DumpDescriptor(PCX86DESCHC pDesc, RTSEL Sel, const char *pszMsg); 1212 1189 # endif 1213 1190 1214 1191 # ifdef VBOX_WITH_KERNEL_USING_XMM 1215 DECLASM(int) hmR0VMXStartVMWrapXMM(RTHCUINT fResume, PCPUMCTX pCtx, PVMXVMCSBATCHCACHE pVmcsCache, PVM pVM, PVMCPU pVCpu,1216 PFNHMVMXSTARTVM pfnStartVM);1217 DECLASM(int) hmR0SVMRunWrapXMM(RTHCPHYS pVmcbHostPhys, RTHCPHYS pVmcbPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu,1218 PFNHMSVMVMRUN pfnVMRun);1192 DECLASM(int) hmR0VMXStartVMWrapXMM(RTHCUINT fResume, PCPUMCTX pCtx, PVMXVMCSCACHE pVmcsCache, PVM pVM, 1193 PVMCPU pVCpu, PFNHMVMXSTARTVM pfnStartVM); 1194 DECLASM(int) hmR0SVMRunWrapXMM(RTHCPHYS pVmcbHostPhys, RTHCPHYS pVmcbPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu, 1195 PFNHMSVMVMRUN pfnVMRun); 1219 1196 # endif 1220 1197 #endif /* IN_RING0 */ 1221 1198 1222 VMM_INT_DECL(int) hmEmulateSvmMovTpr(PVMCPU pVCpu); 1199 VMM_INT_DECL(int) hmEmulateSvmMovTpr(PVMCPU pVCpu); 1200 1201 VMM_INT_DECL(PVMXVMCSINFO) hmGetVmxActiveVmcsInfo(PVMCPU pVCpu); 1223 1202 1224 1203 /** @} */ -
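The HMInternal.h changes above split the per-VCPU VMX state into a guest VMCS info structure (VmcsInfo) and a nested-guest one (VmcsInfoNstGst), selected by the fSwitchedToNstGstVmcs flag, and declare hmGetVmxActiveVmcsInfo() for callers such as the DBGF info handler and the mode-change notification. The following is only an illustrative sketch of what such a selector could look like, assuming the HMCPU layout introduced in this hunk; the actual implementation lives in another file of this changeset and may differ:

/* Illustrative sketch, not the changeset's actual code. Assumes the new
 * HMCPU fields shown above (hm.s.vmx.VmcsInfo, hm.s.vmx.VmcsInfoNstGst and
 * hm.s.vmx.fSwitchedToNstGstVmcs). */
VMM_INT_DECL(PVMXVMCSINFO) hmGetVmxActiveVmcsInfo(PVMCPU pVCpu)
{
    /* Return the VMCS info that was last made current on this virtual CPU:
       the nested-guest VMCS info while a nested-guest VMCS is active,
       otherwise the ordinary guest VMCS info. */
    if (pVCpu->hm.s.vmx.fSwitchedToNstGstVmcs)
        return &pVCpu->hm.s.vmx.VmcsInfoNstGst;
    return &pVCpu->hm.s.vmx.VmcsInfo;
}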
trunk/src/VBox/VMM/include/HMInternal.mac
r77481 r78220 21 21 22 22 ;Maximum number of cached entries. 23 %define VMX_VMCS_ BATCH_CACHE_MAX_ENTRY 12823 %define VMX_VMCS_CACHE_MAX_ENTRY 128 24 24 25 25 ; Structure for storing read and write VMCS actions. 26 struc VMXVMCS BATCHCACHE26 struc VMXVMCSCACHE 27 27 %ifdef VBOX_WITH_CRASHDUMP_MAGIC 28 28 .aMagic resb 16 … … 39 39 .Write.cValidEntries resd 1 40 40 .Write.uAlignment resd 1 41 .Write.aField resd VMX_VMCS_ BATCH_CACHE_MAX_ENTRY42 .Write.aFieldVal resq VMX_VMCS_ BATCH_CACHE_MAX_ENTRY41 .Write.aField resd VMX_VMCS_CACHE_MAX_ENTRY 42 .Write.aFieldVal resq VMX_VMCS_CACHE_MAX_ENTRY 43 43 .Read.cValidEntries resd 1 44 44 .Read.uAlignment resd 1 45 .Read.aField resd VMX_VMCS_ BATCH_CACHE_MAX_ENTRY46 .Read.aFieldVal resq VMX_VMCS_ BATCH_CACHE_MAX_ENTRY45 .Read.aField resd VMX_VMCS_CACHE_MAX_ENTRY 46 .Read.aFieldVal resq VMX_VMCS_CACHE_MAX_ENTRY 47 47 %ifdef VBOX_STRICT 48 48 .TestIn.HCPhysCpuPage resq 1 -
trunk/src/VBox/VMM/testcase/tstVMStruct.h
r77609 r78220 159 159 GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.pShadowVmcsR0); 160 160 GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.pShadowVmcsR3); 161 GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.pvVirtApicPageR0);162 GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.pvVirtApicPageR3);163 161 GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.pvVmreadBitmapR0); 164 162 GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.pvVmreadBitmapR3); 165 163 GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.pvVmwriteBitmapR0); 166 164 GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.pvVmwriteBitmapR3); 167 GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.pAutoMsrAreaR0); 168 GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.pAutoMsrAreaR3); 165 GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.pEntryMsrLoadAreaR0); 166 GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.pEntryMsrLoadAreaR3); 167 GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.pExitMsrStoreAreaR0); 168 GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.pExitMsrStoreAreaR3); 169 GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.pExitMsrLoadAreaR0); 170 GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.pExitMsrLoadAreaR3); 169 171 GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.pvMsrBitmapR0); 170 172 GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.pvMsrBitmapR3); … … 173 175 GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.uFirstPauseLoopTick); 174 176 GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.uPrevPauseTick); 175 GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.u VmentryTick);177 GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.uEntryTick); 176 178 GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.offVirtApicWrite); 177 179 GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.fVirtNmiBlocking); 178 180 GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.Msrs); 181 GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.HCPhysVmcs); 182 GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.HCPhysShadowVmcs); 183 GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.HCPhysVmreadBitmap); 184 GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.HCPhysVmwriteBitmap); 185 GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.HCPhysEntryMsrLoadArea); 186 GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.HCPhysExitMsrStoreArea); 187 GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.HCPhysExitMsrLoadArea); 188 GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.HCPhysMsrBitmap); 189 GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.HCPhysIoBitmap); 179 190 GEN_CHECK_OFF(CPUMCTX, hwvirt.enmHwvirt); 180 191 GEN_CHECK_OFF(CPUMCTX, hwvirt.fGif); -
trunk/src/VBox/VMM/testcase/tstVMStructSize.cpp
r76553 r78220
428 428  CHECK_MEMBER_ALIGNMENT(HM, aPatches, 8);
429 429  CHECK_MEMBER_ALIGNMENT(HMCPU, vmx, 8);
430      CHECK_MEMBER_ALIGNMENT(HMCPU, vmx.pfnStartVM, 8);
431      CHECK_MEMBER_ALIGNMENT(HMCPU, vmx.HCPhysVmcs, 8);
    430  CHECK_MEMBER_ALIGNMENT(HMCPU, vmx.VmcsInfo, 8);
    431  CHECK_MEMBER_ALIGNMENT(HMCPU, vmx.VmcsInfo.pfnStartVM, 8);
    432  CHECK_MEMBER_ALIGNMENT(HMCPU, vmx.VmcsInfoNstGst, 8);
    433  CHECK_MEMBER_ALIGNMENT(HMCPU, vmx.VmcsInfoNstGst.pfnStartVM, 8);
    434  CHECK_MEMBER_ALIGNMENT(HMCPU, vmx.RestoreHost, 8);
432 435  CHECK_MEMBER_ALIGNMENT(HMCPU, vmx.LastError, 8);
433      CHECK_MEMBER_ALIGNMENT(HMCPU, vmx.RestoreHost, 8);
434 436  CHECK_MEMBER_ALIGNMENT(HMCPU, svm, 8);
435 437  CHECK_MEMBER_ALIGNMENT(HMCPU, svm.pfnVMRun, 8);
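The testcase hunk above extends the compile-time layout checks to the new VmcsInfo and VmcsInfoNstGst members. As a small stand-alone illustration of the kind of guarantee such checks give, here is a sketch using plain C11 static_assert and offsetof rather than VBox's own CHECK_MEMBER_ALIGNMENT / AssertCompileMemberAlignment macros; the structure below is hypothetical and only mirrors the idea:

#include <assert.h>   /* static_assert (C11) */
#include <stddef.h>   /* offsetof */
#include <stdint.h>   /* uint64_t */

/* Hypothetical stand-in for a structure shared with assembly code. */
typedef struct EXAMPLECPU
{
    uint8_t  bFlags;
    uint8_t  abPadding[7];     /* keeps the next member naturally aligned */
    uint64_t u64VmcsField;     /* must start on an 8-byte boundary */
} EXAMPLECPU;

/* Fails the build if the layout requirement is ever broken. */
static_assert(offsetof(EXAMPLECPU, u64VmcsField) % 8 == 0,
              "u64VmcsField must be 8-byte aligned");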