Changeset 78220 in vbox for trunk/src/VBox/VMM/VMMAll
- Timestamp: Apr 20, 2019 4:08:44 AM
- svn:sync-xref-src-repo-rev: 130157
- Location: trunk/src/VBox/VMM/VMMAll
- Files: 5 edited
trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp
- Reworded the doc comment of CPUMApplyNestedGuestTscOffset() (it now says the function returns "the TSC value for the nested-guest") and changed its @sa reference from HMApplySvmNstGstTscOffset to the new CPUMRemoveNestedGuestTscOffset. A "@todo r=bird" note was added suggesting that HMApplySvmNstGstTscOffset() be baked into HMHasGuestSvmVmcbCached() to save a call.
- Added CPUMRemoveNestedGuestTscOffset(), the inverse of CPUMApplyNestedGuestTscOffset(): given a nested-guest TSC value it subtracts any nested-guest TSC offset and returns the guest TSC. In VMX non-root mode it subtracts pVmcs->u64TscOffset.u when VMX_PROC_CTLS_USE_TSC_OFFSETTING is set; in SVM nested hardware-virtualization mode it subtracts pVmcb->ctrl.u64TSCOffset, or defers to the new HMRemoveSvmNstGstTscOffset() when HM has the nested-guest VMCB cached. In raw-mode context, or when no nested hardware-virtualization mode is active, the value is returned unchanged.
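The apply/remove pair is symmetric by construction: the same offset that was added to produce the TSC value the nested-guest sees must be subtracted to recover the guest TSC. A minimal standalone sketch of that relationship (plain C with hypothetical names, not the VBox API):

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the VMCS/VMCB TSC offset; architecturally the
   offset is a signed delta, carried here as a wrapping uint64_t. */
static uint64_t apply_nested_tsc_offset(uint64_t guest_tsc, uint64_t tsc_offset)
{
    return guest_tsc + tsc_offset;   /* what CPUMApplyNestedGuestTscOffset conceptually does */
}

static uint64_t remove_nested_tsc_offset(uint64_t nstgst_tsc, uint64_t tsc_offset)
{
    return nstgst_tsc - tsc_offset;  /* what CPUMRemoveNestedGuestTscOffset conceptually does */
}

int main(void)
{
    uint64_t const guest_tsc  = 50000;
    uint64_t const tsc_offset = (uint64_t)-1000; /* offsets may be "negative" (two's complement) */

    uint64_t const seen_by_nested = apply_nested_tsc_offset(guest_tsc, tsc_offset);
    assert(remove_nested_tsc_offset(seen_by_nested, tsc_offset) == guest_tsc); /* round trip */
    printf("guest=%llu nested=%llu\n",
           (unsigned long long)guest_tsc, (unsigned long long)seen_by_nested);
    return 0;
}
```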
trunk/src/VBox/VMM/VMMAll/HMAll.cpp
- When the guest switches to real mode (enmGuestMode == PGMMODE_REAL), the code now fetches the active VMCS info object with hmGetVmxActiveVmcsInfo(pVCpu) and sets pVmcsInfo->fWasInRealMode, instead of setting the per-VCPU flag pVCpu->hm.s.vmx.fWasInRealMode; the flag is now tracked per VMCS.
- In the exception-intercept update path, the VMX context-changed flags HM_CHANGED_VMX_ENTRY_CTLS and HM_CHANGED_VMX_EXIT_CTLS were merged into a single HM_CHANGED_VMX_ENTRY_EXIT_CTLS flag.
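Moving fWasInRealMode into the VMCS info object means the flag now lives with whichever VMCS is in use (guest or nested-guest) rather than with the virtual CPU. A rough sketch of that selection idea, using hypothetical, trimmed-down types (the real hmGetVmxActiveVmcsInfo() appears in the HMVMXAll.cpp hunk below):

```c
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical per-VMCS state. */
typedef struct VmcsInfoSketch
{
    bool fWasInRealMode;   /* real-mode history tracked per VMCS */
} VmcsInfoSketch;

/* Hypothetical per-vCPU state holding both VMCS info objects. */
typedef struct VCpuSketch
{
    bool           fSwitchedToNstGstVmcs;
    VmcsInfoSketch VmcsInfo;        /* guest VMCS */
    VmcsInfoSketch VmcsInfoNstGst;  /* nested-guest VMCS */
} VCpuSketch;

/* Pick whichever VMCS is currently in use, mirroring the selection logic. */
static VmcsInfoSketch *getActiveVmcsInfo(VCpuSketch *pVCpu)
{
    return pVCpu->fSwitchedToNstGstVmcs ? &pVCpu->VmcsInfoNstGst : &pVCpu->VmcsInfo;
}

int main(void)
{
    VCpuSketch vcpu = { .fSwitchedToNstGstVmcs = false };
    getActiveVmcsInfo(&vcpu)->fWasInRealMode = true;   /* marks only the guest VMCS */
    printf("guest=%d nested=%d\n", vcpu.VmcsInfo.fWasInRealMode, vcpu.VmcsInfoNstGst.fWasInRealMode);
    return 0;
}
```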
trunk/src/VBox/VMM/VMMAll/HMSVMAll.cpp
- HMApplySvmNstGstTscOffset(): dropped the note asking readers to keep hmR0SvmNstGstUndoTscOffset() in sync and now cross-references CPUMRemoveNestedGuestTscOffset and HMRemoveSvmNstGstTscOffset instead.
- Added HMRemoveSvmNstGstTscOffset(), the counterpart of HMApplySvmNstGstTscOffset(): it asserts that the nested-guest VMCB cache (pVCpu->hm.s.svm.NstGstVmcbCache) is valid and returns uTicks - u64TSCOffset. Like the apply function, it reads the cached control fields rather than the nested-guest VMCB itself, since the latter may have been modified for execution using hardware-assisted SVM.
trunk/src/VBox/VMM/VMMAll/HMVMXAll.cpp
- The real-mode sanity checks (the "all sorts of RPL & DPL assumptions" block) now test pVmcsInfo->fWasInRealMode on the active VMCS info object, obtained via hmGetVmxActiveVmcsInfo(), instead of pVCpu->hm.s.vmx.fWasInRealMode.
- HMGetVmxMsrPermission() was rewritten. The old version returned a VBox status code and reported the permissions through optional PVMXMSREXITREAD/PVMXMSREXITWRITE out parameters; the new version takes only the MSR bitmap pointer and the MSR id and returns a uint32_t combination of VMXMSRPM_XXX flags (VMXMSRPM_EXIT_RD or VMXMSRPM_ALLOW_RD, plus VMXMSRPM_EXIT_WR or VMXMSRPM_ALLOW_WR). The bitmap layout follows Intel spec. 24.6.9 "MSR-Bitmap Address": read bitmaps start at offset 0 and write bitmaps at 0x800; within each half, low MSRs (0x00000000..0x00001fff) occupy the first 0x400 bytes and high MSRs (0xc0000000..0xc0001fff) the next 0x400 bytes. An MSR outside both ranges always causes a VM-exit, so the function returns VMXMSRPM_EXIT_RD | VMXMSRPM_EXIT_WR for it (and likewise for a NULL bitmap pointer).
- HMGetVmxIoBitmapPermission(): parameter list re-wrapped onto one line; no functional change.
- The nested-VMX state dump now logs uEntryTick (renamed from uVmentryTick) and additionally logs fVirtNmiBlocking.
- Added hmGetVmxActiveVmcsInfo(), which returns the VMCS info object currently in use: &pVCpu->hm.s.vmx.VmcsInfoNstGst when fSwitchedToNstGstVmcs is set, otherwise &pVCpu->hm.s.vmx.VmcsInfo. This is the guest or nested-guest VMCS info and need not be the "current" VMCS in the VMX sense; for example, after a VM-entry failure due to invalid guest state the VMCS may have been cleared while returning to ring-3, yet its info object remains the active one for diagnostics.
- Added HMVmxEventToTrpmEventType(), converting a VMX interruption-information type into a TRPM event type: external interrupts map to TRPM_HARDWARE_INT; NMIs and hardware exceptions to TRPM_TRAP; software interrupts, software exceptions (INT3/#BP, INTO/#OF) and privileged software exceptions (INT1/ICEBP, #DB) to TRPM_SOFTWARE_INT; anything else asserts and yields TRPM_32BIT_HACK.
- Added HMNotifyVmxNstGstVmexit() (nested-VMX builds only, not in raw-mode context), a notification callback for VM-exits that happen outside the VMX ring-0 code (e.g. in IEM); it clears pVCpu->hm.s.vmx.fMergedNstGstCtls.
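The permission lookup boils down to computing a byte offset into the 4 KB bitmap from the MSR number and then testing one bit in the read half and one in the write half. A self-contained sketch of that calculation (hypothetical helper and illustrative flag values, not the VBox function or the real VMXMSRPM_XXX constants):

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustrative permission flags (the real VMXMSRPM_XXX values live in VBox headers). */
#define MSRPM_EXIT_RD   0x1u
#define MSRPM_ALLOW_RD  0x2u
#define MSRPM_EXIT_WR   0x4u
#define MSRPM_ALLOW_WR  0x8u

static int test_bit(uint8_t const *pb, uint32_t iBit)
{
    return (pb[iBit >> 3] >> (iBit & 7)) & 1;
}

/* Layout per Intel SDM 24.6.9: read bitmaps at 0x000 (low MSRs) and 0x400 (high MSRs),
   write bitmaps at 0x800 and 0xC00.  MSRs outside both ranges always exit. */
static uint32_t get_msr_permission(uint8_t const *pbBitmap /* 4 KB */, uint32_t idMsr)
{
    uint32_t offMsr, iBit;
    if (idMsr <= 0x00001fffu)
    {
        offMsr = 0;
        iBit   = idMsr;
    }
    else if (idMsr - 0xc0000000u <= 0x00001fffu)
    {
        offMsr = 0x400;
        iBit   = idMsr - 0xc0000000u;
    }
    else
        return MSRPM_EXIT_RD | MSRPM_EXIT_WR;   /* out-of-range MSRs always intercept */

    uint32_t fPerm = test_bit(pbBitmap + offMsr,         iBit) ? MSRPM_EXIT_RD : MSRPM_ALLOW_RD;
    fPerm         |= test_bit(pbBitmap + 0x800 + offMsr, iBit) ? MSRPM_EXIT_WR : MSRPM_ALLOW_WR;
    return fPerm;
}

int main(void)
{
    static uint8_t abBitmap[4096];
    memset(abBitmap, 0, sizeof(abBitmap));
    /* Intercept writes to MSR 0xC0000080 (EFER): write half (0x800) + high-MSR block (0x400). */
    abBitmap[0x800 + 0x400 + (0x80 >> 3)] |= 1u << (0x80 & 7);

    printf("EFER  perm = %#x\n", get_msr_permission(abBitmap, 0xc0000080u)); /* ALLOW_RD | EXIT_WR */
    printf("bogus perm = %#x\n", get_msr_permission(abBitmap, 0x40000000u)); /* always exits */
    return 0;
}
```

The same offset arithmetic (0/0x400 for low/high MSRs, +0x800 for the write half) is what the rewritten HMGetVmxMsrPermission() performs before returning the combined flag word.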
trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h
- The IEM_VMX_R3_EXECPOLICY_IEM_ALL_ENABLE_RET / _DISABLE_RET macros were removed; the EMR3SetExecutionPolicy(..., EMEXECPOLICY_IEM_ALL, ...) calls they wrapped are now written out inline at the VM-entry and VM-exit sites, guarded by #if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && defined(IN_RING3).
- The VMX-preemption timer code and its worked example (shift = 5, timer value = 2, entry TSC = 50000) now use uEntryTick, renamed from uVmentryTick, for the TSC value captured at VM-entry (a small arithmetic sketch follows this list).
- The VM-exit MSR handling was reworded and renamed: the guest-MSR "auto-store MSRs area" is now the VM-exit MSR-store area backed by pExitMsrStoreArea/GCPhysVmExitMsrStoreArea, and the host-MSR "auto-load MSRs area" is now the VM-exit MSR-load area backed by pExitMsrLoadArea/GCPhysVmExitMsrLoadArea (both previously went through pAutoMsrArea/GCPhysAutoMsrArea).
- At the end of VM-exit handling, any IEM-only nested-guest execution policy is reverted inline: EMR3SetExecutionPolicy(pVCpu->CTX_SUFF(pVM)->pUVM, EMEXECPOLICY_IEM_ALL, false) is called, a non-VINF_SUCCESS scheduling status is stashed via iemSetPassUpStatus(), and the function returns VINF_SUCCESS.
- The virtual-APIC accessors iemVmxVirtApicReadRaw32/ReadRaw64 and iemVmxVirtApicWriteRaw32/WriteRaw64 changed from DECLINLINE helpers dereferencing the locally mapped pvVirtApicPage to IEM_STATIC helpers that read and write the virtual-APIC page in guest physical memory (pVmcs->u64AddrVirtApic.u) via PGMPhysSimpleReadGCPhys/PGMPhysSimpleWriteGCPhys, asserting on failure and returning 0 on a failed read.
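The VMX-preemption timer bookkeeping mentioned above works from the TSC delta since VM-entry (uEntryTick). Architecturally the timer counts down by one every 2^shift TSC ticks, the shift being reported through IA32_VMX_MISC. A standalone sketch of that arithmetic, under those assumptions (hypothetical helper, not the IEM code; starting numbers borrowed from the worked example in the source comment):

```c
#include <stdint.h>
#include <stdio.h>

/* The timer decrements by 1 every 2^shift TSC ticks; saturate at 0 once the
   programmed value has been consumed. */
static uint32_t preempt_timer_remaining(uint64_t entry_tick, uint64_t cur_tick,
                                        uint32_t vmcs_timer, unsigned shift)
{
    uint64_t const delta   = cur_tick - entry_tick;
    uint64_t const elapsed = delta >> shift;          /* whole timer ticks elapsed */
    return elapsed >= vmcs_timer ? 0 : (uint32_t)(vmcs_timer - elapsed);
}

int main(void)
{
    /* shift = 5, VMCS timer value = 2, TSC at VM-entry = 50000. */
    uint64_t const entry = 50000;
    for (uint64_t cur = entry; cur <= entry + 96; cur += 32)
        printf("TSC=%llu remaining=%u\n", (unsigned long long)cur,
               preempt_timer_remaining(entry, cur, 2 /*timer*/, 5 /*shift*/));
    return 0;
}
```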
- iemVmxVirtApicSetVector/ClearVector were renamed iemVmxVirtApicSetVectorInReg/iemVmxVirtApicClearVectorInReg and rewritten along the same lines: they read the relevant 32-bit sub-register of the 256-bit register (IRR/ISR/TMR) from the virtual-APIC page in guest memory, set or clear the vector's bit, and write it back, asserting on read or write failure (the index arithmetic is sketched after this list). Callers in the PPR/EOI/self-IPI virtualization paths were updated to the new names, and the highest-pending-vector helper gained an assertion that the current VMCS pointer is valid.
- VM-entry checks no longer copy the whole virtual-APIC page into a local buffer. When neither virtual-APIC accesses nor virtual-interrupt delivery is enabled, the VTPR byte is read on demand from the virtual-APIC page (GCPhysVirtApic + XAPIC_OFF_TPR); a failed read yields kVmxVDiag_Vmentry_VirtApicPagePtrReadPhys, and the TPR-threshold/VTPR comparison was restructured around the freshly read value (failure diag kVmxVDiag_Vmentry_TprThresholdVTpr).
- The VM-entry "auto-load MSRs" wording and storage were renamed to the VM-entry MSR-load area, backed by pEntryMsrLoadArea/GCPhysVmEntryMsrLoadArea.
- The VMX-preemption timer setup at VM-entry records uEntryTick (renamed from uVmentryTick) via TMCpuTickGetNoCheck() before setting VMCPU_FF_VMX_PREEMPT_TIMER.
- Event injection at VM-entry maps VMX interruption types to TRPM types more precisely: NMIs and hardware exceptions become TRPM_TRAP; software interrupts, software exceptions (#BP, #OF) and privileged software exceptions (#DB from INT1/ICEBP) become TRPM_SOFTWARE_INT, with assertions on the expected vectors. The fault address is set for #PF when the TRPM type is TRPM_TRAP, and the instruction length is set for all TRPM_SOFTWARE_INT events.
- On a successful VM-entry into the nested-guest, the IEM-only execution policy is enabled inline (EMR3SetExecutionPolicy(..., EMEXECPOLICY_IEM_ALL, true), with a non-VINF_SUCCESS scheduling status stashed via iemSetPassUpStatus()) before returning VINF_SUCCESS.
- The RDMSR/WRMSR intercept check now calls the new HMGetVmxMsrPermission() once and tests the returned VMXMSRPM_EXIT_RD or VMXMSRPM_EXIT_WR flag, replacing the old two-out-parameter VMXMSREXIT_* query.
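The SetVectorInReg/ClearVectorInReg helpers rely on the xAPIC layout: IRR, ISR and TMR are 256-bit registers stored as eight 32-bit sub-registers spaced 16 bytes apart, so a vector's upper three bits pick the sub-register ((uVector & 0xe0) >> 1 gives its byte offset) and the lower five bits pick the bit within it. A standalone sketch of that index math over a plain in-memory buffer (hypothetical helper names; the real code now performs the read-modify-write through guest physical memory):

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* A 256-bit xAPIC register (e.g. IRR at offset 0x200) occupies eight 32-bit
   sub-registers at 16-byte strides within the 4 KB APIC page. */
static void apic_set_vector(uint8_t *pbApicPage, uint16_t offReg, uint8_t uVector)
{
    uint16_t const offSubReg    = (uVector & 0xe0) >> 1;  /* (vector / 32) * 16 bytes */
    uint8_t  const idxVectorBit = uVector & 0x1f;         /* bit within the 32-bit sub-register */
    uint32_t uSubReg;
    memcpy(&uSubReg, pbApicPage + offReg + offSubReg, sizeof(uSubReg));
    uSubReg |= UINT32_C(1) << idxVectorBit;
    memcpy(pbApicPage + offReg + offSubReg, &uSubReg, sizeof(uSubReg));
}

static int apic_test_vector(uint8_t const *pbApicPage, uint16_t offReg, uint8_t uVector)
{
    uint16_t const offSubReg = (uVector & 0xe0) >> 1;
    uint32_t uSubReg;
    memcpy(&uSubReg, pbApicPage + offReg + offSubReg, sizeof(uSubReg));
    return (uSubReg >> (uVector & 0x1f)) & 1;
}

int main(void)
{
    enum { OFF_IRR0 = 0x200 };            /* xAPIC IRR base offset */
    static uint8_t abApicPage[4096];
    memset(abApicPage, 0, sizeof(abApicPage));

    apic_set_vector(abApicPage, OFF_IRR0, 0x41 /* vector 65 */);
    /* Vector 65 lands in the third 32-bit sub-register (0x200 + 0x20), bit 1. */
    printf("IRR[65]=%d (byte offset %#x)\n",
           apic_test_vector(abApicPage, OFF_IRR0, 0x41),
           (unsigned)(OFF_IRR0 + ((0x41 & 0xe0) >> 1)));
    return 0;
}
```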