Changeset 72967 in vbox for trunk/src/VBox/VMM/VMMR0
- Timestamp: Jul 8, 2018 10:38:08 AM (6 years ago)
- Location: trunk/src/VBox/VMM/VMMR0
- Files: 5 edited
trunk/src/VBox/VMM/VMMR0/HMR0.cpp
(r72907 → r72967)

@@ 91 @@
     DECLR0CALLBACKMEMBER(void, pfnThreadCtxCallback, (RTTHREADCTXEVENT enmEvent, PVMCPU pVCpu, bool fGlobalInit));
     DECLR0CALLBACKMEMBER(int, pfnExportHostState, (PVMCPU pVCpu));
-    DECLR0CALLBACKMEMBER(VBOXSTRICTRC, pfnRunGuestCode, (PVMCPU pVCpu, PCPUMCTX pCtx));
+    DECLR0CALLBACKMEMBER(VBOXSTRICTRC, pfnRunGuestCode, (PVMCPU pVCpu));
     DECLR0CALLBACKMEMBER(int, pfnEnableCpu, (PHMGLOBALCPUINFO pHostCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage,
                                              bool fEnabledByHost, void *pvArg));

@@ 276 @@ hmR0DummyRunGuestCode
-static DECLCALLBACK(VBOXSTRICTRC) hmR0DummyRunGuestCode(PVMCPU pVCpu, PCPUMCTX pCtx)
+static DECLCALLBACK(VBOXSTRICTRC) hmR0DummyRunGuestCode(PVMCPU pVCpu)
 {
-    RT_NOREF2(pVCpu, pCtx);
+    RT_NOREF(pVCpu);
     return VINF_SUCCESS;
 }

@@ 1555 @@
 #endif

-    VBOXSTRICTRC rcStrict = g_HmR0.pfnRunGuestCode(pVCpu, &pVCpu->cpum.GstCtx);
+    VBOXSTRICTRC rcStrict = g_HmR0.pfnRunGuestCode(pVCpu);

 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
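The whole changeset follows the pattern visible in this first file: the guest-CPU context stops being threaded through as a separate pCtx argument, because it is always reachable from the per-VCPU structure as pVCpu->cpum.GstCtx. A minimal standalone sketch of the idea — the types and names here are invented for illustration, not VirtualBox's own:

    #include <stdio.h>

    typedef struct CPUCTX { unsigned long rip; } CPUCTX;   /* toy guest context  */
    typedef struct VCPU   { CPUCTX GstCtx; } VCPU;         /* toy per-VCPU state */

    /* Old shape: caller must pass both, risking a mismatched pair. */
    static void runOld(VCPU *pVCpu, CPUCTX *pCtx)
    {
        (void)pVCpu;
        printf("old: rip=%#lx\n", pCtx->rip);
    }

    /* New shape: one argument, the context derived from it. */
    static void runNew(VCPU *pVCpu)
    {
        printf("new: rip=%#lx\n", pVCpu->GstCtx.rip);
    }

    int main(void)
    {
        VCPU vcpu = { { 0x1000 } };
        runOld(&vcpu, &vcpu.GstCtx);
        runNew(&vcpu);
        return 0;
    }

Dropping the second parameter removes the possibility of a caller handing in a context that does not belong to the VCPU it names.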
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
(r72966 → r72967)

@@ 149 @@ HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY
  * event in the guest. */
 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM
-# define HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY() \
+# define HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY(a_pVCpu, a_pSvmTransient) \
     do \
     { \
-        int rc = hmR0SvmCheckExitDueToEventDelivery(pVCpu, pCtx, pSvmTransient); \
+        int rc = hmR0SvmCheckExitDueToEventDelivery((a_pVCpu), (a_pSvmTransient)); \
         if (RT_LIKELY(rc == VINF_SUCCESS))    { /* continue #VMEXIT handling */ } \
         else if (rc == VINF_HM_DOUBLE_FAULT)  { return VINF_SUCCESS; } \
         else if (rc == VINF_EM_RESET \
-              && CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_SHUTDOWN)) \
+              && CPUMIsGuestSvmCtrlInterceptSet((a_pVCpu), &(a_pVCpu)->cpum.GstCtx, SVM_CTRL_INTERCEPT_SHUTDOWN)) \
         { \
-            HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK); \
-            return VBOXSTRICTRC_TODO(IEMExecSvmVmexit(pVCpu, SVM_EXIT_SHUTDOWN, 0, 0)); \
+            HMSVM_CPUMCTX_IMPORT_STATE((a_pVCpu), IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK); \
+            return VBOXSTRICTRC_TODO(IEMExecSvmVmexit((a_pVCpu), SVM_EXIT_SHUTDOWN, 0, 0)); \
         } \
         else \
 …
     } while (0)
 #else
-# define HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY() \
+# define HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY(a_pVCpu, a_pSvmTransient) \
     do \
     { \
-        int rc = hmR0SvmCheckExitDueToEventDelivery(pVCpu, pCtx, pSvmTransient); \
+        int rc = hmR0SvmCheckExitDueToEventDelivery((a_pVCpu), (a_pSvmTransient)); \
         if (RT_LIKELY(rc == VINF_SUCCESS))    { /* continue #VMEXIT handling */ } \
         else if (rc == VINF_HM_DOUBLE_FAULT)  { return VINF_SUCCESS; } \
 …
 #endif

@@ 178 @@ HMSVM_UPDATE_INTR_SHADOW
 /** Macro which updates interrupt shadow for the current RIP. */
-#define HMSVM_UPDATE_INTR_SHADOW(pVCpu, pCtx) \
+#define HMSVM_UPDATE_INTR_SHADOW(a_pVCpu) \
     do { \
         /* Update interrupt shadow. */ \
-        if (   VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS) \
-            && pCtx->rip != EMGetInhibitInterruptsPC(pVCpu)) \
-            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS); \
+        if (   VMCPU_FF_IS_PENDING((a_pVCpu), VMCPU_FF_INHIBIT_INTERRUPTS) \
+            && (a_pVCpu)->cpum.GstCtx.rip != EMGetInhibitInterruptsPC((a_pVCpu))) \
+            VMCPU_FF_CLEAR((a_pVCpu), VMCPU_FF_INHIBIT_INTERRUPTS); \
     } while (0)

@@ 361 @@ FNSVMEXITHANDLER
  * @returns VBox status code.
  * @param   pVCpu           The cross context virtual CPU structure.
- * @param   pCtx            Pointer to the guest-CPU context.
  * @param   pSvmTransient   Pointer to the SVM-transient structure.
  */
-typedef int FNSVMEXITHANDLER(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
+typedef int FNSVMEXITHANDLER(PVMCPU pVCpu, PSVMTRANSIENT pSvmTransient);

@@ 427 @@
-static int hmR0SvmHandleExit(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
+static int hmR0SvmHandleExit(PVMCPU pVCpu, PSVMTRANSIENT pSvmTransient);
 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM
-static int hmR0SvmHandleExitNested(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
+static int hmR0SvmHandleExitNested(PVMCPU pVCpu, PSVMTRANSIENT pSvmTransient);
 #endif

@@ 808 @@ hmR0SvmSupportsVmcbCleanBits
  * @return  @c true if supported, @c false otherwise.
  * @param   pVCpu   The cross context virtual CPU structure.
- * @param   pCtx    Pointer to the guest-CPU context.
  */
-DECLINLINE(bool) hmR0SvmSupportsVmcbCleanBits(PVMCPU pVCpu, PCCPUMCTX pCtx)
+DECLINLINE(bool) hmR0SvmSupportsVmcbCleanBits(PVMCPU pVCpu)
 {
     PVM pVM = pVCpu->CTX_SUFF(pVM);
 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM
-    if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
+    if (CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.GstCtx))
     {
         return (pVM->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_VMCB_CLEAN)
             && pVM->cpum.ro.GuestFeatures.fSvmVmcbClean;
     }
-#else
-    RT_NOREF(pCtx);
 #endif
     return RT_BOOL(pVM->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_VMCB_CLEAN);
 }

@@ 831 @@ hmR0SvmSupportsDecodeAssists — the identical transformation, testing
X86_CPUID_SVM_FEATURE_EDX_DECODE_ASSISTS and fSvmDecodeAssists:
-DECLINLINE(bool) hmR0SvmSupportsDecodeAssists(PVMCPU pVCpu, PCPUMCTX pCtx)
+DECLINLINE(bool) hmR0SvmSupportsDecodeAssists(PVMCPU pVCpu)

@@ 854 @@ hmR0SvmSupportsNextRipSave — the identical transformation, testing
X86_CPUID_SVM_FEATURE_EDX_NRIP_SAVE and fSvmNextRipSave:
-DECLINLINE(bool) hmR0SvmSupportsNextRipSave(PVMCPU pVCpu, PCPUMCTX pCtx)
+DECLINLINE(bool) hmR0SvmSupportsNextRipSave(PVMCPU pVCpu)
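The reworked HMSVM_* macros above also switch from silently capturing pVCpu/pCtx out of the caller's scope to explicit a_-prefixed, fully parenthesized parameters. A small self-contained illustration of why that matters, with toy types rather than the real ones:

    #include <stdio.h>

    typedef struct VCPU { int fInhibit; unsigned long rip, inhibitPC; } VCPU;

    /* Implicit capture: only compiles where a variable named 'vcpu' happens
       to exist, and breaks if the caller renames it. */
    #define UPDATE_SHADOW_IMPLICIT() \
        do { if (vcpu.fInhibit && vcpu.rip != vcpu.inhibitPC) vcpu.fInhibit = 0; } while (0)

    /* Explicit, parenthesized parameter: safe with any lvalue expression. */
    #define UPDATE_SHADOW(a_pVCpu) \
        do { \
            if (   (a_pVCpu)->fInhibit \
                && (a_pVCpu)->rip != (a_pVCpu)->inhibitPC) \
                (a_pVCpu)->fInhibit = 0; \
        } while (0)

    int main(void)
    {
        VCPU cpu0 = { 1, 0x2000, 0x1000 };   /* note: not named 'vcpu' */
        UPDATE_SHADOW(&cpu0);                /* still works            */
        printf("fInhibit=%d\n", cpu0.fInhibit);
        return 0;
    }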
@@ 875 @@ hmR0SvmSetMsrPermission
- * @param   pCtx            Pointer to the guest-CPU or nested-guest-CPU context.
+ * @param   pVCpu           The cross context virtual CPU structure.
  * @param   pbMsrBitmap     Pointer to the MSR bitmap.
  * @param   idMsr           The MSR for which the permissions are being set.
 …
  *          caller needs to take care of this.
  */
-static void hmR0SvmSetMsrPermission(PCCPUMCTX pCtx, uint8_t *pbMsrBitmap, uint32_t idMsr, SVMMSREXITREAD enmRead,
+static void hmR0SvmSetMsrPermission(PVMCPU pVCpu, uint8_t *pbMsrBitmap, uint32_t idMsr, SVMMSREXITREAD enmRead,
                                     SVMMSREXITWRITE enmWrite)
 {
-    bool const fInNestedGuestMode = CPUMIsGuestInSvmNestedHwVirtMode(pCtx);
+    bool const fInNestedGuestMode = CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.GstCtx);
     uint16_t offMsrpm;
     uint8_t  uMsrpmBit;
 …
             /* Only clear the bit if the nested-guest is also not intercepting the MSR read.*/
-            uint8_t const *pbNstGstMsrBitmap = (uint8_t *)pCtx->hwvirt.svm.CTX_SUFF(pvMsrBitmap);
+            uint8_t const *pbNstGstMsrBitmap = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.svm.CTX_SUFF(pvMsrBitmap);
             pbNstGstMsrBitmap += offMsrpm;
             if (!(*pbNstGstMsrBitmap & RT_BIT(uMsrpmBit)))
 …
             /* Only clear the bit if the nested-guest is also not intercepting the MSR write.*/
-            uint8_t const *pbNstGstMsrBitmap = (uint8_t *)pCtx->hwvirt.svm.CTX_SUFF(pvMsrBitmap);
+            uint8_t const *pbNstGstMsrBitmap = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.svm.CTX_SUFF(pvMsrBitmap);
             pbNstGstMsrBitmap += offMsrpm;
             if (!(*pbNstGstMsrBitmap & RT_BIT(uMsrpmBit + 1)))

@@ 1084 @@
     uint8_t *pbMsrBitmap = (uint8_t *)pVCpu->hm.s.svm.pvMsrBitmap;
-    PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
-    hmR0SvmSetMsrPermission(pCtx,  pbMsrBitmap, MSR_K8_LSTAR, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
+    hmR0SvmSetMsrPermission(pVCpu, pbMsrBitmap, MSR_K8_LSTAR, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
 … (the same pCtx → pVCpu replacement is made in the nine further calls, for
    MSR_K8_CSTAR, MSR_K6_STAR, MSR_K8_SF_MASK, MSR_K8_FS_BASE, MSR_K8_GS_BASE,
    MSR_K8_KERNEL_GS_BASE, MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP and
    MSR_IA32_SYSENTER_EIP)
     pVmcbCtrl->u64MSRPMPhysAddr = pVCpu->hm.s.svm.HCPhysMsrBitmap;
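hmR0SvmSetMsrPermission edits the SVM MSR permission bitmap, which dedicates two adjacent bits to each MSR — one for read intercepts, one for write intercepts — which is why the hunks above test RT_BIT(uMsrpmBit) and RT_BIT(uMsrpmBit + 1). A standalone sketch of that bookkeeping; the range handling and constants here are simplifying assumptions for illustration, not AMD's exact layout:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Two bits per MSR: even bit intercepts reads, odd bit intercepts writes.
       Only the low MSR range is modeled; other ranges are omitted. */
    static int msrpmOffsetAndBit(uint32_t idMsr, uint16_t *poff, uint8_t *pbit)
    {
        if (idMsr <= 0x1fff)            /* assumed: first range starts at byte 0 */
        {
            *poff = (uint16_t)((idMsr * 2) / 8);
            *pbit = (uint8_t)((idMsr * 2) % 8);   /* always even: 0, 2, 4 or 6 */
            return 0;
        }
        return -1;
    }

    int main(void)
    {
        uint8_t bitmap[0x2000];
        memset(bitmap, 0xff, sizeof(bitmap));     /* start fully intercepted */

        uint16_t off; uint8_t bit;
        if (msrpmOffsetAndBit(0x174 /* SYSENTER_CS */, &off, &bit) == 0)
        {
            bitmap[off] &= (uint8_t)~(1u << bit);        /* pass through reads  */
            bitmap[off] &= (uint8_t)~(1u << (bit + 1));  /* pass through writes */
        }
        printf("byte %#x now %#x\n", (unsigned)off, (unsigned)bitmap[off]);
        return 0;
    }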
@@ 1131 @@ hmR0SvmGetCurrentVmcb
  * @returns Pointer to the current context VMCB.
  * @param   pVCpu   The cross context virtual CPU structure.
- * @param   pCtx    Pointer to the guest-CPU context.
  */
-DECLINLINE(PSVMVMCB) hmR0SvmGetCurrentVmcb(PVMCPU pVCpu, PCPUMCTX pCtx)
+DECLINLINE(PSVMVMCB) hmR0SvmGetCurrentVmcb(PVMCPU pVCpu)
 {
 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM
-    if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
-        return pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
-#else
-    RT_NOREF(pCtx);
+    if (CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.GstCtx))
+        return pVCpu->cpum.GstCtx.hwvirt.svm.CTX_SUFF(pVmcb);
 #endif
     return pVCpu->hm.s.svm.pVmcb;
 }

@@ 1181 @@
     Log4Func(("%#RGv\n", GCVirt));

-    PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
-    PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu, pCtx);
+    PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
     AssertMsgReturn(pVmcb, ("Invalid pVmcb!\n"), VERR_SVM_INVALID_PVMCB);

@@ 1204 @@ hmR0SvmFlushTaggedTlb
  * @param   pVCpu      The cross context virtual CPU structure.
- * @param   pCtx       Pointer to the guest-CPU or nested-guest-CPU context.
  * @param   pVmcb      Pointer to the VM control block.
  * @param   pHostCpu   Pointer to the HM host-CPU info.
  */
-static void hmR0SvmFlushTaggedTlb(PVMCPU pVCpu, PCCPUMCTX pCtx, PSVMVMCB pVmcb, PHMGLOBALCPUINFO pHostCpu)
-{
-#ifndef VBOX_WITH_NESTED_HWVIRT_SVM
-    RT_NOREF(pCtx);
-#endif
-    PVM pVM = pVCpu->CTX_SUFF(pVM);
-
+static void hmR0SvmFlushTaggedTlb(PVMCPU pVCpu, PSVMVMCB pVmcb, PHMGLOBALCPUINFO pHostCpu)
+{
     /*
      * Force a TLB flush for the first world switch if the current CPU differs from the one
 …
         || pVCpu->hm.s.cTlbFlushes != pHostCpu->cTlbFlushes
 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM
-        || CPUMIsGuestInSvmNestedHwVirtMode(pCtx)
+        || CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.GstCtx)
 #endif
        )
 …
      * This Host CPU requirement takes precedence.
      */
+    PVM pVM = pVCpu->CTX_SUFF(pVM);
     if (pVM->hm.s.svm.fAlwaysFlushTLB)

@@ 1497 @@ hmR0SvmClearCtrlIntercept
  * @returns @c true if the intercept is still set, @c false otherwise.
  * @param   pVCpu           The cross context virtual CPU structure.
- * @param   pCtx            Pointer to the guest-CPU context.
  * @param   pVmcb           Pointer to the VM control block.
  * @param   fCtrlIntercept  The control intercept (SVM_CTRL_INTERCEPT_*).
 …
-DECLINLINE(bool) hmR0SvmClearCtrlIntercept(PVMCPU pVCpu, PCCPUMCTX pCtx, PSVMVMCB pVmcb, uint64_t fCtrlIntercept)
+static bool hmR0SvmClearCtrlIntercept(PVMCPU pVCpu, PSVMVMCB pVmcb, uint64_t fCtrlIntercept)
 {
     if (pVmcb->ctrl.u64InterceptCtrl & fCtrlIntercept)
 …
 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM
         /* Only remove the control intercept if the nested-guest is also not intercepting it! */
-        if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
+        if (CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.GstCtx))
         {
             PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = hmR0SvmGetNestedVmcbCache(pVCpu);
             fRemove = !(pVmcbNstGstCache->u64InterceptCtrl & fCtrlIntercept);
         }
-#else
-        RT_NOREF2(pVCpu, pCtx);
 #endif
         if (fRemove)

@@ 2126 @@
     if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_APIC_TPR)
     {
-        PVM       pVM  = pVCpu->CTX_SUFF(pVM);
-        PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
+        PVM pVM = pVCpu->CTX_SUFF(pVM);
         if (   PDMHasApic(pVM)
             && APICIsEnabled(pVCpu))
 …
             /* If there are interrupts pending, intercept LSTAR writes, otherwise don't intercept reads or writes. */
             if (fPendingIntr)
-                hmR0SvmSetMsrPermission(pCtx,  pbMsrBitmap, MSR_K8_LSTAR, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_INTERCEPT_WRITE);
+                hmR0SvmSetMsrPermission(pVCpu, pbMsrBitmap, MSR_K8_LSTAR, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_INTERCEPT_WRITE);
             else
             {
-                hmR0SvmSetMsrPermission(pCtx,  pbMsrBitmap, MSR_K8_LSTAR, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
+                hmR0SvmSetMsrPermission(pVCpu, pbMsrBitmap, MSR_K8_LSTAR, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
                 pVCpu->hm.s.svm.fSyncVTpr = true;
             }

@@ 2226 @@ hmR0SvmMergeVmcbCtrlsNested
  * @param   pVCpu       The cross context virtual CPU structure.
  * @param   pVmcbNstGst Pointer to the nested-guest VM control block.
- * @param   pCtx        Pointer to the nested-guest-CPU context.
  */
-static void hmR0SvmMergeVmcbCtrlsNested(PVMCPU pVCpu, PCCPUMCTX pCtx)
+static void hmR0SvmMergeVmcbCtrlsNested(PVMCPU pVCpu)
 {
     PVM          pVM             = pVCpu->CTX_SUFF(pVM);
     PCSVMVMCB    pVmcb           = pVCpu->hm.s.svm.pVmcb;
-    PSVMVMCB     pVmcbNstGst     = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
+    PSVMVMCB     pVmcbNstGst     = pVCpu->cpum.GstCtx.hwvirt.svm.CTX_SUFF(pVmcb);
     PSVMVMCBCTRL pVmcbNstGstCtrl = &pVmcbNstGst->ctrl;

@@ 2536 @@ hmR0SvmMergeMsrpmNested
  * @param   pHostCpu    Pointer to the physical CPU HM info. struct.
  * @param   pVCpu       The cross context virtual CPU structure.
- * @param   pCtx        Pointer to the nested-guest-CPU context.
  *
  * @remarks No-long-jmp zone!!!
  */
-DECLINLINE(void) hmR0SvmMergeMsrpmNested(PHMGLOBALCPUINFO pHostCpu, PVMCPU pVCpu, PCCPUMCTX pCtx)
+DECLINLINE(void) hmR0SvmMergeMsrpmNested(PHMGLOBALCPUINFO pHostCpu, PVMCPU pVCpu)
 {
     uint64_t const *pu64GstMsrpm    = (uint64_t const *)pVCpu->hm.s.svm.pvMsrBitmap;
-    uint64_t const *pu64NstGstMsrpm = (uint64_t const *)pCtx->hwvirt.svm.CTX_SUFF(pvMsrBitmap);
+    uint64_t const *pu64NstGstMsrpm = (uint64_t const *)pVCpu->cpum.GstCtx.hwvirt.svm.CTX_SUFF(pvMsrBitmap);
     uint64_t       *pu64DstMsrpm    = (uint64_t *)pHostCpu->n.svm.pvNstGstMsrpm;
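The hmR0SvmClearCtrlIntercept and hmR0SvmMergeVmcbCtrlsNested hunks above encode one rule: while a nested hypervisor is active, an intercept may only be dropped when neither party wants it, so merging intercept masks is a bitwise OR and clearing must consult the nested mask. A toy model of that rule (the bit values are invented):

    #include <stdint.h>
    #include <stdio.h>

    #define ICPT_RDTSC  UINT64_C(0x01)   /* toy intercept bits */
    #define ICPT_VINTR  UINT64_C(0x02)

    /* Running a nested guest means honoring both parties' intercepts. */
    static uint64_t mergeIntercepts(uint64_t fGuest, uint64_t fNested)
    {
        return fGuest | fNested;
    }

    /* Clearing is only safe if the nested hypervisor does not rely on it. */
    static uint64_t clearIntercept(uint64_t fCurrent, uint64_t fClear, uint64_t fNested)
    {
        if (!(fNested & fClear))
            fCurrent &= ~fClear;
        return fCurrent;
    }

    int main(void)
    {
        uint64_t fMerged = mergeIntercepts(ICPT_RDTSC, ICPT_VINTR);
        printf("merged  %#llx\n", (unsigned long long)fMerged);
        printf("cleared %#llx\n",
               (unsigned long long)clearIntercept(fMerged, ICPT_RDTSC, ICPT_VINTR));
        return 0;
    }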
@@ 2558 @@ hmR0SvmCacheVmcbNested
  * @returns true if the VMCB was previously already cached, false otherwise.
- * @param   pCtx    Pointer to the guest-CPU context.
+ * @param   pVCpu   The cross context virtual CPU structure.
  *
  * @sa      HMSvmNstGstVmExitNotify.
  */
-static bool hmR0SvmCacheVmcbNested(PVMCPU pVCpu, PCCPUMCTX pCtx)
+static bool hmR0SvmCacheVmcbNested(PVMCPU pVCpu)
 {
 …
     if (!fWasCached)
     {
-        PCSVMVMCB     pVmcbNstGst     = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
+        PCSVMVMCB     pVmcbNstGst     = pVCpu->cpum.GstCtx.hwvirt.svm.CTX_SUFF(pVmcb);
         PCSVMVMCBCTRL pVmcbNstGstCtrl = &pVmcbNstGst->ctrl;
         pVmcbNstGstCache->u16InterceptRdCRx = pVmcbNstGstCtrl->u16InterceptRdCRx;

@@ 2608 @@ hmR0SvmSetupVmcbNested
 static void hmR0SvmSetupVmcbNested(PVMCPU pVCpu)
 {
-    PCCPUMCTX    pCtx            = &pVCpu->cpum.GstCtx;
-    PSVMVMCB     pVmcbNstGst     = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
+    PSVMVMCB     pVmcbNstGst     = pVCpu->cpum.GstCtx.hwvirt.svm.CTX_SUFF(pVmcb);
     PSVMVMCBCTRL pVmcbNstGstCtrl = &pVmcbNstGst->ctrl;

     /*
      * First cache the nested-guest VMCB fields we may potentially modify.
      */
-    bool const fVmcbCached = hmR0SvmCacheVmcbNested(pVCpu, pCtx);
+    bool const fVmcbCached = hmR0SvmCacheVmcbNested(pVCpu);
     if (!fVmcbCached)
     {
 …
     /* Merge the guest and nested-guest intercepts. */
-    hmR0SvmMergeVmcbCtrlsNested(pVCpu, pCtx);
+    hmR0SvmMergeVmcbCtrlsNested(pVCpu);

     /* Update the VMCB clean bits. */

@@ 2803 @@
     PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
-    PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu, pCtx);
+    PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
     PCSVMVMCBSTATESAVE pVmcbGuest = &pVmcb->guest;
     PCSVMVMCBCTRL      pVmcbCtrl  = &pVmcb->ctrl;

@@ 3191 @@ hmR0SvmCallRing3Callback
  * @param   pVCpu           The cross context virtual CPU structure.
  * @param   enmOperation    The operation causing the ring-3 longjump.
- * @param   pvUser          The user argument (pointer to the possibly
- *                          out-of-date guest-CPU context).
+ * @param   pvUser          The user argument, NULL (currently unused).
  */
 static DECLCALLBACK(int) hmR0SvmCallRing3Callback(PVMCPU pVCpu, VMMCALLRING3 enmOperation, void *pvUser)

@@ 3255 @@ hmR0SvmExitToRing3
  * @returns VBox status code.
  * @param   pVCpu   The cross context virtual CPU structure.
- * @param   pCtx    Pointer to the guest-CPU context.
  * @param   rcExit  The reason for exiting to ring-3. Can be
  *                  VINF_VMM_UNKNOWN_RING3_CALL.
  */
-static int hmR0SvmExitToRing3(PVMCPU pVCpu, PCPUMCTX pCtx, int rcExit)
+static int hmR0SvmExitToRing3(PVMCPU pVCpu, int rcExit)
 {
     Assert(pVCpu);
-    Assert(pCtx);
     HMSVM_ASSERT_PREEMPT_SAFE(pVCpu);
 …
                              | CPUM_CHANGED_HIDDEN_SEL_REGS);
     if (   pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging
-        && CPUMIsGuestPagingEnabledEx(pCtx))
+        && CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx))
     {
         CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_GLOBAL_TLB_FLUSH);
 …
     if (rcExit != VINF_EM_RAW_INTERRUPT)
     {
-        Assert(!(pCtx->fExtrn & HMSVM_CPUMCTX_EXTRN_ALL));
+        Assert(!(pVCpu->cpum.GstCtx.fExtrn & HMSVM_CPUMCTX_EXTRN_ALL));
         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
     }

@@ 3331 @@ hmR0SvmUpdateTscOffsetting
  * @param   pVCpu   The cross context virtual CPU structure.
- * @param   pCtx    Pointer to the guest-CPU or nested-guest-CPU context.
  * @param   pVmcb   Pointer to the VM control block.
  *
  * @remarks No-long-jump zone!!!
  */
-static void hmR0SvmUpdateTscOffsetting(PVMCPU pVCpu, PCCPUMCTX pCtx, PSVMVMCB pVmcb)
+static void hmR0SvmUpdateTscOffsetting(PVMCPU pVCpu, PSVMVMCB pVmcb)
 {
 …
     bool fIntercept;
     if (fCanUseRealTsc)
-        fIntercept = hmR0SvmClearCtrlIntercept(pVCpu, pCtx, pVmcb, SVM_CTRL_INTERCEPT_RDTSC | SVM_CTRL_INTERCEPT_RDTSCP);
+        fIntercept = hmR0SvmClearCtrlIntercept(pVCpu, pVmcb, SVM_CTRL_INTERCEPT_RDTSC | SVM_CTRL_INTERCEPT_RDTSCP);
     else
     {
 …
     if (!fIntercept)
     {
+#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
         /* Apply the nested-guest VMCB's TSC offset over the guest TSC offset. */
-        if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
+        if (CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.GstCtx))
             uTscOffset = HMSvmNstGstApplyTscOffset(pVCpu, uTscOffset);
+#endif

         /* Update the TSC offset in the VMCB and the relevant clean bits. */

@@ 3438 @@ hmR0SvmSetPendingXcptPF
  * @param   pVCpu           The cross context virtual CPU structure.
- * @param   pCtx            Pointer to the guest-CPU context.
  * @param   u32ErrCode      The error-code for the page-fault.
  * @param   uFaultAddress   The page fault address (CR2).
  *
  * @remarks This updates the guest CR2 with @a uFaultAddress!
  */
-DECLINLINE(void) hmR0SvmSetPendingXcptPF(PVMCPU pVCpu, PCPUMCTX pCtx, uint32_t u32ErrCode, RTGCUINTPTR uFaultAddress)
+DECLINLINE(void) hmR0SvmSetPendingXcptPF(PVMCPU pVCpu, uint32_t u32ErrCode, RTGCUINTPTR uFaultAddress)
 {
 …
     /* Update CR2 of the guest. */
     HMSVM_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR2);
-    if (pCtx->cr2 != uFaultAddress)
-    {
-        pCtx->cr2 = uFaultAddress;
+    if (pVCpu->cpum.GstCtx.cr2 != uFaultAddress)
+    {
+        pVCpu->cpum.GstCtx.cr2 = uFaultAddress;
         /* The VMCB clean bit for CR2 will be updated while re-loading the guest state. */
         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_CR2);

@@ 3642 @@ hmR0SvmIsIntrShadowActive
  * @returns @c true if the interrupt shadow is active, @c false otherwise.
  * @param   pVCpu   The cross context virtual CPU structure.
- * @param   pCtx    Pointer to the guest-CPU context.
  *
  * @remarks No-long-jump zone!!!
  * @remarks Has side-effects with VMCPU_FF_INHIBIT_INTERRUPTS force-flag.
  */
-DECLINLINE(bool) hmR0SvmIsIntrShadowActive(PVMCPU pVCpu, PCCPUMCTX pCtx)
+static bool hmR0SvmIsIntrShadowActive(PVMCPU pVCpu)
 {
 …
     if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
     {
-        if (pCtx->rip != EMGetInhibitInterruptsPC(pVCpu))
+        if (pVCpu->cpum.GstCtx.rip != EMGetInhibitInterruptsPC(pVCpu))
         {
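hmR0SvmUpdateTscOffsetting works because SVM presents the guest with a shifted timestamp counter, guest_tsc = host_tsc + offset, and — as the HMSvmNstGstApplyTscOffset call above suggests — a nested guest simply stacks a second offset on top. A sketch of the arithmetic, assuming the offsets combine additively:

    #include <stdint.h>
    #include <stdio.h>

    /* Guest TSC = host TSC + VMCB offset (offsets are signed). */
    static uint64_t guestTsc(uint64_t uHostTsc, int64_t offL1)
    {
        return uHostTsc + (uint64_t)offL1;
    }

    /* A nested guest sees the L1 offset plus its own VMCB's offset. */
    static uint64_t nestedGuestTsc(uint64_t uHostTsc, int64_t offL1, int64_t offL2)
    {
        return uHostTsc + (uint64_t)(offL1 + offL2);
    }

    int main(void)
    {
        uint64_t const uHost = 1000000;
        printf("guest:  %llu\n", (unsigned long long)guestTsc(uHost, -250));
        printf("nested: %llu\n", (unsigned long long)nestedGuestTsc(uHost, -250, -50));
        return 0;
    }

When the offset cannot be used (paravirtualized TSC, for instance), the code instead keeps the RDTSC/RDTSCP intercepts set and emulates the reads.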
@@ 3677 @@ hmR0SvmSetIntWindowExiting
  * @param   pVCpu   The cross context virtual CPU structure.
  * @param   pVmcb   Pointer to the VM control block.
- * @param   pCtx    Pointer to the guest-CPU context.
  */
-DECLINLINE(void) hmR0SvmSetIntWindowExiting(PVMCPU pVCpu, PSVMVMCB pVmcb, PCPUMCTX pCtx)
+static void hmR0SvmSetIntWindowExiting(PVMCPU pVCpu, PSVMVMCB pVmcb)
 {
 …
     if (!fEnableIntWindow)
     {
-        Assert(CPUMIsGuestInSvmNestedHwVirtMode(pCtx)); RT_NOREF(pCtx);
+        Assert(CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.GstCtx));
         Log4(("Nested-guest V_IRQ already pending\n"));
     }
 #else
-    RT_NOREF2(pVCpu, pCtx);
     bool const fEnableIntWindow = true;
 #endif

@@ 3729 @@ hmR0SvmClearIntWindowExiting
  * @param   pVCpu   The cross context virtual CPU structure.
  * @param   pVmcb   Pointer to the VM control block.
- * @param   pCtx    Pointer to the guest-CPU context.
  */
-DECLINLINE(void) hmR0SvmClearIntWindowExiting(PVMCPU pVCpu, PSVMVMCB pVmcb, PCPUMCTX pCtx)
+static void hmR0SvmClearIntWindowExiting(PVMCPU pVCpu, PSVMVMCB pVmcb)
 {
     PSVMVMCBCTRL pVmcbCtrl = &pVmcb->ctrl;
 …
         pVmcbCtrl->IntCtrl.n.u1VIrqPending = 0;
         pVmcbCtrl->u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INT_CTRL;
-        hmR0SvmClearCtrlIntercept(pVCpu, pCtx, pVmcb, SVM_CTRL_INTERCEPT_VINTR);
+        hmR0SvmClearCtrlIntercept(pVCpu, pVmcb, SVM_CTRL_INTERCEPT_VINTR);
         Log4(("Cleared VINTR intercept\n"));

@@ 3751 @@ hmR0SvmEvaluatePendingEventNested
  * @returns VBox strict status code.
  * @param   pVCpu   The cross context virtual CPU structure.
- * @param   pCtx    Pointer to the guest-CPU context.
  */
-static VBOXSTRICTRC hmR0SvmEvaluatePendingEventNested(PVMCPU pVCpu, PCPUMCTX pCtx)
-{
+static VBOXSTRICTRC hmR0SvmEvaluatePendingEventNested(PVMCPU pVCpu)
+{
+    PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
     HMSVM_ASSERT_IN_NESTED_GUEST(pCtx);
     HMSVM_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_HWVIRT
 …
     Assert(!pVCpu->hm.s.Event.fPending);
     Assert(pCtx->hwvirt.fGif);
-    PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu, pCtx);
+    PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
     Assert(pVmcb);

     bool const fVirtualGif = CPUMGetSvmNstGstVGif(pCtx);
-    bool const fIntShadow  = hmR0SvmIsIntrShadowActive(pVCpu, pCtx);
+    bool const fIntShadow  = hmR0SvmIsIntrShadowActive(pVCpu);
     bool const fBlockNmi   = VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS);
 …
             hmR0SvmSetCtrlIntercept(pVmcb, SVM_CTRL_INTERCEPT_STGI);
         else
-            hmR0SvmSetIntWindowExiting(pVCpu, pVmcb, pCtx);
+            hmR0SvmSetIntWindowExiting(pVCpu, pVmcb);
     }
 … (the same change at the second hmR0SvmSetIntWindowExiting call site)

@@ 3873 @@ hmR0SvmEvaluatePendingEvent
  * @param   pVCpu   The cross context virtual CPU structure.
- * @param   pCtx    Pointer to the guest-CPU context.
  */
-static void hmR0SvmEvaluatePendingEvent(PVMCPU pVCpu, PCPUMCTX pCtx)
-{
+static void hmR0SvmEvaluatePendingEvent(PVMCPU pVCpu)
+{
+    PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
     HMSVM_ASSERT_NOT_IN_NESTED_GUEST(pCtx);
     HMSVM_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_HWVIRT
 …
     Assert(!pVCpu->hm.s.Event.fPending);
-    PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu, pCtx);
+    PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
     Assert(pVmcb);
 …
-    bool const fIntShadow = hmR0SvmIsIntrShadowActive(pVCpu, pCtx);
+    bool const fIntShadow = hmR0SvmIsIntrShadowActive(pVCpu);
     bool const fBlockInt  = !(pCtx->eflags.u32 & X86_EFL_IF);
     bool const fBlockNmi  = VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS);
 …
             hmR0SvmSetCtrlIntercept(pVmcb, SVM_CTRL_INTERCEPT_STGI);
         else
-            hmR0SvmSetIntWindowExiting(pVCpu, pVmcb, pCtx);
+            hmR0SvmSetIntWindowExiting(pVCpu, pVmcb);
     }
 … (the same change at the second hmR0SvmSetIntWindowExiting call site)

@@ 3974 @@ hmR0SvmInjectPendingEvent
  * @param   pVCpu   The cross context virtual CPU structure.
- * @param   pCtx    Pointer to the guest-CPU context.
  * @param   pVmcb   Pointer to the VM control block.
 …
-static void hmR0SvmInjectPendingEvent(PVMCPU pVCpu, PCCPUMCTX pCtx, PSVMVMCB pVmcb)
+static void hmR0SvmInjectPendingEvent(PVMCPU pVCpu, PSVMVMCB pVmcb)
 {
     Assert(!TRPMHasTrap(pVCpu));
     Assert(!VMMRZCallRing3IsEnabled(pVCpu));

-    bool const fIntShadow = hmR0SvmIsIntrShadowActive(pVCpu, pCtx);
+    bool const fIntShadow = hmR0SvmIsIntrShadowActive(pVCpu);
 #ifdef VBOX_STRICT
+    PCCPUMCTX pCtx     = &pVCpu->cpum.GstCtx;
     bool const fGif      = pCtx->hwvirt.fGif;
     bool       fAllowInt = fGif;

@@ 4080 @@ hmR0SvmReportWorldSwitchError
  *                  VERR_SVM_INVALID_GUEST_STATE for invalid
  *                  guest-state).
- * @param   pCtx    Pointer to the guest-CPU context.
  */
-static void hmR0SvmReportWorldSwitchError(PVMCPU pVCpu, int rcVMRun, PCPUMCTX pCtx)
+static void hmR0SvmReportWorldSwitchError(PVMCPU pVCpu, int rcVMRun)
 {
     HMSVM_ASSERT_PREEMPT_SAFE(pVCpu);
-    HMSVM_ASSERT_NOT_IN_NESTED_GUEST(pCtx);
+    HMSVM_ASSERT_NOT_IN_NESTED_GUEST(&pVCpu->cpum.GstCtx);
     HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
 …
 #ifdef VBOX_STRICT
-        hmR0DumpRegs(pVCpu, pCtx);
-        PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu, pCtx);
+        hmR0DumpRegs(pVCpu, &pVCpu->cpum.GstCtx);
+        PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
         Log4(("ctrl.u32VmcbCleanBits   %#RX32\n", pVmcb->ctrl.u32VmcbCleanBits));
         Log4(("ctrl.u16InterceptRdCRx  %#x\n",    pVmcb->ctrl.u16InterceptRdCRx));
 …
     else
         Log4Func(("rcVMRun=%d\n", rcVMRun));
-
-    NOREF(pCtx);
 }
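The hmR0SvmEvaluatePendingEvent* hunks above gate injection on the global interrupt flag (GIF), the interrupt shadow, EFLAGS.IF and NMI blocking, with NMIs outranking external interrupts. A deliberately simplified standalone model of that priority logic — it ignores the virtual-GIF and nested-guest refinements:

    #include <stdbool.h>
    #include <stdio.h>

    typedef struct
    {
        bool fGif, fIntShadow, fEflIf;
        bool fNmiPending, fIntrPending;
    } VCPUSTATE;

    /* NMIs outrank external interrupts; both wait for GIF and for the
       interrupt shadow to lift, interrupts additionally for EFLAGS.IF. */
    static const char *evaluatePendingEvent(const VCPUSTATE *pState)
    {
        if (!pState->fGif || pState->fIntShadow)
            return "blocked: arrange an exit when the window opens";
        if (pState->fNmiPending)
            return "inject NMI";
        if (pState->fIntrPending && pState->fEflIf)
            return "inject external interrupt";
        return "nothing to inject";
    }

    int main(void)
    {
        VCPUSTATE s = { true, false, true, false, true };
        puts(evaluatePendingEvent(&s));
        return 0;
    }

When the window is closed, the real code requests a VINTR (or STGI) intercept so the CPU exits as soon as injection becomes possible, as seen in the hmR0SvmSetIntWindowExiting calls.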
@@ 4243 @@ hmR0SvmCheckForceFlags
  * @param   pVCpu   The cross context virtual CPU structure.
- * @param   pCtx    Pointer to the guest-CPU context.
  */
-static int hmR0SvmCheckForceFlags(PVMCPU pVCpu, PCPUMCTX pCtx)
+static int hmR0SvmCheckForceFlags(PVMCPU pVCpu)
 {
     Assert(VMMRZCallRing3IsEnabled(pVCpu));
 …
     if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
     {
-        int rc = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4, VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
+        int rc = PGMSyncCR3(pVCpu, pVCpu->cpum.GstCtx.cr0, pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.cr4,
+                            VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
         if (rc != VINF_SUCCESS)
         {

@@ 4320 @@ hmR0SvmPreRunGuestNested
  * @param   pVCpu           The cross context virtual CPU structure.
- * @param   pCtx            Pointer to the nested-guest-CPU context.
  * @param   pSvmTransient   Pointer to the SVM transient structure.
 …
  * @sa      hmR0SvmPreRunGuest.
  */
-static int hmR0SvmPreRunGuestNested(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
-{
+static int hmR0SvmPreRunGuestNested(PVMCPU pVCpu, PSVMTRANSIENT pSvmTransient)
+{
+    PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
     HMSVM_ASSERT_PREEMPT_SAFE(pVCpu);
     HMSVM_ASSERT_IN_NESTED_GUEST(pCtx);
 …
     /* Check force flag actions that might require us to go back to ring-3. */
-    int rc = hmR0SvmCheckForceFlags(pVCpu, pCtx);
+    int rc = hmR0SvmCheckForceFlags(pVCpu);
     if (rc != VINF_SUCCESS)
         return rc;
 …
     else if (!pVCpu->hm.s.Event.fPending)
     {
-        VBOXSTRICTRC rcStrict = hmR0SvmEvaluatePendingEventNested(pVCpu, pCtx);
+        VBOXSTRICTRC rcStrict = hmR0SvmEvaluatePendingEventNested(pVCpu);
         if (   rcStrict != VINF_SUCCESS
             || !CPUMIsGuestInSvmNestedHwVirtMode(pCtx))

@@ 4442 @@ hmR0SvmPreRunGuest
  * @param   pVCpu           The cross context virtual CPU structure.
- * @param   pCtx            Pointer to the guest-CPU context.
  * @param   pSvmTransient   Pointer to the SVM transient structure.
  */
-static int hmR0SvmPreRunGuest(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
-{
+static int hmR0SvmPreRunGuest(PVMCPU pVCpu, PSVMTRANSIENT pSvmTransient)
+{
+    PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
     HMSVM_ASSERT_PREEMPT_SAFE(pVCpu);
     HMSVM_ASSERT_NOT_IN_NESTED_GUEST(pCtx);

     /* Check force flag actions that might require us to go back to ring-3. */
-    int rc = hmR0SvmCheckForceFlags(pVCpu, pCtx);
+    int rc = hmR0SvmCheckForceFlags(pVCpu);
     if (rc != VINF_SUCCESS)
         return rc;
 …
         hmR0SvmTrpmTrapToPendingEvent(pVCpu);
     else if (!pVCpu->hm.s.Event.fPending)
-        hmR0SvmEvaluatePendingEvent(pVCpu, pCtx);
+        hmR0SvmEvaluatePendingEvent(pVCpu);

@@ 4546 @@ hmR0SvmPreRunGuestCommitted
  * @param   pVCpu           The cross context virtual CPU structure.
- * @param   pCtx            Pointer to the guest-CPU context.
  * @param   pSvmTransient   Pointer to the SVM transient structure.
 …
-static void hmR0SvmPreRunGuestCommitted(PVMCPU pVCpu, PCCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
+static void hmR0SvmPreRunGuestCommitted(PVMCPU pVCpu, PSVMTRANSIENT pSvmTransient)
 {
     Assert(!VMMRZCallRing3IsEnabled(pVCpu));
 …
     PSVMVMCB pVmcb = pSvmTransient->pVmcb;

-    hmR0SvmInjectPendingEvent(pVCpu, pCtx, pVmcb);
+    hmR0SvmInjectPendingEvent(pVCpu, pVmcb);

     if (!CPUMIsGuestFPUStateActive(pVCpu))
 …
         || fMigratedHostCpu)
     {
-        hmR0SvmUpdateTscOffsetting(pVCpu, pCtx, pVmcb);
+        hmR0SvmUpdateTscOffsetting(pVCpu, pVmcb);
         pSvmTransient->fUpdateTscOffsetting = false;
     }
 …
     /* Store status of the shared guest-host state at the time of VMRUN. */
 #if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
-    if (CPUMIsGuestInLongModeEx(pCtx))
+    if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx))
     {
 …
     else
     {
-        hmR0SvmMergeMsrpmNested(pHostCpu, pVCpu, pCtx);
+        hmR0SvmMergeMsrpmNested(pHostCpu, pVCpu);

         /* Update the nested-guest VMCB with the newly merged MSRPM (clean bits updated below). */
 …
     ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, true);   /* Used for TLB flushing, set this across the world switch. */
     /* Flush the appropriate tagged-TLB entries. */
-    hmR0SvmFlushTaggedTlb(pVCpu, pCtx, pVmcb, pHostCpu);
+    hmR0SvmFlushTaggedTlb(pVCpu, pVmcb, pHostCpu);
     Assert(pVCpu->hm.s.idLastCpu == idHostCpu);
 …
         if (uGuestTscAux != pVCpu->hm.s.u64HostTscAux)
             ASMWrMsr(MSR_K8_TSC_AUX, uGuestTscAux);
-        hmR0SvmSetMsrPermission(pCtx,  pbMsrBitmap, MSR_K8_TSC_AUX, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
+        hmR0SvmSetMsrPermission(pVCpu, pbMsrBitmap, MSR_K8_TSC_AUX, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
         pSvmTransient->fRestoreTscAuxMsr = true;
     }
     else
     {
-        hmR0SvmSetMsrPermission(pCtx,  pbMsrBitmap, MSR_K8_TSC_AUX, SVMMSREXIT_INTERCEPT_READ, SVMMSREXIT_INTERCEPT_WRITE);
+        hmR0SvmSetMsrPermission(pVCpu, pbMsrBitmap, MSR_K8_TSC_AUX, SVMMSREXIT_INTERCEPT_READ, SVMMSREXIT_INTERCEPT_WRITE);
         pSvmTransient->fRestoreTscAuxMsr = false;
     }
 …
-    bool const fSupportsVmcbCleanBits = hmR0SvmSupportsVmcbCleanBits(pVCpu, pCtx);
+    bool const fSupportsVmcbCleanBits = hmR0SvmSupportsVmcbCleanBits(pVCpu);
     if (!fSupportsVmcbCleanBits)
         pVmcb->ctrl.u32VmcbCleanBits = 0;

@@ 4676 @@ hmR0SvmRunGuest
  * @returns VBox strict status code.
  * @param   pVCpu       The cross context virtual CPU structure.
- * @param   pCtx        Pointer to the guest-CPU context.
  * @param   HCPhysVmcb  The host physical address of the VMCB.
  *
  * @remarks No-long-jump zone!!!
  */
-DECLINLINE(int) hmR0SvmRunGuest(PVMCPU pVCpu, PCPUMCTX pCtx, RTHCPHYS HCPhysVmcb)
+DECLINLINE(int) hmR0SvmRunGuest(PVMCPU pVCpu, RTHCPHYS HCPhysVmcb)
 {
     /* Mark that HM is the keeper of all guest-CPU registers now that we're going to execute guest code. */
+    PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
     pCtx->fExtrn |= HMSVM_CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_KEEPER_HM;

@@ 4729 @@ hmR0SvmPostRunGuest
  * @param   pVCpu           The cross context virtual CPU structure.
- * @param   pCtx            Pointer to the guest-CPU context. The data maybe
- *                          out-of-sync. Make sure to update the required fields
- *                          before using them.
  * @param   pSvmTransient   Pointer to the SVM transient structure.
  * @param   rcVMRun         Return code of VMRUN.
 …
-static void hmR0SvmPostRunGuest(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient, int rcVMRun)
+static void hmR0SvmPostRunGuest(PVMCPU pVCpu, PSVMTRANSIENT pSvmTransient, int rcVMRun)
 {
     Assert(!VMMRZCallRing3IsEnabled(pVCpu));
 …
 #ifdef HMSVM_SYNC_FULL_GUEST_STATE
     hmR0SvmImportGuestState(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
-    Assert(!(pCtx->fExtrn & HMSVM_CPUMCTX_EXTRN_ALL));
+    Assert(!(pVCpu->cpum.GstCtx.fExtrn & HMSVM_CPUMCTX_EXTRN_ALL));
 #else
 …
 #ifdef DEBUG_ramshankar
-    if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
+    if (CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.GstCtx))
     {
         hmR0SvmImportGuestState(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
-        hmR0SvmLogState(pVCpu, pVmcb, pCtx, "hmR0SvmPostRunGuestNested", HMSVM_LOG_ALL & ~HMSVM_LOG_LBR, 0 /* uVerbose */);
+        hmR0SvmLogState(pVCpu, pVmcb, pVCpu->cpum.GstCtx, "hmR0SvmPostRunGuestNested", HMSVM_LOG_ALL & ~HMSVM_LOG_LBR,
+                        0 /* uVerbose */);
     }
 #endif
 …
     HMSVM_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
     EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_SVM, pSvmTransient->u64ExitCode & EMEXIT_F_TYPE_MASK),
-                     pCtx->cs.u64Base + pCtx->rip, uHostTsc);
+                     pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.rip, uHostTsc);
 }

@@ 4859 @@ hmR0SvmRunGuestCodeNormal
  * @param   pVCpu    The cross context virtual CPU structure.
- * @param   pCtx     Pointer to the guest-CPU context.
  * @param   pcLoops  Pointer to the number of executed loops.
  */
-static int hmR0SvmRunGuestCodeNormal(PVMCPU pVCpu, PCPUMCTX pCtx, uint32_t *pcLoops)
+static int hmR0SvmRunGuestCodeNormal(PVMCPU pVCpu, uint32_t *pcLoops)
 {
     uint32_t const cMaxResumeLoops = pVCpu->CTX_SUFF(pVM)->hm.s.cMaxResumeLoops;
 …
         STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
-        rc = hmR0SvmPreRunGuest(pVCpu, pCtx, &SvmTransient);
+        rc = hmR0SvmPreRunGuest(pVCpu, &SvmTransient);
         if (rc != VINF_SUCCESS)
             break;
 …
-        hmR0SvmPreRunGuestCommitted(pVCpu, pCtx, &SvmTransient);
-        rc = hmR0SvmRunGuest(pVCpu, pCtx, pVCpu->hm.s.svm.HCPhysVmcb);
+        hmR0SvmPreRunGuestCommitted(pVCpu, &SvmTransient);
+        rc = hmR0SvmRunGuest(pVCpu, pVCpu->hm.s.svm.HCPhysVmcb);

         /* Restore any residual host-state and save any bits shared between host and guest
            into the guest-CPU state.  Re-enables interrupts! */
-        hmR0SvmPostRunGuest(pVCpu, pCtx, &SvmTransient, rc);
+        hmR0SvmPostRunGuest(pVCpu, &SvmTransient, rc);

         if (RT_UNLIKELY(   rc != VINF_SUCCESS      /* Check for VMRUN errors. */
 …
             rc = VERR_SVM_INVALID_GUEST_STATE;
             STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatPreExit, x);
-            hmR0SvmReportWorldSwitchError(pVCpu, rc, pCtx);
+            hmR0SvmReportWorldSwitchError(pVCpu, rc);
             break;
         }
 …
         STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatPreExit, &pVCpu->hm.s.StatExitHandling, x);
-        VBOXVMM_R0_HMSVM_VMEXIT(pVCpu, pCtx, SvmTransient.u64ExitCode, pVCpu->hm.s.svm.pVmcb);
-        rc = hmR0SvmHandleExit(pVCpu, pCtx, &SvmTransient);
+        VBOXVMM_R0_HMSVM_VMEXIT(pVCpu, &pVCpu->cpum.GstCtx, SvmTransient.u64ExitCode, pVCpu->hm.s.svm.pVmcb);
+        rc = hmR0SvmHandleExit(pVCpu, &SvmTransient);
         STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitHandling, x);
         if (rc != VINF_SUCCESS)
*/ … … 4905 4864 rc = VERR_SVM_INVALID_GUEST_STATE; 4906 4865 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatPreExit, x); 4907 hmR0SvmReportWorldSwitchError(pVCpu, rc , pCtx);4866 hmR0SvmReportWorldSwitchError(pVCpu, rc); 4908 4867 break; 4909 4868 } … … 4912 4871 HMSVM_EXITCODE_STAM_COUNTER_INC(SvmTransient.u64ExitCode); 4913 4872 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatPreExit, &pVCpu->hm.s.StatExitHandling, x); 4914 VBOXVMM_R0_HMSVM_VMEXIT(pVCpu, pCtx, SvmTransient.u64ExitCode, pVCpu->hm.s.svm.pVmcb);4915 rc = hmR0SvmHandleExit(pVCpu, pCtx,&SvmTransient);4873 VBOXVMM_R0_HMSVM_VMEXIT(pVCpu, &pVCpu->cpum.GstCtx, SvmTransient.u64ExitCode, pVCpu->hm.s.svm.pVmcb); 4874 rc = hmR0SvmHandleExit(pVCpu, &SvmTransient); 4916 4875 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitHandling, x); 4917 4876 if (rc != VINF_SUCCESS) … … 4935 4894 * @returns VBox status code. 4936 4895 * @param pVCpu The cross context virtual CPU structure. 4937 * @param pCtx Pointer to the guest-CPU context.4938 4896 * @param pcLoops Pointer to the number of executed loops. 4939 4897 */ 4940 static int hmR0SvmRunGuestCodeStep(PVMCPU pVCpu, PCPUMCTX pCtx,uint32_t *pcLoops)4898 static int hmR0SvmRunGuestCodeStep(PVMCPU pVCpu, uint32_t *pcLoops) 4941 4899 { 4942 4900 uint32_t const cMaxResumeLoops = pVCpu->CTX_SUFF(pVM)->hm.s.cMaxResumeLoops; … … 4949 4907 SvmTransient.pVmcb = pVCpu->hm.s.svm.pVmcb; 4950 4908 4909 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 4951 4910 uint16_t uCsStart = pCtx->cs.Sel; 4952 4911 uint64_t uRipStart = pCtx->rip; … … 4963 4922 ring-3. This bugger disables interrupts on VINF_SUCCESS! */ 4964 4923 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x); 4965 rc = hmR0SvmPreRunGuest(pVCpu, pCtx,&SvmTransient);4924 rc = hmR0SvmPreRunGuest(pVCpu, &SvmTransient); 4966 4925 if (rc != VINF_SUCCESS) 4967 4926 break; … … 4975 4934 VMMRZCallRing3Disable(pVCpu); 4976 4935 VMMRZCallRing3RemoveNotification(pVCpu); 4977 hmR0SvmPreRunGuestCommitted(pVCpu, pCtx,&SvmTransient);4978 4979 rc = hmR0SvmRunGuest(pVCpu, p Ctx, pVCpu->hm.s.svm.HCPhysVmcb);4936 hmR0SvmPreRunGuestCommitted(pVCpu, &SvmTransient); 4937 4938 rc = hmR0SvmRunGuest(pVCpu, pVCpu->hm.s.svm.HCPhysVmcb); 4980 4939 4981 4940 /* Restore any residual host-state and save any bits shared between host and guest 4982 4941 into the guest-CPU state. Re-enables interrupts! */ 4983 hmR0SvmPostRunGuest(pVCpu, pCtx,&SvmTransient, rc);4942 hmR0SvmPostRunGuest(pVCpu, &SvmTransient, rc); 4984 4943 4985 4944 if (RT_UNLIKELY( rc != VINF_SUCCESS /* Check for VMRUN errors. */ … … 4989 4948 rc = VERR_SVM_INVALID_GUEST_STATE; 4990 4949 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatPreExit, x); 4991 hmR0SvmReportWorldSwitchError(pVCpu, rc , pCtx);4950 hmR0SvmReportWorldSwitchError(pVCpu, rc); 4992 4951 return rc; 4993 4952 } … … 4997 4956 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatPreExit, &pVCpu->hm.s.StatExitHandling, x); 4998 4957 VBOXVMM_R0_HMSVM_VMEXIT(pVCpu, pCtx, SvmTransient.u64ExitCode, pVCpu->hm.s.svm.pVmcb); 4999 rc = hmR0SvmHandleExit(pVCpu, pCtx,&SvmTransient);4958 rc = hmR0SvmHandleExit(pVCpu, &SvmTransient); 5000 4959 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitHandling, x); 5001 4960 if (rc != VINF_SUCCESS) … … 5040 4999 * @returns VBox status code. 5041 5000 * @param pVCpu The cross context virtual CPU structure. 5042 * @param pCtx Pointer to the guest-CPU context.5043 5001 * @param pcLoops Pointer to the number of executed loops. If we're switching 5044 5002 * from the guest-code execution loop to this nested-guest 5045 5003 * execution loop pass the remainder value, else pass 0. 
5046 5004 */ 5047 static int hmR0SvmRunGuestCodeNested(PVMCPU pVCpu, PCPUMCTX pCtx, uint32_t *pcLoops) 5048 { 5005 static int hmR0SvmRunGuestCodeNested(PVMCPU pVCpu, uint32_t *pcLoops) 5006 { 5007 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 5049 5008 HMSVM_ASSERT_IN_NESTED_GUEST(pCtx); 5050 5009 Assert(pcLoops); … … 5066 5025 ring-3. This bugger disables interrupts on VINF_SUCCESS! */ 5067 5026 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x); 5068 rc = hmR0SvmPreRunGuestNested(pVCpu, pCtx,&SvmTransient);5027 rc = hmR0SvmPreRunGuestNested(pVCpu, &SvmTransient); 5069 5028 if ( rc != VINF_SUCCESS 5070 5029 || !CPUMIsGuestInSvmNestedHwVirtMode(pCtx)) … … 5079 5038 * better than a kernel panic. This also disables flushing of the R0-logger instance. 5080 5039 */ 5081 hmR0SvmPreRunGuestCommitted(pVCpu, pCtx,&SvmTransient);5082 5083 rc = hmR0SvmRunGuest(pVCpu, pCtx , pCtx->hwvirt.svm.HCPhysVmcb);5040 hmR0SvmPreRunGuestCommitted(pVCpu, &SvmTransient); 5041 5042 rc = hmR0SvmRunGuest(pVCpu, pCtx->hwvirt.svm.HCPhysVmcb); 5084 5043 5085 5044 /* Restore any residual host-state and save any bits shared between host and guest 5086 5045 into the guest-CPU state. Re-enables interrupts! */ 5087 hmR0SvmPostRunGuest(pVCpu, pCtx,&SvmTransient, rc);5046 hmR0SvmPostRunGuest(pVCpu, &SvmTransient, rc); 5088 5047 5089 5048 if (RT_LIKELY( rc == VINF_SUCCESS … … 5107 5066 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatPreExit, &pVCpu->hm.s.StatExitHandling, x); 5108 5067 VBOXVMM_R0_HMSVM_VMEXIT(pVCpu, pCtx, SvmTransient.u64ExitCode, pCtx->hwvirt.svm.CTX_SUFF(pVmcb)); 5109 rc = hmR0SvmHandleExitNested(pVCpu, pCtx,&SvmTransient);5068 rc = hmR0SvmHandleExitNested(pVCpu, &SvmTransient); 5110 5069 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitHandling, x); 5111 5070 if ( rc != VINF_SUCCESS … … 5133 5092 * @returns Strict VBox status code. 5134 5093 * @param pVCpu The cross context virtual CPU structure. 5135 * @param pCtx Pointer to the guest-CPU context. 5136 */ 5137 VMMR0DECL(VBOXSTRICTRC) SVMR0RunGuestCode(PVMCPU pVCpu, PCPUMCTX pCtx) 5094 */ 5095 VMMR0DECL(VBOXSTRICTRC) SVMR0RunGuestCode(PVMCPU pVCpu) 5138 5096 { 5139 5097 Assert(VMMRZCallRing3IsEnabled(pVCpu)); 5140 5098 HMSVM_ASSERT_PREEMPT_SAFE(pVCpu); 5141 VMMRZCallRing3SetNotification(pVCpu, hmR0SvmCallRing3Callback, pCtx);5099 VMMRZCallRing3SetNotification(pVCpu, hmR0SvmCallRing3Callback, NULL /* pvUser */); 5142 5100 5143 5101 uint32_t cLoops = 0; 5144 5102 int rc; 5145 5103 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM 5146 if (!CPUMIsGuestInSvmNestedHwVirtMode( pCtx))5104 if (!CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.GstCtx)) 5147 5105 #endif 5148 5106 { 5149 5107 if (!pVCpu->hm.s.fSingleInstruction) 5150 rc = hmR0SvmRunGuestCodeNormal(pVCpu, pCtx,&cLoops);5108 rc = hmR0SvmRunGuestCodeNormal(pVCpu, &cLoops); 5151 5109 else 5152 rc = hmR0SvmRunGuestCodeStep(pVCpu, pCtx,&cLoops);5110 rc = hmR0SvmRunGuestCodeStep(pVCpu, &cLoops); 5153 5111 } 5154 5112 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM … … 5162 5120 if (rc == VINF_SVM_VMRUN) 5163 5121 { 5164 rc = hmR0SvmRunGuestCodeNested(pVCpu, pCtx,&cLoops);5122 rc = hmR0SvmRunGuestCodeNested(pVCpu, &cLoops); 5165 5123 if (rc == VINF_SVM_VMEXIT) 5166 5124 rc = VINF_SUCCESS; … … 5175 5133 5176 5134 /* Prepare to return to ring-3. This will remove longjmp notifications. */ 5177 rc = hmR0SvmExitToRing3(pVCpu, pCtx,rc);5135 rc = hmR0SvmExitToRing3(pVCpu, rc); 5178 5136 Assert(!VMMRZCallRing3IsNotificationSet(pVCpu)); 5179 5137 return rc; … … 5209 5167 * @returns VBox status code (informational status codes included). 
5210 5168 * @param pVCpu The cross context virtual CPU structure. 5211 * @param pCtx Pointer to the guest-CPU context.5212 5169 * @param pSvmTransient Pointer to the SVM transient structure. 5213 5170 */ 5214 static int hmR0SvmHandleExitNested(PVMCPU pVCpu, P CPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)5215 { 5216 HMSVM_ASSERT_IN_NESTED_GUEST( pCtx);5171 static int hmR0SvmHandleExitNested(PVMCPU pVCpu, PSVMTRANSIENT pSvmTransient) 5172 { 5173 HMSVM_ASSERT_IN_NESTED_GUEST(&pVCpu->cpum.GstCtx); 5217 5174 Assert(pSvmTransient->u64ExitCode != SVM_EXIT_INVALID); 5218 5175 Assert(pSvmTransient->u64ExitCode <= SVM_EXIT_MAX); … … 5221 5178 * HMSVM_CPUMCTX_EXTRN_ALL breaks nested guests (XP Pro, DSL etc.), see 5222 5179 * also HMSvmNstGstVmExitNotify(). */ 5223 #define NST_GST_VMEXIT_CALL_RET(a_pVCpu, a_ pCtx, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \5180 #define NST_GST_VMEXIT_CALL_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \ 5224 5181 do { \ 5225 5182 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL); \ … … 5231 5188 * nested-guest. If it isn't, it should be handled by the (outer) guest. 5232 5189 */ 5233 PSVMVMCB pVmcbNstGst = p Ctx->hwvirt.svm.CTX_SUFF(pVmcb);5190 PSVMVMCB pVmcbNstGst = pVCpu->cpum.GstCtx.hwvirt.svm.CTX_SUFF(pVmcb); 5234 5191 PSVMVMCBCTRL pVmcbNstGstCtrl = &pVmcbNstGst->ctrl; 5235 5192 uint64_t const uExitCode = pVmcbNstGstCtrl->u64ExitCode; … … 5243 5200 { 5244 5201 if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_CPUID)) 5245 NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx,uExitCode, uExitInfo1, uExitInfo2);5246 return hmR0SvmExitCpuid(pVCpu, p Ctx, pSvmTransient);5202 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2); 5203 return hmR0SvmExitCpuid(pVCpu, pSvmTransient); 5247 5204 } 5248 5205 … … 5250 5207 { 5251 5208 if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_RDTSC)) 5252 NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx,uExitCode, uExitInfo1, uExitInfo2);5253 return hmR0SvmExitRdtsc(pVCpu, p Ctx, pSvmTransient);5209 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2); 5210 return hmR0SvmExitRdtsc(pVCpu, pSvmTransient); 5254 5211 } 5255 5212 … … 5257 5214 { 5258 5215 if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_RDTSCP)) 5259 NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx,uExitCode, uExitInfo1, uExitInfo2);5260 return hmR0SvmExitRdtscp(pVCpu, p Ctx, pSvmTransient);5216 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2); 5217 return hmR0SvmExitRdtscp(pVCpu, pSvmTransient); 5261 5218 } 5262 5219 … … 5264 5221 { 5265 5222 if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_MONITOR)) 5266 NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx,uExitCode, uExitInfo1, uExitInfo2);5267 return hmR0SvmExitMonitor(pVCpu, p Ctx, pSvmTransient);5223 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2); 5224 return hmR0SvmExitMonitor(pVCpu, pSvmTransient); 5268 5225 } 5269 5226 … … 5271 5228 { 5272 5229 if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_MWAIT)) 5273 NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx,uExitCode, uExitInfo1, uExitInfo2);5274 return hmR0SvmExitMwait(pVCpu, p Ctx, pSvmTransient);5230 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2); 5231 return hmR0SvmExitMwait(pVCpu, pSvmTransient); 5275 5232 } 5276 5233 … … 5278 5235 { 5279 5236 if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_HLT)) 5280 NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx,uExitCode, uExitInfo1, uExitInfo2);5281 return hmR0SvmExitHlt(pVCpu, p Ctx, pSvmTransient);5237 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, 
uExitInfo2); 5238 return hmR0SvmExitHlt(pVCpu, pSvmTransient); 5282 5239 } 5283 5240 … … 5286 5243 if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_MSR_PROT)) 5287 5244 { 5288 uint32_t const idMsr = p Ctx->ecx;5245 uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx; 5289 5246 uint16_t offMsrpm; 5290 5247 uint8_t uMsrpmBit; … … 5295 5252 Assert(offMsrpm < SVM_MSRPM_PAGES << X86_PAGE_4K_SHIFT); 5296 5253 5297 uint8_t const *pbMsrBitmap = (uint8_t const *)p Ctx->hwvirt.svm.CTX_SUFF(pvMsrBitmap);5254 uint8_t const *pbMsrBitmap = (uint8_t const *)pVCpu->cpum.GstCtx.hwvirt.svm.CTX_SUFF(pvMsrBitmap); 5298 5255 pbMsrBitmap += offMsrpm; 5299 5256 bool const fInterceptRead = RT_BOOL(*pbMsrBitmap & RT_BIT(uMsrpmBit)); … … 5303 5260 || (fInterceptRead && pVmcbNstGstCtrl->u64ExitInfo1 == SVM_EXIT1_MSR_READ)) 5304 5261 { 5305 NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx,uExitCode, uExitInfo1, uExitInfo2);5262 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2); 5306 5263 } 5307 5264 } … … 5313 5270 */ 5314 5271 Assert(rc == VERR_OUT_OF_RANGE); 5315 NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx,uExitCode, uExitInfo1, uExitInfo2);5272 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2); 5316 5273 } 5317 5274 } 5318 return hmR0SvmExitMsr(pVCpu, p Ctx, pSvmTransient);5275 return hmR0SvmExitMsr(pVCpu, pSvmTransient); 5319 5276 } 5320 5277 … … 5323 5280 if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_IOIO_PROT)) 5324 5281 { 5325 void *pvIoBitmap = p Ctx->hwvirt.svm.CTX_SUFF(pvIoBitmap);5282 void *pvIoBitmap = pVCpu->cpum.GstCtx.hwvirt.svm.CTX_SUFF(pvIoBitmap); 5326 5283 SVMIOIOEXITINFO IoExitInfo; 5327 5284 IoExitInfo.u = pVmcbNstGst->ctrl.u64ExitInfo1; 5328 5285 bool const fIntercept = hmR0SvmIsIoInterceptActive(pvIoBitmap, &IoExitInfo); 5329 5286 if (fIntercept) 5330 NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx,uExitCode, uExitInfo1, uExitInfo2);5287 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2); 5331 5288 } 5332 return hmR0SvmExitIOInstr(pVCpu, p Ctx, pSvmTransient);5289 return hmR0SvmExitIOInstr(pVCpu, pSvmTransient); 5333 5290 } 5334 5291 … … 5343 5300 /* If the nested-guest is intercepting #PFs, cause a #PF #VMEXIT. */ 5344 5301 if (HMIsGuestSvmXcptInterceptSet(pVCpu, X86_XCPT_PF)) 5345 NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx,uExitCode, u32ErrCode, uFaultAddress);5302 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, u32ErrCode, uFaultAddress); 5346 5303 5347 5304 /* If the nested-guest is not intercepting #PFs, forward the #PF to the guest. 
         */
        HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_CR2);
-       hmR0SvmSetPendingXcptPF(pVCpu, pCtx, u32ErrCode, uFaultAddress);
+       hmR0SvmSetPendingXcptPF(pVCpu, u32ErrCode, uFaultAddress);
        return VINF_SUCCESS;
    }
-   return hmR0SvmExitXcptPF(pVCpu, pCtx, pSvmTransient);
+   return hmR0SvmExitXcptPF(pVCpu, pSvmTransient);
}
…
{
    if (HMIsGuestSvmXcptInterceptSet(pVCpu, X86_XCPT_UD))
-       NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2);
+       NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
    hmR0SvmSetPendingXcptUD(pVCpu);
    return VINF_SUCCESS;
}
…
{
    if (HMIsGuestSvmXcptInterceptSet(pVCpu, X86_XCPT_MF))
-       NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2);
-   return hmR0SvmExitXcptMF(pVCpu, pCtx, pSvmTransient);
+       NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
+   return hmR0SvmExitXcptMF(pVCpu, pSvmTransient);
}
…
{
    if (HMIsGuestSvmXcptInterceptSet(pVCpu, X86_XCPT_DB))
-       NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2);
-   return hmR0SvmNestedExitXcptDB(pVCpu, pCtx, pSvmTransient);
+       NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
+   return hmR0SvmNestedExitXcptDB(pVCpu, pSvmTransient);
}
…
{
    if (HMIsGuestSvmXcptInterceptSet(pVCpu, X86_XCPT_AC))
-       NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2);
-   return hmR0SvmExitXcptAC(pVCpu, pCtx, pSvmTransient);
+       NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
+   return hmR0SvmExitXcptAC(pVCpu, pSvmTransient);
}
…
{
    if (HMIsGuestSvmXcptInterceptSet(pVCpu, X86_XCPT_BP))
-       NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2);
-   return hmR0SvmNestedExitXcptBP(pVCpu, pCtx, pSvmTransient);
+       NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
+   return hmR0SvmNestedExitXcptBP(pVCpu, pSvmTransient);
}
…
    uint8_t const uCr = uExitCode - SVM_EXIT_READ_CR0;
    if (HMIsGuestSvmReadCRxInterceptSet(pVCpu, uCr))
-       NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2);
-   return hmR0SvmExitReadCRx(pVCpu, pCtx, pSvmTransient);
+       NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
+   return hmR0SvmExitReadCRx(pVCpu, pSvmTransient);
…
    if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_CR0_SEL_WRITE))
-       NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2);
-   return hmR0SvmExitWriteCRx(pVCpu, pCtx, pSvmTransient);
+       NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
+   return hmR0SvmExitWriteCRx(pVCpu, pSvmTransient);
…
    if (HMIsGuestSvmWriteCRxInterceptSet(pVCpu, uCr))
-       NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2);
-   return hmR0SvmExitWriteCRx(pVCpu, pCtx, pSvmTransient);
+       NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
+   return hmR0SvmExitWriteCRx(pVCpu, pSvmTransient);
…
    if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_PAUSE))
-       NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2);
-   return hmR0SvmExitPause(pVCpu, pCtx, pSvmTransient);
+       NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
+   return hmR0SvmExitPause(pVCpu, pSvmTransient);
…
    if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_VINTR))
-       NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2);
-   return hmR0SvmExitUnexpected(pVCpu, pCtx, pSvmTransient);
+       NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
+   return hmR0SvmExitUnexpected(pVCpu, pSvmTransient);
…
     */
    HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK);
-   return hmR0SvmExitIntr(pVCpu, pCtx, pSvmTransient);
+   return hmR0SvmExitIntr(pVCpu, pSvmTransient);
…
    if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_FERR_FREEZE))
-       NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2);
-   return hmR0SvmExitFerrFreeze(pVCpu, pCtx, pSvmTransient);
+       NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
+   return hmR0SvmExitFerrFreeze(pVCpu, pSvmTransient);
…
    if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_INVLPG))
-       NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2);
-   return hmR0SvmExitInvlpg(pVCpu, pCtx, pSvmTransient);
+       NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
+   return hmR0SvmExitInvlpg(pVCpu, pSvmTransient);
…
    if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_WBINVD))
-       NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2);
-   return hmR0SvmExitWbinvd(pVCpu, pCtx, pSvmTransient);
+       NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
+   return hmR0SvmExitWbinvd(pVCpu, pSvmTransient);
…
    if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_INVD))
-       NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2);
-   return hmR0SvmExitInvd(pVCpu, pCtx, pSvmTransient);
+       NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
+   return hmR0SvmExitInvd(pVCpu, pSvmTransient);
…
    if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_RDPMC))
-       NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2);
-   return hmR0SvmExitRdpmc(pVCpu, pCtx, pSvmTransient);
+       NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
+   return hmR0SvmExitRdpmc(pVCpu, pSvmTransient);
…
    uint8_t const uDr = uExitCode - SVM_EXIT_READ_DR0;
    if (HMIsGuestSvmReadDRxInterceptSet(pVCpu, uDr))
-       NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2);
-   return hmR0SvmExitReadDRx(pVCpu, pCtx, pSvmTransient);
+       NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
+   return hmR0SvmExitReadDRx(pVCpu, pSvmTransient);
…
    uint8_t const uDr = uExitCode - SVM_EXIT_WRITE_DR0;
    if (HMIsGuestSvmWriteDRxInterceptSet(pVCpu, uDr))
-       NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2);
-   return hmR0SvmExitWriteDRx(pVCpu, pCtx, pSvmTransient);
+       NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
+   return hmR0SvmExitWriteDRx(pVCpu, pSvmTransient);
…
    uint8_t const uVector = uExitCode - SVM_EXIT_XCPT_0;
    if (HMIsGuestSvmXcptInterceptSet(pVCpu, uVector))
-       NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2);
-   return hmR0SvmExitXcptGeneric(pVCpu, pCtx, pSvmTransient);
+       NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
+   return hmR0SvmExitXcptGeneric(pVCpu, pSvmTransient);
…
    if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_XSETBV))
-       NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2);
-   return hmR0SvmExitXsetbv(pVCpu, pCtx, pSvmTransient);
+       NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
+   return hmR0SvmExitXsetbv(pVCpu, pSvmTransient);
…
    if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
-       NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2);
-   return hmR0SvmExitTaskSwitch(pVCpu, pCtx, pSvmTransient);
+       NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
+   return hmR0SvmExitTaskSwitch(pVCpu, pSvmTransient);
…
    if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_IRET))
-       NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2);
-   return hmR0SvmExitIret(pVCpu, pCtx, pSvmTransient);
+       NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
+   return hmR0SvmExitIret(pVCpu, pSvmTransient);
…
    if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
-       NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2);
-   return hmR0SvmExitShutdown(pVCpu, pCtx, pSvmTransient);
+       NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
+   return hmR0SvmExitShutdown(pVCpu, pSvmTransient);
…
    if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_VMMCALL))
-       NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2);
-   return hmR0SvmExitVmmCall(pVCpu, pCtx, pSvmTransient);
+       NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
+   return hmR0SvmExitVmmCall(pVCpu, pSvmTransient);
…
    if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_CLGI))
-       NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2);
-   return hmR0SvmExitClgi(pVCpu, pCtx, pSvmTransient);
+       NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
+   return hmR0SvmExitClgi(pVCpu, pSvmTransient);
…
    if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_STGI))
-       NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2);
-   return hmR0SvmExitStgi(pVCpu, pCtx, pSvmTransient);
+       NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
+   return hmR0SvmExitStgi(pVCpu, pSvmTransient);
…
    if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_VMLOAD))
-       NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2);
-   return hmR0SvmExitVmload(pVCpu, pCtx, pSvmTransient);
+       NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
+   return hmR0SvmExitVmload(pVCpu, pSvmTransient);
…
    if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_VMSAVE))
-       NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2);
-   return hmR0SvmExitVmsave(pVCpu, pCtx, pSvmTransient);
+       NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
+   return hmR0SvmExitVmsave(pVCpu, pSvmTransient);
…
    if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_INVLPGA))
-       NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2);
-   return hmR0SvmExitInvlpga(pVCpu, pCtx, pSvmTransient);
+       NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
+   return hmR0SvmExitInvlpga(pVCpu, pSvmTransient);
…
    if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_VMRUN))
-       NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2);
-   return hmR0SvmExitVmrun(pVCpu, pCtx, pSvmTransient);
+       NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
+   return hmR0SvmExitVmrun(pVCpu, pSvmTransient);
…
    if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_RSM))
-       NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2);
+       NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
    hmR0SvmSetPendingXcptUD(pVCpu);
    return VINF_SUCCESS;
…
    if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_SKINIT))
-       NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2);
+       NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
    hmR0SvmSetPendingXcptUD(pVCpu);
    return VINF_SUCCESS;
…
    Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging);
-   return hmR0SvmExitNestedPF(pVCpu, pCtx, pSvmTransient);
+   return hmR0SvmExitNestedPF(pVCpu, pSvmTransient);
…
    case SVM_EXIT_INIT:  /* We shouldn't get INIT signals while executing a nested-guest. */
-       return hmR0SvmExitUnexpected(pVCpu, pCtx, pSvmTransient);
+       return hmR0SvmExitUnexpected(pVCpu, pSvmTransient);

    default:
…
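Each case in the nested-guest dispatch above follows the same pattern: if the nested hypervisor registered an intercept for the condition, reflect a #VMEXIT to it; otherwise fall through to the regular host-side exit handler. Below is a minimal, self-contained C sketch of that pattern; the types and helper names are invented stand-ins for illustration, not the real VirtualBox API.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the per-VCPU nested-guest intercept state. */
typedef struct VCPUSIM
{
    bool afCtrlIntercept[256];  /* one flag per (truncated) exit code */
} VCPUSIM;

static bool IsNestedCtrlInterceptSet(const VCPUSIM *pVCpu, unsigned uExitCode)
{
    return pVCpu->afCtrlIntercept[uExitCode & 0xff];
}

static int NestedGuestVmexit(unsigned uExitCode)
{
    printf("reflecting #VMEXIT %#x to the nested hypervisor\n", uExitCode);
    return 0;
}

static int HostHandleExit(unsigned uExitCode)
{
    printf("handling exit %#x in the host\n", uExitCode);
    return 0;
}

/* Mirrors the shape of each case above: check the nested intercept first,
   then fall back to the regular (outer) exit handler. */
static int DispatchNestedExit(VCPUSIM *pVCpu, unsigned uExitCode)
{
    if (IsNestedCtrlInterceptSet(pVCpu, uExitCode))
        return NestedGuestVmexit(uExitCode);
    return HostHandleExit(uExitCode);
}

int main(void)
{
    VCPUSIM VCpu = { { false } };
    VCpu.afCtrlIntercept[0x78] = true;  /* pretend an HLT-like exit is intercepted */
    DispatchNestedExit(&VCpu, 0x78);    /* -> reflected to the nested hypervisor */
    DispatchNestedExit(&VCpu, 0x7b);    /* -> handled by the host */
    return 0;
}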
/**
…
 * @returns VBox status code (informational status codes included).
 * @param   pVCpu           The cross context virtual CPU structure.
-* @param   pCtx            Pointer to the guest-CPU context.
 * @param   pSvmTransient   Pointer to the SVM transient structure.
 */
-static int hmR0SvmHandleExit(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
+static int hmR0SvmHandleExit(PVMCPU pVCpu, PSVMTRANSIENT pSvmTransient)
{
    Assert(pSvmTransient->u64ExitCode != SVM_EXIT_INVALID);
…
    switch (uExitCode)
    {
-       case SVM_EXIT_NPF:     VMEXIT_CALL_RET(0, hmR0SvmExitNestedPF(pVCpu, pCtx, pSvmTransient));
-       case SVM_EXIT_IOIO:    VMEXIT_CALL_RET(0, hmR0SvmExitIOInstr(pVCpu, pCtx, pSvmTransient));
-       case SVM_EXIT_RDTSC:   VMEXIT_CALL_RET(0, hmR0SvmExitRdtsc(pVCpu, pCtx, pSvmTransient));
-       case SVM_EXIT_RDTSCP:  VMEXIT_CALL_RET(0, hmR0SvmExitRdtscp(pVCpu, pCtx, pSvmTransient));
-       case SVM_EXIT_CPUID:   VMEXIT_CALL_RET(0, hmR0SvmExitCpuid(pVCpu, pCtx, pSvmTransient));
-       case SVM_EXIT_XCPT_PF: VMEXIT_CALL_RET(0, hmR0SvmExitXcptPF(pVCpu, pCtx, pSvmTransient));
-       case SVM_EXIT_MSR:     VMEXIT_CALL_RET(0, hmR0SvmExitMsr(pVCpu, pCtx, pSvmTransient));
-       case SVM_EXIT_MONITOR: VMEXIT_CALL_RET(0, hmR0SvmExitMonitor(pVCpu, pCtx, pSvmTransient));
-       case SVM_EXIT_MWAIT:   VMEXIT_CALL_RET(0, hmR0SvmExitMwait(pVCpu, pCtx, pSvmTransient));
-       case SVM_EXIT_HLT:     VMEXIT_CALL_RET(0, hmR0SvmExitHlt(pVCpu, pCtx, pSvmTransient));
+       case SVM_EXIT_NPF:     VMEXIT_CALL_RET(0, hmR0SvmExitNestedPF(pVCpu, pSvmTransient));
+       case SVM_EXIT_IOIO:    VMEXIT_CALL_RET(0, hmR0SvmExitIOInstr(pVCpu, pSvmTransient));
+       case SVM_EXIT_RDTSC:   VMEXIT_CALL_RET(0, hmR0SvmExitRdtsc(pVCpu, pSvmTransient));
+       case SVM_EXIT_RDTSCP:  VMEXIT_CALL_RET(0, hmR0SvmExitRdtscp(pVCpu, pSvmTransient));
+       case SVM_EXIT_CPUID:   VMEXIT_CALL_RET(0, hmR0SvmExitCpuid(pVCpu, pSvmTransient));
+       case SVM_EXIT_XCPT_PF: VMEXIT_CALL_RET(0, hmR0SvmExitXcptPF(pVCpu, pSvmTransient));
+       case SVM_EXIT_MSR:     VMEXIT_CALL_RET(0, hmR0SvmExitMsr(pVCpu, pSvmTransient));
+       case SVM_EXIT_MONITOR: VMEXIT_CALL_RET(0, hmR0SvmExitMonitor(pVCpu, pSvmTransient));
+       case SVM_EXIT_MWAIT:   VMEXIT_CALL_RET(0, hmR0SvmExitMwait(pVCpu, pSvmTransient));
+       case SVM_EXIT_HLT:     VMEXIT_CALL_RET(0, hmR0SvmExitHlt(pVCpu, pSvmTransient));

        case SVM_EXIT_XCPT_NMI:   /* Should not occur, SVM_EXIT_NMI is used instead. */
        case SVM_EXIT_INTR:
-       case SVM_EXIT_NMI:     VMEXIT_CALL_RET(0, hmR0SvmExitIntr(pVCpu, pCtx, pSvmTransient));
+       case SVM_EXIT_NMI:     VMEXIT_CALL_RET(0, hmR0SvmExitIntr(pVCpu, pSvmTransient));

        case SVM_EXIT_READ_CR0:
        case SVM_EXIT_READ_CR3:
-       case SVM_EXIT_READ_CR4: VMEXIT_CALL_RET(0, hmR0SvmExitReadCRx(pVCpu, pCtx, pSvmTransient));
+       case SVM_EXIT_READ_CR4: VMEXIT_CALL_RET(0, hmR0SvmExitReadCRx(pVCpu, pSvmTransient));

        case SVM_EXIT_CR0_SEL_WRITE:
…
        case SVM_EXIT_WRITE_CR3:
        case SVM_EXIT_WRITE_CR4:
-       case SVM_EXIT_WRITE_CR8: VMEXIT_CALL_RET(0, hmR0SvmExitWriteCRx(pVCpu, pCtx, pSvmTransient));
+       case SVM_EXIT_WRITE_CR8: VMEXIT_CALL_RET(0, hmR0SvmExitWriteCRx(pVCpu, pSvmTransient));

-       case SVM_EXIT_VINTR:       VMEXIT_CALL_RET(0, hmR0SvmExitVIntr(pVCpu, pCtx, pSvmTransient));
-       case SVM_EXIT_PAUSE:       VMEXIT_CALL_RET(0, hmR0SvmExitPause(pVCpu, pCtx, pSvmTransient));
-       case SVM_EXIT_VMMCALL:     VMEXIT_CALL_RET(0, hmR0SvmExitVmmCall(pVCpu, pCtx, pSvmTransient));
-       case SVM_EXIT_INVLPG:      VMEXIT_CALL_RET(0, hmR0SvmExitInvlpg(pVCpu, pCtx, pSvmTransient));
-       case SVM_EXIT_WBINVD:      VMEXIT_CALL_RET(0, hmR0SvmExitWbinvd(pVCpu, pCtx, pSvmTransient));
-       case SVM_EXIT_INVD:        VMEXIT_CALL_RET(0, hmR0SvmExitInvd(pVCpu, pCtx, pSvmTransient));
-       case SVM_EXIT_RDPMC:       VMEXIT_CALL_RET(0, hmR0SvmExitRdpmc(pVCpu, pCtx, pSvmTransient));
-       case SVM_EXIT_IRET:        VMEXIT_CALL_RET(0, hmR0SvmExitIret(pVCpu, pCtx, pSvmTransient));
-       case SVM_EXIT_XCPT_UD:     VMEXIT_CALL_RET(0, hmR0SvmExitXcptUD(pVCpu, pCtx, pSvmTransient));
-       case SVM_EXIT_XCPT_MF:     VMEXIT_CALL_RET(0, hmR0SvmExitXcptMF(pVCpu, pCtx, pSvmTransient));
-       case SVM_EXIT_XCPT_DB:     VMEXIT_CALL_RET(0, hmR0SvmExitXcptDB(pVCpu, pCtx, pSvmTransient));
-       case SVM_EXIT_XCPT_AC:     VMEXIT_CALL_RET(0, hmR0SvmExitXcptAC(pVCpu, pCtx, pSvmTransient));
-       case SVM_EXIT_XCPT_BP:     VMEXIT_CALL_RET(0, hmR0SvmExitXcptBP(pVCpu, pCtx, pSvmTransient));
-       case SVM_EXIT_XSETBV:      VMEXIT_CALL_RET(0, hmR0SvmExitXsetbv(pVCpu, pCtx, pSvmTransient));
-       case SVM_EXIT_FERR_FREEZE: VMEXIT_CALL_RET(0, hmR0SvmExitFerrFreeze(pVCpu, pCtx, pSvmTransient));
+       case SVM_EXIT_VINTR:       VMEXIT_CALL_RET(0, hmR0SvmExitVIntr(pVCpu, pSvmTransient));
+       case SVM_EXIT_PAUSE:       VMEXIT_CALL_RET(0, hmR0SvmExitPause(pVCpu, pSvmTransient));
+       case SVM_EXIT_VMMCALL:     VMEXIT_CALL_RET(0, hmR0SvmExitVmmCall(pVCpu, pSvmTransient));
+       case SVM_EXIT_INVLPG:      VMEXIT_CALL_RET(0, hmR0SvmExitInvlpg(pVCpu, pSvmTransient));
+       case SVM_EXIT_WBINVD:      VMEXIT_CALL_RET(0, hmR0SvmExitWbinvd(pVCpu, pSvmTransient));
+       case SVM_EXIT_INVD:        VMEXIT_CALL_RET(0, hmR0SvmExitInvd(pVCpu, pSvmTransient));
+       case SVM_EXIT_RDPMC:       VMEXIT_CALL_RET(0, hmR0SvmExitRdpmc(pVCpu, pSvmTransient));
+       case SVM_EXIT_IRET:        VMEXIT_CALL_RET(0, hmR0SvmExitIret(pVCpu, pSvmTransient));
+       case SVM_EXIT_XCPT_UD:     VMEXIT_CALL_RET(0, hmR0SvmExitXcptUD(pVCpu, pSvmTransient));
+       case SVM_EXIT_XCPT_MF:     VMEXIT_CALL_RET(0, hmR0SvmExitXcptMF(pVCpu, pSvmTransient));
+       case SVM_EXIT_XCPT_DB:     VMEXIT_CALL_RET(0, hmR0SvmExitXcptDB(pVCpu, pSvmTransient));
+       case SVM_EXIT_XCPT_AC:     VMEXIT_CALL_RET(0, hmR0SvmExitXcptAC(pVCpu, pSvmTransient));
+       case SVM_EXIT_XCPT_BP:     VMEXIT_CALL_RET(0, hmR0SvmExitXcptBP(pVCpu, pSvmTransient));
+       case SVM_EXIT_XSETBV:      VMEXIT_CALL_RET(0, hmR0SvmExitXsetbv(pVCpu, pSvmTransient));
+       case SVM_EXIT_FERR_FREEZE: VMEXIT_CALL_RET(0, hmR0SvmExitFerrFreeze(pVCpu, pSvmTransient));

        default:
        {
…
            case SVM_EXIT_READ_DR10: case SVM_EXIT_READ_DR11: case SVM_EXIT_READ_DR12: case SVM_EXIT_READ_DR13:
            case SVM_EXIT_READ_DR14: case SVM_EXIT_READ_DR15:
-               VMEXIT_CALL_RET(0, hmR0SvmExitReadDRx(pVCpu, pCtx, pSvmTransient));
+               VMEXIT_CALL_RET(0, hmR0SvmExitReadDRx(pVCpu, pSvmTransient));

            case SVM_EXIT_WRITE_DR0: case SVM_EXIT_WRITE_DR1: case SVM_EXIT_WRITE_DR2: case SVM_EXIT_WRITE_DR3:
…
            case SVM_EXIT_WRITE_DR10: case SVM_EXIT_WRITE_DR11: case SVM_EXIT_WRITE_DR12: case SVM_EXIT_WRITE_DR13:
            case SVM_EXIT_WRITE_DR14: case SVM_EXIT_WRITE_DR15:
-               VMEXIT_CALL_RET(0, hmR0SvmExitWriteDRx(pVCpu, pCtx, pSvmTransient));
+               VMEXIT_CALL_RET(0, hmR0SvmExitWriteDRx(pVCpu, pSvmTransient));

-           case SVM_EXIT_TASK_SWITCH: VMEXIT_CALL_RET(0, hmR0SvmExitTaskSwitch(pVCpu, pCtx, pSvmTransient));
-           case SVM_EXIT_SHUTDOWN:    VMEXIT_CALL_RET(0, hmR0SvmExitShutdown(pVCpu, pCtx, pSvmTransient));
+           case SVM_EXIT_TASK_SWITCH: VMEXIT_CALL_RET(0, hmR0SvmExitTaskSwitch(pVCpu, pSvmTransient));
+           case SVM_EXIT_SHUTDOWN:    VMEXIT_CALL_RET(0, hmR0SvmExitShutdown(pVCpu, pSvmTransient));

            case SVM_EXIT_SMI:
…
                 * If it ever does, we want to know about it so log the exit code and bail.
                 */
-               VMEXIT_CALL_RET(0, hmR0SvmExitUnexpected(pVCpu, pCtx, pSvmTransient));
+               VMEXIT_CALL_RET(0, hmR0SvmExitUnexpected(pVCpu, pSvmTransient));
            }

#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
-           case SVM_EXIT_CLGI:    VMEXIT_CALL_RET(0, hmR0SvmExitClgi(pVCpu, pCtx, pSvmTransient));
-           case SVM_EXIT_STGI:    VMEXIT_CALL_RET(0, hmR0SvmExitStgi(pVCpu, pCtx, pSvmTransient));
-           case SVM_EXIT_VMLOAD:  VMEXIT_CALL_RET(0, hmR0SvmExitVmload(pVCpu, pCtx, pSvmTransient));
-           case SVM_EXIT_VMSAVE:  VMEXIT_CALL_RET(0, hmR0SvmExitVmsave(pVCpu, pCtx, pSvmTransient));
-           case SVM_EXIT_INVLPGA: VMEXIT_CALL_RET(0, hmR0SvmExitInvlpga(pVCpu, pCtx, pSvmTransient));
-           case SVM_EXIT_VMRUN:   VMEXIT_CALL_RET(0, hmR0SvmExitVmrun(pVCpu, pCtx, pSvmTransient));
+           case SVM_EXIT_CLGI:    VMEXIT_CALL_RET(0, hmR0SvmExitClgi(pVCpu, pSvmTransient));
+           case SVM_EXIT_STGI:    VMEXIT_CALL_RET(0, hmR0SvmExitStgi(pVCpu, pSvmTransient));
+           case SVM_EXIT_VMLOAD:  VMEXIT_CALL_RET(0, hmR0SvmExitVmload(pVCpu, pSvmTransient));
+           case SVM_EXIT_VMSAVE:  VMEXIT_CALL_RET(0, hmR0SvmExitVmsave(pVCpu, pSvmTransient));
+           case SVM_EXIT_INVLPGA: VMEXIT_CALL_RET(0, hmR0SvmExitInvlpga(pVCpu, pSvmTransient));
+           case SVM_EXIT_VMRUN:   VMEXIT_CALL_RET(0, hmR0SvmExitVmrun(pVCpu, pSvmTransient));
#else
            case SVM_EXIT_CLGI:
…
            case SVM_EXIT_XCPT_24: case SVM_EXIT_XCPT_25: case SVM_EXIT_XCPT_26: case SVM_EXIT_XCPT_27:
            case SVM_EXIT_XCPT_28: case SVM_EXIT_XCPT_29: case SVM_EXIT_XCPT_30: case SVM_EXIT_XCPT_31:
-               VMEXIT_CALL_RET(0, hmR0SvmExitXcptGeneric(pVCpu, pCtx, pSvmTransient));
+               VMEXIT_CALL_RET(0, hmR0SvmExitXcptGeneric(pVCpu, pSvmTransient));
#endif /* HMSVM_ALWAYS_TRAP_ALL_XCPTS */
…
-#ifdef DEBUG
+#ifdef VBOX_STRICT
/* Is there some generic IPRT define for this that are not in Runtime/internal/\* ?? */
# define HMSVM_ASSERT_PREEMPT_CPUID_VAR() \
…
    } while (0)
…
-# define HMSVM_VALIDATE_EXIT_HANDLER_PARAMS() \
+# define HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pSvmTransient) \
    do { \
-       AssertPtr(pVCpu); \
-       AssertPtr(pCtx); \
-       AssertPtr(pSvmTransient); \
+       AssertPtr((a_pVCpu)); \
+       AssertPtr((a_pSvmTransient)); \
        Assert(ASMIntAreEnabled()); \
-       HMSVM_ASSERT_PREEMPT_SAFE(pVCpu); \
+       HMSVM_ASSERT_PREEMPT_SAFE((a_pVCpu)); \
        HMSVM_ASSERT_PREEMPT_CPUID_VAR(); \
-       Log4Func(("vcpu[%u] -v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-\n", (uint32_t)pVCpu->idCpu)); \
-       HMSVM_ASSERT_PREEMPT_SAFE(pVCpu); \
-       if (VMMR0IsLogFlushDisabled(pVCpu)) \
+       Log4Func(("vcpu[%u] -v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-\n", (a_pVCpu)->idCpu)); \
+       HMSVM_ASSERT_PREEMPT_SAFE((a_pVCpu)); \
+       if (VMMR0IsLogFlushDisabled((a_pVCpu))) \
            HMSVM_ASSERT_PREEMPT_CPUID(); \
    } while (0)
-#else  /* Release builds */
-# define HMSVM_VALIDATE_EXIT_HANDLER_PARAMS() do { NOREF(pVCpu); NOREF(pCtx); NOREF(pSvmTransient); } while (0)
+#else
+# define HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pSvmTransient) \
+   do { \
+       RT_NOREF2(a_pVCpu, a_pSvmTransient); \
+   } while (0)
#endif
…
 * @param   pVCpu           The cross context virtual CPU structure.
-* @param   pCtx            Pointer to the guest-CPU context.
 * @param   pSvmTransient   Pointer to the SVM transient structure.
 *
 * @remarks No-long-jump zone!!!
 */
-static int hmR0SvmCheckExitDueToEventDelivery(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
+static int hmR0SvmCheckExitDueToEventDelivery(PVMCPU pVCpu, PSVMTRANSIENT pSvmTransient)
{
    int rc = VINF_SUCCESS;
-   PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu, pCtx);
+   PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
    HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_CR2);
…
            {
                pSvmTransient->fVectoringPF = true;
-               Log4Func(("IDT: Pending vectoring #PF due to delivery of Ext-Int/NMI. uCR2=%#RX64\n", pCtx->cr2));
+               Log4Func(("IDT: Pending vectoring #PF due to delivery of Ext-Int/NMI. uCR2=%#RX64\n",
+                         pVCpu->cpum.GstCtx.cr2));
            }
            else if (   pVmcb->ctrl.ExitIntInfo.n.u3Type == SVM_EVENT_EXCEPTION
…
                 * This can't happen with shadow paging.
                 */
-               GCPtrFaultAddress = pCtx->cr2;
+               GCPtrFaultAddress = pVCpu->cpum.GstCtx.cr2;
            }
…
                if (fRaiseInfo & IEMXCPTRAISEINFO_PF_PF)
                {
-                   Log4Func(("IDT: Pending vectoring double #PF uCR2=%#RX64\n", pCtx->cr2));
+                   Log4Func(("IDT: Pending vectoring double #PF uCR2=%#RX64\n", pVCpu->cpum.GstCtx.cr2));
                    pSvmTransient->fVectoringDoublePF = true;
                    Assert(rc == VINF_SUCCESS);
…
    }
    Assert(rc == VINF_SUCCESS || rc == VINF_HM_DOUBLE_FAULT || rc == VINF_EM_RESET || rc == VERR_EM_GUEST_CPU_HANG);
-   NOREF(pCtx);
    return rc;
}
…
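hmR0SvmCheckExitDueToEventDelivery above classifies what happens when a #VMEXIT occurred while the CPU was in the middle of delivering a prior event: usually the event is simply re-raised, but two exceptions can collapse into a double fault, and a fault during double-fault delivery escalates to a reset (triple fault). The self-contained C model below is a deliberately rough simplification of that classification; the enum names are invented for illustration and it omits the benign/contributory subtleties the real IEM logic handles.

#include <stdio.h>

/* Simplified stand-ins for the event types and verdicts involved. */
typedef enum { EVT_EXT_INT, EVT_NMI, EVT_EXCEPTION } EVTTYPE;
typedef enum { RAISE_CURRENT, RAISE_DOUBLE_FAULT, RAISE_TRIPLE_FAULT } RAISEVERDICT;

static int IsContributoryXcpt(unsigned uVector)
{
    /* #DE, #TS, #NP, #SS, #GP (plus #PF in this crude model). */
    return uVector == 0 || (uVector >= 10 && uVector <= 13) || uVector == 14;
}

/* Rough model: a contributory exception raised while delivering another one
   escalates to #DF (vector 8); a fault while delivering #DF escalates to a
   triple fault, which the handler above maps to VINF_EM_RESET. */
static RAISEVERDICT EvaluateRecursiveXcpt(EVTTYPE enmPrevType, unsigned uPrevVector, unsigned uCurVector)
{
    if (enmPrevType != EVT_EXCEPTION)
        return RAISE_CURRENT;           /* ext-int/NMI delivery: just re-raise */
    if (uPrevVector == 8)
        return RAISE_TRIPLE_FAULT;      /* fault while delivering #DF */
    if (IsContributoryXcpt(uPrevVector) && IsContributoryXcpt(uCurVector))
        return RAISE_DOUBLE_FAULT;
    return RAISE_CURRENT;
}

int main(void)
{
    printf("%d\n", EvaluateRecursiveXcpt(EVT_EXCEPTION, 13 /*#GP*/, 14 /*#PF*/)); /* double fault */
    printf("%d\n", EvaluateRecursiveXcpt(EVT_EXT_INT, 0, 14));                    /* re-raise */
    return 0;
}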
/**
…
 * @param   pVCpu   The cross context virtual CPU structure.
-* @param   pCtx    Pointer to the guest-CPU context.
 * @param   cb      RIP increment value in bytes.
 *
…
 *          hmR0SvmAdvanceRipDumb!
 */
-DECLINLINE(void) hmR0SvmAdvanceRipHwAssist(PVMCPU pVCpu, PCPUMCTX pCtx, uint32_t cb)
-{
-   bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu, pCtx);
+DECLINLINE(void) hmR0SvmAdvanceRipHwAssist(PVMCPU pVCpu, uint32_t cb)
+{
+   PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
+   bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu);
    if (fSupportsNextRipSave)
    {
-       PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu, pCtx);
+       PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
        Assert(pVmcb);
        Assert(pVmcb->ctrl.u64NextRIP);
…
        pCtx->rip += cb;

-   HMSVM_UPDATE_INTR_SHADOW(pVCpu, pCtx);
+   HMSVM_UPDATE_INTR_SHADOW(pVCpu);
}
…
 * @param   pVCpu     The cross context virtual CPU structure.
-* @param   pCtx      Pointer to the guest-CPU context.
 * @param   cbLikely  The likely instruction length.
 */
-DECLINLINE(uint8_t) hmR0SvmGetInstrLengthHwAssist(PVMCPU pVCpu, PCPUMCTX pCtx, uint8_t cbLikely)
+DECLINLINE(uint8_t) hmR0SvmGetInstrLengthHwAssist(PVMCPU pVCpu, uint8_t cbLikely)
{
    Assert(cbLikely <= 15);   /* See Intel spec. 2.3.11 "AVX Instruction Length" */
-   bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu, pCtx);
+   bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu);
    if (fSupportsNextRipSave)
    {
-       PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu, pCtx);
-       uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pCtx->rip;
+       PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
+       uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip;
        Assert(cbInstr == cbLikely);
        return cbInstr;
…
 * @param   pVCpu   The cross context virtual CPU structure.
-* @param   pCtx    Pointer to the guest-CPU context.
 * @param   cb      RIP increment value in bytes.
 */
-DECLINLINE(void) hmR0SvmAdvanceRipDumb(PVMCPU pVCpu, PCPUMCTX pCtx, uint32_t cb)
-{
-   pCtx->rip += cb;
-   HMSVM_UPDATE_INTR_SHADOW(pVCpu, pCtx);
+DECLINLINE(void) hmR0SvmAdvanceRipDumb(PVMCPU pVCpu, uint32_t cb)
+{
+   pVCpu->cpum.GstCtx.rip += cb;
+   HMSVM_UPDATE_INTR_SHADOW(pVCpu);
}
#undef HMSVM_UPDATE_INTR_SHADOW
…
 * signals (SVM_EXIT_INTR, SVM_EXIT_NMI, SVM_EXIT_FERR_FREEZE, SVM_EXIT_INIT).
 */
-HMSVM_EXIT_DECL hmR0SvmExitIntr(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
-{
-   HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
+HMSVM_EXIT_DECL hmR0SvmExitIntr(PVMCPU pVCpu, PSVMTRANSIENT pSvmTransient)
+{
+   HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);

    if (pSvmTransient->u64ExitCode == SVM_EXIT_NMI)
…
/**
 * \#VMEXIT handler for WBINVD (SVM_EXIT_WBINVD). Conditional \#VMEXIT.
 */
-HMSVM_EXIT_DECL hmR0SvmExitWbinvd(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
-{
-   HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
-
-   hmR0SvmAdvanceRipHwAssist(pVCpu, pCtx, 2);
+HMSVM_EXIT_DECL hmR0SvmExitWbinvd(PVMCPU pVCpu, PSVMTRANSIENT pSvmTransient)
+{
+   HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
+
+   hmR0SvmAdvanceRipHwAssist(pVCpu, 2);
    int rc = VINF_SUCCESS;
    HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);
…
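The two RIP helpers above encapsulate the NRIP_SAVE distinction: on CPUs with next-RIP save, the VMCB reports the address of the instruction following the intercepted one, so RIP can be advanced exactly; otherwise the caller's likely instruction length is used. A self-contained sketch of the same decision, with made-up types standing in for the VMCB and VCPU, assuming the field semantics just described:

#include <stdbool.h>
#include <stdint.h>
#include <assert.h>

typedef struct VMCBSIM { uint64_t u64NextRIP; } VMCBSIM;
typedef struct VCPUSIM
{
    uint64_t rip;
    bool     fNextRipSave;  /* CPU supports the NRIP_SAVE feature */
    VMCBSIM  Vmcb;
} VCPUSIM;

/* Advance RIP past the intercepted instruction: exact when the hardware
   saved the next RIP, best-effort (caller-supplied length) otherwise. */
static void AdvanceRipHwAssist(VCPUSIM *pVCpu, uint32_t cb)
{
    if (pVCpu->fNextRipSave)
    {
        assert(pVCpu->Vmcb.u64NextRIP - pVCpu->rip <= 15); /* max x86 instruction length */
        pVCpu->rip = pVCpu->Vmcb.u64NextRIP;
    }
    else
        pVCpu->rip += cb;
}

int main(void)
{
    VCPUSIM VCpu = { 0x1000, true, { 0x1002 } };
    AdvanceRipHwAssist(&VCpu, 2);   /* e.g. after a 2-byte WBINVD/PAUSE-style opcode */
    return VCpu.rip == 0x1002 ? 0 : 1;
}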
6192 6147 */ 6193 HMSVM_EXIT_DECL hmR0SvmExitInvd(PVMCPU pVCpu, P CPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)6194 { 6195 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS( );6196 6197 hmR0SvmAdvanceRipHwAssist(pVCpu, pCtx,2);6148 HMSVM_EXIT_DECL hmR0SvmExitInvd(PVMCPU pVCpu, PSVMTRANSIENT pSvmTransient) 6149 { 6150 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient); 6151 6152 hmR0SvmAdvanceRipHwAssist(pVCpu, 2); 6198 6153 int rc = VINF_SUCCESS; 6199 6154 HMSVM_CHECK_SINGLE_STEP(pVCpu, rc); … … 6205 6160 * \#VMEXIT handler for INVD (SVM_EXIT_CPUID). Conditional \#VMEXIT. 6206 6161 */ 6207 HMSVM_EXIT_DECL hmR0SvmExitCpuid(PVMCPU pVCpu, P CPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)6208 { 6209 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS( );6162 HMSVM_EXIT_DECL hmR0SvmExitCpuid(PVMCPU pVCpu, PSVMTRANSIENT pSvmTransient) 6163 { 6164 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient); 6210 6165 6211 6166 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX); … … 6216 6171 if (!pExitRec) 6217 6172 { 6218 rcStrict = IEMExecDecodedCpuid(pVCpu, hmR0SvmGetInstrLengthHwAssist(pVCpu, pCtx,2));6173 rcStrict = IEMExecDecodedCpuid(pVCpu, hmR0SvmGetInstrLengthHwAssist(pVCpu, 2)); 6219 6174 if (rcStrict == VINF_IEM_RAISED_XCPT) 6220 6175 rcStrict = VINF_SUCCESS; … … 6226 6181 * Frequent exit or something needing probing. Get state and call EMHistoryExec. 6227 6182 */ 6228 Assert(pCtx == &pVCpu->cpum.GstCtx);6229 6183 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK); 6230 6184 … … 6245 6199 * \#VMEXIT handler for RDTSC (SVM_EXIT_RDTSC). Conditional \#VMEXIT. 6246 6200 */ 6247 HMSVM_EXIT_DECL hmR0SvmExitRdtsc(PVMCPU pVCpu, P CPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)6248 { 6249 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS( );6201 HMSVM_EXIT_DECL hmR0SvmExitRdtsc(PVMCPU pVCpu, PSVMTRANSIENT pSvmTransient) 6202 { 6203 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient); 6250 6204 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK); 6251 VBOXSTRICTRC rcStrict = IEMExecDecodedRdtsc(pVCpu, hmR0SvmGetInstrLengthHwAssist(pVCpu, pCtx,2));6205 VBOXSTRICTRC rcStrict = IEMExecDecodedRdtsc(pVCpu, hmR0SvmGetInstrLengthHwAssist(pVCpu, 2)); 6252 6206 if (rcStrict == VINF_SUCCESS) 6253 6207 pSvmTransient->fUpdateTscOffsetting = true; … … 6262 6216 * \#VMEXIT handler for RDTSCP (SVM_EXIT_RDTSCP). Conditional \#VMEXIT. 6263 6217 */ 6264 HMSVM_EXIT_DECL hmR0SvmExitRdtscp(PVMCPU pVCpu, P CPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)6265 { 6266 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS( );6267 VBOXSTRICTRC rcStrict = IEMExecDecodedRdtscp(pVCpu, hmR0SvmGetInstrLengthHwAssist(pVCpu, pCtx,3));6218 HMSVM_EXIT_DECL hmR0SvmExitRdtscp(PVMCPU pVCpu, PSVMTRANSIENT pSvmTransient) 6219 { 6220 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient); 6221 VBOXSTRICTRC rcStrict = IEMExecDecodedRdtscp(pVCpu, hmR0SvmGetInstrLengthHwAssist(pVCpu, 3)); 6268 6222 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK); 6269 6223 if (rcStrict == VINF_SUCCESS) … … 6279 6233 * \#VMEXIT handler for RDPMC (SVM_EXIT_RDPMC). Conditional \#VMEXIT. 
6280 6234 */ 6281 HMSVM_EXIT_DECL hmR0SvmExitRdpmc(PVMCPU pVCpu, P CPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)6282 { 6283 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS( );6235 HMSVM_EXIT_DECL hmR0SvmExitRdpmc(PVMCPU pVCpu, PSVMTRANSIENT pSvmTransient) 6236 { 6237 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient); 6284 6238 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_SS); 6285 6239 6240 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 6286 6241 int rc = EMInterpretRdpmc(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx)); 6287 6242 if (RT_LIKELY(rc == VINF_SUCCESS)) 6288 6243 { 6289 hmR0SvmAdvanceRipHwAssist(pVCpu, pCtx,2);6244 hmR0SvmAdvanceRipHwAssist(pVCpu, 2); 6290 6245 HMSVM_CHECK_SINGLE_STEP(pVCpu, rc); 6291 6246 } … … 6302 6257 * \#VMEXIT handler for INVLPG (SVM_EXIT_INVLPG). Conditional \#VMEXIT. 6303 6258 */ 6304 HMSVM_EXIT_DECL hmR0SvmExitInvlpg(PVMCPU pVCpu, P CPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)6305 { 6306 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS( );6259 HMSVM_EXIT_DECL hmR0SvmExitInvlpg(PVMCPU pVCpu, PSVMTRANSIENT pSvmTransient) 6260 { 6261 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient); 6307 6262 Assert(!pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging); 6308 6263 6309 6264 VBOXSTRICTRC rcStrict; 6310 bool const fSupportsDecodeAssists = hmR0SvmSupportsDecodeAssists(pVCpu , pCtx);6311 bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu , pCtx);6265 bool const fSupportsDecodeAssists = hmR0SvmSupportsDecodeAssists(pVCpu); 6266 bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu); 6312 6267 if ( fSupportsDecodeAssists 6313 6268 && fSupportsNextRipSave) 6314 6269 { 6315 6270 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK); 6316 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu , pCtx);6317 uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - p Ctx->rip;6271 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu); 6272 uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip; 6318 6273 RTGCPTR const GCPtrPage = pVmcb->ctrl.u64ExitInfo1; 6319 6274 rcStrict = IEMExecDecodedInvlpg(pVCpu, cbInstr, GCPtrPage); … … 6333 6288 * \#VMEXIT handler for HLT (SVM_EXIT_HLT). Conditional \#VMEXIT. 6334 6289 */ 6335 HMSVM_EXIT_DECL hmR0SvmExitHlt(PVMCPU pVCpu, P CPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)6336 { 6337 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS( );6338 6339 hmR0SvmAdvanceRipHwAssist(pVCpu, pCtx,1);6340 int rc = EMShouldContinueAfterHalt(pVCpu, pCtx) ? VINF_SUCCESS : VINF_EM_HALT;6290 HMSVM_EXIT_DECL hmR0SvmExitHlt(PVMCPU pVCpu, PSVMTRANSIENT pSvmTransient) 6291 { 6292 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient); 6293 6294 hmR0SvmAdvanceRipHwAssist(pVCpu, 1); 6295 int rc = EMShouldContinueAfterHalt(pVCpu, &pVCpu->cpum.GstCtx) ? VINF_SUCCESS : VINF_EM_HALT; 6341 6296 HMSVM_CHECK_SINGLE_STEP(pVCpu, rc); 6342 6297 … … 6351 6306 * \#VMEXIT handler for MONITOR (SVM_EXIT_MONITOR). Conditional \#VMEXIT. 
6352 6307 */ 6353 HMSVM_EXIT_DECL hmR0SvmExitMonitor(PVMCPU pVCpu, P CPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)6354 { 6355 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS( );6308 HMSVM_EXIT_DECL hmR0SvmExitMonitor(PVMCPU pVCpu, PSVMTRANSIENT pSvmTransient) 6309 { 6310 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient); 6356 6311 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_SS); 6357 6312 6313 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 6358 6314 int rc = EMInterpretMonitor(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx)); 6359 6315 if (RT_LIKELY(rc == VINF_SUCCESS)) 6360 6316 { 6361 hmR0SvmAdvanceRipHwAssist(pVCpu, pCtx,3);6317 hmR0SvmAdvanceRipHwAssist(pVCpu, 3); 6362 6318 HMSVM_CHECK_SINGLE_STEP(pVCpu, rc); 6363 6319 } … … 6375 6331 * \#VMEXIT handler for MWAIT (SVM_EXIT_MWAIT). Conditional \#VMEXIT. 6376 6332 */ 6377 HMSVM_EXIT_DECL hmR0SvmExitMwait(PVMCPU pVCpu, P CPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)6378 { 6379 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS( );6333 HMSVM_EXIT_DECL hmR0SvmExitMwait(PVMCPU pVCpu, PSVMTRANSIENT pSvmTransient) 6334 { 6335 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient); 6380 6336 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_SS); 6381 6337 6338 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 6382 6339 VBOXSTRICTRC rc2 = EMInterpretMWait(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx)); 6383 6340 int rc = VBOXSTRICTRC_VAL(rc2); … … 6385 6342 || rc == VINF_SUCCESS) 6386 6343 { 6387 hmR0SvmAdvanceRipHwAssist(pVCpu, pCtx,3);6344 hmR0SvmAdvanceRipHwAssist(pVCpu, 3); 6388 6345 6389 6346 if ( rc == VINF_EM_HALT … … 6410 6367 * \#VMEXIT. 6411 6368 */ 6412 HMSVM_EXIT_DECL hmR0SvmExitShutdown(PVMCPU pVCpu, P CPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)6413 { 6414 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS( );6369 HMSVM_EXIT_DECL hmR0SvmExitShutdown(PVMCPU pVCpu, PSVMTRANSIENT pSvmTransient) 6370 { 6371 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient); 6415 6372 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL); 6416 6373 return VINF_EM_RESET; … … 6421 6378 * \#VMEXIT handler for unexpected exits. Conditional \#VMEXIT. 6422 6379 */ 6423 HMSVM_EXIT_DECL hmR0SvmExitUnexpected(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient) 6424 { 6425 RT_NOREF(pCtx); 6426 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu, pCtx); 6380 HMSVM_EXIT_DECL hmR0SvmExitUnexpected(PVMCPU pVCpu, PSVMTRANSIENT pSvmTransient) 6381 { 6382 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu); 6427 6383 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL); 6428 6384 AssertMsgFailed(("hmR0SvmExitUnexpected: ExitCode=%#RX64 uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", pSvmTransient->u64ExitCode, … … 6437 6393 * \#VMEXIT handler for CRx reads (SVM_EXIT_READ_CR*). Conditional \#VMEXIT. 
6438 6394 */ 6439 HMSVM_EXIT_DECL hmR0SvmExitReadCRx(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient) 6440 { 6441 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(); 6442 6395 HMSVM_EXIT_DECL hmR0SvmExitReadCRx(PVMCPU pVCpu, PSVMTRANSIENT pSvmTransient) 6396 { 6397 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient); 6398 6399 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 6443 6400 Log4Func(("CS:RIP=%04x:%#RX64\n", pCtx->cs.Sel, pCtx->rip)); 6444 6401 #ifdef VBOX_WITH_STATISTICS … … 6453 6410 #endif 6454 6411 6455 bool const fSupportsDecodeAssists = hmR0SvmSupportsDecodeAssists(pVCpu , pCtx);6456 bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu , pCtx);6412 bool const fSupportsDecodeAssists = hmR0SvmSupportsDecodeAssists(pVCpu); 6413 bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu); 6457 6414 if ( fSupportsDecodeAssists 6458 6415 && fSupportsNextRipSave) 6459 6416 { 6460 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu , pCtx);6417 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu); 6461 6418 bool const fMovCRx = RT_BOOL(pVmcb->ctrl.u64ExitInfo1 & SVM_EXIT1_MOV_CRX_MASK); 6462 6419 if (fMovCRx) … … 6487 6444 * \#VMEXIT handler for CRx writes (SVM_EXIT_WRITE_CR*). Conditional \#VMEXIT. 6488 6445 */ 6489 HMSVM_EXIT_DECL hmR0SvmExitWriteCRx(PVMCPU pVCpu, P CPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)6490 { 6491 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS( );6446 HMSVM_EXIT_DECL hmR0SvmExitWriteCRx(PVMCPU pVCpu, PSVMTRANSIENT pSvmTransient) 6447 { 6448 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient); 6492 6449 6493 6450 uint64_t const uExitCode = pSvmTransient->u64ExitCode; … … 6496 6453 6497 6454 VBOXSTRICTRC rcStrict = VERR_SVM_IPE_5; 6455 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 6498 6456 bool fDecodedInstr = false; 6499 bool const fSupportsDecodeAssists = hmR0SvmSupportsDecodeAssists(pVCpu , pCtx);6500 bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu , pCtx);6457 bool const fSupportsDecodeAssists = hmR0SvmSupportsDecodeAssists(pVCpu); 6458 bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu); 6501 6459 if ( fSupportsDecodeAssists 6502 6460 && fSupportsNextRipSave) 6503 6461 { 6504 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu , pCtx);6462 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu); 6505 6463 bool const fMovCRx = RT_BOOL(pVmcb->ctrl.u64ExitInfo1 & SVM_EXIT1_MOV_CRX_MASK); 6506 6464 if (fMovCRx) … … 6584 6542 6585 6543 VBOXSTRICTRC rcStrict; 6586 bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu , pCtx);6544 bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu); 6587 6545 if (fSupportsNextRipSave) 6588 6546 { … … 6643 6601 6644 6602 int rc = VINF_SUCCESS; 6645 hmR0SvmAdvanceRipHwAssist(pVCpu, pCtx,2);6603 hmR0SvmAdvanceRipHwAssist(pVCpu, 2); 6646 6604 HMSVM_CHECK_SINGLE_STEP(pVCpu, rc); 6647 6605 return rc; … … 6652 6610 */ 6653 6611 VBOXSTRICTRC rcStrict; 6654 bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu , pCtx);6612 bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu); 6655 6613 if (fSupportsNextRipSave) 6656 6614 { … … 6714 6672 * \#VMEXIT. 
6715 6673 */ 6716 HMSVM_EXIT_DECL hmR0SvmExitMsr(PVMCPU pVCpu, P CPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)6717 { 6718 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS( );6719 6720 PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu , pCtx);6674 HMSVM_EXIT_DECL hmR0SvmExitMsr(PVMCPU pVCpu, PSVMTRANSIENT pSvmTransient) 6675 { 6676 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient); 6677 6678 PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu); 6721 6679 if (pVmcb->ctrl.u64ExitInfo1 == SVM_EXIT1_MSR_READ) 6722 6680 return VBOXSTRICTRC_TODO(hmR0SvmExitReadMsr(pVCpu, pVmcb)); … … 6730 6688 * \#VMEXIT handler for DRx read (SVM_EXIT_READ_DRx). Conditional \#VMEXIT. 6731 6689 */ 6732 HMSVM_EXIT_DECL hmR0SvmExitReadDRx(PVMCPU pVCpu, P CPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)6733 { 6734 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS( );6690 HMSVM_EXIT_DECL hmR0SvmExitReadDRx(PVMCPU pVCpu, PSVMTRANSIENT pSvmTransient) 6691 { 6692 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient); 6735 6693 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL); 6736 6694 … … 6738 6696 6739 6697 /** @todo Stepping with nested-guest. */ 6698 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 6740 6699 if (!CPUMIsGuestInSvmNestedHwVirtMode(pCtx)) 6741 6700 { … … 6800 6759 * \#VMEXIT handler for DRx write (SVM_EXIT_WRITE_DRx). Conditional \#VMEXIT. 6801 6760 */ 6802 HMSVM_EXIT_DECL hmR0SvmExitWriteDRx(PVMCPU pVCpu, P CPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)6803 { 6804 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS( );6761 HMSVM_EXIT_DECL hmR0SvmExitWriteDRx(PVMCPU pVCpu, PSVMTRANSIENT pSvmTransient) 6762 { 6763 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient); 6805 6764 /* For now it's the same since we interpret the instruction anyway. Will change when using of Decode Assist is implemented. */ 6806 int rc = hmR0SvmExitReadDRx(pVCpu, p Ctx, pSvmTransient);6765 int rc = hmR0SvmExitReadDRx(pVCpu, pSvmTransient); 6807 6766 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxWrite); 6808 6767 STAM_COUNTER_DEC(&pVCpu->hm.s.StatExitDRxRead); … … 6814 6773 * \#VMEXIT handler for XCRx write (SVM_EXIT_XSETBV). Conditional \#VMEXIT. 6815 6774 */ 6816 HMSVM_EXIT_DECL hmR0SvmExitXsetbv(PVMCPU pVCpu, P CPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)6817 { 6818 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS( );6775 HMSVM_EXIT_DECL hmR0SvmExitXsetbv(PVMCPU pVCpu, PSVMTRANSIENT pSvmTransient) 6776 { 6777 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient); 6819 6778 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK); 6820 6779 … … 6824 6783 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_XCPT_RAISED_MASK); 6825 6784 6785 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 6826 6786 pVCpu->hm.s.fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0(); 6827 6787 Log4Func(("New XCR0=%#RX64 fLoadSaveGuestXcr0=%d (cr4=%RX64) rcStrict=%Rrc\n", pCtx->aXcr[0], pVCpu->hm.s.fLoadSaveGuestXcr0, … … 6836 6796 * \#VMEXIT handler for I/O instructions (SVM_EXIT_IOIO). Conditional \#VMEXIT. 6837 6797 */ 6838 HMSVM_EXIT_DECL hmR0SvmExitIOInstr(PVMCPU pVCpu, P CPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)6839 { 6840 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS( );6798 HMSVM_EXIT_DECL hmR0SvmExitIOInstr(PVMCPU pVCpu, PSVMTRANSIENT pSvmTransient) 6799 { 6800 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient); 6841 6801 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_SREG_MASK); 6842 6802 … … 6845 6805 static uint32_t const s_aIOOpAnd[8] = { 0, 0xff, 0xffff, 0, 0xffffffff, 0, 0, 0 }; /* AND masks for saving 6846 6806 the result (in AL/AX/EAX). 
+   PVM      pVM   = pVCpu->CTX_SUFF(pVM);
+   PCPUMCTX pCtx  = &pVCpu->cpum.GstCtx;
+   PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
+
    Log4Func(("CS:RIP=%04x:%#RX64\n", pCtx->cs.Sel, pCtx->rip));
-
-   PVM      pVM   = pVCpu->CTX_SUFF(pVM);
-   PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu, pCtx);

    /* Refer AMD spec. 15.10.2 "IN and OUT Behaviour" and Figure 15-2. "EXITINFO1 for IOIO Intercept" for the format. */
…
       only enabling it for Bulldozer and later with NRIP. OS/2 broke on
       2384 Opterons when only checking NRIP. */
-   bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu, pCtx);
+   bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu);
    if (   fSupportsNextRipSave
        && pVM->cpum.ro.GuestFeatures.enmMicroarch >= kCpumMicroarch_AMD_15h_First)
…
    else if (rcStrict == VINF_IOM_R3_IOPORT_READ)
-       HMR0SavePendingIOPortRead(pVCpu, pCtx->rip, pVmcb->ctrl.u64ExitInfo2, IoExitInfo.n.u16Port, uAndVal, cbValue);
+   {
+       HMR0SavePendingIOPortRead(pVCpu, pVCpu->cpum.GstCtx.rip, pVmcb->ctrl.u64ExitInfo2, IoExitInfo.n.u16Port,
+                                 uAndVal, cbValue);
+   }

    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIORead);
…
    CPUMR0DebugStateMaybeSaveGuest(pVCpu, false /*fDr6*/);

-   VBOXSTRICTRC rcStrict2 = DBGFBpCheckIo(pVM, pVCpu, pCtx, IoExitInfo.n.u16Port, cbValue);
+   VBOXSTRICTRC rcStrict2 = DBGFBpCheckIo(pVM, pVCpu, &pVCpu->cpum.GstCtx, IoExitInfo.n.u16Port, cbValue);
    if (rcStrict2 == VINF_EM_RAW_GUEST_TRAP)
    {
…
/**
 * \#VMEXIT handler for Nested Page-faults (SVM_EXIT_NPF). Conditional \#VMEXIT.
 */
-HMSVM_EXIT_DECL hmR0SvmExitNestedPF(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
-{
-   HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
+HMSVM_EXIT_DECL hmR0SvmExitNestedPF(PVMCPU pVCpu, PSVMTRANSIENT pSvmTransient)
+{
+   HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
    HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
-   HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY();
-
-   PVM pVM = pVCpu->CTX_SUFF(pVM);
+   HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY(pVCpu, pSvmTransient);
+
+   PVM      pVM  = pVCpu->CTX_SUFF(pVM);
+   PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    Assert(pVM->hm.s.fNestedPaging);

    /* See AMD spec. 15.25.6 "Nested versus Guest Page Faults, Fault Ordering" for VMCB details for #NPF. */
-   PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu, pCtx);
+   PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
    RTGCPHYS GCPhysFaultAddr = pVmcb->ctrl.u64ExitInfo2;
    uint32_t u32ErrCode      = pVmcb->ctrl.u64ExitInfo1;   /* Note! High bits in EXITINFO1 may contain additional info and are
…
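hmR0SvmExitIOInstr above unpacks EXITINFO1 per AMD's "EXITINFO1 for IOIO Intercept" figure: direction, string/REP flags, operand size and port number are all packed into a single qword (the s_aIOSize/s_aIOOpAnd tables index on the size bits). A hedged, self-contained decoder sketch follows; the field positions reflect my reading of the AMD manual and the code above, so verify them against the spec before relying on them.

#include <stdint.h>
#include <stdio.h>

/* Assumed IOIO EXITINFO1 layout: bit 0 = type (1: IN, 0: OUT), bit 2 = string
   instruction, bit 3 = REP prefix, bits 4..6 = sz8/sz16/sz32 one-hot,
   bits 16..31 = I/O port number. */
typedef struct IOEXITINFO
{
    unsigned fIn;       /* IN vs OUT */
    unsigned fString;   /* INS/OUTS */
    unsigned fRep;      /* REP prefixed */
    unsigned cbValue;   /* access width in bytes */
    uint16_t u16Port;
} IOEXITINFO;

static IOEXITINFO DecodeIoExitInfo(uint64_t u64ExitInfo1)
{
    static const unsigned s_acbSize[8] = { 0, 1, 2, 0, 4, 0, 0, 0 };  /* one-hot -> bytes */
    IOEXITINFO Info;
    Info.fIn     = (unsigned)(u64ExitInfo1 & 1);
    Info.fString = (unsigned)((u64ExitInfo1 >> 2) & 1);
    Info.fRep    = (unsigned)((u64ExitInfo1 >> 3) & 1);
    Info.cbValue = s_acbSize[(u64ExitInfo1 >> 4) & 7];
    Info.u16Port = (uint16_t)(u64ExitInfo1 >> 16);
    return Info;
}

int main(void)
{
    /* An "IN AL, 0x60"-style exit: type=IN, sz8, port 0x60. */
    IOEXITINFO Info = DecodeIoExitInfo((0x60ULL << 16) | (1 << 4) | 1);
    printf("%s port %#x, %u byte(s)\n", Info.fIn ? "IN" : "OUT", Info.u16Port, Info.cbValue);
    return 0;
}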
…
 * \#VMEXIT.
 */
-HMSVM_EXIT_DECL hmR0SvmExitVIntr(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
-{
-   HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
-   HMSVM_ASSERT_NOT_IN_NESTED_GUEST(pCtx);
+HMSVM_EXIT_DECL hmR0SvmExitVIntr(PVMCPU pVCpu, PSVMTRANSIENT pSvmTransient)
+{
+   HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
+   HMSVM_ASSERT_NOT_IN_NESTED_GUEST(&pVCpu->cpum.GstCtx);

    /* Indicate that we no longer need to #VMEXIT when the guest is ready to receive NMIs, it is now ready. */
-   PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu, pCtx);
-   hmR0SvmClearIntWindowExiting(pVCpu, pVmcb, pCtx);
+   PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
+   hmR0SvmClearIntWindowExiting(pVCpu, pVmcb);

    /* Deliver the pending interrupt via hmR0SvmEvaluatePendingEvent() and resume guest execution. */
…
/**
 * \#VMEXIT handler for task switches (SVM_EXIT_TASK_SWITCH). Conditional
 * \#VMEXIT.
 */
-HMSVM_EXIT_DECL hmR0SvmExitTaskSwitch(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
-{
-   HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
-   HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY();
+HMSVM_EXIT_DECL hmR0SvmExitTaskSwitch(PVMCPU pVCpu, PSVMTRANSIENT pSvmTransient)
+{
+   HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
+   HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY(pVCpu, pSvmTransient);

#ifndef HMSVM_ALWAYS_TRAP_TASK_SWITCH
…
/**
 * \#VMEXIT handler for VMMCALL (SVM_EXIT_VMMCALL). Conditional \#VMEXIT.
 */
-HMSVM_EXIT_DECL hmR0SvmExitVmmCall(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
-{
-   HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
+HMSVM_EXIT_DECL hmR0SvmExitVmmCall(PVMCPU pVCpu, PSVMTRANSIENT pSvmTransient)
+{
+   HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
    HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);

    if (pVCpu->CTX_SUFF(pVM)->hm.s.fTprPatchingAllowed)
    {
-       int rc = hmSvmEmulateMovTpr(pVCpu, pCtx);
+       int rc = hmSvmEmulateMovTpr(pVCpu);
        if (rc != VERR_NOT_FOUND)
        {
…
    if (EMAreHypercallInstructionsEnabled(pVCpu))
    {
-       VBOXSTRICTRC rcStrict = GIMHypercall(pVCpu, pCtx);
+       VBOXSTRICTRC rcStrict = GIMHypercall(pVCpu, &pVCpu->cpum.GstCtx);
        if (RT_SUCCESS(rcStrict))
        {
…
            of say VINF_GIM_R3_HYPERCALL. */
            if (rcStrict == VINF_SUCCESS)
-               hmR0SvmAdvanceRipHwAssist(pVCpu, pCtx, 3 /* cbInstr */);
+               hmR0SvmAdvanceRipHwAssist(pVCpu, 3 /* cbInstr */);

            return VBOXSTRICTRC_VAL(rcStrict);
…
/**
 * \#VMEXIT handler for VMMCALL (SVM_EXIT_VMMCALL). Conditional \#VMEXIT.
 */
-HMSVM_EXIT_DECL hmR0SvmExitPause(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
-{
-   HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
-   hmR0SvmAdvanceRipHwAssist(pVCpu, pCtx, 2);
+HMSVM_EXIT_DECL hmR0SvmExitPause(PVMCPU pVCpu, PSVMTRANSIENT pSvmTransient)
+{
+   HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
+   hmR0SvmAdvanceRipHwAssist(pVCpu, 2);
    /** @todo The guest has likely hit a contended spinlock. We might want to
     *        poke a schedule different guest VCPU. */
…
/**
 * \#VMEXIT handler for FERR freeze (SVM_EXIT_FERR_FREEZE). Conditional
 * \#VMEXIT.
 */
-HMSVM_EXIT_DECL hmR0SvmExitFerrFreeze(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
-{
-   HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
+HMSVM_EXIT_DECL hmR0SvmExitFerrFreeze(PVMCPU pVCpu, PSVMTRANSIENT pSvmTransient)
+{
+   HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
    HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_CR0);
-   Assert(!(pCtx->cr0 & X86_CR0_NE));
+   Assert(!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_NE));

    Log4Func(("Raising IRQ 13 in response to #FERR\n"));
…
/**
 * \#VMEXIT handler for IRET (SVM_EXIT_IRET). Conditional \#VMEXIT.
7326 7291 */ 7327 HMSVM_EXIT_DECL hmR0SvmExitIret(PVMCPU pVCpu, P CPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)7328 { 7329 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS( );7292 HMSVM_EXIT_DECL hmR0SvmExitIret(PVMCPU pVCpu, PSVMTRANSIENT pSvmTransient) 7293 { 7294 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient); 7330 7295 7331 7296 /* Clear NMI blocking. */ … … 7334 7299 7335 7300 /* Indicate that we no longer need to #VMEXIT when the guest is ready to receive NMIs, it is now ready. */ 7336 PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu , pCtx);7337 hmR0SvmClearCtrlIntercept(pVCpu, p Ctx, pVmcb, SVM_CTRL_INTERCEPT_IRET);7301 PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu); 7302 hmR0SvmClearCtrlIntercept(pVCpu, pVmcb, SVM_CTRL_INTERCEPT_IRET); 7338 7303 7339 7304 /* Deliver the pending NMI via hmR0SvmEvaluatePendingEvent() and resume guest execution. */ … … 7346 7311 * Conditional \#VMEXIT. 7347 7312 */ 7348 HMSVM_EXIT_DECL hmR0SvmExitXcptPF(PVMCPU pVCpu, P CPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)7349 { 7350 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS( );7313 HMSVM_EXIT_DECL hmR0SvmExitXcptPF(PVMCPU pVCpu, PSVMTRANSIENT pSvmTransient) 7314 { 7315 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient); 7351 7316 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL); 7352 HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY( );7317 HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY(pVCpu, pSvmTransient); 7353 7318 7354 7319 /* See AMD spec. 15.12.15 "#PF (Page Fault)". */ 7355 7320 PVM pVM = pVCpu->CTX_SUFF(pVM); 7356 PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu, pCtx); 7321 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 7322 PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu); 7357 7323 uint32_t uErrCode = pVmcb->ctrl.u64ExitInfo1; 7358 7324 uint64_t const uFaultAddress = pVmcb->ctrl.u64ExitInfo2; … … 7366 7332 { 7367 7333 /* A genuine guest #PF, reflect it to the guest. */ 7368 hmR0SvmSetPendingXcptPF(pVCpu, pCtx,uErrCode, uFaultAddress);7334 hmR0SvmSetPendingXcptPF(pVCpu, uErrCode, uFaultAddress); 7369 7335 Log4Func(("#PF: Guest page fault at %04X:%RGv FaultAddr=%RX64 ErrCode=%#x\n", pCtx->cs.Sel, (RTGCPTR)pCtx->rip, 7370 7336 uFaultAddress, uErrCode)); … … 7462 7428 #endif 7463 7429 7464 hmR0SvmSetPendingXcptPF(pVCpu, pCtx,uErrCode, uFaultAddress);7430 hmR0SvmSetPendingXcptPF(pVCpu, uErrCode, uFaultAddress); 7465 7431 } 7466 7432 else … … 7486 7452 * Conditional \#VMEXIT. 7487 7453 */ 7488 HMSVM_EXIT_DECL hmR0SvmExitXcptUD(PVMCPU pVCpu, P CPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)7489 { 7490 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS( );7491 HMSVM_ASSERT_NOT_IN_NESTED_GUEST( pCtx);7454 HMSVM_EXIT_DECL hmR0SvmExitXcptUD(PVMCPU pVCpu, PSVMTRANSIENT pSvmTransient) 7455 { 7456 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient); 7457 HMSVM_ASSERT_NOT_IN_NESTED_GUEST(&pVCpu->cpum.GstCtx); 7492 7458 7493 7459 /* Paranoia; Ensure we cannot be called as a result of event delivery. */ … … 7500 7466 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL); 7501 7467 uint8_t cbInstr = 0; 7502 VBOXSTRICTRC rcStrict = GIMXcptUD(pVCpu, pCtx, NULL /* pDis */, &cbInstr);7468 VBOXSTRICTRC rcStrict = GIMXcptUD(pVCpu, &pVCpu->cpum.GstCtx, NULL /* pDis */, &cbInstr); 7503 7469 if (rcStrict == VINF_SUCCESS) 7504 7470 { 7505 7471 /* #UD #VMEXIT does not have valid NRIP information, manually advance RIP. See @bugref{7270#c170}. */ 7506 hmR0SvmAdvanceRipDumb(pVCpu, pCtx,cbInstr);7472 hmR0SvmAdvanceRipDumb(pVCpu, cbInstr); 7507 7473 rc = VINF_SUCCESS; 7508 7474 HMSVM_CHECK_SINGLE_STEP(pVCpu, rc); … … 7532 7498 * Conditional \#VMEXIT. 
7533 7499 */ 7534 HMSVM_EXIT_DECL hmR0SvmExitXcptMF(PVMCPU pVCpu, P CPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)7535 { 7536 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS( );7500 HMSVM_EXIT_DECL hmR0SvmExitXcptMF(PVMCPU pVCpu, PSVMTRANSIENT pSvmTransient) 7501 { 7502 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient); 7537 7503 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL); 7538 7504 7505 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 7506 PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu); 7507 7539 7508 /* Paranoia; Ensure we cannot be called as a result of event delivery. */ 7540 PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu, pCtx);7541 7509 Assert(!pVmcb->ctrl.ExitIntInfo.n.u1Valid); NOREF(pVmcb); 7542 7510 … … 7570 7538 * \#VMEXIT. 7571 7539 */ 7572 HMSVM_EXIT_DECL hmR0SvmExitXcptDB(PVMCPU pVCpu, P CPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)7573 { 7574 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS( );7540 HMSVM_EXIT_DECL hmR0SvmExitXcptDB(PVMCPU pVCpu, PSVMTRANSIENT pSvmTransient) 7541 { 7542 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient); 7575 7543 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL); 7576 HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY( );7544 HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY(pVCpu, pSvmTransient); 7577 7545 7578 7546 if (RT_UNLIKELY(pVCpu->hm.s.Event.fPending)) … … 7591 7559 PVM pVM = pVCpu->CTX_SUFF(pVM); 7592 7560 PSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb; 7561 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 7593 7562 int rc = DBGFRZTrap01Handler(pVM, pVCpu, CPUMCTX2CORE(pCtx), pVmcb->guest.u64DR6, pVCpu->hm.s.fSingleInstruction); 7594 7563 if (rc == VINF_EM_RAW_GUEST_TRAP) … … 7626 7595 * Conditional \#VMEXIT. 7627 7596 */ 7628 HMSVM_EXIT_DECL hmR0SvmExitXcptAC(PVMCPU pVCpu, P CPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)7629 { 7630 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS( );7631 HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY( );7597 HMSVM_EXIT_DECL hmR0SvmExitXcptAC(PVMCPU pVCpu, PSVMTRANSIENT pSvmTransient) 7598 { 7599 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient); 7600 HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY(pVCpu, pSvmTransient); 7632 7601 7633 7602 SVMEVENT Event; … … 7646 7615 * Conditional \#VMEXIT. 7647 7616 */ 7648 HMSVM_EXIT_DECL hmR0SvmExitXcptBP(PVMCPU pVCpu, P CPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)7649 { 7650 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS( );7617 HMSVM_EXIT_DECL hmR0SvmExitXcptBP(PVMCPU pVCpu, PSVMTRANSIENT pSvmTransient) 7618 { 7619 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient); 7651 7620 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL); 7652 HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY(); 7653 7621 HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY(pVCpu, pSvmTransient); 7622 7623 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 7654 7624 int rc = DBGFRZTrap03Handler(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx)); 7655 7625 if (rc == VINF_EM_RAW_GUEST_TRAP) … … 7672 7642 * \#VMEXIT handler for generic exceptions. Conditional \#VMEXIT. 
7673 7643 */ 7674 HMSVM_EXIT_DECL hmR0SvmExitXcptGeneric(PVMCPU pVCpu, P CPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)7675 { 7676 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS( );7677 HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY( );7678 7679 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu , pCtx);7644 HMSVM_EXIT_DECL hmR0SvmExitXcptGeneric(PVMCPU pVCpu, PSVMTRANSIENT pSvmTransient) 7645 { 7646 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient); 7647 HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY(pVCpu, pSvmTransient); 7648 7649 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu); 7680 7650 uint8_t const uVector = pVmcb->ctrl.u64ExitCode - SVM_EXIT_XCPT_0; 7681 7651 uint32_t const uErrCode = pVmcb->ctrl.u64ExitInfo1; … … 7715 7685 * \#VMEXIT handler for CLGI (SVM_EXIT_CLGI). Conditional \#VMEXIT. 7716 7686 */ 7717 HMSVM_EXIT_DECL hmR0SvmExitClgi(PVMCPU pVCpu, P CPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)7718 { 7719 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS( );7687 HMSVM_EXIT_DECL hmR0SvmExitClgi(PVMCPU pVCpu, PSVMTRANSIENT pSvmTransient) 7688 { 7689 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient); 7720 7690 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_HWVIRT); 7721 7691 7722 7692 #ifdef VBOX_STRICT 7723 PCSVMVMCB pVmcbTmp = hmR0SvmGetCurrentVmcb(pVCpu , pCtx);7693 PCSVMVMCB pVmcbTmp = hmR0SvmGetCurrentVmcb(pVCpu); 7724 7694 Assert(pVmcbTmp); 7725 7695 Assert(!pVmcbTmp->ctrl.IntCtrl.n.u1VGifEnable); … … 7727 7697 #endif 7728 7698 7729 uint8_t const cbInstr = hmR0SvmGetInstrLengthHwAssist(pVCpu, pCtx,3);7699 uint8_t const cbInstr = hmR0SvmGetInstrLengthHwAssist(pVCpu, 3); 7730 7700 VBOXSTRICTRC rcStrict = IEMExecDecodedClgi(pVCpu, cbInstr); 7731 7701 if (rcStrict == VINF_SUCCESS) … … 7738 7708 * \#VMEXIT handler for STGI (SVM_EXIT_STGI). Conditional \#VMEXIT. 7739 7709 */ 7740 HMSVM_EXIT_DECL hmR0SvmExitStgi(PVMCPU pVCpu, P CPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)7741 { 7742 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS( );7710 HMSVM_EXIT_DECL hmR0SvmExitStgi(PVMCPU pVCpu, PSVMTRANSIENT pSvmTransient) 7711 { 7712 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient); 7743 7713 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_HWVIRT); 7744 7714 … … 7747 7717 * we only intercept STGI when events are pending for GIF to become 1. 7748 7718 */ 7749 PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu , pCtx);7719 PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu); 7750 7720 if (pVmcb->ctrl.IntCtrl.n.u1VGifEnable) 7751 hmR0SvmClearCtrlIntercept(pVCpu, p Ctx, pVmcb, SVM_CTRL_INTERCEPT_STGI);7752 7753 uint8_t const cbInstr = hmR0SvmGetInstrLengthHwAssist(pVCpu, pCtx,3);7721 hmR0SvmClearCtrlIntercept(pVCpu, pVmcb, SVM_CTRL_INTERCEPT_STGI); 7722 7723 uint8_t const cbInstr = hmR0SvmGetInstrLengthHwAssist(pVCpu, 3); 7754 7724 VBOXSTRICTRC rcStrict = IEMExecDecodedStgi(pVCpu, cbInstr); 7755 7725 if (rcStrict == VINF_SUCCESS) … … 7762 7732 * \#VMEXIT handler for VMLOAD (SVM_EXIT_VMLOAD). Conditional \#VMEXIT. 
7763 7733 */ 7764 HMSVM_EXIT_DECL hmR0SvmExitVmload(PVMCPU pVCpu, P CPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)7765 { 7766 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS( );7734 HMSVM_EXIT_DECL hmR0SvmExitVmload(PVMCPU pVCpu, PSVMTRANSIENT pSvmTransient) 7735 { 7736 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient); 7767 7737 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK 7768 7738 | CPUMCTX_EXTRN_FS | CPUMCTX_EXTRN_GS | CPUMCTX_EXTRN_TR … … 7771 7741 7772 7742 #ifdef VBOX_STRICT 7773 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu , pCtx);7743 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu); 7774 7744 Assert(pVmcb); 7775 7745 Assert(!pVmcb->ctrl.LbrVirt.n.u1VirtVmsaveVmload); … … 7777 7747 #endif 7778 7748 7779 uint8_t const cbInstr = hmR0SvmGetInstrLengthHwAssist(pVCpu, pCtx,3);7749 uint8_t const cbInstr = hmR0SvmGetInstrLengthHwAssist(pVCpu, 3); 7780 7750 VBOXSTRICTRC rcStrict = IEMExecDecodedVmload(pVCpu, cbInstr); 7781 7751 if (rcStrict == VINF_SUCCESS) … … 7793 7763 * \#VMEXIT handler for VMSAVE (SVM_EXIT_VMSAVE). Conditional \#VMEXIT. 7794 7764 */ 7795 HMSVM_EXIT_DECL hmR0SvmExitVmsave(PVMCPU pVCpu, P CPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)7796 { 7797 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS( );7765 HMSVM_EXIT_DECL hmR0SvmExitVmsave(PVMCPU pVCpu, PSVMTRANSIENT pSvmTransient) 7766 { 7767 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient); 7798 7768 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK); 7799 7769 7800 7770 #ifdef VBOX_STRICT 7801 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu , pCtx);7771 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu); 7802 7772 Assert(pVmcb); 7803 7773 Assert(!pVmcb->ctrl.LbrVirt.n.u1VirtVmsaveVmload); … … 7805 7775 #endif 7806 7776 7807 uint8_t const cbInstr = hmR0SvmGetInstrLengthHwAssist(pVCpu, pCtx,3);7777 uint8_t const cbInstr = hmR0SvmGetInstrLengthHwAssist(pVCpu, 3); 7808 7778 VBOXSTRICTRC rcStrict = IEMExecDecodedVmsave(pVCpu, cbInstr); 7809 7779 return VBOXSTRICTRC_VAL(rcStrict); … … 7814 7784 * \#VMEXIT handler for INVLPGA (SVM_EXIT_INVLPGA). Conditional \#VMEXIT. 7815 7785 */ 7816 HMSVM_EXIT_DECL hmR0SvmExitInvlpga(PVMCPU pVCpu, P CPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)7817 { 7818 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS( );7786 HMSVM_EXIT_DECL hmR0SvmExitInvlpga(PVMCPU pVCpu, PSVMTRANSIENT pSvmTransient) 7787 { 7788 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient); 7819 7789 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK); 7820 7790 7821 uint8_t const cbInstr = hmR0SvmGetInstrLengthHwAssist(pVCpu, pCtx,3);7791 uint8_t const cbInstr = hmR0SvmGetInstrLengthHwAssist(pVCpu, 3); 7822 7792 VBOXSTRICTRC rcStrict = IEMExecDecodedInvlpga(pVCpu, cbInstr); 7823 7793 return VBOXSTRICTRC_VAL(rcStrict); … … 7828 7798 * \#VMEXIT handler for STGI (SVM_EXIT_VMRUN). Conditional \#VMEXIT. 
7829 7799 */ 7830 HMSVM_EXIT_DECL hmR0SvmExitVmrun(PVMCPU pVCpu, P CPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)7831 { 7832 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS( );7800 HMSVM_EXIT_DECL hmR0SvmExitVmrun(PVMCPU pVCpu, PSVMTRANSIENT pSvmTransient) 7801 { 7802 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient); 7833 7803 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK | IEM_CPUMCTX_EXTRN_SVM_VMRUN_MASK); 7834 7804 VBOXSTRICTRC rcStrict; 7835 uint8_t const cbInstr = hmR0SvmGetInstrLengthHwAssist(pVCpu, pCtx,3);7805 uint8_t const cbInstr = hmR0SvmGetInstrLengthHwAssist(pVCpu, 3); 7836 7806 rcStrict = IEMExecDecodedVmrun(pVCpu, cbInstr); 7837 7807 Log4Func(("IEMExecDecodedVmrun returns %Rrc\n", VBOXSTRICTRC_VAL(rcStrict))); … … 7849 7819 * Unconditional \#VMEXIT. 7850 7820 */ 7851 HMSVM_EXIT_DECL hmR0SvmNestedExitXcptDB(PVMCPU pVCpu, P CPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)7852 { 7853 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS( );7854 HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY( );7821 HMSVM_EXIT_DECL hmR0SvmNestedExitXcptDB(PVMCPU pVCpu, PSVMTRANSIENT pSvmTransient) 7822 { 7823 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient); 7824 HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY(pVCpu, pSvmTransient); 7855 7825 7856 7826 if (pVCpu->hm.s.Event.fPending) … … 7869 7839 * Conditional \#VMEXIT. 7870 7840 */ 7871 HMSVM_EXIT_DECL hmR0SvmNestedExitXcptBP(PVMCPU pVCpu, P CPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)7872 { 7873 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS( );7874 HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY( );7841 HMSVM_EXIT_DECL hmR0SvmNestedExitXcptBP(PVMCPU pVCpu, PSVMTRANSIENT pSvmTransient) 7842 { 7843 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient); 7844 HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY(pVCpu, pSvmTransient); 7875 7845 7876 7846 SVMEVENT Event; -
trunk/src/VBox/VMM/VMMR0/HMSVMR0.h
r72805 r72967
…
 VMMR0DECL(int)          SVMR0TermVM(PVM pVM);
 VMMR0DECL(int)          SVMR0SetupVM(PVM pVM);
-VMMR0DECL(VBOXSTRICTRC) SVMR0RunGuestCode(PVMCPU pVCpu, PCPUMCTX pCtx);
+VMMR0DECL(VBOXSTRICTRC) SVMR0RunGuestCode(PVMCPU pVCpu);
 VMMR0DECL(int)          SVMR0ExportHostState(PVMCPU pVCpu);
 VMMR0DECL(int)          SVMR0ImportStateOnDemand(PVMCPU pVCpu, uint64_t fWhat);
-
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
r72966 r72967
…
  * @returns Strict VBox status code (i.e. informational status codes too).
  * @param   pVCpu   The cross context virtual CPU structure.
- * @param   pCtx    Pointer to the guest-CPU context.
  */
-VMMR0DECL(VBOXSTRICTRC) VMXR0RunGuestCode(PVMCPU pVCpu, PCPUMCTX pCtx)
-{
+VMMR0DECL(VBOXSTRICTRC) VMXR0RunGuestCode(PVMCPU pVCpu)
+{
+   PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    Assert(VMMRZCallRing3IsEnabled(pVCpu));
    Assert(!ASMAtomicUoReadU64(&pCtx->fExtrn));
-
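The VMXR0RunGuestCode hunk above shows the theme of the whole changeset: instead of threading a PCPUMCTX alongside the PVMCPU it always lives inside, callees now derive the context from the VCPU themselves, removing a redundant parameter and any chance of the two arguments disagreeing. A tiny, self-contained analogue of the pattern (types invented for illustration):

#include <stdio.h>

typedef struct CTX  { unsigned long rip; } CTX;
typedef struct VCPU { CTX GstCtx; } VCPU;

/* Before: int RunGuestCode(VCPU *pVCpu, CTX *pCtx) -- where pCtx had to be
   &pVCpu->GstCtx at every call site. After: derive it locally. */
static int RunGuestCode(VCPU *pVCpu)
{
    CTX *pCtx = &pVCpu->GstCtx;   /* single source of truth */
    printf("running guest at rip=%#lx\n", pCtx->rip);
    return 0;
}

int main(void)
{
    VCPU VCpu = { { 0xfff0UL } };
    return RunGuestCode(&VCpu);
}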
trunk/src/VBox/VMM/VMMR0/HMVMXR0.h
r72805 r72967
…
 VMMR0DECL(int)          VMXR0InvalidatePage(PVMCPU pVCpu, RTGCPTR GCVirt);
 VMMR0DECL(int)          VMXR0ImportStateOnDemand(PVMCPU pVCpu, uint64_t fWhat);
-VMMR0DECL(VBOXSTRICTRC) VMXR0RunGuestCode(PVMCPU pVCpu, PCPUMCTX pCtx);
+VMMR0DECL(VBOXSTRICTRC) VMXR0RunGuestCode(PVMCPU pVCpu);
 DECLASM(int)            VMXR0StartVM32(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu);
 DECLASM(int)            VMXR0StartVM64(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu);