Changeset 69926 in vbox for trunk/src/VBox/VMM/VMMR0
- Timestamp: Dec 5, 2017 9:48:57 AM
- File: 1 edited
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
--- r69916
+++ r69926

@@ -78 +78 @@
 } while (0)
 
+/**
+ * Updates interrupt shadow for the current RIP.
+ */
+#define HMSVM_UPDATE_INTR_SHADOW(pVCpu, pCtx) \
+    do { \
+        /* Update interrupt shadow. */ \
+        if (   VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS) \
+            && pCtx->rip != EMGetInhibitInterruptsPC(pVCpu)) \
+            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS); \
+    } while (0)
+
 /** Macro for upgrading a @a a_rc to VINF_EM_DBG_STEPPED after emulating an
  * instruction that exited. */

@@ -96 +107 @@
                     ("Illegal migration! Entered on CPU %u Current %u\n", \
                      pVCpu->hm.s.idEnteredCpu, RTMpCpuId()));
+
+/** Assert that we're not executing a nested-guest. */
+#ifdef VBOX_WITH_NESTED_HWVIRT
+# define HMSVM_ASSERT_NOT_IN_NESTED_GUEST(a_pCtx)    Assert(!CPUMIsGuestInSvmNestedHwVirtMode((a_pCtx)))
+#else
+# define HMSVM_ASSERT_NOT_IN_NESTED_GUEST(a_pCtx)    do { RT_NOREF((a_pCtx)); } while (0)
+#endif
+
+/** Assert that we're executing a nested-guest. */
+#ifdef VBOX_WITH_NESTED_HWVIRT
+# define HMSVM_ASSERT_IN_NESTED_GUEST(a_pCtx)        Assert(CPUMIsGuestInSvmNestedHwVirtMode((a_pCtx)))
+#else
+# define HMSVM_ASSERT_IN_NESTED_GUEST(a_pCtx)        do { RT_NOREF((a_pCtx)); } while (0)
+#endif
 
 /**

@@ -314 +339 @@
 static FNSVMEXITHANDLER hmR0SvmExitInvlpga;
 static FNSVMEXITHANDLER hmR0SvmExitVmrun;
-static FNSVMEXITHANDLER hmR0SvmNestedExitIret;
 static FNSVMEXITHANDLER hmR0SvmNestedExitXcptDB;
 static FNSVMEXITHANDLER hmR0SvmNestedExitXcptBP;

@@ -838 +862 @@
 
 /**
+ * Gets a pointer to the currently active guest or nested-guest VMCB.
+ *
+ * @returns Pointer to the current context VMCB.
+ * @param   pVCpu       The cross context virtual CPU structure.
+ * @param   pCtx        Pointer to the guest-CPU context.
+ */
+DECLINLINE(PSVMVMCB) hmR0SvmGetCurrentVmcb(PVMCPU pVCpu, PCPUMCTX pCtx)
+{
+#ifdef VBOX_WITH_NESTED_HWVIRT
+    if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
+        return pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
+#else
+    RT_NOREF(pCtx);
+#endif
+    return pVCpu->hm.s.svm.pVmcb;
+}
+
+
+/**
  * Invalidates a guest page by guest virtual address.
  *

@@ -857 +900 @@
     Log4(("SVMR0InvalidatePage %RGv\n", GCVirt));
 
-    PSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
+    PCPUMCTX pCtx  = CPUMQueryGuestCtxPtr(pVCpu);
+    PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu, pCtx);
     AssertMsgReturn(pVmcb, ("Invalid pVmcb!\n"), VERR_SVM_INVALID_PVMCB);
 

@@ -1748 +1792 @@
     pVmcbNstGst->ctrl.u16InterceptWrCRx |= pVmcb->ctrl.u16InterceptWrCRx;
 
-    /*
-     * CR3, CR4 reads and writes are intercepted as we modify them before
-     * hardware-assisted SVM execution. In addition, PGM needs to be up to date
-     * on paging mode changes in the nested-guest.
-     *
-     * CR0 writes are intercepted in case of paging mode changes. CR0 reads are not
-     * intercepted as we currently don't modify CR0 while executing the nested-guest.
-     */
-    pVmcbNstGst->ctrl.u16InterceptRdCRx |= RT_BIT(4) | RT_BIT(3);
-    pVmcbNstGst->ctrl.u16InterceptWrCRx |= RT_BIT(4) | RT_BIT(3) | RT_BIT(0);
+    /* Always intercept CR0, CR4 reads and writes as we alter them. */
+    pVmcbNstGst->ctrl.u16InterceptRdCRx |= RT_BIT(0) | RT_BIT(4);
+    pVmcbNstGst->ctrl.u16InterceptWrCRx |= RT_BIT(0) | RT_BIT(4);
+
+    /* Always intercept CR3 reads and writes without nested-paging as we load shadow page tables. */
+    if (!pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging)
+    {
+        pVmcbNstGst->ctrl.u16InterceptRdCRx |= RT_BIT(3);
+        pVmcbNstGst->ctrl.u16InterceptWrCRx |= RT_BIT(3);
+    }
 
     /** @todo Figure out debugging with nested-guests, till then just intercept

@@ -1767 +1811 @@
     pVmcbNstGst->ctrl.u64InterceptCtrl  |= pVmcb->ctrl.u64InterceptCtrl
                                         |  HMSVM_MANDATORY_NESTED_GUEST_CTRL_INTERCEPTS;
+
     /*
      * Remove control intercepts that we don't need while executing the nested-guest.

@@ -1949 +1994 @@
 static int hmR0SvmLoadGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
 {
+    HMSVM_ASSERT_NOT_IN_NESTED_GUEST(pCtx);
+
     PSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
     AssertMsgReturn(pVmcb, ("Invalid pVmcb\n"), VERR_SVM_INVALID_PVMCB);

@@ -2241 +2288 @@
     pMixedCtx->cr2 = pVmcb->guest.u64CR2;
 
-#ifdef VBOX_WITH_NESTED_GUEST
-    /*
-     * The nested hypervisor might not be intercepting these control registers,
-     */
-    if (CPUMIsGuestInNestedHwVirtMode(pMixedCtx))
-    {
-        pMixedCtx->cr4 = pVmcb->guest.u64CR4;
-        pMixedCtx->cr0 = pVmcb->guest.u64CR0;
-    }
-#endif
-
     /*
      * Guest MSRs.

@@ -2398 +2434 @@
     if (CPUMIsHyperDebugStateActive(pVCpu))
     {
-        PSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
+        PSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;    /** @todo nested-guest. */
         Assert(pVmcb->ctrl.u16InterceptRdDRx == 0xffff);
         Assert(pVmcb->ctrl.u16InterceptWrDRx == 0xffff);

@@ -3020 +3056 @@
     {
         pVmcb->ctrl.u64InterceptCtrl |= SVM_CTRL_INTERCEPT_IRET;
-        pVmcb->ctrl.u64VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_INTERCEPTS);
+        pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
 
         Log4(("Setting IRET intercept\n"));

@@ -3196 +3232 @@
 #endif
 
+
 /**
  * Evaluates the event to be delivered to the guest and sets it as the pending
  * event.
  *
  * @param   pVCpu       The cross context virtual CPU structure.
  * @param   pCtx        Pointer to the guest-CPU context.
+ *
+ * @remarks Don't use this function when we are actively executing a
+ *          nested-guest, use hmR0SvmEvaluatePendingEventNested instead.
  */
 static void hmR0SvmEvaluatePendingEvent(PVMCPU pVCpu, PCPUMCTX pCtx)
 {
+    HMSVM_ASSERT_NOT_IN_NESTED_GUEST(pCtx);
     Assert(!pVCpu->hm.s.Event.fPending);
 
 #ifdef VBOX_WITH_NESTED_HWVIRT
-    bool const fGif       = pCtx->hwvirt.svm.fGif;
+    bool const fGif = pCtx->hwvirt.svm.fGif;
 #else
-    bool const fGif       = true;
+    bool const fGif = true;
 #endif
     Log4Func(("fGif=%RTbool\n", fGif));

@@ -3372 +3413 @@
     NOREF(pCtx);
     HMSVM_ASSERT_PREEMPT_SAFE();
+    HMSVM_ASSERT_NOT_IN_NESTED_GUEST(pCtx);
     PCSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;

@@ -3738 +3780 @@
 {
     HMSVM_ASSERT_PREEMPT_SAFE();
-    Assert(!CPUMIsGuestInSvmNestedHwVirtMode(pCtx));
-
-#ifdef VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM
-    /* IEM only for executing nested guest, we shouldn't get here. */
-    /** @todo Make this into an assertion since HMR3CanExecuteGuest already checks
-     *        for it? */
-    if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
-    {
-        Log2(("hmR0SvmPreRunGuest: Rescheduling to IEM due to nested-hwvirt or forced IEM exec -> VINF_EM_RESCHEDULE_REM\n"));
-        return VINF_EM_RESCHEDULE_REM;
-    }
-#endif
+    HMSVM_ASSERT_NOT_IN_NESTED_GUEST(pCtx);
 
     /* Check force flag actions that might require us to go back to ring-3. */

@@ -3871 +3902 @@
     Assert(VMMR0IsLogFlushDisabled(pVCpu));
     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
+    HMSVM_ASSERT_IN_NESTED_GUEST(pCtx);
 
     VMCPU_ASSERT_STATE(pVCpu, VMCPUSTATE_STARTED_HM);

@@ -3982 +4014 @@
     Assert(VMMR0IsLogFlushDisabled(pVCpu));
     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
+    HMSVM_ASSERT_NOT_IN_NESTED_GUEST(pCtx);
 
     VMCPU_ASSERT_STATE(pVCpu, VMCPUSTATE_STARTED_HM);

@@ -4188 +4221 @@
            guest-CPU context. */
 
+        /** @todo This could later be optimized. Not now. */
         HMSvmNstGstVmExitNotify(pVCpu, pMixedCtx);  /* Restore modified VMCB fields for now, see @bugref{7243#c52} .*/
+        HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST);  /* Ensure we re-modify the fields before next reentry. */
     }
 #endif

@@ -4215 +4250 @@
     ASMAtomicIncU32(&pVCpu->hm.s.cWorldSwitchExits);    /* Initialized in vmR3CreateUVM(): used for EMT poking. */
 
-    PSVMVMCB pVmcb          = pVCpu->hm.s.svm.pVmcb;
+    PSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
     pVmcb->ctrl.u64VmcbCleanBits = HMSVM_VMCB_CLEAN_ALL;  /* Mark the VMCB-state cache as unmodified by VMM. */

@@ -4466 +4501 @@
 static int hmR0SvmRunGuestCodeNested(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, uint32_t *pcLoops)
 {
-    Assert(CPUMIsGuestInSvmNestedHwVirtMode(pCtx));
+    HMSVM_ASSERT_IN_NESTED_GUEST(pCtx);
     Assert(pcLoops);
     Assert(*pcLoops <= pVM->hm.s.cMaxResumeLoops);

@@ -4514 +4549 @@
         HMSVM_EXITCODE_STAM_COUNTER_INC(SvmTransient.u64ExitCode);
         STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatExit1, &pVCpu->hm.s.StatExit2, x);
-        VBOXVMM_R0_HMSVM_VMEXIT(pVCpu, pCtx, SvmTransient.u64ExitCode, pVCpu->hm.s.svm.pVmcb);
+        VBOXVMM_R0_HMSVM_VMEXIT(pVCpu, pCtx, SvmTransient.u64ExitCode, pCtx->hwvirt.svm.CTX_SUFF(pVmcb));
         rc = hmR0SvmHandleExitNested(pVCpu, pCtx, &SvmTransient);
         STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2, x);

@@ -4622 +4657 @@
 static int hmR0SvmHandleExitNested(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
 {
+    HMSVM_ASSERT_IN_NESTED_GUEST(pCtx);
     Assert(pSvmTransient->u64ExitCode != SVM_EXIT_INVALID);
     Assert(pSvmTransient->u64ExitCode <= SVM_EXIT_MAX);

@@ -4950 +4986 @@
                 if (HM_SVM_IS_CTRL_INTERCEPT_SET(pCtx, SVM_CTRL_INTERCEPT_IRET))
                     return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
-                return hmR0SvmNestedExitIret(pVCpu, pCtx, pSvmTransient);
+                return hmR0SvmExitIret(pVCpu, pCtx, pSvmTransient);

@@ -5261 +5297 @@
     /** @todo r=ramshankar; We should be doing
      *        HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY here! */
-
     PSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
     SVMEVENT Event;

@@ -5524 +5559 @@
 {
     int rc = VINF_SUCCESS;
-    PSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
+    PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu, pCtx);
 
     Log4(("EXITINTINFO: Pending vectoring event %#RX64 Valid=%RTbool ErrValid=%RTbool Err=%#RX32 Type=%u Vector=%u\n",

@@ -5793 +5828 @@
 
 /**
- * Updates interrupt shadow for the current RIP.
- */
-#define HMSVM_UPDATE_INTR_SHADOW(pVCpu, pCtx) \
-    do { \
-        /* Update interrupt shadow. */ \
-        if (   VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS) \
-            && pCtx->rip != EMGetInhibitInterruptsPC(pVCpu)) \
-            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS); \
-    } while (0)
+ * Returns whether decode-assist feature is supported.
+ *
+ * @param   pVCpu       The cross context virtual CPU structure.
+ * @param   pCtx        Pointer to the guest-CPU context.
+ */
+DECLINLINE(bool) hmR0SvmSupportsDecodeAssist(PVMCPU pVCpu, PCPUMCTX pCtx)
+{
+    PVM pVM = pVCpu->CTX_SUFF(pVM);
+#ifdef VBOX_WITH_NESTED_HWVIRT
+    if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
+    {
+        return (pVM->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_DECODE_ASSIST)
+            && pVM->cpum.ro.GuestFeatures.fSvmDecodeAssist;
+    }
+#else
+    RT_NOREF(pCtx);
+#endif
+    return RT_BOOL(pVM->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_DECODE_ASSIST);
+}
+
+
+/**
+ * Returns whether NRIP_SAVE feature is supported.
+ *
+ * @param   pVCpu       The cross context virtual CPU structure.
+ * @param   pCtx        Pointer to the guest-CPU context.
+ */
+DECLINLINE(bool) hmR0SvmSupportsNextRipSave(PVMCPU pVCpu, PCPUMCTX pCtx)
+{
+    PVM pVM = pVCpu->CTX_SUFF(pVM);
+#ifdef VBOX_WITH_NESTED_HWVIRT
+    if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
+    {
+        return (pVM->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_NRIP_SAVE)
+            && pVM->cpum.ro.GuestFeatures.fSvmNextRipSave;
+    }
+#else
+    RT_NOREF(pCtx);
+#endif
+    return RT_BOOL(pVM->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_NRIP_SAVE);
+}
 

@@ -5819 +5886 @@
 DECLINLINE(void) hmR0SvmAdvanceRipHwAssist(PVMCPU pVCpu, PCPUMCTX pCtx, uint32_t cb)
 {
-    if (pVCpu->CTX_SUFF(pVM)->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_NRIP_SAVE)
-    {
-        PCSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
+    bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu, pCtx);
+    if (fSupportsNextRipSave)
+    {
+        PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu, pCtx);
         Assert(pVmcb->ctrl.u64NextRIP);
         AssertRelease(pVmcb->ctrl.u64NextRIP - pCtx->rip == cb);    /* temporary, remove later */

@@ -5845 +5913 @@
 {
     Assert(cbLikely <= 15);    /* See Intel spec. 2.3.11 "AVX Instruction Length" */
-    if (pVCpu->CTX_SUFF(pVM)->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_NRIP_SAVE)
-    {
-        PCSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
+    bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu, pCtx);
+    if (fSupportsNextRipSave)
+    {
+        PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu, pCtx);
         uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pCtx->rip;
         Assert(cbInstr == cbLikely);

@@ -6038 +6107 @@
     STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInvlpg);
 
-    if (pVM->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_DECODE_ASSIST)
-    {
-        Assert(pVM->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_NRIP_SAVE);
-        PCSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
+    bool const fSupportsDecodeAssist = hmR0SvmSupportsDecodeAssist(pVCpu, pCtx);
+    bool const fSupportsNextRipSave  = hmR0SvmSupportsNextRipSave(pVCpu, pCtx);
+    if (   fSupportsDecodeAssist
+        && fSupportsNextRipSave)
+    {
+        PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu, pCtx);
         uint8_t const cbInstr   = pVmcb->ctrl.u64NextRIP - pCtx->rip;
         RTGCPTR const GCPtrPage = pVmcb->ctrl.u64ExitInfo1;

@@ -6160 +6231 @@
     STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCRxRead[pSvmTransient->u64ExitCode - SVM_EXIT_READ_CR0]);
 
-    PVM pVM = pVCpu->CTX_SUFF(pVM);
-    if (pVM->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_DECODE_ASSIST)
-    {
-        Assert(pVM->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_NRIP_SAVE);
-        PCSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
+    bool const fSupportsDecodeAssist = hmR0SvmSupportsDecodeAssist(pVCpu, pCtx);
+    bool const fSupportsNextRipSave  = hmR0SvmSupportsNextRipSave(pVCpu, pCtx);
+    if (   fSupportsDecodeAssist
+        && fSupportsNextRipSave)
+    {
+        PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu, pCtx);
         bool const fMovCRx = RT_BOOL(pVmcb->ctrl.u64ExitInfo1 & SVM_EXIT1_MOV_CRX_MASK);
         if (fMovCRx)

@@ -6199 +6271 @@
 
     VBOXSTRICTRC rcStrict = VERR_SVM_IPE_5;
-    PVM pVM = pVCpu->CTX_SUFF(pVM);
-    bool fDecodedInstr = false;
-    if (pVM->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_DECODE_ASSIST)
-    {
-        Assert(pVM->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_NRIP_SAVE);
-        PCSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
+    PVM        pVM = pVCpu->CTX_SUFF(pVM);
+    bool       fDecodedInstr = false;
+    bool const fSupportsDecodeAssist = hmR0SvmSupportsDecodeAssist(pVCpu, pCtx);
+    bool const fSupportsNextRipSave  = hmR0SvmSupportsNextRipSave(pVCpu, pCtx);
+    if (   fSupportsDecodeAssist
+        && fSupportsNextRipSave)
+    {
+        PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu, pCtx);
         bool const fMovCRx = RT_BOOL(pVmcb->ctrl.u64ExitInfo1 & SVM_EXIT1_MOV_CRX_MASK);
         if (fMovCRx)

@@ -6267 +6341 @@
 {
     HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
-    PSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
+    PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu, pCtx);
     PVM      pVM   = pVCpu->CTX_SUFF(pVM);

@@ -6293 +6367 @@
     }
 
-    if (pVM->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_NRIP_SAVE)
+    bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu, pCtx);
+    if (fSupportsNextRipSave)
     {
         rc = EMInterpretWrmsr(pVM, pVCpu, CPUMCTX2CORE(pCtx));

@@ -6341 +6416 @@
     Log4(("MSR Read: idMsr=%#RX32\n", pCtx->ecx));
 
-    if (pVM->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_NRIP_SAVE)
+    bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu, pCtx);
+    if (fSupportsNextRipSave)
     {
         rc = EMInterpretRdmsr(pVM, pVCpu, CPUMCTX2CORE(pCtx));

@@ -6366 +6442 @@
     }
 
-    /* RIP has been updated by EMInterpret[Rd|Wr]msr(). */
+    /* RIP has been updated by EMInterpret[Rd|Wr]msr() or EMInterpretInstruction(). */
     return rc;
 }

@@ -6488 +6564 @@
 
     PVM      pVM   = pVCpu->CTX_SUFF(pVM);
-    PSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
+    PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu, pCtx);
 
     /* Refer AMD spec. 15.10.2 "IN and OUT Behaviour" and Figure 15-2. "EXITINFO1 for IOIO Intercept" for the format. */

@@ -6530 +6606 @@
            only enabling it for Bulldozer and later with NRIP.  OS/2 broke on
            2384 Opterons when only checking NRIP. */
-        if (   (pVM->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_NRIP_SAVE)
+        bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu, pCtx);
+        if (   fSupportsNextRipSave
             && pVM->cpum.ro.GuestFeatures.enmMicroarch >= kCpumMicroarch_AMD_15h_First)
         {

@@ -6699 +6776 @@
 {
     HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
+    HMSVM_ASSERT_NOT_IN_NESTED_GUEST(pCtx);
+
     PVM pVM = pVCpu->CTX_SUFF(pVM);
     Assert(pVM->hm.s.fNestedPaging);

@@ -6808 +6887 @@
 {
     HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
-
-    PSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
+    HMSVM_ASSERT_NOT_IN_NESTED_GUEST(pCtx);
+
+    PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu, pCtx);
     pVmcb->ctrl.IntCtrl.n.u1VIrqPending = 0;  /* No virtual interrupts pending, we'll inject the current one/NMI before reentry. */
     pVmcb->ctrl.IntCtrl.n.u8VIntrVector = 0;

@@ -6908 +6988 @@
 
     /* Indicate that we no longer need to #VMEXIT when the guest is ready to receive NMIs, it is now ready. */
-    PSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
+    PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu, pCtx);
     hmR0SvmClearIretIntercept(pVmcb);

@@ -6923 +7003 @@
 {
     HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
+    HMSVM_ASSERT_NOT_IN_NESTED_GUEST(pCtx);
 
     HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY();

@@ -7285 +7366 @@
 
     /* See AMD spec. 15.12.15 "#PF (Page Fault)". */
-    PSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
+    PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu, pCtx);
     uint32_t u32ErrCode = pVmcb->ctrl.u64ExitInfo1;
     uint64_t const uFaultAddress = pVmcb->ctrl.u64ExitInfo2;

@@ -7451 +7532 @@
 
 /**
- * Nested-guest \#VMEXIT handler for IRET (SVM_EXIT_VMRUN). Conditional \#VMEXIT.
- */
-HMSVM_EXIT_DECL hmR0SvmNestedExitIret(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
-{
-    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
-
-    /* Clear NMI blocking. */
-    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
-
-    /* Indicate that we no longer need to #VMEXIT when the guest is ready to receive NMIs, it is now ready. */
-    PSVMVMCB pVmcbNstGst = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
-    hmR0SvmClearIretIntercept(pVmcbNstGst);
-
-    /* Deliver the pending NMI via hmR0SvmEvaluatePendingEventNested() and resume guest execution. */
-    return VINF_SUCCESS;
-}
-
-
-/**
  * Nested-guest \#VMEXIT handler for debug exceptions (SVM_EXIT_EXCEPTION_1).
  * Unconditional \#VMEXIT.