- Timestamp:
- Aug 7, 2017 8:15:17 AM (7 years ago)
- File:
-
- 1 edited
Legend:
- Unmodified
- Added
- Removed
-
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
r68305 r68311 314 314 static FNSVMEXITHANDLER hmR0SvmExitInvlpga; 315 315 static FNSVMEXITHANDLER hmR0SvmExitVmrun; 316 static FNSVMEXITHANDLER hmR0SvmNestedExitIret; 317 static FNSVMEXITHANDLER hmR0SvmNestedExitVIntr; 316 318 #endif 317 319 /** @} */ … … 3026 3028 hmR0SvmSetIretIntercept(pVmcbNstGst); 3027 3029 else if (fIntShadow) 3030 { 3031 /** @todo Figure this out, how we shall manage virt. intercept if the 3032 * nested-guest already has one set and/or if we really need it? */ 3033 #if 0 3028 3034 hmR0SvmSetVirtIntrIntercept(pVmcbNstGst); 3035 #endif 3036 } 3029 3037 else 3030 3038 { … … 3079 3087 } 3080 3088 else 3089 { 3090 /** @todo Figure this out, how we shall manage virt. intercept if the 3091 * nested-guest already has one set and/or if we really need it? */ 3092 #if 0 3081 3093 hmR0SvmSetVirtIntrIntercept(pVmcbNstGst); 3094 #endif 3095 } 3082 3096 } 3083 3097 /* … … 3140 3154 hmR0SvmSetIretIntercept(pVmcb); 3141 3155 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI); 3156 return; 3142 3157 } 3143 3158 } … … 4628 4643 if (pVmcbNstGstCache->u64InterceptCtrl & SVM_CTRL_INTERCEPT_VINTR) 4629 4644 return hmR0SvmExecVmexit(pVCpu, pCtx); 4630 return hmR0Svm ExitVIntr(pVCpu, pCtx, pSvmTransient);4645 return hmR0SvmNestedExitVIntr(pVCpu, pCtx, pSvmTransient); 4631 4646 } 4632 4647 … … 4741 4756 if (pVmcbNstGstCache->u64InterceptCtrl & SVM_CTRL_INTERCEPT_IRET) 4742 4757 return hmR0SvmExecVmexit(pVCpu, pCtx); 4743 return hmR0Svm ExitIret(pVCpu, pCtx, pSvmTransient);4758 return hmR0SvmNestedExitIret(pVCpu, pCtx, pSvmTransient); 4744 4759 } 4745 4760 … … 7461 7476 return VBOXSTRICTRC_VAL(rcStrict); 7462 7477 } 7478 7479 /** 7480 * Nested-guest \#VMEXIT handler for IRET (SVM_EXIT_IRET). Conditional \#VMEXIT. 7481 */ 7482 HMSVM_EXIT_DECL hmR0SvmNestedExitIret(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient) 7483 { 7484 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(); 7485 7486 /* Clear NMI blocking. */ 7487 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS); 7488 7489 /* Indicate that we no longer need to #VMEXIT when the guest is ready to receive NMIs, it is now ready. */ 7490 PSVMVMCB pVmcbNstGst = pCtx->hwvirt.svm.CTX_SUFF(pVmcb); 7491 hmR0SvmClearIretIntercept(pVmcbNstGst); 7492 7493 /* Deliver the pending NMI via hmR0SvmEvaluatePendingEventNested() and resume guest execution. */ 7494 return VINF_SUCCESS; 7495 } 7496 7497 7498 /** 7499 * \#VMEXIT handler for virtual interrupt (SVM_EXIT_VINTR). Conditional 7500 * \#VMEXIT. 7501 */ 7502 HMSVM_EXIT_DECL hmR0SvmNestedExitVIntr(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient) 7503 { 7504 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(); 7505 7506 /* No virtual interrupts pending, we'll inject the current one/NMI before reentry. */ 7507 PSVMVMCB pVmcbNstGst = pCtx->hwvirt.svm.CTX_SUFF(pVmcb); 7508 pVmcbNstGst->ctrl.IntCtrl.n.u1VIrqPending = 0; 7509 pVmcbNstGst->ctrl.IntCtrl.n.u8VIntrVector = 0; 7510 7511 /* Indicate that we no longer need to #VMEXIT when the nested-guest is ready to receive interrupts/NMIs, it is now ready. */ 7512 pVmcbNstGst->ctrl.u64InterceptCtrl &= ~SVM_CTRL_INTERCEPT_VINTR; 7513 pVmcbNstGst->ctrl.u64VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_INTERCEPTS | HMSVM_VMCB_CLEAN_TPR); 7514 7515 /* Deliver the pending interrupt/NMI via hmR0SvmEvaluatePendingEventNested() and resume guest execution. */ 7516 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIntWindow); 7517 return VINF_SUCCESS; 7518 } 7519 7463 7520 #endif /* VBOX_WITH_NESTED_HWVIRT */ 7464 7521
Note:
See TracChangeset
for help on using the changeset viewer.