Changeset 69413 in vbox for trunk/src/VBox/VMM/VMMR0
- Timestamp: Oct 27, 2017 9:32:26 AM
- svn:sync-xref-src-repo-rev: 118742
- File: 1 edited
Legend (for the diff below):
- Unmodified lines are prefixed with a single space
- Lines added in r69413 are prefixed with "+"
- Lines removed from r69221 are prefixed with "-"
- "…" marks unchanged lines omitted between hunks
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp (r69221 → r69413)

 static FNSVMEXITHANDLER hmR0SvmExitReadCRx;
 static FNSVMEXITHANDLER hmR0SvmExitWriteCRx;
-static FNSVMEXITHANDLER hmR0SvmExitSetPendingXcptUD;
 static FNSVMEXITHANDLER hmR0SvmExitMsr;
 static FNSVMEXITHANDLER hmR0SvmExitReadDRx;
…
 static FNSVMEXITHANDLER hmR0SvmExitVmrun;
 static FNSVMEXITHANDLER hmR0SvmNestedExitIret;
+static FNSVMEXITHANDLER hmR0SvmNestedExitXcptDB;
+static FNSVMEXITHANDLER hmR0SvmNestedExitXcptBP;
 #endif
 /** @} */
…
     hmR0SvmLoadGuestXcptIntercepts(pVCpu, pVmcb);

-    /** @todo Optimization: we don't need to intercept VMMCALL when the
-     *        nested-guest isn't intercepting them, and possibly others. */
-
     /* Next, merge the intercepts into the nested-guest VMCB. */
     pVmcbNstGst->ctrl.u16InterceptRdCRx |= pVmcb->ctrl.u16InterceptRdCRx;
…
     pVmcbNstGst->ctrl.u64InterceptCtrl  |= pVmcb->ctrl.u64InterceptCtrl
                                         |  HMSVM_MANDATORY_NESTED_GUEST_CTRL_INTERCEPTS;

+    /*
+     * Remove control intercepts that we don't need while executing the nested-guest.
+     *
+     * VMMCALL when not intercepted raises a \#UD exception in the guest. However,
+     * other SVM instructions like VMSAVE when not intercept can cause havoc on the
+     * host as they can write to any location in physical memory, hence they always
+     * need to be intercepted (they are included in HMSVM_MANDATORY_GUEST_CTRL_INTERCEPTS).
+     */
     Assert(   (pVmcbNstGst->ctrl.u64InterceptCtrl & HMSVM_MANDATORY_GUEST_CTRL_INTERCEPTS)
            == HMSVM_MANDATORY_GUEST_CTRL_INTERCEPTS);
+    pVmcbNstGst->ctrl.u64InterceptCtrl &= ~SVM_CTRL_INTERCEPT_VMMCALL;
+
+    /* Remove exception intercepts that we don't need while executing the nested-guest. */
+    pVmcbNstGst->ctrl.u32InterceptXcpt &= ~RT_BIT(X86_XCPT_UD);

     Assert(!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS));
…
     Log4(("hmR0SvmSetPendingEvent: u=%#RX64 u8Vector=%#x Type=%#x ErrorCodeValid=%RTbool ErrorCode=%#RX32\n", pEvent->u,
           pEvent->n.u8Vector, (uint8_t)pEvent->n.u3Type, !!pEvent->n.u1ErrorCodeValid, pEvent->n.u32ErrorCode));
+}
+
+
+/**
+ * Sets an exception as pending-for-injection into the VM.
+ *
+ * @param   pVCpu       The cross context virtual CPU structure.
+ */
+DECLINLINE(void) hmR0SvmSetPendingXcpt(PVMCPU pVCpu, uint8_t uXcpt)
+{
+    SVMEVENT Event;
+    Event.u          = 0;
+    Event.n.u1Valid  = 1;
+    Event.n.u3Type   = SVM_EVENT_EXCEPTION;
+    Event.n.u8Vector = uXcpt;
+    hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
 }

…
     } while (0) \

-#define HM_SVM_HANDLE_XCPT_EXIT_NESTED(a_uXcpt, a_XcptExitFn) \
-    do \
-    { \
-        if (pVmcbNstGstCache->u32InterceptXcpt & RT_BIT(a_uXcpt)) \
-            HM_SVM_RET_VMEXIT_NESTED(pVCpu, pVmcbNstGst->ctrl.u64ExitCode, pVmcbNstGst->ctrl.u64ExitInfo1, \
-                                     pVmcbNstGst->ctrl.u64ExitInfo2); \
-        return a_XcptExitFn(pVCpu, pCtx, pSvmTransient); \
-    } while (0) \
-
     /*
      * For all the #VMEXITs here we primarily figure out if the #VMEXIT is expected
…
     uint64_t const uExitInfo2 = pVmcbNstGstCtrl->u64ExitInfo2;

-    switch (pSvmTransient->u64ExitCode)
+    Assert(uExitCode == pVmcbNstGstCtrl->u64ExitCode);
+    switch (uExitCode)
     {
         case SVM_EXIT_CPUID:
…
             /* If the nested-guest is intercepting #PFs, cause a #PF #VMEXIT. */
             if (pVmcbNstGstCache->u32InterceptXcpt & RT_BIT(X86_XCPT_PF))
-                HM_SVM_RET_VMEXIT_NESTED(pVCpu, SVM_EXIT_EXCEPTION_14, u32ErrCode, uFaultAddress);
+                HM_SVM_RET_VMEXIT_NESTED(pVCpu, uExitCode, u32ErrCode, uFaultAddress);

             /* If the nested-guest is not intercepting #PFs, forward the #PF to the nested-guest. */
…

         case SVM_EXIT_EXCEPTION_7:   /* X86_XCPT_NM */
-            HM_SVM_HANDLE_XCPT_EXIT_NESTED(X86_XCPT_NM, hmR0SvmExitXcptNM);
+        {
+            if (pVmcbNstGstCache->u32InterceptXcpt & RT_BIT(X86_XCPT_NM))
+                HM_SVM_RET_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
+            hmR0SvmSetPendingXcptNM(pVCpu);
+            return VINF_SUCCESS;
+        }

         case SVM_EXIT_EXCEPTION_6:   /* X86_XCPT_UD */
-            HM_SVM_HANDLE_XCPT_EXIT_NESTED(X86_XCPT_UD, hmR0SvmExitXcptUD);
+        {
+            if (pVmcbNstGstCache->u32InterceptXcpt & RT_BIT(X86_XCPT_UD))
+                HM_SVM_RET_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
+            hmR0SvmSetPendingXcptUD(pVCpu);
+            return VINF_SUCCESS;
+        }

         case SVM_EXIT_EXCEPTION_16:  /* X86_XCPT_MF */
-            HM_SVM_HANDLE_XCPT_EXIT_NESTED(X86_XCPT_MF, hmR0SvmExitXcptMF);
+        {
+            if (pVmcbNstGstCache->u32InterceptXcpt & RT_BIT(X86_XCPT_MF))
+                HM_SVM_RET_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
+            hmR0SvmSetPendingXcptMF(pVCpu);
+            return VINF_SUCCESS;
+        }

         case SVM_EXIT_EXCEPTION_1:   /* X86_XCPT_DB */
-            HM_SVM_HANDLE_XCPT_EXIT_NESTED(X86_XCPT_DB, hmR0SvmExitXcptDB);
+        {
+            if (pVmcbNstGstCache->u32InterceptXcpt & RT_BIT(X86_XCPT_DB))
+                HM_SVM_RET_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
+            return hmR0SvmNestedExitXcptDB(pVCpu, pCtx, pSvmTransient);
+        }

         case SVM_EXIT_EXCEPTION_17:  /* X86_XCPT_AC */
-            HM_SVM_HANDLE_XCPT_EXIT_NESTED(X86_XCPT_AC, hmR0SvmExitXcptAC);
+        {
+            if (pVmcbNstGstCache->u32InterceptXcpt & RT_BIT(X86_XCPT_AC))
+                HM_SVM_RET_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
+            return hmR0SvmExitXcptAC(pVCpu, pCtx, pSvmTransient);
+        }

         case SVM_EXIT_EXCEPTION_3:   /* X86_XCPT_BP */
-            HM_SVM_HANDLE_XCPT_EXIT_NESTED(X86_XCPT_BP, hmR0SvmExitXcptBP);
+        {
+            if (pVmcbNstGstCache->u32InterceptXcpt & RT_BIT(X86_XCPT_BP))
+                HM_SVM_RET_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
+            return hmR0SvmNestedExitXcptBP(pVCpu, pCtx, pSvmTransient);
+        }

         case SVM_EXIT_READ_CR0:
…
             if (pVmcbNstGstCache->u64InterceptCtrl & SVM_CTRL_INTERCEPT_RSM)
                 HM_SVM_RET_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
-            return hmR0SvmExitSetPendingXcptUD(pVCpu, pCtx, pSvmTransient);
+            hmR0SvmSetPendingXcptUD(pVCpu);
+            return VINF_SUCCESS;
         }

         case SVM_EXIT_SKINIT:
         {
             if (pVmcbNstGstCache->u64InterceptCtrl & SVM_CTRL_INTERCEPT_SKINIT)
                 HM_SVM_RET_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
-            return hmR0SvmExitSetPendingXcptUD(pVCpu, pCtx, pSvmTransient);
+            hmR0SvmSetPendingXcptUD(pVCpu);
+            return VINF_SUCCESS;
         }

…
             /* not reached */

-#undef HM_SVM_HANDLE_XCPT_EXIT_NESTED
 #undef HM_SVM_RET_VMEXIT_NESTED
 }
…
         case SVM_EXIT_RSM:
         case SVM_EXIT_SKINIT:
-            return hmR0SvmExitSetPendingXcptUD(pVCpu, pCtx, pSvmTransient);
+        {
+            hmR0SvmSetPendingXcptUD(pVCpu);
+            return VINF_SUCCESS;
+        }

 #ifdef HMSVM_ALWAYS_TRAP_ALL_XCPTS
…

 /**
- * \#VMEXIT handler for instructions that result in a \#UD exception delivered
- * to the guest.
- */
-HMSVM_EXIT_DECL hmR0SvmExitSetPendingXcptUD(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
-{
-    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
-    hmR0SvmSetPendingXcptUD(pVCpu);
-    return VINF_SUCCESS;
-}
-
-
-/**
  * \#VMEXIT handler for MSR read and writes (SVM_EXIT_MSR). Conditional
  * \#VMEXIT.
…
     HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();

+    /** @todo if triple-fault is returned in nested-guest scenario convert to a
+     *        shutdown VMEXIT. */
     HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY();

…
         Event.n.u3Type   = SVM_EVENT_EXCEPTION;
         Event.n.u8Vector = X86_XCPT_BP;
-        Event.n.u1ErrorCodeValid = 0;
         hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
     }
…
     /** @todo Stat. */
     /* STAM_COUNTER_INC(&pVCpu->hm.s.StatExitVmrun); */
+#if 0
     VBOXSTRICTRC rcStrict;
     uint8_t const cbInstr = hmR0SvmGetInstrLengthHwAssist(pVCpu, pCtx, 3);
…
     }
     return VBOXSTRICTRC_VAL(rcStrict);
+#endif
+    return VERR_EM_INTERPRETER;
 }

…
     return VINF_SUCCESS;
 }
+
+
+/**
+ * Nested-guest \#VMEXIT handler for debug exceptions (SVM_EXIT_EXCEPTION_1).
+ * Unconditional \#VMEXIT.
+ */
+HMSVM_EXIT_DECL hmR0SvmNestedExitXcptDB(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
+{
+    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
+
+    /* If this #DB is the result of delivering an event, go back to the interpreter. */
+    /** @todo if triple-fault is returned in nested-guest scenario convert to a
+     *        shutdown VMEXIT. */
+    HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY();
+    if (RT_UNLIKELY(pVCpu->hm.s.Event.fPending))
+    {
+        STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectPendingInterpret);
+        return VINF_EM_RAW_INJECT_TRPM_EVENT;
+    }
+
+    hmR0SvmSetPendingXcpt(pVCpu, X86_XCPT_DB);
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Nested-guest \#VMEXIT handler for breakpoint exceptions (SVM_EXIT_EXCEPTION_3).
+ * Conditional \#VMEXIT.
+ */
+HMSVM_EXIT_DECL hmR0SvmNestedExitXcptBP(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
+{
+    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
+
+    /** @todo if triple-fault is returned in nested-guest scenario convert to a
+     *        shutdown VMEXIT. */
+    HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY();
+
+    SVMEVENT Event;
+    Event.u          = 0;
+    Event.n.u1Valid  = 1;
+    Event.n.u3Type   = SVM_EVENT_EXCEPTION;
+    Event.n.u8Vector = X86_XCPT_BP;
+    hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
+    return VINF_SUCCESS;
+}
+
 #endif /* VBOX_WITH_NESTED_HWVIRT */
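The comment block added in the intercept-merge hunk above explains the key asymmetry: VMMCALL merely raises \#UD in the guest when it is not intercepted, while instructions such as VMSAVE can write arbitrary physical memory and therefore stay in HMSVM_MANDATORY_GUEST_CTRL_INTERCEPTS. Below is a minimal, self-contained C sketch of that merge-then-prune step; the field names follow the changeset, but the VMCBCTRL layout, the bit positions and the mandatory-intercept set are simplified stand-ins, not the real definitions from the VirtualBox headers.

    #include <assert.h>
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in constants; the real bit positions live in the SVM headers. */
    #define RT_BIT(a_iBit)                         (1U << (a_iBit))
    #define X86_XCPT_UD                            6U
    #define SVM_CTRL_INTERCEPT_VMRUN               (UINT64_C(1) << 0)        /* stand-in bit */
    #define SVM_CTRL_INTERCEPT_VMMCALL             (UINT64_C(1) << 1)        /* stand-in bit */
    #define HMSVM_MANDATORY_GUEST_CTRL_INTERCEPTS  SVM_CTRL_INTERCEPT_VMRUN  /* stand-in set */

    /* Stand-in for the two VMCB control-area fields touched by the changeset. */
    typedef struct VMCBCTRL
    {
        uint32_t u32InterceptXcpt;   /* exception intercept bitmap */
        uint64_t u64InterceptCtrl;   /* control (instruction) intercept bitmap */
    } VMCBCTRL;

    /* Merge the outer guest's intercepts into the nested-guest VMCB, then drop
     * what the host does not need while the nested-guest runs: the VMMCALL
     * control intercept and the host's #UD exception intercept are cleared,
     * while the mandatory intercepts must survive the merge. */
    static void svmMergeAndPruneIntercepts(VMCBCTRL *pNstGst, const VMCBCTRL *pGst)
    {
        pNstGst->u32InterceptXcpt |= pGst->u32InterceptXcpt;
        pNstGst->u64InterceptCtrl |= pGst->u64InterceptCtrl;

        /* The mandatory set is checked on the merged value before pruning. */
        assert(   (pNstGst->u64InterceptCtrl & HMSVM_MANDATORY_GUEST_CTRL_INTERCEPTS)
               ==  HMSVM_MANDATORY_GUEST_CTRL_INTERCEPTS);

        pNstGst->u64InterceptCtrl &= ~SVM_CTRL_INTERCEPT_VMMCALL;   /* guest sees #UD instead */
        pNstGst->u32InterceptXcpt &= ~RT_BIT(X86_XCPT_UD);          /* host no longer traps #UD */
    }

    int main(void)
    {
        VMCBCTRL Gst    = { RT_BIT(X86_XCPT_UD), SVM_CTRL_INTERCEPT_VMRUN | SVM_CTRL_INTERCEPT_VMMCALL };
        VMCBCTRL NstGst = { 0, SVM_CTRL_INTERCEPT_VMRUN };
        svmMergeAndPruneIntercepts(&NstGst, &Gst);
        printf("xcpt=%#" PRIx32 " ctrl=%#" PRIx64 "\n", NstGst.u32InterceptXcpt, NstGst.u64InterceptCtrl);
        return 0;
    }

Because VMMCALL is not part of the mandatory set, clearing it after the Assert cannot violate the invariant the Assert checks.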
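The switch rewrite above open-codes what the removed HM_SVM_HANDLE_XCPT_EXIT_NESTED macro did and adds the new behaviour: when the nested hypervisor intercepts the exception, the exit is reflected to it as a \#VMEXIT; otherwise the exception is made pending for the nested-guest (via hmR0SvmSetPendingXcpt or the per-vector helpers; \#AC still goes to the regular handler). A trimmed-down sketch of that decision follows; NSTGSTCACHE and XCPTACTION are hypothetical stand-ins, not VirtualBox types.

    #include <stdint.h>
    #include <stdio.h>

    #define RT_BIT(a_iBit)  (1U << (a_iBit))
    #define X86_XCPT_DB     1U
    #define X86_XCPT_UD     6U

    /* Hypothetical stand-in for the cached nested-guest exception intercept bitmap. */
    typedef struct NSTGSTCACHE { uint32_t u32InterceptXcpt; } NSTGSTCACHE;
    typedef enum XCPTACTION { XCPTACTION_REFLECT_VMEXIT, XCPTACTION_INJECT_INTO_NESTED_GUEST } XCPTACTION;

    /* Reflect the exit to the nested hypervisor if it intercepts the vector,
     * otherwise queue the exception for injection into the nested-guest. */
    static XCPTACTION svmDecideNestedXcpt(const NSTGSTCACHE *pCache, uint8_t uVector)
    {
        return (pCache->u32InterceptXcpt & RT_BIT(uVector))
             ? XCPTACTION_REFLECT_VMEXIT
             : XCPTACTION_INJECT_INTO_NESTED_GUEST;
    }

    int main(void)
    {
        NSTGSTCACHE Cache = { RT_BIT(X86_XCPT_DB) };   /* nested hypervisor intercepts only #DB */
        printf("#DB: %s\n", svmDecideNestedXcpt(&Cache, X86_XCPT_DB) == XCPTACTION_REFLECT_VMEXIT
                            ? "reflect #VMEXIT" : "inject into nested-guest");
        printf("#UD: %s\n", svmDecideNestedXcpt(&Cache, X86_XCPT_UD) == XCPTACTION_REFLECT_VMEXIT
                            ? "reflect #VMEXIT" : "inject into nested-guest");
        return 0;
    }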