Changeset 58658 in vbox for trunk/src/VBox/VMM/VMMR0

Timestamp:
    Nov 11, 2015, 11:20:55 AM
Location:
    trunk/src/VBox/VMM/VMMR0
Files:
    2 edited
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
--- trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp (r58545)
+++ trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp (r58658)
@@ -107,7 +107,8 @@
 { \
     int rc = hmR0SvmCheckExitDueToEventDelivery(pVCpu, pCtx, pSvmTransient); \
-    if (RT_UNLIKELY(rc == VINF_HM_DOUBLE_FAULT)) \
+    if (RT_LIKELY(rc == VINF_SUCCESS)) { /* likely */ } \
+    else if (rc == VINF_HM_DOUBLE_FAULT) \
         return VINF_SUCCESS; \
-    else if (RT_UNLIKELY(rc == VINF_EM_RESET)) \
+    else \
         return rc; \
 } while (0)
@@ -304,3 +305,4 @@
 static FNSVMEXITHANDLER hmR0SvmExitXcptMF;
 static FNSVMEXITHANDLER hmR0SvmExitXcptDB;
+static FNSVMEXITHANDLER hmR0SvmExitXcptAC;
 /** @} */
@@ -692,4 +694,10 @@
     HMCPU_EXIT_HISTORY_RESET(pVCpu);
 
+    /* Always trap #AC for reasons of security. */
+    pVmcb->ctrl.u32InterceptException |= RT_BIT_32(X86_XCPT_AC);
+
+    /* Always trap #DB for reasons of security. */
+    pVmcb->ctrl.u32InterceptException |= RT_BIT_32(X86_XCPT_DB);
+
     /* Trap exceptions unconditionally (debug purposes). */
 #ifdef HMSVM_ALWAYS_TRAP_PF
@@ -700,5 +708,4 @@
     pVmcb->ctrl.u32InterceptException |= 0
                                          | RT_BIT(X86_XCPT_BP)
-                                         | RT_BIT(X86_XCPT_DB)
                                          | RT_BIT(X86_XCPT_DE)
                                          | RT_BIT(X86_XCPT_NM)
@@ -1112,4 +1119,6 @@
 DECLINLINE(void) hmR0SvmRemoveXcptIntercept(PSVMVMCB pVmcb, uint32_t u32Xcpt)
 {
+    Assert(u32Xcpt != X86_XCPT_DB);
+    Assert(u32Xcpt != X86_XCPT_AC);
 #ifndef HMSVM_ALWAYS_TRAP_ALL_XCPTS
     if (pVmcb->ctrl.u32InterceptException & RT_BIT(u32Xcpt))
@@ -1437,5 +1446,4 @@
     Assert((pCtx->dr[7] & X86_DR7_RA1_MASK) == X86_DR7_RA1_MASK); Assert((pCtx->dr[7] & X86_DR7_RAZ_MASK) == 0);
 
-    bool fInterceptDB     = false;
     bool fInterceptMovDRx = false;
 
@@ -1450,5 +1458,4 @@
         pVCpu->hm.s.fClearTrapFlag = true;
         pVmcb->guest.u64RFlags |= X86_EFL_TF;
-        fInterceptDB = true;
         fInterceptMovDRx = true; /* Need clean DR6, no guest mess. */
     }
@@ -1495,5 +1502,4 @@
         /** @todo If we cared, we could optimize to allow the guest to read registers
          *        with the same values. */
-        fInterceptDB = true;
         fInterceptMovDRx = true;
         Log5(("hmR0SvmLoadSharedDebugState: Loaded hyper DRx\n"));
@@ -1542,3 +1548,7 @@
      * If no debugging enabled, we'll lazy load DR0-3. We don't need to
      * intercept #DB as DR6 is updated in the VMCB.
+     *
+     * Note! If we cared and dared, we could skip intercepting \#DB here.
+     *       However, \#DB shouldn't be performance critical, so we'll play safe
+     *       and keep the code similar to the VT-x code and always intercept it.
      */
@@ -1554,12 +1564,5 @@
     }
 
-    /*
-     * Set up the intercepts.
-     */
-    if (fInterceptDB)
-        hmR0SvmAddXcptIntercept(pVmcb, X86_XCPT_DB);
-    else
-        hmR0SvmRemoveXcptIntercept(pVmcb, X86_XCPT_DB);
-
+    Assert(pVmcb->ctrl.u32InterceptException & RT_BIT_32(X86_XCPT_DB));
     if (fInterceptMovDRx)
     {
@@ -3545,4 +3548,7 @@
             return hmR0SvmExitXcptDB(pVCpu, pCtx, pSvmTransient);
 
+        case SVM_EXIT_EXCEPTION_11:  /* X86_XCPT_AC */
+            return hmR0SvmExitXcptAC(pVCpu, pCtx, pSvmTransient);
+
         case SVM_EXIT_MONITOR:
             return hmR0SvmExitMonitor(pVCpu, pCtx, pSvmTransient);
@@ -3661,5 +3667,5 @@
             /* SVM_EXIT_EXCEPTION_E: */   /* X86_XCPT_PF - Handled above. */
             /* SVM_EXIT_EXCEPTION_10: */  /* X86_XCPT_MF - Handled above. */
-            case SVM_EXIT_EXCEPTION_11:   /* X86_XCPT_AC */
+            /* SVM_EXIT_EXCEPTION_11: */  /* X86_XCPT_AC - Handled above. */
             case SVM_EXIT_EXCEPTION_12:   /* X86_XCPT_MC */
             case SVM_EXIT_EXCEPTION_13:   /* X86_XCPT_XF */
@@ -4069,4 +4075,5 @@
  *          continue execution of the guest which will delivery the \#DF.
  * @retval  VINF_EM_RESET if we detected a triple-fault condition.
+ * @retval  VERR_EM_GUEST_CPU_HANG if we detected a guest CPU hang.
  *
  * @param   pVCpu The cross context virtual CPU structure.
@@ -4092,4 +4099,5 @@
         SVMREFLECTXCPT_DF,    /* Reflect the exception as a double-fault to the guest. */
         SVMREFLECTXCPT_TF,    /* Indicate a triple faulted state to the VMM. */
+        SVMREFLECTXCPT_HANG,  /* Indicate bad VM trying to deadlock the CPU. */
         SVMREFLECTXCPT_NONE   /* Nothing to reflect. */
     } SVMREFLECTXCPT;
@@ -4115,4 +4123,10 @@
                 pSvmTransient->fVectoringDoublePF = true;
                 Log4(("IDT: Vectoring double #PF uCR2=%#RX64\n", pCtx->cr2));
+            }
+            else if (   uExitVector == X86_XCPT_AC
+                     && uIdtVector == X86_XCPT_AC)
+            {
+                enmReflect = SVMREFLECTXCPT_HANG;
+                Log4(("IDT: Nested #AC - Bad guest\n"));
             }
             else if (   (pVmcb->ctrl.u32InterceptException & HMSVM_CONTRIBUTORY_XCPT_MASK)
@@ -4191,4 +4205,10 @@
             }
 
+            case SVMREFLECTXCPT_HANG:
+            {
+                rc = VERR_EM_GUEST_CPU_HANG;
+                break;
+            }
+
             default:
                 Assert(rc == VINF_SUCCESS);
@@ -4196,5 +4216,5 @@
         }
     }
-    Assert(rc == VINF_SUCCESS || rc == VINF_HM_DOUBLE_FAULT || rc == VINF_EM_RESET);
+    Assert(rc == VINF_SUCCESS || rc == VINF_HM_DOUBLE_FAULT || rc == VINF_EM_RESET || rc == VERR_EM_GUEST_CPU_HANG);
     NOREF(pCtx);
     return rc;
@@ -5481,4 +5501,24 @@
 }
 
+
+/**
+ * \#VMEXIT handler for alignment check exceptions (SVM_EXIT_EXCEPTION_11).
+ * Conditional \#VMEXIT.
+ */
+HMSVM_EXIT_DECL hmR0SvmExitXcptAC(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
+{
+    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
+
+    HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY();
+
+    SVMEVENT Event;
+    Event.u          = 0;
+    Event.n.u1Valid  = 1;
+    Event.n.u3Type   = SVM_EVENT_EXCEPTION;
+    Event.n.u8Vector = X86_XCPT_AC;
+    hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
+    return VINF_SUCCESS;
+}
+
 /** @} */
 
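Both backends add the same guard to their event-delivery reflection logic: if the exception that was being delivered when the VM-exit occurred (the IDT vector) and the exception that caused the exit are both #AC, re-injecting would just recreate the delivery loop, so the code now returns VERR_EM_GUEST_CPU_HANG instead of reflecting. The following is a minimal sketch of that decision in plain C, not the VirtualBox sources: the vector constants and contributory table are written out here, the check ordering is simplified, and the vectoring-#PF bookkeeping is omitted.

#include <stdbool.h>
#include <stdint.h>

#define XCPT_DE  0   /* divide error        (contributory) */
#define XCPT_DF  8   /* double fault */
#define XCPT_TS 10   /* invalid TSS         (contributory) */
#define XCPT_NP 11   /* segment not present (contributory) */
#define XCPT_SS 12   /* stack fault         (contributory) */
#define XCPT_GP 13   /* general protection  (contributory) */
#define XCPT_PF 14   /* page fault */
#define XCPT_AC 17   /* alignment check */

typedef enum { REFLECT_XCPT, REFLECT_DF, REFLECT_TF, REFLECT_HANG } REFLECT;

static bool isContributory(uint8_t uVector)
{
    return uVector == XCPT_DE || uVector == XCPT_TS || uVector == XCPT_NP
        || uVector == XCPT_SS || uVector == XCPT_GP;
}

/* uIdtVector: the exception whose delivery was interrupted by the VM-exit.
   uExitVector: the exception that caused the VM-exit. */
static REFLECT decideReflection(uint8_t uIdtVector, uint8_t uExitVector)
{
    /* #AC is benign with respect to itself, so hardware never promotes a
       nested #AC to #DF; it would redeliver it forever. Bail out instead. */
    if (uIdtVector == XCPT_AC && uExitVector == XCPT_AC)
        return REFLECT_HANG;                      /* -> VERR_EM_GUEST_CPU_HANG */

    /* Classic double-fault promotion rules (simplified). */
    if (   (uIdtVector == XCPT_PF && uExitVector == XCPT_PF)
        || (uIdtVector == XCPT_PF && isContributory(uExitVector))
        || (isContributory(uIdtVector) && isContributory(uExitVector)))
        return REFLECT_DF;

    /* A fault while delivering #DF is a triple fault: reset the VM. */
    if (uIdtVector == XCPT_DF)
        return REFLECT_TF;                        /* -> VINF_EM_RESET */

    return REFLECT_XCPT;                          /* re-inject the original event */
}

The design point worth noting is that the nested-#AC case is detected and escalated to the VMM as a hang, rather than being handed back to the guest as a #DF it could simply provoke again.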
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
--- trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp (r58487)
+++ trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp (r58658)
@@ -143,9 +143,9 @@
  *          support.
  */
-#define HMVMX_REAL_MODE_XCPT_MASK  ( RT_BIT(X86_XCPT_DE) | RT_BIT(X86_XCPT_DB) | RT_BIT(X86_XCPT_NMI) \
+#define HMVMX_REAL_MODE_XCPT_MASK  ( RT_BIT(X86_XCPT_DE) /* always: | RT_BIT(X86_XCPT_DB) */ | RT_BIT(X86_XCPT_NMI) \
                                    | RT_BIT(X86_XCPT_BP) | RT_BIT(X86_XCPT_OF) | RT_BIT(X86_XCPT_BR) \
                                    | RT_BIT(X86_XCPT_UD) /* RT_BIT(X86_XCPT_NM) */ | RT_BIT(X86_XCPT_DF) \
                                    | RT_BIT(X86_XCPT_CO_SEG_OVERRUN) | RT_BIT(X86_XCPT_TS) | RT_BIT(X86_XCPT_NP) \
                                    | RT_BIT(X86_XCPT_SS) | RT_BIT(X86_XCPT_GP) /* RT_BIT(X86_XCPT_PF) */ \
-                                   /* RT_BIT(X86_XCPT_MF) */ | RT_BIT(X86_XCPT_AC) | RT_BIT(X86_XCPT_MC) \
+                                   /* RT_BIT(X86_XCPT_MF) always: | RT_BIT(X86_XCPT_AC) */ | RT_BIT(X86_XCPT_MC) \
                                    | RT_BIT(X86_XCPT_XF))
@@ -409,4 +409,5 @@
 static int hmR0VmxExitXcptBP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
 static int hmR0VmxExitXcptGP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
+static int hmR0VmxExitXcptAC(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
 #ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
 static int hmR0VmxExitXcptGeneric(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
@@ -2582,4 +2583,12 @@
     uint32_t u32XcptBitmap = pVCpu->hm.s.fGIMTrapXcptUD ? RT_BIT(X86_XCPT_UD) : 0;
 
+    /* Must always intercept #AC to prevent the guest from hanging the CPU. */
+    u32XcptBitmap |= RT_BIT_32(X86_XCPT_AC);
+
+    /* Because we need to maintain the DR6 state even when intercepting DRx reads
+       and writes, and because recursive #DBs can cause the CPU hang, we must always
+       intercept #DB. */
+    u32XcptBitmap |= RT_BIT_32(X86_XCPT_DB);
+
     /* Without Nested Paging, #PF must cause a VM-exit so we can sync our shadow page tables. */
     if (!pVM->hm.s.fNestedPaging)
@@ -3436,4 +3445,7 @@
     }
 
+    Assert(pVCpu->hm.s.vmx.u32XcptBitmap & RT_BIT_32(X86_XCPT_AC));
+    Assert(pVCpu->hm.s.vmx.u32XcptBitmap & RT_BIT_32(X86_XCPT_DB));
+
     rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVCpu->hm.s.vmx.u32XcptBitmap);
     AssertRCReturn(rc, rc);
@@ -3688,5 +3700,4 @@
     pVCpu->hm.s.vmx.u32XcptBitmap |= 0
                                      | RT_BIT(X86_XCPT_BP)
-                                     | RT_BIT(X86_XCPT_DB)
                                      | RT_BIT(X86_XCPT_DE)
                                      | RT_BIT(X86_XCPT_NM)
@@ -4007,5 +4018,5 @@
     int rc;
     PVM pVM = pVCpu->CTX_SUFF(pVM);
-    bool fInterceptDB = false;
+    bool fSteppingDB = false;
     bool fInterceptMovDRx = false;
     if (pVCpu->hm.s.fSingleInstruction)
@@ -4017,5 +4028,5 @@
         rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
         AssertRCReturn(rc, rc);
-        Assert(fInterceptDB == false);
+        Assert(fSteppingDB == false);
     }
     else
@@ -4024,9 +4035,9 @@
         pVCpu->hm.s.fClearTrapFlag = true;
         HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RFLAGS);
-        fInterceptDB = true;
+        fSteppingDB = true;
         }
     }
 
-    if (   fInterceptDB
+    if (   fSteppingDB
         || (CPUMGetHyperDR7(pVCpu) & X86_DR7_ENABLED_MASK))
     {
@@ -4060,5 +4071,4 @@
 
         pVCpu->hm.s.fUsingHyperDR7 = true;
-        fInterceptDB = true;
         fInterceptMovDRx = true;
     }
@@ -4089,9 +4099,10 @@
             STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxArmed);
         }
-        Assert(!fInterceptDB);
         Assert(!fInterceptMovDRx);
     }
     /*
      * If no debugging enabled, we'll lazy load DR0-3. Unlike on AMD-V, we
-     * must intercept #DB in order to maintain a correct DR6 guest value.
+     * must intercept #DB in order to maintain a correct DR6 guest value, and
+     * because we need to intercept it to prevent nested #DBs from hanging the
+     * CPU, we end up always having to intercept it. See hmR0VmxInitXcptBitmap.
      */
@@ -4104,5 +4115,4 @@
     {
         fInterceptMovDRx = true;
-        fInterceptDB = true;
     }
 
@@ -4112,20 +4122,3 @@
 
         pVCpu->hm.s.fUsingHyperDR7 = false;
-    }
-
-    /*
-     * Update the exception bitmap regarding intercepting #DB generated by the guest.
-     */
-    if (   fInterceptDB
-        || pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
-    {
-        pVCpu->hm.s.vmx.u32XcptBitmap |= RT_BIT(X86_XCPT_DB);
-        HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS);
-    }
-    else
-    {
-#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
-        pVCpu->hm.s.vmx.u32XcptBitmap &= ~RT_BIT(X86_XCPT_DB);
-        HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS);
-#endif
-    }
     }
@@ -5597,4 +5590,5 @@
  *          continue execution of the guest which will delivery the \#DF.
  * @retval  VINF_EM_RESET if we detected a triple-fault condition.
+ * @retval  VERR_EM_GUEST_CPU_HANG if we detected a guest CPU hang.
  *
  * @param   pVCpu The cross context virtual CPU structure.
@@ -5625,4 +5619,5 @@
         VMXREFLECTXCPT_DF,    /* Reflect the exception as a double-fault to the guest. */
         VMXREFLECTXCPT_TF,    /* Indicate a triple faulted state to the VMM. */
+        VMXREFLECTXCPT_HANG,  /* Indicate bad VM trying to deadlock the CPU. */
         VMXREFLECTXCPT_NONE   /* Nothing to reflect. */
     } VMXREFLECTXCPT;
@@ -5647,4 +5642,10 @@
                 pVmxTransient->fVectoringDoublePF = true;
                 Log4(("IDT: vcpu[%RU32] Vectoring Double #PF uCR2=%#RX64\n", pVCpu->idCpu, pMixedCtx->cr2));
+            }
+            else if (   uExitVector == X86_XCPT_AC
+                     && uIdtVector == X86_XCPT_AC)
+            {
+                enmReflect = VMXREFLECTXCPT_HANG;
+                Log4(("IDT: Nested #AC - Bad guest\n"));
             }
             else if (   (pVCpu->hm.s.vmx.u32XcptBitmap & HMVMX_CONTRIBUTORY_XCPT_MASK)
@@ -5745,4 +5746,10 @@
             }
 
+            case VMXREFLECTXCPT_HANG:
+            {
+                rc = VERR_EM_GUEST_CPU_HANG;
+                break;
+            }
+
             default:
                 Assert(rc == VINF_SUCCESS);
@@ -5768,5 +5775,5 @@
     }
 
-    Assert(rc == VINF_SUCCESS || rc == VINF_HM_DOUBLE_FAULT || rc == VINF_EM_RESET);
+    Assert(rc == VINF_SUCCESS || rc == VINF_HM_DOUBLE_FAULT || rc == VINF_EM_RESET || rc == VERR_EM_GUEST_CPU_HANG);
     return rc;
 }
@@ -8259,4 +8266,6 @@
     if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS))
     {
+        Assert(pVCpu->hm.s.vmx.u32XcptBitmap & RT_BIT_32(X86_XCPT_AC));
+        Assert(pVCpu->hm.s.vmx.u32XcptBitmap & RT_BIT_32(X86_XCPT_DB));
         int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVCpu->hm.s.vmx.u32XcptBitmap);
         AssertRC(rc);
@@ -9872,4 +9881,5 @@
             case X86_XCPT_DB: rc = hmR0VmxExitXcptDB(pVCpu, pMixedCtx, pVmxTransient); break;
             case X86_XCPT_BP: rc = hmR0VmxExitXcptBP(pVCpu, pMixedCtx, pVmxTransient); break;
+            case X86_XCPT_AC: rc = hmR0VmxExitXcptAC(pVCpu, pMixedCtx, pVmxTransient); break;
 #ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
             case X86_XCPT_XF: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestXF);
@@ -11293,17 +11303,10 @@
     {
         Assert(!DBGFIsStepping(pVCpu));
-
-        /* Don't intercept MOV DRx and #DB any more. */
+        Assert(pVCpu->hm.s.vmx.u32XcptBitmap & RT_BIT_32(X86_XCPT_DB));
+
+        /* Don't intercept MOV DRx any more. */
         pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT;
         rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
         AssertRCReturn(rc, rc);
-
-        if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
-        {
-#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
-            pVCpu->hm.s.vmx.u32XcptBitmap &= ~RT_BIT(X86_XCPT_DB);
-            HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS);
-#endif
-        }
 
         /* We're playing with the host CPU state here, make sure we can't preempt or longjmp. */
@@ -11562,4 +11565,25 @@
     Assert(rc == VINF_SUCCESS || rc == VINF_EM_RAW_GUEST_TRAP || rc == VINF_EM_DBG_BREAKPOINT);
     return rc;
+}
+
+
+/**
+ * VM-exit exception handler for \#AC (alignment check exception).
+ */
+static int hmR0VmxExitXcptAC(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
+{
+    HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
+
+    /*
+     * Re-inject it. We'll detect any nesting before getting here.
+     */
+    int rc  = hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
+    rc     |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
+    AssertRCReturn(rc, rc);
+    Assert(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_INFO);
+
+    hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
+                           pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
+    return VINF_SUCCESS;
 }
 
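On the VT-x side the same policy is baked in at VMCS setup time: #AC and #DB go into the exception bitmap unconditionally in the init path, the code paths that used to clear RT_BIT(X86_XCPT_DB) from the bitmap are removed, and asserts before every VMX_VMCS32_CTRL_EXCEPTION_BITMAP write enforce the invariant. This hardening appears to address the same class of guest-triggered host-CPU hang disclosed around the same time for other hypervisors (the nested #AC/#DB delivery loops, cf. CVE-2015-5307 and CVE-2015-8104), though the changeset itself does not name them. A compact sketch of the resulting bitmap policy, with illustrative names rather than the VirtualBox API:

#include <stdbool.h>
#include <stdint.h>

#define BIT32(n) ((uint32_t)1 << (n))
#define XCPT_DB   1   /* debug exception */
#define XCPT_UD   6   /* invalid opcode */
#define XCPT_PF  14   /* page fault */
#define XCPT_AC  17   /* alignment check */

/* Illustrative, not the VirtualBox API: compute the exception bitmap once;
   #AC and #DB are unconditional and must never be cleared afterwards. */
static uint32_t buildXcptBitmap(bool fNestedPaging, bool fTrapXcptUD)
{
    uint32_t bm = fTrapXcptUD ? BIT32(XCPT_UD) : 0;
    bm |= BIT32(XCPT_AC);      /* always: a nested #AC would hang the host CPU */
    bm |= BIT32(XCPT_DB);      /* always: keeps DR6 coherent, stops nested #DB */
    if (!fNestedPaging)
        bm |= BIT32(XCPT_PF);  /* shadow paging must see guest page faults */
    return bm;
}

/* The matching invariant the changeset asserts before each VMCS write. */
static bool xcptBitmapValid(uint32_t bm)
{
    return (bm & BIT32(XCPT_AC)) && (bm & BIT32(XCPT_DB));
}

Making the two intercepts permanent also simplifies the debug-register load path: it now only toggles MOV-DRx interception, and the renamed fSteppingDB flag is left to track single-stepping alone.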