Changeset 48552 in vbox for trunk/src/VBox/VMM
- Timestamp: Sep 19, 2013 4:52:49 PM
- Location: trunk/src/VBox/VMM/VMMR0
- Files: 2 edited
Legend:
- Unmodified: context lines, no prefix
- Added: prefixed '+'
- Removed: prefixed '-'
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
r48475 → r48552:

     /** Whether the TSC offset mode needs to be updated. */
     bool                fUpdateTscOffsetting;
+    /** Whether the guest FPU state was active at the time of #VMEXIT. */
+    bool                fWasGuestFPUStateActive;
+    /** Whether the guest debug state was active at the time of #VMEXIT. */
+    bool                fWasGuestDebugStateActive;
+    /** Whether the hyper debug state was active at the time of #VMEXIT. */
+    bool                fWasHyperDebugStateActive;
 } SVMTRANSIENT, *PSVMTRANSIENT;
 AssertCompileMemberAlignment(SVMTRANSIENT, u64ExitCode, sizeof(uint64_t));
…
          * Note! DBGF expects a clean DR6 state before executing guest code.
          */
+#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
+        if (   CPUMIsGuestInLongModeEx(pMixedCtx)
+            && !CPUMIsHyperDebugStateActivePending(pVCpu))
+        {
+            CPUMR0LoadHyperDebugState(pVCpu, false /* include DR6 */);
+            Assert(!CPUMIsGuestDebugStateActivePending(pVCpu));
+            Assert(CPUMIsHyperDebugStateActivePending(pVCpu));
+        }
+        else
+#endif
         if (!CPUMIsHyperDebugStateActive(pVCpu))
+        {
             CPUMR0LoadHyperDebugState(pVCpu, false /* include DR6 */);
-        Assert(!CPUMIsGuestDebugStateActive(pVCpu));
-        Assert(CPUMIsHyperDebugStateActive(pVCpu) || HC_ARCH_BITS == 32);
+            Assert(!CPUMIsGuestDebugStateActive(pVCpu));
+            Assert(CPUMIsHyperDebugStateActive(pVCpu));
+        }

         /* Update DR6 & DR7. (The other DRx values are handled by CPUM one way or the other.) */
         if (   pVmcb->guest.u64DR6 != X86_DR6_INIT_VAL
-            || pVmcb->guest.u64DR7 != CPUMGetHyperDR7(pVCpu)
+            || pVmcb->guest.u64DR7 != CPUMGetHyperDR7(pVCpu))
         {
             pVmcb->guest.u64DR7 = CPUMGetHyperDR7(pVCpu);
…
         if (pCtx->dr[7] & (X86_DR7_ENABLED_MASK | X86_DR7_GD)) /** @todo Why GD? */
         {
+#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
+            if (   CPUMIsGuestInLongModeEx(pMixedCtx)
+                && !CPUMIsGuestDebugStateActivePending(pVCpu))
+            {
+                CPUMR0LoadGuestDebugState(pVCpu, false /* include DR6 */);
+                STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxArmed);
+                Assert(!CPUMIsHyperDebugStateActivePending(pVCpu));
+                Assert(CPUMIsGuestDebugStateActivePending(pVCpu));
+            }
+            else
+#endif
             if (!CPUMIsGuestDebugStateActive(pVCpu))
             {
                 CPUMR0LoadGuestDebugState(pVCpu, false /* include DR6 */);
                 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxArmed);
+                Assert(!CPUMIsHyperDebugStateActive(pVCpu));
+                Assert(CPUMIsGuestDebugStateActive(pVCpu));
             }
-            Assert(!CPUMIsHyperDebugStateActive(pVCpu));
-            Assert(CPUMIsGuestDebugStateActive(pVCpu) || HC_ARCH_BITS == 32);
             Log5(("hmR0SvmLoadSharedDebugState: Loaded guest DRx\n"));
         }
         /*
-         * If no debugging enabled, we'll lazy load DR0-3.
+         * If no debugging enabled, we'll lazy load DR0-3. We don't need to
+         * intercept #DB as DR6 is updated in the VMCB.
          */
+#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
+        else if (   (   CPUMIsGuestInLongModeEx(pMixedCtx)
+                     && !CPUMIsGuestDebugStateActivePending(pVCpu))
+                 || !CPUMIsGuestDebugStateActive(pVCpu))
+#else
         else if (!CPUMIsGuestDebugStateActive(pVCpu))
+#endif
+        {
             fInterceptMovDRx = true;
+        }
     }
…
  * @remarks No-long-jump zone!!!
  */
-static void hmR0VmxLoadSharedState(PVMCPU pVCpu, PSVMVMCB pVmcb, PCPUMCTX pCtx)
+static void hmR0SvmLoadSharedState(PVMCPU pVCpu, PSVMVMCB pVmcb, PCPUMCTX pCtx)
 {
     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
…
      * Guest Debug registers.
      */
+    /** @todo We need to save DR6, DR7 according to what we did in
+     *        hmR0SvmLoadSharedDebugState(). */
     if (!CPUMIsHyperDebugStateActive(pVCpu))
     {
…
     /* When thread-context hooks are used, we can avoid doing the leave again if we had been preempted before
-       and done this from the VMXR0ThreadCtxCallback(). */
+       and done this from the SVMR0ThreadCtxCallback(). */
     if (!pVCpu->hm.s.fLeaveDone)
     {
…
     PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
     if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_HOST_GUEST_SHARED_STATE)
-        hmR0VmxLoadSharedState(pVCpu, pVmcb, pCtx);
+        hmR0SvmLoadSharedState(pVCpu, pVmcb, pCtx);
     pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_HOST_CONTEXT;   /* Preemption might set this, nothing to do on AMD-V. */
     AssertMsg(!pVCpu->hm.s.fContextUseFlags, ("fContextUseFlags=%#x\n", pVCpu->hm.s.fContextUseFlags));
…
         pSvmTransient->fUpdateTscOffsetting = false;
     }
+
+    /* Store status of the shared guest-host state at the time of VMRUN. */
+#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
+    if (CPUMIsGuestInLongModeEx(pCtx))
+    {
+        pSvmTransient->fWasGuestDebugStateActive = CPUMIsGuestDebugStateActivePending(pVCpu);
+        pSvmTransient->fWasHyperDebugStateActive = CPUMIsHyperDebugStateActivePending(pVCpu);
+    }
+    else
+#endif
+    {
+        pSvmTransient->fWasGuestDebugStateActive = CPUMIsGuestDebugStateActive(pVCpu);
+        pSvmTransient->fWasHyperDebugStateActive = CPUMIsHyperDebugStateActive(pVCpu);
+    }
+    pSvmTransient->fWasGuestFPUStateActive = CPUMIsGuestFPUStateActive(pVCpu);

     /* Flush the appropriate tagged-TLB entries. */
…
     STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxRead);

-    /* We should -not- get this VM-exit if the guest is debugging. */
-    AssertMsgReturn(!CPUMIsGuestDebugStateActive(pVCpu),
+    /* We should -not- get this VM-exit if we're not stepping or the guest is debugging. */
+    AssertMsgReturn(   pVCpu->hm.s.fSingleInstruction
+                    || DBGFIsStepping(pVCpu)
+                    || !pSvmTransient->fWasGuestDebugStateActive,
                     ("hmR0SvmExitReadDRx: Unexpected exit. pVCpu=%p pCtx=%p\n", pVCpu, pCtx),
                     VERR_SVM_UNEXPECTED_EXIT);
…
     /*
      * Lazy DR0-3 loading?
      */
-    if (!CPUMIsHyperDebugStateActive(pVCpu))
+    if (!pSvmTransient->fWasHyperDebugStateActive)
     {
         Assert(!DBGFIsStepping(pVCpu)); Assert(!pVCpu->hm.s.fSingleInstruction);
…
         pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;

-        /* We're playing with the host CPU state here, make sure we don't preempt. */
+        /* We're playing with the host CPU state here, make sure we don't preempt or longjmp. */
+        VMMRZCallRing3Disable(pVCpu);
         HM_DISABLE_PREEMPT_IF_NEEDED();
…
         HM_RESTORE_PREEMPT_IF_NEEDED();
+        VMMRZCallRing3Enable(pVCpu);

         STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxContextSwitch);
…
             || DBGFBpIsHwIoArmed(pVM)))
         {
-            /* We're playing with the host CPU state here, make sure we don't preempt. */
+            /* We're playing with the host CPU state here, make sure we don't preempt or longjmp. */
+            VMMRZCallRing3Disable(pVCpu);
             HM_DISABLE_PREEMPT_IF_NEEDED();
…
             HM_RESTORE_PREEMPT_IF_NEEDED();
+            VMMRZCallRing3Enable(pVCpu);
         }
…
     HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY();

+    /* We're playing with the host CPU state here, make sure we don't preempt or longjmp. */
+    VMMRZCallRing3Disable(pVCpu);
+    HM_DISABLE_PREEMPT_IF_NEEDED();
+
+    int rc;
+    /* If the guest FPU was active at the time of the #NM exit, then it's a guest fault. */
+    if (pSvmTransient->fWasGuestFPUStateActive)
+    {
+        rc = VINF_EM_RAW_GUEST_TRAP;
+        Assert(CPUMIsGuestFPUStateActive(pVCpu) || (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR0));
+    }
+    else
+    {
 #ifndef HMSVM_ALWAYS_TRAP_ALL_XCPTS
-    Assert(!CPUMIsGuestFPUStateActive(pVCpu));
+        Assert(!pSvmTransient->fWasGuestFPUStateActive);
 #endif
-
-    /* We're playing with the host CPU state here, make sure we don't preempt. */
-    HM_DISABLE_PREEMPT_IF_NEEDED();
-
-    /* Lazy FPU loading; load the guest-FPU state transparently and continue execution of the guest. */
-    int rc = CPUMR0LoadGuestFPU(pVCpu->CTX_SUFF(pVM), pVCpu, pCtx);
-    pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
+        /* Lazy FPU loading; load the guest-FPU state transparently and continue execution of the guest. */
+        rc = CPUMR0LoadGuestFPU(pVCpu->CTX_SUFF(pVM), pVCpu, pCtx);
+        Assert(rc == VINF_EM_RAW_GUEST_TRAP || (rc == VINF_SUCCESS && CPUMIsGuestFPUStateActive(pVCpu)));
+    }
+
+    HM_RESTORE_PREEMPT_IF_NEEDED();
+    VMMRZCallRing3Enable(pVCpu);

     if (rc == VINF_SUCCESS)
     {
-        Assert(CPUMIsGuestFPUStateActive(pVCpu));
-        HM_RESTORE_PREEMPT_IF_NEEDED();
-
+        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
         STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowNM);
-        return VINF_SUCCESS;
-    }
-
-    HM_RESTORE_PREEMPT_IF_NEEDED();
-
-    /* Forward #NM to the guest. */
-    Assert(rc == VINF_EM_RAW_GUEST_TRAP);
-    hmR0SvmSetPendingXcptNM(pVCpu);
-    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNM);
+    }
+    else
+    {
+        /* Forward #NM to the guest. */
+        Assert(rc == VINF_EM_RAW_GUEST_TRAP);
+        hmR0SvmSetPendingXcptNM(pVCpu);
+        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNM);
+    }
     return VINF_SUCCESS;
 }
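The recurring idea on the SVM side is that the state of the shared guest-host resources (FPU, guest DRx, hypervisor DRx) is snapshotted into the SVMTRANSIENT structure right before VMRUN, and the exit handlers then consult that snapshot instead of the live CPUM state, which preemption or a longjmp back to ring 3 may have changed in the meantime. Below is a minimal, self-contained C sketch of that snapshot pattern; the VCPUSTATE and TRANSIENT types and all function names are hypothetical stand-ins for illustration, not the VBox API.

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-in for the per-VCPU state that CPUM tracks (hypothetical). */
    typedef struct VCPUSTATE
    {
        bool fGuestFpuActive;    /* guest FPU currently loaded on the host CPU */
        bool fGuestDebugActive;  /* guest DR0-3 currently loaded */
        bool fHyperDebugActive;  /* hypervisor debug state currently loaded */
    } VCPUSTATE;

    /* Mirrors the fWas* members this changeset adds to SVMTRANSIENT/VMXTRANSIENT. */
    typedef struct TRANSIENT
    {
        bool fWasGuestFPUStateActive;
        bool fWasGuestDebugStateActive;
        bool fWasHyperDebugStateActive;
    } TRANSIENT;

    /* Snapshot the shared guest-host state right before VMRUN/VMLAUNCH. */
    static void snapshotSharedState(const VCPUSTATE *pVCpu, TRANSIENT *pTransient)
    {
        pTransient->fWasGuestFPUStateActive   = pVCpu->fGuestFpuActive;
        pTransient->fWasGuestDebugStateActive = pVCpu->fGuestDebugActive;
        pTransient->fWasHyperDebugStateActive = pVCpu->fHyperDebugActive;
    }

    /* An exit handler consults the snapshot, not the live state, because a
       preemption or ring-3 callback may have unloaded the state between the
       exit being taken and the handler running. */
    static void handleDRxReadExit(const VCPUSTATE *pVCpu, const TRANSIENT *pTransient)
    {
        (void)pVCpu; /* the live state is deliberately not consulted here */
        if (!pTransient->fWasHyperDebugStateActive)
            printf("lazy-load guest DR0-3 and stop intercepting MOV DRx\n");
        else
            printf("hypervisor debugger owns DRx, keep intercepting\n");
    }

    int main(void)
    {
        VCPUSTATE vcpu = { true, false, false };
        TRANSIENT transient;
        snapshotSharedState(&vcpu, &transient); /* done just before VMRUN */
        vcpu.fGuestFpuActive = false;           /* e.g. preemption saved the FPU */
        handleDRxReadExit(&vcpu, &transient);   /* decision uses the snapshot */
        return 0;
    }

The design point is that the snapshot is taken inside the no-preempt window immediately before entering the guest, so it stays meaningful even when a thread-context hook or ring-3 callback swaps the host CPU state before an exit handler gets to run.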
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
r48478 → r48552:

      *  contributory exception or a page-fault. */
     bool            fVectoringPF;
+    /** Whether the guest FPU was active at the time of VM-exit. */
+    bool            fWasGuestFPUStateActive;
+    /** Whether the guest debug state was active at the time of VM-exit. */
+    bool            fWasGuestDebugStateActive;
+    /** Whether the hyper debug state was active at the time of VM-exit. */
+    bool            fWasHyperDebugStateActive;
 } VMXTRANSIENT;
 AssertCompileMemberAlignment(VMXTRANSIENT, uExitReason, sizeof(uint64_t));
…
     /* If the guest FPU state is active, don't need to VM-exit on writes to FPU related bits in CR0. */
     if (fInterceptNM)
-        u32CR0Mask |= (X86_CR0_TS | X86_CR0_MP);
-    else
-        u32CR0Mask &= ~(X86_CR0_TS | X86_CR0_MP);
+    {
+        u32CR0Mask |=  X86_CR0_TS
+                     | X86_CR0_MP;
+    }

     /* Write the CR0 mask into the VMCS and update the VCPU's copy of the current CR0 mask. */
     rc = VMXWriteVmcs32(VMX_VMCS_CTRL_CR0_MASK, u32CR0Mask);
     AssertRCReturn(rc, rc);
+    Log4(("Load: VMX_VMCS_CTRL_CR0_MASK=%#RX32\n", u32CR0Mask));

     pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_CR0;
…
      * Note! DBGF expects a clean DR6 state before executing guest code.
      */
+#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
+        if (   CPUMIsGuestInLongModeEx(pMixedCtx)
+            && !CPUMIsHyperDebugStateActivePending(pVCpu))
+        {
+            CPUMR0LoadHyperDebugState(pVCpu, true /* include DR6 */);
+            Assert(CPUMIsHyperDebugStateActivePending(pVCpu));
+            Assert(!CPUMIsGuestDebugStateActivePending(pVCpu));
+        }
+        else
+#endif
         if (!CPUMIsHyperDebugStateActive(pVCpu))
+        {
             CPUMR0LoadHyperDebugState(pVCpu, true /* include DR6 */);
-        Assert(!CPUMIsGuestDebugStateActive(pVCpu));
-        Assert(CPUMIsHyperDebugStateActive(pVCpu) || HC_ARCH_BITS == 32);
+            Assert(CPUMIsHyperDebugStateActive(pVCpu));
+            Assert(!CPUMIsGuestDebugStateActive(pVCpu));
+        }

         /* Update DR7. (The other DRx values are handled by CPUM one way or the other.) */
…
         if (pMixedCtx->dr[7] & (X86_DR7_ENABLED_MASK | X86_DR7_GD)) /** @todo Why GD? */
         {
-            if (!CPUMIsGuestDebugStateActive(pVCpu))
+#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
+            if (   CPUMIsGuestInLongModeEx(pMixedCtx)
+                && !CPUMIsGuestDebugStateActivePending(pVCpu))
             {
                 CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */);
+                Assert(CPUMIsGuestDebugStateActivePending(pVCpu));
+                Assert(!CPUMIsHyperDebugStateActivePending(pVCpu));
                 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxArmed);
             }
-            Assert(!CPUMIsHyperDebugStateActive(pVCpu));
-            Assert(CPUMIsGuestDebugStateActive(pVCpu) || HC_ARCH_BITS == 32);
+            else
+#endif
+            if (!CPUMIsGuestDebugStateActive(pVCpu))
+            {
+                CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */);
+                Assert(CPUMIsGuestDebugStateActive(pVCpu));
+                Assert(!CPUMIsHyperDebugStateActive(pVCpu));
+                STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxArmed);
+            }
         }
         /*
…
          * must intercept #DB in order to maintain a correct DR6 guest value.
          */
+#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
+        else if (   (   CPUMIsGuestInLongModeEx(pMixedCtx)
+                     && !CPUMIsGuestDebugStateActivePending(pVCpu))
+                 || !CPUMIsGuestDebugStateActive(pVCpu))
+#else
         else if (!CPUMIsGuestDebugStateActive(pVCpu))
+#endif
         {
             fInterceptMovDRx = true;
         }
…
     int rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR0, &uVal);
     AssertRCReturn(rc, rc);
+
     uint32_t uShadow = 0;
     rc = VMXReadVmcs32(VMX_VMCS_CTRL_CR0_READ_SHADOW, &uShadow);
…
     if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_DEBUG))
     {
+        /** @todo We need to update DR7 according to what was done in hmR0VmxLoadSharedDebugState(). */
         if (!CPUMIsHyperDebugStateActive(pVCpu))
         {
…
     if (CPUMIsGuestFPUStateActive(pVCpu))
     {
+        if (!fSaveGuestState)
+        {
+            int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
+            AssertRCReturn(rc, rc);
+        }
         CPUMR0SaveGuestFPU(pVM, pVCpu, pMixedCtx);
         Assert(!CPUMIsGuestFPUStateActive(pVCpu));
…
     if (CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, true /* save DR6 */))
         pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG;
-    Assert(!CPUMIsGuestDebugStateActive(pVCpu));
-    Assert(!CPUMIsHyperDebugStateActive(pVCpu));
+    Assert(!CPUMIsGuestDebugStateActive(pVCpu) && !CPUMIsGuestDebugStateActivePending(pVCpu));
+    Assert(!CPUMIsHyperDebugStateActive(pVCpu) && !CPUMIsHyperDebugStateActivePending(pVCpu));

 #if HC_ARCH_BITS == 64
…
     Assert(VMMR0IsLogFlushDisabled(pVCpu));

-    Log4(("hmR0VmxCallRing3Callback->hmR0VmxLongJmpToRing3 pVCpu=%p idCpu=%RU32\n", pVCpu, pVCpu->idCpu));
+    Log4(("hmR0VmxCallRing3Callback->hmR0VmxLongJmpToRing3 pVCpu=%p idCpu=%RU32 enmOperation=%d\n", pVCpu, pVCpu->idCpu,
+          enmOperation));
+
     int rc = hmR0VmxLongJmpToRing3(pVCpu->CTX_SUFF(pVM), pVCpu, (PCPUMCTX)pvUser);
     AssertRCReturn(rc, rc);
…
     }

+#ifdef HMVMX_ALWAYS_SWAP_FPU_STATE
+    if (!CPUMIsGuestFPUStateActive(pVCpu))
+        CPUMR0LoadGuestFPU(pVM, pVCpu, pMixedCtx);
+    pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
+#endif
+
     /*
      * Load the host state bits as we may've been preempted (only happens when
…
     }
     Assert(!(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_HOST_CONTEXT));
-
-#ifdef HMVMX_ALWAYS_SWAP_FPU_STATE
-    if (!CPUMIsGuestFPUStateActive(pVCpu))
-        CPUMR0LoadGuestFPU(pVM, pVCpu, pMixedCtx);
-    pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
-#endif
…
     hmR0VmxLoadSharedState(pVM, pVCpu, pMixedCtx);
     AssertMsg(!pVCpu->hm.s.fContextUseFlags, ("fContextUseFlags=%#x\n", pVCpu->hm.s.fContextUseFlags));
+
+    /* Store status of the shared guest-host state at the time of VM-entry. */
+#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
+    if (CPUMIsGuestInLongModeEx(pMixedCtx))
+    {
+        pVmxTransient->fWasGuestDebugStateActive = CPUMIsGuestDebugStateActivePending(pVCpu);
+        pVmxTransient->fWasHyperDebugStateActive = CPUMIsHyperDebugStateActivePending(pVCpu);
+    }
+    else
+#endif
+    {
+        pVmxTransient->fWasGuestDebugStateActive = CPUMIsGuestDebugStateActive(pVCpu);
+        pVmxTransient->fWasHyperDebugStateActive = CPUMIsHyperDebugStateActive(pVCpu);
+    }
+    pVmxTransient->fWasGuestFPUStateActive = CPUMIsGuestFPUStateActive(pVCpu);
…
     STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxIoCheck);

-    /* We're playing with the host CPU state here, make sure we don't preempt. */
+    /* We're playing with the host CPU state here, make sure we don't preempt or longjmp. */
+    VMMRZCallRing3Disable(pVCpu);
     HM_DISABLE_PREEMPT_IF_NEEDED();
+
     bool fIsGuestDbgActive = CPUMR0DebugStateMaybeSaveGuest(pVCpu, true /*fDr6*/);
…
         HM_RESTORE_PREEMPT_IF_NEEDED();
+        VMMRZCallRing3Enable(pVCpu);
     }
…
     HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();

-    /* We should -not- get this VM-exit if the guest's debug registers are active. See CPUMR0LoadGuestDebugState(). */
-#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
-    if (   !CPUMIsGuestInLongModeEx(pMixedCtx) /* EFER is always up-to-date. */
-        && CPUMIsGuestDebugStateActive(pVCpu))
-#else
-    if (CPUMIsGuestDebugStateActive(pVCpu))
-#endif
+    /* We should -not- get this VM-exit if the guest's debug registers were active. */
+    if (pVmxTransient->fWasGuestDebugStateActive)
     {
         AssertMsgFailed(("Unexpected MOV DRx exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
…
     if (   !DBGFIsStepping(pVCpu)
         && !pVCpu->hm.s.fSingleInstruction
-        && !CPUMIsHyperDebugStateActive(pVCpu))
+        && !pVmxTransient->fWasHyperDebugStateActive)
     {
         /* Don't intercept MOV DRx and #DB any more. */
…
     }

-    /* We're playing with the host CPU state here, make sure we can't preempt. */
+    /* We're playing with the host CPU state here, make sure we can't preempt or longjmp. */
+    VMMRZCallRing3Disable(pVCpu);
     HM_DISABLE_PREEMPT_IF_NEEDED();
…
     HM_RESTORE_PREEMPT_IF_NEEDED();
+    VMMRZCallRing3Enable(pVCpu);

 #ifdef VBOX_WITH_STATISTICS
…
      * (See Intel spec. 27.1 "Architectural State before a VM-Exit".)
      */
+    VMMRZCallRing3Disable(pVCpu);
     HM_DISABLE_PREEMPT_IF_NEEDED();
…
     HM_RESTORE_PREEMPT_IF_NEEDED();
+    VMMRZCallRing3Enable(pVCpu);

     rc = hmR0VmxSaveGuestDR7(pVCpu, pMixedCtx);
…
     AssertMsg(rc == VINF_EM_DBG_STEPPED || rc == VINF_EM_DBG_BREAKPOINT, ("%Rrc\n", rc));
-    AssertReturn(CPUMIsHyperDebugStateActive(pVCpu), VERR_HM_IPE_5);
+    AssertReturn(pVmxTransient->fWasHyperDebugStateActive, VERR_HM_IPE_5);
     CPUMSetHyperDR6(pVCpu, uDR6);
…
     AssertRCReturn(rc, rc);

-    /* We're playing with the host CPU state here, have to disable preemption. */
+    /* We're playing with the host CPU state here, have to disable preemption or longjmp. */
+    VMMRZCallRing3Disable(pVCpu);
     HM_DISABLE_PREEMPT_IF_NEEDED();

+    /* If the guest FPU was active at the time of the #NM exit, then it's a guest fault. */
+    if (pVmxTransient->fWasGuestFPUStateActive)
+    {
+        rc = VINF_EM_RAW_GUEST_TRAP;
+        Assert(CPUMIsGuestFPUStateActive(pVCpu) || (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR0));
+    }
+    else
+    {
 #ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
-    if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
-        Assert(!CPUMIsGuestFPUStateActive(pVCpu));
-#endif
-
-    /* Lazy FPU loading; load the guest-FPU state transparently and continue execution of the guest. */
-    PVM pVM = pVCpu->CTX_SUFF(pVM);
-    rc = CPUMR0LoadGuestFPU(pVM, pVCpu, pMixedCtx);
-    pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
+        Assert(!pVmxTransient->fWasGuestFPUStateActive);
+#endif
+        /* Lazy FPU loading; load the guest-FPU state transparently and continue execution of the guest. */
+        rc = CPUMR0LoadGuestFPU(pVCpu->CTX_SUFF(pVM), pVCpu, pMixedCtx);
+        Assert(rc == VINF_EM_RAW_GUEST_TRAP || (rc == VINF_SUCCESS && CPUMIsGuestFPUStateActive(pVCpu)));
+    }
+
+    HM_RESTORE_PREEMPT_IF_NEEDED();
+    VMMRZCallRing3Enable(pVCpu);

     if (rc == VINF_SUCCESS)
     {
-        Assert(CPUMIsGuestFPUStateActive(pVCpu));
-        HM_RESTORE_PREEMPT_IF_NEEDED();
-
+        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
         STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowNM);
-        return VINF_SUCCESS;
-    }
-    HM_RESTORE_PREEMPT_IF_NEEDED();
-
-    /* Forward #NM to the guest. */
-    Assert(rc == VINF_EM_RAW_GUEST_TRAP);
-    rc = hmR0VmxReadExitIntrInfoVmcs(pVCpu, pVmxTransient);
-    AssertRCReturn(rc, rc);
-    hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntrInfo),
-                           pVmxTransient->cbInstr, 0 /* error code */, 0 /* GCPtrFaultAddress */);
-    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNM);
-    return rc;
+    }
+    else
+    {
+        /* Forward #NM to the guest. */
+        Assert(rc == VINF_EM_RAW_GUEST_TRAP);
+        rc = hmR0VmxReadExitIntrInfoVmcs(pVCpu, pVmxTransient);
+        AssertRCReturn(rc, rc);
+        hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntrInfo),
+                               pVmxTransient->cbInstr, 0 /* error code */, 0 /* GCPtrFaultAddress */);
+        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNM);
+    }
+
+    return VINF_SUCCESS;
 }
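Both #NM handlers are reworked around two ideas: the "guest fault vs. our lazy-load fault" decision is taken from the pre-exit snapshot (fWasGuestFPUStateActive), and the whole section is bracketed by VMMRZCallRing3Disable()/VMMRZCallRing3Enable() in addition to the preemption guard, so a longjmp back to ring 3 cannot undo the FPU load midway. A rough, self-contained C sketch of that control flow follows; the names disableLongJmpAndPreemption(), loadGuestFpuLazily() and the STATUS_* codes are hypothetical stand-ins, not the real VBox APIs.

    #include <stdbool.h>
    #include <stdio.h>

    /* Trivial stand-ins for VMMRZCallRing3Disable()/Enable() plus the
       HM_DISABLE/RESTORE_PREEMPT_IF_NEEDED macro pair (hypothetical). */
    static void disableLongJmpAndPreemption(void) { printf("ring-3 calls + preemption off\n"); }
    static void restoreLongJmpAndPreemption(void) { printf("ring-3 calls + preemption back on\n"); }

    /* Hypothetical status codes standing in for VINF_SUCCESS / VINF_EM_RAW_GUEST_TRAP. */
    enum { STATUS_SUCCESS = 0, STATUS_GUEST_TRAP = 1 };

    /* Stand-in for CPUMR0LoadGuestFPU(): lazily load the guest FPU on the host CPU. */
    static int loadGuestFpuLazily(bool *pfFpuActive)
    {
        *pfFpuActive = true;
        return STATUS_SUCCESS;
    }

    /* Shape of the reworked #NM handler: the fault belongs to the guest only if
       its FPU was already loaded when the exit was taken (the snapshot), and the
       decision runs with preemption and ring-3 longjmps disabled. */
    static int handleXcptNM(bool fWasGuestFPUStateActive, bool *pfFpuActive)
    {
        int rc;

        disableLongJmpAndPreemption();
        if (fWasGuestFPUStateActive)
            rc = STATUS_GUEST_TRAP;               /* real guest #NM: forward it */
        else
            rc = loadGuestFpuLazily(pfFpuActive); /* our lazy-load #NM: swallow it */
        restoreLongJmpAndPreemption();

        if (rc == STATUS_SUCCESS)
            printf("guest resumed with FPU loaded (mark CR0 for re-sync)\n");
        else
            printf("forwarding #NM to the guest\n");
        return STATUS_SUCCESS;
    }

    int main(void)
    {
        bool fFpuActive = false;
        handleXcptNM(false /* FPU not active at exit */, &fFpuActive);
        handleXcptNM(true  /* FPU active at exit */,     &fFpuActive);
        return 0;
    }

Note how, as in the changeset, the handler itself returns success either way; only the pending-event or CR0-resync side effects differ between the two branches.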