Changeset 72530 in vbox for trunk/src/VBox/VMM
- Timestamp: Jun 12, 2018 4:40:46 PM
- svn:sync-xref-src-repo-rev: 123017
- File: 1 edited
Legend: lines prefixed with '+' were added, lines prefixed with '-' were removed, unprefixed lines are unmodified context.
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
--- trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp (r72522)
+++ trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp (r72530)
@@ -49,5 +49,4 @@
 # define HMVMX_ALWAYS_TRAP_ALL_XCPTS
 # define HMVMX_ALWAYS_TRAP_PF
-# define HMVMX_ALWAYS_SWAP_FPU_STATE
 # define HMVMX_ALWAYS_FLUSH_TLB
 # define HMVMX_ALWAYS_SWAP_EFER
@@ -148,7 +147,7 @@
 #define HMVMX_REAL_MODE_XCPT_MASK    (  RT_BIT(X86_XCPT_DE)  /* always: | RT_BIT(X86_XCPT_DB) */ | RT_BIT(X86_XCPT_NMI)   \
                                       | RT_BIT(X86_XCPT_BP)             | RT_BIT(X86_XCPT_OF)    | RT_BIT(X86_XCPT_BR)    \
-                                      | RT_BIT(X86_XCPT_UD) /* RT_BIT(X86_XCPT_NM) */            | RT_BIT(X86_XCPT_DF)    \
+                                      | RT_BIT(X86_XCPT_UD)             | RT_BIT(X86_XCPT_NM)    | RT_BIT(X86_XCPT_DF)    \
                                       | RT_BIT(X86_XCPT_CO_SEG_OVERRUN) | RT_BIT(X86_XCPT_TS)    | RT_BIT(X86_XCPT_NP)    \
                                       | RT_BIT(X86_XCPT_SS)             | RT_BIT(X86_XCPT_GP)   /* RT_BIT(X86_XCPT_PF) */ \
-                                     /* RT_BIT(X86_XCPT_MF) always: | RT_BIT(X86_XCPT_AC) */     | RT_BIT(X86_XCPT_MC)    \
+                                      | RT_BIT(X86_XCPT_MF) /* always: | RT_BIT(X86_XCPT_AC) */  | RT_BIT(X86_XCPT_MC)    \
                                       | RT_BIT(X86_XCPT_XF))
@@ -288,6 +287,4 @@
     uint32_t        fVmcsFieldsRead;

-    /** Whether the guest FPU was active at the time of VM-exit. */
-    bool            fWasGuestFPUStateActive;
     /** Whether the guest debug state was active at the time of VM-exit. */
     bool            fWasGuestDebugStateActive;
@@ -303,8 +300,8 @@
     bool            fVectoringPF;
 } VMXTRANSIENT;
-AssertCompileMemberAlignment(VMXTRANSIENT, uExitReason, sizeof(uint64_t));
-AssertCompileMemberAlignment(VMXTRANSIENT, uExitIntInfo, sizeof(uint64_t));
-AssertCompileMemberAlignment(VMXTRANSIENT, uEntryIntInfo, sizeof(uint64_t));
-AssertCompileMemberAlignment(VMXTRANSIENT, fWasGuestFPUStateActive, sizeof(uint64_t));
+AssertCompileMemberAlignment(VMXTRANSIENT, uExitReason,               sizeof(uint64_t));
+AssertCompileMemberAlignment(VMXTRANSIENT, uExitIntInfo,              sizeof(uint64_t));
+AssertCompileMemberAlignment(VMXTRANSIENT, uEntryIntInfo,             sizeof(uint64_t));
+AssertCompileMemberAlignment(VMXTRANSIENT, fWasGuestDebugStateActive, sizeof(uint64_t));
 AssertCompileMemberSize(VMXTRANSIENT, ExitInstrInfo, sizeof(uint32_t));
 /** Pointer to VMX transient state. */
@@ -452,5 +449,4 @@
 /** @} */

-static int hmR0VmxExitXcptNM(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
 static int hmR0VmxExitXcptPF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
 static int hmR0VmxExitXcptMF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
@@ -3777,4 +3773,6 @@
 static int hmR0VmxLoadSharedCR0(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
 {
+    Assert(CPUMIsGuestFPUStateActive(pVCpu));
+
     /*
      * Guest CR0.
@@ -3826,17 +3824,4 @@
      */
     u32GuestCR0 |= X86_CR0_NE;
-    bool fInterceptNM = false;
-    if (CPUMIsGuestFPUStateActive(pVCpu))
-    {
-        fInterceptNM = false;              /* Guest FPU active, no need to VM-exit on #NM. */
-        /* The guest should still get #NM exceptions when it expects it to, so we should not clear TS & MP bits here.
-           We're only concerned about -us- not intercepting #NMs when the guest-FPU is active. Not the guest itself! */
-    }
-    else
-    {
-        fInterceptNM = true;               /* Guest FPU inactive, VM-exit on #NM for lazy FPU loading. */
-        u32GuestCR0 |=  X86_CR0_TS         /* Guest can task switch quickly and do lazy FPU syncing. */
-                      | X86_CR0_MP;        /* FWAIT/WAIT should not ignore CR0.TS and should generate #NM. */
-    }

     /* Catch floating point exceptions if we need to report them to the guest in a different way. */
@@ -3851,23 +3836,13 @@
         Assert(pVM->hm.s.vmx.pRealModeTSS);
         pVCpu->hm.s.vmx.u32XcptBitmap |= HMVMX_REAL_MODE_XCPT_MASK;
-        fInterceptNM = true;
-        fInterceptMF = true;
     }
     else
     {
         /* For now, cleared here as mode-switches can happen outside HM/VT-x. See @bugref{7626#c11}. */
         pVCpu->hm.s.vmx.u32XcptBitmap &= ~HMVMX_REAL_MODE_XCPT_MASK;
+        if (fInterceptMF)
+            pVCpu->hm.s.vmx.u32XcptBitmap |= RT_BIT(X86_XCPT_MF);
     }
     HMCPU_CF_SET(pVCpu, HM_CHANGED_VMM_GUEST_XCPT_INTERCEPTS);
-
-    if (fInterceptNM)
-        pVCpu->hm.s.vmx.u32XcptBitmap |= RT_BIT(X86_XCPT_NM);
-    else
-        pVCpu->hm.s.vmx.u32XcptBitmap &= ~RT_BIT(X86_XCPT_NM);
-
-    if (fInterceptMF)
-        pVCpu->hm.s.vmx.u32XcptBitmap |= RT_BIT(X86_XCPT_MF);
-    else
-        pVCpu->hm.s.vmx.u32XcptBitmap &= ~RT_BIT(X86_XCPT_MF);

     /* Additional intercepts for debugging, define these yourself explicitly. */
@@ -3932,11 +3907,4 @@
     if (pVM->hm.s.fNestedPaging)
         u32CR0Mask &= ~X86_CR0_WP;
-
-    /* If the guest FPU state is active, don't need to VM-exit on writes to FPU related bits in CR0. */
-    if (fInterceptNM)
-    {
-        u32CR0Mask |=  X86_CR0_TS
-                     | X86_CR0_MP;
-    }

     /* Write the CR0 mask into the VMCS and update the VCPU's copy of the current CR0 mask. */
@@ -9085,18 +9053,5 @@
     VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);

-#ifdef HMVMX_ALWAYS_SWAP_FPU_STATE
     if (!CPUMIsGuestFPUStateActive(pVCpu))
-    {
-        STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatLoadGuestFpuState, x);
-        if (CPUMR0LoadGuestFPU(pVM, pVCpu) == VINF_CPUM_HOST_CR0_MODIFIED)
-            HMCPU_CF_SET(pVCpu, HM_CHANGED_HOST_CONTEXT);
-        STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatLoadGuestFpuState, x);
-        STAM_COUNTER_INC(&pVCpu->hm.s.StatLoadGuestFpu);
-    }
-    HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
-#endif
-
-    if (   pVCpu->hm.s.fPreloadGuestFpu
-        && !CPUMIsGuestFPUStateActive(pVCpu))
     {
         STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatLoadGuestFpuState, x);
@@ -9153,5 +9108,4 @@
         pVmxTransient->fWasHyperDebugStateActive = CPUMIsHyperDebugStateActive(pVCpu);
     }
-    pVmxTransient->fWasGuestFPUStateActive = CPUMIsGuestFPUStateActive(pVCpu);

     /*
@@ -11688,5 +11642,4 @@
             case X86_XCPT_PF: rc = hmR0VmxExitXcptPF(pVCpu, pMixedCtx, pVmxTransient); break;
             case X86_XCPT_GP: rc = hmR0VmxExitXcptGP(pVCpu, pMixedCtx, pVmxTransient); break;
-            case X86_XCPT_NM: rc = hmR0VmxExitXcptNM(pVCpu, pMixedCtx, pVmxTransient); break;
             case X86_XCPT_MF: rc = hmR0VmxExitXcptMF(pVCpu, pMixedCtx, pVmxTransient); break;
             case X86_XCPT_DB: rc = hmR0VmxExitXcptDB(pVCpu, pMixedCtx, pVmxTransient); break;
@@ -11694,4 +11647,6 @@
             case X86_XCPT_AC: rc = hmR0VmxExitXcptAC(pVCpu, pMixedCtx, pVmxTransient); break;

+            case X86_XCPT_NM: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNM);
+                              rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
             case X86_XCPT_XF: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestXF);
                               rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
@@ -13538,64 +13493,4 @@
     return rc;
 }
-
-
-/**
- * VM-exit exception handler for \#NM (Device-not-available exception: floating
- * point exception).
- */
-static int hmR0VmxExitXcptNM(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
-{
-    HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
-
-    /* We require CR0 and EFER. EFER is always up-to-date. */
-    int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
-    AssertRCReturn(rc, rc);
-
-    /* We're playing with the host CPU state here, have to disable preemption or longjmp. */
-    VMMRZCallRing3Disable(pVCpu);
-    HM_DISABLE_PREEMPT();
-
-    /* If the guest FPU was active at the time of the #NM VM-exit, then it's a guest fault. */
-    if (pVmxTransient->fWasGuestFPUStateActive)
-    {
-        rc = VINF_EM_RAW_GUEST_TRAP;
-        Assert(CPUMIsGuestFPUStateActive(pVCpu) || HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR0));
-    }
-    else
-    {
-#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
-        Assert(!pVmxTransient->fWasGuestFPUStateActive || pVCpu->hm.s.fUsingDebugLoop);
-#endif
-        rc = CPUMR0Trap07Handler(pVCpu->CTX_SUFF(pVM), pVCpu);
-        Assert(   rc == VINF_EM_RAW_GUEST_TRAP
-               || ((rc == VINF_SUCCESS || rc == VINF_CPUM_HOST_CR0_MODIFIED) && CPUMIsGuestFPUStateActive(pVCpu)));
-        if (rc == VINF_CPUM_HOST_CR0_MODIFIED)
-            HMCPU_CF_SET(pVCpu, HM_CHANGED_HOST_CONTEXT);
-    }
-
-    HM_RESTORE_PREEMPT();
-    VMMRZCallRing3Enable(pVCpu);
-
-    if (rc == VINF_SUCCESS || rc == VINF_CPUM_HOST_CR0_MODIFIED)
-    {
-        /* Guest FPU state was activated, we'll want to change CR0 FPU intercepts before the next VM-reentry. */
-        HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
-        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowNM);
-        pVCpu->hm.s.fPreloadGuestFpu = true;
-    }
-    else
-    {
-        /* Forward #NM to the guest. */
-        Assert(rc == VINF_EM_RAW_GUEST_TRAP);
-        rc = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
-        AssertRCReturn(rc, rc);
-        hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
-                               pVmxTransient->cbInstr, 0 /* error code */, 0 /* GCPtrFaultAddress */);
-        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNM);
-    }
-
-    return VINF_SUCCESS;
-}
-

 /**
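Taken together, the hunks remove the lazy FPU-state switching path: the guest FPU/SSE state is now expected to be loaded before guest code runs (note the new Assert(CPUMIsGuestFPUStateActive(pVCpu)) in hmR0VmxLoadSharedCR0 and the unconditional pre-entry CPUMR0LoadGuestFPU call), so CR0.TS/CR0.MP no longer have to be forced into the guest CR0 and the CR0 mask, #NM no longer needs a dedicated intercept or exit handler, and the fWasGuestFPUStateActive / fPreloadGuestFpu bookkeeping disappears. The stand-alone C sketch below is only a toy model of that lazy-vs-eager trade-off; the types and function names in it (VCPUSTATE, runLazy, runEager, loadGuestFpu) are invented for illustration and are not VirtualBox APIs.

#include <stdbool.h>
#include <stdio.h>

/* Toy per-VCPU state; invented for this sketch, not a VirtualBox type. */
typedef struct VCPUSTATE
{
    bool fGuestFpuLoaded;   /* Is the guest FPU/SSE state currently on the host CPU? */
    bool fInterceptNM;      /* Would #NM be intercepted for the next guest run? */
    int  cFpuLoads;         /* How many times the guest FPU state was loaded. */
} VCPUSTATE;

static void loadGuestFpu(VCPUSTATE *pVCpu)
{
    pVCpu->fGuestFpuLoaded = true;
    pVCpu->cFpuLoads++;
}

/* Pre-r72530 model: leave the guest FPU state unloaded, force CR0.TS/MP and
   intercept #NM; load the state only when the guest actually touches the FPU. */
static void runLazy(VCPUSTATE *pVCpu, bool fGuestUsesFpu)
{
    pVCpu->fInterceptNM = !pVCpu->fGuestFpuLoaded;
    if (fGuestUsesFpu && !pVCpu->fGuestFpuLoaded)
        loadGuestFpu(pVCpu);        /* This is what the #NM exit handler used to do. */
}

/* Post-r72530 model: always load the guest FPU state before entering the guest,
   so no #NM intercept (and no CR0.TS/MP shadowing) is needed for it at all. */
static void runEager(VCPUSTATE *pVCpu, bool fGuestUsesFpu)
{
    (void)fGuestUsesFpu;
    if (!pVCpu->fGuestFpuLoaded)
        loadGuestFpu(pVCpu);        /* Done unconditionally before VM-entry. */
    pVCpu->fInterceptNM = false;
}

int main(void)
{
    /* A guest that never touches the FPU vs. one that does. */
    VCPUSTATE LazyIdle = {0}, EagerIdle = {0}, LazyFpu = {0}, EagerFpu = {0};
    runLazy(&LazyIdle,  false);  runEager(&EagerIdle, false);
    runLazy(&LazyFpu,   true);   runEager(&EagerFpu,  true);

    printf("FPU-idle guest : lazy loads=%d (keeps #NM intercepted), eager loads=%d (no intercept)\n",
           LazyIdle.cFpuLoads, EagerIdle.cFpuLoads);
    printf("FPU-using guest: lazy loads=%d, eager loads=%d\n",
           LazyFpu.cFpuLoads, EagerFpu.cFpuLoads);
    return 0;
}

The counts make the trade-off visible: the lazy scheme skips the load for an FPU-idle guest but pays with a permanently armed #NM intercept, extra CR0 shadowing and a dedicated exit handler; the eager scheme always loads the state up front and lets the changeset delete all of that machinery (outside real-on-v86 mode, where #NM simply stays in HMVMX_REAL_MODE_XCPT_MASK and is routed to the generic exception exit).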