Changeset 77610 in vbox
- Timestamp:
- Mar 8, 2019 10:31:35 AM (6 years ago)
- Location:
- trunk
- Files:
- 6 edited
Legend:
- Unmodified
- Added
- Removed
-
trunk/include/VBox/vmm/iem.h
r77569 r77610 338 338 VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitInitIpi(PVMCPU pVCpu); 339 339 VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitIntWindow(PVMCPU pVCpu); 340 VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitNmiWindow(PVMCPU pVCpu); 340 341 VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitMtf(PVMCPU pVCpu); 341 342 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmread(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo); -
trunk/include/VBox/vmm/vm.h
r76585 r77610 369 369 * 370 370 * Available VMCPU bits: 371 * 14, 15, 3 4to 63371 * 14, 15, 36 to 63 372 372 * 373 373 * @todo If we run low on VMCPU, we may consider merging the SELM bits … … 556 556 #define VMCPU_FF_VMX_APIC_WRITE RT_BIT_64(VMCPU_FF_VMX_APIC_WRITE_BIT) 557 557 #define VMCPU_FF_VMX_APIC_WRITE_BIT 33 558 /** VMX interrupt-window event pending. */ 559 #define VMCPU_FF_VMX_INT_WINDOW RT_BIT_64(VMCPU_FF_VMX_INT_WINDOW_BIT) 560 #define VMCPU_FF_VMX_INT_WINDOW_BIT 34 561 /** VMX NMI-window event pending. */ 562 #define VMCPU_FF_VMX_NMI_WINDOW RT_BIT_64(VMCPU_FF_VMX_NMI_WINDOW_BIT) 563 #define VMCPU_FF_VMX_NMI_WINDOW_BIT 35 558 564 559 565 … … 580 586 | VMCPU_FF_UPDATE_APIC | VMCPU_FF_INHIBIT_INTERRUPTS | VMCPU_FF_DBGF \ 581 587 | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL \ 582 | VMCPU_FF_INTERRUPT_NESTED_GUEST | VMCPU_FF_VMX_MTF \ 588 | VMCPU_FF_INTERRUPT_NESTED_GUEST | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_APIC_WRITE \ 589 | VMCPU_FF_VMX_PREEMPT_TIMER | VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW \ 583 590 | VM_WHEN_RAW_MODE( VMCPU_FF_SELM_SYNC_TSS | VMCPU_FF_TRPM_SYNC_IDT \ 584 591 | VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT, 0 ) ) … … 597 604 #define VMCPU_FF_HIGH_PRIORITY_POST_MASK ( VMCPU_FF_PDM_CRITSECT | VM_WHEN_RAW_MODE(VMCPU_FF_CSAM_PENDING_ACTION, 0) \ 598 605 | VMCPU_FF_HM_UPDATE_CR3 | VMCPU_FF_HM_UPDATE_PAE_PDPES \ 599 | VMCPU_FF_ VMX_APIC_WRITE | VMCPU_FF_IEM | VMCPU_FF_IOM )606 | VMCPU_FF_IEM | VMCPU_FF_IOM ) 600 607 601 608 /** Normal priority VM post-execution actions. */ -
trunk/src/VBox/VMM/VMMAll/IEMAll.cpp
r77576 r77610 987 987 IEM_STATIC VBOXSTRICTRC iemVmxVmexitInitIpi(PVMCPU pVCpu); 988 988 IEM_STATIC VBOXSTRICTRC iemVmxVmexitIntWindow(PVMCPU pVCpu); 989 IEM_STATIC VBOXSTRICTRC iemVmxVmexitNmiWindow(PVMCPU pVCpu); 989 990 IEM_STATIC VBOXSTRICTRC iemVmxVmexitMtf(PVMCPU pVCpu); 990 991 IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessMem(PVMCPU pVCpu, uint16_t offAccess, size_t cbAccess, void *pvData, uint32_t fAccess); … … 5537 5538 #endif 5538 5539 5540 /* 5541 * Evaluate whether NMI blocking should be in effect. 5542 * Normally, NMI blocking is in effect whenever we inject an NMI. 5543 */ 5544 bool fBlockNmi; 5545 if ( u8Vector == X86_XCPT_NMI 5546 && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)) 5547 fBlockNmi = true; 5548 5539 5549 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX 5540 5550 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu)) … … 5543 5553 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE) 5544 5554 return rcStrict0; 5555 5556 /* If virtual-NMI blocking is in effect for the nested-guest, guest NMIs are not blocked. */ 5557 if (pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking) 5558 { 5559 Assert(CPUMIsGuestVmxPinCtlsSet(pVCpu, &pVCpu->cpum.GstCtx, VMX_PIN_CTLS_VIRT_NMI)); 5560 fBlockNmi = false; 5561 } 5545 5562 } 5546 5563 #endif … … 5569 5586 } 5570 5587 #endif 5588 5589 /* 5590 * Set NMI blocking if necessary. 5591 */ 5592 if ( fBlockNmi 5593 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS)) 5594 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS); 5571 5595 5572 5596 /* … … 14038 14062 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF)); 14039 14063 } 14040 /* * Finally, check if the VMX preemption timer has expired. */14064 /* VMX preemption timer takes priority over NMI-window exits. */ 14041 14065 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER)) 14042 14066 { … … 14049 14073 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER)); 14050 14074 } 14075 } 14076 /* NMI-window VM-exit. 
*/ 14077 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)) 14078 { 14079 rcStrict = iemVmxVmexitNmiWindow(pVCpu); 14080 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)); 14051 14081 } 14052 14082 } … … 14741 14771 fFlags |= IEM_XCPT_FLAGS_ERR; 14742 14772 break; 14743 14744 case X86_XCPT_NMI:14745 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);14746 break;14747 14773 } 14748 14774 break; … … 15895 15921 15896 15922 /** 15923 * Interface for HM and EM to emulate VM-exits for NMI-windows. 15924 * 15925 * @returns Strict VBox status code. 15926 * @param pVCpu The cross context virtual CPU structure of the calling EMT. 15927 * @thread EMT(pVCpu) 15928 */ 15929 VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitNmiWindow(PVMCPU pVCpu) 15930 { 15931 VBOXSTRICTRC rcStrict = iemVmxVmexitNmiWindow(pVCpu); 15932 if (pVCpu->iem.s.cActiveMappings) 15933 iemMemRollback(pVCpu); 15934 return iemExecStatusCodeFiddling(pVCpu, rcStrict); 15935 } 15936 15937 15938 /** 15897 15939 * Interface for HM and EM to emulate VM-exits Monitor-Trap Flag (MTF). 
15898 15940 * … … 15949 15991 rcStrict = iemVmxVmreadMem(pVCpu, cbInstr, iEffSeg, enmEffAddrMode, GCPtrDst, uFieldEnc, pExitInfo); 15950 15992 } 15951 if (pVCpu->iem.s.cActiveMappings) 15952 iemMemRollback(pVCpu); 15953 return iemExecStatusCodeFiddling(pVCpu, rcStrict); 15993 Assert(!pVCpu->iem.s.cActiveMappings); 15994 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict); 15954 15995 } 15955 15996 … … 15989 16030 uint32_t const uFieldEnc = iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2); 15990 16031 VBOXSTRICTRC rcStrict = iemVmxVmwrite(pVCpu, cbInstr, iEffSeg, enmEffAddrMode, u64Val, uFieldEnc, pExitInfo); 15991 if (pVCpu->iem.s.cActiveMappings) 15992 iemMemRollback(pVCpu); 15993 return iemExecStatusCodeFiddling(pVCpu, rcStrict); 16032 Assert(!pVCpu->iem.s.cActiveMappings); 16033 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict); 15994 16034 } 15995 16035 … … 16015 16055 RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr; 16016 16056 VBOXSTRICTRC rcStrict = iemVmxVmptrld(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo); 16017 if (pVCpu->iem.s.cActiveMappings) 16018 iemMemRollback(pVCpu); 16019 return iemExecStatusCodeFiddling(pVCpu, rcStrict); 16057 Assert(!pVCpu->iem.s.cActiveMappings); 16058 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict); 16020 16059 } 16021 16060 … … 16041 16080 RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr; 16042 16081 VBOXSTRICTRC rcStrict = iemVmxVmptrst(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo); 16043 if (pVCpu->iem.s.cActiveMappings) 16044 iemMemRollback(pVCpu); 16045 return iemExecStatusCodeFiddling(pVCpu, rcStrict); 16082 Assert(!pVCpu->iem.s.cActiveMappings); 16083 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict); 16046 16084 } 16047 16085 … … 16067 16105 RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr; 16068 16106 VBOXSTRICTRC rcStrict = iemVmxVmclear(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo); 16069 if (pVCpu->iem.s.cActiveMappings) 
16070 iemMemRollback(pVCpu); 16071 return iemExecStatusCodeFiddling(pVCpu, rcStrict); 16107 Assert(!pVCpu->iem.s.cActiveMappings); 16108 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict); 16072 16109 } 16073 16110 … … 16090 16127 iemInitExec(pVCpu, false /*fBypassHandlers*/); 16091 16128 VBOXSTRICTRC rcStrict = iemVmxVmlaunchVmresume(pVCpu, cbInstr, uInstrId); 16092 if (pVCpu->iem.s.cActiveMappings) 16093 iemMemRollback(pVCpu); 16094 return iemExecStatusCodeFiddling(pVCpu, rcStrict); 16129 Assert(!pVCpu->iem.s.cActiveMappings); 16130 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict); 16095 16131 } 16096 16132 … … 16116 16152 RTGCPTR const GCPtrVmxon = pExitInfo->GCPtrEffAddr; 16117 16153 VBOXSTRICTRC rcStrict = iemVmxVmxon(pVCpu, cbInstr, iEffSeg, GCPtrVmxon, pExitInfo); 16118 if (pVCpu->iem.s.cActiveMappings) 16119 iemMemRollback(pVCpu); 16120 return iemExecStatusCodeFiddling(pVCpu, rcStrict); 16154 Assert(!pVCpu->iem.s.cActiveMappings); 16155 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict); 16121 16156 } 16122 16157 -
trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h
r77094 r77610 3843 3843 IEM_CIMPL_DEF_1(iemCImpl_iret, IEMMODE, enmEffOpSize) 3844 3844 { 3845 bool constfBlockingNmi = VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);3845 bool fBlockingNmi = VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS); 3846 3846 3847 3847 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX 3848 /*3849 * Record whether NMIs (or virtual-NMIs) were unblocked by execution of this3850 * IRET instruction. We need to provide this information as part of some VM-exits.3851 *3852 * See Intel spec. 27.2.2 "Information for VM Exits Due to Vectored Events".3853 */3854 3848 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu)) 3855 pVCpu->cpum.GstCtx.hwvirt.vmx.fNmiUnblockingIret = fBlockingNmi; 3849 { 3850 /* 3851 * Record whether NMI (or virtual-NMI) blocking is in effect during the execution 3852 * of this IRET instruction. We need to provide this information as part of some 3853 * VM-exits. 3854 * 3855 * See Intel spec. 27.2.2 "Information for VM Exits Due to Vectored Events". 3856 */ 3857 if (IEM_VMX_IS_PINCTLS_SET(pVCpu, VMX_PIN_CTLS_VIRT_NMI)) 3858 pVCpu->cpum.GstCtx.hwvirt.vmx.fNmiUnblockingIret = pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking; 3859 else 3860 pVCpu->cpum.GstCtx.hwvirt.vmx.fNmiUnblockingIret = fBlockingNmi; 3861 3862 /* 3863 * If "NMI exiting" is set, IRET does not affect blocking of NMIs. 3864 * See Intel Spec. 25.3 "Changes To Instruction Behavior In VMX Non-root Operation". 3865 */ 3866 if (IEM_VMX_IS_PINCTLS_SET(pVCpu, VMX_PIN_CTLS_NMI_EXIT)) 3867 { 3868 fBlockingNmi = false; 3869 3870 /* Signal a pending NMI-window VM-exit before executing the next instruction. */ 3871 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)) 3872 VMCPU_FF_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW); 3873 } 3874 3875 /* Clear virtual-NMI blocking, if any, before causing any further exceptions. */ 3876 pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking = false; 3877 } 3856 3878 #endif 3857 3879 -
trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h
r77569 r77610 1806 1806 /** @todo NSTVMX: Does triple-fault VM-exit reflect a shutdown activity state or 1807 1807 * not? */ 1808 EMSTATE enmActivityState = EMGetState(pVCpu);1808 EMSTATE const enmActivityState = EMGetState(pVCpu); 1809 1809 switch (enmActivityState) 1810 1810 { … … 1813 1813 } 1814 1814 1815 /* Interruptibility-state. */ 1815 /* 1816 * Interruptibility-state. 1817 */ 1818 /* NMI. */ 1816 1819 pVmcs->u32GuestIntrState = 0; 1817 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS)) 1818 pVmcs->u32GuestIntrState |= VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI; 1819 1820 if (pVmcs->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI) 1821 { 1822 if (pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking) 1823 pVmcs->u32GuestIntrState |= VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI; 1824 } 1825 else 1826 { 1827 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS)) 1828 pVmcs->u32GuestIntrState |= VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI; 1829 } 1830 1831 /* Blocking-by-STI. */ 1820 1832 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS) 1821 1833 && pVCpu->cpum.GstCtx.rip == EMGetInhibitInterruptsPC(pVCpu)) … … 1824 1836 * currently. */ 1825 1837 pVmcs->u32GuestIntrState |= VMX_VMCS_GUEST_INT_STATE_BLOCK_STI; 1826 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);1827 1838 } 1828 1839 /* Nothing to do for SMI/enclave. We don't support enclaves or SMM yet. */ … … 2864 2875 } 2865 2876 2877 /* 2878 * Clear any pending VMX nested-guest force-flags. 2879 * These force-flags have no effect on guest execution and will 2880 * be re-evaluated and setup on the next nested-guest VM-entry. 2881 */ 2882 VMCPU_FF_CLEAR_MASK(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER 2883 | VMCPU_FF_VMX_MTF 2884 | VMCPU_FF_VMX_APIC_WRITE 2885 | VMCPU_FF_VMX_INT_WINDOW 2886 | VMCPU_FF_VMX_NMI_WINDOW); 2887 2866 2888 /* Restore the host (outer guest) state. 
*/ 2867 2889 VBOXSTRICTRC rcStrict = iemVmxVmexitLoadHostState(pVCpu, uExitReason); … … 3082 3104 * The MTF VM-exit can occur even when the MTF VM-execution control is 3083 3105 * not set (e.g. when VM-entry injects an MTF pending event), so do not 3084 * check for it here. 3085 */ 3086 3087 /* Clear the force-flag indicating that monitor-trap flag is no longer active. */ 3088 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_VMX_MTF); 3089 3090 /* Cause the MTF VM-exit. The VM-exit qualification MBZ. */ 3106 * check for the intercept here. 3107 */ 3091 3108 return iemVmxVmexit(pVCpu, VMX_EXIT_MTF); 3092 3109 } … … 3817 3834 pVmcs->u32PreemptTimer = 0; 3818 3835 3819 /* Clear the force-flag indicating the VMX-preemption timer no longer active. */3820 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER);3821 3822 3836 /* Cause the VMX-preemption timer VM-exit. The VM-exit qualification MBZ. */ 3823 3837 return iemVmxVmexit(pVCpu, VMX_EXIT_PREEMPT_TIMER); … … 3934 3948 { 3935 3949 return iemVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW); 3950 } 3951 3952 3953 /** 3954 * VMX VM-exit handler for NMI-window VM-exits. 3955 * 3956 * @returns VBox strict status code. 3957 * @param pVCpu The cross context virtual CPU structure. 3958 */ 3959 IEM_STATIC VBOXSTRICTRC iemVmxVmexitNmiWindow(PVMCPU pVCpu) 3960 { 3961 return iemVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW); 3936 3962 } 3937 3963 … … 4018 4044 iemVmxVmcsSetIdtVectoringInfo(pVCpu, uIdtVectoringInfo); 4019 4045 iemVmxVmcsSetIdtVectoringErrCode(pVCpu, uErrCode); 4046 4047 /* 4048 * If the event is a virtual-NMI (which is an NMI being inject during VM-entry) 4049 * virtual-NMI blocking must be set in effect rather than physical NMI blocking. 4050 * 4051 * See Intel spec. 24.6.1 "Pin-Based VM-Execution Controls". 
4052 */ 4053 if ( uVector == X86_XCPT_NMI 4054 && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT) 4055 && (pVmcs->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)) 4056 pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking = true; 4057 else 4058 Assert(!pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking); 4020 4059 4021 4060 pVCpu->cpum.GstCtx.hwvirt.vmx.fInterceptEvents = true; … … 7075 7114 7076 7115 /* NMI blocking. */ 7077 if ( (pVmcs->u32GuestIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI) 7078 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS)) 7079 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS); 7116 if (pVmcs->u32GuestIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI) 7117 { 7118 if (pVmcs->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI) 7119 pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking = true; 7120 else 7121 { 7122 pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking = false; 7123 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS)) 7124 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS); 7125 } 7126 } 7127 else 7128 pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking = false; 7080 7129 7081 7130 /* SMI blocking is irrelevant. We don't support SMIs yet. */ -
trunk/src/VBox/VMM/VMMR3/EM.cpp
r77575 r77610 1697 1697 } 1698 1698 1699 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX1700 /*1701 * VMX Nested-guest APIC-write pending (can cause VM-exits).1702 * Takes priority over even SMI and INIT signals.1703 * See Intel spec. 29.4.3.2 "APIC-Write Emulation".1704 */1705 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))1706 {1707 rc = VBOXSTRICTRC_VAL(IEMExecVmxVmexitApicWrite(pVCpu));1708 Assert(rc != VINF_VMX_INTERCEPT_NOT_ACTIVE);1709 }1710 #endif1711 1712 1699 #ifdef VBOX_WITH_RAW_MODE 1713 1700 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_CSAM_PENDING_ACTION)) … … 2152 2139 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX 2153 2140 /* 2141 * VMX Nested-guest APIC-write pending (can cause VM-exits). 2142 * Takes priority over even SMI and INIT signals. 2143 * See Intel spec. 29.4.3.2 "APIC-Write Emulation". 2144 */ 2145 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE)) 2146 { 2147 rc = VBOXSTRICTRC_VAL(IEMExecVmxVmexitApicWrite(pVCpu)); 2148 Assert(rc != VINF_VMX_INTERCEPT_NOT_ACTIVE); 2149 } 2150 2151 /* 2154 2152 * VMX Nested-guest monitor-trap flag (MTF) VM-exit. 2155 2153 * Takes priority over "Traps on the previous instruction". … … 2165 2163 /* 2166 2164 * VMX Nested-guest preemption timer VM-exit. 2167 * Takes priority over non-maskable interrupts (NMIs).2165 * Takes priority over NMI-window VM-exits. 2168 2166 */ 2169 2167 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER)) … … 2172 2170 if (rc2 == VINF_VMX_INTERCEPT_NOT_ACTIVE) 2173 2171 rc2 = VINF_SUCCESS; 2172 UPDATE_RC(); 2173 } 2174 2175 /* 2176 * VMX NMI-window VM-exit. 2177 * Takes priority over non-maskable interrupts (NMIs). 
2178 */ 2179 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)) 2180 { 2181 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexitNmiWindow(pVCpu)); 2182 Assert(rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE); 2174 2183 UPDATE_RC(); 2175 2184 } … … 2397 2406 /* check that we got them all */ 2398 2407 AssertCompile(VM_FF_HIGH_PRIORITY_PRE_MASK == (VM_FF_TM_VIRTUAL_SYNC | VM_FF_DBGF | VM_FF_CHECK_VM_STATE | VM_FF_DEBUG_SUSPEND | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS)); 2399 AssertCompile(VMCPU_FF_HIGH_PRIORITY_PRE_MASK == (VMCPU_FF_TIMER | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_INHIBIT_INTERRUPTS | VMCPU_FF_DBGF | VMCPU_FF_INTERRUPT_NESTED_GUEST | VMCPU_FF_VMX_MTF | VM _WHEN_RAW_MODE(VMCPU_FF_SELM_SYNC_TSS | VMCPU_FF_TRPM_SYNC_IDT | VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT, 0)));2408 AssertCompile(VMCPU_FF_HIGH_PRIORITY_PRE_MASK == (VMCPU_FF_TIMER | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_INHIBIT_INTERRUPTS | VMCPU_FF_DBGF | VMCPU_FF_INTERRUPT_NESTED_GUEST | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_PREEMPT_TIMER | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW | VM_WHEN_RAW_MODE(VMCPU_FF_SELM_SYNC_TSS | VMCPU_FF_TRPM_SYNC_IDT | VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT, 0))); 2400 2409 } 2401 2410
Note: See TracChangeset for help on using the changeset viewer.