Changeset 74785 in vbox for trunk/src/VBox/VMM/VMMR0
Timestamp: Oct 12, 2018 10:14:19 AM
Location: trunk/src/VBox/VMM/VMMR0
Files: 4 edited
Legend: Unmodified | Added | Removed
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
r73606 r74785 1158 1158 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.svm.fSupported); 1159 1159 1160 bool const fFlushPending = pVCpu->CTX_SUFF(pVM)->hm.s.svm.fAlwaysFlushTLB || VMCPU_FF_IS_ PENDING(pVCpu, VMCPU_FF_TLB_FLUSH);1160 bool const fFlushPending = pVCpu->CTX_SUFF(pVM)->hm.s.svm.fAlwaysFlushTLB || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TLB_FLUSH); 1161 1161 1162 1162 /* Skip it if a TLB flush is already pending. */ … … 2806 2806 { 2807 2807 if ( !pVmcbCtrl->IntCtrl.n.u1VIrqPending 2808 && VMCPU_FF_IS_ PENDING(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST))2808 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST)) 2809 2809 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST); 2810 2810 } … … 2815 2815 if (pVmcbCtrl->IntShadow.n.u1IntShadow) 2816 2816 EMSetInhibitInterruptsPC(pVCpu, pVmcbGuest->u64RIP); 2817 else if (VMCPU_FF_IS_ PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))2817 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)) 2818 2818 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS); 2819 2819 } … … 3022 3022 */ 3023 3023 if ( VMMRZCallRing3IsEnabled(pVCpu) 3024 && VMCPU_FF_IS_ PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3))3024 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3)) 3025 3025 { 3026 3026 Assert(pCtx->cr3 == pVmcbGuest->u64CR3); … … 3669 3669 * delivery/window over a physical interrupt (from the outer guest) 3670 3670 * might be pending? 
*/ 3671 bool const fEnableIntWindow = !VMCPU_FF_IS_ PENDING(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST);3671 bool const fEnableIntWindow = !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST); 3672 3672 if (!fEnableIntWindow) 3673 3673 { … … 3735 3735 bool const fVirtualGif = CPUMGetSvmNstGstVGif(pCtx); 3736 3736 bool const fIntShadow = hmR0SvmIsIntrShadowActive(pVCpu); 3737 bool const fBlockNmi = VMCPU_FF_IS_ PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS);3737 bool const fBlockNmi = VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS); 3738 3738 3739 3739 Log4Func(("fVirtualGif=%RTbool fBlockNmi=%RTbool fIntShadow=%RTbool fIntPending=%RTbool fNmiPending=%RTbool\n", 3740 3740 fVirtualGif, fBlockNmi, fIntShadow, VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC), 3741 VMCPU_FF_IS_ PENDING(pVCpu, VMCPU_FF_INTERRUPT_NMI)));3741 VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))); 3742 3742 3743 3743 /** @todo SMI. SMIs take priority over NMIs. */ … … 3748 3748 * NMIs take priority over maskable interrupts, see AMD spec. 8.5 "Priorities". 3749 3749 */ 3750 if ( VMCPU_FF_IS_ PENDING(pVCpu, VMCPU_FF_INTERRUPT_NMI)3750 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI) 3751 3751 && !fBlockNmi) 3752 3752 { … … 3860 3860 bool const fIntShadow = hmR0SvmIsIntrShadowActive(pVCpu); 3861 3861 bool const fBlockInt = !(pCtx->eflags.u32 & X86_EFL_IF); 3862 bool const fBlockNmi = VMCPU_FF_IS_ PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS);3862 bool const fBlockNmi = VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS); 3863 3863 3864 3864 Log4Func(("fGif=%RTbool fBlockNmi=%RTbool fBlockInt=%RTbool fIntShadow=%RTbool fIntPending=%RTbool NMI pending=%RTbool\n", 3865 3865 fGif, fBlockNmi, fBlockInt, fIntShadow, 3866 3866 VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC), 3867 VMCPU_FF_IS_ PENDING(pVCpu, VMCPU_FF_INTERRUPT_NMI)));3867 VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))); 3868 3868 3869 3869 /** @todo SMI. SMIs take priority over NMIs. 
*/ … … 3874 3874 * NMIs take priority over maskable interrupts, see AMD spec. 8.5 "Priorities". 3875 3875 */ 3876 if ( VMCPU_FF_IS_ PENDING(pVCpu, VMCPU_FF_INTERRUPT_NMI)3876 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI) 3877 3877 && !fBlockNmi) 3878 3878 { … … 4002 4002 if ( Event.n.u3Type == SVM_EVENT_NMI 4003 4003 && Event.n.u8Vector == X86_XCPT_NMI 4004 && !VMCPU_FF_IS_ PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))4004 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS)) 4005 4005 { 4006 4006 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS); … … 4027 4027 * but we still need to intercept IRET in order to eventually clear NMI inhibition. 4028 4028 */ 4029 if (VMCPU_FF_IS_ PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))4029 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS)) 4030 4030 hmR0SvmSetCtrlIntercept(pVmcb, SVM_CTRL_INTERCEPT_IRET); 4031 4031 … … 4209 4209 { 4210 4210 Assert(VMMRZCallRing3IsEnabled(pVCpu)); 4211 Assert(!VMCPU_FF_IS_ PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));4211 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES)); 4212 4212 4213 4213 /* Could happen as a result of longjump. */ 4214 if (VMCPU_FF_IS_ PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3))4214 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3)) 4215 4215 PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu)); 4216 4216 … … 4229 4229 { 4230 4230 int rc = PGMSyncCR3(pVCpu, pVCpu->cpum.GstCtx.cr0, pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.cr4, 4231 VMCPU_FF_IS_ PENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3));4231 VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3)); 4232 4232 if (rc != VINF_SUCCESS) 4233 4233 { … … 4249 4249 4250 4250 /* Pending VM request packets, such as hardware interrupts. */ 4251 if ( VM_FF_IS_ PENDING(pVM, VM_FF_REQUEST)4252 || VMCPU_FF_IS_ PENDING(pVCpu, VMCPU_FF_REQUEST))4251 if ( VM_FF_IS_SET(pVM, VM_FF_REQUEST) 4252 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_REQUEST)) 4253 4253 { 4254 4254 Log4Func(("Pending VM request forcing us back to ring-3\n")); … … 6043 6043 6044 6044 /* Update interrupt shadow. 
*/ 6045 if ( VMCPU_FF_IS_ PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)6045 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS) 6046 6046 && pCtx->rip != EMGetInhibitInterruptsPC(pVCpu)) 6047 6047 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS); … … 7456 7456 7457 7457 /* Clear NMI blocking. */ 7458 if (VMCPU_FF_IS_ PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))7458 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS)) 7459 7459 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS); 7460 7460 -
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
r74604 r74785 1848 1848 LogFlowFunc(("pVCpu=%p GCVirt=%RGv\n", pVCpu, GCVirt)); 1849 1849 1850 bool fFlushPending = VMCPU_FF_IS_ PENDING(pVCpu, VMCPU_FF_TLB_FLUSH);1850 bool fFlushPending = VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TLB_FLUSH); 1851 1851 if (!fFlushPending) 1852 1852 { … … 3452 3452 */ 3453 3453 uint32_t fIntrState = 0; 3454 if (VMCPU_FF_IS_ PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))3454 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)) 3455 3455 { 3456 3456 /* If inhibition is active, RIP & RFLAGS should've been accessed … … 3468 3468 fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS; 3469 3469 } 3470 else if (VMCPU_FF_IS_ PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))3470 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)) 3471 3471 { 3472 3472 /* … … 3486 3486 * See Intel spec. 26.6.1 "Interruptibility state". See @bugref{7445}. 3487 3487 */ 3488 if ( VMCPU_FF_IS_ PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS)3488 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS) 3489 3489 && (pVCpu->hm.s.vmx.u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)) 3490 3490 { … … 6210 6210 * See Intel spec. 30.7.1.2 "Resuming Guest Software after Handling an Exception". See @bugref{7445}. 6211 6211 */ 6212 if ( VMCPU_FF_IS_ PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS)6212 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS) 6213 6213 && uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI 6214 6214 && ( enmRaise == IEMXCPTRAISE_PREV_EVENT … … 6312 6312 * See Intel spec. 30.7.1.2 "Resuming guest software after handling an exception". 6313 6313 */ 6314 if (!VMCPU_FF_IS_ PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))6314 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS)) 6315 6315 { 6316 6316 Log4Func(("Setting VMCPU_FF_BLOCK_NMIS. 
fValid=%RTbool uExitReason=%u\n", … … 6496 6496 if (!u32Val) 6497 6497 { 6498 if (VMCPU_FF_IS_ PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))6498 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)) 6499 6499 { 6500 6500 rc = hmR0VmxImportGuestRip(pVCpu); … … 6503 6503 } 6504 6504 6505 if (VMCPU_FF_IS_ PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))6505 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS)) 6506 6506 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS); 6507 6507 } … … 6516 6516 EMSetInhibitInterruptsPC(pVCpu, pCtx->rip); 6517 6517 } 6518 else if (VMCPU_FF_IS_ PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))6518 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)) 6519 6519 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS); 6520 6520 6521 6521 if (u32Val & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI) 6522 6522 { 6523 if (!VMCPU_FF_IS_ PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))6523 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS)) 6524 6524 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS); 6525 6525 } 6526 else if (VMCPU_FF_IS_ PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))6526 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS)) 6527 6527 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS); 6528 6528 } … … 6841 6841 if (VMMRZCallRing3IsEnabled(pVCpu)) 6842 6842 { 6843 if (VMCPU_FF_IS_ PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3))6843 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3)) 6844 6844 { 6845 6845 Assert(!(ASMAtomicUoReadU64(&pCtx->fExtrn) & CPUMCTX_EXTRN_CR3)); … … 6847 6847 } 6848 6848 6849 if (VMCPU_FF_IS_ PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES))6849 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES)) 6850 6850 PGMGstUpdatePaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]); 6851 6851 6852 Assert(!VMCPU_FF_IS_ PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));6853 Assert(!VMCPU_FF_IS_ PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));6852 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3)); 6853 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES)); 6854 6854 } 6855 6855 … … 6912 6912 
Assert(!(ASMAtomicUoReadU64(&pCtx->fExtrn) & (CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4))); 6913 6913 VBOXSTRICTRC rcStrict2 = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4, 6914 VMCPU_FF_IS_ PENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3));6914 VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3)); 6915 6915 if (rcStrict2 != VINF_SUCCESS) 6916 6916 { … … 6933 6933 /* Pending VM request packets, such as hardware interrupts. */ 6934 6934 if ( VM_FF_IS_PENDING(pVM, VM_FF_REQUEST) 6935 || VMCPU_FF_IS_ PENDING(pVCpu, VMCPU_FF_REQUEST))6935 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_REQUEST)) 6936 6936 { 6937 6937 Log4Func(("Pending VM request forcing us back to ring-3\n")); … … 7519 7519 */ 7520 7520 /** @todo SMI. SMIs take priority over NMIs. */ 7521 if (VMCPU_FF_IS_ PENDING(pVCpu, VMCPU_FF_INTERRUPT_NMI))/* NMI. NMIs take priority over regular interrupts. */7521 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI)) /* NMI. NMIs take priority over regular interrupts. */ 7522 7522 { 7523 7523 /* On some CPUs block-by-STI also blocks NMIs. See Intel spec. 26.3.1.5 "Checks On Guest Non-Register State". */ … … 8527 8527 * the below force flags to be set. 8528 8528 */ 8529 if (VMCPU_FF_IS_ PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3))8529 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3)) 8530 8530 { 8531 8531 Assert(!(ASMAtomicUoReadU64(&pVCpu->cpum.GstCtx.fExtrn) & CPUMCTX_EXTRN_CR3)); … … 8533 8533 AssertMsgReturn(rc2 == VINF_SUCCESS || rc2 == VINF_PGM_SYNC_CR3, 8534 8534 ("%Rrc\n", rc2), RT_FAILURE_NP(rc2) ? 
rc2 : VERR_IPE_UNEXPECTED_INFO_STATUS); 8535 Assert(!VMCPU_FF_IS_ PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));8536 } 8537 if (VMCPU_FF_IS_ PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES))8535 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3)); 8536 } 8537 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES)) 8538 8538 { 8539 8539 PGMGstUpdatePaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]); 8540 Assert(!VMCPU_FF_IS_ PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));8540 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES)); 8541 8541 } 8542 8542 … … 8849 8849 VMMRZCallRing3Enable(pVCpu); 8850 8850 8851 Assert(!VMCPU_FF_IS_ PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));8852 Assert(!VMCPU_FF_IS_ PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));8851 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3)); 8852 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES)); 8853 8853 8854 8854 #if defined(HMVMX_ALWAYS_SYNC_FULL_GUEST_STATE) || defined(HMVMX_ALWAYS_SAVE_FULL_GUEST_STATE) … … 10465 10465 10466 10466 /* Update interrupt inhibition. */ 10467 if ( VMCPU_FF_IS_ PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)10467 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS) 10468 10468 && pVCpu->cpum.GstCtx.rip != EMGetInhibitInterruptsPC(pVCpu)) 10469 10469 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS); … … 11290 11290 } 11291 11291 11292 Assert(!VMCPU_FF_IS_ PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS));11292 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS)); 11293 11293 11294 11294 /* … … 11302 11302 bool const fBlockSti = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI); 11303 11303 if ( fBlockSti 11304 && VMCPU_FF_IS_ PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))11304 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)) 11305 11305 { 11306 11306 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS); -
trunk/src/VBox/VMM/VMMR0/NEMR0Native-win.cpp
r74517 r74785 1219 1219 pInput->Elements[iReg].Name = HvRegisterInterruptState; 1220 1220 pInput->Elements[iReg].Value.Reg64 = 0; 1221 if ( VMCPU_FF_IS_ PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)1221 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS) 1222 1222 && EMGetInhibitInterruptsPC(pVCpu) == pCtx->rip) 1223 1223 pInput->Elements[iReg].Value.InterruptState.InterruptShadow = 1; 1224 if (VMCPU_FF_IS_ PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))1224 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS)) 1225 1225 pInput->Elements[iReg].Value.InterruptState.NmiMasked = 1; 1226 1226 iReg++; … … 1229 1229 { 1230 1230 if ( pVCpu->nem.s.fLastInterruptShadow 1231 || ( VMCPU_FF_IS_ PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)1231 || ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS) 1232 1232 && EMGetInhibitInterruptsPC(pVCpu) == pCtx->rip)) 1233 1233 { … … 1235 1235 pInput->Elements[iReg].Name = HvRegisterInterruptState; 1236 1236 pInput->Elements[iReg].Value.Reg64 = 0; 1237 if ( VMCPU_FF_IS_ PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)1237 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS) 1238 1238 && EMGetInhibitInterruptsPC(pVCpu) == pCtx->rip) 1239 1239 pInput->Elements[iReg].Value.InterruptState.InterruptShadow = 1; -
trunk/src/VBox/VMM/VMMR0/VMMR0.cpp
r73203 r74785 904 904 case VINF_EM_RAW_TO_R3: 905 905 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Total); 906 if (VM_FF_IS_ PENDING(pVM, VM_FF_TM_VIRTUAL_SYNC))906 if (VM_FF_IS_SET(pVM, VM_FF_TM_VIRTUAL_SYNC)) 907 907 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3TMVirt); 908 else if (VM_FF_IS_ PENDING(pVM, VM_FF_PGM_NEED_HANDY_PAGES))908 else if (VM_FF_IS_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES)) 909 909 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3HandyPages); 910 else if (VM_FF_IS_ PENDING(pVM, VM_FF_PDM_QUEUES))910 else if (VM_FF_IS_SET(pVM, VM_FF_PDM_QUEUES)) 911 911 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3PDMQueues); 912 else if (VM_FF_IS_ PENDING(pVM, VM_FF_EMT_RENDEZVOUS))912 else if (VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS)) 913 913 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Rendezvous); 914 else if (VM_FF_IS_ PENDING(pVM, VM_FF_PDM_DMA))914 else if (VM_FF_IS_SET(pVM, VM_FF_PDM_DMA)) 915 915 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3DMA); 916 else if (VMCPU_FF_IS_ PENDING(pVCpu, VMCPU_FF_TIMER))916 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TIMER)) 917 917 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Timer); 918 else if (VMCPU_FF_IS_ PENDING(pVCpu, VMCPU_FF_PDM_CRITSECT))918 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PDM_CRITSECT)) 919 919 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3CritSect); 920 else if (VMCPU_FF_IS_ PENDING(pVCpu, VMCPU_FF_TO_R3))920 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TO_R3)) 921 921 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3FF); 922 else if (VMCPU_FF_IS_ PENDING(pVCpu, VMCPU_FF_IEM))922 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM)) 923 923 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Iem); 924 else if (VMCPU_FF_IS_ PENDING(pVCpu, VMCPU_FF_IOM))924 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IOM)) 925 925 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Iom); 926 926 else
Note: See TracChangeset for help on using the changeset viewer.