Changeset 46420 in vbox
- Timestamp: Jun 6, 2013 4:27:25 PM
- svn:sync-xref-src-repo-rev: 86253
- Location: trunk
- Files: 44 edited

This changeset removes the deprecated force-action macro aliases VM_FF_ISSET, VM_FF_ISPENDING, VM_FF_TESTANDCLEAR and their VMCPU_FF_* counterparts from vm.h, and converts all remaining users to the current names VM_FF_IS_SET, VM_FF_IS_PENDING, VM_FF_TEST_AND_CLEAR, VMCPU_FF_IS_SET, VMCPU_FF_IS_PENDING and VMCPU_FF_TEST_AND_CLEAR. Line numbers below refer to the new revision (r46420).
trunk/include/VBox/vmm/vm.h (r46267 -> r46420)

Drops the deprecated aliases and fixes the doxygen @def name for VMCPU_FF_IS_SET:

     #define VM_FF_IS_SET(pVM, fFlag)             (((pVM)->fGlobalForcedActions & (fFlag)) == (fFlag))
    -/** @deprecated */
    -#define VM_FF_ISSET(pVM, fFlag)              VM_FF_IS_SET(pVM, fFlag)

    -/** @def VMCPU_FF_ISSET
    +/** @def VMCPU_FF_IS_SET
      * Checks if a force action flag is set for the given VCPU.
      ...
      */
     #define VMCPU_FF_IS_SET(pVCpu, fFlag)        (((pVCpu)->fLocalForcedActions & (fFlag)) == (fFlag))
    -/** @deprecated */
    -#define VMCPU_FF_ISSET(pVCpu, fFlag)         VMCPU_FF_IS_SET(pVCpu, fFlag)

     /** @def VM_FF_ISPENDING ... */
     #define VM_FF_IS_PENDING(pVM, fFlags)        ((pVM)->fGlobalForcedActions & (fFlags))
    -/** @deprecated */
    -#define VM_FF_ISPENDING(pVM, fFlags)         VM_FF_IS_PENDING(pVM, fFlags)

     /** @def VM_FF_TESTANDCLEAR ... */
     #define VM_FF_TEST_AND_CLEAR(pVM, iBit)      (ASMAtomicBitTestAndClear(&(pVM)->fGlobalForcedActions, iBit##_BIT))
    -/** @deprecated */
    -#define VM_FF_TESTANDCLEAR(pVM, iBit)        (ASMAtomicBitTestAndClear(&(pVM)->fGlobalForcedActions, iBit##_BIT))

     /** @def VMCPU_FF_TESTANDCLEAR ... */
     #define VMCPU_FF_TEST_AND_CLEAR(pVCpu, iBit) (ASMAtomicBitTestAndClear(&(pVCpu)->fLocalForcedActions, iBit##_BIT))
    -/** @deprecated */
    -#define VMCPU_FF_TESTANDCLEAR(pVCpu, iBit)   (ASMAtomicBitTestAndClear(&(pVCpu)->fLocalForcedActions, iBit##_BIT))

     /** @def VMCPU_FF_ISPENDING ... */
     #define VMCPU_FF_IS_PENDING(pVCpu, fFlags)   ((pVCpu)->fLocalForcedActions & (fFlags))
    -/** @deprecated */
    -#define VMCPU_FF_ISPENDING(pVCpu, fFlags)    VMCPU_FF_IS_PENDING(pVCpu, fFlags)
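The three surviving macro families differ in how they combine flag bits: IS_SET requires all of the given bits, IS_PENDING is satisfied by any of them, and TEST_AND_CLEAR atomically clears one bit while reporting whether it was set. A minimal self-contained sketch of those semantics (the VMSKETCH struct, the SKETCH_* names and the non-atomic test-and-clear helper are illustrative stand-ins, not VirtualBox code; the real macros operate on pVM->fGlobalForcedActions / pVCpu->fLocalForcedActions and use IPRT's ASMAtomicBitTestAndClear):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdbool.h>

    /* Toy stand-in for the VM structure; only the global FF word matters here. */
    typedef struct VMSKETCH { volatile uint32_t fGlobalForcedActions; } VMSKETCH;

    #define SKETCH_FF_A   UINT32_C(0x00000001)
    #define SKETCH_FF_B   UINT32_C(0x00000002)

    /* All given bits must be set (same expression as VM_FF_IS_SET above). */
    #define SKETCH_FF_IS_SET(pVM, fFlag)      (((pVM)->fGlobalForcedActions & (fFlag)) == (fFlag))
    /* Any of the given bits set (same expression as VM_FF_IS_PENDING above). */
    #define SKETCH_FF_IS_PENDING(pVM, fFlags) ((pVM)->fGlobalForcedActions & (fFlags))

    /* Non-atomic stand-in for the ASMAtomicBitTestAndClear call behind
     * VM_FF_TEST_AND_CLEAR; the real helper does this as one atomic op. */
    static bool sketchFFTestAndClear(VMSKETCH *pVM, uint32_t fFlag)
    {
        bool fWasSet = (pVM->fGlobalForcedActions & fFlag) != 0;
        pVM->fGlobalForcedActions &= ~fFlag;
        return fWasSet;
    }

    int main(void)
    {
        VMSKETCH VM = { SKETCH_FF_A };   /* only flag A raised */
        printf("IS_SET(A|B)       = %d\n", SKETCH_FF_IS_SET(&VM, SKETCH_FF_A | SKETCH_FF_B));       /* 0: B missing */
        printf("IS_PENDING(A|B)   = %d\n", !!SKETCH_FF_IS_PENDING(&VM, SKETCH_FF_A | SKETCH_FF_B)); /* 1: A is set */
        printf("TEST_AND_CLEAR(A) = %d\n", sketchFFTestAndClear(&VM, SKETCH_FF_A));                 /* 1, and clears A */
        printf("IS_PENDING(A)     = %d\n", !!SKETCH_FF_IS_PENDING(&VM, SKETCH_FF_A));               /* 0 */
        return 0;
    }

The IS_SET/IS_PENDING distinction is why the multi-flag call sites converted below consistently use the IS_PENDING form: a single raised flag out of the mask is enough to require servicing.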
trunk/src/VBox/VMM/VMMAll/EMAll.cpp (r45907 -> r46420)
    Line 235: VMCPU_FF_ISPENDING -> VMCPU_FF_IS_PENDING:
        return !!VMCPU_FF_IS_PENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC));
    Line 2773: trailing-whitespace cleanup only.
trunk/src/VBox/VMM/VMMAll/HMAll.cpp (r46363 -> r46420)
    VMCPU_FF_ISSET -> VMCPU_FF_IS_SET in the "TLB flush already pending" checks:
        66:  if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TLB_FLUSH))
        224: if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TLB_FLUSH))
        261: if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TLB_FLUSH))
trunk/src/VBox/VMM/VMMAll/IEMAll.cpp (r46389 -> r46420)
    Line 8331: VMCPU_FF_ISSET -> VMCPU_FF_IS_SET:
        && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
trunk/src/VBox/VMM/VMMAll/IOMAllMMIO.cpp (r45311 -> r46420)
    Line 2575: VMCPU_FF_ISSET -> VMCPU_FF_IS_SET (inside VBOX_STRICT):
        if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3))
trunk/src/VBox/VMM/VMMAll/PDMAll.cpp (r45965 -> r46420)
    VMCPU_FF_ISSET -> VMCPU_FF_IS_SET in the interrupt-dispatch path, where the local APIC has a higher priority than the PIC:
        54: if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC))
        74: if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_PIC))
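The comment surrounding these two hunks states that the local APIC outranks the PIC, so its flag is tested and cleared first. A self-contained sketch of that ordering under toy types (the VCPUSKETCH struct and the queued-vector fields are invented for illustration; the real code clears the FF and then queries the APIC or PIC device for the vector, re-raising the FF when more interrupts are queued):

    #include <stdint.h>
    #include <stdio.h>

    /* Toy per-VCPU state: the local FF word plus one queued vector per source. */
    typedef struct VCPUSKETCH {
        volatile uint32_t fLocalForcedActions;
        int               iApicVector;   /* -1 = none */
        int               iPicVector;    /* -1 = none */
    } VCPUSKETCH;

    #define SKETCH_FF_INTERRUPT_APIC  UINT32_C(0x00000001)
    #define SKETCH_FF_INTERRUPT_PIC   UINT32_C(0x00000002)

    #define SKETCH_FF_IS_SET(pVCpu, f) (((pVCpu)->fLocalForcedActions & (f)) == (f))
    #define SKETCH_FF_CLEAR(pVCpu, f)  ((pVCpu)->fLocalForcedActions &= ~(f))

    /* Deliver at most one pending interrupt, APIC before PIC, mirroring the
     * IS_SET/CLEAR ordering in the PDMAll.cpp hunks. */
    static int sketchGetInterrupt(VCPUSKETCH *pVCpu)
    {
        if (SKETCH_FF_IS_SET(pVCpu, SKETCH_FF_INTERRUPT_APIC))
        {
            SKETCH_FF_CLEAR(pVCpu, SKETCH_FF_INTERRUPT_APIC);
            return pVCpu->iApicVector;   /* the real code asks the APIC device */
        }
        if (SKETCH_FF_IS_SET(pVCpu, SKETCH_FF_INTERRUPT_PIC))
        {
            SKETCH_FF_CLEAR(pVCpu, SKETCH_FF_INTERRUPT_PIC);
            return pVCpu->iPicVector;    /* the real code asks the PIC device */
        }
        return -1;                       /* nothing pending */
    }

    int main(void)
    {
        VCPUSKETCH VCpu = { SKETCH_FF_INTERRUPT_APIC | SKETCH_FF_INTERRUPT_PIC, 0x40, 0x20 };
        printf("%d\n", sketchGetInterrupt(&VCpu));  /* 64: APIC wins */
        printf("%d\n", sketchGetInterrupt(&VCpu));  /* 32: then the PIC */
        printf("%d\n", sketchGetInterrupt(&VCpu));  /* -1: drained */
        return 0;
    }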
trunk/src/VBox/VMM/VMMAll/PDMAllQueue.cpp (r44528 -> r46420)
    Line 102: VM_FF_ISSET -> VM_FF_IS_SET inside the PDMQueueInsert Log2 statement:
        Log2(("PDMQueueInsert: VM_FF_PDM_QUEUES %d -> 1\n", VM_FF_IS_SET(pVM, VM_FF_PDM_QUEUES)));
trunk/src/VBox/VMM/VMMAll/PGMAll.cpp (r46326 -> r46420)
    Line 1986: VMCPU_FF_ISPENDING -> VMCPU_FF_IS_PENDING (Assert on VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_PGM_SYNC_CR3).
    Line 2136: VMCPU_FF_ISSET -> VMCPU_FF_IS_SET, twice, in the PGMSyncCR3 LogFlow statement.
trunk/src/VBox/VMM/VMMAll/PGMAllBth.h (r45836 -> r46420)
    VM_FF_ISPENDING -> VM_FF_IS_PENDING in the VM_FF_PGM_NO_MEMORY and SYNC_CR3 checks: lines 976, 1314, 2023, 2295, 2323, 2982, 3017 and 3290.
    VMCPU_FF_ISSET -> VMCPU_FF_IS_SET in the VMCPU_FF_PGM_SYNC_CR3(_NON_GLOBAL) checks: lines 1307-1308, 3673 and 4705 (twice).
trunk/src/VBox/VMM/VMMAll/PGMAllHandler.cpp (r45808 -> r46420)
    Line 245: VMCPU_FF_ISSET -> VMCPU_FF_IS_SET in the pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs Log statement (VMCPU_FF_PGM_SYNC_CR3).
trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp (r45428 -> r46420)
    Line 490: VM_FF_ISSET -> VM_FF_IS_SET in the handy-pages Log statement (VM_FF_PGM_NO_MEMORY).
    Lines 506-507: VM_FF_ISSET -> VM_FF_IS_SET:
        Assert(VM_FF_IS_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES));
        Assert(VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY));
trunk/src/VBox/VMM/VMMAll/PGMAllPool.cpp (r45991 -> r46420)
    VMCPU_FF_ISSET -> VMCPU_FF_IS_SET at lines 426 (VMCPU_FF_PGM_SYNC_CR3, in a #if 0 block), 1073 (Assert on VMCPU_FF_TLB_FLUSH), 2497 (twice, in an AssertFatalMsg) and 2594 (AssertFatalMsg on VMCPU_FF_PGM_SYNC_CR3).
trunk/src/VBox/VMM/VMMAll/REMAll.cpp (r44528 -> r46420)
    Line 226: VM_FF_ISSET -> VM_FF_IS_SET:
        AssertRelease(VM_FF_IS_SET(pVM, VM_FF_REM_HANDLER_NOTIFY));
trunk/src/VBox/VMM/VMMAll/TMAll.cpp (r44528 -> r46420)
    VMCPU_FF_ISSET -> VMCPU_FF_IS_SET in the VMCPU_FF_TIMER checks: lines 221, 745, 764, 810, 885 and 909.
    VMCPU_FF_ISPENDING -> VMCPU_FF_IS_PENDING in the accompanying Log5 "FF: %d -> 1" statements: lines 766, 812 and 911.
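Nearly all TMAll.cpp hunks share one shape: test the timer FF, and only when it is not yet raised, log the 0 -> 1 transition and set it, so repeated scheduling requests stay cheap. A toy sketch of that edge-triggered raise (names are illustrative; the real code also wakes the target EMT and, in ring-3 REM builds, notifies the recompiler):

    #include <stdint.h>
    #include <stdio.h>

    typedef struct VCPUSKETCH { volatile uint32_t fLocalForcedActions; } VCPUSKETCH;

    #define SKETCH_FF_TIMER           UINT32_C(0x00000010)
    #define SKETCH_FF_IS_SET(p, f)    (((p)->fLocalForcedActions & (f)) == (f))
    #define SKETCH_FF_SET(p, f)       ((p)->fLocalForcedActions |= (f))

    /* Raise the timer FF only on the 0 -> 1 edge; repeated calls while the
     * flag is already up do no work and produce no log noise. */
    static void sketchRaiseTimerFF(VCPUSKETCH *pVCpuDst)
    {
        if (!SKETCH_FF_IS_SET(pVCpuDst, SKETCH_FF_TIMER))
        {
            printf("FF: 0 -> 1\n");     /* stands in for the Log5 statements */
            SKETCH_FF_SET(pVCpuDst, SKETCH_FF_TIMER);
            /* the real code additionally wakes the halted target EMT */
        }
    }

    int main(void)
    {
        VCPUSKETCH VCpu = { 0 };
        sketchRaiseTimerFF(&VCpu);      /* logs the transition */
        sketchRaiseTimerFF(&VCpu);      /* already set: silent */
        return 0;
    }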
trunk/src/VBox/VMM/VMMAll/TMAllVirtual.cpp (r44528 -> r46420)
    VMCPU_FF_ISSET -> VMCPU_FF_IS_SET in the VMCPU_FF_TIMER checks: lines 340, 651 and 821.
    VMCPU_FF_ISPENDING -> VMCPU_FF_IS_PENDING in the Log5 statements: lines 351, 508, 596 and 823.
trunk/src/VBox/VMM/VMMAll/TRPMAll.cpp (r45728 -> r46420)
    Line 496: VMCPU_FF_ISSET -> VMCPU_FF_IS_SET (Assert that VMCPU_FF_INHIBIT_INTERRUPTS is clear when dispatching hardware interrupts).
    Line 519: VMCPU_FF_ISPENDING -> VMCPU_FF_IS_PENDING (Assert on VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT | VMCPU_FF_TRPM_SYNC_IDT | VMCPU_FF_SELM_SYNC_TSS).
trunk/src/VBox/VMM/VMMR0/HMR0.cpp (r46379 -> r46420)
    Line 1569: VMCPU_FF_ISPENDING -> VMCPU_FF_IS_PENDING (inside VBOX_STRICT):
        Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL));
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp (r46394 -> r46420)
    Line 540: VMCPU_FF_TESTANDCLEAR -> VMCPU_FF_TEST_AND_CLEAR (explicit VMCPU_FF_TLB_FLUSH shootdown check).
    Line 613: VMCPU_FF_ISPENDING -> VMCPU_FF_IS_PENDING (VMCPU_FF_TLB_SHOOTDOWN).
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp (r46419 -> r46420)
    VMCPU_FF_TESTANDCLEAR -> VMCPU_FF_TEST_AND_CLEAR in the explicit TLB shootdown checks: lines 1257, 1372 and 1438.
trunk/src/VBox/VMM/VMMR0/HWSVMR0.cpp (r46314 -> r46420)
    VMCPU_FF_ISSET -> VMCPU_FF_IS_SET (mostly VMCPU_FF_INHIBIT_INTERRUPTS and VMCPU_FF_TLB_FLUSH): lines 562, 630, 634, 681, 1268, 1316, 1457 and 3053.
    VMCPU_FF_TESTANDCLEAR -> VMCPU_FF_TEST_AND_CLEAR: lines 608 (VMCPU_FF_INTERRUPT_NMI) and 1107 (VMCPU_FF_TLB_FLUSH).
    VM_FF_ISPENDING / VMCPU_FF_ISPENDING -> VM_FF_IS_PENDING / VMCPU_FF_IS_PENDING in the interrupt, TLB-shootdown and "go back to ring-3" checks: lines 627, 663, 887, 1177, 1306-1307, 1314, 1330-1331, 1334, 1340-1341, 1348 and 1355.
trunk/src/VBox/VMM/VMMR0/HWVMXR0.cpp (r46218 -> r46420)
    VMCPU_FF_ISSET -> VMCPU_FF_IS_SET (mostly VMCPU_FF_INHIBIT_INTERRUPTS, VMCPU_FF_PGM_SYNC_CR3 and VMCPU_FF_TLB_FLUSH): lines 980, 1144, 1179, 3044, 3090, 3234, 3848 and 5217.
    VMCPU_FF_TESTANDCLEAR -> VMCPU_FF_TEST_AND_CLEAR: lines 1110 (VMCPU_FF_INTERRUPT_NMI) and 2644, 2752, 2820 (VMCPU_FF_TLB_FLUSH).
    VM_FF_ISPENDING / VMCPU_FF_ISPENDING -> VM_FF_IS_PENDING / VMCPU_FF_IS_PENDING in the interrupt, TLB-shootdown and "go back to ring-3" checks: lines 1131, 1159, 2253, 2686, 2765, 2851, 3084-3085, 3088, 3104-3105, 3108, 3114-3115, 3122, 3129 and 4259.
trunk/src/VBox/VMM/VMMR0/PDMR0Device.cpp (r44902 -> r46420)
    VMCPU_FF_ISSET -> VMCPU_FF_IS_SET in the PIC/APIC set- and clear-interrupt helper LogFlow statements: lines 431, 456, 510 and 561.
trunk/src/VBox/VMM/VMMR0/PGMR0SharedPage.cpp (r43045 -> r46420)
    Line 110: VMCPU_FF_ISSET -> VMCPU_FF_IS_SET (VMCPU_FF_PGM_SYNC_CR3 in the pgmPoolTrackUpdateGCPhys Assert).
trunk/src/VBox/VMM/VMMR0/VMMR0.cpp (r45934 -> r46420)
    VM_FF_ISPENDING / VMCPU_FF_ISPENDING -> VM_FF_IS_PENDING / VMCPU_FF_IS_PENDING in the VINF_EM_RAW_TO_R3 statistics cascade: lines 528, 530, 532, 534, 536, 538, 540 and 542.
trunk/src/VBox/VMM/VMMR3/DBGF.cpp (r46167 -> r46420)
    VM_FF_ISPENDING / VMCPU_FF_ISPENDING -> VM_FF_IS_PENDING / VMCPU_FF_IS_PENDING in the request and rendezvous checks: lines 243-244, 678-679, 691 and 696-697.
    Line 355: VM_FF_TESTANDCLEAR -> VM_FF_TEST_AND_CLEAR (VM_FF_DBGF).
trunk/src/VBox/VMM/VMMR3/EM.cpp (r46155 -> r46420)
    VM_FF_ISPENDING / VMCPU_FF_ISPENDING -> VM_FF_IS_PENDING / VMCPU_FF_IS_PENDING throughout the high-priority, post-execution and normal-priority forced-action paths and the halted-wait check (more than forty occurrences between lines 1032 and 2361).
    Line 1593: VM_FF_TESTANDCLEAR -> VM_FF_TEST_AND_CLEAR (postponed VM_FF_RESET handling).
trunk/src/VBox/VMM/VMMR3/EMHM.cpp (r45728 -> r46420)
    VM_FF_ISPENDING / VMCPU_FF_ISPENDING -> VM_FF_IS_PENDING / VMCPU_FF_IS_PENDING: lines 93-94, 400, 408, 429, 450, 505-506, 567-568 and 587-588.
    VMCPU_FF_ISSET -> VMCPU_FF_IS_SET in the PGMSyncCR3 calls: lines 403 and 423.
trunk/src/VBox/VMM/VMMR3/EMRaw.cpp (r46165 -> r46420)
    VM_FF_ISPENDING / VMCPU_FF_ISPENDING -> VM_FF_IS_PENDING / VMCPU_FF_IS_PENDING in the sync, no-memory and high-priority FF checks: lines 170-171, 1226, 1240, 1242, 1259, 1269, 1276, 1295, 1315, 1369, 1381-1382, 1412-1413, 1481-1482, 1489, 1513 and 1548-1549.
    VMCPU_FF_ISSET -> VMCPU_FF_IS_SET in the PGMSyncCR3 calls: lines 1246, 1272 and 1290.
trunk/src/VBox/VMM/VMMR3/FTM.cpp
r46326 r46420
1338 1338 while ((rc = PDMCritSectTryEnter(&pVM->ftm.s.CritSect)) == VERR_SEM_BUSY)
1339 1339 {
1340 if (VM_FF_ISPENDING(pVM, VM_FF_EMT_RENDEZVOUS))
1340 if (VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS))
1341 1341 {
1342 1342 rc = VMMR3EmtRendezvousFF(pVM, pVCpu);
… …
1344 1344 }
1345 1345
1346 if (VM_FF_ISPENDING(pVM, VM_FF_REQUEST))
1346 if (VM_FF_IS_PENDING(pVM, VM_FF_REQUEST))
1347 1347 {
1348 1348 rc = VMR3ReqProcessU(pVM->pUVM, VMCPUID_ANY, true /*fPriorityOnly*/);
-
trunk/src/VBox/VMM/VMMR3/PDM.cpp
r45808 r46420
797 797 {
798 798 PVMCPU pVCpu = &pVM->aCpus[idCpu];
799 SSMR3PutU32(pSSM, VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INTERRUPT_APIC));
800 SSMR3PutU32(pSSM, VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INTERRUPT_PIC));
801 SSMR3PutU32(pSSM, VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INTERRUPT_NMI));
802 SSMR3PutU32(pSSM, VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INTERRUPT_SMI));
803 }
804 SSMR3PutU32(pSSM, VM_FF_ISSET(pVM, VM_FF_PDM_DMA));
799 SSMR3PutU32(pSSM, VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC));
800 SSMR3PutU32(pSSM, VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_PIC));
801 SSMR3PutU32(pSSM, VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI));
802 SSMR3PutU32(pSSM, VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_SMI));
803 }
804 SSMR3PutU32(pSSM, VM_FF_IS_SET(pVM, VM_FF_PDM_DMA));
805 805
806 806 pdmR3SaveBoth(pVM, pSSM);
… …
821 821 {
822 822 LogFlow(("pdmR3LoadPrep: %s%s\n",
823 VM_FF_ISSET(pVM, VM_FF_PDM_QUEUES) ? " VM_FF_PDM_QUEUES" : "",
824 VM_FF_ISSET(pVM, VM_FF_PDM_DMA) ? " VM_FF_PDM_DMA" : ""));
823 VM_FF_IS_SET(pVM, VM_FF_PDM_QUEUES) ? " VM_FF_PDM_QUEUES" : "",
824 VM_FF_IS_SET(pVM, VM_FF_PDM_DMA) ? " VM_FF_PDM_DMA" : ""));
825 825 #ifdef LOG_ENABLED
826 826 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
… …
828 828 PVMCPU pVCpu = &pVM->aCpus[idCpu];
829 829 LogFlow(("pdmR3LoadPrep: VCPU %u %s%s\n", idCpu,
830 VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INTERRUPT_APIC) ? " VMCPU_FF_INTERRUPT_APIC" : "",
831 VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INTERRUPT_PIC) ? " VMCPU_FF_INTERRUPT_PIC" : ""));
830 VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC) ? " VMCPU_FF_INTERRUPT_APIC" : "",
831 VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_PIC) ? " VMCPU_FF_INTERRUPT_PIC" : ""));
832 832 }
833 833 #endif
… …
838 838 * start a DMA transfer, or release a lock. (unlikely)
839 839 */
840 if (VM_FF_ISSET(pVM, VM_FF_PDM_QUEUES))
840 if (VM_FF_IS_SET(pVM, VM_FF_PDM_QUEUES))
841 841 PDMR3QueueFlushAll(pVM);
… …
900 900 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
901 901 }
902 AssertRelease(!VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INTERRUPT_APIC));
902 AssertRelease(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC));
903 903 if (fInterruptPending)
904 904 VMCPU_FF_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC);
… …
914 914 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
915 915 }
916 AssertRelease(!VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INTERRUPT_PIC));
916 AssertRelease(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_PIC));
917 917 if (fInterruptPending)
918 918 VMCPU_FF_SET(pVCpu, VMCPU_FF_INTERRUPT_PIC);
… …
930 930 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
931 931 }
932 AssertRelease(!VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INTERRUPT_NMI));
932 AssertRelease(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI));
933 933 if (fInterruptPending)
934 934 VMCPU_FF_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI);
… …
944 944 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
945 945 }
946 AssertRelease(!VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INTERRUPT_SMI));
946 AssertRelease(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_SMI));
947 947 if (fInterruptPending)
948 948 VMCPU_FF_SET(pVCpu, VMCPU_FF_INTERRUPT_SMI);
… …
962 962 if (fDMAPending)
963 963 VM_FF_SET(pVM, VM_FF_PDM_DMA);
964 Log(("pdmR3LoadExec: VM_FF_PDM_DMA=%RTbool\n", VM_FF_ISSET(pVM, VM_FF_PDM_DMA)));
964 Log(("pdmR3LoadExec: VM_FF_PDM_DMA=%RTbool\n", VM_FF_IS_SET(pVM, VM_FF_PDM_DMA)));
965 965 }
… …
2327 2327 return;
2328 2328
2329 if (VM_FF_TESTANDCLEAR(pVM, VM_FF_PDM_DMA))
2329 if (VM_FF_TEST_AND_CLEAR(pVM, VM_FF_PDM_DMA))
2330 2330 {
2331 2331 if (pVM->pdm.s.pDmac)
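VM_FF_TESTANDCLEAR becomes VM_FF_TEST_AND_CLEAR in the DMA path above; the macro atomically tests and clears the flag, so pending DMA work is claimed at most once even under concurrent flag setters. A self-contained stand-in for that idiom in C11 (all names here are hypothetical demo names, not the VBox implementation):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_uint g_fFlags;        /* stand-in for a forced-action word */
    #define DEMO_FF_DMA (1u << 3)       /* hypothetical flag bit */

    /* Atomically clear the flag and report whether it was set beforehand. */
    static bool demoTestAndClear(atomic_uint *pfFlags, unsigned fFlag)
    {
        return (atomic_fetch_and(pfFlags, ~fFlag) & fFlag) != 0;
    }

    int main(void)
    {
        atomic_fetch_or(&g_fFlags, DEMO_FF_DMA);                              /* raise */
        printf("first consume:  %d\n", demoTestAndClear(&g_fFlags, DEMO_FF_DMA)); /* 1 */
        printf("second consume: %d\n", demoTestAndClear(&g_fFlags, DEMO_FF_DMA)); /* 0 */
        return 0;
    }

-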
trunk/src/VBox/VMM/VMMR3/PDMDevHlp.cpp
r45808 r46420
2035 2035 VM_ASSERT_EMT(pVM);
2036 2036 LogFlow(("pdmR3DevHlp_DMASchedule: caller='%s'/%d: VM_FF_PDM_DMA %d -> 1\n",
2037 pDevIns->pReg->szName, pDevIns->iInstance, VM_FF_ISSET(pVM, VM_FF_PDM_DMA)));
2037 pDevIns->pReg->szName, pDevIns->iInstance, VM_FF_IS_SET(pVM, VM_FF_PDM_DMA)));
2038 2038
2039 2039 AssertMsg(pVM->pdm.s.pDmac, ("Configuration error: No DMAC controller available. This could be related to init order too!\n"));
… …
3217 3217 VM_ASSERT_EMT(pVM);
3218 3218 LogFlow(("pdmR3DevHlp_VMReset: caller='%s'/%d: VM_FF_RESET %d -> 1\n",
3219 pDevIns->pReg->szName, pDevIns->iInstance, VM_FF_ISSET(pVM, VM_FF_RESET)));
3219 pDevIns->pReg->szName, pDevIns->iInstance, VM_FF_IS_SET(pVM, VM_FF_RESET)));
3220 3220
3221 3221 /*
-
trunk/src/VBox/VMM/VMMR3/PDMDevMiscHlp.cpp
r45808 r46420
65 65
66 66 LogFlow(("pdmR3PicHlp_SetInterruptFF: caller='%s'/%d: VMCPU_FF_INTERRUPT_PIC %d -> 1\n",
67 pDevIns->pReg->szName, pDevIns->iInstance, VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INTERRUPT_PIC)));
67 pDevIns->pReg->szName, pDevIns->iInstance, VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_PIC)));
68 68
69 69 VMCPU_FF_SET(pVCpu, VMCPU_FF_INTERRUPT_PIC);
… …
93 93
94 94 LogFlow(("pdmR3PicHlp_ClearInterruptFF: caller='%s'/%d: VMCPU_FF_INTERRUPT_PIC %d -> 0\n",
95 pDevIns->pReg->szName, pDevIns->iInstance, VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INTERRUPT_PIC)));
95 pDevIns->pReg->szName, pDevIns->iInstance, VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_PIC)));
96 96
97 97 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_PIC);
… …
189 189
190 190 LogFlow(("pdmR3ApicHlp_SetInterruptFF: caller='%s'/%d: VMCPU_FF_INTERRUPT_APIC(%d) %d -> 1\n",
191 pDevIns->pReg->szName, pDevIns->iInstance, idCpu, VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INTERRUPT_APIC)));
191 pDevIns->pReg->szName, pDevIns->iInstance, idCpu, VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC)));
192 192
193 193 switch (enmType)
… …
226 226
227 227 LogFlow(("pdmR3ApicHlp_ClearInterruptFF: caller='%s'/%d: VMCPU_FF_INTERRUPT_APIC(%d) %d -> 0\n",
228 pDevIns->pReg->szName, pDevIns->iInstance, idCpu, VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INTERRUPT_APIC)));
228 pDevIns->pReg->szName, pDevIns->iInstance, idCpu, VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC)));
229 229
230 230 /* Note: NMI/SMI can't be cleared. */
-
trunk/src/VBox/VMM/VMMR3/PDMQueue.cpp
r44504 r46420
672 672 /* We're done if there were no inserts while we were busy. */
673 673 if ( !ASMBitTest(&pVM->pdm.s.fQueueFlushing, PDM_QUEUE_FLUSH_FLAG_PENDING_BIT)
674 && !VM_FF_ISPENDING(pVM, VM_FF_PDM_QUEUES))
674 && !VM_FF_IS_PENDING(pVM, VM_FF_PDM_QUEUES))
675 675 break;
676 676 VM_FF_CLEAR(pVM, VM_FF_PDM_QUEUES);
-
trunk/src/VBox/VMM/VMMR3/PGM.cpp
r45798 r46420
3684 3684 pVCpu->pgm.s.enmShadowMode = PGMMODE_INVALID;
3685 3685 int rc = PGMR3ChangeMode(pVM, pVCpu, PGMGetGuestMode(pVCpu));
3686 Assert(VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
3686 Assert(VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
3687 3687 AssertRCReturn(rc, rc);
3688 3688 AssertRCSuccessReturn(rc, VERR_IPE_UNEXPECTED_INFO_STATUS);
-
trunk/src/VBox/VMM/VMMR3/PGMBth.h
r45808 r46420
170 170 {
171 171 Log(("Bth-Enter: PGM pool flushed -> signal sync cr3\n"));
172 Assert(VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
172 Assert(VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
173 173 pgmUnlock(pVM);
174 174 return VINF_PGM_SYNC_CR3;
-
trunk/src/VBox/VMM/VMMR3/SELM.cpp
r45709 r46420
1388 1388 */
1389 1389 int rc;
1390 if (VMCPU_FF_ISSET(pVCpu, VMCPU_FF_SELM_SYNC_GDT))
1390 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT))
1391 1391 {
1392 1392 rc = selmR3UpdateShadowGdt(pVM, pVCpu);
… …
1399 1399 * TSS sync
1400 1400 */
1401 if (VMCPU_FF_ISSET(pVCpu, VMCPU_FF_SELM_SYNC_TSS))
1401 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS))
1402 1402 {
1403 1403 rc = SELMR3SyncTSS(pVM, pVCpu);
… …
1410 1410 * LDT sync
1411 1411 */
1412 if (VMCPU_FF_ISSET(pVCpu, VMCPU_FF_SELM_SYNC_LDT))
1412 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT))
1413 1413 {
1414 1414 rc = selmR3UpdateShadowLdt(pVM, pVCpu);
… …
1542 1542
1543 1543 STAM_PROFILE_START(&pVM->selm.s.StatTSSSync, a);
1544 Assert(VMCPU_FF_ISSET(pVCpu, VMCPU_FF_SELM_SYNC_TSS));
1544 Assert(VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS));
1545 1545
1546 1546 /*
… …
1915 1915 PVMCPU pVCpu = VMMGetCpu(pVM);
1916 1916
1917 if (VMCPU_FF_ISSET(pVCpu, VMCPU_FF_SELM_SYNC_TSS))
1917 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS))
1918 1918 return true;
1919 1919
-
trunk/src/VBox/VMM/VMMR3/TM.cpp
r45808 r46420
1892 1892 AssertCompile(TMCLOCK_MAX == 4);
1893 1893 #ifdef DEBUG_Sander /* very annoying, keep it private. */
1894 if (VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
1894 if (VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
1895 1895 Log(("tmR3TimerCallback: timer event still pending!!\n"));
1896 1896 #endif
1897 if ( !VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER)
1897 if ( !VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)
1898 1898 && ( pVM->tm.s.paTimerQueuesR3[TMCLOCK_VIRTUAL_SYNC].offSchedule /** @todo FIXME - reconsider offSchedule as a reason for running the timer queues. */
1899 1899 || pVM->tm.s.paTimerQueuesR3[TMCLOCK_VIRTUAL].offSchedule
… …
1902 1902 || tmR3AnyExpiredTimers(pVM)
1903 1903 )
1904 && !VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER)
1904 && !VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)
1905 1905 && !pVM->tm.s.fRunningQueues
1906 1906 )
-
trunk/src/VBox/VMM/VMMR3/TRPM.cpp
r45808 r46420
871 871 SSMR3PutBool(pSSM, HMIsEnabled(pVM));
872 872 PVMCPU pVCpu = &pVM->aCpus[0]; /* raw mode implies 1 VCPU */
873 SSMR3PutUInt(pSSM, VM_WHEN_RAW_MODE(VMCPU_FF_ISSET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT), 0));
873 SSMR3PutUInt(pSSM, VM_WHEN_RAW_MODE(VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT), 0));
874 874 SSMR3PutMem(pSSM, &pTrpm->au32IdtPatched[0], sizeof(pTrpm->au32IdtPatched));
875 875 SSMR3PutU32(pSSM, ~0); /* separator. */
… …
1494 1494 Assert(!PATMIsPatchGCAddr(pVM, pCtx->eip));
1495 1495 #endif
1496 Assert(!VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
1496 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
1497 1497
1498 1498 /* Currently only useful for external hardware interrupts. */
… …
1543 1543 if (rc == VINF_SUCCESS /* Don't use RT_SUCCESS */)
1544 1544 {
1545 Assert(!VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT | VMCPU_FF_TRPM_SYNC_IDT | VMCPU_FF_SELM_SYNC_TSS));
1545 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT | VMCPU_FF_TRPM_SYNC_IDT | VMCPU_FF_SELM_SYNC_TSS));
1546 1546
1547 1547 STAM_COUNTER_INC(&pVM->trpm.s.paStatForwardedIRQR3[u8Interrupt]);
-
trunk/src/VBox/VMM/VMMR3/VMEmt.cpp
r45749 r46420
161 161 }
162 162
163 if (VM_FF_ISPENDING(pVM, VM_FF_EMT_RENDEZVOUS))
163 if (VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS))
164 164 {
165 165 rc = VMMR3EmtRendezvousFF(pVM, &pVM->aCpus[idCpu]);
… …
182 182 Log(("vmR3EmulationThread: Req (cpu=%u) rc=%Rrc, VM state %s -> %s\n", pUVCpu->idCpu, rc, VMR3GetStateName(enmBefore), VMR3GetStateName(pVM->enmVMState)));
183 183 }
184 else if (VM_FF_ISSET(pVM, VM_FF_DBGF))
184 else if (VM_FF_IS_SET(pVM, VM_FF_DBGF))
185 185 {
186 186 /*
… …
190 190 Log(("vmR3EmulationThread: Dbg rc=%Rrc, VM state %s -> %s\n", rc, VMR3GetStateName(enmBefore), VMR3GetStateName(pVM->enmVMState)));
191 191 }
192 else if (VM_FF_TESTANDCLEAR(pVM, VM_FF_RESET))
192 else if (VM_FF_TEST_AND_CLEAR(pVM, VM_FF_RESET))
193 193 {
194 194 /*
… …
338 338 uint64_t const cNsElapsedTimers = RTTimeNanoTS() - u64StartTimers;
339 339 STAM_REL_PROFILE_ADD_PERIOD(&pUVCpu->vm.s.StatHaltTimers, cNsElapsedTimers);
340 if ( VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK)
341 || VMCPU_FF_ISPENDING(pVCpu, fMask))
340 if ( VM_FF_IS_PENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK)
341 || VMCPU_FF_IS_PENDING(pVCpu, fMask))
342 342 break;
343 343 uint64_t u64NanoTS;
344 344 TMTimerPollGIP(pVM, pVCpu, &u64NanoTS);
345 if ( VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK)
346 || VMCPU_FF_ISPENDING(pVCpu, fMask))
345 if ( VM_FF_IS_PENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK)
346 || VMCPU_FF_IS_PENDING(pVCpu, fMask))
347 347 break;
… …
537 537 uint64_t const cNsElapsedTimers = RTTimeNanoTS() - u64StartTimers;
538 538 STAM_REL_PROFILE_ADD_PERIOD(&pUVCpu->vm.s.StatHaltTimers, cNsElapsedTimers);
539 if ( VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK)
540 || VMCPU_FF_ISPENDING(pVCpu, fMask))
539 if ( VM_FF_IS_PENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK)
540 || VMCPU_FF_IS_PENDING(pVCpu, fMask))
541 541 break;
… …
546 546 uint64_t u64NanoTS;
547 547 TMTimerPollGIP(pVM, pVCpu, &u64NanoTS);
548 if ( VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK)
549 || VMCPU_FF_ISPENDING(pVCpu, fMask))
548 if ( VM_FF_IS_PENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK)
549 || VMCPU_FF_IS_PENDING(pVCpu, fMask))
550 550 break;
… …
686 686 uint64_t const cNsElapsedTimers = RTTimeNanoTS() - u64StartTimers;
687 687 STAM_REL_PROFILE_ADD_PERIOD(&pUVCpu->vm.s.StatHaltTimers, cNsElapsedTimers);
688 if ( VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK)
689 || VMCPU_FF_ISPENDING(pVCpu, fMask))
688 if ( VM_FF_IS_PENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK)
689 || VMCPU_FF_IS_PENDING(pVCpu, fMask))
690 690 break;
… …
696 696 uint64_t u64Delta;
697 697 uint64_t u64GipTime = TMTimerPollGIP(pVM, pVCpu, &u64Delta);
698 if ( VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK)
699 || VMCPU_FF_ISPENDING(pVCpu, fMask))
698 if ( VM_FF_IS_PENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK)
699 || VMCPU_FF_IS_PENDING(pVCpu, fMask))
700 700 break;
… …
706 706 {
707 707 VMMR3YieldStop(pVM);
708 if ( VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK)
709 || VMCPU_FF_ISPENDING(pVCpu, fMask))
708 if ( VM_FF_IS_PENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK)
709 || VMCPU_FF_IS_PENDING(pVCpu, fMask))
710 710 break;
… …
774 774 * Check Relevant FFs.
775 775 */
776 if ( VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_SUSPENDED_MASK)
777 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_EXTERNAL_SUSPENDED_MASK))
776 if ( VM_FF_IS_PENDING(pVM, VM_FF_EXTERNAL_SUSPENDED_MASK)
777 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_EXTERNAL_SUSPENDED_MASK))
778 778 break;
… …
858 858
859 859 if ( pUVCpu->pVM
860 && ( VM_FF_ISPENDING(pUVCpu->pVM, VM_FF_EXTERNAL_SUSPENDED_MASK)
861 || VMCPU_FF_ISPENDING(VMMGetCpu(pUVCpu->pVM), VMCPU_FF_EXTERNAL_SUSPENDED_MASK)
860 && ( VM_FF_IS_PENDING(pUVCpu->pVM, VM_FF_EXTERNAL_SUSPENDED_MASK)
861 || VMCPU_FF_IS_PENDING(VMMGetCpu(pUVCpu->pVM), VMCPU_FF_EXTERNAL_SUSPENDED_MASK)
862 862 )
863 863 )
… …
920 920 * Check Relevant FFs.
921 921 */
922 if ( VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_SUSPENDED_MASK)
923 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_EXTERNAL_SUSPENDED_MASK))
922 if ( VM_FF_IS_PENDING(pVM, VM_FF_EXTERNAL_SUSPENDED_MASK)
923 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_EXTERNAL_SUSPENDED_MASK))
924 924 break;
… …
1058 1058 ? VMCPU_FF_EXTERNAL_HALTED_MASK
1059 1059 : VMCPU_FF_EXTERNAL_HALTED_MASK & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC);
1060 if ( VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK)
1061 || VMCPU_FF_ISPENDING(pVCpu, fMask))
1060 if ( VM_FF_IS_PENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK)
1061 || VMCPU_FF_IS_PENDING(pVCpu, fMask))
1062 1062 {
1063 1063 LogFlow(("VMR3WaitHalted: returns VINF_SUCCESS (FF %#x FFCPU %#x)\n", pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions));
… …
1139 1139
1140 1140 if ( pVM
1141 && ( VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_SUSPENDED_MASK)
1142 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_EXTERNAL_SUSPENDED_MASK)
1141 && ( VM_FF_IS_PENDING(pVM, VM_FF_EXTERNAL_SUSPENDED_MASK)
1142 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_EXTERNAL_SUSPENDED_MASK)
1143 1143 )
1144 1144 )
-
trunk/src/VBox/VMM/VMMR3/VMM.cpp
r45808 r46420
1341 1341
1342 1342 #if 0 /* todo triggers too often */
1343 Assert(!VMCPU_FF_ISSET(pVCpu, VMCPU_FF_TO_R3));
1343 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TO_R3));
1344 1344 #endif
… …
1764 1764 while (!ASMAtomicCmpXchgU32(&pVM->vmm.s.u32RendezvousLock, 0x77778888, 0))
1765 1765 {
1766 if (VM_FF_ISPENDING(pVM, VM_FF_EMT_RENDEZVOUS))
1766 if (VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS))
1767 1767 {
1768 1768 rc = VMMR3EmtRendezvousFF(pVM, pVCpu);
… …
1776 1776 }
1777 1777 }
1778 Assert(!VM_FF_ISPENDING(pVM, VM_FF_EMT_RENDEZVOUS));
1778 Assert(!VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS));
1779 1779 Assert(!pVCpu->vmm.s.fInRendezvous);
1780 1780 pVCpu->vmm.s.fInRendezvous = true;
… …
2120 2120 * when entering other critsects here.
2121 2121 */
2122 if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PDM_CRITSECT))
2122 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PDM_CRITSECT))
2123 2123 PDMCritSectBothFF(pVCpu);
2124 2124
-
trunk/src/VBox/VMM/VMMRC/PDMRCDevice.cpp
r44902 r46420
411 411
412 412 LogFlow(("pdmRCPicHlp_SetInterruptFF: caller=%p/%d: VMMCPU_FF_INTERRUPT_PIC %d -> 1\n",
413 pDevIns, pDevIns->iInstance, VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INTERRUPT_PIC)));
413 pDevIns, pDevIns->iInstance, VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_PIC)));
414 414
415 415 VMCPU_FF_SET(pVCpu, VMCPU_FF_INTERRUPT_PIC);
… …
436 436
437 437 LogFlow(("pdmRCPicHlp_ClearInterruptFF: caller=%p/%d: VMCPU_FF_INTERRUPT_PIC %d -> 0\n",
438 pDevIns, pDevIns->iInstance, VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INTERRUPT_PIC)));
438 pDevIns, pDevIns->iInstance, VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_PIC)));
439 439
440 440 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_PIC);
… …
490 490
491 491 LogFlow(("pdmRCApicHlp_SetInterruptFF: caller=%p/%d: VM_FF_INTERRUPT %d -> 1\n",
492 pDevIns, pDevIns->iInstance, VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INTERRUPT_APIC)));
492 pDevIns, pDevIns->iInstance, VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC)));
493 493 switch (enmType)
494 494 {
… …
522 522
523 523 LogFlow(("pdmRCApicHlp_ClearInterruptFF: caller=%p/%d: VM_FF_INTERRUPT %d -> 0\n",
524 pDevIns, pDevIns->iInstance, VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INTERRUPT_APIC)));
524 pDevIns, pDevIns->iInstance, VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC)));
525 525
526 526 /* Note: NMI/SMI can't be cleared. */
-
trunk/src/VBox/VMM/VMMRC/TRPMRCHandlers.cpp
r45485 r46420
181 181 TMTimerPollVoid(pVM, pVCpu);
182 182 Log2(("TMTimerPoll at %08RX32 - VM_FF_TM_VIRTUAL_SYNC=%d VM_FF_TM_VIRTUAL_SYNC=%d\n", pRegFrame->eip,
183 VM_FF_ISPENDING(pVM, VM_FF_TM_VIRTUAL_SYNC), VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_TIMER)));
183 VM_FF_IS_PENDING(pVM, VM_FF_TM_VIRTUAL_SYNC), VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TIMER)));
184 184 }
185 185 }
… …
189 189
190 190 /* Clear pending inhibit interrupt state if required. (necessary for dispatching interrupts later on) */
191 if (VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
191 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
192 192 {
193 193 Log2(("VM_FF_INHIBIT_INTERRUPTS at %08RX32 successor %RGv\n", pRegFrame->eip, EMGetInhibitInterruptsPC(pVCpu)));
… …
208 208 */
209 209 if ( rc == VINF_SUCCESS
210 && ( VM_FF_ISPENDING(pVM, VM_FF_TM_VIRTUAL_SYNC | VM_FF_REQUEST | VM_FF_PGM_NO_MEMORY | VM_FF_PDM_DMA)
211 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_TIMER | VMCPU_FF_TO_R3 | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC
210 && ( VM_FF_IS_PENDING(pVM, VM_FF_TM_VIRTUAL_SYNC | VM_FF_REQUEST | VM_FF_PGM_NO_MEMORY | VM_FF_PDM_DMA)
211 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TIMER | VMCPU_FF_TO_R3 | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC
212 212 | VMCPU_FF_REQUEST | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
213 213 | VMCPU_FF_PDM_CRITSECT
… …
218 218 {
219 219 /* The out of memory condition naturally outranks the others. */
220 if (RT_UNLIKELY(VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY)))
220 if (RT_UNLIKELY(VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)))
221 221 rc = VINF_EM_NO_MEMORY;
222 222 /* Pending Ring-3 action. */
223 else if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_TO_R3 | VMCPU_FF_PDM_CRITSECT))
223 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TO_R3 | VMCPU_FF_PDM_CRITSECT))
224 224 {
225 225 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TO_R3);
… …
227 227 }
228 228 /* Pending timer action. */
229 else if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_TIMER))
229 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TIMER))
230 230 rc = VINF_EM_RAW_TIMER_PENDING;
231 231 /* The Virtual Sync clock has stopped. */
232 else if (VM_FF_ISPENDING(pVM, VM_FF_TM_VIRTUAL_SYNC))
232 else if (VM_FF_IS_PENDING(pVM, VM_FF_TM_VIRTUAL_SYNC))
233 233 rc = VINF_EM_RAW_TO_R3;
234 234 /* DMA work pending? */
235 else if (VM_FF_ISPENDING(pVM, VM_FF_PDM_DMA))
235 else if (VM_FF_IS_PENDING(pVM, VM_FF_PDM_DMA))
236 236 rc = VINF_EM_RAW_TO_R3;
237 237 /* Pending request packets might contain actions that need immediate
238 238 attention, such as pending hardware interrupts. */
239 else if ( VM_FF_ISPENDING(pVM, VM_FF_REQUEST)
240 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_REQUEST))
239 else if ( VM_FF_IS_PENDING(pVM, VM_FF_REQUEST)
240 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_REQUEST))
241 241 rc = VINF_EM_PENDING_REQUEST;
242 242 /* Pending GDT/LDT/TSS sync. */
243 else if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT | VMCPU_FF_SELM_SYNC_TSS))
243 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT | VMCPU_FF_SELM_SYNC_TSS))
244 244 rc = VINF_SELM_SYNC_GDT;
245 245 /* Pending interrupt: dispatch it. */
246 else if ( VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
247 && !VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
246 else if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
247 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
248 248 && PATMAreInterruptsEnabledByCtxCore(pVM, pRegFrame)
249 249 )
… …
270 270 * Try sync CR3?
271 271 */
272 else if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
272 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
273 273 {
274 274 #if 1
275 275 PGMRZDynMapReleaseAutoSet(pVCpu);
276 276 PGMRZDynMapStartAutoSet(pVCpu);
277 rc = PGMSyncCR3(pVCpu, CPUMGetGuestCR0(pVCpu), CPUMGetGuestCR3(pVCpu), CPUMGetGuestCR4(pVCpu), VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
277 rc = PGMSyncCR3(pVCpu, CPUMGetGuestCR0(pVCpu), CPUMGetGuestCR3(pVCpu), CPUMGetGuestCR4(pVCpu), VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
278 278 #else
279 279 rc = VINF_PGM_SYNC_CR3;
-
trunk/src/VBox/VMM/include/EMHandleRCTmpl.h
r45693 r46420
131 131 */
132 132 case VINF_PGM_SYNC_CR3:
133 AssertMsg(VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL),
133 AssertMsg(VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL),
134 134 ("VINF_PGM_SYNC_CR3 and no VMCPU_FF_PGM_SYNC_CR3*!\n"));
135 135 rc = VINF_SUCCESS;
… …
289 289 */
290 290 case VINF_SELM_SYNC_GDT:
291 AssertMsg(VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT | VMCPU_FF_SELM_SYNC_TSS),
291 AssertMsg(VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT | VMCPU_FF_SELM_SYNC_TSS),
292 292 ("VINF_SELM_SYNC_GDT without VMCPU_FF_SELM_SYNC_GDT/LDT/TSS!\n"));
293 293 rc = VINF_SUCCESS;
-
trunk/src/recompiler/VBoxRecompiler.c
r46160 r46420
1149 1149 pVM->rem.s.Env.interrupt_request = CPU_INTERRUPT_SINGLE_INSTR;
1150 1150 #endif
1151 if ( VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
1151 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
1152 1152 || pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ)
1153 1153 pVM->rem.s.Env.interrupt_request |= CPU_INTERRUPT_HARD;
… …
1175 1175 */
1176 1176 case EXCP_SINGLE_INSTR:
1177 if ( !VM_FF_ISPENDING(pVM, VM_FF_ALL_REM_MASK)
1178 && !VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_ALL_REM_MASK))
1177 if ( !VM_FF_IS_PENDING(pVM, VM_FF_ALL_REM_MASK)
1178 && !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_ALL_REM_MASK))
1179 1179 continue;
1180 1180 RTLogPrintf("remR3RunLoggingStep: rc=VINF_SUCCESS w/ FFs (%#x/%#x)\n",
… …
1208 1208 if (rc == VINF_EM_DBG_STEPPED)
1209 1209 {
1210 if ( !VM_FF_ISPENDING(pVM, VM_FF_ALL_REM_MASK)
1211 && !VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_ALL_REM_MASK))
1210 if ( !VM_FF_IS_PENDING(pVM, VM_FF_ALL_REM_MASK)
1211 && !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_ALL_REM_MASK))
1212 1212 continue;
… …
2273 2273 /* Update the inhibit IRQ mask. */
2274 2274 pVM->rem.s.Env.hflags &= ~HF_INHIBIT_IRQ_MASK;
2275 if (VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
2275 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
2276 2276 {
2277 2277 RTGCPTR InhibitPC = EMGetInhibitInterruptsPC(pVCpu);
… …
2536 2536 pVM->rem.s.Env.interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER);
2537 2537 if ( pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ
2538 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
2538 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
2539 2539 pVM->rem.s.Env.interrupt_request |= CPU_INTERRUPT_HARD;
… …
2763 2763 VMCPU_FF_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
2764 2764 }
2765 else if (VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
2765 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
2766 2766 {
2767 2767 Log(("Clearing VMCPU_FF_INHIBIT_INTERRUPTS at %RGv - successor %RGv (REM#2)\n", (RTGCPTR)pCtx->rip, EMGetInhibitInterruptsPC(pVCpu)));
… …
3062 3062
3063 3063 /** @todo this isn't ensuring correct replay order. */
3064 if (VM_FF_TESTANDCLEAR(pVM, VM_FF_REM_HANDLER_NOTIFY))
3064 if (VM_FF_TEST_AND_CLEAR(pVM, VM_FF_REM_HANDLER_NOTIFY))
3065 3065 {
3066 3066 uint32_t idxNext;
… …
4518 4518 if (RT_SUCCESS(rc))
4519 4519 {
4520 if (VMCPU_FF_ISPENDING(env->pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
4520 if (VMCPU_FF_IS_PENDING(env->pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
4521 4521 env->interrupt_request |= CPU_INTERRUPT_HARD;
4522 4522 return u8Interrupt;
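Taken together, the hunks in this changeset are a mechanical rename: VM_FF_ISSET/VMCPU_FF_ISSET become VM_FF_IS_SET/VMCPU_FF_IS_SET, VM_FF_ISPENDING/VMCPU_FF_ISPENDING become VM_FF_IS_PENDING/VMCPU_FF_IS_PENDING, and VM_FF_TESTANDCLEAR becomes VM_FF_TEST_AND_CLEAR. As a quick orientation for anyone auditing the rename, a hedged composite sketch of the post-rename call patterns seen above (fragment only; pVM, pVCpu and the VM_FF_*/VMCPU_FF_* constants come from the VMM headers and are assumed here):

    /* Any of several per-VCPU flags pending? (mask test) */
    if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
        /* ... dispatch the pending interrupt ... */;

    /* A specific flag set? */
    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
        /* ... keep interrupts inhibited for the next instruction ... */;

    /* Atomically consume a one-shot work flag. */
    if (VM_FF_TEST_AND_CLEAR(pVM, VM_FF_PDM_DMA))
        /* ... run the pending DMA transfers ... */;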