VirtualBox

Changeset 19141 in vbox


Ignore:
Timestamp:
Apr 23, 2009 1:52:18 PM (16 years ago)
Author:
vboxsync
Message:

Action flags breakup.
Fixed PGM saved state loading of 2.2.2 images.
Reduced hacks in PATM state loading (fixups).

Location:
trunk
Files:
52 edited

Legend:

Unmodified
Added
Removed
  • trunk/include/VBox/csam.h

    r12989 r19141  
    259259 * @returns VBox status code.
    260260 * @param   pVM         The VM to operate on.
    261  */
    262 VMMR3DECL(int) CSAMR3DoPendingAction(PVM pVM);
     261 * @param   pVCpu       The VMCPU to operate on.
     262 */
     263VMMR3DECL(int) CSAMR3DoPendingAction(PVM pVM, PVMCPU pVCpu);
    263264
    264265/**
  • trunk/include/VBox/em.h

    r18927 r19141  
    117117#define EMIsRawRing0Enabled(pVM) ((pVM)->fRawR0Enabled)
    118118
    119 VMMDECL(void)       EMSetInhibitInterruptsPC(PVM pVM, PVMCPU pVCpu, RTGCUINTPTR PC);
    120 VMMDECL(RTGCUINTPTR) EMGetInhibitInterruptsPC(PVM pVM, PVMCPU pVCpu);
     119VMMDECL(void)       EMSetInhibitInterruptsPC(PVMCPU pVCpu, RTGCUINTPTR PC);
     120VMMDECL(RTGCUINTPTR) EMGetInhibitInterruptsPC(PVMCPU pVCpu);
    121121VMMDECL(int)        EMInterpretDisasOne(PVM pVM, PVMCPU pVCpu, PCCPUMCTXCORE pCtxCore, PDISCPUSTATE pCpu, unsigned *pcbInstr);
    122122VMMDECL(int)        EMInterpretDisasOneEx(PVM pVM, PVMCPU pVCpu, RTGCUINTPTR GCPtrInstr, PCCPUMCTXCORE pCtxCore,
  • trunk/include/VBox/pdmapi.h

    r18618 r19141  
    4343 */
    4444
    45 VMMDECL(int)    PDMGetInterrupt(PVM pVM, uint8_t *pu8Interrupt);
     45VMMDECL(int)    PDMGetInterrupt(PVMCPU pVCpu, uint8_t *pu8Interrupt);
    4646VMMDECL(int)    PDMIsaSetIrq(PVM pVM, uint8_t u8Irq, uint8_t u8Level);
    4747VMMDECL(int)    PDMIoApicSetIrq(PVM pVM, uint8_t u8Irq, uint8_t u8Level);
  • trunk/include/VBox/vm.h

    r19015 r19141  
    8282    /** Per CPU forced action.
    8383     * See the VMCPU_FF_* \#defines. Updated atomically. */
    84     uint32_t volatile       fForcedActions;
     84    uint32_t volatile       fLocalForcedActions;
    8585    /** The CPU state. */
    8686    VMCPUSTATE volatile     enmState;
     
    187187 * @{
    188188 */
    189 /** This action forces the VM to service check and pending interrupts on the APIC. */
    190 #define VM_FF_INTERRUPT_APIC            RT_BIT_32(0)
    191 /** This action forces the VM to service check and pending interrupts on the PIC. */
    192 #define VM_FF_INTERRUPT_PIC             RT_BIT_32(1)
    193189/** This action forces the VM to schedule and run pending timer (TM). */
    194 #define VM_FF_TIMER                     RT_BIT_32(2)
     190#define VM_FF_TIMER                         RT_BIT_32(2)
    195191/** PDM Queues are pending. */
    196 #define VM_FF_PDM_QUEUES                RT_BIT_32(3)
     192#define VM_FF_PDM_QUEUES                    RT_BIT_32(3)
    197193/** PDM DMA transfers are pending. */
    198 #define VM_FF_PDM_DMA                   RT_BIT_32(4)
     194#define VM_FF_PDM_DMA                       RT_BIT_32(4)
    199195/** PDM critical section unlocking is pending, process promptly upon return to R3. */
    200 #define VM_FF_PDM_CRITSECT              RT_BIT_32(5)
    201 
     196#define VM_FF_PDM_CRITSECT                  RT_BIT_32(5)
    202197/** This action forces the VM to call DBGF so DBGF can service debugger
    203198 * requests in the emulation thread.
    204199 * This action flag stays asserted till DBGF clears it.*/
    205 #define VM_FF_DBGF                      RT_BIT_32(8)
     200#define VM_FF_DBGF                          RT_BIT_32(8)
    206201/** This action forces the VM to service pending requests from other
    207202 * thread or requests which must be executed in another context. */
    208 #define VM_FF_REQUEST                   RT_BIT_32(9)
     203#define VM_FF_REQUEST                       RT_BIT_32(9)
    209204/** Terminate the VM immediately. */
    210 #define VM_FF_TERMINATE                 RT_BIT_32(10)
     205#define VM_FF_TERMINATE                     RT_BIT_32(10)
    211206/** Reset the VM. (postponed) */
    212 #define VM_FF_RESET                     RT_BIT_32(11)
    213 
    214 /** This action forces the VM to resync the page tables before going
    215  * back to execute guest code. (GLOBAL FLUSH) */
    216 #define VM_FF_PGM_SYNC_CR3              RT_BIT_32(16)
    217 /** Same as VM_FF_PGM_SYNC_CR3 except that global pages can be skipped.
    218  * (NON-GLOBAL FLUSH) */
    219 #define VM_FF_PGM_SYNC_CR3_NON_GLOBAL   RT_BIT_32(17)
     207#define VM_FF_RESET                         RT_BIT_32(11)
    220208/** PGM needs to allocate handy pages. */
    221 #define VM_FF_PGM_NEED_HANDY_PAGES      RT_BIT_32(18)
     209#define VM_FF_PGM_NEED_HANDY_PAGES          RT_BIT_32(18)
    222210/** PGM is out of memory.
    223211 * Abandon all loops and code paths which can be resumed and get up to the EM
    224212 * loops. */
    225 #define VM_FF_PGM_NO_MEMORY             RT_BIT_32(19)
     213#define VM_FF_PGM_NO_MEMORY                 RT_BIT_32(19)
     214/** REM needs to be informed about handler changes. */
     215#define VM_FF_REM_HANDLER_NOTIFY            RT_BIT_32(29)
     216/** Suspend the VM - debug only. */
     217#define VM_FF_DEBUG_SUSPEND                 RT_BIT_32(31)
     218
     219
     220/** This action forces the VM to service check and pending interrupts on the APIC. */
     221#define VMCPU_FF_INTERRUPT_APIC             RT_BIT_32(0)
     222/** This action forces the VM to service check and pending interrupts on the PIC. */
     223#define VMCPU_FF_INTERRUPT_PIC              RT_BIT_32(1)
     224/** This action forces the VM to schedule and run pending timer (TM). (bogus for now; needed for PATM backwards compatibility) */
     225#define VMCPU_FF_TIMER                      RT_BIT_32(2)
     226/** This action forces the VM to service pending requests from other
     227 * thread or requests which must be executed in another context. */
     228#define VMCPU_FF_REQUEST                    RT_BIT_32(9)
     229/** This action forces the VM to resync the page tables before going
     230 * back to execute guest code. (GLOBAL FLUSH) */
     231#define VMCPU_FF_PGM_SYNC_CR3               RT_BIT_32(16)
     232/** Same as VM_FF_PGM_SYNC_CR3 except that global pages can be skipped.
     233 * (NON-GLOBAL FLUSH) */
     234#define VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL    RT_BIT_32(17)
    226235/** Check the interrupt and trap gates */
    227 #define VM_FF_TRPM_SYNC_IDT             RT_BIT_32(20)
     236#define VMCPU_FF_TRPM_SYNC_IDT              RT_BIT_32(20)
    228237/** Check Guest's TSS ring 0 stack */
    229 #define VM_FF_SELM_SYNC_TSS             RT_BIT_32(21)
     238#define VMCPU_FF_SELM_SYNC_TSS              RT_BIT_32(21)
    230239/** Check Guest's GDT table */
    231 #define VM_FF_SELM_SYNC_GDT             RT_BIT_32(22)
     240#define VMCPU_FF_SELM_SYNC_GDT              RT_BIT_32(22)
    232241/** Check Guest's LDT table */
    233 #define VM_FF_SELM_SYNC_LDT             RT_BIT_32(23)
     242#define VMCPU_FF_SELM_SYNC_LDT              RT_BIT_32(23)
    234243/** Inhibit interrupts pending. See EMGetInhibitInterruptsPC(). */
    235 #define VM_FF_INHIBIT_INTERRUPTS        RT_BIT_32(24)
    236 
     244#define VMCPU_FF_INHIBIT_INTERRUPTS         RT_BIT_32(24)
    237245/** CSAM needs to scan the page that's being executed */
    238 #define VM_FF_CSAM_SCAN_PAGE            RT_BIT_32(26)
     246#define VMCPU_FF_CSAM_SCAN_PAGE             RT_BIT_32(26)
    239247/** CSAM needs to do some homework. */
    240 #define VM_FF_CSAM_PENDING_ACTION       RT_BIT_32(27)
    241 
     248#define VMCPU_FF_CSAM_PENDING_ACTION        RT_BIT_32(27)
    242249/** Force return to Ring-3. */
    243 #define VM_FF_TO_R3                     RT_BIT_32(28)
    244 
    245 /** REM needs to be informed about handler changes. */
    246 #define VM_FF_REM_HANDLER_NOTIFY        RT_BIT_32(29)
    247 
    248 /** Suspend the VM - debug only. */
    249 #define VM_FF_DEBUG_SUSPEND             RT_BIT_32(31)
    250 
    251 /** Externally forced actions. Used to quit the idle/wait loop. */
    252 #define VM_FF_EXTERNAL_SUSPENDED_MASK   (VM_FF_TERMINATE | VM_FF_DBGF | VM_FF_REQUEST)
    253 /** Externally forced actions. Used to quit the idle/wait loop. */
    254 #define VM_FF_EXTERNAL_HALTED_MASK      (VM_FF_TERMINATE | VM_FF_DBGF | VM_FF_TIMER | VM_FF_INTERRUPT_APIC | VM_FF_INTERRUPT_PIC | VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA)
    255 /** High priority pre-execution actions. */
    256 #define VM_FF_HIGH_PRIORITY_PRE_MASK    (VM_FF_TERMINATE | VM_FF_DBGF | VM_FF_INTERRUPT_APIC | VM_FF_INTERRUPT_PIC | VM_FF_TIMER | VM_FF_DEBUG_SUSPEND \
    257                                         | VM_FF_PGM_SYNC_CR3 | VM_FF_PGM_SYNC_CR3_NON_GLOBAL | VM_FF_SELM_SYNC_TSS | VM_FF_TRPM_SYNC_IDT | VM_FF_SELM_SYNC_GDT | VM_FF_SELM_SYNC_LDT | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY)
    258 /** High priority pre raw-mode execution mask. */
    259 #define VM_FF_HIGH_PRIORITY_PRE_RAW_MASK (VM_FF_PGM_SYNC_CR3 | VM_FF_PGM_SYNC_CR3_NON_GLOBAL | VM_FF_SELM_SYNC_TSS | VM_FF_TRPM_SYNC_IDT | VM_FF_SELM_SYNC_GDT | VM_FF_SELM_SYNC_LDT \
    260                                          | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_INHIBIT_INTERRUPTS | VM_FF_PGM_NO_MEMORY)
     250#define VMCPU_FF_TO_R3                      RT_BIT_32(28)
     251
     252/** Externally VM forced actions. Used to quit the idle/wait loop. */
     253#define VM_FF_EXTERNAL_SUSPENDED_MASK           (VM_FF_TERMINATE | VM_FF_DBGF | VM_FF_REQUEST)
     254/** Externally VMCPU forced actions. Used to quit the idle/wait loop. */
     255#define VMCPU_FF_EXTERNAL_SUSPENDED_MASK        (VMCPU_FF_REQUEST)
     256
     257/** Externally forced VM actions. Used to quit the idle/wait loop. */
     258#define VM_FF_EXTERNAL_HALTED_MASK              (VM_FF_TERMINATE | VM_FF_DBGF | VM_FF_TIMER | VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA)
     259/** Externally forced VMCPU actions. Used to quit the idle/wait loop. */
     260#define VMCPU_FF_EXTERNAL_HALTED_MASK           (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_REQUEST)
     261
     262/** High priority VM pre-execution actions. */
     263#define VM_FF_HIGH_PRIORITY_PRE_MASK            (   VM_FF_TERMINATE | VM_FF_DBGF | VM_FF_TIMER | VM_FF_DEBUG_SUSPEND \
     264                                                 |  VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY)
     265/** High priority VMCPU pre-execution actions. */
     266#define VMCPU_FF_HIGH_PRIORITY_PRE_MASK         (   VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC  \
     267                                                 |  VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_SELM_SYNC_TSS | VMCPU_FF_TRPM_SYNC_IDT | VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT)
     268
     269/** High priority VM pre raw-mode execution mask. */
     270#define VM_FF_HIGH_PRIORITY_PRE_RAW_MASK        (VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY)
     271/** High priority VMCPU pre raw-mode execution mask. */
     272#define VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK     (  VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_SELM_SYNC_TSS | VMCPU_FF_TRPM_SYNC_IDT | VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT \
     273                                                 | VMCPU_FF_INHIBIT_INTERRUPTS)
     274
    261275/** High priority post-execution actions. */
    262 #define VM_FF_HIGH_PRIORITY_POST_MASK   (VM_FF_PDM_CRITSECT | VM_FF_CSAM_PENDING_ACTION | VM_FF_PGM_NO_MEMORY)
    263 /** Normal priority post-execution actions. */
    264 #define VM_FF_NORMAL_PRIORITY_POST_MASK (VM_FF_TERMINATE | VM_FF_DBGF | VM_FF_RESET | VM_FF_CSAM_SCAN_PAGE | VM_FF_PGM_NO_MEMORY)
     276#define VM_FF_HIGH_PRIORITY_POST_MASK           (VM_FF_PDM_CRITSECT | VM_FF_PGM_NO_MEMORY)
     277/** High priority post-execution actions. */
     278#define VMCPU_FF_HIGH_PRIORITY_POST_MASK        (VMCPU_FF_CSAM_PENDING_ACTION)
     279
     280/** Normal priority VM post-execution actions. */
     281#define VM_FF_NORMAL_PRIORITY_POST_MASK         (VM_FF_TERMINATE | VM_FF_DBGF | VM_FF_RESET | VM_FF_PGM_NO_MEMORY)
     282/** Normal priority VMCPU post-execution actions. */
     283#define VMCPU_FF_NORMAL_PRIORITY_POST_MASK      (VMCPU_FF_CSAM_SCAN_PAGE)
     284
    265285/** Normal priority actions. */
    266 #define VM_FF_NORMAL_PRIORITY_MASK      (VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA | VM_FF_REM_HANDLER_NOTIFY)
     286#define VM_FF_NORMAL_PRIORITY_MASK              (VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA | VM_FF_REM_HANDLER_NOTIFY)
     287
    267288/** Flags to clear before resuming guest execution. */
    268 #define VM_FF_RESUME_GUEST_MASK         (VM_FF_TO_R3)
    269 /** Flags that causes the HWACCM loops to go back to ring-3. */
    270 #define VM_FF_HWACCM_TO_R3_MASK         (VM_FF_TO_R3 | VM_FF_TIMER | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY)
     289#define VMCPU_FF_RESUME_GUEST_MASK              (VMCPU_FF_TO_R3)
     290
     291/** VM Flags that cause the HWACCM loops to go back to ring-3. */
     292#define VM_FF_HWACCM_TO_R3_MASK                 (VM_FF_TIMER | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY)
     293/** VMCPU Flags that cause the HWACCM loops to go back to ring-3. */
     294#define VMCPU_FF_HWACCM_TO_R3_MASK               (VMCPU_FF_TO_R3)
     295
    271296/** All the forced flags. */
    272 #define VM_FF_ALL_MASK                  (~0U)
    273 /** All the forced flags. */
    274 #define VM_FF_ALL_BUT_RAW_MASK          (~(VM_FF_HIGH_PRIORITY_PRE_RAW_MASK | VM_FF_CSAM_PENDING_ACTION | VM_FF_PDM_CRITSECT) | VM_FF_PGM_NO_MEMORY)
     297#define VM_FF_ALL_MASK                          (~0U)
     298/** All the forced VM flags. */
     299#define VM_FF_ALL_BUT_RAW_MASK                  (~(VM_FF_HIGH_PRIORITY_PRE_RAW_MASK | VM_FF_PDM_CRITSECT) | VM_FF_PGM_NO_MEMORY)
     300/** All the forced VMCPU flags. */
     301#define VMCPU_FF_ALL_BUT_RAW_MASK               (~(VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK | VMCPU_FF_CSAM_PENDING_ACTION))
    275302
    276303/** @} */
     
    283310 */
    284311#if 1
    285 # define VM_FF_SET(pVM, fFlag)              ASMAtomicOrU32(&(pVM)->fForcedActions, (fFlag))
     312# define VM_FF_SET(pVM, fFlag)              ASMAtomicOrU32(&(pVM)->fGlobalForcedActions, (fFlag))
    286313#else
    287314# define VM_FF_SET(pVM, fFlag) \
    288     do { ASMAtomicOrU32(&(pVM)->fForcedActions, (fFlag)); \
    289          RTLogPrintf("VM_FF_SET  : %08x %s - %s(%d) %s\n", (pVM)->fForcedActions, #fFlag, __FILE__, __LINE__, __FUNCTION__); \
     315    do { ASMAtomicOrU32(&(pVM)->fGlobalForcedActions, (fFlag)); \
     316         RTLogPrintf("VM_FF_SET  : %08x %s - %s(%d) %s\n", (pVM)->fGlobalForcedActions, #fFlag, __FILE__, __LINE__, __FUNCTION__); \
    290317    } while (0)
    291318#endif
    292319
    293320/** @def VMCPU_FF_SET
    294  * Sets a force action flag for given VCPU.
    295  *
    296  * @param   pVM     VM Handle.
    297  * @param   idCpu   Virtual CPU ID.
     321 * Sets a force action flag for the given VCPU.
     322 *
     323 * @param   pVCpu     VMCPU Handle.
    298324 * @param   fFlag   The flag to set.
    299325 */
    300 #ifdef VBOX_WITH_SMP_GUESTS
    301 # define VMCPU_FF_SET(pVM, idCpu, fFlag)    ASMAtomicOrU32(&(pVM)->aCpu[idCpu].fForcedActions, (fFlag))
    302 #else
    303 # define VMCPU_FF_SET(pVM, idCpu, fFlag)    VM_FF_SET(pVM, fFlag)
     326#if 1 //def VBOX_WITH_SMP_GUESTS
     327# define VMCPU_FF_SET(pVCpu, fFlag)    ASMAtomicOrU32(&(pVCpu)->fLocalForcedActions, (fFlag))
     328#else
     329# define VMCPU_FF_SET(pVCpu, fFlag)    ASMAtomicOrU32(&(pVCpu)->CTX_SUFF(pVM)->fGlobalForcedActions, (fFlag))
    304330#endif
    305331
     
    311337 */
    312338#if 1
    313 # define VM_FF_CLEAR(pVM, fFlag)            ASMAtomicAndU32(&(pVM)->fForcedActions, ~(fFlag))
     339# define VM_FF_CLEAR(pVM, fFlag)            ASMAtomicAndU32(&(pVM)->fGlobalForcedActions, ~(fFlag))
    314340#else
    315341# define VM_FF_CLEAR(pVM, fFlag) \
    316     do { ASMAtomicAndU32(&(pVM)->fForcedActions, ~(fFlag)); \
    317          RTLogPrintf("VM_FF_CLEAR: %08x %s - %s(%d) %s\n", (pVM)->fForcedActions, #fFlag, __FILE__, __LINE__, __FUNCTION__); \
     342    do { ASMAtomicAndU32(&(pVM)->fGlobalForcedActions, ~(fFlag)); \
     343         RTLogPrintf("VM_FF_CLEAR: %08x %s - %s(%d) %s\n", (pVM)->fGlobalForcedActions, #fFlag, __FILE__, __LINE__, __FUNCTION__); \
    318344    } while (0)
    319345#endif
    320346
    321347/** @def VMCPU_FF_CLEAR
    322  * Clears a force action flag for given VCPU.
    323  *
    324  * @param   pVM     VM Handle.
    325  * @param   idCpu   Virtual CPU ID.
     348 * Clears a force action flag for the given VCPU.
     349 *
     350 * @param   pVCpu     VMCPU Handle.
    326351 * @param   fFlag   The flag to clear.
    327352 */
    328 #ifdef VBOX_WITH_SMP_GUESTS
    329 # define VMCPU_FF_CLEAR(pVM, idCpu, fFlag)  ASMAtomicAndU32(&(pVM)->aCpu[idCpu].fForcedActions, ~(fFlag))
    330 #else
    331 # define VMCPU_FF_CLEAR(pVM, idCpu, fFlag)  VM_FF_CLEAR(pVM, fFlag)
     353#if 1 //def VBOX_WITH_SMP_GUESTS
     354# define VMCPU_FF_CLEAR(pVCpu, fFlag)  ASMAtomicAndU32(&(pVCpu)->fLocalForcedActions, ~(fFlag))
     355#else
     356# define VMCPU_FF_CLEAR(pVCpu, fFlag)  ASMAtomicAndU32(&(pVCpu)->CTX_SUFF(pVM)->fGlobalForcedActions, ~(fFlag))
    332357#endif
    333358
     
    338363 * @param   fFlag   The flag to check.
    339364 */
    340 #define VM_FF_ISSET(pVM, fFlag)             (((pVM)->fForcedActions & (fFlag)) == (fFlag))
     365#define VM_FF_ISSET(pVM, fFlag)             (((pVM)->fGlobalForcedActions & (fFlag)) == (fFlag))
    341366
    342367/** @def VMCPU_FF_ISSET
    343  * Checks if a force action flag is set for given VCPU.
    344  *
    345  * @param   pVM     VM Handle.
    346  * @param   idCpu   Virtual CPU ID.
     368 * Checks if a force action flag is set for the given VCPU.
     369 *
     370 * @param   pVCpu     VMCPU Handle.
    347371 * @param   fFlag   The flag to check.
    348372 */
    349 #ifdef VBOX_WITH_SMP_GUESTS
    350 # define VMCPU_FF_ISSET(pVM, idCpu, fFlag)  (((pVM)->aCpu[idCpu].fForcedActions & (fFlag)) == (fFlag))
    351 #else
    352 # define VMCPU_FF_ISSET(pVM, idCpu, fFlag)  VM_FF_ISSET(pVM, fFlag)
     373#if 1 //def VBOX_WITH_SMP_GUESTS
     374# define VMCPU_FF_ISSET(pVCpu, fFlag)  (((pVCpu)->fLocalForcedActions & (fFlag)) == (fFlag))
     375#else
     376# define VMCPU_FF_ISSET(pVCpu, fFlag)  (((pVCpu)->CTX_SUFF(pVM)->fGlobalForcedActions & (fFlag)) == (fFlag))
    353377#endif
    354378
     
    359383 * @param   fFlags  The flags to check for.
    360384 */
    361 #define VM_FF_ISPENDING(pVM, fFlags)        ((pVM)->fForcedActions & (fFlags))
     385#define VM_FF_ISPENDING(pVM, fFlags)        ((pVM)->fGlobalForcedActions & (fFlags))
    362386
    363387/** @def VMCPU_FF_ISPENDING
    364  * Checks if one or more force action in the specified set is pending for given VCPU.
    365  *
    366  * @param   pVM     VM Handle.
    367  * @param   idCpu   Virtual CPU ID.
     388 * Checks if one or more force action in the specified set is pending for the given VCPU.
     389 *
     390 * @param   pVCpu     VMCPU Handle.
    368391 * @param   fFlags  The flags to check for.
    369392 */
    370 #ifdef VBOX_WITH_SMP_GUESTS
    371 # define VMCPU_FF_ISPENDING(pVM, idCpu, fFlags) ((pVM)->aCpu[idCpu].fForcedActions & (fFlags))
    372 #else
    373 # define VMCPU_FF_ISPENDING(pVM, idCpu, fFlags) VM_FF_ISPENDING(pVM, fFlags)
     393#if 1 //def VBOX_WITH_SMP_GUESTS
     394# define VMCPU_FF_ISPENDING(pVCpu, fFlags) ((pVCpu)->fLocalForcedActions & (fFlags))
     395#else
     396# define VMCPU_FF_ISPENDING(pVCpu, fFlags) ((pVCpu)->CTX_SUFF(pVM)->fGlobalForcedActions & (fFlags))
    374397#endif
    375398
     
    382405 * @param   fExcpt  The flags that should not be set.
    383406 */
    384 #define VM_FF_IS_PENDING_EXCEPT(pVM, fFlags, fExcpt)            ( ((pVM)->fForcedActions & (fFlags)) && !((pVM)->fForcedActions & (fExcpt)) )
     407#define VM_FF_IS_PENDING_EXCEPT(pVM, fFlags, fExcpt)            ( ((pVM)->fGlobalForcedActions & (fFlags)) && !((pVM)->fGlobalForcedActions & (fExcpt)) )
    385408
    386409/** @def VMCPU_FF_IS_PENDING_EXCEPT
    387  * Checks if one or more force action in the specified set is pending for given
     410 * Checks if one or more force action in the specified set is pending for the given
    388411 * VCPU while one or more other ones are not.
    389412 *
    390  * @param   pVM     VM Handle.
    391  * @param   idCpu   Virtual CPU ID.
     413 * @param   pVCpu     VMCPU Handle.
    392414 * @param   fFlags  The flags to check for.
    393415 * @param   fExcpt  The flags that should not be set.
    394416 */
    395 #ifdef VBOX_WITH_SMP_GUESTS
    396 # define VMCPU_FF_IS_PENDING_EXCEPT(pVM, idCpu, fFlags, fExcpt) ( ((pVM)->aCpu[idCpu].fForcedActions & (fFlags)) && !((pVM)->aCpu[idCpu].fForcedActions & (fExcpt)) )
    397 #else
    398 # define VMCPU_FF_IS_PENDING_EXCEPT(pVM, idCpu, fFlags, fExcpt) VM_FF_ISPENDING(pVM, fFlags, fExcpt)
     417#if 1 //def VBOX_WITH_SMP_GUESTS
     418# define VMCPU_FF_IS_PENDING_EXCEPT(pVCpu, fFlags, fExcpt) ( ((pVCpu)->fLocalForcedActions & (fFlags)) && !((pVCpu)->fLocalForcedActions & (fExcpt)) )
     419#else
     420# define VMCPU_FF_IS_PENDING_EXCEPT(pVCpu, fFlags, fExcpt) ( ((pVCpu)->CTX_SUFF(pVM)->fGlobalForcedActions & (fFlags)) && !((pVCpu)->CTX_SUFF(pVM)->fGlobalForcedActions & (fExcpt)) )
    399421#endif
    400422
     
    487509     * See the VM_FF_* \#defines. Updated atomically.
    488510     */
    489     volatile uint32_t           fForcedActions;
     511    volatile uint32_t           fGlobalForcedActions;
    490512    /** Pointer to the array of page descriptors for the VM structure allocation. */
    491513    R3PTRTYPE(PSUPPAGE)         paVMPagesR3;
  • trunk/include/VBox/vm.mac

    r18927 r19141  
    3535
    3636;/** This action forces the VM to service check and pending interrupts on the APIC. */
    37 %define VM_FF_INTERRUPT_APIC            (1 << 0)
     37%define VMCPU_FF_INTERRUPT_APIC            (1 << 0)
    3838;/** This action forces the VM to service check and pending interrupts on the PIC. */
    39 %define VM_FF_INTERRUPT_PIC             (1 << 1)
     39%define VMCPU_FF_INTERRUPT_PIC             (1 << 1)
    4040;/** This action forces the VM to schedule and run pending timer (TM). */
    41 %define VM_FF_TIMER                     (1 << 2)
     41%define VMCPU_FF_TIMER                     (1 << 2)
    4242;/** This action forces the VM to service pending requests from other
    4343; * thread or requests which must be executed in another context. */
    44 %define VM_FF_REQUEST                   (1 << 9)
     44%define VMCPU_FF_REQUEST                   (1 << 9)
    4545
    4646;;
     
    4848struc VM
    4949    .enmVMState         resd 1
    50     .fForcedActions    resd 1
     50    .fGlobalForcedActions resd 1
    5151    .paVMPagesR3        RTR3PTR_RES 1
    5252    .pSession           RTR0PTR_RES 1
     
    105105; This is part of  the VMCPU structure.
    106106struc VMCPU
    107     .fForcedActions    resd 1
    108     .enmState           resd 1
    109     .pVMR3              RTR3PTR_RES 1
    110     .pVMR0              RTR0PTR_RES 1
    111     .pVMRC              RTRCPTR_RES 1
    112     .idCpu              resd 1
     107    .fLocalForcedActions resd 1
     108    .enmState            resd 1
     109    .pVMR3               RTR3PTR_RES 1
     110    .pVMR0               RTR0PTR_RES 1
     111    .pVMRC               RTRCPTR_RES 1
     112    .idCpu               resd 1
    113113
    114     .hNativeThread      RTR0PTR_RES 1
     114    .hNativeThread       RTR0PTR_RES 1
    115115
    116116    alignb 64
    117117
    118     .cpum               resb 2048
     118    .cpum                resb 4096
    119119endstruc
    120120
  • trunk/include/VBox/vmapi.h

    r18649 r19141  
    432432VMMR3DECL(void) VMR3NotifyFF(PVM pVM, bool fNotifiedREM);
    433433VMMR3DECL(void) VMR3NotifyFFU(PUVM pUVM, bool fNotifiedREM);
    434 VMMR3DECL(int)  VMR3WaitHalted(PVM pVM, bool fIgnoreInterrupts);
     434VMMR3DECL(int)  VMR3WaitHalted(PVM pVM, PVMCPU pVCpu, bool fIgnoreInterrupts);
    435435VMMR3DECL(int)  VMR3WaitU(PUVM pUVM);
    436436VMMR3DECL(RTCPUID)          VMR3GetVMCPUId(PVM pVM);
  • trunk/src/VBox/VMM/DBGF.cpp

    r18927 r19141  
    296296     * Clear the FF DBGF request flag.
    297297     */
    298     Assert(pVM->fForcedActions & VM_FF_DBGF);
     298    Assert(pVM->fGlobalForcedActions & VM_FF_DBGF);
    299299    VM_FF_CLEAR(pVM, VM_FF_DBGF);
    300300
  • trunk/src/VBox/VMM/EM.cpp

    r19076 r19141  
    926926            if (VM_FF_ISPENDING(pVM, VM_FF_REQUEST | VM_FF_TIMER | VM_FF_PDM_QUEUES | VM_FF_DBGF | VM_FF_TERMINATE | VM_FF_RESET))
    927927            {
    928                 LogFlow(("emR3RemExecute: Skipping run, because FF is set. %#x\n", pVM->fForcedActions));
     928                LogFlow(("emR3RemExecute: Skipping run, because FF is set. %#x\n", pVM->fGlobalForcedActions));
    929929                goto l_REMDoForcedActions;
    930930            }
     
    943943         * Deal with high priority post execution FFs before doing anything else.
    944944         */
    945         if (VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_POST_MASK))
     945        if (    VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_POST_MASK)
     946            ||  VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_MASK))
    946947            rc = emR3HighPriorityPostForcedActions(pVM, pVCpu, rc);
    947948
     
    973974        TMTimerPoll(pVM);
    974975#endif
    975         if (VM_FF_ISPENDING(pVM, VM_FF_ALL_BUT_RAW_MASK & ~(VM_FF_CSAM_PENDING_ACTION | VM_FF_CSAM_SCAN_PAGE)))
     976        if (    VM_FF_ISPENDING(pVM, VM_FF_ALL_BUT_RAW_MASK)
     977            ||  VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_ALL_BUT_RAW_MASK & ~(VMCPU_FF_CSAM_PENDING_ACTION | VMCPU_FF_CSAM_SCAN_PAGE)))
    976978        {
    977979l_REMDoForcedActions:
     
    10371039    Log(("emR3RawStep: cs:eip=%RTsel:%RGr efl=%RGr - returned from GC with rc=%Rrc\n", pCtx->cs, pCtx->eip, pCtx->eflags, rc));
    10381040    rc = CPUMRawLeave(pVCpu, NULL, rc);
    1039     VM_FF_CLEAR(pVM, VM_FF_RESUME_GUEST_MASK);
     1041    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_RESUME_GUEST_MASK);
    10401042
    10411043    /*
     
    10731075         * Check vital forced actions, but ignore pending interrupts and timers.
    10741076         */
    1075         if (VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_RAW_MASK))
     1077        if (    VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_RAW_MASK)
     1078            ||  VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK))
    10761079        {
    10771080            rc = emR3RawForcedActions(pVM, pVCpu, pCtx);
     
    11061109             || rc == VINF_EM_RAW_INTERRUPT);
    11071110    rc = CPUMRawLeave(pVCpu, NULL, rc);
    1108     VM_FF_CLEAR(pVM, VM_FF_RESUME_GUEST_MASK);
     1111    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_RESUME_GUEST_MASK);
    11091112
    11101113    /*
     
    11421145    int         rc;
    11431146    PCPUMCTX    pCtx   = pVCpu->em.s.pCtx;
    1144     VM_FF_CLEAR(pVM, (VM_FF_SELM_SYNC_GDT | VM_FF_SELM_SYNC_LDT | VM_FF_TRPM_SYNC_IDT | VM_FF_SELM_SYNC_TSS));
     1147    VMCPU_FF_CLEAR(pVCpu, (VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT | VMCPU_FF_TRPM_SYNC_IDT | VMCPU_FF_SELM_SYNC_TSS));
    11451148
    11461149    /*
    11471150     * Check vital forced actions, but ignore pending interrupts and timers.
    11481151     */
    1149     if (VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_RAW_MASK))
     1152    if (    VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_RAW_MASK)
     1153        ||  VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK))
    11501154    {
    11511155        rc = emR3RawForcedActions(pVM, pVCpu, pCtx);
     
    11671171    } while (   rc == VINF_SUCCESS
    11681172             || rc == VINF_EM_RAW_INTERRUPT);
    1169     VM_FF_CLEAR(pVM, VM_FF_RESUME_GUEST_MASK);
     1173    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_RESUME_GUEST_MASK);
    11701174
    11711175    /*
     
    21132117                case OP_STI:
    21142118                    pCtx->eflags.u32 |= X86_EFL_IF;
    2115                     EMSetInhibitInterruptsPC(pVM, pVCpu, pCtx->rip + Cpu.opsize);
     2119                    EMSetInhibitInterruptsPC(pVCpu, pCtx->rip + Cpu.opsize);
    21162120                    Assert(Cpu.opsize == 1);
    21172121                    pCtx->rip += Cpu.opsize;
     
    23602364         */
    23612365        case VINF_PGM_SYNC_CR3:
    2362             AssertMsg(VM_FF_ISPENDING(pVM, VM_FF_PGM_SYNC_CR3 | VM_FF_PGM_SYNC_CR3_NON_GLOBAL),
    2363                       ("VINF_PGM_SYNC_CR3 and no VM_FF_PGM_SYNC_CR3*!\n"));
     2366            AssertMsg(VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL),
     2367                      ("VINF_PGM_SYNC_CR3 and no VMCPU_FF_PGM_SYNC_CR3*!\n"));
    23642368            rc = VINF_SUCCESS;
    23652369            break;
     
    25872591     * Sync selector tables.
    25882592     */
    2589     if (VM_FF_ISPENDING(pVM, VM_FF_SELM_SYNC_GDT | VM_FF_SELM_SYNC_LDT))
     2593    if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT))
    25902594    {
    25912595        int rc = SELMR3UpdateFromCPUM(pVM, pVCpu);
     
    26012605     * PGMSyncCR3+pgmPoolClearAll is pending.
    26022606     */
    2603     if (VM_FF_ISPENDING(pVM, VM_FF_TRPM_SYNC_IDT))
    2604     {
    2605         if (   VM_FF_ISPENDING(pVM, VM_FF_PGM_SYNC_CR3)
     2607    if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_TRPM_SYNC_IDT))
     2608    {
     2609        if (   VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3)
    26062610            && EMIsRawRing0Enabled(pVM)
    26072611            && CSAMIsEnabled(pVM))
    26082612        {
    2609             int rc = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4, VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3));
     2613            int rc = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4, VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
    26102614            if (RT_FAILURE(rc))
    26112615                return rc;
     
    26202624     * Sync TSS.
    26212625     */
    2622     if (VM_FF_ISPENDING(pVM, VM_FF_SELM_SYNC_TSS))
     2626    if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_SELM_SYNC_TSS))
    26232627    {
    26242628        int rc = SELMR3SyncTSS(pVM, pVCpu);
     
    26302634     * Sync page directory.
    26312635     */
    2632     if (VM_FF_ISPENDING(pVM, VM_FF_PGM_SYNC_CR3 | VM_FF_PGM_SYNC_CR3_NON_GLOBAL))
    2633     {
    2634         int rc = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4, VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3));
     2636    if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
     2637    {
     2638        int rc = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4, VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
    26352639        if (RT_FAILURE(rc))
    26362640            return rc;
    26372641
    2638         Assert(!VM_FF_ISPENDING(pVM, VM_FF_SELM_SYNC_GDT | VM_FF_SELM_SYNC_LDT));
     2642        Assert(!VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT));
    26392643
    26402644        /* Prefetch pages for EIP and ESP. */
     
    26502654                return rc;
    26512655            }
    2652             rc = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4, VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3));
     2656            rc = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4, VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
    26532657            if (RT_FAILURE(rc))
    26542658                return rc;
    26552659        }
    26562660        /** @todo maybe prefetch the supervisor stack page as well */
    2657         Assert(!VM_FF_ISPENDING(pVM, VM_FF_SELM_SYNC_GDT | VM_FF_SELM_SYNC_LDT));
     2661        Assert(!VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT));
    26582662    }
    26592663
     
    27262730                  || PATMShouldUseRawMode(pVM, (RTGCPTR)pCtx->eip),
    27272731                  ("Tried to execute code with IF at EIP=%08x!\n", pCtx->eip));
    2728         if (    !VM_FF_ISPENDING(pVM, VM_FF_PGM_SYNC_CR3 | VM_FF_PGM_SYNC_CR3_NON_GLOBAL)
     2732        if (    !VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)
    27292733            &&  PGMMapHasConflicts(pVM))
    27302734        {
     
    27382742         * Process high priority pre-execution raw-mode FFs.
    27392743         */
    2740         if (VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_RAW_MASK))
     2744        if (    VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_RAW_MASK)
     2745            ||  VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK))
    27412746        {
    27422747            rc = emR3RawForcedActions(pVM, pVCpu, pCtx);
     
    27672772            CSAMR3CheckCodeEx(pVM, CPUMCTX2CORE(pCtx), pCtx->eip);
    27682773            STAM_PROFILE_ADV_RESUME(&pVCpu->em.s.StatRAWEntry, b);
    2769             if (VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_RAW_MASK))
     2774            if (    VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_RAW_MASK)
     2775                ||  VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK))
    27702776            {
    27712777                rc = emR3RawForcedActions(pVM, pVCpu, pCtx);
     
    28172823         */
    28182824        rc = CPUMRawLeave(pVCpu, NULL, rc);
    2819         VM_FF_CLEAR(pVM, VM_FF_RESUME_GUEST_MASK);
    2820         if (VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_POST_MASK))
     2825        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_RESUME_GUEST_MASK);
     2826        if (    VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_POST_MASK)
     2827            ||  VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_MASK))
    28212828            rc = emR3HighPriorityPostForcedActions(pVM, pVCpu, rc);
    28222829
     
    28252832         * Assert TSS consistency & rc vs patch code.
    28262833         */
    2827         if (   !VM_FF_ISPENDING(pVM, VM_FF_SELM_SYNC_TSS | VM_FF_SELM_SYNC_GDT) /* GDT implies TSS at the moment. */
     2834        if (   !VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_SELM_SYNC_TSS | VMCPU_FF_SELM_SYNC_GDT) /* GDT implies TSS at the moment. */
    28282835            &&  EMIsRawRing0Enabled(pVM))
    28292836            SELMR3CheckTSS(pVM);
     
    28492856         * Let's go paranoid!
    28502857         */
    2851         if (    !VM_FF_ISPENDING(pVM, VM_FF_PGM_SYNC_CR3 | VM_FF_PGM_SYNC_CR3_NON_GLOBAL)
     2858        if (    !VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)
    28522859            &&  PGMMapHasConflicts(pVM))
    28532860        {
     
    28842891#endif
    28852892        STAM_PROFILE_ADV_STOP(&pVCpu->em.s.StatRAWTail, d);
    2886         if (VM_FF_ISPENDING(pVM, ~VM_FF_HIGH_PRIORITY_PRE_RAW_MASK | VM_FF_PGM_NO_MEMORY))
     2893        if (    VM_FF_ISPENDING(pVM, ~VM_FF_HIGH_PRIORITY_PRE_RAW_MASK | VM_FF_PGM_NO_MEMORY)
     2894            ||  VMCPU_FF_ISPENDING(pVCpu, ~VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK))
    28872895        {
    28882896            Assert(pCtx->eflags.Bits.u1VM || (pCtx->ss & X86_SEL_RPL) != 1);
     
    29532961         * Process high priority pre-execution raw-mode FFs.
    29542962         */
    2955         VM_FF_CLEAR(pVM, (VM_FF_SELM_SYNC_GDT | VM_FF_SELM_SYNC_LDT | VM_FF_TRPM_SYNC_IDT | VM_FF_SELM_SYNC_TSS)); /* not relevant in HWACCM mode; shouldn't be set really. */
    2956         if (VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_RAW_MASK))
     2963        VMCPU_FF_CLEAR(pVCpu, (VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT | VMCPU_FF_TRPM_SYNC_IDT | VMCPU_FF_SELM_SYNC_TSS)); /* not relevant in HWACCM mode; shouldn't be set really. */
     2964        if (    VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_RAW_MASK)
     2965            ||  VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK))
    29572966        {
    29582967            rc = emR3RawForcedActions(pVM, pVCpu, pCtx);
     
    29902999         * Deal with high priority post execution FFs before doing anything else.
    29913000         */
    2992         VM_FF_CLEAR(pVM, VM_FF_RESUME_GUEST_MASK);
    2993         if (VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_POST_MASK))
     3001        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_RESUME_GUEST_MASK);
     3002        if (    VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_POST_MASK)
     3003            ||  VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_MASK))
    29943004            rc = emR3HighPriorityPostForcedActions(pVM, pVCpu, rc);
    29953005
     
    31943204        PDMR3CritSectFF(pVM);
    31953205
    3196     if (VM_FF_ISPENDING(pVM, VM_FF_CSAM_PENDING_ACTION))
    3197         CSAMR3DoPendingAction(pVM);
     3206    if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_CSAM_PENDING_ACTION))
     3207        CSAMR3DoPendingAction(pVM, pVCpu);
    31983208
    31993209    if (VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY))
     
    32463256     * Post execution chunk first.
    32473257     */
    3248     if (VM_FF_ISPENDING(pVM, VM_FF_NORMAL_PRIORITY_POST_MASK))
     3258    if (    VM_FF_ISPENDING(pVM, VM_FF_NORMAL_PRIORITY_POST_MASK)
     3259        ||  VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_NORMAL_PRIORITY_POST_MASK))
    32493260    {
    32503261        /*
     
    32803291         * CSAM page scanning.
    32813292         */
    3282         if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_CSAM_SCAN_PAGE, VM_FF_PGM_NO_MEMORY))
     3293        if (    !VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY)
     3294            &&  VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_CSAM_SCAN_PAGE))
    32833295        {
    32843296            PCPUMCTX pCtx = pVCpu->em.s.pCtx;
    32853297
    32863298            /** @todo: check for 16 or 32 bits code! (D bit in the code selector) */
    3287             Log(("Forced action VM_FF_CSAM_SCAN_PAGE\n"));
     3299            Log(("Forced action VMCPU_FF_CSAM_SCAN_PAGE\n"));
    32883300
    32893301            CSAMR3CheckCodeEx(pVM, CPUMCTX2CORE(pCtx), pCtx->eip);
    3290             VM_FF_CLEAR(pVM, VM_FF_CSAM_SCAN_PAGE);
     3302            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_CSAM_SCAN_PAGE);
    32913303        }
    32923304
     
    33033315
    33043316        /* check that we got them all  */
    3305         Assert(!(VM_FF_NORMAL_PRIORITY_POST_MASK & ~(VM_FF_TERMINATE | VM_FF_DBGF | VM_FF_RESET | VM_FF_CSAM_SCAN_PAGE | VM_FF_PGM_NO_MEMORY)));
     3317        Assert(!(VM_FF_NORMAL_PRIORITY_POST_MASK & ~(VM_FF_TERMINATE | VM_FF_DBGF | VM_FF_RESET | VM_FF_PGM_NO_MEMORY)));
     3318        Assert(!(VMCPU_FF_NORMAL_PRIORITY_POST_MASK & ~(VMCPU_FF_CSAM_SCAN_PAGE)));
    33063319    }
    33073320
     
    33513364     * (Executed in ascending priority order.)
    33523365     */
    3353     if (VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_MASK))
     3366    if (    VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_MASK)
     3367        ||  VMCPU_FF_ISPENDING(pVCpu, VM_FF_HIGH_PRIORITY_PRE_MASK))
    33543368    {
    33553369        /*
     
    33623376         * The instruction following an emulated STI should *always* be executed!
    33633377         */
    3364         if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_INHIBIT_INTERRUPTS, VM_FF_PGM_NO_MEMORY))
    3365         {
    3366             Log(("VM_FF_EMULATED_STI at %RGv successor %RGv\n", (RTGCPTR)CPUMGetGuestRIP(pVCpu), EMGetInhibitInterruptsPC(pVM, pVCpu)));
    3367             if (CPUMGetGuestEIP(pVCpu) != EMGetInhibitInterruptsPC(pVM, pVCpu))
     3378        if (    !VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY)
     3379            &&  VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
     3380        {
     3381            Log(("VM_FF_EMULATED_STI at %RGv successor %RGv\n", (RTGCPTR)CPUMGetGuestRIP(pVCpu), EMGetInhibitInterruptsPC(pVCpu)));
     3382            if (CPUMGetGuestEIP(pVCpu) != EMGetInhibitInterruptsPC(pVCpu))
    33683383            {
    33693384                /* Note: we intentionally don't clear VM_FF_INHIBIT_INTERRUPTS here if the eip is the same as the inhibited instr address.
     
    33723387                 *  break the guest. Sounds very unlikely, but such timing-sensitive problems are not as rare as you might think.
    33733388                 */
    3374                 VM_FF_CLEAR(pVM, VM_FF_INHIBIT_INTERRUPTS);
     3389                VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
    33753390            }
    33763391            if (HWACCMR3IsActive(pVM))
     
    33853400         * Interrupts.
    33863401         */
    3387         if (    !VM_FF_ISPENDING(pVM, VM_FF_INHIBIT_INTERRUPTS | VM_FF_PGM_NO_MEMORY)
     3402        if (    !VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY)
     3403            &&  !VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
    33883404            &&  (!rc || rc >= VINF_EM_RESCHEDULE_HWACC)
    33893405            &&  !TRPMHasTrap(pVCpu) /* an interrupt could already be scheduled for dispatching in the recompiler. */
     
    33913407            &&  !HWACCMR3IsEventPending(pVM))
    33923408        {
    3393             if (VM_FF_ISPENDING(pVM, VM_FF_INTERRUPT_APIC | VM_FF_INTERRUPT_PIC))
     3409            if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
    33943410            {
    33953411                /* Note: it's important to make sure the return code from TRPMR3InjectEvent isn't ignored! */
     
    34643480#endif
    34653481        /* check that we got them all  */
    3466         Assert(!(VM_FF_HIGH_PRIORITY_PRE_MASK & ~(VM_FF_TIMER | VM_FF_INTERRUPT_APIC | VM_FF_INTERRUPT_PIC | VM_FF_DBGF | VM_FF_PGM_SYNC_CR3 | VM_FF_PGM_SYNC_CR3_NON_GLOBAL | VM_FF_SELM_SYNC_TSS | VM_FF_TRPM_SYNC_IDT | VM_FF_SELM_SYNC_GDT | VM_FF_SELM_SYNC_LDT | VM_FF_TERMINATE | VM_FF_DEBUG_SUSPEND | VM_FF_INHIBIT_INTERRUPTS | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY)));
     3482        Assert(!(VM_FF_HIGH_PRIORITY_PRE_MASK & ~(VM_FF_TIMER | VM_FF_DBGF | VM_FF_TERMINATE | VM_FF_DEBUG_SUSPEND | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY)));
     3483        Assert(!(VMCPU_FF_HIGH_PRIORITY_PRE_MASK & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_SELM_SYNC_TSS | VMCPU_FF_TRPM_SYNC_IDT | VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT | VMCPU_FF_INHIBIT_INTERRUPTS)));
    34673484    }
    34683485
     
    37963813                {
    37973814                    STAM_REL_PROFILE_START(&pVCpu->em.s.StatHalted, y);
    3798                     rc = VMR3WaitHalted(pVM, !(CPUMGetGuestEFlags(pVCpu) & X86_EFL_IF));
     3815                    rc = VMR3WaitHalted(pVM, pVCpu, !(CPUMGetGuestEFlags(pVCpu) & X86_EFL_IF));
    37993816                    STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatHalted, y);
    38003817                    break;
  • trunk/src/VBox/VMM/PATM/CSAM.cpp

    r18992 r19141  
    228228    pVM->csam.s.fScanningStarted = false;
    229229
    230     VM_FF_CLEAR(pVM, VM_FF_CSAM_PENDING_ACTION);
     230    PVMCPU pVCpu = &pVM->aCpus[0];  /* raw mode implies 1 VCPU */
     231    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_CSAM_PENDING_ACTION);
    231232    pVM->csam.s.cDirtyPages = 0;
    232233    /* not necessary */
     
    22632264 * @returns VBox status code.
    22642265 * @param   pVM         The VM to operate on.
    2265  */
    2266 VMMR3DECL(int) CSAMR3DoPendingAction(PVM pVM)
     2266 * @param   pVCpu       The VMCPU to operate on.
     2267 */
     2268VMMR3DECL(int) CSAMR3DoPendingAction(PVM pVM, PVMCPU pVCpu)
    22672269{
    22682270    csamR3FlushDirtyPages(pVM);
    22692271    csamR3FlushCodePages(pVM);
    22702272
    2271     VM_FF_CLEAR(pVM, VM_FF_CSAM_PENDING_ACTION);
     2273    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_CSAM_PENDING_ACTION);
    22722274    return VINF_SUCCESS;
    22732275}
  • trunk/src/VBox/VMM/PATM/PATM.cpp

    r18988 r19141  
    112112    Log(("PATMR3Init: Patch record size %d\n", sizeof(PATCHINFO)));
    113113
    114     AssertReleaseMsg(PATMInterruptFlag == (VM_FF_INTERRUPT_APIC | VM_FF_INTERRUPT_PIC | VM_FF_TIMER | VM_FF_REQUEST),
    115                      ("Interrupt flags out of sync!! PATMInterruptFlag=%#x expected %#x. broken assembler?\n", PATMInterruptFlag, VM_FF_INTERRUPT_APIC | VM_FF_INTERRUPT_PIC | VM_FF_TIMER | VM_FF_REQUEST));
     114    /* These values can't change as they are hardcoded in patch code (old saved states!) */
     115    AssertCompile(VM_FF_TIMER   == VMCPU_FF_TIMER);
     116    AssertCompile(VM_FF_REQUEST == VMCPU_FF_REQUEST);
     117    AssertCompile(VMCPU_FF_INTERRUPT_APIC == RT_BIT_32(0));
     118    AssertCompile(VMCPU_FF_INTERRUPT_PIC == RT_BIT_32(1));
     119
     120    AssertReleaseMsg(PATMInterruptFlag == (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_TIMER | VMCPU_FF_REQUEST),
     121                     ("Interrupt flags out of sync!! PATMInterruptFlag=%#x expected %#x. broken assembler?\n", PATMInterruptFlag, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_TIMER | VMCPU_FF_REQUEST));
    116122
    117123    /* Allocate patch memory and GC patch state memory. */
     
    62336239        }
    62346240#endif
    6235         EMSetInhibitInterruptsPC(pVM, pVCpu, pNewEip);
     6241        EMSetInhibitInterruptsPC(pVCpu, pNewEip);
    62366242        pVM->patm.s.pGCStateHC->GCPtrInhibitInterrupts = 0;
    62376243    }
  • trunk/src/VBox/VMM/PATM/PATMA.asm

    r11979 r19141  
    146146
    147147    ; if interrupts are pending, then we must go back to the host context to handle them!
    148     test    dword [ss:PATM_VM_FORCEDACTIONS], VM_FF_INTERRUPT_APIC | VM_FF_INTERRUPT_PIC | VM_FF_TIMER | VM_FF_REQUEST
     148    test    dword [ss:PATM_VM_FORCEDACTIONS], VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_TIMER | VMCPU_FF_REQUEST
    149149    jz      PATMClearInhibitIRQFaultIF0_Continue
    150150
     
    221221
    222222    ; if interrupts are pending, then we must go back to the host context to handle them!
    223     test    dword [ss:PATM_VM_FORCEDACTIONS], VM_FF_INTERRUPT_APIC | VM_FF_INTERRUPT_PIC | VM_FF_TIMER | VM_FF_REQUEST
     223    test    dword [ss:PATM_VM_FORCEDACTIONS], VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_TIMER | VMCPU_FF_REQUEST
    224224    jz      PATMClearInhibitIRQContIF0_Continue
    225225
     
    744744
    745745    ; if interrupts are pending, then we must go back to the host context to handle them!
    746     test    dword [ss:PATM_VM_FORCEDACTIONS], VM_FF_INTERRUPT_APIC | VM_FF_INTERRUPT_PIC | VM_FF_TIMER | VM_FF_REQUEST
     746    test    dword [ss:PATM_VM_FORCEDACTIONS], VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_TIMER | VMCPU_FF_REQUEST
    747747    jz      PATMPopf32_Continue
    748748
     
    834834
    835835    ; if interrupts are pending, then we must go back to the host context to handle them!
    836     test    dword [ss:PATM_VM_FORCEDACTIONS], VM_FF_INTERRUPT_APIC | VM_FF_INTERRUPT_PIC | VM_FF_TIMER | VM_FF_REQUEST
     836    test    dword [ss:PATM_VM_FORCEDACTIONS], VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_TIMER | VMCPU_FF_REQUEST
    837837    jz      PATMPopf32_NoExit_Continue
    838838
     
    922922    ; if interrupts are pending, then we must go back to the host context to handle them!
    923923    ; @note we destroy the flags here, but that should really not matter (PATM_INT3 case)
    924     test    dword [ss:PATM_VM_FORCEDACTIONS], VM_FF_INTERRUPT_APIC | VM_FF_INTERRUPT_PIC | VM_FF_TIMER | VM_FF_REQUEST
     924    test    dword [ss:PATM_VM_FORCEDACTIONS], VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_TIMER | VMCPU_FF_REQUEST
    925925    jz      PATMPopf16_Continue
    926926    mov     dword [ss:PATM_INTERRUPTFLAG], 1
     
    987987    ; if interrupts are pending, then we must go back to the host context to handle them!
    988988    ; @note we destroy the flags here, but that should really not matter (PATM_INT3 case)
    989     test    dword [ss:PATM_VM_FORCEDACTIONS], VM_FF_INTERRUPT_APIC | VM_FF_INTERRUPT_PIC | VM_FF_TIMER | VM_FF_REQUEST
     989    test    dword [ss:PATM_VM_FORCEDACTIONS], VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_TIMER | VMCPU_FF_REQUEST
    990990    jz      PATMPopf16_Continue_NoExit
    991991    mov     dword [ss:PATM_INTERRUPTFLAG], 1
     
    12381238; Note: This is very important as pending pic interrupts can be overridden by apic interrupts if we don't check early enough (Fedora 5 boot)
    12391239; @@todo fix this properly, so we can dispatch pending interrupts in GC
    1240     test    dword [ss:PATM_VM_FORCEDACTIONS], VM_FF_INTERRUPT_APIC | VM_FF_INTERRUPT_PIC
     1240    test    dword [ss:PATM_VM_FORCEDACTIONS], VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC
    12411241    jz      iret_continue
    12421242
     
    25822582; For assertion during init (to make absolutely sure the flags are in sync in vm.mac & vm.h)
    25832583GLOBALNAME PATMInterruptFlag
    2584     DD      VM_FF_INTERRUPT_APIC | VM_FF_INTERRUPT_PIC | VM_FF_TIMER | VM_FF_REQUEST
    2585 
     2584    DD      VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_TIMER | VMCPU_FF_REQUEST
     2585
  • trunk/src/VBox/VMM/PATM/PATMInternal.h

    r18927 r19141  
    4141#else
    4242# define PATM_SSM_VERSION                    54
     43# define PATM_SSM_VERSION_FIXUP_HACK         54
    4344# define PATM_SSM_VERSION_VER16              53
    4445#endif
     
    134135#define PATM_STAT_MEMSIZE                   (PATM_STAT_MAX_COUNTERS*sizeof(STAMRATIOU32))
    135136
     137/** aCpus[0].fLocalForcedActions fixup (must be odd to avoid theoretical clashes with valid pointers) */
     138#define PATM_FIXUP_CPU_FF_ACTION            0xffffff01
     139/** default cpuid pointer fixup */
     140#define PATM_FIXUP_CPUID_DEFAULT            0xffffff03
     141/** standard cpuid pointer fixup */
     142#define PATM_FIXUP_CPUID_STANDARD           0xffffff05
     143/** extended cpuid pointer fixup */
     144#define PATM_FIXUP_CPUID_EXTENDED           0xffffff07
     145/** centaur cpuid pointer fixup */
     146#define PATM_FIXUP_CPUID_CENTAUR            0xffffff09
    136147
    137148typedef struct
  • trunk/src/VBox/VMM/PATM/PATMPatch.cpp

    r19075 r19141  
    287287                case PATM_VM_FORCEDACTIONS:
    288288                    /* @todo dirty assumptions when correcting this fixup during saved state loading. */
    289                     dest = pVM->pVMRC + RT_OFFSETOF(VM, fForcedActions);
     289                    dest = pVM->pVMRC + RT_OFFSETOF(VM, aCpus[0].fLocalForcedActions);
    290290                    break;
    291291
  • trunk/src/VBox/VMM/PATM/PATMSSM.cpp

    r19075 r19141  
    142142    PSSMHANDLE          pSSM = pVM->patm.s.savedstate.pSSM;
    143143    RELOCREC            rec  = *(PRELOCREC)pNode;
     144    RTRCPTR            *pFixup = (RTRCPTR *)rec.pRelocPos;
    144145
    145146    Assert(rec.pRelocPos);
     147    /* Convert pointer to an offset into patch memory. */
    146148    PATM_SUBTRACT_PTR(rec.pRelocPos, pVM->patm.s.pPatchMemHC);
     149
     150    if (rec.uType == FIXUP_ABSOLUTE)
     151    {
     152        /* Core.Key abused to store the fixup type. */
     153        if (*pFixup == pVM->pVMRC + RT_OFFSETOF(VM, aCpus[0].fLocalForcedActions))
     154            rec.Core.Key = (AVLPVKEY)PATM_FIXUP_CPU_FF_ACTION;
     155        else
     156        if (*pFixup == CPUMR3GetGuestCpuIdDefRCPtr(pVM))
     157            rec.Core.Key = (AVLPVKEY)PATM_FIXUP_CPUID_DEFAULT;
     158        else
     159        if (*pFixup == CPUMR3GetGuestCpuIdStdRCPtr(pVM))
     160            rec.Core.Key = (AVLPVKEY)PATM_FIXUP_CPUID_STANDARD;
     161        else
     162        if (*pFixup == CPUMR3GetGuestCpuIdExtRCPtr(pVM))
     163            rec.Core.Key = (AVLPVKEY)PATM_FIXUP_CPUID_EXTENDED;
     164        else
     165        if (*pFixup == CPUMR3GetGuestCpuIdCentaurRCPtr(pVM))
     166            rec.Core.Key = (AVLPVKEY)PATM_FIXUP_CPUID_CENTAUR;
     167    }
    147168
    148169    /* Save the lookup record. */
     
    286307
    287308    if (    u32Version != PATM_SSM_VERSION
     309        &&  u32Version != PATM_SSM_VERSION_FIXUP_HACK
    288310        &&  u32Version != PATM_SSM_VERSION_VER16
    289311#ifdef PATM_WITH_NEW_SSM
     
    874896        }
    875897        else
    876         /* Note: rather assumptive! */
    877         if (    *pFixup >= pVM->pVMRC
     898        if (    ulSSMVersion <= PATM_SSM_VERSION_FIXUP_HACK
     899            &&  *pFixup >= pVM->pVMRC
    878900            &&  *pFixup < pVM->pVMRC + 32)
    879901        {
    880             LogFlow(("Changing fForcedActions fixup from %x to %x\n", *pFixup, pVM->pVMRC + RT_OFFSETOF(VM, fForcedActions)));
    881             *pFixup = pVM->pVMRC + RT_OFFSETOF(VM, fForcedActions);
     902            LogFlow(("Changing fLocalForcedActions fixup from %x to %x\n", *pFixup, pVM->pVMRC + RT_OFFSETOF(VM, aCpus[0].fLocalForcedActions)));
     903            *pFixup = pVM->pVMRC + RT_OFFSETOF(VM, aCpus[0].fLocalForcedActions);
    882904        }
    883905        else
    884         if (    *pFixup >= pVM->pVMRC
     906        if (    ulSSMVersion <= PATM_SSM_VERSION_FIXUP_HACK
     907            &&  *pFixup >= pVM->pVMRC
    885908            &&  *pFixup < pVM->pVMRC + 8192)
    886909        {
     
    909932        }
    910933        else
    911             AssertMsgFailed(("Unexpected fixup value %x\n", *pFixup));
     934        if (ulSSMVersion >= PATM_SSM_VERSION)
     935        {
     936#ifdef LOG_ENABLED
     937            RTRCPTR oldFixup = *pFixup;
     938#endif
     939            /* Core.Key abused to store the type of fixup */
     940            switch ((uint32_t)pRec->Core.Key)
     941            {
     942            case PATM_FIXUP_CPU_FF_ACTION:
     943                *pFixup = pVM->pVMRC + RT_OFFSETOF(VM, aCpus[0].fLocalForcedActions);
     944                LogFlow(("Changing cpu ff action fixup from %x to %x\n", oldFixup, *pFixup));
     945                break;
     946            case PATM_FIXUP_CPUID_DEFAULT:
     947                *pFixup = CPUMR3GetGuestCpuIdDefRCPtr(pVM);
     948                LogFlow(("Changing cpuid def fixup from %x to %x\n", oldFixup, *pFixup));
     949                break;
     950            case PATM_FIXUP_CPUID_STANDARD:
     951                *pFixup = CPUMR3GetGuestCpuIdStdRCPtr(pVM);
     952                LogFlow(("Changing cpuid std fixup from %x to %x\n", oldFixup, *pFixup));
     953                break;
     954            case PATM_FIXUP_CPUID_EXTENDED:
     955                *pFixup = CPUMR3GetGuestCpuIdExtRCPtr(pVM);
     956                LogFlow(("Changing cpuid ext fixup from %x to %x\n", oldFixup, *pFixup));
     957                break;
     958            case PATM_FIXUP_CPUID_CENTAUR:
     959                *pFixup = CPUMR3GetGuestCpuIdCentaurRCPtr(pVM);
     960                LogFlow(("Changing cpuid centaur fixup from %x to %x\n", oldFixup, *pFixup));
     961                break;
     962            default:
     963                AssertMsgFailed(("Unexpected fixup value %x\n", *pFixup));
     964                break;
     965            }
     966        }
    912967
    913968#ifdef RT_OS_WINDOWS
    914         AssertCompile(RT_OFFSETOF(VM, fForcedActions) < 32);
     969        AssertCompile(RT_OFFSETOF(VM, fGlobalForcedActions) < 32);
    915970#endif
    916971        break;
  • trunk/src/VBox/VMM/PATM/VMMAll/CSAMAll.cpp

    r14299 r19141  
    3737#include "CSAMInternal.h"
    3838#include <VBox/vm.h>
     39#include <VBox/vmm.h>
    3940#include <VBox/dbg.h>
    4041#include <VBox/err.h>
     
    6869
    6970    STAM_COUNTER_ADD(&pVM->csam.s.StatNrTraps, 1);
    70     VM_FF_SET(pVM, VM_FF_CSAM_SCAN_PAGE);
     71    VMCPU_FF_SET(VMMGetCpu0(pVM), VMCPU_FF_CSAM_SCAN_PAGE);
    7172    return VINF_CSAM_PENDING_ACTION;
    7273}
     
    205206    {
    206207        pVM->csam.s.pvPossibleCodePage[pVM->csam.s.cPossibleCodePages++] = (RTRCPTR)GCPtr;
    207         VM_FF_SET(pVM, VM_FF_CSAM_PENDING_ACTION);
     208        VMCPU_FF_SET(VMMGetCpu0(pVM), VMCPU_FF_CSAM_PENDING_ACTION);
    208209    }
    209210    return;
  • trunk/src/VBox/VMM/PATM/VMMAll/PATMAll.cpp

    r18927 r19141  
    191191        if (CTXSUFF(pVM->patm.s.pGCState)->GCPtrInhibitInterrupts == (RTRCPTR)pCtxCore->eip)
    192192        {
    193             EMSetInhibitInterruptsPC(pVM, VMMGetCpu0(pVM), pCtxCore->eip);
     193            EMSetInhibitInterruptsPC(VMMGetCpu0(pVM), pCtxCore->eip);
    194194        }
    195195        CTXSUFF(pVM->patm.s.pGCState)->GCPtrInhibitInterrupts = 0;
  • trunk/src/VBox/VMM/PATM/VMMGC/CSAMGC.cpp

    r18988 r19141  
    117117    }
    118118
    119     VM_FF_SET(pVM, VM_FF_CSAM_PENDING_ACTION);
     119    VMCPU_FF_SET(pVCpu, VMCPU_FF_CSAM_PENDING_ACTION);
    120120
    121121    /* Note that pvFault might be a different address in case of aliases. So use pvRange + offset instead!. */
  • trunk/src/VBox/VMM/PDM.cpp

    r18618 r19141  
    641641     * Save interrupt and DMA states.
    642642     */
    643     SSMR3PutUInt(pSSM, VM_FF_ISSET(pVM, VM_FF_INTERRUPT_APIC));
    644     SSMR3PutUInt(pSSM, VM_FF_ISSET(pVM, VM_FF_INTERRUPT_PIC));
     643    for (unsigned idCpu=0;idCpu<pVM->cCPUs;idCpu++)
     644    {
     645        PVMCPU pVCpu = &pVM->aCpus[idCpu];
     646        SSMR3PutUInt(pSSM, VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INTERRUPT_APIC));
     647        SSMR3PutUInt(pSSM, VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INTERRUPT_PIC));
     648    }
    645649    SSMR3PutUInt(pSSM, VM_FF_ISSET(pVM, VM_FF_PDM_DMA));
    646650
     
    675679    LogFlow(("pdmR3LoadPrep: %s%s%s%s\n",
    676680             VM_FF_ISSET(pVM, VM_FF_PDM_QUEUES)     ? " VM_FF_PDM_QUEUES" : "",
    677              VM_FF_ISSET(pVM, VM_FF_PDM_DMA)        ? " VM_FF_PDM_DMA" : "",
    678              VM_FF_ISSET(pVM, VM_FF_INTERRUPT_APIC) ? " VM_FF_INTERRUPT_APIC" : "",
    679              VM_FF_ISSET(pVM, VM_FF_INTERRUPT_PIC)  ? " VM_FF_INTERRUPT_PIC" : ""
     681             VM_FF_ISSET(pVM, VM_FF_PDM_DMA)        ? " VM_FF_PDM_DMA" : ""
    680682             ));
     683#ifdef LOG_ENABLED
     684    for (unsigned idCpu=0;idCpu<pVM->cCPUs;idCpu++)
     685    {
     686        PVMCPU pVCpu = &pVM->aCpus[idCpu];
     687        LogFlow(("pdmR3LoadPrep: VCPU %d %s%s%s%s\n", idCpu,
     688                VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INTERRUPT_APIC) ? " VMCPU_FF_INTERRUPT_APIC" : "",
     689                VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INTERRUPT_PIC)  ? " VMCPU_FF_INTERRUPT_PIC" : ""
     690                ));
     691    }
     692#endif
    681693
    682694    /*
     
    688700
    689701    /* Clear the FFs. */
    690     VM_FF_CLEAR(pVM, VM_FF_INTERRUPT_APIC);
    691     VM_FF_CLEAR(pVM, VM_FF_INTERRUPT_PIC);
     702    for (unsigned idCpu=0;idCpu<pVM->cCPUs;idCpu++)
     703    {
     704        PVMCPU pVCpu = &pVM->aCpus[idCpu];
     705        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_APIC);
     706        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_PIC);
     707    }
    692708    VM_FF_CLEAR(pVM, VM_FF_PDM_DMA);
    693709
     
    706722static DECLCALLBACK(int) pdmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version)
    707723{
     724    int rc;
     725
    708726    LogFlow(("pdmR3Load:\n"));
    709727
     
    720738     * Load the interrupt and DMA states.
    721739     */
    722     /* APIC interrupt */
    723     RTUINT fInterruptPending = 0;
    724     int rc = SSMR3GetUInt(pSSM, &fInterruptPending);
    725     if (RT_FAILURE(rc))
    726         return rc;
    727     if (fInterruptPending & ~1)
    728     {
    729         AssertMsgFailed(("fInterruptPending=%#x (APIC)\n", fInterruptPending));
    730         return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
    731     }
    732     AssertRelease(!VM_FF_ISSET(pVM, VM_FF_INTERRUPT_APIC));
    733     if (fInterruptPending)
    734         VM_FF_SET(pVM, VM_FF_INTERRUPT_APIC);
    735 
    736     /* PIC interrupt */
    737     fInterruptPending = 0;
    738     rc = SSMR3GetUInt(pSSM, &fInterruptPending);
    739     if (RT_FAILURE(rc))
    740         return rc;
    741     if (fInterruptPending & ~1)
    742     {
    743         AssertMsgFailed(("fInterruptPending=%#x (PIC)\n", fInterruptPending));
    744         return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
    745     }
    746     AssertRelease(!VM_FF_ISSET(pVM, VM_FF_INTERRUPT_PIC));
    747     if (fInterruptPending)
    748         VM_FF_SET(pVM, VM_FF_INTERRUPT_PIC);
     740    for (unsigned idCpu=0;idCpu<pVM->cCPUs;idCpu++)
     741    {
     742        PVMCPU pVCpu = &pVM->aCpus[idCpu];
     743
     744        /* APIC interrupt */
     745        RTUINT fInterruptPending = 0;
     746        rc = SSMR3GetUInt(pSSM, &fInterruptPending);
     747        if (RT_FAILURE(rc))
     748            return rc;
     749        if (fInterruptPending & ~1)
     750        {
     751            AssertMsgFailed(("fInterruptPending=%#x (APIC)\n", fInterruptPending));
     752            return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
     753        }
     754        AssertRelease(!VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INTERRUPT_APIC));
     755        if (fInterruptPending)
     756            VMCPU_FF_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC);
     757
     758        /* PIC interrupt */
     759        fInterruptPending = 0;
     760        rc = SSMR3GetUInt(pSSM, &fInterruptPending);
     761        if (RT_FAILURE(rc))
     762            return rc;
     763        if (fInterruptPending & ~1)
     764        {
     765            AssertMsgFailed(("fInterruptPending=%#x (PIC)\n", fInterruptPending));
     766            return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
     767        }
     768        AssertRelease(!VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INTERRUPT_PIC));
     769        if (fInterruptPending)
     770            VMCPU_FF_SET(pVCpu, VMCPU_FF_INTERRUPT_PIC);
     771    }
    749772
    750773    /* DMA pending */
     
    903926     * Clear all pending interrupts and DMA operations.
    904927     */
    905     VM_FF_CLEAR(pVM, VM_FF_INTERRUPT_APIC);
    906     VM_FF_CLEAR(pVM, VM_FF_INTERRUPT_PIC);
     928    for (unsigned idCpu=0;idCpu<pVM->cCPUs;idCpu++)
     929    {
     930        PVMCPU pVCpu = &pVM->aCpus[idCpu];
     931        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_APIC);
     932        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_PIC);
     933    }
    907934    VM_FF_CLEAR(pVM, VM_FF_PDM_DMA);
    908935
  • trunk/src/VBox/VMM/PDMDevMiscHlp.cpp

    r18927 r19141  
    4848    PDMDEV_ASSERT_DEVINS(pDevIns);
    4949    PVM pVM = pDevIns->Internal.s.pVMR3;
     50    PVMCPU pVCpu = &pVM->aCpus[0];  /* for PIC we always deliver to CPU 0, MP use APIC */
     51
    5052    LogFlow(("pdmR3PicHlp_SetInterruptFF: caller='%s'/%d: VM_FF_INTERRUPT_PIC %d -> 1\n",
    51              pDevIns->pDevReg->szDeviceName, pDevIns->iInstance, VMCPU_FF_ISSET(pVM, 0, VM_FF_INTERRUPT_PIC)));
    52 
    53     /* for PIC we always deliver to CPU 0, MP use APIC */
    54     VMCPU_FF_SET(pVM, 0, VM_FF_INTERRUPT_PIC);
    55     REMR3NotifyInterruptSet(pVM, VMMGetCpu(pVM));
     53             pDevIns->pDevReg->szDeviceName, pDevIns->iInstance, VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INTERRUPT_PIC)));
     54
     55    VMCPU_FF_SET(pVCpu, VMCPU_FF_INTERRUPT_PIC);
     56    REMR3NotifyInterruptSet(pVM, pVCpu);
    5657    VMR3NotifyFF(pVM, true); /** @todo SMP: notify the right cpu. */
    5758}
     
    6263{
    6364    PDMDEV_ASSERT_DEVINS(pDevIns);
     65    PVM pVM = pDevIns->Internal.s.pVMR3;
     66    PVMCPU pVCpu = &pVM->aCpus[0];  /* for PIC we always deliver to CPU 0, MP use APIC */
     67
    6468    LogFlow(("pdmR3PicHlp_ClearInterruptFF: caller='%s'/%d: VM_FF_INTERRUPT_PIC %d -> 0\n",
    65              pDevIns->pDevReg->szDeviceName, pDevIns->iInstance, VMCPU_FF_ISSET(pDevIns->Internal.s.pVMR3, 0, VM_FF_INTERRUPT_PIC)));
    66 
    67     /* for PIC we always deliver to CPU 0, MP use APIC */
    68     VMCPU_FF_CLEAR(pDevIns->Internal.s.pVMR3, 0, VM_FF_INTERRUPT_PIC);
    69     REMR3NotifyInterruptClear(pDevIns->Internal.s.pVMR3, VMMGetCpu(pDevIns->Internal.s.pVMR3));
     69             pDevIns->pDevReg->szDeviceName, pDevIns->iInstance, VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INTERRUPT_PIC)));
     70
     71    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_PIC);
     72    REMR3NotifyInterruptClear(pVM, pVCpu);
    7073}
    7174
     
    146149    PDMDEV_ASSERT_DEVINS(pDevIns);
    147150    PVM pVM = pDevIns->Internal.s.pVMR3;
     151    PVMCPU pVCpu = &pVM->aCpus[idCpu];
     152
     153    AssertReturnVoid(idCpu < pVM->cCPUs);
     154
    148155    LogFlow(("pdmR3ApicHlp_SetInterruptFF: caller='%s'/%d: VM_FF_INTERRUPT(%d) %d -> 1\n",
    149              pDevIns->pDevReg->szDeviceName, pDevIns->iInstance, idCpu, VMCPU_FF_ISSET(pVM, idCpu, VM_FF_INTERRUPT_APIC)));
    150 
    151     VMCPU_FF_SET(pVM, idCpu, VM_FF_INTERRUPT_APIC);
    152     REMR3NotifyInterruptSet(pVM, VMMGetCpu(pVM));
     156             pDevIns->pDevReg->szDeviceName, pDevIns->iInstance, idCpu, VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INTERRUPT_APIC)));
     157
     158    VMCPU_FF_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC);
     159    REMR3NotifyInterruptSet(pVM, pVCpu);
    153160    VMR3NotifyFF(pVM, true);  /** @todo SMP: notify the right cpu. */
    154161}
     
    159166{
    160167    PDMDEV_ASSERT_DEVINS(pDevIns);
     168    PVM pVM = pDevIns->Internal.s.pVMR3;
     169    PVMCPU pVCpu = &pVM->aCpus[idCpu];
     170
     171    AssertReturnVoid(idCpu < pVM->cCPUs);
     172
    161173    LogFlow(("pdmR3ApicHlp_ClearInterruptFF: caller='%s'/%d: VM_FF_INTERRUPT(%d) %d -> 0\n",
    162              pDevIns->pDevReg->szDeviceName, pDevIns->iInstance, idCpu, VMCPU_FF_ISSET(pDevIns->Internal.s.pVMR3, idCpu, VM_FF_INTERRUPT_APIC)));
    163 
    164     VMCPU_FF_CLEAR(pDevIns->Internal.s.pVMR3, idCpu, VM_FF_INTERRUPT_APIC);
    165     REMR3NotifyInterruptClear(pDevIns->Internal.s.pVMR3, VMMGetCpu(pDevIns->Internal.s.pVMR3));
     174             pDevIns->pDevReg->szDeviceName, pDevIns->iInstance, idCpu, VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INTERRUPT_APIC)));
     175
     176    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_APIC);
     177    REMR3NotifyInterruptClear(pVM, pVCpu);
    166178}
    167179
  • trunk/src/VBox/VMM/PGM.cpp

    r19077 r19141  
    611611/** Saved state data unit version for 2.5.x and later. */
    612612#define PGM_SAVED_STATE_VERSION                 9
     613/** Saved state data unit version for 2.2.2 and later. */
     614#define PGM_SAVED_STATE_VERSION_2_2_2           8
    613615/** Saved state data unit version for 2.2.0. */
    614616#define PGM_SAVED_STATE_VERSION_RR_DESC         7
     
    21732175         * Clear the FFs PGM owns.
    21742176         */
    2175         VM_FF_CLEAR(pVM, VM_FF_PGM_SYNC_CR3);
    2176         VM_FF_CLEAR(pVM, VM_FF_PGM_SYNC_CR3_NON_GLOBAL);
     2177        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
     2178        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
    21772179    }
    21782180
     
    30093011     */
    30103012    if (    u32Version != PGM_SAVED_STATE_VERSION
     3013        &&  u32Version != PGM_SAVED_STATE_VERSION_2_2_2
    30113014        &&  u32Version != PGM_SAVED_STATE_VERSION_RR_DESC
    30123015        &&  u32Version != PGM_SAVED_STATE_VERSION_OLD_PHYS_CODE)
     
    30363039        {
    30373040            PVMCPU pVCpu = &pVM->aCpus[i];
    3038             VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3_NON_GLOBAL);
    3039             VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
     3041            VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
     3042            VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
    30403043
    30413044            pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL;
     
    38873890     * Always flag the necessary updates
    38883891     */
    3889     VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
     3892    VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
    38903893
    38913894    /*
     
    40864089    pVCpu->pgm.s.enmShadowMode = PGMMODE_INVALID;
    40874090    int rc = PGMR3ChangeMode(pVM, pVCpu, PGMGetGuestMode(pVCpu));
    4088     Assert(VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3));
     4091    Assert(VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
    40894092    AssertRCReturn(rc, rc);
    40904093    AssertRCSuccessReturn(rc, VERR_IPE_UNEXPECTED_INFO_STATUS);
     
    48494852static DECLCALLBACK(int) pgmR3CmdSync(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult)
    48504853{
     4854    /** @todo SMP support */
     4855    PVMCPU pVCpu = &pVM->aCpus[0];
     4856
    48514857    /*
    48524858     * Validate input.
     
    48584864     * Force page directory sync.
    48594865     */
    4860     VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
     4866    VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
    48614867
    48624868    int rc = pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Forcing page directory sync.\n");
     
    49334939    {
    49344940        ASMAtomicOrU32(&pVCpu->pgm.s.fSyncFlags, PGM_SYNC_ALWAYS);
    4935         VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
     4941        VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
    49364942        return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Enabled permanent forced page directory syncing.\n");
    49374943    }
  • trunk/src/VBox/VMM/PGMBth.h

    r18992 r19141  
    174174    {
    175175        Log(("Bth-Enter: PGM pool flushed -> signal sync cr3\n"));
    176         Assert(VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3));
     176        Assert(VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
    177177        return VINF_PGM_SYNC_CR3;
    178178    }
  • trunk/src/VBox/VMM/PGMHandler.cpp

    r18986 r19141  
    394394
    395395                pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL;
    396                 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
     396                VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
    397397            }
    398398            pVM->pgm.s.fGlobalSyncFlags |= PGM_GLOBAL_SYNC_CLEAR_PGM_POOL;
     
    480480
    481481            pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL;
    482             VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
     482            VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
    483483        }
    484484        pVM->pgm.s.fGlobalSyncFlags |= PGM_GLOBAL_SYNC_CLEAR_PGM_POOL;
  • trunk/src/VBox/VMM/PGMMap.cpp

    r18992 r19141  
    214214    }
    215215
    216     VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
     216    for (unsigned i=0;i<pVM->cCPUs;i++)
     217    {
     218        PVMCPU pVCpu = &pVM->aCpus[i];
     219        VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
     220    }
    217221    return VINF_SUCCESS;
    218222}
     
    267271            MMHyperFree(pVM, pCur);
    268272
    269             VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
     273            for (unsigned i=0;i<pVM->cCPUs;i++)
     274            {
     275                PVMCPU pVCpu = &pVM->aCpus[i];
     276                VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
     277            }
    270278            return VINF_SUCCESS;
    271279        }
     
    649657        PVMCPU pVCpu = &pVM->aCpus[i];
    650658        pVCpu->pgm.s.fSyncFlags       &= ~PGM_SYNC_MONITOR_CR3;
    651         VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
     659        VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
    652660    }
    653661    return VINF_SUCCESS;
     
    684692
    685693        pVCpu->pgm.s.fSyncFlags       &= ~PGM_SYNC_MONITOR_CR3;
    686         VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
     694        VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
    687695    }
    688696    return VINF_SUCCESS;
     
    712720        PVMCPU pVCpu = &pVM->aCpus[i];
    713721
    714         VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
     722        VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
    715723    }
    716724    return VINF_SUCCESS;
  • trunk/src/VBox/VMM/PGMPool.cpp

    r18927 r19141  
    128128int pgmR3PoolInit(PVM pVM)
    129129{
     130    AssertCompile(NIL_PGMPOOL_IDX == 0);
     131
    130132    /*
    131133     * Query Pool config.
  • trunk/src/VBox/VMM/SELM.cpp

    r18988 r19141  
    223223     * Default action when entering raw mode for the first time
    224224     */
    225     VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
    226     VM_FF_SET(pVM, VM_FF_SELM_SYNC_GDT);
    227     VM_FF_SET(pVM, VM_FF_SELM_SYNC_LDT);
     225    PVMCPU pVCpu = &pVM->aCpus[0];  /* raw mode implies on VCPU */
     226    VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
     227    VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
     228    VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
    228229
    229230    /*
     
    579580     * Default action when entering raw mode for the first time
    580581     */
    581     VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
    582     VM_FF_SET(pVM, VM_FF_SELM_SYNC_GDT);
    583     VM_FF_SET(pVM, VM_FF_SELM_SYNC_LDT);
     582    PVMCPU pVCpu = &pVM->aCpus[0];  /* raw mode implies on VCPU */
     583    VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
     584    VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
     585    VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
    584586}
    585587
     
    651653#endif
    652654
    653     VM_FF_CLEAR(pVM, VM_FF_SELM_SYNC_TSS);
    654     VM_FF_CLEAR(pVM, VM_FF_SELM_SYNC_GDT);
    655     VM_FF_CLEAR(pVM, VM_FF_SELM_SYNC_LDT);
     655    PVMCPU pVCpu = &pVM->aCpus[0];  /* raw mode implies on VCPU */
     656    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
     657    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
     658    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
    656659
    657660    pVM->selm.s.fDisableMonitoring = true;
     
    774777    if (PGMGetGuestMode(pVCpu) != PGMMODE_REAL)
    775778    {
    776         VM_FF_SET(pVM, VM_FF_SELM_SYNC_GDT);
    777         VM_FF_SET(pVM, VM_FF_SELM_SYNC_LDT);
    778         VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
     779        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
     780        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
     781        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    779782        SELMR3UpdateFromCPUM(pVM, pVCpu);
    780783    }
     
    783786     * Flag everything for resync on next raw mode entry.
    784787     */
    785     VM_FF_SET(pVM, VM_FF_SELM_SYNC_GDT);
    786     VM_FF_SET(pVM, VM_FF_SELM_SYNC_LDT);
    787     VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
     788    VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
     789    VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
     790    VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    788791
    789792    return VINF_SUCCESS;
     
    804807    if (pVM->selm.s.fDisableMonitoring)
    805808    {
    806         VM_FF_CLEAR(pVM, VM_FF_SELM_SYNC_GDT);
    807         VM_FF_CLEAR(pVM, VM_FF_SELM_SYNC_LDT);
    808         VM_FF_CLEAR(pVM, VM_FF_SELM_SYNC_TSS);
     809        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
     810        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
     811        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    809812
    810813        return VINF_SUCCESS;
     
    816819     * GDT sync
    817820     */
    818     if (VM_FF_ISSET(pVM, VM_FF_SELM_SYNC_GDT))
     821    if (VMCPU_FF_ISSET(pVCpu, VMCPU_FF_SELM_SYNC_GDT))
    819822    {
    820823        /*
    821824         * Always assume the best
    822825         */
    823         VM_FF_CLEAR(pVM, VM_FF_SELM_SYNC_GDT);
     826        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
    824827
    825828        /* If the GDT was changed, then make sure the LDT is checked too */
    826829        /** @todo only do this if the actual ldtr selector was changed; this is a bit excessive */
    827         VM_FF_SET(pVM, VM_FF_SELM_SYNC_LDT);
     830        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
    828831        /* Same goes for the TSS selector */
    829         VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
     832        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    830833
    831834        /*
     
    10741077     * TSS sync
    10751078     */
    1076     if (VM_FF_ISSET(pVM, VM_FF_SELM_SYNC_TSS))
     1079    if (VMCPU_FF_ISSET(pVCpu, VMCPU_FF_SELM_SYNC_TSS))
    10771080    {
    10781081        SELMR3SyncTSS(pVM, pVCpu);
     
    10821085     * LDT sync
    10831086     */
    1084     if (VM_FF_ISSET(pVM, VM_FF_SELM_SYNC_LDT))
     1087    if (VMCPU_FF_ISSET(pVCpu, VMCPU_FF_SELM_SYNC_LDT))
    10851088    {
    10861089        /*
    10871090         * Always assume the best
    10881091         */
    1089         VM_FF_CLEAR(pVM, VM_FF_SELM_SYNC_LDT);
     1092        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
    10901093
    10911094        /*
     
    13681371    Assert(enmAccessType == PGMACCESSTYPE_WRITE);
    13691372    Log(("selmR3GuestGDTWriteHandler: write to %RGv size %d\n", GCPtr, cbBuf));
    1370     VM_FF_SET(pVM, VM_FF_SELM_SYNC_GDT);
    1371 
     1373
     1374    VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_SELM_SYNC_GDT);
    13721375    return VINF_PGM_HANDLER_DO_DEFAULT;
    13731376}
     
    13941397    Assert(enmAccessType == PGMACCESSTYPE_WRITE);
    13951398    Log(("selmR3GuestLDTWriteHandler: write to %RGv size %d\n", GCPtr, cbBuf));
    1396     VM_FF_SET(pVM, VM_FF_SELM_SYNC_LDT);
     1399    VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_SELM_SYNC_LDT);
    13971400    return VINF_PGM_HANDLER_DO_DEFAULT;
    13981401}
     
    14251428     *        changes while we're in REM. */
    14261429
    1427     VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
     1430    VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_SELM_SYNC_TSS);
    14281431
    14291432    return VINF_PGM_HANDLER_DO_DEFAULT;
     
    14491452    if (pVM->selm.s.fDisableMonitoring)
    14501453    {
    1451         VM_FF_CLEAR(pVM, VM_FF_SELM_SYNC_TSS);
     1454        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    14521455        return VINF_SUCCESS;
    14531456    }
    14541457
    14551458    STAM_PROFILE_START(&pVM->selm.s.StatTSSSync, a);
    1456     Assert(VM_FF_ISSET(pVM, VM_FF_SELM_SYNC_TSS));
     1459    Assert(VMCPU_FF_ISSET(pVCpu, VMCPU_FF_SELM_SYNC_TSS));
    14571460
    14581461    /*
     
    16321635    }
    16331636
    1634     VM_FF_CLEAR(pVM, VM_FF_SELM_SYNC_TSS);
     1637    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    16351638
    16361639    STAM_PROFILE_STOP(&pVM->selm.s.StatTSSSync, a);
     
    17891792    PVMCPU pVCpu = VMMGetCpu(pVM);
    17901793
    1791     if (VM_FF_ISSET(pVM, VM_FF_SELM_SYNC_TSS))
     1794    if (VMCPU_FF_ISSET(pVCpu, VMCPU_FF_SELM_SYNC_TSS))
    17921795        return true;
    17931796
  • trunk/src/VBox/VMM/TRPM.cpp

    r19015 r19141  
    564564     * Default action when entering raw mode for the first time
    565565     */
    566     VM_FF_SET(pVM, VM_FF_TRPM_SYNC_IDT);
     566    PVMCPU pVCpu = &pVM->aCpus[0];  /* raw mode implies on VCPU */
     567    VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
    567568    return 0;
    568569}
     
    753754     * Default action when entering raw mode for the first time
    754755     */
    755     VM_FF_SET(pVM, VM_FF_TRPM_SYNC_IDT);
     756    PVMCPU pVCpu = &pVM->aCpus[0];  /* raw mode implies on VCPU */
     757    VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
    756758}
    757759
     
    787789    }
    788790    SSMR3PutBool(pSSM,      pTrpm->fDisableMonitoring);
    789     SSMR3PutUInt(pSSM,      VM_FF_ISSET(pVM, VM_FF_TRPM_SYNC_IDT));
     791    PVMCPU pVCpu = &pVM->aCpus[0];  /* raw mode implies 1 VCPU */
     792    SSMR3PutUInt(pSSM,      VMCPU_FF_ISSET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT));
    790793    SSMR3PutMem(pSSM,       &pTrpm->au32IdtPatched[0], sizeof(pTrpm->au32IdtPatched));
    791794    SSMR3PutU32(pSSM, ~0);              /* separator. */
     
    886889    }
    887890    if (fSyncIDT)
    888         VM_FF_SET(pVM, VM_FF_TRPM_SYNC_IDT);
     891    {
     892        PVMCPU pVCpu = &pVM->aCpus[0];  /* raw mode implies 1 VCPU */
     893        VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
     894    }
    889895    /* else: cleared by reset call above. */
    890896
     
    938944/**
    939945 * Check if gate handlers were updated
    940  * (callback for the VM_FF_TRPM_SYNC_IDT forced action).
     946 * (callback for the VMCPU_FF_TRPM_SYNC_IDT forced action).
    941947 *
    942948 * @returns VBox status code.
     
    952958    if (pVM->trpm.s.fDisableMonitoring)
    953959    {
    954         VM_FF_CLEAR(pVM, VM_FF_TRPM_SYNC_IDT);
     960        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
    955961        return VINF_SUCCESS;    /* Nothing to do */
    956962    }
     
    10401046     * Clear the FF and we're done.
    10411047     */
    1042     VM_FF_CLEAR(pVM, VM_FF_TRPM_SYNC_IDT);
     1048    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
    10431049    STAM_PROFILE_STOP(&pVM->trpm.s.StatSyncIDT, a);
    10441050    return VINF_SUCCESS;
     
    10781084#endif
    10791085
    1080     VM_FF_CLEAR(pVM, VM_FF_TRPM_SYNC_IDT);
     1086    PVMCPU pVCpu = &pVM->aCpus[0];  /* raw mode implies on VCPU */
     1087    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
    10811088
    10821089    pVM->trpm.s.fDisableMonitoring = true;
     
    11041111    Assert(enmAccessType == PGMACCESSTYPE_WRITE);
    11051112    Log(("trpmR3GuestIDTWriteHandler: write to %RGv size %d\n", GCPtr, cbBuf));
    1106     VM_FF_SET(pVM, VM_FF_TRPM_SYNC_IDT);
     1113    VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TRPM_SYNC_IDT);
    11071114    return VINF_PGM_HANDLER_DO_DEFAULT;
    11081115}
     
    14321439    pCtx = CPUMQueryGuestCtxPtr(pVCpu);
    14331440    Assert(!PATMIsPatchGCAddr(pVM, (RTGCPTR)pCtx->eip));
    1434     Assert(!VM_FF_ISSET(pVM, VM_FF_INHIBIT_INTERRUPTS));
     1441    Assert(!VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
    14351442
    14361443    /* Currently only useful for external hardware interrupts. */
     
    14471454
    14481455        uint8_t u8Interrupt;
    1449         rc = PDMGetInterrupt(pVM, &u8Interrupt);
     1456        rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
    14501457        Log(("TRPMR3InjectEvent: u8Interrupt=%d (%#x) rc=%Rrc\n", u8Interrupt, u8Interrupt, rc));
    14511458        if (RT_SUCCESS(rc))
     
    14751482                    if (rc == VINF_SUCCESS /* Don't use RT_SUCCESS */)
    14761483                    {
    1477                         Assert(!VM_FF_ISPENDING(pVM, VM_FF_SELM_SYNC_GDT | VM_FF_SELM_SYNC_LDT | VM_FF_TRPM_SYNC_IDT | VM_FF_SELM_SYNC_TSS));
     1484                        Assert(!VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT | VMCPU_FF_TRPM_SYNC_IDT | VMCPU_FF_SELM_SYNC_TSS));
    14781485
    14791486                        STAM_COUNTER_INC(&pVM->trpm.s.paStatForwardedIRQR3[u8Interrupt]);
     
    14921499        {
    14931500            uint8_t u8Interrupt;
    1494             rc = PDMGetInterrupt(pVM, &u8Interrupt);
     1501            rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
    14951502            Log(("TRPMR3InjectEvent: u8Interrupt=%d (%#x) rc=%Rrc\n", u8Interrupt, u8Interrupt, rc));
    14961503            if (RT_SUCCESS(rc))
  • trunk/src/VBox/VMM/VM.cpp

    r19102 r19141  
    11721172    }
    11731173
    1174         /*
     1174    /*
    11751175     * Request the operation in EMT.
    11761176     */
     
    17061706     */
    17071707    ASMAtomicUoWriteBool(&pVM->pUVM->vm.s.fTerminateEMT, true);
    1708     ASMAtomicWriteU32(&pVM->fForcedActions, VM_FF_TERMINATE);
     1708    ASMAtomicWriteU32(&pVM->fGlobalForcedActions, VM_FF_TERMINATE);
    17091709    LogFlow(("vmR3Destroy: returning %Rrc\n", VINF_EM_TERMINATE));
    17101710    return VINF_EM_TERMINATE;
  • trunk/src/VBox/VMM/VMEmt.cpp

    r19032 r19141  
    344344 * @param   pUVM            Pointer to the user mode VM structure.
    345345 */
    346 static DECLCALLBACK(int) vmR3HaltOldDoHalt(PUVM pUVM, const uint32_t fMask, uint64_t /* u64Now*/)
     346static DECLCALLBACK(int) vmR3HaltOldDoHalt(PUVM pUVM, PVMCPU pVCpu, const uint32_t fMask, uint64_t /* u64Now*/)
    347347{
    348348    /*
     
    363363        TMR3TimerQueuesDo(pVM);
    364364        STAM_REL_PROFILE_STOP(&pUVM->vm.s.StatHaltTimers, b);
    365         if (VM_FF_ISPENDING(pVM, fMask))
     365        if (    VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK)
     366            ||  VMCPU_FF_ISPENDING(pVCpu, fMask))
    366367            break;
    367368        uint64_t u64NanoTS = TMVirtualToNano(pVM, TMTimerPoll(pVM));
    368         if (VM_FF_ISPENDING(pVM, fMask))
     369        if (    VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK)
     370            ||  VMCPU_FF_ISPENDING(pVCpu, fMask))
    369371            break;
    370372
     
    498500 * the lag has been eliminated.
    499501 */
    500 static DECLCALLBACK(int) vmR3HaltMethod1Halt(PUVM pUVM, const uint32_t fMask, uint64_t u64Now)
     502static DECLCALLBACK(int) vmR3HaltMethod1Halt(PUVM pUVM, PVMCPU pVCpu, const uint32_t fMask, uint64_t u64Now)
    501503{
    502504    PVM pVM = pUVM->pVM;
     
    557559        TMR3TimerQueuesDo(pVM);
    558560        STAM_REL_PROFILE_STOP(&pUVM->vm.s.StatHaltTimers, b);
    559         if (VM_FF_ISPENDING(pVM, fMask))
     561        if (    VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK)
     562            ||  VMCPU_FF_ISPENDING(pVCpu, fMask))
    560563            break;
    561564
     
    564567         */
    565568        uint64_t u64NanoTS = TMVirtualToNano(pVM, TMTimerPoll(pVM));
    566         if (VM_FF_ISPENDING(pVM, fMask))
     569        if (    VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK)
     570            ||  VMCPU_FF_ISPENDING(pVCpu, fMask))
    567571            break;
    568572
     
    653657 * try take care of the global scheduling of EMT threads.
    654658 */
    655 static DECLCALLBACK(int) vmR3HaltGlobal1Halt(PUVM pUVM, const uint32_t fMask, uint64_t u64Now)
     659static DECLCALLBACK(int) vmR3HaltGlobal1Halt(PUVM pUVM, PVMCPU pVCpu, const uint32_t fMask, uint64_t u64Now)
    656660{
    657661    PVM pVM = pUVM->pVM;
     
    671675        TMR3TimerQueuesDo(pVM);
    672676        STAM_REL_PROFILE_STOP(&pUVM->vm.s.StatHaltTimers, b);
    673         if (VM_FF_ISPENDING(pVM, fMask))
     677        if (    VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK)
     678            ||  VMCPU_FF_ISPENDING(pVCpu, fMask))
    674679            break;
    675680
     
    679684        uint64_t u64Delta;
    680685        uint64_t u64GipTime = TMTimerPollGIP(pVM, &u64Delta);
    681         if (VM_FF_ISPENDING(pVM, fMask))
     686        if (    VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK)
     687            ||  VMCPU_FF_ISPENDING(pVCpu, fMask))
    682688            break;
    683689
     
    688694        {
    689695            VMMR3YieldStop(pVM);
    690             if (VM_FF_ISPENDING(pVM, fMask))
    691                 break;
     696            if (    VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK)
     697                ||  VMCPU_FF_ISPENDING(pVCpu, fMask))
     698                    break;
    692699
    693700            //RTLogRelPrintf("u64NanoTS=%RI64 cLoops=%3d sleep %02dms (%7RU64) ", u64NanoTS, cLoops, cMilliSecs, u64NanoTS);
     
    734741    ASMAtomicWriteBool(&pUVM->vm.s.fWait, true);
    735742
    736     PVM pVM = pUVM->pVM;
     743    PVM    pVM   = pUVM->pVM;
     744    PVMCPU pVCpu = VMMGetCpu(pVM);
     745
    737746    int rc = VINF_SUCCESS;
    738747    for (;;)
     
    741750         * Check Relevant FFs.
    742751         */
    743         if (VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_SUSPENDED_MASK))
     752        if (    VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_SUSPENDED_MASK)
     753            ||  VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_EXTERNAL_SUSPENDED_MASK))
    744754            break;
    745755
     
    804814            break;
    805815        if (    pUVM->pVM
    806             &&  VM_FF_ISPENDING(pUVM->pVM, VM_FF_EXTERNAL_SUSPENDED_MASK))
     816            &&  (   VM_FF_ISPENDING(pUVM->pVM, VM_FF_EXTERNAL_SUSPENDED_MASK)
     817                 || VMCPU_FF_ISPENDING(VMMGetCpu(pUVM->pVM), VMCPU_FF_EXTERNAL_SUSPENDED_MASK)
     818                )
     819            )
    807820            break;
    808821        if (pUVM->vm.s.fTerminateEMT)
     
    859872    ASMAtomicWriteBool(&pUVM->vm.s.fWait, true);
    860873
    861     PVM pVM = pUVM->pVM;
    862     int rc = VINF_SUCCESS;
     874    PVM    pVM   = pUVM->pVM;
     875    PVMCPU pVCpu = VMMGetCpu(pVM);
     876    int    rc    = VINF_SUCCESS;
    863877    for (;;)
    864878    {
     
    866880         * Check Relevant FFs.
    867881         */
    868         if (VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_SUSPENDED_MASK))
     882        if (    VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_SUSPENDED_MASK)
     883            ||  VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_EXTERNAL_SUSPENDED_MASK))
    869884            break;
    870885
     
    923938    DECLR3CALLBACKMEMBER(void, pfnTerm,(PUVM pUVM));
    924939    /** The halt function. */
    925     DECLR3CALLBACKMEMBER(int,  pfnHalt,(PUVM pUVM, const uint32_t fMask, uint64_t u64Now));
     940    DECLR3CALLBACKMEMBER(int,  pfnHalt,(PUVM pUVM, PVMCPU pVCpu, const uint32_t fMask, uint64_t u64Now));
    926941    /** The wait function. */
    927942    DECLR3CALLBACKMEMBER(int,  pfnWait,(PUVM pUVM));
     
    980995 *          case an appropriate status code is returned.
    981996 * @param   pVM         VM handle.
     997 * @param   pVCpu       VMCPU handle.
    982998 * @param   fIgnoreInterrupts   If set the VM_FF_INTERRUPT flags is ignored.
    983999 * @thread  The emulation thread.
    9841000 */
    985 VMMR3DECL(int) VMR3WaitHalted(PVM pVM, bool fIgnoreInterrupts)
    986 {
    987     PVMCPU pVCpu = VMMGetCpu(pVM);  /* @todo SMP: get rid of this */
    988 
     1001VMMR3DECL(int) VMR3WaitHalted(PVM pVM, PVMCPU pVCpu, bool fIgnoreInterrupts)
     1002{
    9891003    LogFlow(("VMR3WaitHalted: fIgnoreInterrupts=%d\n", fIgnoreInterrupts));
    9901004
     
    9931007     */
    9941008    const uint32_t fMask = !fIgnoreInterrupts
    995         ? VM_FF_EXTERNAL_HALTED_MASK
    996         : VM_FF_EXTERNAL_HALTED_MASK & ~(VM_FF_INTERRUPT_APIC | VM_FF_INTERRUPT_PIC);
    997     if (VM_FF_ISPENDING(pVM, fMask))
    998     {
    999         LogFlow(("VMR3WaitHalted: returns VINF_SUCCESS (FF %#x)\n", pVM->fForcedActions));
     1009        ? VMCPU_FF_EXTERNAL_HALTED_MASK
     1010        : VMCPU_FF_EXTERNAL_HALTED_MASK & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC);
     1011    if (    VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK)
     1012        ||  VMCPU_FF_ISPENDING(pVCpu, fMask))
     1013    {
     1014        LogFlow(("VMR3WaitHalted: returns VINF_SUCCESS (FF %#x FFCPU %#x)\n", pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions));
    10001015        return VINF_SUCCESS;
    10011016    }
     
    10341049     * Do the halt.
    10351050     */
    1036     int rc = g_aHaltMethods[pUVM->vm.s.iHaltMethod].pfnHalt(pUVM, fMask, u64Now);
     1051    int rc = g_aHaltMethods[pUVM->vm.s.iHaltMethod].pfnHalt(pUVM, pVCpu, fMask, u64Now);
    10371052
    10381053    /*
     
    10421057    VMMR3YieldResume(pVM);
    10431058
    1044     LogFlow(("VMR3WaitHalted: returns %Rrc (FF %#x)\n", rc, pVM->fForcedActions));
     1059    LogFlow(("VMR3WaitHalted: returns %Rrc (FF %#x)\n", rc, pVM->fGlobalForcedActions));
    10451060    return rc;
    10461061}
     
    10641079     * Check Relevant FFs.
    10651080     */
    1066     PVM pVM = pUVM->pVM;
     1081    PVM    pVM   = pUVM->pVM;
     1082
    10671083    if (    pVM
    1068         &&  VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_SUSPENDED_MASK))
    1069     {
    1070         LogFlow(("VMR3Wait: returns VINF_SUCCESS (FF %#x)\n", pVM->fForcedActions));
     1084        &&  (   VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_SUSPENDED_MASK)
     1085             || VMCPU_FF_ISPENDING(VMMGetCpu(pVM), VMCPU_FF_EXTERNAL_SUSPENDED_MASK)
     1086            )
     1087        )
     1088    {
     1089        LogFlow(("VMR3Wait: returns VINF_SUCCESS (FF %#x)\n", pVM->fGlobalForcedActions));
    10711090        return VINF_SUCCESS;
    10721091    }
     
    10771096     */
    10781097    int rc = g_aHaltMethods[pUVM->vm.s.iHaltMethod].pfnWait(pUVM);
    1079     LogFlow(("VMR3WaitU: returns %Rrc (FF %#x)\n", rc, pVM ? pVM->fForcedActions : 0));
     1098    LogFlow(("VMR3WaitU: returns %Rrc (FF %#x)\n", rc, pVM ? pVM->fGlobalForcedActions : 0));
    10801099    return rc;
    10811100}
  • trunk/src/VBox/VMM/VMM.cpp

    r18927 r19141  
    15181518static DECLCALLBACK(void) vmmR3InfoFF(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
    15191519{
    1520     const uint32_t fForcedActions = pVM->fForcedActions;
    1521 
    1522     pHlp->pfnPrintf(pHlp, "Forced action Flags: %#RX32", fForcedActions);
     1520    const uint32_t fGlobalForcedActions = pVM->fGlobalForcedActions;
     1521
     1522    pHlp->pfnPrintf(pHlp, "Forced action Flags: %#RX32", fGlobalForcedActions);
     1523    for (unsigned i=0;i<pVM->cCPUs;i++)
     1524    {
     1525        PVMCPU pVCpu = &pVM->aCpus[i];
     1526
     1527        pHlp->pfnPrintf(pHlp, "CPU %d: Forced action Flags: %#RX32", pVCpu->fLocalForcedActions);
     1528    }
    15231529
    15241530    /* show the flag mnemonics  */
    15251531    int c = 0;
    1526     uint32_t f = fForcedActions;
     1532    uint32_t f = fGlobalForcedActions;
    15271533#define PRINT_FLAG(flag) do { \
    15281534        if (f & (flag)) \
     
    15371543        } \
    15381544    } while (0)
    1539     PRINT_FLAG(VM_FF_INTERRUPT_APIC);
    1540     PRINT_FLAG(VM_FF_INTERRUPT_PIC);
    15411545    PRINT_FLAG(VM_FF_TIMER);
    15421546    PRINT_FLAG(VM_FF_PDM_QUEUES);
     
    15471551    PRINT_FLAG(VM_FF_TERMINATE);
    15481552    PRINT_FLAG(VM_FF_RESET);
    1549     PRINT_FLAG(VM_FF_PGM_SYNC_CR3);
    1550     PRINT_FLAG(VM_FF_PGM_SYNC_CR3_NON_GLOBAL);
    15511553    PRINT_FLAG(VM_FF_PGM_NEED_HANDY_PAGES);
    15521554    PRINT_FLAG(VM_FF_PGM_NO_MEMORY);
    1553     PRINT_FLAG(VM_FF_TRPM_SYNC_IDT);
    1554     PRINT_FLAG(VM_FF_SELM_SYNC_TSS);
    1555     PRINT_FLAG(VM_FF_SELM_SYNC_GDT);
    1556     PRINT_FLAG(VM_FF_SELM_SYNC_LDT);
    1557     PRINT_FLAG(VM_FF_INHIBIT_INTERRUPTS);
    1558     PRINT_FLAG(VM_FF_CSAM_SCAN_PAGE);
    1559     PRINT_FLAG(VM_FF_CSAM_PENDING_ACTION);
    1560     PRINT_FLAG(VM_FF_TO_R3);
    15611555    PRINT_FLAG(VM_FF_REM_HANDLER_NOTIFY);
    15621556    PRINT_FLAG(VM_FF_DEBUG_SUSPEND);
     1557    for (unsigned i=0;i<pVM->cCPUs;i++)
     1558    {
     1559        PVMCPU pVCpu = &pVM->aCpus[i];
     1560
     1561        f = pVCpu->fLocalForcedActions;
     1562#define PRINT_CPU_FLAG(flag) do { \
     1563        if (f & (flag)) \
     1564        { \
     1565            static const char *s_psz = #flag; \
     1566            if (!(c % 6)) \
     1567                pHlp->pfnPrintf(pHlp, "CPU %d: %s\n    %s", i, c ? "," : "", s_psz + 6); \
     1568            else \
     1569                pHlp->pfnPrintf(pHlp, ", %s", s_psz + 6); \
     1570            c++; \
     1571            f &= ~(flag); \
     1572        } \
     1573    } while (0)
     1574
     1575        PRINT_CPU_FLAG(VMCPU_FF_INTERRUPT_APIC);
     1576        PRINT_CPU_FLAG(VMCPU_FF_INTERRUPT_PIC);
     1577        PRINT_CPU_FLAG(VMCPU_FF_PGM_SYNC_CR3);
     1578        PRINT_CPU_FLAG(VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
     1579        PRINT_CPU_FLAG(VMCPU_FF_TRPM_SYNC_IDT);
     1580        PRINT_CPU_FLAG(VMCPU_FF_SELM_SYNC_TSS);
     1581        PRINT_CPU_FLAG(VMCPU_FF_SELM_SYNC_GDT);
     1582        PRINT_CPU_FLAG(VMCPU_FF_SELM_SYNC_LDT);
     1583        PRINT_CPU_FLAG(VMCPU_FF_INHIBIT_INTERRUPTS);
     1584        PRINT_CPU_FLAG(VMCPU_FF_CSAM_SCAN_PAGE);
     1585        PRINT_CPU_FLAG(VMCPU_FF_CSAM_PENDING_ACTION);
     1586        PRINT_CPU_FLAG(VMCPU_FF_TO_R3);
     1587    }
     1588
    15631589    if (f)
    15641590        pHlp->pfnPrintf(pHlp, "%s\n    Unknown bits: %#RX32\n", c ? "," : "", f);
     
    15661592        pHlp->pfnPrintf(pHlp, "\n");
    15671593#undef PRINT_FLAG
     1594#undef PRINT_CPU_FLAG
    15681595
    15691596    /* the groups */
    15701597    c = 0;
    15711598#define PRINT_GROUP(grp) do { \
    1572         if (fForcedActions & (grp)) \
     1599        if (fGlobalForcedActions & (grp)) \
    15731600        { \
    15741601            static const char *s_psz = #grp; \
     
    15871614    PRINT_GROUP(VM_FF_NORMAL_PRIORITY_POST_MASK);
    15881615    PRINT_GROUP(VM_FF_NORMAL_PRIORITY_MASK);
    1589     PRINT_GROUP(VM_FF_RESUME_GUEST_MASK);
    15901616    PRINT_GROUP(VM_FF_ALL_BUT_RAW_MASK);
    15911617    if (c)
  • trunk/src/VBox/VMM/VMMAll/EMAll.cpp

    r19076 r19141  
    21472147        {
    21482148            Log(("emInterpretMovCRx: CR4: %#RX64->%#RX64 => R3\n", oldval, val));
    2149             VM_FF_SET(pVM, VM_FF_TO_R3);
     2149            VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
    21502150        }
    21512151# endif
    21522152        if ((val ^ oldval) & X86_CR4_VME)
    2153             VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
     2153            VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    21542154
    21552155        rc2 = PGMChangeMode(pVCpu, CPUMGetGuestCR0(pVCpu), CPUMGetGuestCR4(pVCpu), CPUMGetGuestEFER(pVCpu));
     
    25232523
    25242524    pVCpu->em.s.GCPtrInhibitInterrupts = pRegFrame->eip + pDISState->opsize;
    2525     VM_FF_SET(pVM, VM_FF_INHIBIT_INTERRUPTS);
     2525    VMCPU_FF_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
    25262526
    25272527    return VINF_SUCCESS;
     
    33073307 * Sets the PC for which interrupts should be inhibited.
    33083308 *
    3309  * @param   pVM         The VM handle.
    33103309 * @param   pVCpu       The VMCPU handle.
    33113310 * @param   PC          The PC.
    33123311 */
    3313 VMMDECL(void) EMSetInhibitInterruptsPC(PVM pVM, PVMCPU pVCpu, RTGCUINTPTR PC)
     3312VMMDECL(void) EMSetInhibitInterruptsPC(PVMCPU pVCpu, RTGCUINTPTR PC)
    33143313{
    33153314    pVCpu->em.s.GCPtrInhibitInterrupts = PC;
    3316     VM_FF_SET(pVM, VM_FF_INHIBIT_INTERRUPTS);
     3315    VMCPU_FF_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
    33173316}
    33183317
     
    33283327 *
    33293328 * @returns The PC for which interrupts should be inhibited.
    3330  * @param   pVM         VM handle.
    33313329 * @param   pVCpu       The VMCPU handle.
    33323330 *
    33333331 */
    3334 VMMDECL(RTGCUINTPTR) EMGetInhibitInterruptsPC(PVM pVM, PVMCPU pVCpu)
     3332VMMDECL(RTGCUINTPTR) EMGetInhibitInterruptsPC(PVMCPU pVCpu)
    33353333{
    33363334    return pVCpu->em.s.GCPtrInhibitInterrupts;
  • trunk/src/VBox/VMM/VMMAll/IOMAllMMIO.cpp

    r18992 r19141  
    18681868
    18691869#ifdef VBOX_STRICT
    1870     if (!VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3))
     1870    if (!VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3))
    18711871    {
    18721872        uint32_t cb = pRange->cb;
  • trunk/src/VBox/VMM/VMMAll/PDMAll.cpp

    r13832 r19141  
    4040 *
    4141 * @returns VBox status code.
    42  * @param   pVM             VM handle.
     42 * @param   pVCpu           VMCPU handle.
    4343 * @param   pu8Interrupt    Where to store the interrupt on success.
    4444 */
    45 VMMDECL(int) PDMGetInterrupt(PVM pVM, uint8_t *pu8Interrupt)
    46 {
     45VMMDECL(int) PDMGetInterrupt(PVMCPU pVCpu, uint8_t *pu8Interrupt)
     46{
     47    PVM pVM = pVCpu->CTX_SUFF(pVM);
     48
    4749    pdmLock(pVM);
    4850
     
    5052     * The local APIC has a higer priority than the PIC.
    5153     */
    52     if (VM_FF_ISSET(pVM, VM_FF_INTERRUPT_APIC))
    53     {
    54         VM_FF_CLEAR(pVM, VM_FF_INTERRUPT_APIC);
     54    if (VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INTERRUPT_APIC))
     55    {
     56        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_APIC);
    5557        Assert(pVM->pdm.s.Apic.CTX_SUFF(pDevIns));
    5658        Assert(pVM->pdm.s.Apic.CTX_SUFF(pfnGetInterrupt));
     
    6870     * Check the PIC.
    6971     */
    70     if (VM_FF_ISSET(pVM, VM_FF_INTERRUPT_PIC))
    71     {
    72         VM_FF_CLEAR(pVM, VM_FF_INTERRUPT_PIC);
     72    if (VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INTERRUPT_PIC))
     73    {
     74        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_PIC);
    7375        Assert(pVM->pdm.s.Pic.CTX_SUFF(pDevIns));
    7476        Assert(pVM->pdm.s.Pic.CTX_SUFF(pfnGetInterrupt));
  • trunk/src/VBox/VMM/VMMAll/PDMAllCritSect.cpp

    r18532 r19141  
    203203    pVM->pdm.s.apQueuedCritSectsLeaves[i] = MMHyperCCToR3(pVM, pCritSect);
    204204    VM_FF_SET(pVM, VM_FF_PDM_CRITSECT);
    205     VM_FF_SET(pVM, VM_FF_TO_R3);
     205    VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
    206206    STAM_REL_COUNTER_INC(&pVM->pdm.s.StatQueuedCritSectLeaves);
    207207    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZUnlock);
  • trunk/src/VBox/VMM/VMMAll/PDMAllQueue.cpp

    r13832 r19141  
    123123    else */
    124124    {
    125         VM_FF_SET(pVM, VM_FF_TO_R3);
    126         Log2(("PDMQueueInsertEx: Setting VM_FF_TO_R3\n"));
     125        VMCPU_FF_SET(VMMGetCpu0(pVM), VMCPU_FF_TO_R3);
     126        Log2(("PDMQueueInsertEx: Setting VMCPU_FF_TO_R3\n"));
    127127    }
    128128#endif
  • trunk/src/VBox/VMM/VMMAll/PGMAll.cpp

    r18992 r19141  
    710710        {
    711711            LogFlow(("PGMGCInvalidatePage: Conflict!\n"));
    712             VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
     712            VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
    713713            STAM_COUNTER_INC(&pVM->pgm.s.StatRCInvlPgConflict);
    714714            return VINF_PGM_SYNC_CR3;
     
    16111611     */
    16121612    /** @todo optimize this, it shouldn't always be necessary. */
    1613     VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3_NON_GLOBAL);
     1613    VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
    16141614    if (fGlobal)
    1615         VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
     1615        VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
    16161616    LogFlow(("PGMFlushTLB: cr3=%RX64 OldCr3=%RX64 fGlobal=%d\n", cr3, pVCpu->pgm.s.GCPhysCR3, fGlobal));
    16171617
     
    16511651        {
    16521652            AssertMsg(rc == VINF_PGM_SYNC_CR3, ("%Rrc\n", rc));
    1653             Assert(VM_FF_ISPENDING(pVM, VM_FF_PGM_SYNC_CR3_NON_GLOBAL | VM_FF_PGM_SYNC_CR3));
     1653            Assert(VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_PGM_SYNC_CR3));
    16541654            pVCpu->pgm.s.GCPhysCR3 = GCPhysOldCR3;
    16551655            pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_MAP_CR3;
     
    17711771    {
    17721772        Assert((cr0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE));
    1773         VM_FF_CLEAR(pVM, VM_FF_PGM_SYNC_CR3);
    1774         VM_FF_CLEAR(pVM, VM_FF_PGM_SYNC_CR3_NON_GLOBAL);
     1773        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
     1774        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
    17751775        return VINF_SUCCESS;
    17761776    }
     
    17801780        fGlobal = true;
    17811781    LogFlow(("PGMSyncCR3: cr0=%RX64 cr3=%RX64 cr4=%RX64 fGlobal=%d[%d,%d]\n", cr0, cr3, cr4, fGlobal,
    1782              VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3), VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3_NON_GLOBAL)));
     1782             VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3), VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)));
    17831783
    17841784#ifdef PGMPOOL_WITH_MONITORING
     
    18471847        if (!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_ALWAYS))
    18481848        {
    1849             VM_FF_CLEAR(pVM, VM_FF_PGM_SYNC_CR3);
    1850             VM_FF_CLEAR(pVM, VM_FF_PGM_SYNC_CR3_NON_GLOBAL);
     1849            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
     1850            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
    18511851        }
    18521852
  • trunk/src/VBox/VMM/VMMAll/PGMAllBth.h

    r19015 r19141  
    265265        }
    266266        Log(("SyncPT: %d failed!! rc=%d\n", iPDSrc, rc));
    267         VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3); /** @todo no need to do global sync, right? */
     267        VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3); /** @todo no need to do global sync, right? */
    268268        STAM_PROFILE_STOP(&pVCpu->pgm.s.StatRZTrap0eTimeSyncPT, f);
    269269        return VINF_PGM_SYNC_CR3;
     
    299299                            STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eConflicts);
    300300                            Log(("Trap0e: Detected Conflict %RGv-%RGv\n", pMapping->GCPtr, pMapping->GCPtrLast));
    301                             VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3); /** @todo no need to do global sync,right? */
     301                            VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3); /** @todo no need to do global sync,right? */
    302302                            STAM_PROFILE_STOP(&pVCpu->pgm.s.StatRZTrap0eTimeMapping, a);
    303303                            return VINF_PGM_SYNC_CR3;
     
    973973        AssertMsg(rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT || rc == VERR_PAGE_MAP_LEVEL4_NOT_PRESENT, ("Unexpected rc=%Rrc\n", rc));
    974974        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,InvalidatePageSkipped));
    975         if (!VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3))
     975        if (!VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3))
    976976            PGM_INVL_GUEST_TLBS();
    977977        return VINF_SUCCESS;
     
    985985    {
    986986        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,InvalidatePageSkipped));
    987         if (!VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3))
     987        if (!VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3))
    988988            PGM_INVL_GUEST_TLBS();
    989989        return VINF_SUCCESS;
     
    10421042     */
    10431043#  ifdef PGM_SKIP_GLOBAL_PAGEDIRS_ON_NONGLOBAL_FLUSH
    1044     if (    VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3)
    1045         || (   VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3_NON_GLOBAL)
     1044    if (    VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3)
     1045        || (   VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)
    10461046            && fIsBigPage
    10471047            && PdeSrc.b.u1Global
     
    25312531        else if (rc == VERR_PGM_POOL_FLUSHED)
    25322532        {
    2533             VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
     2533            VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
    25342534# if defined(IN_RC)
    25352535            PGMDynUnlockHCPage(pVM, (uint8_t *)pPdeDst);
     
    32763276    PVM pVM = pVCpu->CTX_SUFF(pVM);
    32773277
    3278     if (VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3))
     3278    if (VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3))
    32793279        fGlobal = true; /* Change this CR3 reload to be a global one. */
    32803280
     
    42984298     */
    42994299#   if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
    4300     Assert(VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3_NON_GLOBAL) || VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3));
     4300    Assert(VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL) || VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
    43014301#   endif
    43024302    rc = pgmMapActivateCR3(pVM, pNewShwPageCR3);
  • trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp

    r19092 r19141  
    291291                REMR3NotifyFF(pVM);
    292292#else
    293                 VM_FF_SET(pVM, VM_FF_TO_R3); /* paranoia */
     293                VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TO_R3); /* paranoia */
    294294#endif
    295295            }
     
    307307            {
    308308                Log(("PGM: VM_FF_TO_R3 - cHandyPages=%u out of %u\n", pVM->pgm.s.cHandyPages, RT_ELEMENTS(pVM->pgm.s.aHandyPages)));
    309                 VM_FF_SET(pVM, VM_FF_TO_R3);
     309                VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TO_R3);
    310310            }
    311311#endif
  • trunk/src/VBox/VMM/VMMAll/PGMAllPool.cpp

    r19024 r19141  
    329329                        {
    330330                            Assert(pgmMapAreMappingsEnabled(&pVM->pgm.s));
    331                             VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
     331                            VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
    332332                            LogFlow(("pgmPoolMonitorChainChanging: Detected conflict at iShwPdpt=%#x iShw=%#x!\n", iShwPdpt, iShw+i));
    333333                            break;
     
    357357                                {
    358358                                    Assert(pgmMapAreMappingsEnabled(&pVM->pgm.s));
    359                                     VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
     359                                    VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
    360360                                    LogFlow(("pgmPoolMonitorChainChanging: Detected conflict at iShwPdpt=%#x iShw2=%#x!\n", iShwPdpt, iShw2));
    361361                                    break;
     
    437437                {
    438438                    Assert(pgmMapAreMappingsEnabled(&pVM->pgm.s));
    439                     VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
     439                    VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
    440440                    STAM_COUNTER_INC(&(pVCpu->pgm.s.StatRZGuestCR3WriteConflict));
    441441                    LogFlow(("pgmPoolMonitorChainChanging: Detected conflict at iShw=%#x!\n", iShw));
     
    471471                            Assert(pgmMapAreMappingsEnabled(&pVM->pgm.s));
    472472                            STAM_COUNTER_INC(&(pVCpu->pgm.s.StatRZGuestCR3WriteConflict));
    473                             VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
     473                            VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
    474474                            LogFlow(("pgmPoolMonitorChainChanging: Detected conflict at iShw2=%#x!\n", iShw2));
    475475                            break;
     
    494494#if 0 /* useful when running PGMAssertCR3(), a bit too troublesome for general use (TLBs). */
    495495                if (    uShw.pPD->a[iShw].n.u1Present
    496                     &&  !VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3))
     496                    &&  !VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3))
    497497                {
    498498                    LogFlow(("pgmPoolMonitorChainChanging: iShw=%#x: %RX32 -> freeing it!\n", iShw, uShw.pPD->a[iShw].u));
     
    515515                {
    516516                    Assert(pgmMapAreMappingsEnabled(&pVM->pgm.s));
    517                     VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
     517                    VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
    518518                    STAM_COUNTER_INC(&(pVCpu->pgm.s.StatRZGuestCR3WriteConflict));
    519519                    LogFlow(("pgmPoolMonitorChainChanging: Detected conflict at iShw=%#x!\n", iShw));
     
    553553                    {
    554554                        Assert(pgmMapAreMappingsEnabled(&pVM->pgm.s));
    555                         VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
     555                        VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
    556556                        STAM_COUNTER_INC(&(pVCpu->pgm.s.StatRZGuestCR3WriteConflict));
    557557                        LogFlow(("pgmPoolMonitorChainChanging: Detected conflict at iShw2=%#x!\n", iShw2));
     
    594594                        Assert(pgmMapAreMappingsEnabled(&pVM->pgm.s));
    595595                        STAM_COUNTER_INC(&(pVCpu->pgm.s.StatRZGuestCR3WriteConflict));
    596                         VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
     596                        VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
    597597                        LogFlow(("pgmPoolMonitorChainChanging: Detected pdpt conflict at iShw=%#x!\n", iShw));
    598598                        break;
     
    626626                                Assert(pgmMapAreMappingsEnabled(&pVM->pgm.s));
    627627                                STAM_COUNTER_INC(&(pVCpu->pgm.s.StatRZGuestCR3WriteConflict));
    628                                 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
     628                                VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
    629629                                LogFlow(("pgmPoolMonitorChainChanging: Detected conflict at iShw2=%#x!\n", iShw2));
    630630                                break;
     
    692692                 * - messing with the bits of pd pointers without changing the physical address
    693693                 */
    694                 if (!VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3))
     694                if (!VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3))
    695695                {
    696696                    uShw.pv = PGMPOOL_PAGE_2_LOCKED_PTR(pVM, pPage);
     
    725725                 * - messing with the bits of pd pointers without changing the physical address
    726726                 */
    727                 if (!VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3))
     727                if (!VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3))
    728728                {
    729729                    uShw.pv = PGMPOOL_PAGE_2_LOCKED_PTR(pVM, pPage);
     
    17221722         * the heap size should suffice. */
    17231723        AssertFatalRC(rc);
    1724         Assert(!(pVM->pgm.s.fGlobalSyncFlags & PGM_GLOBAL_SYNC_CLEAR_PGM_POOL) || VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3));
     1724        Assert(!(pVM->pgm.s.fGlobalSyncFlags & PGM_GLOBAL_SYNC_CLEAR_PGM_POOL) || VMCPU_FF_ISSET(VMMGetCpu(pVM), VMCPU_FF_PGM_SYNC_CR3));
    17251725    }
    17261726    pPage->fMonitored = true;
     
    18191819        rc = PGMHandlerPhysicalDeregister(pVM, pPage->GCPhys & ~(RTGCPHYS)(PAGE_SIZE - 1));
    18201820        AssertFatalRC(rc);
    1821         AssertMsg(!(pVM->pgm.s.fGlobalSyncFlags & PGM_GLOBAL_SYNC_CLEAR_PGM_POOL) || VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3),
    1822                   ("%#x %#x\n", pVM->pgm.s.fGlobalSyncFlags, pVM->fForcedActions));
     1821        AssertMsg(!(pVM->pgm.s.fGlobalSyncFlags & PGM_GLOBAL_SYNC_CLEAR_PGM_POOL) || VMCPU_FF_ISSET(VMMGetCpu(pVM), VMCPU_FF_PGM_SYNC_CR3),
     1822                  ("%#x %#x\n", pVM->pgm.s.fGlobalSyncFlags, pVM->fGlobalForcedActions));
    18231823    }
    18241824    pPage->fMonitored = false;
     
    20812081# else  /* !IN_RING3 */
    20822082        LogFlow(("SyncCR3: PGM_GLOBAL_SYNC_CLEAR_PGM_POOL is set -> VINF_PGM_SYNC_CR3\n"));
    2083         VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3); /** @todo no need to do global sync, right? */
     2083        VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_PGM_SYNC_CR3); /** @todo no need to do global sync, right? */
    20842084        return VINF_PGM_SYNC_CR3;
    20852085# endif /* !IN_RING3 */
     
    27132713    if (rc == VINF_PGM_GCPHYS_ALIASED)
    27142714    {
     2715        Assert(pVM->cCPUs == 1);    /* @todo check */
    27152716        pVM->pgm.s.fGlobalSyncFlags |= PGM_GLOBAL_SYNC_CLEAR_PGM_POOL;
    2716         VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
     2717        for (unsigned i=0;i<pVM->cCPUs;i++)
     2718        {
     2719            PVMCPU pVCpu = &pVM->aCpus[i];
     2720            VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
     2721        }
    27172722        rc = VINF_PGM_SYNC_CR3;
    27182723    }
     
    37713776#endif /* PGMPOOL_WITH_USER_TRACKING */
    37723777#ifdef IN_RING3
    3773 
    3774 /**
    3775  * Flushes all the special root pages as part of a pgmPoolFlushAllInt operation.
    3776  *
    3777  * @param   pPool       The pool.
    3778  */
    3779 static void pgmPoolFlushAllSpecialRoots(PPGMPOOL pPool)
    3780 {
    3781     /*
    3782      * These special pages are all mapped into the indexes 1..PGMPOOL_IDX_FIRST.
    3783      */
    3784     Assert(NIL_PGMPOOL_IDX == 0);
    3785 
    3786     /*
    3787      * Paranoia (to be removed), flag a global CR3 sync.
    3788      */
    3789     VM_FF_SET(pPool->CTX_SUFF(pVM), VM_FF_PGM_SYNC_CR3);
    3790 }
    3791 
    3792 
    37933778/**
    37943779 * Flushes the entire cache.
     
    38233808    /** @todo Need to synchronize this across all VCPUs! */
    38243809    Assert(pVM->cCPUs == 1);
    3825     PVMCPU pVCpu = &pVM->aCpus[0];  /* to get it compiled... */
    3826     pgmR3ExitShadowModeBeforePoolFlush(pVM, pVCpu);
     3810    for (unsigned i=0;i<pVM->cCPUs;i++)
     3811    {
     3812        PVMCPU pVCpu = &pVM->aCpus[i];
     3813        pgmR3ExitShadowModeBeforePoolFlush(pVM, pVCpu);
     3814    }
    38273815
    38283816    /*
     
    39293917
    39303918    /*
    3931      * Flush all the special root pages.
    39323919     * Reinsert active pages into the hash and ensure monitoring chains are correct.
    39333920     */
    3934     pgmPoolFlushAllSpecialRoots(pPool);
    39353921    for (unsigned i = PGMPOOL_IDX_FIRST_SPECIAL; i < PGMPOOL_IDX_FIRST; i++)
    39363922    {
     
    39663952    }
    39673953
    3968     /*
    3969      * Re-enter the shadowing mode and assert Sync CR3 FF.
    3970      */
    3971     pgmR3ReEnterShadowModeAfterPoolFlush(pVM, pVCpu);
    3972     VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
     3954    for (unsigned i=0;i<pVM->cCPUs;i++)
     3955    {
     3956        PVMCPU pVCpu = &pVM->aCpus[i];
     3957        /*
     3958         * Re-enter the shadowing mode and assert Sync CR3 FF.
     3959         */
     3960        pgmR3ReEnterShadowModeAfterPoolFlush(pVM, pVCpu);
     3961        VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
     3962    }
    39733963
    39743964    STAM_PROFILE_STOP(&pPool->StatFlushAllInt, a);
  • trunk/src/VBox/VMM/VMMAll/TMAllVirtual.cpp

    r19032 r19141  
    9696    uint64_t u64 = pVM->tm.s.CTX_SUFF(pfnVirtualGetRaw)(&pVM->tm.s.CTX_SUFF(VirtualGetRawData));
    9797    if (cPrevSteps != pVM->tm.s.CTX_SUFF(VirtualGetRawData).c1nsSteps)
    98         VM_FF_SET(pVM, VM_FF_TO_R3); /* S10 hack */
     98        VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TO_R3);
    9999    return u64;
    100100# endif /* !IN_RING3 */
  • trunk/src/VBox/VMM/VMMAll/TRPMAll.cpp

    r19015 r19141  
    410410    eflags.u32 = CPUMRawGetEFlags(pVCpu, pRegFrame);
    411411
    412     /* VM_FF_INHIBIT_INTERRUPTS should be cleared upfront or don't call this function at all for dispatching hardware interrupts. */
    413     Assert(enmType != TRPM_HARDWARE_INT || !VM_FF_ISSET(pVM, VM_FF_INHIBIT_INTERRUPTS));
     412    /* VMCPU_FF_INHIBIT_INTERRUPTS should be cleared upfront or don't call this function at all for dispatching hardware interrupts. */
     413    Assert(enmType != TRPM_HARDWARE_INT || !VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
    414414
    415415    /*
     
    434434
    435435        Assert(PATMAreInterruptsEnabledByCtxCore(pVM, pRegFrame));
    436         Assert(!VM_FF_ISPENDING(pVM, VM_FF_SELM_SYNC_GDT | VM_FF_SELM_SYNC_LDT | VM_FF_TRPM_SYNC_IDT | VM_FF_SELM_SYNC_TSS));
     436        Assert(!VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT | VMCPU_FF_TRPM_SYNC_IDT | VMCPU_FF_SELM_SYNC_TSS));
    437437
    438438        /* Get the current privilege level. */
  • trunk/src/VBox/VMM/VMMGC/PDMGCDevice.cpp

    r19041 r19141  
    369369{
    370370    PDMDEV_ASSERT_DEVINS(pDevIns);
    371     LogFlow(("pdmRCPicHlp_SetInterruptFF: caller=%p/%d: VM_FF_INTERRUPT_PIC %d -> 1\n",
    372              pDevIns, pDevIns->iInstance, VMCPU_FF_ISSET(pDevIns->Internal.s.pVMRC, 0, VM_FF_INTERRUPT_PIC)));
    373     /* for PIC we always deliver to CPU 0, MP use APIC */
    374     VMCPU_FF_SET(pDevIns->Internal.s.pVMRC, 0, VM_FF_INTERRUPT_PIC);
     371    PVM pVM = pDevIns->Internal.s.pVMRC;
     372    PVMCPU pVCpu = &pVM->aCpus[0];  /* for PIC we always deliver to CPU 0, MP use APIC */
     373
     374    LogFlow(("pdmRCPicHlp_SetInterruptFF: caller=%p/%d: VMMCPU_FF_INTERRUPT_PIC %d -> 1\n",
     375             pDevIns, pDevIns->iInstance, VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INTERRUPT_PIC)));
     376
     377    VMCPU_FF_SET(pVCpu, VMCPU_FF_INTERRUPT_PIC);
    375378}
    376379
     
    380383{
    381384    PDMDEV_ASSERT_DEVINS(pDevIns);
    382     LogFlow(("pdmRCPicHlp_ClearInterruptFF: caller=%p/%d: VM_FF_INTERRUPT_PIC %d -> 0\n",
    383              pDevIns, pDevIns->iInstance, VMCPU_FF_ISSET(pDevIns->Internal.s.pVMRC, 0, VM_FF_INTERRUPT_PIC)));
    384     /* for PIC we always deliver to CPU 0, MP use APIC */
    385     VMCPU_FF_CLEAR(pDevIns->Internal.s.pVMRC, 0, VM_FF_INTERRUPT_PIC);
     385    PVM pVM = pDevIns->Internal.s.CTX_SUFF(pVM);
     386    PVMCPU pVCpu = &pVM->aCpus[0];  /* for PIC we always deliver to CPU 0, MP use APIC */
     387
     388    LogFlow(("pdmRCPicHlp_ClearInterruptFF: caller=%p/%d: VMCPU_FF_INTERRUPT_PIC %d -> 0\n",
     389             pDevIns, pDevIns->iInstance, VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INTERRUPT_PIC)));
     390
     391    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_PIC);
    386392}
    387393
     
    409415{
    410416    PDMDEV_ASSERT_DEVINS(pDevIns);
     417    PVM pVM = pDevIns->Internal.s.pVMRC;
     418    PVMCPU pVCpu = &pVM->aCpus[idCpu];
     419
     420    AssertReturnVoid(idCpu < pVM->cCPUs);
     421
    411422    LogFlow(("pdmRCApicHlp_SetInterruptFF: caller=%p/%d: VM_FF_INTERRUPT %d -> 1\n",
    412              pDevIns, pDevIns->iInstance, VMCPU_FF_ISSET(pDevIns->Internal.s.pVMRC, idCpu, VM_FF_INTERRUPT_APIC)));
    413     VMCPU_FF_SET(pDevIns->Internal.s.pVMRC, idCpu, VM_FF_INTERRUPT_APIC);
     423             pDevIns, pDevIns->iInstance, VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INTERRUPT_APIC)));
     424    VMCPU_FF_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC);
    414425}
    415426
     
    419430{
    420431    PDMDEV_ASSERT_DEVINS(pDevIns);
     432    PVM pVM = pDevIns->Internal.s.pVMRC;
     433    PVMCPU pVCpu = &pVM->aCpus[idCpu];
     434
     435    AssertReturnVoid(idCpu < pVM->cCPUs);
     436
    421437    LogFlow(("pdmRCApicHlp_ClearInterruptFF: caller=%p/%d: VM_FF_INTERRUPT %d -> 0\n",
    422              pDevIns, pDevIns->iInstance, VMCPU_FF_ISSET(pDevIns->Internal.s.pVMRC, idCpu, VM_FF_INTERRUPT_APIC)));
    423     VMCPU_FF_CLEAR(pDevIns->Internal.s.pVMRC, idCpu, VM_FF_INTERRUPT_APIC);
     438             pDevIns, pDevIns->iInstance, VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INTERRUPT_APIC)));
     439    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_APIC);
    424440}
    425441
  • trunk/src/VBox/VMM/VMMGC/SELMGC.cpp

    r18992 r19141  
    139139    if (Sel == CPUMGetGuestLDTR(pVCpu)) /** @todo this isn't correct in two(+) ways! 1. It shouldn't be done until the LDTR is reloaded. 2. It caused the next instruction to be emulated.  */
    140140    {
    141         VM_FF_SET(pVM, VM_FF_SELM_SYNC_LDT);
     141        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
    142142        return VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT;
    143143    }
     
    229229    {
    230230        /* Not necessary when we need to go back to the host context to sync the LDT or TSS. */
    231         VM_FF_SET(pVM, VM_FF_SELM_SYNC_GDT);
     231        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
    232232    }
    233233    STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestGDTUnhandled);
     
    253253    ////LogCom(("selmRCGuestLDTWriteHandler: eip=%08X pvFault=%RGv pvRange=%RGv\r\n", pRegFrame->eip, pvFault, pvRange));
    254254
    255     VM_FF_SET(pVM, VM_FF_SELM_SYNC_LDT);
     255    VMCPU_FF_SET(VMMGetCpu0(pVM), VMCPU_FF_SELM_SYNC_LDT);
    256256    STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestLDT);
    257257    return VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT;
     
    368368                {
    369369                    Log(("TSS offIoBitmap changed: old=%#x new=%#x -> resync in ring-3\n", pVM->selm.s.offGuestIoBitmap, offIoBitmap));
    370                     VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
    371                     VM_FF_SET(pVM, VM_FF_TO_R3);
     370                    VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
     371                    VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
    372372                }
    373373                else
     
    401401        if (rc != VINF_SUCCESS)
    402402        {
    403             VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
    404             VM_FF_SET(pVM, VM_FF_TO_R3);
     403            VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
     404            VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
    405405            if (RT_SUCCESS(rc))
    406406                rc = VINF_SUCCESS;
     
    412412    {
    413413        Assert(RT_FAILURE(rc));
    414         VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
     414        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    415415        STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestTSSUnhandled);
    416416        if (rc == VERR_EM_INTERPRETER)
  • trunk/src/VBox/VMM/VMMGC/TRPMGC.cpp

    r19015 r19141  
    150150
    151151    /** @todo Check which IDT entry and keep the update cost low in TRPMR3SyncIDT() and CSAMCheckGates(). */
    152     VM_FF_SET(pVM, VM_FF_TRPM_SYNC_IDT);
     152    VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
    153153
    154154    STAM_COUNTER_INC(&pVM->trpm.s.StatRCWriteGuestIDTFault);
  • trunk/src/VBox/VMM/VMMGC/TRPMGCHandlers.cpp

    r19032 r19141  
    166166
    167167    /* Clear pending inhibit interrupt state if required. (necessary for dispatching interrupts later on) */
    168     if (VM_FF_ISSET(pVM, VM_FF_INHIBIT_INTERRUPTS))
    169     {
    170         Log2(("VM_FF_INHIBIT_INTERRUPTS at %08RX32 successor %RGv\n", pRegFrame->eip, EMGetInhibitInterruptsPC(pVM, pVCpu)));
    171         if (pRegFrame->eip != EMGetInhibitInterruptsPC(pVM, pVCpu))
     168    if (VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
     169    {
     170        Log2(("VM_FF_INHIBIT_INTERRUPTS at %08RX32 successor %RGv\n", pRegFrame->eip, EMGetInhibitInterruptsPC(pVCpu)));
     171        if (pRegFrame->eip != EMGetInhibitInterruptsPC(pVCpu))
    172172        {
    173173            /** @note we intentionally don't clear VM_FF_INHIBIT_INTERRUPTS here if the eip is the same as the inhibited instr address.
     
    176176             *  break the guest. Sounds very unlikely, but such timing sensitive problem are not as rare as you might think.
    177177             */
    178             VM_FF_CLEAR(pVM, VM_FF_INHIBIT_INTERRUPTS);
     178            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
    179179        }
    180180    }
     
    185185     */
    186186    if (    rc == VINF_SUCCESS
    187         &&  VM_FF_ISPENDING(pVM, VM_FF_TO_R3 | VM_FF_TIMER | VM_FF_INTERRUPT_APIC | VM_FF_INTERRUPT_PIC | VM_FF_REQUEST
    188                                | VM_FF_PGM_SYNC_CR3 | VM_FF_PGM_SYNC_CR3_NON_GLOBAL | VM_FF_PGM_NO_MEMORY))
     187        &&  (   VM_FF_ISPENDING(pVM, VM_FF_TIMER | VM_FF_REQUEST | VM_FF_PGM_NO_MEMORY)
     188             || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_TO_R3 | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_REQUEST | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)
     189            )
     190       )
    189191    {
    190192        /* The out of memory condition naturally outrang the others. */
     
    192194            rc = VINF_EM_NO_MEMORY;
    193195        /* Pending Ring-3 action. */
    194         else if (VM_FF_ISPENDING(pVM, VM_FF_TO_R3))
    195         {
    196             VM_FF_CLEAR(pVM, VM_FF_TO_R3);
     196        else if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_TO_R3))
     197        {
     198            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TO_R3);
    197199            rc = VINF_EM_RAW_TO_R3;
    198200        }
     
    201203            rc = VINF_EM_RAW_TIMER_PENDING;
    202204        /* Pending interrupt: dispatch it. */
    203         else if (    VM_FF_ISPENDING(pVM, VM_FF_INTERRUPT_APIC | VM_FF_INTERRUPT_PIC)
    204                  && !VM_FF_ISSET(pVM, VM_FF_INHIBIT_INTERRUPTS)
     205        else if (    VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
     206                 && !VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
    205207                 &&  PATMAreInterruptsEnabledByCtxCore(pVM, pRegFrame)
    206208           )
    207209        {
    208210            uint8_t u8Interrupt;
    209             rc = PDMGetInterrupt(pVM, &u8Interrupt);
     211            rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
    210212            Log(("trpmGCExitTrap: u8Interrupt=%d (%#x) rc=%Rrc\n", u8Interrupt, u8Interrupt, rc));
    211213            AssertFatalMsgRC(rc, ("PDMGetInterrupt failed with %Rrc\n", rc));
     
    227229         * Try sync CR3?
    228230         */
    229         else if (VM_FF_ISPENDING(pVM, VM_FF_PGM_SYNC_CR3 | VM_FF_PGM_SYNC_CR3_NON_GLOBAL))
     231        else if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
    230232#if 1
    231             rc = PGMSyncCR3(pVCpu, CPUMGetGuestCR0(pVCpu), CPUMGetGuestCR3(pVCpu), CPUMGetGuestCR4(pVCpu), VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3));
     233            rc = PGMSyncCR3(pVCpu, CPUMGetGuestCR0(pVCpu), CPUMGetGuestCR3(pVCpu), CPUMGetGuestCR4(pVCpu), VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
    232234#else
    233235            rc = VINF_PGM_SYNC_CR3;
    234236#endif
    235237        /* Pending request packets might contain actions that need immediate attention, such as pending hardware interrupts. */
    236         else if (VM_FF_ISPENDING(pVM, VM_FF_REQUEST))
     238        else if (   VM_FF_ISPENDING(pVM, VM_FF_REQUEST)
     239                 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_REQUEST))
    237240            rc = VINF_EM_PENDING_REQUEST;
    238241    }
  • trunk/src/VBox/VMM/VMMR0/HWACCMR0.cpp

    r19123 r19141  
    10581058#endif
    10591059
    1060     Assert(!VM_FF_ISPENDING(pVM, VM_FF_PGM_SYNC_CR3 | VM_FF_PGM_SYNC_CR3_NON_GLOBAL));
     1060    Assert(!VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL));
    10611061    Assert(HWACCMR0Globals.aCpuInfo[idCpu].fConfigured);
    10621062    AssertReturn(!ASMAtomicReadBool(&HWACCMR0Globals.fSuspended), VERR_HWACCM_SUSPEND_PENDING);
  • trunk/src/VBox/VMM/VMMR0/HWSVMR0.cpp

    r19123 r19141  
    375375    {
    376376        Log(("INJ-EI: %x at %RGv\n", pEvent->n.u8Vector, (RTGCPTR)pCtx->rip));
    377         Assert(!VM_FF_ISSET(pVM, VM_FF_INHIBIT_INTERRUPTS));
     377        Assert(!VMCPU_FF_ISSET(VMMGetCpu(pVM), VMCPU_FF_INHIBIT_INTERRUPTS));
    378378        Assert(pCtx->eflags.u32 & X86_EFL_IF);
    379379    }
     
    428428    /* When external interrupts are pending, we should exit the VM when IF is set. */
    429429    if (    !TRPMHasTrap(pVCpu)
    430         &&  VM_FF_ISPENDING(pVM, (VM_FF_INTERRUPT_APIC|VM_FF_INTERRUPT_PIC)))
     430        &&  VMCPU_FF_ISPENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC|VMCPU_FF_INTERRUPT_PIC)))
    431431    {
    432432        if (    !(pCtx->eflags.u32 & X86_EFL_IF)
    433             ||  VM_FF_ISSET(pVM, VM_FF_INHIBIT_INTERRUPTS))
     433            ||  VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
    434434        {
    435435            if (!pVMCB->ctrl.IntCtrl.n.u1VIrqValid)
    436436            {
    437                 if (!VM_FF_ISSET(pVM, VM_FF_INHIBIT_INTERRUPTS))
     437                if (!VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
    438438                    LogFlow(("Enable irq window exit!\n"));
    439439                else
     
    450450            uint8_t u8Interrupt;
    451451
    452             rc = PDMGetInterrupt(pVM, &u8Interrupt);
     452            rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
    453453            Log(("Dispatch interrupt: u8Interrupt=%x (%d) rc=%Rrc\n", u8Interrupt, u8Interrupt, rc));
    454454            if (RT_SUCCESS(rc))
     
    460460            {
    461461                /* Can only happen in rare cases where a pending interrupt is cleared behind our back */
    462                 Assert(!VM_FF_ISPENDING(pVM, (VM_FF_INTERRUPT_APIC|VM_FF_INTERRUPT_PIC)));
     462                Assert(!VMCPU_FF_ISPENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC|VMCPU_FF_INTERRUPT_PIC)));
    463463                STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatSwitchGuestIrq);
    464464                /* Just continue */
     
    477477
    478478    if (    pCtx->eflags.u32 & X86_EFL_IF
    479         && (!VM_FF_ISSET(pVM, VM_FF_INHIBIT_INTERRUPTS))
     479        && (!VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
    480480        && TRPMHasTrap(pVCpu)
    481481       )
     
    678678        {
    679679            pVMCB->guest.u64CR3             = PGMGetHyperCR3(pVCpu);
    680             Assert(pVMCB->guest.u64CR3 || VM_FF_ISPENDING(pVM, VM_FF_PGM_SYNC_CR3 | VM_FF_PGM_SYNC_CR3_NON_GLOBAL));
     680            Assert(pVMCB->guest.u64CR3 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL));
    681681        }
    682682    }
     
    867867
    868868    /* Check for irq inhibition due to instruction fusing (sti, mov ss). */
    869     if (VM_FF_ISSET(pVM, VM_FF_INHIBIT_INTERRUPTS))
    870     {
    871         Log(("VM_FF_INHIBIT_INTERRUPTS at %RGv successor %RGv\n", (RTGCPTR)pCtx->rip, EMGetInhibitInterruptsPC(pVM, pVCpu)));
    872         if (pCtx->rip != EMGetInhibitInterruptsPC(pVM, pVCpu))
     869    if (VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
     870    {
     871        Log(("VM_FF_INHIBIT_INTERRUPTS at %RGv successor %RGv\n", (RTGCPTR)pCtx->rip, EMGetInhibitInterruptsPC(pVCpu)));
     872        if (pCtx->rip != EMGetInhibitInterruptsPC(pVCpu))
    873873        {
    874874            /* Note: we intentionally don't clear VM_FF_INHIBIT_INTERRUPTS here.
     
    877877             * break the guest. Sounds very unlikely, but such timing sensitive problems are not as rare as you might think.
    878878             */
    879             VM_FF_CLEAR(pVM, VM_FF_INHIBIT_INTERRUPTS);
     879            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
    880880            /* Irq inhibition is no longer active; clear the corresponding SVM state. */
    881881            pVMCB->ctrl.u64IntShadow = 0;
     
    894894#endif
    895895    {
    896         if (VM_FF_ISPENDING(pVM, VM_FF_HWACCM_TO_R3_MASK))
    897         {
    898             VM_FF_CLEAR(pVM, VM_FF_TO_R3);
     896        if (    VM_FF_ISPENDING(pVM, VM_FF_HWACCM_TO_R3_MASK)
     897            ||  VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HWACCM_TO_R3_MASK))
     898        {
     899            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TO_R3);
    899900            STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatSwitchToR3);
    900901            STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatEntry, x);
     
    12451246    {
    12461247        Log(("uInterruptState %x rip=%RGv\n", pVMCB->ctrl.u64IntShadow, (RTGCPTR)pCtx->rip));
    1247         EMSetInhibitInterruptsPC(pVM, pVCpu, pCtx->rip);
     1248        EMSetInhibitInterruptsPC(pVCpu, pCtx->rip);
    12481249    }
    12491250    else
    1250         VM_FF_CLEAR(pVM, VM_FF_INHIBIT_INTERRUPTS);
     1251        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
    12511252
    12521253    Log2(("exitCode = %x\n", exitCode));
     
    17191720        /* Check if a sync operation is pending. */
    17201721        if (    rc == VINF_SUCCESS /* don't bother if we are going to ring 3 anyway */
    1721             &&  VM_FF_ISPENDING(pVM, VM_FF_PGM_SYNC_CR3 | VM_FF_PGM_SYNC_CR3_NON_GLOBAL))
    1722         {
    1723             rc = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4, VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3));
     1722            &&  VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
     1723        {
     1724            rc = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4, VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
    17241725            AssertRC(rc);
    17251726
     
    20102011        pCtx->rip++;    /* skip hlt */
    20112012        if (    pCtx->eflags.Bits.u1IF
    2012             &&  VM_FF_ISPENDING(pVM, (VM_FF_INTERRUPT_APIC|VM_FF_INTERRUPT_PIC)))
     2013            &&  VMCPU_FF_ISPENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC|VMCPU_FF_INTERRUPT_PIC)))
    20132014            goto ResumeExecution;
    20142015
  • trunk/src/VBox/VMM/VMMR0/HWVMXR0.cpp

    r19123 r19141  
    608608    {
    609609        LogFlow(("INJ-EI: %x at %RGv\n", iGate, (RTGCPTR)pCtx->rip));
    610         Assert(VMX_EXIT_INTERRUPTION_INFO_TYPE(intInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_SW || !VM_FF_ISSET(pVM, VM_FF_INHIBIT_INTERRUPTS));
     610        Assert(VMX_EXIT_INTERRUPTION_INFO_TYPE(intInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_SW || !VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
    611611        Assert(VMX_EXIT_INTERRUPTION_INFO_TYPE(intInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_SW || pCtx->eflags.u32 & X86_EFL_IF);
    612612    }
     
    737737    /* When external interrupts are pending, we should exit the VM when IF is set. */
    738738    if (    !TRPMHasTrap(pVCpu)
    739         &&  VM_FF_ISPENDING(pVM, (VM_FF_INTERRUPT_APIC|VM_FF_INTERRUPT_PIC)))
     739        &&  VMCPU_FF_ISPENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC|VMCPU_FF_INTERRUPT_PIC)))
    740740    {
    741741        if (!(pCtx->eflags.u32 & X86_EFL_IF))
     
    751751        }
    752752        else
    753         if (!VM_FF_ISSET(pVM, VM_FF_INHIBIT_INTERRUPTS))
     753        if (!VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
    754754        {
    755755            uint8_t u8Interrupt;
    756756
    757             rc = PDMGetInterrupt(pVM, &u8Interrupt);
     757            rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
    758758            Log(("Dispatch interrupt: u8Interrupt=%x (%d) rc=%Rrc cs:rip=%04X:%RGv\n", u8Interrupt, u8Interrupt, rc, pCtx->cs, (RTGCPTR)pCtx->rip));
    759759            if (RT_SUCCESS(rc))
     
    765765            {
    766766                /* Can only happen in rare cases where a pending interrupt is cleared behind our back */
    767                 Assert(!VM_FF_ISPENDING(pVM, (VM_FF_INTERRUPT_APIC|VM_FF_INTERRUPT_PIC)));
     767                Assert(!VMCPU_FF_ISPENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC|VMCPU_FF_INTERRUPT_PIC)));
    768768                STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatSwitchGuestIrq);
    769769                /* Just continue */
     
    784784
    785785    if (    pCtx->eflags.u32 & X86_EFL_IF
    786         && (!VM_FF_ISSET(pVM, VM_FF_INHIBIT_INTERRUPTS))
     786        && (!VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
    787787        && TRPMHasTrap(pVCpu)
    788788       )
     
    15261526        {
    15271527            val = PGMGetHyperCR3(pVCpu);
    1528             Assert(val || VM_FF_ISPENDING(pVM, VM_FF_PGM_SYNC_CR3 | VM_FF_PGM_SYNC_CR3_NON_GLOBAL));
     1528            Assert(val || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL));
    15291529        }
    15301530
     
    16931693        Assert(uInterruptState <= 2);    /* only sti & mov ss */
    16941694        Log(("uInterruptState %x eip=%RGv\n", uInterruptState, pCtx->rip));
    1695         EMSetInhibitInterruptsPC(pVM, pVCpu, pCtx->rip);
     1695        EMSetInhibitInterruptsPC(pVCpu, pCtx->rip);
    16961696    }
    16971697    else
    1698         VM_FF_CLEAR(pVM, VM_FF_INHIBIT_INTERRUPTS);
     1698        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
    16991699
    17001700    /* Control registers. */
     
    20352035
    20362036    /* Check for irq inhibition due to instruction fusing (sti, mov ss). */
    2037     if (VM_FF_ISSET(pVM, VM_FF_INHIBIT_INTERRUPTS))
    2038     {
    2039         Log(("VM_FF_INHIBIT_INTERRUPTS at %RGv successor %RGv\n", (RTGCPTR)pCtx->rip, EMGetInhibitInterruptsPC(pVM, pVCpu)));
    2040         if (pCtx->rip != EMGetInhibitInterruptsPC(pVM, pVCpu))
     2037    if (VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
     2038    {
     2039        Log(("VM_FF_INHIBIT_INTERRUPTS at %RGv successor %RGv\n", (RTGCPTR)pCtx->rip, EMGetInhibitInterruptsPC(pVCpu)));
     2040        if (pCtx->rip != EMGetInhibitInterruptsPC(pVCpu))
    20412041        {
    20422042            /* Note: we intentionally don't clear VM_FF_INHIBIT_INTERRUPTS here.
     
    20452045             * break the guest. Sounds very unlikely, but such timing sensitive problems are not as rare as you might think.
    20462046             */
    2047             VM_FF_CLEAR(pVM, VM_FF_INHIBIT_INTERRUPTS);
     2047            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
    20482048            /* Irq inhibition is no longer active; clear the corresponding VMX state. */
    20492049            rc = VMXWriteVMCS(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE,   0);
     
    20592059
    20602060    /* Check for pending actions that force us to go back to ring 3. */
    2061     if (VM_FF_ISPENDING(pVM, VM_FF_HWACCM_TO_R3_MASK))
    2062     {
    2063         VM_FF_CLEAR(pVM, VM_FF_TO_R3);
     2061    if (    VM_FF_ISPENDING(pVM, VM_FF_HWACCM_TO_R3_MASK)
     2062        ||  VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HWACCM_TO_R3_MASK))
     2063    {
     2064        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TO_R3);
    20642065        STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatSwitchToR3);
    20652066        rc = RT_UNLIKELY(VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY)) ? VINF_EM_NO_MEMORY : VINF_EM_RAW_TO_R3;
     
    28902891    case VMX_EXIT_IRQ_WINDOW:           /* 7 Interrupt window. */
    28912892        /* Clear VM-exit on IF=1 change. */
    2892         LogFlow(("VMX_EXIT_IRQ_WINDOW %RGv pending=%d IF=%d\n", (RTGCPTR)pCtx->rip, VM_FF_ISPENDING(pVM, (VM_FF_INTERRUPT_APIC|VM_FF_INTERRUPT_PIC)), pCtx->eflags.Bits.u1IF));
     2893        LogFlow(("VMX_EXIT_IRQ_WINDOW %RGv pending=%d IF=%d\n", (RTGCPTR)pCtx->rip, VMCPU_FF_ISPENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC|VMCPU_FF_INTERRUPT_PIC)), pCtx->eflags.Bits.u1IF));
    28932894        pVCpu->hwaccm.s.vmx.proc_ctls &= ~VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_IRQ_WINDOW_EXIT;
    28942895        rc = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVCpu->hwaccm.s.vmx.proc_ctls);
     
    30283029            /* Check if a sync operation is pending. */
    30293030            if (    rc == VINF_SUCCESS /* don't bother if we are going to ring 3 anyway */
    3030                 &&  VM_FF_ISPENDING(pVM, VM_FF_PGM_SYNC_CR3 | VM_FF_PGM_SYNC_CR3_NON_GLOBAL))
     3031                &&  VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
    30313032            {
    3032                 rc = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4, VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3));
     3033                rc = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4, VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
    30333034                AssertRC(rc);
    30343035            }
     
    33463347        pCtx->rip++;    /* skip hlt */
    33473348        if (    pCtx->eflags.Bits.u1IF
    3348             &&  VM_FF_ISPENDING(pVM, (VM_FF_INTERRUPT_APIC|VM_FF_INTERRUPT_PIC)))
     3349            &&  VMCPU_FF_ISPENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC|VMCPU_FF_INTERRUPT_PIC)))
    33493350            goto ResumeExecution;
    33503351
  • trunk/src/VBox/VMM/VMMR0/PDMR0Device.cpp

    r18927 r19141  
    384384{
    385385    PDMDEV_ASSERT_DEVINS(pDevIns);
    386     LogFlow(("pdmR0PicHlp_SetInterruptFF: caller=%p/%d: VM_FF_INTERRUPT_PIC %d -> 1\n",
    387              pDevIns, pDevIns->iInstance, VMCPU_FF_ISSET(pDevIns->Internal.s.pVMR0, 0, VM_FF_INTERRUPT_PIC)));
    388     /* for PIC we always deliver to CPU 0, MP use APIC */
    389     VMCPU_FF_SET(pDevIns->Internal.s.pVMR0, 0, VM_FF_INTERRUPT_PIC);
     386    PVM    pVM   = pDevIns->Internal.s.pVMR0;
     387    PVMCPU pVCpu = &pVM->aCpus[0];      /* for PIC we always deliver to CPU 0, MP use APIC */
     388
     389    LogFlow(("pdmR0PicHlp_SetInterruptFF: caller=%p/%d: VMCPU_FF_INTERRUPT_PIC %d -> 1\n",
     390             pDevIns, pDevIns->iInstance, VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INTERRUPT_PIC)));
     391   
     392    VMCPU_FF_SET(pVCpu, VMCPU_FF_INTERRUPT_PIC);
    390393}
    391394
     
    395398{
    396399    PDMDEV_ASSERT_DEVINS(pDevIns);
    397     LogFlow(("pdmR0PicHlp_ClearInterruptFF: caller=%p/%d: VM_FF_INTERRUPT_PIC %d -> 0\n",
    398              pDevIns, pDevIns->iInstance, VMCPU_FF_ISSET(pDevIns->Internal.s.pVMR0, 0, VM_FF_INTERRUPT_PIC)));
    399     /* for PIC we always deliver to CPU 0, MP use APIC */
    400     VMCPU_FF_CLEAR(pDevIns->Internal.s.pVMR0, 0, VM_FF_INTERRUPT_PIC);
     400    PVM    pVM   = pDevIns->Internal.s.pVMR0;
     401    PVMCPU pVCpu = &pVM->aCpus[0];      /* for PIC we always deliver to CPU 0, MP use APIC */
     402
     403    LogFlow(("pdmR0PicHlp_ClearInterruptFF: caller=%p/%d: VMCPU_FF_INTERRUPT_PIC %d -> 0\n",
     404             pDevIns, pDevIns->iInstance, VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INTERRUPT_PIC)));
     405
     406    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_PIC);
    401407}
    402408
     
    423429{
    424430    PDMDEV_ASSERT_DEVINS(pDevIns);
     431    PVM    pVM   = pDevIns->Internal.s.pVMR0;
     432    PVMCPU pVCpu = &pVM->aCpus[idCpu];
     433
     434    AssertReturnVoid(idCpu < pVM->cCPUs);
    425435
    426436    LogFlow(("pdmR0ApicHlp_SetInterruptFF: caller=%p/%d: VM_FF_INTERRUPT %d -> 1\n",
    427              pDevIns, pDevIns->iInstance, VMCPU_FF_ISSET(pDevIns->Internal.s.pVMR0, idCpu, VM_FF_INTERRUPT_APIC)));
    428     VMCPU_FF_SET(pDevIns->Internal.s.pVMR0, idCpu, VM_FF_INTERRUPT_APIC);
     437             pDevIns, pDevIns->iInstance, VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INTERRUPT_APIC)));
     438    VMCPU_FF_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC);
    429439}
    430440
     
    434444{
    435445    PDMDEV_ASSERT_DEVINS(pDevIns);
     446    PVM    pVM   = pDevIns->Internal.s.pVMR0;
     447    PVMCPU pVCpu = &pVM->aCpus[idCpu];
     448
     449    AssertReturnVoid(idCpu < pVM->cCPUs);
    436450
    437451    LogFlow(("pdmR0ApicHlp_ClearInterruptFF: caller=%p/%d: VM_FF_INTERRUPT %d -> 0\n",
    438              pDevIns, pDevIns->iInstance, VMCPU_FF_ISSET(pDevIns->Internal.s.pVMR0, idCpu, VM_FF_INTERRUPT_APIC)));
    439     VMCPU_FF_CLEAR(pDevIns->Internal.s.pVMR0, idCpu, VM_FF_INTERRUPT_APIC);
     452             pDevIns, pDevIns->iInstance, VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INTERRUPT_APIC)));
     453    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_APIC);
    440454}
    441455
  • trunk/src/VBox/VMM/VMMTests.cpp

    r19015 r19141  
    391391         * These forced actions are not necessary for the test and trigger breakpoints too.
    392392         */
    393         VM_FF_CLEAR(pVM, VM_FF_TRPM_SYNC_IDT);
    394         VM_FF_CLEAR(pVM, VM_FF_SELM_SYNC_TSS);
     393        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
     394        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    395395
    396396        /*
     
    485485     * These forced actions are not necessary for the test and trigger breakpoints too.
    486486     */
    487     VM_FF_CLEAR(pVM, VM_FF_TRPM_SYNC_IDT);
    488     VM_FF_CLEAR(pVM, VM_FF_SELM_SYNC_TSS);
     487    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
     488    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    489489
    490490    /* Enable mapping of the hypervisor into the shadow page table. */
     
    504504    PGMSyncCR3(pVCpu, pHyperCtx->cr0, CR3Phys, pHyperCtx->cr4, true);
    505505
    506     VM_FF_CLEAR(pVM, VM_FF_TO_R3);
     506    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TO_R3);
    507507    VM_FF_CLEAR(pVM, VM_FF_TIMER);
    508508    VM_FF_CLEAR(pVM, VM_FF_REQUEST);
     
    555555            pGuestCtx->cr3 = CR3Phys;
    556556
    557             VM_FF_CLEAR(pVM, VM_FF_TO_R3);
     557            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TO_R3);
    558558            VM_FF_CLEAR(pVM, VM_FF_TIMER);
    559559
  • trunk/src/VBox/VMM/VMReq.cpp

    r13858 r19141  
    628628        for (unsigned i=0;i<pUVM->pVM->cCPUs;i++)
    629629        {
     630            PVMCPU pVCpu = &pUVM->pVM->aCpus[i];
     631
    630632            if (   !pUVMCPU
    631633                ||  pUVMCPU->idCpu != i)
     
    646648                 */
    647649                if (pUVM->pVM)
    648                     VMCPU_FF_SET(pUVM->pVM, VM_FF_REQUEST, i);
     650                    VMCPU_FF_SET(pVCpu, VM_FF_REQUEST);
    649651                /* @todo: VMR3NotifyFFU*/
    650652                AssertFailed();
     
    674676    {
    675677        RTCPUID  idTarget = (RTCPUID)pReq->enmDest;
     678        PVMCPU   pVCpu = &pUVM->pVM->aCpus[idTarget];
    676679        unsigned fFlags = ((VMREQ volatile *)pReq)->fFlags;     /* volatile paranoia */
    677680
     
    691694         */
    692695        if (pUVM->pVM)
    693             VMCPU_FF_SET(pUVM->pVM, VM_FF_REQUEST, idTarget);
     696            VMCPU_FF_SET(pVCpu, VM_FF_REQUEST);
    694697        /* @todo: VMR3NotifyFFU*/
    695698        AssertFailed();
     
    851854            ppReqs = (void * volatile *)&pUVM->aCpus[enmDest].vm.s.pReqs;
    852855            if (RT_LIKELY(pUVM->pVM))
    853                 VMCPU_FF_CLEAR(pUVM->pVM, enmDest, VM_FF_REQUEST);
     856            {
     857                PVMCPU pVCpu = &pUVM->pVM->aCpus[enmDest];
     858
     859                VMCPU_FF_CLEAR(pVCpu, VM_FF_REQUEST);
     860            }
    854861        }
    855862
  • trunk/src/recompiler_new/VBoxREMWrapper.cpp

    r19090 r19141  
    705705static const REMPARMDESC g_aArgsPDMGetInterrupt[] =
    706706{
    707     { REMPARMDESC_FLAGS_INT,        sizeof(PVM), NULL },
     707    { REMPARMDESC_FLAGS_INT,        sizeof(PVMCPU), NULL },
    708708    { REMPARMDESC_FLAGS_INT,        sizeof(uint8_t *), NULL }
    709709};
     
    11711171    { "TMVirtualResume",                        (void *)(uintptr_t)&TMVirtualResume,                &g_aArgsVM[0],                              RT_ELEMENTS(g_aArgsVM),                                REMFNDESC_FLAGS_RET_INT,    sizeof(int),        NULL },
    11721172    { "TRPMAssertTrap",                         (void *)(uintptr_t)&TRPMAssertTrap,                 &g_aArgsTRPMAssertTrap[0],                  RT_ELEMENTS(g_aArgsTRPMAssertTrap),                    REMFNDESC_FLAGS_RET_INT,    sizeof(int),        NULL },
    1173     { "TRPMGetErrorCode",                       (void *)(uintptr_t)&TRPMGetErrorCode,               &g_aArgsVM[0],                              RT_ELEMENTS(g_aArgsVM),                                REMFNDESC_FLAGS_RET_INT,    sizeof(RTGCUINT),   NULL },
    1174     { "TRPMGetFaultAddress",                    (void *)(uintptr_t)&TRPMGetFaultAddress,            &g_aArgsVM[0],                              RT_ELEMENTS(g_aArgsVM),                                REMFNDESC_FLAGS_RET_INT,    sizeof(RTGCUINTPTR),NULL },
     1173    { "TRPMGetErrorCode",                       (void *)(uintptr_t)&TRPMGetErrorCode,               &g_aArgsVMCPU[0],                           RT_ELEMENTS(g_aArgsVMCPU),                             REMFNDESC_FLAGS_RET_INT,    sizeof(RTGCUINT),   NULL },
     1174    { "TRPMGetFaultAddress",                    (void *)(uintptr_t)&TRPMGetFaultAddress,            &g_aArgsVMCPU[0],                           RT_ELEMENTS(g_aArgsVMCPU),                             REMFNDESC_FLAGS_RET_INT,    sizeof(RTGCUINTPTR),NULL },
    11751175    { "TRPMQueryTrap",                          (void *)(uintptr_t)&TRPMQueryTrap,                  &g_aArgsTRPMQueryTrap[0],                   RT_ELEMENTS(g_aArgsTRPMQueryTrap),                     REMFNDESC_FLAGS_RET_INT,    sizeof(int),        NULL },
    1176     { "TRPMResetTrap",                          (void *)(uintptr_t)&TRPMResetTrap,                  &g_aArgsVM[0],                              RT_ELEMENTS(g_aArgsVM),                                REMFNDESC_FLAGS_RET_INT,    sizeof(int),        NULL },
     1176    { "TRPMResetTrap",                          (void *)(uintptr_t)&TRPMResetTrap,                  &g_aArgsVMCPU[0],                           RT_ELEMENTS(g_aArgsVMCPU),                             REMFNDESC_FLAGS_RET_INT,    sizeof(int),        NULL },
    11771177    { "TRPMSetErrorCode",                       (void *)(uintptr_t)&TRPMSetErrorCode,               &g_aArgsTRPMSetErrorCode[0],                RT_ELEMENTS(g_aArgsTRPMSetErrorCode),                  REMFNDESC_FLAGS_RET_VOID,   0,                  NULL },
    11781178    { "TRPMSetFaultAddress",                    (void *)(uintptr_t)&TRPMSetFaultAddress,            &g_aArgsTRPMSetFaultAddress[0],             RT_ELEMENTS(g_aArgsTRPMSetFaultAddress),               REMFNDESC_FLAGS_RET_VOID,   0,                  NULL },
  • trunk/src/recompiler_new/VBoxRecompiler.c

    r19076 r19141  
    14501450    pCtx->cr3 = env->cr[3];
    14511451    if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
    1452         VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
     1452        VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    14531453    pCtx->cr4 = env->cr[4];
    14541454
     
    14611461    {
    14621462        AssertMsgFailed(("remR3FlushPage %RGv failed with %d!!\n", GCPtr, rc));
    1463         VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
     1463        VMCPU_FF_SET(env->pVCpu, VMCPU_FF_PGM_SYNC_CR3);
    14641464    }
    14651465    //RAWEx_ProfileStart(env, STATS_QEMU_TOTAL);
     
    15661566    pCtx->cr3 = env->cr[3];
    15671567    if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
    1568         VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
     1568        VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    15691569    pCtx->cr4 = env->cr[4];
    15701570
     
    16061606    pCtx->cr3 = env->cr[3];
    16071607    if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
    1608         VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
     1608        VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    16091609    pCtx->cr4 = env->cr[4];
    16101610
     
    21882188    pVM->rem.s.Env.interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER);
    21892189    if (    pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ
    2190         ||  VM_FF_ISPENDING(pVM, VM_FF_INTERRUPT_APIC | VM_FF_INTERRUPT_PIC))
     2190        ||  VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
    21912191        pVM->rem.s.Env.interrupt_request |= CPU_INTERRUPT_HARD;
    21922192
     
    23172317    pCtx->cr3           = pVM->rem.s.Env.cr[3];
    23182318    if ((pVM->rem.s.Env.cr[4] ^ pCtx->cr4) & X86_CR4_VME)
    2319         VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
     2319        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    23202320    pCtx->cr4           = pVM->rem.s.Env.cr[4];
    23212321
     
    23282328        pCtx->gdtr.pGdt = pVM->rem.s.Env.gdt.base;
    23292329        STAM_COUNTER_INC(&gStatREMGDTChange);
    2330         VM_FF_SET(pVM, VM_FF_SELM_SYNC_GDT);
     2330        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
    23312331    }
    23322332
     
    23362336        pCtx->idtr.pIdt = pVM->rem.s.Env.idt.base;
    23372337        STAM_COUNTER_INC(&gStatREMIDTChange);
    2338         VM_FF_SET(pVM, VM_FF_TRPM_SYNC_IDT);
     2338        VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
    23392339    }
    23402340
     
    23492349        pCtx->ldtrHid.Attr.u    = (pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF;
    23502350        STAM_COUNTER_INC(&gStatREMLDTRChange);
    2351         VM_FF_SET(pVM, VM_FF_SELM_SYNC_LDT);
     2351        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
    23522352    }
    23532353
     
    23712371            pCtx->trHid.Attr.u |= DESC_TSS_BUSY_MASK >> 8;
    23722372        STAM_COUNTER_INC(&gStatREMTRChange);
    2373         VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
     2373        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    23742374    }
    23752375
     
    25332533    pCtx->cr3           = pVM->rem.s.Env.cr[3];
    25342534    if ((pVM->rem.s.Env.cr[4] ^ pCtx->cr4) & X86_CR4_VME)
    2535         VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
     2535        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    25362536    pCtx->cr4           = pVM->rem.s.Env.cr[4];
    25372537
     
    25442544        pCtx->gdtr.pGdt     = (RTGCPTR)pVM->rem.s.Env.gdt.base;
    25452545        STAM_COUNTER_INC(&gStatREMGDTChange);
    2546         VM_FF_SET(pVM, VM_FF_SELM_SYNC_GDT);
     2546        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
    25472547    }
    25482548
     
    25522552        pCtx->idtr.pIdt     = (RTGCPTR)pVM->rem.s.Env.idt.base;
    25532553        STAM_COUNTER_INC(&gStatREMIDTChange);
    2554         VM_FF_SET(pVM, VM_FF_TRPM_SYNC_IDT);
     2554        VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
    25552555    }
    25562556
     
    25652565        pCtx->ldtrHid.Attr.u    = (pVM->rem.s.Env.ldt.flags >> 8) & 0xFFFF;
    25662566        STAM_COUNTER_INC(&gStatREMLDTRChange);
    2567         VM_FF_SET(pVM, VM_FF_SELM_SYNC_LDT);
     2567        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
    25682568    }
    25692569
     
    25872587            pCtx->trHid.Attr.u |= DESC_TSS_BUSY_MASK >> 8;
    25882588        STAM_COUNTER_INC(&gStatREMTRChange);
    2589         VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
     2589        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    25902590    }
    25912591
     
    40304030    }
    40314031    else
    4032         rc = PDMGetInterrupt(env->pVM, &u8Interrupt);
     4032        rc = PDMGetInterrupt(env->pVCpu, &u8Interrupt);
    40334033
    40344034    LogFlow(("cpu_get_pic_interrupt: u8Interrupt=%d rc=%Rrc\n", u8Interrupt, rc));
    40354035    if (RT_SUCCESS(rc))
    40364036    {
    4037         if (VM_FF_ISPENDING(env->pVM, VM_FF_INTERRUPT_APIC | VM_FF_INTERRUPT_PIC))
     4037        if (VMCPU_FF_ISPENDING(env->pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
    40384038            env->interrupt_request |= CPU_INTERRUPT_HARD;
    40394039        return u8Interrupt;
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette