VirtualBox

Changeset 19660 in vbox for trunk


Ignore:
Timestamp:
May 13, 2009 2:09:15 PM (16 years ago)
Author:
vboxsync
Message:

TM+affected: SMP changes in progress.

Location:
trunk
Files:
18 edited

Legend:

Unmodified
Added
Removed
  • trunk/include/VBox/rem.h

    r18927 r19660  
    9393REMR3DECL(void) REMR3NotifyInterruptSet(PVM pVM, PVMCPU pVCpu);
    9494REMR3DECL(void) REMR3NotifyInterruptClear(PVM pVM, PVMCPU pVCpu);
    95 REMR3DECL(void) REMR3NotifyTimerPending(PVM pVM);
     95REMR3DECL(void) REMR3NotifyTimerPending(PVM pVM, PVMCPU pVCpuDst);
    9696REMR3DECL(void) REMR3NotifyDmaPending(PVM pVM);
    9797REMR3DECL(void) REMR3NotifyQueuePending(PVM pVM);
  • trunk/include/VBox/tm.h

    r19507 r19660  
    9696 */
    9797VMMDECL(uint64_t) TMVirtualGet(PVM pVM);
    98 VMMDECL(uint64_t) TMVirtualGetEx(PVM pVM, bool fCheckTimers);
     98VMMDECL(uint64_t) TMVirtualGetNoCheck(PVM pVM);
    9999VMMDECL(uint64_t) TMVirtualSyncGetLag(PVM pVM);
    100100VMMDECL(uint32_t) TMVirtualSyncGetCatchUpPct(PVM pVM);
    101101VMMDECL(uint64_t) TMVirtualGetFreq(PVM pVM);
     102VMMDECL(uint64_t) TMVirtualSyncGet(PVM pVM);
     103VMMDECL(uint64_t) TMVirtualSyncGetNoCheck(PVM pVM);
    102104VMMDECL(uint64_t) TMVirtualSyncGetEx(PVM pVM, bool fCheckTimers);
    103 VMMDECL(uint64_t) TMVirtualSyncGet(PVM pVM);
    104105VMMDECL(int)      TMVirtualResume(PVM pVM);
    105106VMMDECL(int)      TMVirtualPause(PVM pVM);
     
    199200VMMDECL(int)      TMTimerStop(PTMTIMER pTimer);
    200201VMMDECL(bool)     TMTimerIsActive(PTMTIMER pTimer);
    201 VMMDECL(uint64_t) TMTimerPoll(PVM pVM);
    202 VMMDECL(uint64_t) TMTimerPollGIP(PVM pVM, uint64_t *pu64Delta);
     202VMMDECL(uint64_t) TMTimerPoll(PVM pVM, PVMCPU pVCpu);
     203VMMDECL(uint64_t) TMTimerPollGIP(PVM pVM, PVMCPU pVCpu, uint64_t *pu64Delta);
    203204
    204205/** @} */
  • trunk/include/VBox/vm.h

    r19593 r19660  
    241241 * @{
    242242 */
    243 /** This action forces the VM to schedule and run pending timer (TM). */
    244 #define VM_FF_TIMER                         RT_BIT_32(2)
     243/** The virtual sync clock has been stopped, go to TM until it has been
     244 *  restarted... */
     245#define VM_FF_TM_VIRTUAL_SYNC               RT_BIT_32(2)
    245246/** PDM Queues are pending. */
    246247#define VM_FF_PDM_QUEUES_BIT                3
     
    278279/** This action forces the VM to service check and pending interrupts on the PIC. */
    279280#define VMCPU_FF_INTERRUPT_PIC              RT_BIT_32(1)
    280 /** This action forces the VM to schedule and run pending timer (TM). (bogus for now; needed for PATM backwards compatibility) */
     281/** This action forces the VM to schedule and run pending timer (TM).
     282 * @remarks Don't move - PATM compatibility.  */
    281283#define VMCPU_FF_TIMER                      RT_BIT_32(2)
    282284/** PDM critical section unlocking is pending, process promptly upon return to R3. */
     
    316318
    317319/** Externally forced VM actions. Used to quit the idle/wait loop. */
    318 #define VM_FF_EXTERNAL_HALTED_MASK              (VM_FF_TERMINATE | VM_FF_DBGF | VM_FF_TIMER | VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA)
     320#define VM_FF_EXTERNAL_HALTED_MASK              (VM_FF_TERMINATE | VM_FF_DBGF | VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA)
    319321/** Externally forced VMCPU actions. Used to quit the idle/wait loop. */
    320 #define VMCPU_FF_EXTERNAL_HALTED_MASK           (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_REQUEST)
     322#define VMCPU_FF_EXTERNAL_HALTED_MASK           (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_REQUEST | VMCPU_FF_TIMER)
    321323
    322324/** High priority VM pre-execution actions. */
    323 #define VM_FF_HIGH_PRIORITY_PRE_MASK            (   VM_FF_TERMINATE | VM_FF_DBGF | VM_FF_TIMER | VM_FF_DEBUG_SUSPEND \
    324                                                  |  VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY)
     325#define VM_FF_HIGH_PRIORITY_PRE_MASK            (  VM_FF_TERMINATE | VM_FF_DBGF | VM_FF_TM_VIRTUAL_SYNC | VM_FF_DEBUG_SUSPEND \
     326                                                 | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY)
    325327/** High priority VMCPU pre-execution actions. */
    326 #define VMCPU_FF_HIGH_PRIORITY_PRE_MASK         (   VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC  \
    327                                                  |  VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_SELM_SYNC_TSS | VMCPU_FF_TRPM_SYNC_IDT | VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT)
     328#define VMCPU_FF_HIGH_PRIORITY_PRE_MASK         (  VMCPU_FF_TIMER | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_PGM_SYNC_CR3 \
     329                                                 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_SELM_SYNC_TSS | VMCPU_FF_TRPM_SYNC_IDT \
     330                                                 | VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT | VMCPU_FF_INHIBIT_INTERRUPTS)
    328331
    329332/** High priority VM pre raw-mode execution mask. */
    330333#define VM_FF_HIGH_PRIORITY_PRE_RAW_MASK        (VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY)
    331334/** High priority VMCPU pre raw-mode execution mask. */
    332 #define VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK     (  VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_SELM_SYNC_TSS | VMCPU_FF_TRPM_SYNC_IDT | VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT \
    333                                                  | VMCPU_FF_INHIBIT_INTERRUPTS)
     335#define VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK     (  VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_SELM_SYNC_TSS | VMCPU_FF_TRPM_SYNC_IDT \
     336                                                 | VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT | VMCPU_FF_INHIBIT_INTERRUPTS)
    334337
    335338/** High priority post-execution actions. */
     
    352355
    353356/** VM Flags that cause the HWACCM loops to go back to ring-3. */
    354 #define VM_FF_HWACCM_TO_R3_MASK                 (VM_FF_TIMER | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY)
     357#define VM_FF_HWACCM_TO_R3_MASK                 (VM_FF_TM_VIRTUAL_SYNC | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY)
    355358/** VMCPU Flags that cause the HWACCM loops to go back to ring-3. */
    356 #define VMCPU_FF_HWACCM_TO_R3_MASK               (VMCPU_FF_TO_R3)
     359#define VMCPU_FF_HWACCM_TO_R3_MASK               (VMCPU_FF_TO_R3 | VMCPU_FF_TIMER)
    357360
    358361/** All the forced VM flags. */
  • trunk/src/VBox/VMM/EM.cpp

    r19658 r19660  
    969969             * important FFs while we were busy switching the state. So, check again.
    970970             */
    971             if (    VM_FF_ISPENDING(pVM, VM_FF_REQUEST | VM_FF_TIMER | VM_FF_PDM_QUEUES | VM_FF_DBGF | VM_FF_TERMINATE | VM_FF_RESET)
    972                 ||  VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_REQUEST))
     971            if (    VM_FF_ISPENDING(pVM, VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_DBGF | VM_FF_TERMINATE | VM_FF_RESET)
     972                ||  VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_TIMER | VMCPU_FF_REQUEST))
    973973            {
    974974                LogFlow(("emR3RemExecute: Skipping run, because FF is set. %#x\n", pVM->fGlobalForcedActions));
     
    10181018         */
    10191019#ifdef VBOX_HIGH_RES_TIMERS_HACK
    1020         TMTimerPoll(pVM);
     1020        TMTimerPoll(pVM, pVCpu);
    10211021#endif
     1022        AssertCompile((VMCPU_FF_ALL_BUT_RAW_MASK & ~(VMCPU_FF_CSAM_PENDING_ACTION | VMCPU_FF_CSAM_SCAN_PAGE)) & VMCPU_FF_TIMER);
    10221023        if (    VM_FF_ISPENDING(pVM, VM_FF_ALL_BUT_RAW_MASK)
    10231024            ||  VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_ALL_BUT_RAW_MASK & ~(VMCPU_FF_CSAM_PENDING_ACTION | VMCPU_FF_CSAM_SCAN_PAGE)))
     
    29382939         */
    29392940#ifdef VBOX_HIGH_RES_TIMERS_HACK
    2940         TMTimerPoll(pVM);
     2941        TMTimerPoll(pVM, pVCpu);
    29412942#endif
    29422943        STAM_PROFILE_ADV_STOP(&pVCpu->em.s.StatRAWTail, d);
     
    30793080         */
    30803081#ifdef VBOX_HIGH_RES_TIMERS_HACK
    3081         TMTimerPoll(pVM);
     3082        TMTimerPoll(pVM, pVCpu);
    30823083#endif
    30833084        if (    VM_FF_ISPENDING(pVM, VM_FF_ALL_MASK)
     
    31213122
    31223123    /*
    3123      * We stay in the wait for SIPI state unless explicitly told otherwise. 
     3124     * We stay in the wait for SIPI state unless explicitly told otherwise.
    31243125     */
    31253126    if (pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI)
     
    33823383
    33833384        /* check that we got them all  */
    3384         Assert(!(VM_FF_NORMAL_PRIORITY_POST_MASK & ~(VM_FF_TERMINATE | VM_FF_DBGF | VM_FF_RESET | VM_FF_PGM_NO_MEMORY)));
    3385         Assert(!(VMCPU_FF_NORMAL_PRIORITY_POST_MASK & ~(VMCPU_FF_CSAM_SCAN_PAGE)));
     3385        AssertCompile(VM_FF_NORMAL_PRIORITY_POST_MASK == (VM_FF_TERMINATE | VM_FF_DBGF | VM_FF_RESET | VM_FF_PGM_NO_MEMORY));
     3386        AssertCompile(VMCPU_FF_NORMAL_PRIORITY_POST_MASK == VMCPU_FF_CSAM_SCAN_PAGE);
    33863387    }
    33873388
     
    34283429
    34293430        /* check that we got them all  */
    3430         Assert(!(VM_FF_NORMAL_PRIORITY_MASK & ~(VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA | VM_FF_REM_HANDLER_NOTIFY)));
     3431        AssertCompile(VM_FF_NORMAL_PRIORITY_MASK == (VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA | VM_FF_REM_HANDLER_NOTIFY));
    34313432    }
    34323433
     
    34673468         * Timers before interrupts.
    34683469         */
    3469         if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_TIMER, VM_FF_PGM_NO_MEMORY))
     3470        if (    VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_TIMER)
     3471            &&  !VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY))
    34703472            TMR3TimerQueuesDo(pVM);
    34713473
     
    34733475         * The instruction following an emulated STI should *always* be executed!
    34743476         */
    3475         if (    !VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY)
    3476             &&  VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
     3477        if (    VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
     3478            &&  !VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY))
    34773479        {
    34783480            Log(("VM_FF_EMULATED_STI at %RGv successor %RGv\n", (RTGCPTR)CPUMGetGuestRIP(pVCpu), EMGetInhibitInterruptsPC(pVCpu)));
     
    35763578
    35773579#endif
     3580        if (VM_FF_ISPENDING(pVM, VM_FF_TM_VIRTUAL_SYNC))
     3581        {
     3582            /** @todo FIXME */
     3583        }
     3584
    35783585        /* check that we got them all  */
    3579         Assert(!(VM_FF_HIGH_PRIORITY_PRE_MASK & ~(VM_FF_TIMER | VM_FF_DBGF | VM_FF_TERMINATE | VM_FF_DEBUG_SUSPEND | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY)));
    3580         Assert(!(VMCPU_FF_HIGH_PRIORITY_PRE_MASK & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_SELM_SYNC_TSS | VMCPU_FF_TRPM_SYNC_IDT | VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT | VMCPU_FF_INHIBIT_INTERRUPTS)));
     3586        AssertCompile(VM_FF_HIGH_PRIORITY_PRE_MASK == (VM_FF_TM_VIRTUAL_SYNC | VM_FF_DBGF | VM_FF_TERMINATE | VM_FF_DEBUG_SUSPEND | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY));
     3587        AssertCompile(VMCPU_FF_HIGH_PRIORITY_PRE_MASK == (VMCPU_FF_TIMER | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_SELM_SYNC_TSS | VMCPU_FF_TRPM_SYNC_IDT | VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT | VMCPU_FF_INHIBIT_INTERRUPTS));
    35813588    }
    35823589
     
    37463753                    break;
    37473754
    3748                 /* 
     3755                /*
    37493756                 * Switch to the wait for SIPI state (application processor only)
    37503757                 */
  • trunk/src/VBox/VMM/PATM/PATM.cpp

    r19293 r19660  
    113113
    114114    /* These values can't change as they are hardcoded in patch code (old saved states!) */
    115     AssertCompile(VM_FF_TIMER   == VMCPU_FF_TIMER);
     115    AssertCompile(VMCPU_FF_TIMER == RT_BIT_32(2));
    116116    AssertCompile(VM_FF_REQUEST == VMCPU_FF_REQUEST);
    117117    AssertCompile(VMCPU_FF_INTERRUPT_APIC == RT_BIT_32(0));
  • trunk/src/VBox/VMM/REMInternal.h

    r19297 r19660  
    132132    /** In REM mode.
    133133     * I.e. the correct CPU state and some other bits are with REM. */
    134     bool                    fInREM;
     134    bool volatile           fInREM;
    135135    /** In REMR3State. */
    136136    bool                    fInStateSync;
     
    276276
    277277#else  /* !VBOX_WITH_STATISTICS */
    278 # define remR3ProfileStart(c)
    279 # define remR3ProfileStop(c)
     278# define remR3ProfileStart(c)   do { } while (0)
     279# define remR3ProfileStop(c)    do { } while (0)
    280280#endif /* !VBOX_WITH_STATISTICS */
    281281
  • trunk/src/VBox/VMM/TM.cpp

    r19609 r19660  
    199199
    200200    pVM->tm.s.offVM = RT_OFFSETOF(VM, tm.s);
     201    pVM->tm.s.idTimerCpu = pVM->cCPUs - 1; /* The last CPU. */
    201202    pVM->tm.s.paTimerQueuesR3[TMCLOCK_VIRTUAL].enmClock        = TMCLOCK_VIRTUAL;
    202203    pVM->tm.s.paTimerQueuesR3[TMCLOCK_VIRTUAL].u64Expire       = INT64_MAX;
     
    207208    pVM->tm.s.paTimerQueuesR3[TMCLOCK_TSC].enmClock            = TMCLOCK_TSC;
    208209    pVM->tm.s.paTimerQueuesR3[TMCLOCK_TSC].u64Expire           = INT64_MAX;
     210
    209211
    210212    /*
     
    551553    STAM_REG(pVM, &pVM->tm.s.StatDoQueuesRun,                     STAMTYPE_PROFILE_ADV, "/TM/DoQueues/Run",                STAMUNIT_TICKS_PER_CALL, "The run part.");
    552554
    553     STAM_REG(pVM, &pVM->tm.s.StatPollAlreadySet,                      STAMTYPE_COUNTER, "/TM/PollAlreadySet",                  STAMUNIT_OCCURENCES, "TMTimerPoll calls where the FF was already set.");
    554     STAM_REG(pVM, &pVM->tm.s.StatPollVirtual,                         STAMTYPE_COUNTER, "/TM/PollHitsVirtual",                 STAMUNIT_OCCURENCES, "The number of times TMTimerPoll found an expired TMCLOCK_VIRTUAL queue.");
    555     STAM_REG(pVM, &pVM->tm.s.StatPollVirtualSync,                     STAMTYPE_COUNTER, "/TM/PollHitsVirtualSync",             STAMUNIT_OCCURENCES, "The number of times TMTimerPoll found an expired TMCLOCK_VIRTUAL_SYNC queue.");
    556     STAM_REG(pVM, &pVM->tm.s.StatPollMiss,                            STAMTYPE_COUNTER, "/TM/PollMiss",                        STAMUNIT_OCCURENCES, "TMTimerPoll calls where nothing had expired.");
     555    STAM_REG(pVM, &pVM->tm.s.StatPoll,                                STAMTYPE_COUNTER, "/TM/Poll",                            STAMUNIT_OCCURENCES, "TMTimerPoll calls.");
     556    STAM_REG(pVM, &pVM->tm.s.StatPollAlreadySet,                      STAMTYPE_COUNTER, "/TM/Poll/AlreadySet",                 STAMUNIT_OCCURENCES, "TMTimerPoll calls where the FF was already set.");
     557    STAM_REG(pVM, &pVM->tm.s.StatPollVirtual,                         STAMTYPE_COUNTER, "/TM/Poll/HitsVirtual",                STAMUNIT_OCCURENCES, "The number of times TMTimerPoll found an expired TMCLOCK_VIRTUAL queue.");
     558    STAM_REG(pVM, &pVM->tm.s.StatPollVirtualSync,                     STAMTYPE_COUNTER, "/TM/Poll/HitsVirtualSync",            STAMUNIT_OCCURENCES, "The number of times TMTimerPoll found an expired TMCLOCK_VIRTUAL_SYNC queue.");
     559    STAM_REG(pVM, &pVM->tm.s.StatPollMiss,                            STAMTYPE_COUNTER, "/TM/Poll/Miss",                       STAMUNIT_OCCURENCES, "TMTimerPoll calls where nothing had expired.");
     560    STAM_REG(pVM, &pVM->tm.s.StatPollRunning,                         STAMTYPE_COUNTER, "/TM/Poll/Running",                    STAMUNIT_OCCURENCES, "TMTimerPoll calls where the queues were being run.");
     561
     562    STAM_REG(pVM, &pVM->tm.s.StatPollGIP,                             STAMTYPE_COUNTER, "/TM/PollGIP",                         STAMUNIT_OCCURENCES, "TMTimerPollGIP calls.");
     563    STAM_REG(pVM, &pVM->tm.s.StatPollGIPAlreadySet,                   STAMTYPE_COUNTER, "/TM/PollGIP/AlreadySet",              STAMUNIT_OCCURENCES, "TMTimerPollGIP calls where the FF was already set.");
     564    STAM_REG(pVM, &pVM->tm.s.StatPollGIPVirtual,                      STAMTYPE_COUNTER, "/TM/PollGIP/HitsVirtual",             STAMUNIT_OCCURENCES, "The number of times TMTimerPollGIP found an expired TMCLOCK_VIRTUAL queue.");
     565    STAM_REG(pVM, &pVM->tm.s.StatPollGIPVirtualSync,                  STAMTYPE_COUNTER, "/TM/PollGIP/HitsVirtualSync",         STAMUNIT_OCCURENCES, "The number of times TMTimerPollGIP found an expired TMCLOCK_VIRTUAL_SYNC queue.");
     566    STAM_REG(pVM, &pVM->tm.s.StatPollGIPMiss,                         STAMTYPE_COUNTER, "/TM/PollGIP/Miss",                    STAMUNIT_OCCURENCES, "TMTimerPollGIP calls where nothing had expired.");
     567    STAM_REG(pVM, &pVM->tm.s.StatPollGIPRunning,                      STAMTYPE_COUNTER, "/TM/PollGIP/Running",                 STAMUNIT_OCCURENCES, "TMTimerPollGIP calls where the queues were being run.");
    557568
    558569    STAM_REG(pVM, &pVM->tm.s.StatPostponedR3,                         STAMTYPE_COUNTER, "/TM/PostponedR3",                     STAMUNIT_OCCURENCES, "Postponed due to unschedulable state, in ring-3.");
     
    940951    if (pVM->tm.s.fVirtualSyncCatchUp)
    941952    {
    942         const uint64_t offVirtualNow = TMVirtualGetEx(pVM, false /* don't check timers */);
    943         const uint64_t offVirtualSyncNow = TMVirtualSyncGetEx(pVM, false /* don't check timers */);
     953        const uint64_t offVirtualNow = TMVirtualGetNoCheck(pVM);
     954        const uint64_t offVirtualSyncNow = TMVirtualSyncGetNoCheck(pVM);
    944955        if (pVM->tm.s.fVirtualSyncCatchUp)
    945956        {
     
    965976#endif
    966977
    967     VM_FF_CLEAR(pVM, VM_FF_TIMER);
     978    PVMCPU pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu];
     979    VMCPU_FF_CLEAR(pVCpuDst, VMCPU_FF_TIMER); /** @todo FIXME: this isn't right. */
    968980    tmUnlock(pVM);
    969981}
     
    11381150     * Make sure timers get rescheduled immediately.
    11391151     */
    1140     VM_FF_SET(pVM, VM_FF_TIMER);
     1152    PVMCPU pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu];
     1153    VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
    11411154
    11421155    return VINF_SUCCESS;
     
    16041617     * TMVirtualSyncGet only permits EMT.
    16051618     */
    1606     uint64_t u64Now = TMVirtualGet(pVM);
     1619    uint64_t u64Now = TMVirtualGetNoCheck(pVM);
    16071620    if (pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL].u64Expire <= u64Now)
    16081621        return true;
     
    16381651static DECLCALLBACK(void) tmR3TimerCallback(PRTTIMER pTimer, void *pvUser, uint64_t /*iTick*/)
    16391652{
    1640     PVM pVM = (PVM)pvUser;
     1653    PVM     pVM      = (PVM)pvUser;
     1654    PVMCPU  pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu];
     1655
     1656RTLogFlush(NULL);
     1657
    16411658    AssertCompile(TMCLOCK_MAX == 4);
    16421659#ifdef DEBUG_Sander /* very annoying, keep it private. */
    1643     if (VM_FF_ISSET(pVM, VM_FF_TIMER))
     1660    if (VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
    16441661        Log(("tmR3TimerCallback: timer event still pending!!\n"));
    16451662#endif
    1646     if (    !VM_FF_ISSET(pVM, VM_FF_TIMER)
    1647         &&  (   pVM->tm.s.paTimerQueuesR3[TMCLOCK_VIRTUAL_SYNC].offSchedule
     1663    if (    !VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER)
     1664        &&  (   pVM->tm.s.paTimerQueuesR3[TMCLOCK_VIRTUAL_SYNC].offSchedule /** @todo FIXME - reconsider offSchedule as a reason for running the timer queues. */
    16481665            ||  pVM->tm.s.paTimerQueuesR3[TMCLOCK_VIRTUAL].offSchedule
    16491666            ||  pVM->tm.s.paTimerQueuesR3[TMCLOCK_REAL].offSchedule
     
    16511668            ||  tmR3AnyExpiredTimers(pVM)
    16521669            )
    1653         && !VM_FF_ISSET(pVM, VM_FF_TIMER)
     1670        && !VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER)
    16541671        && !pVM->tm.s.fRunningQueues
    16551672       )
    16561673    {
    1657         VM_FF_SET(pVM, VM_FF_TIMER);
    1658         REMR3NotifyTimerPending(pVM);
    1659         VMR3NotifyGlobalFFU(pVM->pUVM, VMNOTIFYFF_FLAGS_DONE_REM);
     1674        Log5(("TM(%u): FF: 0 -> 1\n", __LINE__));
     1675        VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
     1676        REMR3NotifyTimerPending(pVM, pVCpuDst);
     1677        VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM /** @todo | VMNOTIFYFF_FLAGS_POKE ?*/);
    16601678        STAM_COUNTER_INC(&pVM->tm.s.StatTimerCallbackSetFF);
    16611679    }
     
    16741692VMMR3DECL(void) TMR3TimerQueuesDo(PVM pVM)
    16751693{
    1676     /** Note: temporarily restrict this to VCPU 0. */
    1677     if (VMMGetCpuId(pVM) != 0)
    1678         return;
    1679 
    1680     /*
    1681      * Only one EMT should be doing this at a time.
    1682      */
    1683     VM_FF_CLEAR(pVM, VM_FF_TIMER);
    1684     if (ASMBitTestAndSet(&pVM->tm.s.fRunningQueues, 0))
     1694    /*
     1695     * Only the dedicated timer EMT should do stuff here.
     1696     *
     1697     * The lock isn't really necessary any longer, but it might come
     1698     * in handy when dealing with VM_FF_TM_VIRTUAL_SYNC later.
     1699     */
     1700    Assert(pVM->tm.s.idTimerCpu < pVM->cCPUs);
     1701    PVMCPU pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu];
     1702    if (    VMMGetCpu(pVM) != pVCpuDst
     1703        ||  ASMBitTestAndSet(&pVM->tm.s.fRunningQueues, 0))
    16851704    {
    16861705        Assert(pVM->cCPUs > 1);
     
    16911710    Log2(("TMR3TimerQueuesDo:\n"));
    16921711    tmLock(pVM);
     1712
     1713    /*
     1714     * Clear the FF before processing the queues but after obtaining the lock.
     1715     */
     1716    VMCPU_FF_CLEAR(pVCpuDst, VMCPU_FF_TIMER);
    16931717
    16941718    /*
     
    17041728    tmR3TimerQueueRunVirtualSync(pVM);
    17051729    STAM_PROFILE_ADV_SUSPEND(&pVM->tm.s.StatDoQueuesRun, r1);
     1730    if (pVM->tm.s.fVirtualSyncTicking) /** @todo move into tmR3TimerQueueRunVirtualSync - FIXME */
     1731        VM_FF_CLEAR(pVM, VM_FF_TM_VIRTUAL_SYNC);
    17061732
    17071733    /* TMCLOCK_VIRTUAL */
     
    17131739    STAM_PROFILE_ADV_SUSPEND(&pVM->tm.s.StatDoQueuesRun, r2);
    17141740
    1715 #if 0 /** @todo if ever used, remove this and fix the stam prefixes on TMCLOCK_REAL below. */
    17161741    /* TMCLOCK_TSC */
    1717     STAM_PROFILE_ADV_RESUME(&pVM->tm.s.StatDoQueuesSchedule, s2);
    1718     tmTimerQueueSchedule(pVM, &pVM->tm.s.paTimerQueuesR3[TMCLOCK_TSC]);
    1719     STAM_PROFILE_ADV_SUSPEND(&pVM->tm.s.StatDoQueuesSchedule, s3);
    1720     STAM_PROFILE_ADV_RESUME(&pVM->tm.s.StatDoQueuesRun, r2);
    1721     tmR3TimerQueueRun(pVM, &pVM->tm.s.paTimerQueuesR3[TMCLOCK_TSC]);
    1722     STAM_PROFILE_ADV_SUSPEND(&pVM->tm.s.StatDoQueuesRun, r3);
    1723 #endif
     1742    Assert(!pVM->tm.s.paTimerQueuesR3[TMCLOCK_TSC].offActive); /* not used */
    17241743
    17251744    /* TMCLOCK_REAL */
     
    17401759
    17411760    /* done */
     1761    tmUnlock(pVM);
    17421762    ASMAtomicBitClear(&pVM->tm.s.fRunningQueues, 0);
    1743     tmUnlock(pVM);
    1744 }
     1763}
     1764
     1765//__BEGIN_DECLS
     1766//int     iomLock(PVM pVM);
     1767//void    iomUnlock(PVM pVM);
     1768//__END_DECLS
    17451769
    17461770
     
    18011825
    18021826            /* fire */
     1827//            tmUnlock(pVM);
    18031828            switch (pTimer->enmType)
    18041829            {
    1805                 case TMTIMERTYPE_DEV:       pTimer->u.Dev.pfnTimer(pTimer->u.Dev.pDevIns, pTimer); break;
     1830                case TMTIMERTYPE_DEV:
     1831//                    iomLock(pVM);
     1832                    pTimer->u.Dev.pfnTimer(pTimer->u.Dev.pDevIns, pTimer);
     1833//                    iomUnlock(pVM);
     1834                    break;
     1835
    18061836                case TMTIMERTYPE_DRV:       pTimer->u.Drv.pfnTimer(pTimer->u.Drv.pDrvIns, pTimer); break;
    18071837                case TMTIMERTYPE_INTERNAL:  pTimer->u.Internal.pfnTimer(pVM, pTimer, pTimer->u.Internal.pvUser); break;
     
    18111841                    break;
    18121842            }
     1843//            tmLock(pVM);
    18131844
    18141845            /* change the state if it wasn't changed already in the handler. */
     
    18581889     * and 2) lag behind at a steady rate.
    18591890     */
    1860     const uint64_t u64VirtualNow = TMVirtualGetEx(pVM, false /* don't check timers */);
     1891    const uint64_t u64VirtualNow = TMVirtualGetNoCheck(pVM);
    18611892    uint64_t u64Now;
    18621893    if (!pVM->tm.s.fVirtualSyncTicking)
     
    19822013
    19832014        /* calc the slack we've handed out. */
    1984         const uint64_t u64VirtualNow2 = TMVirtualGetEx(pVM, false /* don't check timers */);
     2015        const uint64_t u64VirtualNow2 = TMVirtualGetNoCheck(pVM);
    19852016        Assert(u64VirtualNow2 >= u64VirtualNow);
    19862017        AssertMsg(pVM->tm.s.u64VirtualSync >= u64Now, ("%RU64 < %RU64\n", pVM->tm.s.u64VirtualSync, u64Now));
  • trunk/src/VBox/VMM/TMInternal.h

    r19537 r19660  
    305305     * Config variable: TSCNotTiedToHalt (bool) */
    306306    bool                        fTSCNotTiedToHalt;
    307     bool                        afAlignment0[6]; /**< alignment padding */
     307    bool                        afAlignment0[2]; /**< alignment padding */
     308    /** The ID of the virtual CPU that normally runs the timers. */
     309    VMCPUID                     idTimerCpu;
    308310    /** The number of CPU clock ticks per second (TMCLOCK_TSC).
    309311     * Config variable: TSCTicksPerSecond (64-bit unsigned int)
     
    451453    /** TMTimerPoll
    452454     * @{ */
     455    STAMCOUNTER                 StatPoll;
    453456    STAMCOUNTER                 StatPollAlreadySet;
    454457    STAMCOUNTER                 StatPollVirtual;
    455458    STAMCOUNTER                 StatPollVirtualSync;
    456459    STAMCOUNTER                 StatPollMiss;
     460    STAMCOUNTER                 StatPollRunning;
     461    /** @} */
     462    /** TMTimerPollGIP
     463     * @{ */
     464    STAMCOUNTER                 StatPollGIP;
     465    STAMCOUNTER                 StatPollGIPAlreadySet;
     466    STAMCOUNTER                 StatPollGIPVirtual;
     467    STAMCOUNTER                 StatPollGIPVirtualSync;
     468    STAMCOUNTER                 StatPollGIPMiss;
     469    STAMCOUNTER                 StatPollGIPRunning;
    457470    /** @} */
    458471    /** TMTimerSet
  • trunk/src/VBox/VMM/VMEmt.cpp

    r19539 r19660  
    302302            ||  VMCPU_FF_ISPENDING(pVCpu, fMask))
    303303            break;
    304         uint64_t u64NanoTS = TMVirtualToNano(pVM, TMTimerPoll(pVM));
     304        uint64_t u64NanoTS = TMVirtualToNano(pVM, TMTimerPoll(pVM, pVCpu));
    305305        if (    VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK)
    306306            ||  VMCPU_FF_ISPENDING(pVCpu, fMask))
     
    504504         * Estimate time left to the next event.
    505505         */
    506         uint64_t u64NanoTS = TMVirtualToNano(pVM, TMTimerPoll(pVM));
     506        uint64_t u64NanoTS = TMVirtualToNano(pVM, TMTimerPoll(pVM, pVCpu));
    507507        if (    VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK)
    508508            ||  VMCPU_FF_ISPENDING(pVCpu, fMask))
     
    624624         */
    625625        uint64_t u64Delta;
    626         uint64_t u64GipTime = TMTimerPollGIP(pVM, &u64Delta);
     626        uint64_t u64GipTime = TMTimerPollGIP(pVM, pVCpu, &u64Delta);
    627627        if (    VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK)
    628628            ||  VMCPU_FF_ISPENDING(pVCpu, fMask))
  • trunk/src/VBox/VMM/VMM.cpp

    r19621 r19660  
    12811281    }
    12821282    /* Wait until all other VCPUs are waiting for us. */
    1283     while (RTCritSectGetWaiters(&pVM->vmm.s.CritSectSync) != (pVM->cCPUs - 1))
     1283    while (RTCritSectGetWaiters(&pVM->vmm.s.CritSectSync) != (int32_t)(pVM->cCPUs - 1))
    12841284        RTThreadSleep(1);
    12851285
     
    16931693    c = 0;
    16941694    f = fGlobalForcedActions;
    1695     PRINT_FLAG(VM_FF_,TIMER);
     1695    PRINT_FLAG(VM_FF_,TM_VIRTUAL_SYNC);
    16961696    PRINT_FLAG(VM_FF_,PDM_QUEUES);
    16971697    PRINT_FLAG(VM_FF_,PDM_DMA);
     
    17291729    {
    17301730        const uint32_t fLocalForcedActions = pVM->aCpus[i].fLocalForcedActions;
    1731         pHlp->pfnPrintf(pHlp, "CPU %u FFs: %#RX32", i, f);
     1731        pHlp->pfnPrintf(pHlp, "CPU %u FFs: %#RX32", i, fLocalForcedActions);
    17321732
    17331733        /* show the flag mnemonics */
     
    17361736        PRINT_FLAG(VMCPU_FF_,INTERRUPT_APIC);
    17371737        PRINT_FLAG(VMCPU_FF_,INTERRUPT_PIC);
     1738        PRINT_FLAG(VMCPU_FF_,TIMER);
    17381739        PRINT_FLAG(VMCPU_FF_,PDM_CRITSECT);
    17391740        PRINT_FLAG(VMCPU_FF_,PGM_SYNC_CR3);
  • trunk/src/VBox/VMM/VMMAll/TMAll.cpp

    r19538 r19660  
    188188        tmUnlock(pVM);
    189189    }
    190     else if (!VM_FF_ISSET(pVM, VM_FF_TIMER))  /**@todo only do this when arming the timer. */
    191     {
    192         STAM_COUNTER_INC(&pVM->tm.s.StatScheduleSetFF);
    193         VM_FF_SET(pVM, VM_FF_TIMER);
     190    else
     191    {
     192        /** @todo FIXME: don't use FF for scheduling! */
     193        PVMCPU pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu];
     194        if (!VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))  /**@todo only do this when arming the timer. */
     195        {
     196            Log5(("TMAll(%u): FF: 0 -> 1\n", __LINE__));
     197            VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
    194198#ifdef IN_RING3
    195         REMR3NotifyTimerPending(pVM);
    196         VMR3NotifyGlobalFFU(pVM->pUVM, VMNOTIFYFF_FLAGS_DONE_REM);
    197 #endif
     199            REMR3NotifyTimerPending(pVM, pVCpuDst);
     200            VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
     201#endif
     202            STAM_COUNTER_INC(&pVM->tm.s.StatScheduleSetFF);
     203        }
    198204    }
    199205}
     
    268274 * This function is called before FFs are checked in the inner execution EM loops.
    269275 *
    270  * @returns Virtual timer ticks to the next event.
     276 * @returns Virtual timer ticks to the next event. (I.e. 0 means that a timer
     277 *          has expired or some important rescheduling is pending.)
    271278 * @param   pVM         Pointer to the shared VM structure.
     279 * @param   pVCpu       Pointer to the shared VMCPU structure of the caller.
    272280 * @thread  The emulation thread.
    273281 */
    274 VMMDECL(uint64_t) TMTimerPoll(PVM pVM)
    275 {
    276     int rc = tmLock(pVM); /* play safe for now */
     282VMMDECL(uint64_t) TMTimerPoll(PVM pVM, PVMCPU pVCpu)
     283{
     284    static const uint64_t   s_u64OtherRet = 500000000; /* 500 ms for non-timer EMTs. */
     285    PVMCPU  pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu];
     286    STAM_COUNTER_INC(&pVM->tm.s.StatPoll);
    277287
    278288    /*
    279      * Return straight away if the timer FF is already set.
     289     * Return straight away if the timer FF is already set ...
    280290     */
    281     if (VM_FF_ISSET(pVM, VM_FF_TIMER))
     291    if (VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
    282292    {
    283293        STAM_COUNTER_INC(&pVM->tm.s.StatPollAlreadySet);
    284 #ifndef IN_RING3
    285         if (RT_SUCCESS(rc))
    286 #endif
    287             tmUnlock(pVM);
    288         return 0;
     294        return pVCpu == pVCpuDst ? 0 : s_u64OtherRet;
     295    }
     296
     297    /*
     298     * ... or if timers are being run.
     299     */
     300    if (pVM->tm.s.fRunningQueues)
     301    {
     302        STAM_COUNTER_INC(&pVM->tm.s.StatPollRunning);
     303        return s_u64OtherRet;
    289304    }
    290305
     
    292307     * Get current time and check the expire times of the two relevant queues.
    293308     */
    294     const uint64_t u64Now = TMVirtualGet(pVM);
     309    int             rc     = tmLock(pVM); /** @todo FIXME: Stop playing safe here... */
     310    const uint64_t  u64Now = TMVirtualGetNoCheck(pVM);
    295311
    296312    /*
    297313     * TMCLOCK_VIRTUAL
    298314     */
    299     const uint64_t u64Expire1 = pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL].u64Expire;
    300     const int64_t i64Delta1 = u64Expire1 - u64Now;
     315    const uint64_t  u64Expire1 = pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL].u64Expire;
     316    const int64_t   i64Delta1 = u64Expire1 - u64Now;
    301317    if (i64Delta1 <= 0)
    302318    {
     
    307323#endif
    308324            tmUnlock(pVM);
    309         VM_FF_SET(pVM, VM_FF_TIMER);
     325        Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
     326        VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
    310327#ifdef IN_RING3
    311         REMR3NotifyTimerPending(pVM);
    312 #endif
    313         return 0;
     328        REMR3NotifyTimerPending(pVM, pVCpuDst);
     329#endif
     330        return pVCpu == pVCpuDst ? 0 : s_u64OtherRet;
    314331    }
    315332
     
    345362    if (i64Delta2 <= 0)
    346363    {
     364        if (    !pVM->tm.s.fRunningQueues
     365            &&  !VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
     366        {
     367            Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
     368            VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
     369#ifdef IN_RING3
     370            REMR3NotifyTimerPending(pVM, pVCpuDst);
     371#endif
     372        }
    347373        STAM_COUNTER_INC(&pVM->tm.s.StatPollVirtualSync);
    348374#ifndef IN_RING3
     
    351377            tmUnlock(pVM);
    352378        LogFlow(("TMTimerPoll: expire2=%RU64 <= now=%RU64\n", u64Expire2, u64Now));
    353         VM_FF_SET(pVM, VM_FF_TIMER);
    354 #ifdef IN_RING3
    355         REMR3NotifyTimerPending(pVM);
    356 #endif
    357         return 0;
     379        return pVCpu == pVCpuDst ? 0 : s_u64OtherRet;
    358380    }
    359381    if (pVM->tm.s.fVirtualSyncCatchUp)
     
    380402 *          0 if the next event has already expired.
    381403 * @param   pVM         Pointer to the shared VM structure.
    382  * @param   pVM         Pointer to the shared VM structure.
     404 * @param   pVCpu       Pointer to the shared VMCPU structure of the caller.
    383405 * @param   pu64Delta   Where to store the delta.
    384406 * @thread  The emulation thread.
    385407 */
    386 VMMDECL(uint64_t) TMTimerPollGIP(PVM pVM, uint64_t *pu64Delta)
    387 {
    388     int rc = tmLock(pVM); /* play safe for now. */
     408VMMDECL(uint64_t) TMTimerPollGIP(PVM pVM, PVMCPU pVCpu, uint64_t *pu64Delta)
     409{
     410    static const uint64_t   s_u64OtherRet = 500000000; /* 500 million GIP ticks for non-timer EMTs. */
     411    PVMCPU                  pVCpuDst      = &pVM->aCpus[pVM->tm.s.idTimerCpu];
     412    const uint64_t          u64Now        = TMVirtualGetNoCheck(pVM);
     413    STAM_COUNTER_INC(&pVM->tm.s.StatPollGIP);
    389414
    390415    /*
    391      * Return straight away if the timer FF is already set.
     416     * Return straight away if the timer FF is already set ...
    392417     */
    393     if (VM_FF_ISSET(pVM, VM_FF_TIMER))
    394     {
    395         STAM_COUNTER_INC(&pVM->tm.s.StatPollAlreadySet);
    396 #ifndef IN_RING3
    397         if (RT_SUCCESS(rc))
    398 #endif
    399             tmUnlock(pVM);
    400         *pu64Delta = 0;
    401         return 0;
     418    if (VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
     419    {
     420        STAM_COUNTER_INC(&pVM->tm.s.StatPollGIPAlreadySet);
     421        if (pVCpuDst == pVCpu)
     422        {
     423            *pu64Delta = 0;
     424            return 0;
     425        }
     426        *pu64Delta = s_u64OtherRet;
     427        return u64Now + pVM->tm.s.u64VirtualOffset + s_u64OtherRet;
    402428    }
    403429
    404430    /*
    405      * Get current time and check the expire times of the two relevant queues.
     431     * ... or if timers are being run.
    406432     */
    407     const uint64_t  u64Now = TMVirtualGet(pVM);
     433    if (pVM->tm.s.fRunningQueues)
     434    {
     435        STAM_COUNTER_INC(&pVM->tm.s.StatPollGIPRunning);
     436        *pu64Delta = s_u64OtherRet;
     437        return u64Now + pVM->tm.s.u64VirtualOffset + s_u64OtherRet;
     438    }
     439
     440    int rc = tmLock(pVM); /** @todo FIXME: Stop playing safe... */
    408441
    409442    /*
    410      * TMCLOCK_VIRTUAL
     443     * Check for TMCLOCK_VIRTUAL expiration.
    411444     */
    412445    const uint64_t  u64Expire1 = pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL].u64Expire;
     
    414447    if (i64Delta1 <= 0)
    415448    {
    416         STAM_COUNTER_INC(&pVM->tm.s.StatPollVirtual);
     449        STAM_COUNTER_INC(&pVM->tm.s.StatPollGIPVirtual);
     450        Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
     451        VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
     452#ifdef IN_RING3
     453        REMR3NotifyTimerPending(pVM, pVCpuDst);
     454#endif
    417455#ifndef IN_RING3
    418456        if (RT_SUCCESS(rc))
     
    420458            tmUnlock(pVM);
    421459        LogFlow(("TMTimerPoll: expire1=%RU64 <= now=%RU64\n", u64Expire1, u64Now));
    422         VM_FF_SET(pVM, VM_FF_TIMER);
    423 #ifdef IN_RING3
    424         REMR3NotifyTimerPending(pVM);
    425 #endif
    426         *pu64Delta = 0;
    427         return 0;
     460        if (pVCpuDst == pVCpu)
     461        {
     462            *pu64Delta = 0;
     463            return 0;
     464        }
     465        *pu64Delta = s_u64OtherRet;
     466        return u64Now + pVM->tm.s.u64VirtualOffset + s_u64OtherRet;
    428467    }
    429468
    430469    /*
    431      * TMCLOCK_VIRTUAL_SYNC
     470     * Check for TMCLOCK_VIRTUAL_SYNC expiration.
    432471     * This isn't quite as straightforward if in a catch-up; not only do
    433472     * we have to adjust the 'now' but we also have to adjust the delta as well.
     
    460499    if (i64Delta2 <= 0)
    461500    {
    462         STAM_COUNTER_INC(&pVM->tm.s.StatPollVirtualSync);
     501        if (!VMCPU_FF_ISSET(pVCpu, VMCPU_FF_TIMER))
     502        {
     503            Log5(("TMAll(%u): FF: 0 -> 1\n", __LINE__));
     504            VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER /** @todo poke */);
     505#ifdef IN_RING3
     506            REMR3NotifyTimerPending(pVM, pVCpuDst);
     507#endif
     508        }
     509        STAM_COUNTER_INC(&pVM->tm.s.StatPollGIPVirtualSync);
     510
    463511#ifndef IN_RING3
    464512        if (RT_SUCCESS(rc))
     
    466514            tmUnlock(pVM);
    467515        LogFlow(("TMTimerPoll: expire2=%RU64 <= now=%RU64\n", u64Expire2, u64Now));
    468         VM_FF_SET(pVM, VM_FF_TIMER);
    469 #ifdef IN_RING3
    470         REMR3NotifyTimerPending(pVM);
    471 #endif
    472         *pu64Delta = 0;
    473         return 0;
     516        if (pVCpuDst == pVCpu)
     517        {
     518            *pu64Delta = 0;
     519            return 0;
     520        }
     521        *pu64Delta = s_u64OtherRet;
     522        return u64Now + pVM->tm.s.u64VirtualOffset + s_u64OtherRet;
    474523    }
    475524    if (pVM->tm.s.fVirtualSyncCatchUp)
    476525        i64Delta2 = ASMMultU64ByU32DivByU32(i64Delta2, 100, pVM->tm.s.u32VirtualSyncCatchUpPercentage + 100);
    477526
    478     /*
    479      * Return the GIP time of the next event.
    480      * This is the reverse of what tmVirtualGetRaw is doing.
    481      */
    482     STAM_COUNTER_INC(&pVM->tm.s.StatPollMiss);
    483     uint64_t u64GipTime = RT_MIN(i64Delta1, i64Delta2);
    484     *pu64Delta = u64GipTime;
    485     u64GipTime += u64Now + pVM->tm.s.u64VirtualOffset;
    486     if (RT_UNLIKELY(!pVM->tm.s.fVirtualWarpDrive))
    487     {
    488         u64GipTime -= pVM->tm.s.u64VirtualWarpDriveStart; /* the start is GIP time. */
    489         u64GipTime *= 100;
    490         u64GipTime /= pVM->tm.s.u32VirtualWarpDrivePercentage;
    491         u64GipTime += pVM->tm.s.u64VirtualWarpDriveStart;
    492     }
    493 
     527    uint64_t u64GipTime;
     528    if (pVCpuDst == pVCpu)
     529    {
     530        /*
     531         * Return the GIP time of the next event.
     532         * This is the reverse of what tmVirtualGetRaw is doing.
     533         */
     534        STAM_COUNTER_INC(&pVM->tm.s.StatPollGIPMiss);
     535        u64GipTime = RT_MIN(i64Delta1, i64Delta2);
     536        *pu64Delta = u64GipTime;
     537        u64GipTime += u64Now + pVM->tm.s.u64VirtualOffset;
     538        if (RT_UNLIKELY(!pVM->tm.s.fVirtualWarpDrive))
     539        {
     540            u64GipTime -= pVM->tm.s.u64VirtualWarpDriveStart; /* the start is GIP time. */
     541            u64GipTime *= 100;
     542            u64GipTime /= pVM->tm.s.u32VirtualWarpDrivePercentage;
     543            u64GipTime += pVM->tm.s.u64VirtualWarpDriveStart;
     544        }
     545    }
     546    else
     547    {
     548        *pu64Delta = s_u64OtherRet;
     549        u64GipTime = u64Now + pVM->tm.s.u64VirtualOffset + s_u64OtherRet;
     550    }
    494551#ifndef IN_RING3
    495552    if (RT_SUCCESS(rc))
  • trunk/src/VBox/VMM/VMMAll/TMAllVirtual.cpp

    r19500 r19660  
    330330         * Use the chance to check for expired timers.
    331331         */
    332         if (    fCheckTimers
    333             &&  !VM_FF_ISSET(pVM, VM_FF_TIMER)
    334             &&  !pVM->tm.s.fRunningQueues
    335             &&  (   pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL].u64Expire <= u64
    336                  || (   pVM->tm.s.fVirtualSyncTicking
    337                      && pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire <= u64 - pVM->tm.s.offVirtualSync
     332        if (fCheckTimers)
     333        {
     334            PVMCPU pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu];
     335            if (    !VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER)
     336                &&  !pVM->tm.s.fRunningQueues
     337                &&  (   pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL].u64Expire <= u64
     338                     || (   pVM->tm.s.fVirtualSyncTicking
     339                         && pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire <= u64 - pVM->tm.s.offVirtualSync
     340                        )
    338341                    )
    339                 )
    340            )
    341         {
    342             VM_FF_SET(pVM, VM_FF_TIMER);
    343             STAM_COUNTER_INC(&pVM->tm.s.StatVirtualGetSetFF);
     342                &&  !pVM->tm.s.fRunningQueues
     343               )
     344            {
     345                STAM_COUNTER_INC(&pVM->tm.s.StatVirtualGetSetFF);
     346                Log5(("TMAllVirtual(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
     347                VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
    344348#ifdef IN_RING3
    345             REMR3NotifyTimerPending(pVM);
    346             VMR3NotifyGlobalFFU(pVM->pUVM, VMNOTIFYFF_FLAGS_DONE_REM);
    347 #endif
     349                REMR3NotifyTimerPending(pVM, pVCpuDst);
     350                VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
     351#endif
     352            }
    348353        }
    349354    }
     
    372377
    373378/**
    374  * Gets the current TMCLOCK_VIRTUAL time
     379 * Gets the current TMCLOCK_VIRTUAL time without checking
     380 * timers or anything.
     381 *
     382 * Meaning, this has no side effect on FFs like TMVirtualGet may have.
    375383 *
    376384 * @returns The timestamp.
    377  * @param   pVM             VM handle.
    378  * @param   fCheckTimers    Check timers or not
    379  *
    380  * @remark  While the flow of time will never go backwards, the speed of the
    381  *          progress varies due to inaccurate RTTimeNanoTS and TSC. The latter can be
    382  *          influenced by power saving (SpeedStep, PowerNow!), while the former
    383  *          makes use of TSC and kernel timers.
    384  */
    385 VMMDECL(uint64_t) TMVirtualGetEx(PVM pVM, bool fCheckTimers)
    386 {
    387     return tmVirtualGet(pVM, fCheckTimers);
     385 * @param   pVM     VM handle.
     386 *
     387 * @remarks See TMVirtualGet.
     388 */
     389VMMDECL(uint64_t) TMVirtualGetNoCheck(PVM pVM)
     390{
     391    return tmVirtualGet(pVM, false /*fCheckTimers*/);
    388392}
    389393
     
    397401 * @thread  EMT.
    398402 */
    399 VMMDECL(uint64_t) TMVirtualSyncGetEx(PVM pVM, bool fCheckTimers)
    400 {
    401     uint64_t u64;
     403DECLINLINE(uint64_t) tmVirtualSyncGetEx(PVM pVM, bool fCheckTimers)
     404{
     405    STAM_COUNTER_INC(&pVM->tm.s.StatVirtualGetSync);
     406    uint64_t    u64;
     407
    402408    if (pVM->tm.s.fVirtualSyncTicking)
    403409    {
    404         STAM_COUNTER_INC(&pVM->tm.s.StatVirtualGetSync);
     410        PVMCPU pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu];
    405411
    406412        /*
     
    409415        Assert(pVM->tm.s.cVirtualTicking);
    410416        u64 = tmVirtualGetRaw(pVM);
    411         if (    fCheckTimers
    412             &&  !VM_FF_ISSET(pVM, VM_FF_TIMER)
    413             &&  pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL].u64Expire <= u64)
     417        if (fCheckTimers)
    414418        {
    415             VM_FF_SET(pVM, VM_FF_TIMER);
     419            if (    !VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER)
     420                &&  pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL].u64Expire <= u64)
     421            {
     422                Log5(("TMAllVirtual(%u): FF: 0 -> 1\n", __LINE__));
     423                VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
    416424#ifdef IN_RING3
    417             REMR3NotifyTimerPending(pVM);
    418             VMR3NotifyGlobalFFU(pVM->pUVM, VMNOTIFYFF_FLAGS_DONE_REM);
    419 #endif
    420             STAM_COUNTER_INC(&pVM->tm.s.StatVirtualGetSyncSetFF);
     425                REMR3NotifyTimerPending(pVM, pVCpuDst);
     426                VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM /** @todo |VMNOTIFYFF_FLAGS_POKE*/);
     427#endif
     428                STAM_COUNTER_INC(&pVM->tm.s.StatVirtualGetSyncSetFF);
     429            }
    421430        }
    422431
     
    490499        {
    491500            u64 = u64Expire;
    492             int rc = tmTryLock(pVM); /** @todo SMP: Here be dragons... Need to get back to this later. */
     501            int rc = tmTryLock(pVM); /** @todo SMP: Here be dragons... Need to get back to this later. FIXME */
    493502            if (RT_SUCCESS(rc))
    494503            {
    495504                ASMAtomicXchgU64(&pVM->tm.s.u64VirtualSync, u64);
    496505                ASMAtomicXchgBool(&pVM->tm.s.fVirtualSyncTicking, false);
     506                VM_FF_SET(pVM, VM_FF_TM_VIRTUAL_SYNC);
    497507                tmUnlock(pVM);
    498508            }
    499509            if (    fCheckTimers
    500                 &&  !VM_FF_ISSET(pVM, VM_FF_TIMER))
     510                &&  !VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
    501511            {
    502                 VM_FF_SET(pVM, VM_FF_TIMER);
     512                Log5(("TMAllVirtual(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
     513                VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
    503514#ifdef IN_RING3
    504                 REMR3NotifyTimerPending(pVM);
    505                 VMR3NotifyGlobalFFU(pVM->pUVM, VMNOTIFYFF_FLAGS_DONE_REM);
     515                REMR3NotifyTimerPending(pVM, pVCpuDst);
     516                VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
    506517#endif
    507518                STAM_COUNTER_INC(&pVM->tm.s.StatVirtualGetSyncSetFF);
     
    516527        u64 = pVM->tm.s.u64VirtualSync;
    517528
    518         /*
    519          * If it looks like a halt caused by pending timers, make sure the FF is raised.
    520          * This is a safeguard against timer queue runner leaving the virtual sync clock stopped.
    521          */
    522         if (    fCheckTimers
    523             &&  pVM->tm.s.cVirtualTicking
    524             &&  !VM_FF_ISSET(pVM, VM_FF_TIMER))
    525         {
    526             const uint64_t u64Expire = pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire;
    527             if (u64 >= u64Expire)
    528             {
    529                 VM_FF_SET(pVM, VM_FF_TIMER);
    530 #ifdef IN_RING3
    531                 REMR3NotifyTimerPending(pVM);
    532                 VMR3NotifyGlobalFFU(pVM->pUVM, VMNOTIFYFF_FLAGS_DONE_REM);
    533 #endif
    534                 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualGetSyncSetFF);
    535                 Log4(("TM: %RU64/%RU64: exp tmr=>ff (!)\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
    536             }
    537         }
    538     }
     529    }
     530
    539531    return u64;
    540532}
     
    547539 * @param   pVM             VM handle.
    548540 * @thread  EMT.
     541 * @remarks May set the timer and virtual sync FFs.
    549542 */
    550543VMMDECL(uint64_t) TMVirtualSyncGet(PVM pVM)
    551544{
    552     return TMVirtualSyncGetEx(pVM, true /* check timers */);
     545    return tmVirtualSyncGetEx(pVM, true /* check timers */);
     546}
     547
     548
     549/**
     550 * Gets the current TMCLOCK_VIRTUAL_SYNC time without checking timers running on
     551 * TMCLOCK_VIRTUAL.
     552 *
     553 * @returns The timestamp.
     554 * @param   pVM             VM handle.
     555 * @thread  EMT.
     556 * @remarks May set the timer and virtual sync FFs.
     557 */
     558VMMDECL(uint64_t) TMVirtualSyncGetNoCheck(PVM pVM)
     559{
     560    return tmVirtualSyncGetEx(pVM, false /* check timers */);
     561}
     562
     563
     564/**
     565 * Gets the current TMCLOCK_VIRTUAL_SYNC time.
     566 *
     567 * @returns The timestamp.
     568 * @param   pVM     VM handle.
     569 * @param   fCheckTimers    Check timers on the virtual clock or not.
     570 * @thread  EMT.
     571 * @remarks May set the timer and virtual sync FFs.
     572 */
     573VMMDECL(uint64_t) TMVirtualSyncGetEx(PVM pVM, bool fCheckTimers)
     574{
     575    return tmVirtualSyncGetEx(pVM, fCheckTimers);
    553576}
    554577
  • trunk/src/VBox/VMM/VMMGC/TRPMGCHandlers.cpp

    r19288 r19660  
    157157        if (!(++s_iTimerPoll & 0xf))
    158158        {
    159             uint64_t cTicks = TMTimerPoll(pVM); NOREF(cTicks);
    160             Log2(("TMTimerPoll at %08RX32 returned %RX64 (VM_FF_TIMER=%d)\n", pRegFrame->eip, cTicks, VM_FF_ISPENDING(pVM, VM_FF_TIMER)));
     159            uint64_t cTicks = TMTimerPoll(pVM, pVCpu); NOREF(cTicks);
     160            Log2(("TMTimerPoll at %08RX32 returned %RX64 (VM_FF_TM_VIRTUAL_SYNC=%d VM_FF_TM_VIRTUAL_SYNC=%d)\n", pRegFrame->eip, cTicks,
     161                  VM_FF_ISPENDING(pVM, VM_FF_TM_VIRTUAL_SYNC), VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_TIMER)));
    161162        }
    162163    }
     
    185186     */
    186187    if (    rc == VINF_SUCCESS
    187         &&  (   VM_FF_ISPENDING(pVM, VM_FF_TIMER | VM_FF_REQUEST | VM_FF_PGM_NO_MEMORY)
    188              || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_TO_R3 | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_REQUEST | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)
     188        &&  (   VM_FF_ISPENDING(pVM, VM_FF_TM_VIRTUAL_SYNC | VM_FF_REQUEST | VM_FF_PGM_NO_MEMORY)
     189             || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_TIMER | VMCPU_FF_TO_R3 | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_REQUEST | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)
    189190            )
    190191       )
     
    200201        }
    201202        /* Pending timer action. */
    202         else if (VM_FF_ISPENDING(pVM, VM_FF_TIMER))
     203        else if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_TIMER))
    203204            rc = VINF_EM_RAW_TIMER_PENDING;
     205        /* The Virtual Sync clock has stopped. */
     206        else if (VM_FF_ISPENDING(pVM, VM_FF_TM_VIRTUAL_SYNC))
     207            rc = VINF_EM_RAW_TO_R3;
    204208        /* Pending interrupt: dispatch it. */
    205209        else if (    VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
  • trunk/src/VBox/VMM/VMMR0/GVMMR0.cpp

    r19657 r19660  
    16621662}
    16631663
     1664
    16641665/**
    16651666 * Wakes up the halted EMT thread so it can service a pending request.
  • trunk/src/VBox/VMM/VMMTests.cpp

    r19463 r19660  
    505505
    506506    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TO_R3);
    507     VM_FF_CLEAR(pVM, VM_FF_TIMER);
     507    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TIMER);
     508    VM_FF_CLEAR(pVM, VM_FF_TM_VIRTUAL_SYNC);
    508509    VM_FF_CLEAR(pVM, VM_FF_REQUEST);
    509510
     
    556557
    557558            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TO_R3);
    558             VM_FF_CLEAR(pVM, VM_FF_TIMER);
     559            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TIMER);
     560            VM_FF_CLEAR(pVM, VM_FF_TM_VIRTUAL_SYNC);
    559561
    560562            uint64_t TickThisStart = ASMReadTSC();
  • trunk/src/recompiler/VBoxREMWrapper.cpp

    r19639 r19660  
    355355static DECLCALLBACKPTR(void, pfnREMR3NotifyInterruptSet)(PVM, PVMCPU);
    356356static DECLCALLBACKPTR(void, pfnREMR3NotifyInterruptClear)(PVM, PVMCPU);
    357 static DECLCALLBACKPTR(void, pfnREMR3NotifyTimerPending)(PVM);
     357static DECLCALLBACKPTR(void, pfnREMR3NotifyTimerPending)(PVM, PVMCPU);
    358358static DECLCALLBACKPTR(void, pfnREMR3NotifyDmaPending)(PVM);
    359359static DECLCALLBACKPTR(void, pfnREMR3NotifyQueuePending)(PVM);
     
    10581058    { "REMR3NotifyInterruptSet",                (void *)&pfnREMR3NotifyInterruptSet,                &g_aArgsVMandVMCPU[0],                      RT_ELEMENTS(g_aArgsVMandVMCPU),                        REMFNDESC_FLAGS_RET_VOID,   0,              NULL },
    10591059    { "REMR3NotifyInterruptClear",              (void *)&pfnREMR3NotifyInterruptClear,              &g_aArgsVMandVMCPU[0],                      RT_ELEMENTS(g_aArgsVMandVMCPU),                        REMFNDESC_FLAGS_RET_VOID,   0,              NULL },
    1060     { "REMR3NotifyTimerPending",                (void *)&pfnREMR3NotifyTimerPending,                &g_aArgsVM[0],                              RT_ELEMENTS(g_aArgsVM),                                REMFNDESC_FLAGS_RET_VOID,   0,              NULL },
     1060    { "REMR3NotifyTimerPending",                (void *)&pfnREMR3NotifyTimerPending,                &g_aArgsVMandVMCPU[0],                      RT_ELEMENTS(g_aArgsVMandVMCPU),                        REMFNDESC_FLAGS_RET_VOID,   0,              NULL },
    10611061    { "REMR3NotifyDmaPending",                  (void *)&pfnREMR3NotifyDmaPending,                  &g_aArgsVM[0],                              RT_ELEMENTS(g_aArgsVM),                                REMFNDESC_FLAGS_RET_VOID,   0,              NULL },
    10621062    { "REMR3NotifyQueuePending",                (void *)&pfnREMR3NotifyQueuePending,                &g_aArgsVM[0],                              RT_ELEMENTS(g_aArgsVM),                                REMFNDESC_FLAGS_RET_VOID,   0,              NULL },
     
    11661166    { "TMNotifyEndOfExecution",                 (void *)(uintptr_t)&TMNotifyEndOfExecution,         &g_aArgsVMCPU[0],                           RT_ELEMENTS(g_aArgsVMCPU),                             REMFNDESC_FLAGS_RET_VOID,   0,                  NULL },
    11671167    { "TMNotifyStartOfExecution",               (void *)(uintptr_t)&TMNotifyStartOfExecution,       &g_aArgsVMCPU[0],                           RT_ELEMENTS(g_aArgsVMCPU),                             REMFNDESC_FLAGS_RET_VOID,   0,                  NULL },
    1168     { "TMTimerPoll",                            (void *)(uintptr_t)&TMTimerPoll,                    &g_aArgsVM[0],                              RT_ELEMENTS(g_aArgsVM),                                REMFNDESC_FLAGS_RET_INT,    sizeof(uint64_t),   NULL },
     1168    { "TMTimerPoll",                            (void *)(uintptr_t)&TMTimerPoll,                    &g_aArgsVMandVMCPU[0],                      RT_ELEMENTS(g_aArgsVMandVMCPU),                        REMFNDESC_FLAGS_RET_INT,    sizeof(uint64_t),   NULL },
    11691169    { "TMR3TimerQueuesDo",                      (void *)(uintptr_t)&TMR3TimerQueuesDo,              &g_aArgsVM[0],                              RT_ELEMENTS(g_aArgsVM),                                REMFNDESC_FLAGS_RET_VOID,   0,                  NULL },
    11701170    { "TMVirtualPause",                         (void *)(uintptr_t)&TMVirtualPause,                 &g_aArgsVM[0],                              RT_ELEMENTS(g_aArgsVM),                                REMFNDESC_FLAGS_RET_INT,    sizeof(int),        NULL },
     
    22802280}
    22812281
    2282 REMR3DECL(void) REMR3NotifyTimerPending(PVM pVM)
     2282REMR3DECL(void) REMR3NotifyTimerPending(PVM pVM, PVMCPU pVCpuDst)
    22832283{
    22842284#ifndef USE_REM_STUBS
    22852285    Assert(VALID_PTR(pfnREMR3NotifyTimerPending));
    2286     pfnREMR3NotifyTimerPending(pVM);
     2286    pfnREMR3NotifyTimerPending(pVM, pVCpuDst);
    22872287#endif
    22882288}
  • trunk/src/recompiler/VBoxRecompiler.c

    r19639 r19660  
    16501650{
    16511651    LogFlow(("remR3TimersRun:\n"));
     1652    LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("remR3TimersRun\n"));
    16521653    remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
    16531654    remR3ProfileStart(STATS_QEMU_RUN_TIMERS);
     
    38363837 *
    38373838 * @param   pVM             VM Handle.
     3839 * @param   pVCpuDst        The target cpu for this notification.
     3840 *                          TM will not broadcast pending timer events, but use
     3841 *                          a dedicated EMT for them. So, only interrupt REM
     3842 *                          execution if the given CPU is executing in REM.
    38383843 * @thread  Any.
    38393844 */
    3840 REMR3DECL(void) REMR3NotifyTimerPending(PVM pVM)
     3845REMR3DECL(void) REMR3NotifyTimerPending(PVM pVM, PVMCPU pVCpuDst)
    38413846{
    38423847#ifndef DEBUG_bird
     
    38453850    if (pVM->rem.s.fInREM)
    38463851    {
    3847         ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
    3848                        CPU_INTERRUPT_EXTERNAL_TIMER);
    3849     }
     3852        if (pVM->rem.s.Env.pVCpu == pVCpuDst)
     3853        {
     3854            LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: setting\n"));
     3855            ASMAtomicOrS32((int32_t volatile *)&pVM->rem.s.Env.interrupt_request,
     3856                           CPU_INTERRUPT_EXTERNAL_TIMER);
     3857        }
     3858        else
     3859            LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: pVCpu:%p != pVCpuDst:%p\n", pVM->rem.s.Env.pVCpu, pVCpuDst));
     3860    }
     3861    else
     3862        LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: !fInREM; cpu state=%d\n", VMCPU_GET_STATE(pVCpuDst)));
    38503863}
    38513864
  • trunk/src/recompiler/cpu-exec.c

    r19369 r19660  
    525525        }
    526526#ifdef VBOX_HIGH_RES_TIMERS_HACK
    527         /* NULL the current_tb here so cpu_interrupt() doesn't do
    528            anything unnecessary (like crashing during emulate single instruction). */
     527        /* NULL the current_tb here so cpu_interrupt() doesn't do anything
     528           unnecessary (like crashing during emulate single instruction).
     529           Note! Don't use env1->pVM here, the code wouldn't run with
     530                 gcc-4.4/amd64 anymore, see #3883. */
    529531        env->current_tb = NULL;
    530         /* don't use env1->pVM here, the code wouldn't run with gcc-4.4/amd64
    531          * anymore, see #3883 */
    532         TMTimerPoll(env->pVM);
     532        if (    !(env->interrupt_request & (  CPU_INTERRUPT_EXIT | CPU_INTERRUPT_DEBUG | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_RC
     533                                            | CPU_INTERRUPT_SINGLE_INSTR | CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT))
     534            &&  (   (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_TIMER)
     535                 || !TMTimerPoll(env->pVM, env->pVCpu)) ) {
     536            ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_EXTERNAL_TIMER);
     537            remR3ProfileStart(STATS_QEMU_RUN_TIMERS);
     538            TMR3TimerQueuesDo(env->pVM);
     539            remR3ProfileStop(STATS_QEMU_RUN_TIMERS);
     540        }
    533541#endif
    534542    } /* for(;;) */
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette