VirtualBox

Changeset 19709 in vbox for trunk/src


Timestamp: May 14, 2009 5:59:34 PM (16 years ago)
Author: vboxsync
svn:sync-xref-src-repo-rev: 47324
Message: TM,EM: More TM/SMP work, still stuff in progress.

Location: trunk/src/VBox/VMM
Files: 5 edited

  • trunk/src/VBox/VMM/EM.cpp

    r19682 r19709  
    35663566        }
    35673567
     3568        /*
     3569         * If the virtual sync clock is still stopped, make TM restart it.
     3570         */
     3571        if (VM_FF_ISPENDING(pVM, VM_FF_TM_VIRTUAL_SYNC))
     3572            TMR3VirtualSyncFF(pVM, pVCpu);
     3573
    35683574#ifdef DEBUG
    35693575        /*
     
    35763582            return VINF_EM_SUSPEND;
    35773583        }
    3578 
    35793584#endif
    3580         if (VM_FF_ISPENDING(pVM, VM_FF_TM_VIRTUAL_SYNC))
    3581         {
    3582             /** @todo FIXME */
    3583         }
    35843585
    35853586        /* check that we got them all  */
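
    The hunk above is one half of a handshake that the TMAllVirtual.cpp hunks at the
    bottom of this changeset complete. A hedged sketch of the assumed flow, using only
    names that appear in the diffs (the surrounding EM control flow is paraphrased,
    not quoted):

        /*
         * 1. tmVirtualSyncGetEx() in TMAllVirtual.cpp stops the clock
         *    (fVirtualSyncTicking = false) and raises VM_FF_TM_VIRTUAL_SYNC.
         * 2. Each EMT notices the flag in EM's forced action processing:
         */
        if (VM_FF_ISPENDING(pVM, VM_FF_TM_VIRTUAL_SYNC))
            TMR3VirtualSyncFF(pVM, pVCpu); /* 3. runs the queue, restarts the clock */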
  • trunk/src/VBox/VMM/TM.cpp

    r19669 r19709  
    270270
    271271    /*
    272      * Init the lock.
     272     * Init the locks.
    273273     */
    274274    rc = PDMR3CritSectInit(pVM, &pVM->tm.s.EmtLock, "TM EMT Lock");
     275    if (RT_FAILURE(rc))
     276        return rc;
     277    rc = PDMR3CritSectInit(pVM, &pVM->tm.s.VirtualSyncLock, "TM VirtualSync Lock");
    275278    if (RT_FAILURE(rc))
    276279        return rc;
     
    550553    STAM_REG_USED(pVM,(void *)&pVM->tm.s.VirtualGetRawDataRC.cUpdateRaces,STAMTYPE_U32, "/TM/GC/cUpdateRaces",                 STAMUNIT_OCCURENCES, "Thread races when updating the previous timestamp.");
    551554    STAM_REG(pVM, &pVM->tm.s.StatDoQueues,                            STAMTYPE_PROFILE, "/TM/DoQueues",                    STAMUNIT_TICKS_PER_CALL, "Profiling timer TMR3TimerQueuesDo.");
    552     STAM_REG(pVM, &pVM->tm.s.StatDoQueuesSchedule,                STAMTYPE_PROFILE_ADV, "/TM/DoQueues/Schedule",           STAMUNIT_TICKS_PER_CALL, "The scheduling part.");
    553     STAM_REG(pVM, &pVM->tm.s.StatDoQueuesRun,                     STAMTYPE_PROFILE_ADV, "/TM/DoQueues/Run",                STAMUNIT_TICKS_PER_CALL, "The run part.");
     555    STAM_REG(pVM, &pVM->tm.s.aStatDoQueues[TMCLOCK_VIRTUAL],      STAMTYPE_PROFILE_ADV, "/TM/DoQueues/Virtual",            STAMUNIT_TICKS_PER_CALL, "Time spent on the virtual clock queue.");
     556    STAM_REG(pVM, &pVM->tm.s.aStatDoQueues[TMCLOCK_VIRTUAL_SYNC], STAMTYPE_PROFILE_ADV, "/TM/DoQueues/VirtualSync",        STAMUNIT_TICKS_PER_CALL, "Time spent on the virtual sync clock queue.");
     557    STAM_REG(pVM, &pVM->tm.s.aStatDoQueues[TMCLOCK_REAL],         STAMTYPE_PROFILE_ADV, "/TM/DoQueues/Real",               STAMUNIT_TICKS_PER_CALL, "Time spent on the real clock queue.");
    554558
    555559    STAM_REG(pVM, &pVM->tm.s.StatPoll,                                STAMTYPE_COUNTER, "/TM/Poll",                            STAMUNIT_OCCURENCES, "TMTimerPoll calls.");
     
    602606    STAM_REG(pVM, (void *)&pVM->tm.s.fVirtualSyncCatchUp,                  STAMTYPE_U8, "/TM/VirtualSync/CatchUpActive",             STAMUNIT_NONE, "Catch-Up active indicator.");
    603607    STAM_REG(pVM, (void *)&pVM->tm.s.u32VirtualSyncCatchUpPercentage,     STAMTYPE_U32, "/TM/VirtualSync/CatchUpPercentage",          STAMUNIT_PCT, "The catch-up percentage. (+100/100 to get clock multiplier)");
     608    STAM_REG(pVM, &pVM->tm.s.StatVirtualSyncFF,                       STAMTYPE_PROFILE, "/TM/VirtualSync/FF",         STAMUNIT_TICKS_PER_OCCURENCE, "Time spent in TMR3VirtualSyncFF by all but the dedicated timer EMT.");
    604609    STAM_REG(pVM, &pVM->tm.s.StatVirtualSyncGiveUp,                   STAMTYPE_COUNTER, "/TM/VirtualSync/GiveUp",              STAMUNIT_OCCURENCES, "Times the catch-up was abandoned.");
    605610    STAM_REG(pVM, &pVM->tm.s.StatVirtualSyncGiveUpBeforeStarting,     STAMTYPE_COUNTER, "/TM/VirtualSync/GiveUpBeforeStarting",STAMUNIT_OCCURENCES, "Times the catch-up was abandoned before even starting. (Typically debugging++.)");
     
    16921697    /*
    16931698     * Only the dedicated timer EMT should do stuff here.
    1694      *
    1695      * The lock isn't really necessary any longer, but it might come
    1696      * in handy when dealing VM_FF_TM_VIRTUAL_SYNC later.
     1699     * (fRunningQueues is only used as an indicator.)
    16971700     */
    16981701    Assert(pVM->tm.s.idTimerCpu < pVM->cCPUs);
    16991702    PVMCPU pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu];
    1700     if (    VMMGetCpu(pVM) != pVCpuDst
    1701         ||  ASMBitTestAndSet(&pVM->tm.s.fRunningQueues, 0))
     1703    if (VMMGetCpu(pVM) != pVCpuDst)
    17021704    {
    17031705        Assert(pVM->cCPUs > 1);
    17041706        return;
    17051707    }
    1706 
    17071708    STAM_PROFILE_START(&pVM->tm.s.StatDoQueues, a);
    17081709    Log2(("TMR3TimerQueuesDo:\n"));
     1710    Assert(!pVM->tm.s.fRunningQueues);
     1711    ASMAtomicWriteBool(&pVM->tm.s.fRunningQueues, true);
    17091712    tmLock(pVM);
    17101713
    17111714    /*
    1712      * Clear the FF before processing the queues but after obtaining the lock.
    1713      */
    1714     VMCPU_FF_CLEAR(pVCpuDst, VMCPU_FF_TIMER);
    1715 
    1716     /*
    17171715     * Process the queues.
    17181716     */
    17191717    AssertCompile(TMCLOCK_MAX == 4);
    17201718
    1721     /* TMCLOCK_VIRTUAL_SYNC */
    1722     STAM_PROFILE_ADV_START(&pVM->tm.s.StatDoQueuesSchedule, s1);
    1723     tmTimerQueueSchedule(pVM, &pVM->tm.s.paTimerQueuesR3[TMCLOCK_VIRTUAL_SYNC]);
    1724     STAM_PROFILE_ADV_SUSPEND(&pVM->tm.s.StatDoQueuesSchedule, s1);
    1725     STAM_PROFILE_ADV_START(&pVM->tm.s.StatDoQueuesRun, r1);
     1719    /* TMCLOCK_VIRTUAL_SYNC (see also TMR3VirtualSyncFF) */
     1720    STAM_PROFILE_ADV_START(&pVM->tm.s.aStatDoQueues[TMCLOCK_VIRTUAL_SYNC], s1);
     1721    tmVirtualSyncLock(pVM);
     1722    ASMAtomicWriteBool(&pVM->tm.s.fRunningVirtualSyncQueue, true);
     1723    VMCPU_FF_CLEAR(pVCpuDst, VMCPU_FF_TIMER);   /* Clear the FF once we started working for real. */
     1724
     1725    if (pVM->tm.s.paTimerQueuesR3[TMCLOCK_VIRTUAL_SYNC].offSchedule)
     1726        tmTimerQueueSchedule(pVM, &pVM->tm.s.paTimerQueuesR3[TMCLOCK_VIRTUAL_SYNC]);
    17261727    tmR3TimerQueueRunVirtualSync(pVM);
    1727     STAM_PROFILE_ADV_SUSPEND(&pVM->tm.s.StatDoQueuesRun, r1);
    17281728    if (pVM->tm.s.fVirtualSyncTicking) /** @todo move into tmR3TimerQueueRunVirtualSync - FIXME */
    17291729        VM_FF_CLEAR(pVM, VM_FF_TM_VIRTUAL_SYNC);
    17301730
     1731    ASMAtomicWriteBool(&pVM->tm.s.fRunningVirtualSyncQueue, false);
     1732    tmVirtualSyncUnlock(pVM);
     1733    STAM_PROFILE_ADV_STOP(&pVM->tm.s.aStatDoQueues[TMCLOCK_VIRTUAL_SYNC], s1);
     1734
    17311735    /* TMCLOCK_VIRTUAL */
    1732     STAM_PROFILE_ADV_RESUME(&pVM->tm.s.StatDoQueuesSchedule, s1);
    1733     tmTimerQueueSchedule(pVM, &pVM->tm.s.paTimerQueuesR3[TMCLOCK_VIRTUAL]);
    1734     STAM_PROFILE_ADV_SUSPEND(&pVM->tm.s.StatDoQueuesSchedule, s2);
    1735     STAM_PROFILE_ADV_RESUME(&pVM->tm.s.StatDoQueuesRun, r1);
     1736    STAM_PROFILE_ADV_START(&pVM->tm.s.aStatDoQueues[TMCLOCK_VIRTUAL], s2);
     1737    if (pVM->tm.s.paTimerQueuesR3[TMCLOCK_VIRTUAL].offSchedule)
     1738        tmTimerQueueSchedule(pVM, &pVM->tm.s.paTimerQueuesR3[TMCLOCK_VIRTUAL]);
    17361739    tmR3TimerQueueRun(pVM, &pVM->tm.s.paTimerQueuesR3[TMCLOCK_VIRTUAL]);
    1737     STAM_PROFILE_ADV_SUSPEND(&pVM->tm.s.StatDoQueuesRun, r2);
     1740    STAM_PROFILE_ADV_STOP(&pVM->tm.s.aStatDoQueues[TMCLOCK_VIRTUAL], s2);
    17381741
    17391742    /* TMCLOCK_TSC */
     
    17411744
    17421745    /* TMCLOCK_REAL */
    1743     STAM_PROFILE_ADV_RESUME(&pVM->tm.s.StatDoQueuesSchedule, s2);
    1744     tmTimerQueueSchedule(pVM, &pVM->tm.s.paTimerQueuesR3[TMCLOCK_REAL]);
    1745     STAM_PROFILE_ADV_STOP(&pVM->tm.s.StatDoQueuesSchedule, s3);
    1746     STAM_PROFILE_ADV_RESUME(&pVM->tm.s.StatDoQueuesRun, r2);
     1746    STAM_PROFILE_ADV_START(&pVM->tm.s.aStatDoQueues[TMCLOCK_REAL], s3);
     1747    if (pVM->tm.s.paTimerQueuesR3[TMCLOCK_REAL].offSchedule)
     1748        tmTimerQueueSchedule(pVM, &pVM->tm.s.paTimerQueuesR3[TMCLOCK_REAL]);
    17471749    tmR3TimerQueueRun(pVM, &pVM->tm.s.paTimerQueuesR3[TMCLOCK_REAL]);
    1748     STAM_PROFILE_ADV_STOP(&pVM->tm.s.StatDoQueuesRun, r3);
     1750    STAM_PROFILE_ADV_STOP(&pVM->tm.s.aStatDoQueues[TMCLOCK_REAL], s3);
    17491751
    17501752#ifdef VBOX_STRICT
     
    17531755#endif
    17541756
     1757    /* done */
    17551758    Log2(("TMR3TimerQueuesDo: returns void\n"));
     1759    ASMAtomicWriteBool(&pVM->tm.s.fRunningQueues, false);
     1760    tmUnlock(pVM);
    17561761    STAM_PROFILE_STOP(&pVM->tm.s.StatDoQueues, a);
    1757 
    1758     /* done */
    1759     tmUnlock(pVM);
    1760     ASMAtomicBitClear(&pVM->tm.s.fRunningQueues, 0);
    17611762}
    17621763
     
    21202121
    21212122/**
     2123 * Deals with a stopped Virtual Sync clock.
     2124 *
     2125 * This is called by the forced action flag handling code in EM when it
     2126 * encounters the VM_FF_TM_VIRTUAL_SYNC flag. It is called by all VCPUs and they
     2127 * will block on the VirtualSyncLock until the pending timers have been
     2128 * executed and the clock has been restarted.
     2129 *
     2130 * @param   pVM             The VM to run the timers for.
     2131 * @param   pVCpu           The virtual CPU we're running on.
     2132 *
     2133 * @thread  EMTs
     2134 */
     2135VMMR3DECL(void) TMR3VirtualSyncFF(PVM pVM, PVMCPU pVCpu)
     2136{
     2137    Log2(("TMR3VirtualSyncFF:\n"));
     2138
     2139    /*
     2140     * The dedicated timer EMT is diverted to running the timer queues.
     2141     */
     2142    if (pVCpu->idCpu == pVM->tm.s.idTimerCpu)
     2143        TMR3TimerQueuesDo(pVM);
     2144    /*
     2145     * The other EMTs will block on the virtual sync lock and the first owner
     2146     * will run the queue, thus restarting the clock.
     2147     *
     2148     * Note! This is very suboptimal code wrt resuming execution when there
     2149     *       are more than two Virtual CPUs, since they will all have to enter
     2150     *       the critical section one by one. But it's a very simple solution
     2151     *       which will have to do the job for now.
     2152     */
     2153    else
     2154    {
     2155        STAM_PROFILE_START(&pVM->tm.s.StatVirtualSyncFF, a);
     2156        tmVirtualSyncLock(pVM);
     2157        if (pVM->tm.s.fVirtualSyncTicking)
     2158        {
     2159            STAM_PROFILE_STOP(&pVM->tm.s.StatVirtualSyncFF, a); /* before the unlock! */
     2160            tmVirtualSyncUnlock(pVM);
     2161            Log2(("TMR3VirtualSyncFF: ticking\n"));
     2162        }
     2163        else
     2164        {
     2165            tmVirtualSyncUnlock(pVM);
     2166
     2167            /* try run it. */
     2168            tmLock(pVM);
     2169            tmVirtualSyncLock(pVM);
     2170            if (pVM->tm.s.fVirtualSyncTicking)
     2171                Log2(("TMR3VirtualSyncFF: ticking (2)\n"));
     2172            else
     2173            {
     2174                ASMAtomicWriteBool(&pVM->tm.s.fRunningVirtualSyncQueue, true);
     2175                Log2(("TMR3VirtualSyncFF: running queue\n"));
     2176
     2177                if (pVM->tm.s.paTimerQueuesR3[TMCLOCK_VIRTUAL_SYNC].offSchedule)
     2178                    tmTimerQueueSchedule(pVM, &pVM->tm.s.paTimerQueuesR3[TMCLOCK_VIRTUAL_SYNC]);
     2179                tmR3TimerQueueRunVirtualSync(pVM);
     2180                if (pVM->tm.s.fVirtualSyncTicking) /** @todo move into tmR3TimerQueueRunVirtualSync - FIXME */
     2181                    VM_FF_CLEAR(pVM, VM_FF_TM_VIRTUAL_SYNC);
     2182
     2183                ASMAtomicWriteBool(&pVM->tm.s.fRunningVirtualSyncQueue, false);
     2184            }
     2185            STAM_PROFILE_STOP(&pVM->tm.s.StatVirtualSyncFF, a); /* before the unlock! */
     2186            tmVirtualSyncUnlock(pVM);
     2187            tmUnlock(pVM);
     2188        }
     2189    }
     2190}
     2191
     2192
     2193/**
    21222194 * Saves the state of a timer to a saved state.
    21232195 *
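
    The rewritten TMR3TimerQueuesDo and the new TMR3VirtualSyncFF both follow the same
    lock ordering: the EMT/TM lock is taken before the VirtualSync lock and released
    after it. A minimal ring-3 sketch of that convention, derived from the hunks above
    (in R0/RC the enters can fail with VERR_SEM_BUSY and would need RT_SUCCESS guards):

        tmLock(pVM);               /* 1. EMT/TM lock first            */
        tmVirtualSyncLock(pVM);    /* 2. then the VirtualSync lock    */
        /* ... schedule and run the TMCLOCK_VIRTUAL_SYNC queue ... */
        tmVirtualSyncUnlock(pVM);  /* 3. release in the reverse order */
        tmUnlock(pVM);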
  • trunk/src/VBox/VMM/TMInternal.h

    r19660 r19709  
    9696    TMTIMERSTATE_FREE
    9797} TMTIMERSTATE;
     98
     99/** Predicate that returns true if the given state is pending scheduling or
     100 *  rescheduling of any kind. Will reference the argument more than once! */
     101#define TMTIMERSTATE_IS_PENDING_SCHEDULING(enmState) \
     102    (   (enmState) <= TMTIMERSTATE_PENDING_RESCHEDULE \
     103     && (enmState) >= TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE)
    98104
    99105
     
    422428    uint32_t                    u32TimerMillies;
    423429
    424     /** Makes sure only one EMT is running the queues. */
     430    /** Indicates that queues are being run. */
    425431    bool volatile               fRunningQueues;
     432    /** Indicates that the virtual sync queue is being run. */
     433    bool volatile               fRunningVirtualSyncQueue;
    426434
    427435    /** Lock serializing EMT access to TM. */
    428436    PDMCRITSECT                 EmtLock;
     437    /** Lock serializing access to the VirtualSync clock. */
     438    PDMCRITSECT                 VirtualSyncLock;
    429439
    430440    /** TMR3TimerQueuesDo
    431441     * @{ */
    432442    STAMPROFILE                 StatDoQueues;
    433     STAMPROFILEADV              StatDoQueuesSchedule;
    434     STAMPROFILEADV              StatDoQueuesRun;
     443    STAMPROFILEADV              aStatDoQueues[TMCLOCK_MAX];
    435444    /** @} */
    436445    /** tmSchedule
     
    492501    STAMCOUNTER                 aStatVirtualSyncCatchupAdjust[TM_MAX_CATCHUP_PERIODS];
    493502    /** @} */
     503    /** TMR3VirtualSyncFF (non-dedicated EMT). */
     504    STAMPROFILE                 StatVirtualSyncFF;
    494505    /** The timer callback. */
    495506    STAMCOUNTER                 StatTimerCallbackSetFF;
     
    536547
    537548#if 0 /* enable this to rule out locking bugs on single cpu guests. */
    538 # define tmLock(pVM)             VINF_SUCCESS
    539 # define tmTryLock(pVM)          VINF_SUCCESS
    540 # define tmUnlock(pVM)           ((void)0)
     549# define tmLock(pVM)                VINF_SUCCESS
     550# define tmTryLock(pVM)             VINF_SUCCESS
     551# define tmUnlock(pVM)              ((void)0)
     552# define tmVirtualSyncLock(pVM)     VINF_SUCCESS
     553# define tmVirtualSyncTryLock(pVM)  VINF_SUCCESS
     554# define tmVirtualSyncUnlock(pVM)   ((void)0)
    541555# define TM_ASSERT_EMT_LOCK(pVM) VM_ASSERT_EMT(pVM)
    542556#else
     
    546560/** Checks that the caller owns the EMT lock.  */
    547561#define TM_ASSERT_EMT_LOCK(pVM) Assert(PDMCritSectIsOwner(&pVM->tm.s.EmtLock))
     562int                     tmVirtualSyncLock(PVM pVM);
     563int                     tmVirtualSyncTryLock(PVM pVM);
     564void                    tmVirtualSyncUnlock(PVM pVM);
    548565#endif
    549566
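
    The "more than once" warning on TMTIMERSTATE_IS_PENDING_SCHEDULING matters because
    the state field is modified concurrently by other threads; the tmSchedule() hunk in
    TMAll.cpp below shows the intended idiom, repeated here as a sketch:

        /* Snapshot the volatile state once and feed the local variable to the
         * macro; passing pTimer->enmState directly could observe two different
         * states across the macro's two comparisons. */
        TMTIMERSTATE enmState = pTimer->enmState;
        if (TMTIMERSTATE_IS_PENDING_SCHEDULING(enmState))
            tmScheduleNotify(pVM);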
  • trunk/src/VBox/VMM/VMMAll/TMAll.cpp

    r19660 r19709  
    8080
    8181/**
    82  * Release EMT/TM lock.
     82 * Release the EMT/TM lock.
    8383 *
    8484 * @param   pVM         The VM handle.
     
    8787{
    8888    PDMCritSectLeave(&pVM->tm.s.EmtLock);
     89}
     90
     91
     92/**
     93 * Try to take the VirtualSync lock; waits in ring-3, returns VERR_SEM_BUSY in R0/RC.
     94 *
     95 * @retval  VINF_SUCCESS on success (always in ring-3).
     96 * @retval  VERR_SEM_BUSY in RC and R0 if the semaphore is busy.
     97 *
     98 * @param   pVM         The VM handle.
     99 */
     100int tmVirtualSyncLock(PVM pVM)
     101{
     102    VM_ASSERT_EMT(pVM);
     103    int rc = PDMCritSectEnter(&pVM->tm.s.VirtualSyncLock, VERR_SEM_BUSY);
     104    return rc;
     105}
     106
     107
     108/**
     109 * Try to take the VirtualSync lock, no waiting.
     110 *
     111 * @retval  VINF_SUCCESS on success.
     112 * @retval  VERR_SEM_BUSY if busy.
     113 *
     114 * @param   pVM         The VM handle.
     115 */
     116int tmVirtualSyncTryLock(PVM pVM)
     117{
     118    VM_ASSERT_EMT(pVM);
     119    int rc = PDMCritSectTryEnter(&pVM->tm.s.VirtualSyncLock);
     120    return rc;
     121}
     122
     123
     124/**
     125 * Release the VirtualSync lock.
     126 *
     127 * @param   pVM         The VM handle.
     128 */
     129void tmVirtualSyncUnlock(PVM pVM)
     130{
     131    PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock);
    89132}
    90133
     
    170213
    171214/**
     215 * Raise the timer forced action flag and notify the dedicated timer EMT.
     216 *
     217 * @param   pVM         The VM handle.
     218 */
     219DECLINLINE(void) tmScheduleNotify(PVM pVM)
     220{
     221    PVMCPU pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu];
     222    if (!VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
     223    {
     224        Log5(("TMAll(%u): FF: 0 -> 1\n", __LINE__));
     225        VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
     226#ifdef IN_RING3
     227        REMR3NotifyTimerPending(pVM, pVCpuDst);
     228        VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
     229#endif
     230        STAM_COUNTER_INC(&pVM->tm.s.StatScheduleSetFF);
     231    }
     232}
     233
     234
     235/**
    172236 * Schedule the queue which was changed.
    173237 */
     
    178242        &&  RT_SUCCESS(tmTryLock(pVM)))
    179243    {
    180         STAM_PROFILE_START(&pVM->tm.s.CTXALLSUFF(StatScheduleOne), a);
    181         PTMTIMERQUEUE pQueue = &pVM->tm.s.CTX_SUFF(paTimerQueues)[pTimer->enmClock];
     244        STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatScheduleOne), a);
    182245        Log3(("tmSchedule: tmTimerQueueSchedule\n"));
    183         tmTimerQueueSchedule(pVM, pQueue);
     246        tmTimerQueueSchedule(pVM, &pVM->tm.s.CTX_SUFF(paTimerQueues)[pTimer->enmClock]);
    184247#ifdef VBOX_STRICT
    185248        tmTimerQueuesSanityChecks(pVM, "tmSchedule");
     
    190253    else
    191254    {
    192         /** @todo FIXME: don't use FF for scheduling! */
    193         PVMCPU pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu];
    194         if (!VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))  /**@todo only do this when arming the timer. */
    195         {
    196             Log5(("TMAll(%u): FF: 0 -> 1\n", __LINE__));
    197             VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
    198 #ifdef IN_RING3
    199             REMR3NotifyTimerPending(pVM, pVCpuDst);
    200             VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
    201 #endif
    202             STAM_COUNTER_INC(&pVM->tm.s.StatScheduleSetFF);
    203         }
     255        TMTIMERSTATE enmState = pTimer->enmState;
     256        if (TMTIMERSTATE_IS_PENDING_SCHEDULING(enmState))
     257            tmScheduleNotify(pVM);
    204258    }
    205259}
     
    231285 * @param   pQueue  The timer queue the timer belongs to.
    232286 * @param   pTimer  The timer.
     287 *
     288 * @todo    FIXME: Look into potential race with the thread running the queues
     289 *          and stuff.
    233290 */
    234291DECLINLINE(void) tmTimerLink(PTMTIMERQUEUE pQueue, PTMTIMER pTimer)
     
    307364     * Get current time and check the expire times of the two relevant queues.
    308365     */
    309     int             rc     = tmLock(pVM); /** @todo FIXME: Stop playing safe here... */
    310366    const uint64_t  u64Now = TMVirtualGetNoCheck(pVM);
    311367
     
    319375        STAM_COUNTER_INC(&pVM->tm.s.StatPollVirtual);
    320376        LogFlow(("TMTimerPoll: expire1=%RU64 <= now=%RU64\n", u64Expire1, u64Now));
    321 #ifndef IN_RING3
    322         if (RT_SUCCESS(rc))
    323 #endif
    324             tmUnlock(pVM);
    325377        Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
    326378        VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
     
     336388     * we have to adjust the 'now' but then we have to adjust the delta as well.
    337389     */
     390    int            rc         = tmVirtualSyncLock(pVM); /** @todo FIXME: Stop playing safe here... */
    338391    const uint64_t u64Expire2 = pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire;
    339392    uint64_t u64VirtualSyncNow;
     
    375428        if (RT_SUCCESS(rc))
    376429#endif
    377             tmUnlock(pVM);
     430            tmVirtualSyncUnlock(pVM);
    378431        LogFlow(("TMTimerPoll: expire2=%RU64 <= now=%RU64\n", u64Expire2, u64Now));
    379432        return pVCpu == pVCpuDst ? 0 : s_u64OtherRet;
     
    389442    if (RT_SUCCESS(rc))
    390443#endif
    391         tmUnlock(pVM);
     444        tmVirtualSyncUnlock(pVM);
    392445    return RT_MIN(i64Delta1, i64Delta2);
    393446}
     
    438491    }
    439492
    440     int rc = tmLock(pVM); /** @todo FIXME: Stop playin safe... */
    441 
    442493    /*
    443494     * Check for TMCLOCK_VIRTUAL expiration.
     
    453504        REMR3NotifyTimerPending(pVM, pVCpuDst);
    454505#endif
    455 #ifndef IN_RING3
    456         if (RT_SUCCESS(rc))
    457 #endif
    458             tmUnlock(pVM);
    459506        LogFlow(("TMTimerPoll: expire1=%RU64 <= now=%RU64\n", u64Expire1, u64Now));
    460507        if (pVCpuDst == pVCpu)
     
     472519     * we have to adjust the 'now' but then we have to adjust the delta as well.
    473520     */
      521    int rc = tmVirtualSyncLock(pVM); /** @todo FIXME: Stop playing safe... */
     522
    474523    const uint64_t  u64Expire2 = pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire;
    475524    uint64_t        u64VirtualSyncNow;
     
    512561        if (RT_SUCCESS(rc))
    513562#endif
    514             tmUnlock(pVM);
     563            tmVirtualSyncUnlock(pVM);
    515564        LogFlow(("TMTimerPoll: expire2=%RU64 <= now=%RU64\n", u64Expire2, u64Now));
    516565        if (pVCpuDst == pVCpu)
     
    552601    if (RT_SUCCESS(rc))
    553602#endif
    554         tmUnlock(pVM);
     603        tmVirtualSyncUnlock(pVM);
    555604    return u64GipTime;
    556605}
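
    A pattern worth noting in the TMTimerPoll changes above: tmVirtualSyncLock() only
    blocks in ring-3, so the matching unlock must stay conditional in R0/RC. Condensed
    sketch assembled from the hunks above (the elided part stands for the expire-time
    checks):

        int rc = tmVirtualSyncLock(pVM);   /* waits in R3, VERR_SEM_BUSY in R0/RC */
        /* ... read u64Expire and the virtual sync state ... */
    #ifndef IN_RING3
        if (RT_SUCCESS(rc))                /* only unlock if the enter succeeded  */
    #endif
            tmVirtualSyncUnlock(pVM);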
  • trunk/src/VBox/VMM/VMMAll/TMAllVirtual.cpp

    r19660 r19709  
    452452        if (pVM->tm.s.fVirtualSyncCatchUp)
    453453        {
    454             int rc = tmTryLock(pVM); /** @todo SMP: Here be dragons... Need to get back to this later. */
     454            int rc = tmVirtualSyncTryLock(pVM); /** @todo SMP: Here be dragons... Need to get back to this later. */
    455455
    456456            const uint64_t u64Prev = pVM->tm.s.u64VirtualSyncCatchUpPrev;
     
    486486
    487487            if (RT_SUCCESS(rc))
    488                 tmUnlock(pVM);
     488                tmVirtualSyncUnlock(pVM);
    489489        }
    490490
     
    499499        {
    500500            u64 = u64Expire;
    501             int rc = tmTryLock(pVM); /** @todo SMP: Here be dragons... Need to get back to this later. FIXME */
     501            int rc = tmVirtualSyncTryLock(pVM); /** @todo SMP: Here be dragons... Need to get back to this later. FIXME */
    502502            if (RT_SUCCESS(rc))
    503503            {
     
    505505                ASMAtomicXchgBool(&pVM->tm.s.fVirtualSyncTicking, false);
    506506                VM_FF_SET(pVM, VM_FF_TM_VIRTUAL_SYNC);
    507                 tmUnlock(pVM);
     507                tmVirtualSyncUnlock(pVM);
    508508            }
    509509            if (    fCheckTimers
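
    These two hunks narrow the "here be dragons" locking from the global TM lock down
    to the new VirtualSync lock. The getter must never block, hence the try variant; a
    sketch of the stop-and-signal step, matching the second hunk:

        int rc = tmVirtualSyncTryLock(pVM);  /* never block on the read path */
        if (RT_SUCCESS(rc))
        {
            /* Stop the clock and ask the EMTs (via TMR3VirtualSyncFF) to run
             * the expired timers and restart it. */
            ASMAtomicXchgBool(&pVM->tm.s.fVirtualSyncTicking, false);
            VM_FF_SET(pVM, VM_FF_TM_VIRTUAL_SYNC);
            tmVirtualSyncUnlock(pVM);
        }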