VirtualBox

Changeset 19660 in vbox for trunk/src/VBox/VMM/VMMAll


Ignore:
Timestamp:
May 13, 2009 2:09:15 PM (16 years ago)
Author:
vboxsync
Message:

TM+affected: SMP changes in progress.

Location:
trunk/src/VBox/VMM/VMMAll
Files:
2 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/VMM/VMMAll/TMAll.cpp

    r19538 r19660  
    188188        tmUnlock(pVM);
    189189    }
    190     else if (!VM_FF_ISSET(pVM, VM_FF_TIMER))  /**@todo only do this when arming the timer. */
    191     {
    192         STAM_COUNTER_INC(&pVM->tm.s.StatScheduleSetFF);
    193         VM_FF_SET(pVM, VM_FF_TIMER);
     190    else
     191    {
     192        /** @todo FIXME: don't use FF for scheduling! */
     193        PVMCPU pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu];
     194        if (!VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))  /**@todo only do this when arming the timer. */
     195        {
     196            Log5(("TMAll(%u): FF: 0 -> 1\n", __LINE__));
     197            VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
    194198#ifdef IN_RING3
    195         REMR3NotifyTimerPending(pVM);
    196         VMR3NotifyGlobalFFU(pVM->pUVM, VMNOTIFYFF_FLAGS_DONE_REM);
    197 #endif
     199            REMR3NotifyTimerPending(pVM, pVCpuDst);
     200            VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
     201#endif
     202            STAM_COUNTER_INC(&pVM->tm.s.StatScheduleSetFF);
     203        }
    198204    }
    199205}
     
    268274 * This function is called before FFs are checked in the inner execution EM loops.
    269275 *
    270  * @returns Virtual timer ticks to the next event.
     276 * @returns Virtual timer ticks to the next event. (I.e. 0 means that a timer
     277 *          has expired or some important rescheduling is pending.)
    271278 * @param   pVM         Pointer to the shared VM structure.
     279 * @param   pVCpu       Pointer to the shared VMCPU structure of the caller.
    272280 * @thread  The emulation thread.
    273281 */
    274 VMMDECL(uint64_t) TMTimerPoll(PVM pVM)
    275 {
    276     int rc = tmLock(pVM); /* play safe for now */
     282VMMDECL(uint64_t) TMTimerPoll(PVM pVM, PVMCPU pVCpu)
     283{
     284    static const uint64_t   s_u64OtherRet = 500000000; /* 500 ms for non-timer EMTs. */
     285    PVMCPU  pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu];
     286    STAM_COUNTER_INC(&pVM->tm.s.StatPoll);
    277287
    278288    /*
    279      * Return straight away if the timer FF is already set.
     289     * Return straight away if the timer FF is already set ...
    280290     */
    281     if (VM_FF_ISSET(pVM, VM_FF_TIMER))
     291    if (VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
    282292    {
    283293        STAM_COUNTER_INC(&pVM->tm.s.StatPollAlreadySet);
    284 #ifndef IN_RING3
    285         if (RT_SUCCESS(rc))
    286 #endif
    287             tmUnlock(pVM);
    288         return 0;
     294        return pVCpu == pVCpuDst ? 0 : s_u64OtherRet;
     295    }
     296
     297    /*
     298     * ... or if timers are being run.
     299     */
     300    if (pVM->tm.s.fRunningQueues)
     301    {
     302        STAM_COUNTER_INC(&pVM->tm.s.StatPollRunning);
     303        return s_u64OtherRet;
    289304    }
    290305
     
    292307     * Get current time and check the expire times of the two relevant queues.
    293308     */
    294     const uint64_t u64Now = TMVirtualGet(pVM);
     309    int             rc     = tmLock(pVM); /** @todo FIXME: Stop playing safe here... */
     310    const uint64_t  u64Now = TMVirtualGetNoCheck(pVM);
    295311
    296312    /*
    297313     * TMCLOCK_VIRTUAL
    298314     */
    299     const uint64_t u64Expire1 = pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL].u64Expire;
    300     const int64_t i64Delta1 = u64Expire1 - u64Now;
     315    const uint64_t  u64Expire1 = pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL].u64Expire;
     316    const int64_t   i64Delta1 = u64Expire1 - u64Now;
    301317    if (i64Delta1 <= 0)
    302318    {
     
    307323#endif
    308324            tmUnlock(pVM);
    309         VM_FF_SET(pVM, VM_FF_TIMER);
     325        Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
     326        VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
    310327#ifdef IN_RING3
    311         REMR3NotifyTimerPending(pVM);
    312 #endif
    313         return 0;
     328        REMR3NotifyTimerPending(pVM, pVCpuDst);
     329#endif
     330        return pVCpu == pVCpuDst ? 0 : s_u64OtherRet;
    314331    }
    315332
     
    345362    if (i64Delta2 <= 0)
    346363    {
     364        if (    !pVM->tm.s.fRunningQueues
     365            &&  !VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
     366        {
     367            Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
     368            VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
     369#ifdef IN_RING3
     370            REMR3NotifyTimerPending(pVM, pVCpuDst);
     371#endif
     372        }
    347373        STAM_COUNTER_INC(&pVM->tm.s.StatPollVirtualSync);
    348374#ifndef IN_RING3
     
    351377            tmUnlock(pVM);
    352378        LogFlow(("TMTimerPoll: expire2=%RU64 <= now=%RU64\n", u64Expire2, u64Now));
    353         VM_FF_SET(pVM, VM_FF_TIMER);
    354 #ifdef IN_RING3
    355         REMR3NotifyTimerPending(pVM);
    356 #endif
    357         return 0;
     379        return pVCpu == pVCpuDst ? 0 : s_u64OtherRet;
    358380    }
    359381    if (pVM->tm.s.fVirtualSyncCatchUp)
     
    380402 *          0 if the next event has already expired.
    381403 * @param   pVM         Pointer to the shared VM structure.
    382  * @param   pVM         Pointer to the shared VM structure.
     404 * @param   pVCpu       Pointer to the shared VMCPU structure of the caller.
    383405 * @param   pu64Delta   Where to store the delta.
    384406 * @thread  The emulation thread.
    385407 */
    386 VMMDECL(uint64_t) TMTimerPollGIP(PVM pVM, uint64_t *pu64Delta)
    387 {
    388     int rc = tmLock(pVM); /* play safe for now. */
     408VMMDECL(uint64_t) TMTimerPollGIP(PVM pVM, PVMCPU pVCpu, uint64_t *pu64Delta)
     409{
     410    static const uint64_t   s_u64OtherRet = 500000000; /* 500 million GIP ticks for non-timer EMTs. */
     411    PVMCPU                  pVCpuDst      = &pVM->aCpus[pVM->tm.s.idTimerCpu];
     412    const uint64_t          u64Now        = TMVirtualGetNoCheck(pVM);
     413    STAM_COUNTER_INC(&pVM->tm.s.StatPollGIP);
    389414
    390415    /*
    391      * Return straight away if the timer FF is already set.
     416     * Return straight away if the timer FF is already set ...
    392417     */
    393     if (VM_FF_ISSET(pVM, VM_FF_TIMER))
    394     {
    395         STAM_COUNTER_INC(&pVM->tm.s.StatPollAlreadySet);
    396 #ifndef IN_RING3
    397         if (RT_SUCCESS(rc))
    398 #endif
    399             tmUnlock(pVM);
    400         *pu64Delta = 0;
    401         return 0;
     418    if (VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
     419    {
     420        STAM_COUNTER_INC(&pVM->tm.s.StatPollGIPAlreadySet);
     421        if (pVCpuDst == pVCpu)
     422        {
     423            *pu64Delta = 0;
     424            return 0;
     425        }
     426        *pu64Delta = s_u64OtherRet;
     427        return u64Now + pVM->tm.s.u64VirtualOffset + s_u64OtherRet;
    402428    }
    403429
    404430    /*
    405      * Get current time and check the expire times of the two relevant queues.
     431     * ... or if timers are being run.
    406432     */
    407     const uint64_t  u64Now = TMVirtualGet(pVM);
     433    if (pVM->tm.s.fRunningQueues)
     434    {
     435        STAM_COUNTER_INC(&pVM->tm.s.StatPollGIPRunning);
     436        *pu64Delta = s_u64OtherRet;
     437        return u64Now + pVM->tm.s.u64VirtualOffset + s_u64OtherRet;
     438    }
     439
     440    int rc = tmLock(pVM); /** @todo FIXME: Stop playing safe... */
    408441
    409442    /*
    410      * TMCLOCK_VIRTUAL
     443     * Check for TMCLOCK_VIRTUAL expiration.
    411444     */
    412445    const uint64_t  u64Expire1 = pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL].u64Expire;
     
    414447    if (i64Delta1 <= 0)
    415448    {
    416         STAM_COUNTER_INC(&pVM->tm.s.StatPollVirtual);
     449        STAM_COUNTER_INC(&pVM->tm.s.StatPollGIPVirtual);
     450        Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
     451        VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
     452#ifdef IN_RING3
     453        REMR3NotifyTimerPending(pVM, pVCpuDst);
     454#endif
    417455#ifndef IN_RING3
    418456        if (RT_SUCCESS(rc))
     
    420458            tmUnlock(pVM);
    421459        LogFlow(("TMTimerPoll: expire1=%RU64 <= now=%RU64\n", u64Expire1, u64Now));
    422         VM_FF_SET(pVM, VM_FF_TIMER);
    423 #ifdef IN_RING3
    424         REMR3NotifyTimerPending(pVM);
    425 #endif
    426         *pu64Delta = 0;
    427         return 0;
     460        if (pVCpuDst == pVCpu)
     461        {
     462            *pu64Delta = 0;
     463            return 0;
     464        }
     465        *pu64Delta = s_u64OtherRet;
     466        return u64Now + pVM->tm.s.u64VirtualOffset + s_u64OtherRet;
    428467    }
    429468
    430469    /*
    431      * TMCLOCK_VIRTUAL_SYNC
     470     * Check for TMCLOCK_VIRTUAL_SYNC expiration.
    432471     * This isn't quite as straightforward if in a catch-up; not only do
    433472     * we have to adjust the 'now' but we have to adjust the delta as well.
     
    460499    if (i64Delta2 <= 0)
    461500    {
    462         STAM_COUNTER_INC(&pVM->tm.s.StatPollVirtualSync);
     501        if (!VMCPU_FF_ISSET(pVCpu, VMCPU_FF_TIMER))
     502        {
     503            Log5(("TMAll(%u): FF: 0 -> 1\n", __LINE__));
     504            VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER /** @todo poke */);
     505#ifdef IN_RING3
     506            REMR3NotifyTimerPending(pVM, pVCpuDst);
     507#endif
     508        }
     509        STAM_COUNTER_INC(&pVM->tm.s.StatPollGIPVirtualSync);
     510
    463511#ifndef IN_RING3
    464512        if (RT_SUCCESS(rc))
     
    466514            tmUnlock(pVM);
    467515        LogFlow(("TMTimerPoll: expire2=%RU64 <= now=%RU64\n", u64Expire2, u64Now));
    468         VM_FF_SET(pVM, VM_FF_TIMER);
    469 #ifdef IN_RING3
    470         REMR3NotifyTimerPending(pVM);
    471 #endif
    472         *pu64Delta = 0;
    473         return 0;
     516        if (pVCpuDst == pVCpu)
     517        {
     518            *pu64Delta = 0;
     519            return 0;
     520        }
     521        *pu64Delta = s_u64OtherRet;
     522        return u64Now + pVM->tm.s.u64VirtualOffset + s_u64OtherRet;
    474523    }
    475524    if (pVM->tm.s.fVirtualSyncCatchUp)
    476525        i64Delta2 = ASMMultU64ByU32DivByU32(i64Delta2, 100, pVM->tm.s.u32VirtualSyncCatchUpPercentage + 100);
    477526
    478     /*
    479      * Return the GIP time of the next event.
    480      * This is the reverse of what tmVirtualGetRaw is doing.
    481      */
    482     STAM_COUNTER_INC(&pVM->tm.s.StatPollMiss);
    483     uint64_t u64GipTime = RT_MIN(i64Delta1, i64Delta2);
    484     *pu64Delta = u64GipTime;
    485     u64GipTime += u64Now + pVM->tm.s.u64VirtualOffset;
    486     if (RT_UNLIKELY(!pVM->tm.s.fVirtualWarpDrive))
    487     {
    488         u64GipTime -= pVM->tm.s.u64VirtualWarpDriveStart; /* the start is GIP time. */
    489         u64GipTime *= 100;
    490         u64GipTime /= pVM->tm.s.u32VirtualWarpDrivePercentage;
    491         u64GipTime += pVM->tm.s.u64VirtualWarpDriveStart;
    492     }
    493 
     527    uint64_t u64GipTime;
     528    if (pVCpuDst == pVCpu)
     529    {
     530        /*
     531         * Return the GIP time of the next event.
     532         * This is the reverse of what tmVirtualGetRaw is doing.
     533         */
     534        STAM_COUNTER_INC(&pVM->tm.s.StatPollGIPMiss);
     535        u64GipTime = RT_MIN(i64Delta1, i64Delta2);
     536        *pu64Delta = u64GipTime;
     537        u64GipTime += u64Now + pVM->tm.s.u64VirtualOffset;
     538        if (RT_UNLIKELY(!pVM->tm.s.fVirtualWarpDrive))
     539        {
     540            u64GipTime -= pVM->tm.s.u64VirtualWarpDriveStart; /* the start is GIP time. */
     541            u64GipTime *= 100;
     542            u64GipTime /= pVM->tm.s.u32VirtualWarpDrivePercentage;
     543            u64GipTime += pVM->tm.s.u64VirtualWarpDriveStart;
     544        }
     545    }
     546    else
     547    {
     548        *pu64Delta = s_u64OtherRet;
     549        u64GipTime = u64Now + pVM->tm.s.u64VirtualOffset + s_u64OtherRet;
     550    }
    494551#ifndef IN_RING3
    495552    if (RT_SUCCESS(rc))
  • trunk/src/VBox/VMM/VMMAll/TMAllVirtual.cpp

    r19500 r19660  
    330330         * Use the chance to check for expired timers.
    331331         */
    332         if (    fCheckTimers
    333             &&  !VM_FF_ISSET(pVM, VM_FF_TIMER)
    334             &&  !pVM->tm.s.fRunningQueues
    335             &&  (   pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL].u64Expire <= u64
    336                  || (   pVM->tm.s.fVirtualSyncTicking
    337                      && pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire <= u64 - pVM->tm.s.offVirtualSync
     332        if (fCheckTimers)
     333        {
     334            PVMCPU pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu];
     335            if (    !VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER)
     336                &&  !pVM->tm.s.fRunningQueues
     337                &&  (   pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL].u64Expire <= u64
     338                     || (   pVM->tm.s.fVirtualSyncTicking
     339                         && pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire <= u64 - pVM->tm.s.offVirtualSync
     340                        )
    338341                    )
    339                 )
    340            )
    341         {
    342             VM_FF_SET(pVM, VM_FF_TIMER);
    343             STAM_COUNTER_INC(&pVM->tm.s.StatVirtualGetSetFF);
     342                &&  !pVM->tm.s.fRunningQueues
     343               )
     344            {
     345                STAM_COUNTER_INC(&pVM->tm.s.StatVirtualGetSetFF);
     346                Log5(("TMAllVirtual(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
     347                VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
    344348#ifdef IN_RING3
    345             REMR3NotifyTimerPending(pVM);
    346             VMR3NotifyGlobalFFU(pVM->pUVM, VMNOTIFYFF_FLAGS_DONE_REM);
    347 #endif
     349                REMR3NotifyTimerPending(pVM, pVCpuDst);
     350                VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
     351#endif
     352            }
    348353        }
    349354    }
     
    372377
    373378/**
    374  * Gets the current TMCLOCK_VIRTUAL time
     379 * Gets the current TMCLOCK_VIRTUAL time without checking
     380 * timers or anything.
     381 *
     382 * Meaning, this has no side effect on FFs like TMVirtualGet may have.
    375383 *
    376384 * @returns The timestamp.
    377  * @param   pVM             VM handle.
    378  * @param   fCheckTimers    Check timers or not
    379  *
    380  * @remark  While the flow of time will never go backwards, the speed of the
    381  *          progress varies due to inaccurate RTTimeNanoTS and TSC. The latter can be
    382  *          influenced by power saving (SpeedStep, PowerNow!), while the former
    383  *          makes use of TSC and kernel timers.
    384  */
    385 VMMDECL(uint64_t) TMVirtualGetEx(PVM pVM, bool fCheckTimers)
    386 {
    387     return tmVirtualGet(pVM, fCheckTimers);
     385 * @param   pVM     VM handle.
     386 *
     387 * @remarks See TMVirtualGet.
     388 */
     389VMMDECL(uint64_t) TMVirtualGetNoCheck(PVM pVM)
     390{
     391    return tmVirtualGet(pVM, false /*fCheckTimers*/);
    388392}
    389393
     
    397401 * @thread  EMT.
    398402 */
    399 VMMDECL(uint64_t) TMVirtualSyncGetEx(PVM pVM, bool fCheckTimers)
    400 {
    401     uint64_t u64;
     403DECLINLINE(uint64_t) tmVirtualSyncGetEx(PVM pVM, bool fCheckTimers)
     404{
     405    STAM_COUNTER_INC(&pVM->tm.s.StatVirtualGetSync);
     406    uint64_t    u64;
     407
    402408    if (pVM->tm.s.fVirtualSyncTicking)
    403409    {
    404         STAM_COUNTER_INC(&pVM->tm.s.StatVirtualGetSync);
     410        PVMCPU pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu];
    405411
    406412        /*
     
    409415        Assert(pVM->tm.s.cVirtualTicking);
    410416        u64 = tmVirtualGetRaw(pVM);
    411         if (    fCheckTimers
    412             &&  !VM_FF_ISSET(pVM, VM_FF_TIMER)
    413             &&  pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL].u64Expire <= u64)
     417        if (fCheckTimers)
    414418        {
    415             VM_FF_SET(pVM, VM_FF_TIMER);
     419            if (    !VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER)
     420                &&  pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL].u64Expire <= u64)
     421            {
     422                Log5(("TMAllVirtual(%u): FF: 0 -> 1\n", __LINE__));
     423                VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
    416424#ifdef IN_RING3
    417             REMR3NotifyTimerPending(pVM);
    418             VMR3NotifyGlobalFFU(pVM->pUVM, VMNOTIFYFF_FLAGS_DONE_REM);
    419 #endif
    420             STAM_COUNTER_INC(&pVM->tm.s.StatVirtualGetSyncSetFF);
     425                REMR3NotifyTimerPending(pVM, pVCpuDst);
     426                VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM /** @todo |VMNOTIFYFF_FLAGS_POKE*/);
     427#endif
     428                STAM_COUNTER_INC(&pVM->tm.s.StatVirtualGetSyncSetFF);
     429            }
    421430        }
    422431
     
    490499        {
    491500            u64 = u64Expire;
    492             int rc = tmTryLock(pVM); /** @todo SMP: Here be dragons... Need to get back to this later. */
     501            int rc = tmTryLock(pVM); /** @todo SMP: Here be dragons... Need to get back to this later. FIXME */
    493502            if (RT_SUCCESS(rc))
    494503            {
    495504                ASMAtomicXchgU64(&pVM->tm.s.u64VirtualSync, u64);
    496505                ASMAtomicXchgBool(&pVM->tm.s.fVirtualSyncTicking, false);
     506                VM_FF_SET(pVM, VM_FF_TM_VIRTUAL_SYNC);
    497507                tmUnlock(pVM);
    498508            }
    499509            if (    fCheckTimers
    500                 &&  !VM_FF_ISSET(pVM, VM_FF_TIMER))
     510                &&  !VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
    501511            {
    502                 VM_FF_SET(pVM, VM_FF_TIMER);
     512                Log5(("TMAllVirtual(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
     513                VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
    503514#ifdef IN_RING3
    504                 REMR3NotifyTimerPending(pVM);
    505                 VMR3NotifyGlobalFFU(pVM->pUVM, VMNOTIFYFF_FLAGS_DONE_REM);
     515                REMR3NotifyTimerPending(pVM, pVCpuDst);
     516                VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
    506517#endif
    507518                STAM_COUNTER_INC(&pVM->tm.s.StatVirtualGetSyncSetFF);
     
    516527        u64 = pVM->tm.s.u64VirtualSync;
    517528
    518         /*
    519          * If it looks like a halt caused by pending timers, make sure the FF is raised.
    520          * This is a safeguard against timer queue runner leaving the virtual sync clock stopped.
    521          */
    522         if (    fCheckTimers
    523             &&  pVM->tm.s.cVirtualTicking
    524             &&  !VM_FF_ISSET(pVM, VM_FF_TIMER))
    525         {
    526             const uint64_t u64Expire = pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire;
    527             if (u64 >= u64Expire)
    528             {
    529                 VM_FF_SET(pVM, VM_FF_TIMER);
    530 #ifdef IN_RING3
    531                 REMR3NotifyTimerPending(pVM);
    532                 VMR3NotifyGlobalFFU(pVM->pUVM, VMNOTIFYFF_FLAGS_DONE_REM);
    533 #endif
    534                 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualGetSyncSetFF);
    535                 Log4(("TM: %RU64/%RU64: exp tmr=>ff (!)\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
    536             }
    537         }
    538     }
     529    }
     530
    539531    return u64;
    540532}
     
    547539 * @param   pVM             VM handle.
    548540 * @thread  EMT.
     541 * @remarks May set the timer and virtual sync FFs.
    549542 */
    550543VMMDECL(uint64_t) TMVirtualSyncGet(PVM pVM)
    551544{
    552     return TMVirtualSyncGetEx(pVM, true /* check timers */);
     545    return tmVirtualSyncGetEx(pVM, true /* check timers */);
     546}
     547
     548
     549/**
     550 * Gets the current TMCLOCK_VIRTUAL_SYNC time without checking timers running on
     551 * TMCLOCK_VIRTUAL.
     552 *
     553 * @returns The timestamp.
     554 * @param   pVM             VM handle.
     555 * @thread  EMT.
     556 * @remarks May set the timer and virtual sync FFs.
     557 */
     558VMMDECL(uint64_t) TMVirtualSyncGetNoCheck(PVM pVM)
     559{
     560    return tmVirtualSyncGetEx(pVM, false /* check timers */);
     561}
     562
     563
     564/**
     565 * Gets the current TMCLOCK_VIRTUAL_SYNC time.
     566 *
     567 * @returns The timestamp.
     568 * @param   pVM     VM handle.
     569 * @param   fCheckTimers    Check timers on the virtual clock or not.
     570 * @thread  EMT.
     571 * @remarks May set the timer and virtual sync FFs.
     572 */
     573VMMDECL(uint64_t) TMVirtualSyncGetEx(PVM pVM, bool fCheckTimers)
     574{
     575    return tmVirtualSyncGetEx(pVM, fCheckTimers);
    553576}
    554577
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette