VirtualBox

Changeset 19820 in vbox for trunk/src


Timestamp:
May 19, 2009 1:14:54 PM
Author:
vboxsync
svn:sync-xref-src-repo-rev:
47474
Message:

TM: Joined up the two poll functions and made TMTimerPollGIP lockless as well.
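
In outline: TMTimerPoll and TMTimerPollGIP previously duplicated the same polling logic; after this change both are thin wrappers around a single inline worker, tmTimerPollInternal, which returns the GIP timestamp of the next event and stores the virtual-clock delta through a pointer. The resulting shape, condensed from the diff below:

    /* Common worker: all the checking lives here; returns the GIP time of
       the next event and stores the virtual-clock delta in *pu64Delta.   */
    DECLINLINE(uint64_t) tmTimerPollInternal(PVM pVM, PVMCPU pVCpu, uint64_t *pu64Delta);

    VMMDECL(uint64_t) TMTimerPoll(PVM pVM, PVMCPU pVCpu)
    {
        uint64_t off = 0;                   /* caller only wants the delta */
        tmTimerPollInternal(pVM, pVCpu, &off);
        return off;
    }

    VMMDECL(uint64_t) TMTimerPollGIP(PVM pVM, PVMCPU pVCpu, uint64_t *pu64Delta)
    {
        return tmTimerPollInternal(pVM, pVCpu, pu64Delta);
    }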

Location:
trunk/src/VBox/VMM
Files:
3 edited

  • trunk/src/VBox/VMM/TM.cpp

    r19810 r19820  
    567567    STAM_REG(pVM, &pVM->tm.s.StatPollVirtual,                         STAMTYPE_COUNTER, "/TM/Poll/HitsVirtual",                STAMUNIT_OCCURENCES, "The number of times TMTimerPoll found an expired TMCLOCK_VIRTUAL queue.");
    568568    STAM_REG(pVM, &pVM->tm.s.StatPollVirtualSync,                     STAMTYPE_COUNTER, "/TM/Poll/HitsVirtualSync",            STAMUNIT_OCCURENCES, "The number of times TMTimerPoll found an expired TMCLOCK_VIRTUAL_SYNC queue.");
    569 
    570     STAM_REG(pVM, &pVM->tm.s.StatPollGIP,                             STAMTYPE_COUNTER, "/TM/PollGIP",                         STAMUNIT_OCCURENCES, "TMTimerPollGIP calls.");
    571     STAM_REG(pVM, &pVM->tm.s.StatPollGIPAlreadySet,                   STAMTYPE_COUNTER, "/TM/PollGIP/AlreadySet",              STAMUNIT_OCCURENCES, "TMTimerPollGIP calls where the FF was already set.");
    572     STAM_REG(pVM, &pVM->tm.s.StatPollGIPVirtual,                      STAMTYPE_COUNTER, "/TM/PollGIP/HitsVirtual",             STAMUNIT_OCCURENCES, "The number of times TMTimerPollGIP found an expired TMCLOCK_VIRTUAL queue.");
    573     STAM_REG(pVM, &pVM->tm.s.StatPollGIPVirtualSync,                  STAMTYPE_COUNTER, "/TM/PollGIP/HitsVirtualSync",         STAMUNIT_OCCURENCES, "The number of times TMTimerPollGIP found an expired TMCLOCK_VIRTUAL_SYNC queue.");
    574     STAM_REG(pVM, &pVM->tm.s.StatPollGIPMiss,                         STAMTYPE_COUNTER, "/TM/PollGIP/Miss",                    STAMUNIT_OCCURENCES, "TMTimerPollGIP calls where nothing had expired.");
    575     STAM_REG(pVM, &pVM->tm.s.StatPollGIPRunning,                      STAMTYPE_COUNTER, "/TM/PollGIP/Running",                 STAMUNIT_OCCURENCES, "TMTimerPollGIP calls where the queues were being run.");
    576569
    577570    STAM_REG(pVM, &pVM->tm.s.StatPostponedR3,                         STAMTYPE_COUNTER, "/TM/PostponedR3",                     STAMUNIT_OCCURENCES, "Postponed due to unschedulable state, in ring-3.");
  • trunk/src/VBox/VMM/TMInternal.h

    r19810 r19820  
    477477    STAMCOUNTER                 StatPollVirtualSync;
    478478    /** @} */
    479     /** TMTimerPollGIP
    480      * @{ */
    481     STAMCOUNTER                 StatPollGIP;
    482     STAMCOUNTER                 StatPollGIPAlreadySet;
    483     STAMCOUNTER                 StatPollGIPVirtual;
    484     STAMCOUNTER                 StatPollGIPVirtualSync;
    485     STAMCOUNTER                 StatPollGIPMiss;
    486     STAMCOUNTER                 StatPollGIPRunning;
    487     /** @} */
    488479    /** TMTimerSet
    489480     * @{ */
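
The TM.cpp and TMInternal.h hunks above are two halves of the same cleanup: with the poll functions joined up, the separate /TM/PollGIP statistics become redundant, so each STAMCOUNTER field dropped from the TM state also loses its matching STAM_REG registration. The pattern being removed, using one counter from the diff:

    /* TMInternal.h: the raw counter is a field in the TM state. */
    STAMCOUNTER                 StatPollGIP;

    /* TM.cpp: at VM init the field is registered under a hierarchical name. */
    STAM_REG(pVM, &pVM->tm.s.StatPollGIP, STAMTYPE_COUNTER, "/TM/PollGIP",
             STAMUNIT_OCCURENCES, "TMTimerPollGIP calls.");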
  • trunk/src/VBox/VMM/VMMAll/TMAll.cpp

    r19810 r19820  
    326326
    327327#ifdef VBOX_HIGH_RES_TIMERS_HACK
    328 /**
    329  * Set FF if we've passed the next virtual event.
     328
     329/**
     330 * Worker for tmTimerPollInternal that handles misses when the dedicated timer
     331 * EMT is polling.
     332 *
     333 * @returns See tmTimerPollInternal.
     334 * @param   pVM                 Pointer to the shared VM structure.
     335 * @param   u64Now              Current virtual clock timestamp.
     336 * @param   u64Delta            The delta to the next event in ticks of the
     337 *                              virtual clock.
     338 * @param   pu64Delta           Where to return the delta.
     340 */
     341DECLINLINE(uint64_t) tmTimerPollReturnMiss(PVM pVM, uint64_t u64Now, uint64_t u64Delta, uint64_t *pu64Delta)
     342{
     343    Assert(!(u64Delta & RT_BIT_64(63)));
     344
     345    if (!pVM->tm.s.fVirtualWarpDrive)
     346    {
     347        *pu64Delta = u64Delta;
     348        return u64Delta + u64Now + pVM->tm.s.u64VirtualOffset;
     349    }
     350
     351    /*
     352     * Warp drive adjustments - this is the reverse of what tmVirtualGetRaw is doing.
     353     */
     354    uint64_t const u64Start = pVM->tm.s.u64VirtualWarpDriveStart;
     355    uint32_t const u32Pct   = pVM->tm.s.u32VirtualWarpDrivePercentage;
     356
     357    uint64_t u64GipTime = u64Delta + u64Now + pVM->tm.s.u64VirtualOffset;
     358    u64GipTime -= u64Start; /* the start is GIP time. */
     359    if (u64GipTime >= u64Delta)
     360    {
     361        u64GipTime = ASMMultU64ByU32DivByU32(u64GipTime, 100, u32Pct);
     362        u64Delta   = ASMMultU64ByU32DivByU32(u64Delta, 100, u32Pct);
     363    }
     364    else
     365    {
     366        u64Delta -= u64GipTime;
     367        u64GipTime = ASMMultU64ByU32DivByU32(u64GipTime, 100, u32Pct);
     368        u64Delta += u64GipTime;
     369    }
     370    *pu64Delta = u64Delta;
     371    u64GipTime += u64Start;
     372    return u64GipTime;
     373}
     374
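To make the warp-drive reversal in tmTimerPollReturnMiss concrete: tmVirtualGetRaw scales elapsed GIP time up by u32VirtualWarpDrivePercentage/100 to produce virtual time, so a virtual-clock delta must be scaled back down by 100/u32Pct to land on the GIP clock. A worked example with made-up numbers (assuming percentages above 100 speed the virtual clock up, which is what the reversal above implies):

    /* Warp drive at 200%: virtual time runs twice as fast as GIP time.  */
    uint32_t const u32Pct   = 200;      /* u32VirtualWarpDrivePercentage */
    uint64_t       u64Delta = 1000000;  /* next event 1 ms of virtual ns away */

    /* On the GIP clock the event is only half as far away:
       1000000 * 100 / 200 = 500000 ns. */
    uint64_t const u64GipDelta = ASMMultU64ByU32DivByU32(u64Delta, 100, u32Pct);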
     375
     376/**
     377 * Worker for tmTimerPollInternal dealing with returns on virtual CPUs other
     378 * than the one dedicated to timer work.
     379 *
     380 * @returns See tmTimerPollInternal.
     381 * @param   pVM                 Pointer to the shared VM structure.
     382 * @param   u64Now              Current virtual clock timestamp.
     383 * @param   pu64Delta           Where to return the delta.
     384 */
     385DECL_FORCE_INLINE(uint64_t) tmTimerPollReturnOtherCpu(PVM pVM, uint64_t u64Now, uint64_t *pu64Delta)
     386{
     387    static const uint64_t s_u64OtherRet = 500000000; /* 500 ms for non-timer EMTs. */
     388    *pu64Delta = s_u64OtherRet;
     389    return u64Now + pVM->tm.s.u64VirtualOffset + s_u64OtherRet;
     390}
     391
     392
     393/**
     394 * Worker for tmTimerPollInternal.
     395 *
     396 * @returns See tmTimerPollInternal.
     397 * @param   pVM                 Pointer to the shared VM structure.
     398 * @param   pVCpu               Pointer to the shared VMCPU structure of the
     399 *                              caller.
     400 * @param   pVCpuDst            Pointer to the shared VMCPU structure of the
     401 *                              dedicated timer EMT.
     402 * @param   u64Now              Current virtual clock timestamp.
     403 * @param   pu64Delta           Where to return the delta.
     404 * @param   pCounter            The statistics counter to update.
     405 */
     406DECL_FORCE_INLINE(uint64_t) tmTimerPollReturnHit(PVM pVM, PVMCPU pVCpu, PVMCPU pVCpuDst, uint64_t u64Now,
     407                                                 uint64_t *pu64Delta, PSTAMCOUNTER pCounter)
     408{
     409    STAM_COUNTER_INC(pCounter);
     410    if (pVCpuDst != pVCpu)
     411        return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
     412    *pu64Delta = 0;
     413    return 0;
     414}
     415
     416/**
     417 * Common worker for TMTimerPollGIP and TMTimerPoll.
    330418 *
    331419 * This function is called before FFs are checked in the inner execution EM loops.
    332420 *
    333  * @returns Virtual timer ticks to the next event. (I.e. 0 means that a timer
    334  *          has expired or some important rescheduling is pending.)
     421 * @returns The GIP timestamp of the next event.
     422 *          0 if the next event has already expired.
     423 *
    335424 * @param   pVM         Pointer to the shared VM structure.
    336425 * @param   pVCpu       Pointer to the shared VMCPU structure of the caller.
     426 * @param   pu64Delta   Where to store the delta.
     427 *
    337428 * @thread  The emulation thread.
    338  */
    339 VMMDECL(uint64_t) TMTimerPoll(PVM pVM, PVMCPU pVCpu)
    340 {
    341     static const uint64_t   s_u64OtherRet = 500000000; /* 500 ms for non-timer EMTs. */
    342     PVMCPU  pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu];
     429 *
     430 * @remarks GIP uses ns ticks.
     431 */
     432DECLINLINE(uint64_t) tmTimerPollInternal(PVM pVM, PVMCPU pVCpu, uint64_t *pu64Delta)
     433{
     434    PVMCPU                  pVCpuDst      = &pVM->aCpus[pVM->tm.s.idTimerCpu];
     435    const uint64_t          u64Now        = TMVirtualGetNoCheck(pVM);
    343436    STAM_COUNTER_INC(&pVM->tm.s.StatPoll);
    344437
     
    347440     */
    348441    if (VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
    349     {
    350         STAM_COUNTER_INC(&pVM->tm.s.StatPollAlreadySet);
    351         return pVCpu == pVCpuDst ? 0 : s_u64OtherRet;
    352     }
     442        return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollAlreadySet);
    353443
    354444    /*
    355445     * ... or if timers are being run.
    356446     */
    357     if (pVM->tm.s.fRunningQueues)
     447    if (ASMAtomicReadBool(&pVM->tm.s.fRunningQueues))
    358448    {
    359449        STAM_COUNTER_INC(&pVM->tm.s.StatPollRunning);
    360         return s_u64OtherRet;
     450        return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
    361451    }
    362452
    363453    /*
    364      * Get current time and check the expire times of the two relevant queues.
     454     * Check for TMCLOCK_VIRTUAL expiration.
    365455     */
    366     const uint64_t  u64Now = TMVirtualGetNoCheck(pVM);
    367 
    368     /*
    369      * TMCLOCK_VIRTUAL
    370      */
    371     const uint64_t  u64Expire1 = ASMAtomicUoReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL].u64Expire);
     456    const uint64_t  u64Expire1 = ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL].u64Expire);
    372457    const int64_t   i64Delta1  = u64Expire1 - u64Now;
    373458    if (i64Delta1 <= 0)
    374459    {
    375         LogFlow(("TMTimerPoll: expire1=%RU64 <= now=%RU64\n", u64Expire1, u64Now));
    376         if (    !pVM->tm.s.fRunningQueues
    377             &&  !VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
     460        if (!VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
    378461        {
    379462            Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
     
    383466#endif
    384467        }
    385         STAM_COUNTER_INC(&pVM->tm.s.StatPollVirtual);
    386         return pVCpu == pVCpuDst ? 0 : s_u64OtherRet;
     468        LogFlow(("TMTimerPoll: expire1=%RU64 <= now=%RU64\n", u64Expire1, u64Now));
     469        return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtual);
    387470    }
    388471
    389472    /*
    390      * TMCLOCK_VIRTUAL_SYNC
     473     * Check for TMCLOCK_VIRTUAL_SYNC expiration.
    391474     * This isn't quite as straightforward if in a catch-up; not only do
    392475     * we have to adjust the 'now' but we have to adjust the delta as well.
     
    409492            {
    410493                u64VirtualSyncNow = u64Now - u64VirtualSyncNow;
    411                 if (u64VirtualSyncNow < u64Expire2)
     494                int64_t i64Delta2 = u64Expire2 - u64VirtualSyncNow;
     495                if (i64Delta2 > 0)
    412496                {
    413497                    STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
    414498                    STAM_COUNTER_INC(&pVM->tm.s.StatPollMiss);
    415                     return pVCpu == pVCpuDst
    416                          ? RT_MIN(i64Delta1, (int64_t)(u64Expire2 - u64VirtualSyncNow))
    417                          : s_u64OtherRet;
     499
     500                    if (pVCpu == pVCpuDst)
     501                        return tmTimerPollReturnMiss(pVM, u64Now, RT_MIN(i64Delta1, i64Delta2), pu64Delta);
     502                    return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
    418503                }
    419504
     
    428513                }
    429514
    430                 STAM_COUNTER_INC(&pVM->tm.s.StatPollVirtualSync);
    431515                STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
    432516                LogFlow(("TMTimerPoll: expire2=%RU64 <= now=%RU64\n", u64Expire2, u64Now));
    433                 return pVCpu == pVCpuDst ? 0 : s_u64OtherRet;
     517                return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
    434518            }
    435519        }
     
    437521    else
    438522    {
    439         STAM_COUNTER_INC(&pVM->tm.s.StatPollVirtualSync);
    440523        STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
    441524        LogFlow(("TMTimerPoll: stopped\n"));
    442         return pVCpu == pVCpuDst ? 0 : s_u64OtherRet;
     525        return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
    443526    }
    444527
     
    497580        /* Repeat the initial checks before iterating. */
    498581        if (VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
    499         {
    500             STAM_COUNTER_INC(&pVM->tm.s.StatPollAlreadySet);
    501             return pVCpu == pVCpuDst ? 0 : s_u64OtherRet;
    502         }
     582            return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollAlreadySet);
    503583        if (ASMAtomicUoReadBool(&pVM->tm.s.fRunningQueues))
    504584        {
    505585            STAM_COUNTER_INC(&pVM->tm.s.StatPollRunning);
    506             return s_u64OtherRet;
     586            return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
    507587        }
    508588        if (!ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking))
    509589        {
    510             STAM_COUNTER_INC(&pVM->tm.s.StatPollVirtualSync);
    511590            LogFlow(("TMTimerPoll: stopped\n"));
    512             return pVCpu == pVCpuDst ? 0 : s_u64OtherRet;
     591            return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
    513592        }
    514593        if (cOuterTries <= 0)
     
    519598    u64VirtualSyncNow = u64Now - off;
    520599
     600    /* Calc delta and see if we've got a virtual sync hit. */
    521601    int64_t i64Delta2 = u64Expire2 - u64VirtualSyncNow;
    522602    if (i64Delta2 <= 0)
     
    533613        STAM_COUNTER_INC(&pVM->tm.s.StatPollVirtualSync);
    534614        LogFlow(("TMTimerPoll: expire2=%RU64 <= now=%RU64\n", u64Expire2, u64Now));
    535         return pVCpu == pVCpuDst ? 0 : s_u64OtherRet;
     615        return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
    536616    }
    537617
     
    544624        if (fCatchUp)
    545625            i64Delta2 = ASMMultU64ByU32DivByU32(i64Delta2, 100, u32Pct + 100);
    546         return RT_MIN(i64Delta1, i64Delta2);
    547     }
    548     return s_u64OtherRet;
     626        return tmTimerPollReturnMiss(pVM, u64Now, RT_MIN(i64Delta1, i64Delta2), pu64Delta);
     627    }
     628    return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
     629}
     630
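The catch-up case goes the other way: while catching up, TMCLOCK_VIRTUAL_SYNC runs at (100 + u32VirtualSyncCatchUpPercentage)% of the virtual clock, so a delta measured on the sync clock shrinks when expressed in virtual-clock time, which is what the ASMMultU64ByU32DivByU32(i64Delta2, 100, u32Pct + 100) above computes. With made-up numbers:

    /* Catching up at 25%: the sync clock runs at 125% speed.              */
    uint32_t const u32Pct    = 25;      /* u32VirtualSyncCatchUpPercentage */
    int64_t        i64Delta2 = 1250000; /* 1.25 ms away on the sync clock  */

    /* In ordinary virtual-clock time the event is only 1 ms away:
       1250000 * 100 / (25 + 100) = 1000000 ns. */
    i64Delta2 = ASMMultU64ByU32DivByU32(i64Delta2, 100, u32Pct + 100);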
     631
     632/**
     633 * Set FF if we've passed the next virtual event.
     634 *
     635 * This function is called before FFs are checked in the inner execution EM loops.
     636 *
     637 * @returns Virtual timer ticks to the next event. (I.e. 0 means that a timer
     638 *          has expired or some important rescheduling is pending.)
     639 * @param   pVM         Pointer to the shared VM structure.
     640 * @param   pVCpu       Pointer to the shared VMCPU structure of the caller.
     641 * @thread  The emulation thread.
     642 */
     643VMMDECL(uint64_t) TMTimerPoll(PVM pVM, PVMCPU pVCpu)
     644{
     645    AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
     646    uint64_t off = 0;
     647    tmTimerPollInternal(pVM, pVCpu, &off);
     648    return off;
    549649}
    550650
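For callers the contract is unchanged: 0 means a timer has expired (or rescheduling is pending) and VMCPU_FF_TIMER should be serviced, while a non-zero value bounds how long guest execution may safely continue. A hypothetical caller-side sketch (not code from this changeset):

    /* Virtual-tick variant, e.g. in an EM inner loop. */
    uint64_t const cNsToNext = TMTimerPoll(pVM, pVCpu);
    if (!cNsToNext)
        { /* timer work pending: VMCPU_FF_TIMER is (being) set */ }

    /* GIP variant: additionally yields an absolute GIP (ns) timestamp
       that ring-0 code could use as a one-shot host timer deadline.   */
    uint64_t u64Delta;
    uint64_t const u64GipDeadline = TMTimerPollGIP(pVM, pVCpu, &u64Delta);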
     
    564664VMMDECL(uint64_t) TMTimerPollGIP(PVM pVM, PVMCPU pVCpu, uint64_t *pu64Delta)
    565665{
    566     static const uint64_t   s_u64OtherRet = 500000000; /* 500 million GIP ticks for non-timer EMTs. */
    567     PVMCPU                  pVCpuDst      = &pVM->aCpus[pVM->tm.s.idTimerCpu];
    568     const uint64_t          u64Now        = TMVirtualGetNoCheck(pVM);
    569     STAM_COUNTER_INC(&pVM->tm.s.StatPollGIP);
    570 
    571     /*
    572      * Return straight away if the timer FF is already set ...
    573      */
    574     if (VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
    575     {
    576         STAM_COUNTER_INC(&pVM->tm.s.StatPollGIPAlreadySet);
    577         if (pVCpuDst == pVCpu)
    578         {
    579             *pu64Delta = 0;
    580             return 0;
    581         }
    582         *pu64Delta = s_u64OtherRet;
    583         return u64Now + pVM->tm.s.u64VirtualOffset + s_u64OtherRet;
    584     }
    585 
    586     /*
    587      * ... or if timers are being run.
    588      */
    589     if (pVM->tm.s.fRunningQueues)
    590     {
    591         STAM_COUNTER_INC(&pVM->tm.s.StatPollGIPRunning);
    592         *pu64Delta = s_u64OtherRet;
    593         return u64Now + pVM->tm.s.u64VirtualOffset + s_u64OtherRet;
    594     }
    595 
    596     /*
    597      * Check for TMCLOCK_VIRTUAL expiration.
    598      */
    599     const uint64_t  u64Expire1 = pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL].u64Expire;
    600     const int64_t   i64Delta1  = u64Expire1 - u64Now;
    601     if (i64Delta1 <= 0)
    602     {
    603         STAM_COUNTER_INC(&pVM->tm.s.StatPollGIPVirtual);
    604         Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
    605         VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
    606 #ifdef IN_RING3
    607         REMR3NotifyTimerPending(pVM, pVCpuDst);
    608 #endif
    609         LogFlow(("TMTimerPoll: expire1=%RU64 <= now=%RU64\n", u64Expire1, u64Now));
    610         if (pVCpuDst == pVCpu)
    611         {
    612             *pu64Delta = 0;
    613             return 0;
    614         }
    615         *pu64Delta = s_u64OtherRet;
    616         return u64Now + pVM->tm.s.u64VirtualOffset + s_u64OtherRet;
    617     }
    618 
    619     /*
    620      * Check for TMCLOCK_VIRTUAL_SYNC expiration.
    621      * This isn't quite as straightforward if in a catch-up; not only do
    622      * we have to adjust the 'now' but we have to adjust the delta as well.
    623      */
    624     int rc = tmVirtualSyncLock(pVM); /** @todo FIXME: Stop playing safe... */
    625 
    626     const uint64_t  u64Expire2 = pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire;
    627     uint64_t        u64VirtualSyncNow;
    628     if (!pVM->tm.s.fVirtualSyncTicking)
    629         u64VirtualSyncNow = pVM->tm.s.u64VirtualSync;
    630     else
    631     {
    632         if (!pVM->tm.s.fVirtualSyncCatchUp)
    633             u64VirtualSyncNow = u64Now - pVM->tm.s.offVirtualSync;
    634         else
    635         {
    636             uint64_t off = pVM->tm.s.offVirtualSync;
    637             uint64_t u64Delta = u64Now - pVM->tm.s.u64VirtualSyncCatchUpPrev;
    638             if (RT_LIKELY(!(u64Delta >> 32)))
    639             {
    640                 uint64_t u64Sub = ASMMultU64ByU32DivByU32(u64Delta, pVM->tm.s.u32VirtualSyncCatchUpPercentage, 100);
    641                 if (off > u64Sub + pVM->tm.s.offVirtualSyncGivenUp)
    642                     off -= u64Sub;
    643                 else
    644                     off = pVM->tm.s.offVirtualSyncGivenUp;
    645             }
    646             u64VirtualSyncNow = u64Now - off;
    647         }
    648     }
    649 
    650     int64_t i64Delta2 = u64Expire2 - u64VirtualSyncNow;
    651     if (i64Delta2 <= 0)
    652     {
    653         if (!VMCPU_FF_ISSET(pVCpu, VMCPU_FF_TIMER))
    654         {
    655             Log5(("TMAll(%u): FF: 0 -> 1\n", __LINE__));
    656             VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER /** @todo poke */);
    657 #ifdef IN_RING3
    658             REMR3NotifyTimerPending(pVM, pVCpuDst);
    659 #endif
    660         }
    661         STAM_COUNTER_INC(&pVM->tm.s.StatPollGIPVirtualSync);
    662 
    663 #ifndef IN_RING3
    664         if (RT_SUCCESS(rc))
    665 #endif
    666             tmVirtualSyncUnlock(pVM);
    667         LogFlow(("TMTimerPoll: expire2=%RU64 <= now=%RU64\n", u64Expire2, u64Now));
    668         if (pVCpuDst == pVCpu)
    669         {
    670             *pu64Delta = 0;
    671             return 0;
    672         }
    673         *pu64Delta = s_u64OtherRet;
    674         return u64Now + pVM->tm.s.u64VirtualOffset + s_u64OtherRet;
    675     }
    676     if (pVM->tm.s.fVirtualSyncCatchUp)
    677         i64Delta2 = ASMMultU64ByU32DivByU32(i64Delta2, 100, pVM->tm.s.u32VirtualSyncCatchUpPercentage + 100);
    678 
    679     uint64_t u64GipTime;
    680     if (pVCpuDst == pVCpu)
    681     {
    682         /*
    683          * Return the GIP time of the next event.
    684          * This is the reverse of what tmVirtualGetRaw is doing.
    685          */
    686         STAM_COUNTER_INC(&pVM->tm.s.StatPollGIPMiss);
    687         u64GipTime = RT_MIN(i64Delta1, i64Delta2);
    688         *pu64Delta = u64GipTime;
    689         u64GipTime += u64Now + pVM->tm.s.u64VirtualOffset;
    690         if (RT_UNLIKELY(!pVM->tm.s.fVirtualWarpDrive))
    691         {
    692             u64GipTime -= pVM->tm.s.u64VirtualWarpDriveStart; /* the start is GIP time. */
    693             u64GipTime *= 100;
    694             u64GipTime /= pVM->tm.s.u32VirtualWarpDrivePercentage;
    695             u64GipTime += pVM->tm.s.u64VirtualWarpDriveStart;
    696         }
    697     }
    698     else
    699     {
    700         *pu64Delta = s_u64OtherRet;
    701         u64GipTime = u64Now + pVM->tm.s.u64VirtualOffset + s_u64OtherRet;
    702     }
    703 #ifndef IN_RING3
    704     if (RT_SUCCESS(rc))
    705 #endif
    706         tmVirtualSyncUnlock(pVM);
    707     return u64GipTime;
    708 }
    709 #endif
    710 
     666    return tmTimerPollInternal(pVM, pVCpu, pu64Delta);
     667}
     668
     669#endif /* VBOX_HIGH_RES_TIMERS_HACK */
    711670
    712671/**