VirtualBox

Changeset 19752 in vbox


Timestamp:
May 15, 2009 6:32:16 PM
Author:
vboxsync
Message:

TM: Revised tmVirtualSyncGetEx for SMP.

Location:
trunk/src/VBox/VMM
Files:
3 edited

  • trunk/src/VBox/VMM/TM.cpp

    r19747 r19752  
    588588    STAM_REG(pVM, &pVM->tm.s.StatVirtualGet,                          STAMTYPE_COUNTER, "/TM/VirtualGet",                      STAMUNIT_OCCURENCES, "The number of times TMTimerGet was called when the clock was running.");
    589589    STAM_REG(pVM, &pVM->tm.s.StatVirtualGetSetFF,                     STAMTYPE_COUNTER, "/TM/VirtualGetSetFF",                 STAMUNIT_OCCURENCES, "Times we set the FF when calling TMTimerGet.");
    590     STAM_REG(pVM, &pVM->tm.s.StatVirtualGetSync,                      STAMTYPE_COUNTER, "/TM/VirtualGetSync",                  STAMUNIT_OCCURENCES, "The number of times TMTimerGetSync was called when the clock was running.");
    591     STAM_REG(pVM, &pVM->tm.s.StatVirtualGetSyncSetFF,                 STAMTYPE_COUNTER, "/TM/VirtualGetSyncSetFF",             STAMUNIT_OCCURENCES, "Times we set the FF when calling TMTimerGetSync.");
     590    STAM_REG(pVM, &pVM->tm.s.StatVirtualSyncGet,                      STAMTYPE_COUNTER, "/TM/VirtualSyncGet",                  STAMUNIT_OCCURENCES, "The number of times TMTimerSyncGet was called when the clock was running.");
     591    STAM_REG(pVM, &pVM->tm.s.StatVirtualSyncGetSetFF,                 STAMTYPE_COUNTER, "/TM/VirtualSyncGetSetFF",             STAMUNIT_OCCURENCES, "Times we set the FF when calling TMTimerSyncGet.");
     592    STAM_REG(pVM, &pVM->tm.s.StatVirtualSyncGetELoop,                 STAMTYPE_COUNTER, "/TM/VirtualSyncGetELoop",             STAMUNIT_OCCURENCES, "Times we give up because too many loops in TMTimerSyncGet.");
    592593    STAM_REG(pVM, &pVM->tm.s.StatVirtualPause,                        STAMTYPE_COUNTER, "/TM/VirtualPause",                    STAMUNIT_OCCURENCES, "The number of times TMR3TimerPause was called.");
    593594    STAM_REG(pVM, &pVM->tm.s.StatVirtualResume,                       STAMTYPE_COUNTER, "/TM/VirtualResume",                   STAMUNIT_OCCURENCES, "The number of times TMR3TimerResume was called.");
     
    603604    STAM_REG(pVM, &pVM->tm.s.StatTSCSyncNotTicking,                   STAMTYPE_COUNTER, "/TM/TSC/Intercept/SyncNotTicking",    STAMUNIT_OCCURENCES, "VirtualSync isn't ticking.");
    604605    STAM_REG(pVM, &pVM->tm.s.StatTSCWarp,                             STAMTYPE_COUNTER, "/TM/TSC/Intercept/Warp",              STAMUNIT_OCCURENCES, "Warpdrive is active.");
    605 
    606606
    607607    STAM_REG(pVM, &pVM->tm.s.StatVirtualSyncCatchup,              STAMTYPE_PROFILE_ADV, "/TM/VirtualSync/CatchUp",    STAMUNIT_TICKS_PER_OCCURENCE, "Counting and measuring the times spent catching up.");
     
    954954    /*
    955955     * Abort any pending catch up.
    956      * This isn't perfect,
     956     * This isn't perfect...
    957957     */
    958958    if (pVM->tm.s.fVirtualSyncCatchUp)
     
    24242424     * the warp drive settings.
    24252425     */
    2426     tmLock(pVM); /* paranoia */
     2426    tmLock(pVM);
    24272427    bool fPaused = !!pVM->tm.s.cVirtualTicking;
    24282428    if (fPaused) /** @todo this isn't really working, but wtf. */
  • trunk/src/VBox/VMM/TMInternal.h

    r19747 r19752  
    457457    STAMCOUNTER                 StatVirtualGet;
    458458    STAMCOUNTER                 StatVirtualGetSetFF;
    459     STAMCOUNTER                 StatVirtualGetSync;
    460     STAMCOUNTER                 StatVirtualGetSyncSetFF;
     459    STAMCOUNTER                 StatVirtualSyncGet;
     460    STAMCOUNTER                 StatVirtualSyncGetSetFF;
     461    STAMCOUNTER                 StatVirtualSyncGetELoop;
    461462    STAMCOUNTER                 StatVirtualPause;
    462463    STAMCOUNTER                 StatVirtualResume;
  • trunk/src/VBox/VMM/VMMAll/TMAllVirtual.cpp

    r19747 r19752  
    403403DECLINLINE(uint64_t) tmVirtualSyncGetEx(PVM pVM, bool fCheckTimers)
    404404{
    405     STAM_COUNTER_INC(&pVM->tm.s.StatVirtualGetSync);
    406     uint64_t    u64;
    407 
    408     if (pVM->tm.s.fVirtualSyncTicking)
     405    STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGet);
     406
     407    if (!pVM->tm.s.fVirtualSyncTicking)
     408        return pVM->tm.s.u64VirtualSync;
     409
     410    /*
     411     * Query the virtual clock and do the usual expired timer check.
     412     */
     413    Assert(pVM->tm.s.cVirtualTicking);
     414    uint64_t u64 = tmVirtualGetRaw(pVM);
     415    if (fCheckTimers)
    409416    {
    410417        PVMCPU pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu];
    411 
    412         /*
    413          * Query the virtual clock and do the usual expired timer check.
    414          */
    415         Assert(pVM->tm.s.cVirtualTicking);
    416         u64 = tmVirtualGetRaw(pVM);
    417         if (fCheckTimers)
     418        if (    !VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER)
     419            &&  pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL].u64Expire <= u64)
    418420        {
    419             if (    !VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER)
    420                 &&  pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL].u64Expire <= u64)
     421            Log5(("TMAllVirtual(%u): FF: 0 -> 1\n", __LINE__));
     422            VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
     423#ifdef IN_RING3
     424            REMR3NotifyTimerPending(pVM, pVCpuDst);
     425            VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM /** @todo |VMNOTIFYFF_FLAGS_POKE*/);
     426#endif
     427            STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetSetFF);
     428        }
     429    }
     430
     431    /*
     432     * Read the offset and adjust if we're playing catch-up.
     433     *
     434    * The catch-up adjustment works by decrementing the offset by a percentage of
     435     * the time elapsed since the previous TMVirtualGetSync call.
     436     *
     437    * It's possible to get a very long or even negative interval between two reads
     438     * for the following reasons:
     439     *  - Someone might have suspended the process execution, frequently the case when
     440     *    debugging the process.
     441    *  - We might be on a different CPU whose TSC isn't quite in sync with the
     442     *    other CPUs in the system.
     443    *  - Another thread is racing us and we might have been preempted while inside
     444     *    this function.
     445     *
     446    * Assuming nanosecond virtual time, we can simply ignore any intervals which have
     447     * any of the upper 32 bits set.
     448     */
     449    AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
     450    int         cOuterTries = 42;
     451    int         rcLock = tmVirtualSyncTryLock(pVM);
     452    uint64_t    off;
     453    for (;; cOuterTries--)
     454    {
     455        /* Re-check the ticking flag. */
     456        if (!ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking))
     457        {
     458            if (RT_SUCCESS(rcLock))
     459                tmVirtualSyncUnlock(pVM);
     460            return ASMAtomicUoReadU64(&pVM->tm.s.u64VirtualSync);
     461        }
     462
     463        off = ASMAtomicUoReadU64(&pVM->tm.s.offVirtualSync);
     464        if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
     465        {
     466            /* adjust the offset. */
     467            if (RT_FAILURE(rcLock))
     468                rcLock = tmVirtualSyncTryLock(pVM);
     469            if (RT_SUCCESS(rcLock))
    421470            {
    422                 Log5(("TMAllVirtual(%u): FF: 0 -> 1\n", __LINE__));
    423                 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
    424 #ifdef IN_RING3
    425                 REMR3NotifyTimerPending(pVM, pVCpuDst);
    426                 VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM /** @todo |VMNOTIFYFF_FLAGS_POKE*/);
    427 #endif
    428                 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualGetSyncSetFF);
    429             }
    430         }
    431 
    432         /*
    433          * Read the offset and adjust if we're playing catch-up.
    434          *
    435          * The catch-up adjustment works by decrementing the offset by a percentage of
    436          * the time elapsed since the previous TMVirtualGetSync call.
    437          *
    438          * It's possible to get a very long or even negative interval between two reads
    439          * for the following reasons:
    440          *  - Someone might have suspended the process execution, frequently the case when
    441          *    debugging the process.
    442          *  - We might be on a different CPU whose TSC isn't quite in sync with the
    443          *    other CPUs in the system.
    444          *  - Another thread is racing us and we might have been preempted while inside
    445          *    this function.
    446          *
    447          * Assuming nanosecond virtual time, we can simply ignore any intervals which have
    448          * any of the upper 32 bits set.
    449          */
    450         AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
    451         uint64_t off = pVM->tm.s.offVirtualSync;
    452         if (pVM->tm.s.fVirtualSyncCatchUp)
    453         {
    454             int rc = tmVirtualSyncTryLock(pVM); /** @todo SMP: Here be dragons... Need to get back to this later. */
    455 
    456             const uint64_t u64Prev = pVM->tm.s.u64VirtualSyncCatchUpPrev;
    457             uint64_t u64Delta = u64 - u64Prev;
    458             if (RT_LIKELY(!(u64Delta >> 32)))
    459             {
    460                 uint64_t u64Sub = ASMMultU64ByU32DivByU32(u64Delta, pVM->tm.s.u32VirtualSyncCatchUpPercentage, 100);
    461                 if (off > u64Sub + pVM->tm.s.offVirtualSyncGivenUp)
     471                /* We own the lock and may make updates. */
     472                const uint64_t u64Prev = pVM->tm.s.u64VirtualSyncCatchUpPrev;
     473                uint64_t u64Delta = u64 - u64Prev;
     474                if (RT_LIKELY(!(u64Delta >> 32)))
    462475                {
    463                     off -= u64Sub;
    464                     ASMAtomicXchgU64(&pVM->tm.s.offVirtualSync, off);
    465                     pVM->tm.s.u64VirtualSyncCatchUpPrev = u64;
    466                     Log4(("TM: %RU64/%RU64: sub %RU32\n", u64 - off, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp, u64Sub));
     476                    uint64_t u64Sub = ASMMultU64ByU32DivByU32(u64Delta, pVM->tm.s.u32VirtualSyncCatchUpPercentage, 100);
     477                    if (off > u64Sub + pVM->tm.s.offVirtualSyncGivenUp)
     478                    {
     479                        off -= u64Sub;
     480                        ASMAtomicWriteU64(&pVM->tm.s.offVirtualSync, off);
     481                        ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev, u64);
     482                        Log4(("TM: %RU64/%RU64: sub %RU32\n", u64 - off, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp, u64Sub));
     483                    }
     484                    else
     485                    {
     486                        /* we've completely caught up. */
     487                        STAM_PROFILE_ADV_STOP(&pVM->tm.s.StatVirtualSyncCatchup, c);
     488                        off = pVM->tm.s.offVirtualSyncGivenUp;
     489                        ASMAtomicWriteU64(&pVM->tm.s.offVirtualSync, off);
     490                        ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncCatchUp, false);
     491                        ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev, u64);
     492                        Log4(("TM: %RU64/0: caught up\n", u64));
     493                    }
    467494                }
    468495                else
    469496                {
    470                     /* we've completely caught up. */
    471                     STAM_PROFILE_ADV_STOP(&pVM->tm.s.StatVirtualSyncCatchup, c);
    472                     off = pVM->tm.s.offVirtualSyncGivenUp;
    473                     ASMAtomicXchgU64(&pVM->tm.s.offVirtualSync, off);
    474                     ASMAtomicXchgBool(&pVM->tm.s.fVirtualSyncCatchUp, false);
    475                     pVM->tm.s.u64VirtualSyncCatchUpPrev = u64;
    476                     Log4(("TM: %RU64/0: caught up\n", u64));
     497                    /* More than 4 seconds since last time (or negative), ignore it. */
     498                    if (!(u64Delta & RT_BIT_64(63)))
     499                        ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev, u64);
     500                    Log(("TMVirtualGetSync: u64Delta=%RX64\n", u64Delta));
    477501                }
     502                break;
    478503            }
    479             else
     504
     505            /* No changes allowed, try to get a consistent set of parameters. */
     506            uint64_t const u64Prev    = ASMAtomicUoReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev);
     507            uint64_t const offGivenUp = ASMAtomicUoReadU64(&pVM->tm.s.offVirtualSyncGivenUp);
     508            uint32_t const u32Pct     = ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage);
     509            if (    (   u64Prev    == ASMAtomicUoReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev)
     510                     && offGivenUp == ASMAtomicUoReadU64(&pVM->tm.s.offVirtualSyncGivenUp)
     511                     && u32Pct     == ASMAtomicUoReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage)
     512                     && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
     513                ||  cOuterTries <= 0)
    480514            {
    481                 /* More than 4 seconds since last time (or negative), ignore it. */
    482                 if (!(u64Delta & RT_BIT_64(63)))
    483                     pVM->tm.s.u64VirtualSyncCatchUpPrev = u64;
    484                 Log(("TMVirtualGetSync: u64Delta=%RX64\n", u64Delta));
     515                uint64_t u64Delta = u64 - u64Prev;
     516                if (RT_LIKELY(!(u64Delta >> 32)))
     517                {
     518                    uint64_t u64Sub = ASMMultU64ByU32DivByU32(u64Delta, u32Pct, 100);
     519                    if (off > u64Sub + offGivenUp)
     520                    {
     521                        off -= u64Sub;
     522                        Log4(("TM: %RU64/%RU64: sub %RU32 (NoLock)\n", u64 - off, pVM->tm.s.offVirtualSync - offGivenUp, u64Sub));
     523                    }
     524                    else
     525                    {
     526                        /* we've completely caught up. */
     527                        STAM_PROFILE_ADV_STOP(&pVM->tm.s.StatVirtualSyncCatchup, c);
     528                        off = offGivenUp;
     529                        Log4(("TM: %RU64/0: caught up\n", u64));
     530                    }
     531                }
     532                else
     533                    /* More than 4 seconds since last time (or negative), ignore it. */
     534                    Log(("TMVirtualGetSync: u64Delta=%RX64 (NoLock)\n", u64Delta));
     535
     536                /* Check that we're still running and in catch up. */
     537                if (pVM->tm.s.fVirtualSyncCatchUp)
     538                    break;
     539                if (cOuterTries <= 0)
     540                    break;
    485541            }
    486 
    487             if (RT_SUCCESS(rc))
    488                 tmVirtualSyncUnlock(pVM);
    489542        }
    490 
    491         /*
    492          * Complete the calculation of the current TMCLOCK_VIRTUAL_SYNC time. The current
    493          * approach is to never pass the head timer. So, when we do, we stop the clock and
    494          * set the timer pending flag.
    495          */
    496         u64 -= off;
    497         const uint64_t u64Expire = pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire;
    498         if (u64 >= u64Expire)
     543        else if (   off == ASMAtomicUoReadU64(&pVM->tm.s.offVirtualSync)
     544                 && !ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
     545            break; /* Got a consistent offset */
     546    }
     547    if (cOuterTries <= 0)
     548        STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetELoop);
     549
     550    /*
     551     * Complete the calculation of the current TMCLOCK_VIRTUAL_SYNC time. The current
     552     * approach is to never pass the head timer. So, when we do, we stop the clock and
     553     * set the timer pending flag.
     554     */
     555    u64 -= off;
     556    const uint64_t u64Expire = ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);
     557    if (u64 >= u64Expire)
     558    {
     559        u64 = u64Expire;
     560        PVMCPU pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu];
     561        if (RT_FAILURE(rcLock))
     562            rcLock = tmVirtualSyncTryLock(pVM);
     563        if (RT_SUCCESS(rcLock))
    499564        {
    500             u64 = u64Expire;
    501             int rc = tmVirtualSyncTryLock(pVM); /** @todo SMP: Here be dragons... Need to get back to this later. FIXME */
    502             if (RT_SUCCESS(rc))
    503             {
    504                 ASMAtomicXchgU64(&pVM->tm.s.u64VirtualSync, u64);
    505                 ASMAtomicXchgBool(&pVM->tm.s.fVirtualSyncTicking, false);
    506                 VM_FF_SET(pVM, VM_FF_TM_VIRTUAL_SYNC);
    507                 tmVirtualSyncUnlock(pVM);
    508             }
    509             if (    fCheckTimers
    510                 &&  !VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
    511             {
    512                 Log5(("TMAllVirtual(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
    513                 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
     565            ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSync, u64);
     566            ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncTicking, false);
     567            VM_FF_SET(pVM, VM_FF_TM_VIRTUAL_SYNC);
     568            VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
     569            Log5(("TMAllVirtual(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
     570            tmVirtualSyncUnlock(pVM);
    514571#ifdef IN_RING3
    515                 REMR3NotifyTimerPending(pVM, pVCpuDst);
    516                 VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
    517 #endif
    518                 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualGetSyncSetFF);
    519                 Log4(("TM: %RU64/%RU64: exp tmr=>ff\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
    520             }
    521             else
    522                 Log4(("TM: %RU64/%RU64: exp tmr\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
     572            REMR3NotifyTimerPending(pVM, pVCpuDst);
     573            VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
     574#endif
     575            STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetSetFF);
     576            Log4(("TM: %RU64/%RU64: exp tmr=>ff\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
    523577        }
    524     }
    525     else
    526     {
    527         u64 = pVM->tm.s.u64VirtualSync;
    528 
    529     }
     578        else if (!VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
     579        {
     580            Log5(("TMAllVirtual(%u): FF: %d -> 1 (NoLock)\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
     581            VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
     582#ifdef IN_RING3
     583            REMR3NotifyTimerPending(pVM, pVCpuDst);
     584            VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
     585#endif
     586            STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetSetFF);
     587            Log4(("TM: %RU64/%RU64: exp tmr=>ff (NoLock)\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
     588        }
     589        else
     590            Log4(("TM: %RU64/%RU64: exp tmr (NoLock)\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
     591    }
     592    else if (RT_SUCCESS(rcLock))
     593        tmVirtualSyncUnlock(pVM);
    530594
    531595    return u64;
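
For orientation, the core of the TMAllVirtual.cpp change is that the read path no longer requires the virtual-sync lock: if the try-lock fails while a catch-up is in progress, the function takes an unlocked snapshot of the catch-up parameters, re-reads them to confirm nothing changed underneath it, and retries a bounded number of times before giving up (the give-up case is what the new StatVirtualSyncGetELoop counter records). The listing below is a simplified, self-contained sketch of that snapshot-and-retry pattern, not the VMM code: it uses C11 atomics in place of the IPRT ASMAtomic* helpers, invents its own CLOCKSTATE structure and clockGetSyncNoLock helper, and omits the locked path that writes the adjusted offset back and stops the clock at the head timer.

    /*
     * Simplified model of the lock-free read path added to tmVirtualSyncGetEx
     * in r19752: snapshot the catch-up parameters without the lock, verify the
     * snapshot is consistent, retry a bounded number of times, then give up.
     * Illustrative only: names, structure and atomics are stand-ins.
     */
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef struct CLOCKSTATE
    {
        _Atomic uint64_t offVirtualSync;   /* offset subtracted from the raw clock */
        _Atomic uint64_t offGivenUp;       /* part of the offset we've given up catching up */
        _Atomic uint64_t u64CatchUpPrev;   /* raw timestamp of the previous adjustment */
        _Atomic uint32_t u32CatchUpPct;    /* catch-up rate in percent */
        _Atomic bool     fCatchUp;         /* currently catching up? */
    } CLOCKSTATE;

    /* Read the synchronous clock without taking the writer lock. */
    static uint64_t clockGetSyncNoLock(CLOCKSTATE *pState, uint64_t u64Raw, unsigned *pcGiveUps)
    {
        uint64_t off;
        for (int cOuterTries = 42; ; cOuterTries--)
        {
            off = atomic_load(&pState->offVirtualSync);
            if (!atomic_load(&pState->fCatchUp))
            {
                /* Not catching up: only the offset itself needs to be stable. */
                if (   off == atomic_load(&pState->offVirtualSync)
                    || cOuterTries <= 0)
                    break;
                continue;
            }

            /* Catching up: snapshot all parameters and verify they are consistent. */
            uint64_t const u64Prev    = atomic_load(&pState->u64CatchUpPrev);
            uint64_t const offGivenUp = atomic_load(&pState->offGivenUp);
            uint32_t const u32Pct     = atomic_load(&pState->u32CatchUpPct);
            bool const fConsistent =    u64Prev    == atomic_load(&pState->u64CatchUpPrev)
                                     && offGivenUp == atomic_load(&pState->offGivenUp)
                                     && u32Pct     == atomic_load(&pState->u32CatchUpPct)
                                     && atomic_load(&pState->fCatchUp);
            if (fConsistent || cOuterTries <= 0)
            {
                /* Shrink the offset by u32Pct percent of the raw time elapsed since
                   the last adjustment, but never below the given-up portion. */
                uint64_t const u64Delta = u64Raw - u64Prev;
                if (!(u64Delta >> 32))          /* ignore huge or negative intervals */
                {
                    uint64_t const u64Sub = u64Delta * u32Pct / 100; /* fits: delta < 2^32 */
                    off = off > u64Sub + offGivenUp ? off - u64Sub : offGivenUp;
                }
                if (cOuterTries <= 0)
                    ++*pcGiveUps;               /* what StatVirtualSyncGetELoop counts */
                break;
            }
            /* Inconsistent snapshot: a writer is active, go around again. */
        }
        return u64Raw - off;                    /* the virtual sync time */
    }

    int main(void)
    {
        CLOCKSTATE State;
        atomic_init(&State.offVirtualSync, 2000);   /* 2000 ns behind */
        atomic_init(&State.offGivenUp,      500);   /* 500 ns of that was given up */
        atomic_init(&State.u64CatchUpPrev, 90000);
        atomic_init(&State.u32CatchUpPct,     25);  /* catch up at 25% */
        atomic_init(&State.fCatchUp,        true);

        unsigned cGiveUps = 0;
        /* 100000 - 90000 = 10000 ns elapsed; 25% of that is 2500 ns, which is more
           than the 1500 ns still owed, so the offset clamps to the given-up 500 ns
           and the result is 100000 - 500 = 99500. */
        printf("virtual sync time: %llu\n",
               (unsigned long long)clockGetSyncNoLock(&State, 100000, &cGiveUps));
        return 0;
    }

Built with any C11 compiler, this prints 99500, matching the clamped result worked through in the comments. The real function additionally limits the returned time to the first expiry in the TMCLOCK_VIRTUAL_SYNC queue and raises VMCPU_FF_TIMER / VM_FF_TM_VIRTUAL_SYNC when that point is reached, as shown in the final hunk of the diff above.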