VirtualBox

Changeset 2283 in vbox for trunk


Timestamp:
Apr 20, 2007 10:27:52 PM
Author:
vboxsync
svn:sync-xref-src-repo-rev:
20648
Message:

Working TMCLOCK_VIRTUAL_SYNC.

Location:
trunk
Files:
10 edited

  • trunk/include/VBox/vm.h

    r2248 r2283  
    477477        struct TM   s;
    478478#endif
    479         char        padding[1056];      /* multiple of 32 */
     479        char        padding[1280];      /* multiple of 32 */
    480480    } tm;
    481481
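
The padding grows because the TM state gains new fields in this changeset (offVirtualSyncGivenUp, extra statistics counters) and the catch-up period table goes from 8 to 10 entries. A minimal sketch of the invariant the comment names, in plain C rather than the VBox compile-time assertion macros; the internal struct size here is a hypothetical stand-in:

    /* Sketch: the union's padding must cover the private struct and stay a
     * multiple of 32 bytes.  abInternals[] is a made-up size. */
    struct TMSketch  { char abInternals[1200]; };
    union  TMSketchU { struct TMSketch s; char padding[1280]; };
    typedef char tm_padding_covers[sizeof(struct TMSketch) <= 1280 ? 1 : -1];
    typedef char tm_padding_mult32[1280 % 32 == 0 ? 1 : -1];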
  • trunk/src/VBox/VMM/TM.cpp

    r2248 r2283  
    8484 *    - Employ a thread which periodically (100Hz) polls all the timer queues.
    8585 *
     86 *
     87 * @section sec_tm_timer    Logging
     88 *
     89 * Level 2: Logs most of the timer state transitions and queue servicing.
     90 * Level 3: Logs a few oddments.
     91 * Level 4: Logs TMCLOCK_VIRTUAL_SYNC catch-up events.
     92 *
    8693 */
    8794
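
The Log4 statements referenced here appear throughout this changeset's catch-up paths. A rough sketch of the level gating in plain C, not the IPRT logger, just to show what "Level 4" buys; the level variable is an assumed runtime setting:

    #include <stdio.h>
    static unsigned g_uTmLogLevel = 2;      /* assumed runtime setting */
    #define SketchLog4(...) \
        do { if (g_uTmLogLevel >= 4) printf(__VA_ARGS__); } while (0)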
     
    120127*******************************************************************************/
    121128/** The current saved state version.*/
    122 #define TM_SAVED_STATE_VERSION  2
     129#define TM_SAVED_STATE_VERSION  3
    123130
    124131
     
    132139static void                 tmR3TimerQueueRun(PVM pVM, PTMTIMERQUEUE pQueue);
    133140static void                 tmR3TimerQueueRunVirtualSync(PVM pVM);
    134 static uint64_t             tmR3TimerQueueRunVirtualSyncGiveup(PVM pVM, uint64_t offNew);
    135141static DECLCALLBACK(void)   tmR3TimerInfo(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
    136142static DECLCALLBACK(void)   tmR3TimerInfoActive(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
     
    283289    rc = CFGMR3QueryU32(pCfgHandle, "ScheduleSlack", &pVM->tm.s.u32VirtualSyncScheduleSlack);
    284290    if (rc == VERR_CFGM_VALUE_NOT_FOUND)
    285         pVM->tm.s.u32VirtualSyncScheduleSlack           =   250000; /* 0.25ms (ASSUMES virtual time is nanoseconds) */
     291        pVM->tm.s.u32VirtualSyncScheduleSlack           =   100000; /* 0.100ms (ASSUMES virtual time is nanoseconds) */
    286292    else if (VBOX_FAILURE(rc))
    287293        return VMSetError(pVM, rc, RT_SRC_POS,
     
    297303    rc = CFGMR3QueryU64(pCfgHandle, "CatchUpGiveUpThreshold", &pVM->tm.s.u64VirtualSyncCatchUpGiveUpThreshold);
    298304    if (rc == VERR_CFGM_VALUE_NOT_FOUND)
    299         pVM->tm.s.u64VirtualSyncCatchUpGiveUpThreshold  = 1500000000; /* 1.5 sec */
     305        pVM->tm.s.u64VirtualSyncCatchUpGiveUpThreshold  = UINT64_C(60000000000); /* 60 sec */
    300306    else if (VBOX_FAILURE(rc))
    301307        return VMSetError(pVM, rc, RT_SRC_POS,
     
    309315        rc = CFGMR3QueryU64(pCfgHandle, "CatchUpStartThreshold" #iPeriod, &u64); \
    310316        if (rc == VERR_CFGM_VALUE_NOT_FOUND) \
    311             u64 = (DefStart); \
     317            u64 = UINT64_C(DefStart); \
    312318        else if (VBOX_FAILURE(rc)) \
    313319            return VMSetError(pVM, rc, RT_SRC_POS, N_("Configuration error: Failed to query 64-bit integer value \"CatchUpStartThreshold" #iPeriod "\". (%Vrc)"), rc); \
     
    322328            return VMSetError(pVM, rc, RT_SRC_POS, N_("Configuration error: Failed to query 32-bit integer value \"CatchUpPercentage" #iPeriod "\". (%Vrc)"), rc); \
    323329    } while (0)
    324     TM_CFG_PERIOD(0,   25000000,  25); /*   25ms at 1.25x */
    325     TM_CFG_PERIOD(1,   75000000,  50); /*   75ms at 1.50x */
    326     TM_CFG_PERIOD(2,  100000000,  75); /*   75ms at 1.75x */
    327     TM_CFG_PERIOD(3,  150000000, 100); /*  150ms at 2x */
    328     TM_CFG_PERIOD(4,  400000000, 200); /*  400ms at 3x */
    329     TM_CFG_PERIOD(5,  800000000, 300); /*  800ms at 4x */
    330     TM_CFG_PERIOD(6, 1200000000, 400); /* 1200ms at 6x */
    331     TM_CFG_PERIOD(7, 1400000000, 500); /* 1400ms at 8x */
    332     AssertCompile(RT_ELEMENTS(pVM->tm.s.aVirtualSyncCatchUpPeriods) == 8);
     330    /* This needs more tuning. Not sure if we really need so many periods or need to be this gentle. */
     331    TM_CFG_PERIOD(0,     750000,   5); /* 0.75ms at 1.05x */
     332    TM_CFG_PERIOD(1,    1500000,  10); /* 1.50ms at 1.10x */
     333    TM_CFG_PERIOD(2,    8000000,  25); /*    8ms at 1.25x */
     334    TM_CFG_PERIOD(3,   30000000,  50); /*   30ms at 1.50x */
     335    TM_CFG_PERIOD(4,  100000000,  75); /*  100ms at 1.75x */
     336    TM_CFG_PERIOD(5,  175000000, 100); /*  175ms at 2x */
     337    TM_CFG_PERIOD(6,  500000000, 200); /*  500ms at 3x */
     338    TM_CFG_PERIOD(7, 3000000000, 300); /*    3s  at 4x */
     339    TM_CFG_PERIOD(8,30000000000, 400); /*   30s  at 5x */
     340    TM_CFG_PERIOD(9,55000000000, 500); /*   55s  at 6x */
     341    AssertCompile(RT_ELEMENTS(pVM->tm.s.aVirtualSyncCatchUpPeriods) == 10);
    333342#undef TM_CFG_PERIOD
    334343
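
The table maps accumulated lag to a catch-up rate: the further behind, the harder the clock runs. A hypothetical helper showing how the table is consumed (the same scan appears twice in tmR3TimerQueueRunVirtualSync below); a percentage of 100 means the virtual sync clock runs at 2x until the lag clears:

    #include <stdint.h>
    struct CatchUpPeriod { uint64_t u64Start; uint32_t u32Percentage; };
    static uint32_t pickCatchUpPct(uint64_t offLag,
                                   const struct CatchUpPeriod *paPeriods, unsigned cPeriods)
    {
        unsigned i = 0;
        while (i + 1 < cPeriods && offLag >= paPeriods[i + 1].u64Start)
            i++;                            /* last period whose start we've passed */
        return paPeriods[i].u32Percentage;
    }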
     
    411420    STAM_REG(pVM, &pVM->tm.s.StatTimerStopR3,       STAMTYPE_PROFILE,       "/TM/TimerStopR3",      STAMUNIT_TICKS_PER_CALL,    "Profiling TMTimerStop calls made in ring-3.");
    412421
    413     STAM_REG(pVM, &pVM->tm.s.StatVirtualGet,        STAMTYPE_COUNTER,       "/TM/VirtualGet",       STAMUNIT_OCCURENCES,        "The number of times TMR3TimerGet was called when the clock was running.");
    414     STAM_REG(pVM, &pVM->tm.s.StatVirtualGetSync,    STAMTYPE_COUNTER,       "/TM/VirtualGetSync",   STAMUNIT_OCCURENCES,        "The number of times TMR3TimerGetSync was called when the clock was running.");
     422    STAM_REG(pVM, &pVM->tm.s.StatVirtualGet,        STAMTYPE_COUNTER,       "/TM/VirtualGet",       STAMUNIT_OCCURENCES,        "The number of times TMTimerGet was called when the clock was running.");
     423    STAM_REG(pVM, &pVM->tm.s.StatVirtualGetSetFF,   STAMTYPE_COUNTER,       "/TM/VirtualGetSetFF",  STAMUNIT_OCCURENCES,        "Times we set the FF when calling TMTimerGet.");
     424    STAM_REG(pVM, &pVM->tm.s.StatVirtualGetSync,    STAMTYPE_COUNTER,       "/TM/VirtualGetSync",   STAMUNIT_OCCURENCES,        "The number of times TMTimerGetSync was called when the clock was running.");
     425    STAM_REG(pVM, &pVM->tm.s.StatVirtualGetSyncSetFF,STAMTYPE_COUNTER,      "/TM/VirtualGetSyncSetFF",STAMUNIT_OCCURENCES,      "Times we set the FF when calling TMTimerGetSync.");
    415426    STAM_REG(pVM, &pVM->tm.s.StatVirtualPause,      STAMTYPE_COUNTER,       "/TM/VirtualPause",     STAMUNIT_OCCURENCES,        "The number of times TMR3TimerPause was called.");
    416427    STAM_REG(pVM, &pVM->tm.s.StatVirtualResume,     STAMTYPE_COUNTER,       "/TM/VirtualResume",    STAMUNIT_OCCURENCES,        "The number of times TMR3TimerResume was called.");
     
    419430
    420431
     432    STAM_REG(pVM, &pVM->tm.s.StatVirtualSyncCatchup,        STAMTYPE_PROFILE_ADV,   "/TM/VirtualSync/CatchUp",              STAMUNIT_TICKS_PER_OCCURENCE, "Counting and measuring the times spent catching up.");
     433    STAM_REG(pVM, (void *)&pVM->tm.s.fVirtualSyncCatchUp,       STAMTYPE_U8,        "/TM/VirtualSync/CatchUpActive",        STAMUNIT_NONE,           "Catch-Up active indicator.");
     434    STAM_REG(pVM, (void *)&pVM->tm.s.u32VirtualSyncCatchUpPercentage, STAMTYPE_U32, "/TM/VirtualSync/CatchUpPercentage",    STAMUNIT_PCT,           "The catch-up percentage. (+100/100 to get clock multiplier)");
     435    STAM_REG(pVM, (void *)&pVM->tm.s.offVirtualSync,            STAMTYPE_U64,       "/TM/VirtualSync/CurrentOffset",        STAMUNIT_NS,            "The current offset. (subtract GivenUp to get the lag)");
     436    STAM_REG(pVM, (void *)&pVM->tm.s.offVirtualSyncGivenUp,     STAMTYPE_U64,       "/TM/VirtualSync/GivenUp",              STAMUNIT_NS,            "Nanoseconds of the 'CurrentOffset' that's been given up and will never be caught up with.");
     437    STAM_REG(pVM, &pVM->tm.s.StatVirtualSyncGiveUp,             STAMTYPE_COUNTER,   "/TM/VirtualSync/GiveUp",               STAMUNIT_OCCURENCES,    "Times the catch-up was abandoned.");
     438    STAM_REG(pVM, &pVM->tm.s.StatVirtualSyncGiveUpBeforeStarting,STAMTYPE_COUNTER,  "/TM/VirtualSync/GiveUpBeforeStarting", STAMUNIT_OCCURENCES,    "Times the catch-up was abandoned before even starting. (Typically debugging++.)");
    421439    STAM_REG(pVM, &pVM->tm.s.StatVirtualSyncRun,                STAMTYPE_COUNTER,   "/TM/VirtualSync/Run",                  STAMUNIT_OCCURENCES,    "Times the virtual sync timer queue was considered.");
    422440    STAM_REG(pVM, &pVM->tm.s.StatVirtualSyncRunRestart,         STAMTYPE_COUNTER,   "/TM/VirtualSync/Run/Restarts",         STAMUNIT_OCCURENCES,    "Times the clock was restarted after a run.");
     
    424442    STAM_REG(pVM, &pVM->tm.s.StatVirtualSyncRunStoppedAlready,  STAMTYPE_COUNTER,   "/TM/VirtualSync/Run/StoppedAlready",   STAMUNIT_OCCURENCES,    "Times the clock was already stopped elsewhere (TMVirtualSyncGet).");
    425443    STAM_REG(pVM, &pVM->tm.s.StatVirtualSyncRunSlack,           STAMTYPE_PROFILE,   "/TM/VirtualSync/Run/Slack",            STAMUNIT_NS_PER_OCCURENCE, "The scheduling slack. (Catch-up handed out when running timers.)");
    426     STAM_REG(pVM, &pVM->tm.s.StatVirtualSyncGiveUp,             STAMTYPE_COUNTER,   "/TM/VirtualSync/GiveUp",               STAMUNIT_OCCURENCES,    "Times the catch-up was abandoned.");
    427     STAM_REG(pVM, &pVM->tm.s.StatVirtualSyncGiveUpBeforeStarting,STAMTYPE_COUNTER,  "/TM/VirtualSync/GiveUpBeforeStarting", STAMUNIT_OCCURENCES,    "Times the catch-up was abandoned before even starting. (Typically debugging++.)");
    428     STAM_REG(pVM, &pVM->tm.s.StatVirtualSyncCatchup,        STAMTYPE_PROFILE_ADV,   "/TM/VirtualSync/CatchUp",              STAMUNIT_TICKS_PER_OCCURENCE, "Counting and measuring the times spent catching up.");
    429444    for (unsigned i = 0; i < RT_ELEMENTS(pVM->tm.s.aVirtualSyncCatchUpPeriods); i++)
    430445    {
    431         STAMR3RegisterF(pVM, &pVM->tm.s.aVirtualSyncCatchUpPeriods[i].u32Percentage,    STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT,          "The catch-up percentage.",         "/TM/VirtualSync/%u", i);
    432         STAMR3RegisterF(pVM, &pVM->tm.s.aStatVirtualSyncCatchupAdjust[i],           STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,   "Times adjusted to this period.",   "/TM/VirtualSync/%u/Adjust", i);
    433         STAMR3RegisterF(pVM, &pVM->tm.s.aStatVirtualSyncCatchupInitial[i],          STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,   "Times started in this period.",    "/TM/VirtualSync/%u/Initial", i);
    434         STAMR3RegisterF(pVM, &pVM->tm.s.aVirtualSyncCatchUpPeriods[i].u64Start,         STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_NS,           "Start of this period (lag).",      "/TM/VirtualSync/%u/Start", i);
     446        STAMR3RegisterF(pVM, &pVM->tm.s.aVirtualSyncCatchUpPeriods[i].u32Percentage,    STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT,          "The catch-up percentage.",         "/TM/VirtualSync/Periods/%u", i);
     447        STAMR3RegisterF(pVM, &pVM->tm.s.aStatVirtualSyncCatchupAdjust[i],           STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,   "Times adjusted to this period.",   "/TM/VirtualSync/Periods/%u/Adjust", i);
     448        STAMR3RegisterF(pVM, &pVM->tm.s.aStatVirtualSyncCatchupInitial[i],          STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,   "Times started in this period.",    "/TM/VirtualSync/Periods/%u/Initial", i);
     449        STAMR3RegisterF(pVM, &pVM->tm.s.aVirtualSyncCatchUpPeriods[i].u64Start,         STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_NS,           "Start of this period (lag).",      "/TM/VirtualSync/Periods/%u/Start", i);
    435450    }
    436451
     
    440455     * Register info handlers.
    441456     */
    442     DBGFR3InfoRegisterInternal(pVM, "timers",       "Dumps all timers. No arguments.",          tmR3TimerInfo);
    443     DBGFR3InfoRegisterInternal(pVM, "activetimers", "Dumps active all timers. No arguments.",   tmR3TimerInfoActive);
    444     DBGFR3InfoRegisterInternal(pVM, "clocks",       "Display the time of the various clocks.",  tmR3InfoClocks);
     457    DBGFR3InfoRegisterInternalEx(pVM, "timers",       "Dumps all timers. No arguments.",          tmR3TimerInfo,        DBGFINFO_FLAGS_RUN_ON_EMT);
     458    DBGFR3InfoRegisterInternalEx(pVM, "activetimers", "Dumps all active timers. No arguments.",   tmR3TimerInfoActive,  DBGFINFO_FLAGS_RUN_ON_EMT);
     459    DBGFR3InfoRegisterInternalEx(pVM, "clocks",       "Display the time of the various clocks.",  tmR3InfoClocks,       DBGFINFO_FLAGS_RUN_ON_EMT);
    445460
    446461    return VINF_SUCCESS;
     
    654669    /* the virtual timer synchronous clock. */
    655670    SSMR3PutU64(pSSM, pVM->tm.s.u64VirtualSync);
    656     SSMR3PutU64(pSSM, pVM->tm.s.u64VirtualSyncOffset);
     671    SSMR3PutU64(pSSM, pVM->tm.s.offVirtualSync);
     672    SSMR3PutU64(pSSM, pVM->tm.s.offVirtualSyncGivenUp);
    657673    SSMR3PutU64(pSSM, pVM->tm.s.u64VirtualSyncCatchUpPrev);
    658674    SSMR3PutBool(pSSM, pVM->tm.s.fVirtualSyncCatchUp);
     
    715731    pVM->tm.s.u64VirtualSync = u64;
    716732    SSMR3GetU64(pSSM, &u64);
    717     pVM->tm.s.u64VirtualSyncOffset = u64;
     733    pVM->tm.s.offVirtualSync = u64;
     734    SSMR3GetU64(pSSM, &u64);
     735    pVM->tm.s.offVirtualSyncGivenUp = u64;
    718736    SSMR3GetU64(pSSM, &u64);
    719737    pVM->tm.s.u64VirtualSyncCatchUpPrev = u64;
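
This is why TM_SAVED_STATE_VERSION went from 2 to 3: offVirtualSyncGivenUp is inserted into the middle of the stream, so a version-2 reader would hand its value to u64VirtualSyncCatchUpPrev and shift every later field. A sketch of the usual guard (an assumed pattern; the actual version check sits outside this hunk):

    static int sketchCheckTmVersion(uint32_t u32Version)
    {
        if (u32Version != 3 /* TM_SAVED_STATE_VERSION */)
            return VERR_SSM_UNSUPPORTED_DATA_UNIT;
        return VINF_SUCCESS;
    }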
     
    10301048        return true;
    10311049    u64Now = pVM->tm.s.fVirtualSyncTicking
    1032            ? u64Now - pVM->tm.s.u64VirtualSyncOffset
     1050           ? u64Now - pVM->tm.s.offVirtualSync
    10331051           : pVM->tm.s.u64VirtualSync;
    10341052    if (pVM->tm.s.CTXALLSUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire <= u64Now)
     
    12531271     *
    12541272     * We use a time frame ranging from the current sync time (which is most likely the
    1255      * same as the head timer) and some configurable period (250000ns) up towards the
     1273     * same as the head timer) plus some configurable period (100000ns) up towards the
    12561274     * current virtual time. This period might also need to be restricted by the catch-up
    12571275     * rate so frequent calls to this function won't accelerate the time too much, however
    1258      * this will be implemented at a later point.
     1276     * this will be implemented at a later point if necessary.
    12591277     *
    12601278     * Without this frame we would 1) have to run timers much more frequently
     
    12631281    const uint64_t u64VirtualNow = TMVirtualGetEx(pVM, false /* don't check timers */);
    12641282    uint64_t u64Now;
    1265     uint64_t u64Max;
    12661283    if (!pVM->tm.s.fVirtualSyncTicking)
    12671284    {
    12681285        STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncRunStoppedAlready);
    12691286        u64Now = pVM->tm.s.u64VirtualSync;
    1270         Assert(u64Now >= pNext->u64Expire);
    1271 
    1272         u64Max = u64Now + pVM->tm.s.u32VirtualSyncScheduleSlack;
    1273         if (u64Max > u64VirtualNow)
    1274             u64Max = u64VirtualNow;
     1287        Assert(u64Now <= pNext->u64Expire);
    12751288    }
    12761289    else
    12771290    {
    1278         /* Calc now. */
    1279         uint64_t off = pVM->tm.s.u64VirtualSyncOffset;
     1291        /* Calc 'now'. (update order doesn't really matter here) */
     1292        uint64_t off = pVM->tm.s.offVirtualSync;
    12801293        if (pVM->tm.s.fVirtualSyncCatchUp)
    12811294        {
    1282             const uint64_t u64Prev = pVM->tm.s.u64VirtualSyncCatchUpPrev;
    1283             uint64_t u64Delta = u64VirtualNow - u64Prev;
     1295            uint64_t u64Delta = u64VirtualNow - pVM->tm.s.u64VirtualSyncCatchUpPrev;
    12841296            if (RT_LIKELY(!(u64Delta >> 32)))
    12851297            {
    1286                 uint32_t u32Sub = ASMDivU64ByU32RetU32(ASMMult2xU32RetU64((uint32_t)u64Delta, pVM->tm.s.u32VirtualSyncCatchUpPercentage),
    1287                                                        100);
    1288                 if (off > u32Sub)
    1289                     off -= u32Sub;
     1298                uint64_t u64Sub = ASMMultU64ByU32DivByU32(u64Delta, pVM->tm.s.u32VirtualSyncCatchUpPercentage, 100);
     1299                if (off > u64Sub + pVM->tm.s.offVirtualSyncGivenUp)
     1300                {
     1301                    off -= u64Sub;
     1302                    Log4(("TM: %RU64/%RU64: sub %RU64 (run)\n", u64VirtualNow - off, off - pVM->tm.s.offVirtualSyncGivenUp, u64Sub));
     1303                }
    12901304                else
    1291                     off = 0;
     1305                {
     1306                    STAM_PROFILE_ADV_STOP(&pVM->tm.s.StatVirtualSyncCatchup, c);
     1307                    ASMAtomicXchgBool(&pVM->tm.s.fVirtualSyncCatchUp, false);
     1308                    off = pVM->tm.s.offVirtualSyncGivenUp;
     1309                    Log4(("TM: %RU64/0: caught up (run)\n", u64VirtualNow));
     1310                }
    12921311            }
     1312            ASMAtomicXchgU64(&pVM->tm.s.offVirtualSync, off);
     1313            pVM->tm.s.u64VirtualSyncCatchUpPrev = u64VirtualNow;
    12931314        }
    12941315        u64Now = u64VirtualNow - off;
    12951316
    1296         /* Check if stopped by expired timer and calc the frame end. */
    1297         if (u64Now <= pNext->u64Expire)
    1298         {
    1299             if (pVM->tm.s.u64VirtualSyncOffset <= pVM->tm.s.u32VirtualSyncScheduleSlack)
    1300                 u64Max = pVM->tm.s.u64VirtualSyncOffset;
    1301             else
    1302                 u64Max = pVM->tm.s.u32VirtualSyncScheduleSlack;
    1303         }
    1304         else
     1317        /* Check if stopped by expired timer. */
     1318        if (u64Now >= pNext->u64Expire)
    13051319        {
    13061320            STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncRunStop);
     
    13081322            ASMAtomicXchgU64(&pVM->tm.s.u64VirtualSync, u64Now);
    13091323            ASMAtomicXchgBool(&pVM->tm.s.fVirtualSyncTicking, false);
    1310 
    1311             u64Max = u64Now + pVM->tm.s.u32VirtualSyncScheduleSlack;
    1312             if (u64Max > u64VirtualNow)
    1313                 u64Max = u64VirtualNow;
     1324            Log4(("TM: %RU64/%RU64: exp tmr (run)\n", u64Now, u64VirtualNow - u64Now - pVM->tm.s.offVirtualSyncGivenUp));
     1325
    13141326        }
    13151327    }
    13161328
     1329    /* calc end of frame. */
     1330    uint64_t u64Max = u64Now + pVM->tm.s.u32VirtualSyncScheduleSlack;
     1331    if (u64Max > u64VirtualNow - pVM->tm.s.offVirtualSyncGivenUp)
     1332        u64Max = u64VirtualNow - pVM->tm.s.offVirtualSyncGivenUp;
     1333
     1334    /* assert sanity */
     1335    Assert(u64Now <= u64VirtualNow - pVM->tm.s.offVirtualSyncGivenUp);
     1336    Assert(u64Max <= u64VirtualNow - pVM->tm.s.offVirtualSyncGivenUp);
     1337    Assert(u64Now <= u64Max);
     1338
    13171339    /*
    13181340     * Process the expired timers moving the clock along as we progress.
    13191341     */
     1342#ifdef VBOX_STRICT
     1343    uint64_t u64Prev = u64Now; NOREF(u64Prev);
     1344#endif
    13201345    while (pNext && pNext->u64Expire <= u64Max)
    13211346    {
     
    13281353        if (fRc)
    13291354        {
    1330             Assert(!pTimer->offScheduleNext); /* this can trigger falsely */
    1331 
    13321355            /* unlink */
    13331356            const PTMTIMER pPrev = TMTIMER_GET_PREV(pTimer);
     
    13441367            pTimer->offPrev = 0;
    13451368
    1346             /* advance the clock */
     1369            /* advance the clock - don't permit timers to be out of order or armed in the 'past'. */
     1370#ifdef VBOX_STRICT
     1371            AssertMsg(pTimer->u64Expire >= u64Prev, ("%RU64 < %RU64 %s\n", pTimer->u64Expire, u64Prev, pTimer->pszDesc));
     1372            u64Prev = pTimer->u64Expire;
     1373#endif
    13471374            ASMAtomicXchgSize(&pVM->tm.s.fVirtualSyncTicking, false);
    1348             ASMAtomicXchgU64(&pVM->tm.s.u64Virtual, pTimer->u64Expire);
     1375            ASMAtomicXchgU64(&pVM->tm.s.u64VirtualSync, pTimer->u64Expire);
    13491376
    13501377            /* fire */
     
    13751402        STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncRunRestart);
    13761403
     1404        /* calc the slack we've handed out. */
    13771405        const uint64_t u64VirtualNow2 = TMVirtualGetEx(pVM, false /* don't check timers */);
    13781406        Assert(u64VirtualNow2 >= u64VirtualNow);
     1407        AssertMsg(pVM->tm.s.u64VirtualSync >= u64Now, ("%RU64 < %RU64\n", pVM->tm.s.u64VirtualSync, u64Now));
    13791408        const uint64_t offSlack = pVM->tm.s.u64VirtualSync - u64Now;
    1380         STAM_STATS( {
    1381             if (offSlack) {
     1409        STAM_STATS({
     1410            if (offSlack)
     1411            {
    13821412                PSTAMPROFILE p = &pVM->tm.s.StatVirtualSyncRunSlack;
    1383                 p->cPeriods++; 
     1413                p->cPeriods++;
    13841414                p->cTicks += offSlack;
    13851415                if (p->cTicksMax < offSlack) p->cTicksMax = offSlack;
    1386                 if (p->cTicksMin > offSlack) p->cTicksMax = offSlack;
    1387             } 
     1416                if (p->cTicksMin > offSlack) p->cTicksMin = offSlack;
     1417            }
    13881418        });
    13891419
    13901420        /* Let the time run a little bit while we were busy running timers(?). */
    13911421        uint64_t u64Elapsed;
    1392 #define MAX_ELAPSED 15000 /*ns*/
     1422#define MAX_ELAPSED 30000 /* ns */
    13931423        if (offSlack > MAX_ELAPSED)
    13941424            u64Elapsed = 0;
     
    14041434        /* Calc the current offset. */
    14051435        uint64_t offNew = u64VirtualNow2 - pVM->tm.s.u64VirtualSync - u64Elapsed;
    1406 
    1407         /* Deal with starting, adjusting and stopping catchup. */
     1436        Assert(!(offNew & RT_BIT_64(63)));
     1437        uint64_t offLag = offNew - pVM->tm.s.offVirtualSyncGivenUp;
     1438        Assert(!(offLag & RT_BIT_64(63)));
     1439
     1440        /*
     1441         * Deal with starting, adjusting and stopping catchup.
     1442         */
    14081443        if (pVM->tm.s.fVirtualSyncCatchUp)
    14091444        {
    1410             if (offNew <= pVM->tm.s.u64VirtualSyncCatchUpStopThreshold)
     1445            if (offLag <= pVM->tm.s.u64VirtualSyncCatchUpStopThreshold)
    14111446            {
    14121447                /* stop */
    14131448                STAM_PROFILE_ADV_STOP(&pVM->tm.s.StatVirtualSyncCatchup, c);
    14141449                ASMAtomicXchgBool(&pVM->tm.s.fVirtualSyncCatchUp, false);
     1450                Log4(("TM: %RU64/%RU64: caught up\n", u64VirtualNow2 - offNew, offLag));
    14151451            }
    1416             else if (offNew <= pVM->tm.s.u64VirtualSyncCatchUpGiveUpThreshold)
     1452            else if (offLag <= pVM->tm.s.u64VirtualSyncCatchUpGiveUpThreshold)
    14171453            {
    14181454                /* adjust */
    14191455                unsigned i = 0;
    14201456                while (     i + 1 < RT_ELEMENTS(pVM->tm.s.aVirtualSyncCatchUpPeriods)
    1421                        &&   offNew >= pVM->tm.s.aVirtualSyncCatchUpPeriods[i + 1].u64Start)
     1457                       &&   offLag >= pVM->tm.s.aVirtualSyncCatchUpPeriods[i + 1].u64Start)
    14221458                    i++;
    14231459                if (pVM->tm.s.u32VirtualSyncCatchUpPercentage < pVM->tm.s.aVirtualSyncCatchUpPeriods[i].u32Percentage)
     
    14251461                    STAM_COUNTER_INC(&pVM->tm.s.aStatVirtualSyncCatchupAdjust[i]);
    14261462                    ASMAtomicXchgU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage, pVM->tm.s.aVirtualSyncCatchUpPeriods[i].u32Percentage);
     1463                    Log4(("TM: %RU64/%RU64: adj %u%%\n", u64VirtualNow2 - offNew, offLag, pVM->tm.s.u32VirtualSyncCatchUpPercentage));
    14271464                }
    14281465                pVM->tm.s.u64VirtualSyncCatchUpPrev = u64VirtualNow2;
     
    14321469                /* give up */
    14331470                STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGiveUp);
    1434                 offNew = tmR3TimerQueueRunVirtualSyncGiveup(pVM, offNew);
     1471                STAM_PROFILE_ADV_STOP(&pVM->tm.s.StatVirtualSyncCatchup, c);
     1472                ASMAtomicXchgU64((uint64_t volatile *)&pVM->tm.s.offVirtualSyncGivenUp, offNew);
     1473                ASMAtomicXchgBool(&pVM->tm.s.fVirtualSyncCatchUp, false);
     1474                Log4(("TM: %RU64/%RU64: give up %u%%\n", u64VirtualNow2 - offNew, offLag, pVM->tm.s.u32VirtualSyncCatchUpPercentage));
     1475                LogRel(("TM: Giving up catch-up attempt at a %RU64 ns lag; new total: %RU64 ns\n", offLag, offNew));
    14351476            }
    14361477        }
    1437         else if (offNew >= pVM->tm.s.aVirtualSyncCatchUpPeriods[0].u64Start)
     1478        else if (offLag >= pVM->tm.s.aVirtualSyncCatchUpPeriods[0].u64Start)
    14381479        {
    1439             if (offNew <= pVM->tm.s.u64VirtualSyncCatchUpGiveUpThreshold)
     1480            if (offLag <= pVM->tm.s.u64VirtualSyncCatchUpGiveUpThreshold)
    14401481            {
    14411482                /* start */
     
    14431484                unsigned i = 0;
    14441485                while (     i + 1 < RT_ELEMENTS(pVM->tm.s.aVirtualSyncCatchUpPeriods)
    1445                        &&   offNew >= pVM->tm.s.aVirtualSyncCatchUpPeriods[i + 1].u64Start)
     1486                       &&   offLag >= pVM->tm.s.aVirtualSyncCatchUpPeriods[i + 1].u64Start)
    14461487                    i++;
    14471488                STAM_COUNTER_INC(&pVM->tm.s.aStatVirtualSyncCatchupInitial[i]);
    14481489                ASMAtomicXchgU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage, pVM->tm.s.aVirtualSyncCatchUpPeriods[i].u32Percentage);
    14491490                ASMAtomicXchgBool(&pVM->tm.s.fVirtualSyncCatchUp, true);
     1491                Log4(("TM: %RU64/%RU64: catch-up %u%%\n", u64VirtualNow2 - offNew, offLag, pVM->tm.s.u32VirtualSyncCatchUpPercentage));
    14501492            }
    14511493            else
     
    14531495                /* don't bother */
    14541496                STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGiveUpBeforeStarting);
    1455                 offNew = tmR3TimerQueueRunVirtualSyncGiveup(pVM, offNew);
     1497                ASMAtomicXchgU64((uint64_t volatile *)&pVM->tm.s.offVirtualSyncGivenUp, offNew);
     1498                Log4(("TM: %RU64/%RU64: give up\n", u64VirtualNow2 - offNew, offLag));
     1499                LogRel(("TM: Not bothering to attempt catching up a %RU64 ns lag; new total: %RU64 ns\n", offLag, offNew));
    14561500            }
    14571501        }
    14581502
    1459         /* Update the offset and start the clock. */
    1460         ASMAtomicXchgU64(&pVM->tm.s.u64VirtualSyncOffset, offNew);
     1503        /*
     1504         * Update the offset and restart the clock.
     1505         */
     1506        ASMAtomicXchgU64(&pVM->tm.s.offVirtualSync, offNew);
    14611507        ASMAtomicXchgBool(&pVM->tm.s.fVirtualSyncTicking, true);
    14621508    }
    1463 }
    1464 
    1465 
    1466 /**
    1467  * Give up the chase.
    1468  *
    1469  * Not quite sure how to let the devices know about this, but somehow they will have
    1470  * to (quietly) drop interrupts en masse and not cause any interrupt storms...
    1471  *
    1472  * @returns New offset.
    1473  *
    1474  * @param   pVM         The VM handle.
    1475  * @param   offNew      The current offset.
    1476  */
    1477 static uint64_t tmR3TimerQueueRunVirtualSyncGiveup(PVM pVM, uint64_t offNew)
    1478 {
    1479     /** @todo deal with this. */
    1480     ASMAtomicXchgBool(&pVM->tm.s.fVirtualSyncCatchUp, false);
    1481     return 0;
    14821509}
    14831510
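
The catch-up arithmetic this function (and TMVirtualSyncGet) repeats can be modelled compactly; the names here are hypothetical. Host-clock progress u64Delta shrinks the offset by u64Delta * pct / 100, but never below the part that was given up; the real code additionally rejects deltas with any of the upper 32 bits set and uses ASMMultU64ByU32DivByU32 so the multiply cannot overflow:

    #include <stdint.h>
    static uint64_t sketchReduceOffset(uint64_t off, uint64_t offGivenUp,
                                       uint64_t u64Delta, uint32_t u32Pct)
    {
        uint64_t u64Sub = u64Delta * u32Pct / 100;   /* extra virtual time handed out */
        if (off > u64Sub + offGivenUp)
            return off - u64Sub;                     /* still behind, keep going */
        return offGivenUp;                           /* caught up with all we haven't given up */
    }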
     
    16581685                            pTimer->offPrev,
    16591686                            pTimer->offScheduleNext,
    1660                             pTimer->enmClock == TMCLOCK_REAL ? "Real " : "Virt ",
     1687                            pTimer->enmClock == TMCLOCK_REAL
     1688                            ? "Real "
     1689                            : pTimer->enmClock == TMCLOCK_VIRTUAL
     1690                            ? "Virt "
     1691                            : pTimer->enmClock == TMCLOCK_VIRTUAL_SYNC
     1692                            ? "VrSy "
     1693                            : "TSC  ",
    16611694                            TMTimerGet(pTimer),
    16621695                            pTimer->u64Expire,
     
    16791712    NOREF(pszArgs);
    16801713
    1681     /* TSC */
    1682     uint64_t u64 = TMCpuTickGet(pVM);
     1714    /*
     1715     * Read the times first to minimize the time variation between them.
     1716     */
     1717    const uint64_t u64TSC = TMCpuTickGet(pVM);
     1718    const uint64_t u64Virtual = TMVirtualGet(pVM);
     1719    const uint64_t u64VirtualSync = TMVirtualSyncGet(pVM);
     1720    const uint64_t u64Real = TMRealGet(pVM);
     1721
     1722    /*
     1723     * TSC
     1724     */
    16831725    pHlp->pfnPrintf(pHlp,
    1684                     "Cpu Tick: %#RX64 (%RU64) %RU64Hz %s%s",
    1685                     u64, u64, TMCpuTicksPerSecond(pVM),
     1726                    "Cpu Tick: %18RU64 (%#016RX64) %RU64Hz %s%s",
     1727                    u64TSC, u64TSC, TMCpuTicksPerSecond(pVM),
    16861728                    pVM->tm.s.fTSCTicking ? "ticking" : "paused",
    16871729                    pVM->tm.s.fTSCVirtualized ? " - virtualized" : "");
     
    16901732        pHlp->pfnPrintf(pHlp, " - real tsc");
    16911733        if (pVM->tm.s.u64TSCOffset)
    1692             pHlp->pfnPrintf(pHlp, "\n          offset %#RX64", pVM->tm.s.u64TSCOffset);
     1734            pHlp->pfnPrintf(pHlp, "\n          offset %RU64", pVM->tm.s.u64TSCOffset);
    16931735    }
    16941736    else
     
    16961738    pHlp->pfnPrintf(pHlp, "\n");
    16971739
    1698     /* virtual */
    1699     u64 = TMVirtualGet(pVM);
     1740    /*
     1741     * virtual
     1742     */
    17001743    pHlp->pfnPrintf(pHlp,
    1701                     " Virtual: %#RX64 (%RU64) %RU64Hz %s",
    1702                     u64, u64, TMVirtualGetFreq(pVM),
     1744                    " Virtual: %18RU64 (%#016RX64) %RU64Hz %s",
     1745                    u64Virtual, u64Virtual, TMVirtualGetFreq(pVM),
    17031746                    pVM->tm.s.fVirtualTicking ? "ticking" : "paused");
    17041747    if (pVM->tm.s.fVirtualWarpDrive)
     
    17061749    pHlp->pfnPrintf(pHlp, "\n");
    17071750
    1708     /* virtual sync */
    1709     u64 = TMVirtualSyncGet(pVM);
     1751    /*
     1752     * virtual sync
     1753     */
    17101754    pHlp->pfnPrintf(pHlp,
    1711                     "VirtSync: %#RX64 (%RU64) %s%s",
    1712                     u64, u64,
     1755                    "VirtSync: %18RU64 (%#016RX64) %s%s",
     1756                    u64VirtualSync, u64VirtualSync,
    17131757                    pVM->tm.s.fVirtualSyncTicking ? "ticking" : "paused",
    17141758                    pVM->tm.s.fVirtualSyncCatchUp ? " - catchup" : "");
    1715     if (pVM->tm.s.u64VirtualSyncOffset)
    1716         pHlp->pfnPrintf(pHlp, "\n          offset %#RX64", pVM->tm.s.u64VirtualSyncOffset);
     1759    if (pVM->tm.s.offVirtualSync)
     1760    {
     1761        pHlp->pfnPrintf(pHlp, "\n          offset %RU64", pVM->tm.s.offVirtualSync);
     1762        if (pVM->tm.s.u32VirtualSyncCatchUpPercentage)
     1763            pHlp->pfnPrintf(pHlp, "  catch-up rate %u %%", pVM->tm.s.u32VirtualSyncCatchUpPercentage);
     1764    }
    17171765    pHlp->pfnPrintf(pHlp, "\n");
    17181766
    1719     /* real */
    1720     u64 = TMRealGet(pVM);
     1767    /*
     1768     * real
     1769     */
    17211770    pHlp->pfnPrintf(pHlp,
    1722                     "    Real: %#RX64 (%RU64) %RU64Hz\n",
    1723                     u64, u64, TMRealGetFreq(pVM));
    1724 }
     1771                    "    Real: %18RU64 (%#016RX64) %RU64Hz\n",
     1772                    u64Real, u64Real, TMRealGetFreq(pVM));
     1773}
     1774
  • trunk/src/VBox/VMM/TMInternal.h

    r2248 r2283  
    329329    uint64_t                    u64VirtualWarpDriveStart;
    330330
    331     /** The offset of the virtual timer synchronous clock (TMCLOCK_VIRTUAL_SYNC) relative
    332      * to the virtual clock. */
    333     uint64_t volatile           u64VirtualSyncOffset;
     331    /** The guest virtual timer synchronous time when fVirtualSyncTicking is cleared. */
     332    uint64_t volatile           u64VirtualSync;
     333    /** The offset of the timer synchronous virtual clock (TMCLOCK_VIRTUAL_SYNC) relative
     334     * to the virtual clock (TMCLOCK_VIRTUAL).
     335     * (This is accessed by the timer thread and must be updated atomically.) */
     336    uint64_t volatile           offVirtualSync;
     337    /** The offset into offVirtualSync that's been irrevocably given up by failed catch-up attempts.
     338     * Thus the current lag is offVirtualSync - offVirtualSyncGivenUp. */
     339    uint64_t                    offVirtualSyncGivenUp;
    334340    /** The TMCLOCK_VIRTUAL at the previous TMVirtualGetSync call when catch-up is active. */
    335341    uint64_t volatile           u64VirtualSyncCatchUpPrev;
    336     /** The guest virtual timer synchronous time when fVirtualSyncTicking is cleared. */
    337     uint64_t volatile           u64VirtualSync;
    338342    /** The current catch-up percentage. */
    339343    uint32_t volatile           u32VirtualSyncCatchUpPercentage;
     
    346350/** @def TM_MAX_CATCHUP_PERIODS
    347351 * The number of catchup rates. */
    348 #define TM_MAX_CATCHUP_PERIODS  8
     352#define TM_MAX_CATCHUP_PERIODS  10
    349353    /** The aggressiveness of the catch-up relative to how far we've lagged behind.
    350354     * The idea is to have increasing catch-up percentage as the lag increases. */
     
    387391
    388392    /** Alignment padding to ensure that the statistics are 64-bit aligned when using GCC. */
    389     uint32_t                    u32Padding;
     393    uint32_t                    u32Padding1;
    390394
    391395    /** TMR3TimerQueuesDo
     
    408412     * @{ */
    409413    STAMCOUNTER                 StatVirtualGet;
     414    STAMCOUNTER                 StatVirtualGetSetFF;
    410415    STAMCOUNTER                 StatVirtualGetSync;
     416    STAMCOUNTER                 StatVirtualGetSyncSetFF;
    411417    STAMCOUNTER                 StatVirtualPause;
    412418    STAMCOUNTER                 StatVirtualResume;
  • trunk/src/VBox/VMM/VM.cpp

    r2233 r2283  
    25612561DECLCALLBACK(void) vmR3SetErrorV(PVM pVM, int rc, RT_SRC_POS_DECL, const char *pszFormat, va_list *pArgs)
    25622562{
     2563#ifdef LOG_ENABLED
     2564    /*
     2565     * Log the error.
     2566     */
     2567    RTLogPrintf("VMSetError: %s(%d) %s\n", pszFile, iLine, pszFunction);
     2568    va_list va3;
     2569    va_copy(va3, *pArgs);
     2570    RTLogPrintfV(pszFormat, va3);
     2571    va_end(va3);
     2572#endif
     2573
    25632574    /*
    25642575     * Make a copy of the message.
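
The va_copy is needed because the argument list is consumed twice: once for the log and once for the message copy below. A standard-C illustration (not VBox code):

    #include <stdarg.h>
    #include <stdio.h>
    static void sketchLogThenKeep(const char *pszFormat, va_list *pArgs)
    {
        va_list va3;
        va_copy(va3, *pArgs);            /* leave *pArgs intact for the next consumer */
        vfprintf(stderr, pszFormat, va3);
        va_end(va3);
    }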
  • trunk/src/VBox/VMM/VMEmt.cpp

    r1633 r2283  
    3939#include <iprt/semaphore.h>
    4040#include <iprt/thread.h>
     41#include <iprt/time.h>
    4142
    4243
     
    308309     * and the yielder is suspended.
    309310     */
    310 //    TMCpuTickResume(pVM);
    311311    VMMR3YieldSuspend(pVM);
    312312
     
    316316    int rc = VINF_SUCCESS;
    317317    ASMAtomicXchgU32(&pVM->vm.s.fWait, 1);
    318     //unsigned cLoops = 0;
     318    unsigned cLoops = 0;
    319319    for (;;)
    320320    {
    321 #ifdef VBOX_HIGH_RES_TIMERS_HACK
    322321        /*
    323322         * Work the timers and check if we can exit.
     
    343342        if (u64NanoTS < 50000)
    344343        {
    345             //RTLogPrintf("u64NanoTS=%RI64 cLoops=%d spin\n", u64NanoTS, cLoops++);
     344            RTLogPrintf("u64NanoTS=%RI64 cLoops=%d spin\n", u64NanoTS, cLoops++);
    346345            /* spin */;
    347346        }
     
    349348        {
    350349            VMMR3YieldStop(pVM);
     350            uint64_t u64Start = RTTimeNanoTS();
    351351            if (u64NanoTS <  870000) /* this is a bit speculative... works fine on linux. */
    352352            {
    353                 //RTLogPrintf("u64NanoTS=%RI64 cLoops=%d yield\n", u64NanoTS, cLoops++);
     353                RTLogPrintf("u64NanoTS=%RI64 cLoops=%d yield", u64NanoTS, cLoops++);
    354354                STAM_PROFILE_ADV_START(&pVM->vm.s.StatHaltYield, a);
    355355                RTThreadYield(); /* this is the best we can do here */
     
    358358            else if (u64NanoTS < 2000000)
    359359            {
    360                 //RTLogPrintf("u64NanoTS=%RI64 cLoops=%d sleep 1ms\n", u64NanoTS, cLoops++);
     360                RTLogPrintf("u64NanoTS=%RI64 cLoops=%d sleep 1ms", u64NanoTS, cLoops++);
    361361                STAM_PROFILE_ADV_START(&pVM->vm.s.StatHaltBlock, a);
    362362                rc = RTSemEventWait(pVM->vm.s.EventSemWait, 1);
     
    365365            else
    366366            {
    367                 //RTLogPrintf("u64NanoTS=%RI64 cLoops=%d sleep %dms\n", u64NanoTS, cLoops++, (uint32_t)RT_MIN(u64NanoTS / 1000000, 15));
     367                RTLogPrintf("u64NanoTS=%RI64 cLoops=%d sleep %dms", u64NanoTS, cLoops++, (uint32_t)RT_MIN((u64NanoTS - 500000) / 1000000, 15));
    368368                STAM_PROFILE_ADV_START(&pVM->vm.s.StatHaltBlock, a);
    369                 rc = RTSemEventWait(pVM->vm.s.EventSemWait, RT_MIN(u64NanoTS / 1000000, 15));
     369                rc = RTSemEventWait(pVM->vm.s.EventSemWait, RT_MIN((u64NanoTS - 1000000) / 1000000, 15));
    370370                STAM_PROFILE_ADV_STOP(&pVM->vm.s.StatHaltBlock, a);
    371371            }
    372         }
    373 #else
    374 
    375         /*
    376          * We have to check if we can exit, run timers, and then recheck.
    377          */
    378         /** @todo
    379          * The other thing we have to check is how long it is till the next timer
    380          * can be serviced and not wait any longer than that.
    381          */
    382         if (VM_FF_ISPENDING(pVM, fMask))
    383             break;
    384         STAM_PROFILE_ADV_START(&pVM->vm.s.StatHaltTimers, b);
    385         TMR3TimerQueuesDo(pVM);
    386         STAM_PROFILE_ADV_STOP(&pVM->vm.s.StatHaltTimers, b);
    387         if (VM_FF_ISPENDING(pVM, fMask))
    388             break;
    389         /* hacking */
    390         RTThreadYield();
    391         TMR3TimerQueuesDo(pVM);
    392         if (VM_FF_ISPENDING(pVM, fMask))
    393             break;
    394 
    395         /*
    396          * Wait for a while. Someone will wake us up or interrupt the call if
    397          * anything needs our attention.
    398          */
    399         STAM_PROFILE_ADV_START(&pVM->vm.s.StatHaltBlock, a);
    400         rc = RTSemEventWait(pVM->vm.s.EventSemWait, 10);
    401         STAM_PROFILE_ADV_STOP(&pVM->vm.s.StatHaltBlock, a);
    402 #endif
     372            uint64_t u64Slept = RTTimeNanoTS() - u64Start;
     373            RTLogPrintf(" -> rc=%Vrc in %RU64 ns / %RI64 ns delta\n", rc, u64Slept, u64NanoTS - u64Slept);
     374        }
    403375        if (rc == VERR_TIMEOUT)
    404376            rc = VINF_SUCCESS;
     
    419391     * and resume the yielder.
    420392     */
    421 //    TMCpuTickPause(pVM);
    422393    VMMR3YieldResume(pVM);
    423394
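
With the non-VBOX_HIGH_RES_TIMERS_HACK branch removed, the halt loop always uses the graded wait above. Restated as a sketch (thresholds from this changeset; the semaphore handle and function name are hypothetical):

    static int sketchHaltOnce(RTSEMEVENT hEvent, uint64_t cNsToNext)
    {
        if (cNsToNext < 50000)          /* < 50us: just spin */
            return VINF_SUCCESS;
        if (cNsToNext < 870000)         /* < 870us: yield the CPU once */
        {
            RTThreadYield();
            return VINF_SUCCESS;
        }
        if (cNsToNext < 2000000)        /* < 2ms: block for 1ms */
            return RTSemEventWait(hEvent, 1);
        /* otherwise block for most of the interval, capped at 15ms */
        return RTSemEventWait(hEvent, RT_MIN((cNsToNext - 1000000) / 1000000, 15));
    }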
  • trunk/src/VBox/VMM/VMM.cpp

    r2228 r2283  
    17471747        TMTimerStop(pVM->vmm.s.pYieldTimer);
    17481748    }
     1749    pVM->vmm.s.u64LastYield = RTTimeNanoTS();
    17491750}
    17501751
     
    17601761        TMTimerStop(pVM->vmm.s.pYieldTimer);
    17611762    pVM->vmm.s.cYieldResumeMillies = pVM->vmm.s.cYieldEveryMillies;
     1763    pVM->vmm.s.u64LastYield = RTTimeNanoTS();
    17621764}
    17631765
     
    17871789static DECLCALLBACK(void) vmmR3YieldEMT(PVM pVM, PTMTIMER pTimer, void *pvUser)
    17881790{
     1791    /*
     1792     * This really needs some careful tuning. While we shouldn't be too greedy since
     1793     * that'll stall the rest of the system, we shouldn't be too nice either
     1794     * because that'll stall us.
     1795     *
     1796     * The current logic is to use the default interval when there is no lag worth
     1797     * mentioning, but when we start accumulating lag we don't bother yielding at all.
     1798     *
     1799     * (This depends on TMCLOCK_VIRTUAL_SYNC being scheduled before TMCLOCK_REAL
     1800     * so the lag is up to date.)
     1801     */
     1802    const uint64_t u64Lag = TMVirtualSyncGetLag(pVM);
     1803    if (    u64Lag     <   50000000 /* 50ms */
     1804        ||  (   u64Lag < 1000000000 /*  1s */
     1805             && RTTimeNanoTS() - pVM->vmm.s.u64LastYield < 500000000 /* 500 ms */)
     1806       )
     1807    {
     1808        uint64_t u64Elapsed = RTTimeNanoTS();
     1809        pVM->vmm.s.u64LastYield = u64Elapsed;
     1810
     1811        RTThreadYield();
     1812
    17891813#ifdef LOG_ENABLED
    1790     uint64_t u64Elapsed = RTTimeNanoTS();
    1791 #endif
    1792     RTThreadYield();
     1814        u64Elapsed = RTTimeNanoTS() - u64Elapsed;
     1815        Log(("vmmR3YieldEMT: %RI64 ns\n", u64Elapsed));
     1816#endif
     1817    }
    17931818    TMTimerSetMillies(pTimer, pVM->vmm.s.cYieldEveryMillies);
    1794     Log(("vmmR3YieldEMT: %RI64 ns\n", RTTimeNanoTS() - u64Elapsed));
    17951819}
    17961820
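
The new gate in vmmR3YieldEMT, restated as a sketch (constants from the diff; the predicate below is an illustration, not the committed code): yield while the virtual-sync lag is negligible, or while it is moderate and the previous yield was less than 500ms ago; once seriously behind, spend the cycles catching up instead:

    #include <stdbool.h>
    #include <stdint.h>
    static bool sketchShouldYield(uint64_t u64Lag, uint64_t u64Now, uint64_t u64LastYield)
    {
        return u64Lag < UINT64_C(50000000)                       /* lag < 50ms */
            || (   u64Lag < UINT64_C(1000000000)                 /* lag < 1s */
                && u64Now - u64LastYield < UINT64_C(500000000)); /* last yield < 500ms ago */
    }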
  • trunk/src/VBox/VMM/VMMAll/TMAll.cpp

    r2248 r2283  
    161161    const uint64_t u64Now = TMVirtualGet(pVM);
    162162
    163     /* TMCLOCK_VIRTUAL */
     163    /*
     164     * TMCLOCK_VIRTUAL
     165     */
    164166    const uint64_t u64Expire1 = pVM->tm.s.CTXALLSUFF(paTimerQueues)[TMCLOCK_VIRTUAL].u64Expire;
    165167    const int64_t i64Delta1 = u64Expire1 - u64Now;
     
    175177    }
    176178
    177     /* TMCLOCK_VIRTUAL_SYNC */
     179    /*
     180     * TMCLOCK_VIRTUAL_SYNC
     181     * This isn't quite as straightforward when in a catch-up: not only do
     182     * we have to adjust 'now', we have to adjust the delta as well.
     183     */
    178184    const uint64_t u64Expire2 = pVM->tm.s.CTXALLSUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire;
    179     const int64_t i64Delta2 = u64Expire2 - (u64Now - pVM->tm.s.u64VirtualSyncOffset);
     185    uint64_t u64VirtualSyncNow;
     186    if (!pVM->tm.s.fVirtualSyncTicking)
     187        u64VirtualSyncNow = pVM->tm.s.u64VirtualSync;
     188    else
     189    {
     190        if (!pVM->tm.s.fVirtualSyncCatchUp)
     191            u64VirtualSyncNow = u64Now - pVM->tm.s.offVirtualSync;
     192        else
     193        {
     194            uint64_t off = pVM->tm.s.offVirtualSync;
     195            uint64_t u64Delta = u64Now - pVM->tm.s.u64VirtualSyncCatchUpPrev;
     196            if (RT_LIKELY(!(u64Delta >> 32)))
     197            {
     198                uint64_t u64Sub = ASMMultU64ByU32DivByU32(u64Delta, pVM->tm.s.u32VirtualSyncCatchUpPercentage, 100);
     199                if (off > u64Sub + pVM->tm.s.offVirtualSyncGivenUp)
     200                    off -= u64Sub;
     201                else
     202                    off = pVM->tm.s.offVirtualSyncGivenUp;
     203            }
     204            u64VirtualSyncNow = u64Now - off;
     205        }
     206    }
     207    int64_t i64Delta2 = u64Expire2 - u64VirtualSyncNow;
    180208    if (i64Delta2 <= 0)
    181209    {
     
    188216        return 0;
    189217    }
     218    if (pVM->tm.s.fVirtualSyncCatchUp)
     219        i64Delta2 = ASMMultU64ByU32DivByU32(i64Delta2, 100, pVM->tm.s.u32VirtualSyncCatchUpPercentage + 100);
    190220
    191221    /*
    192      * Return the time self to the next event.
     222     * Return the time left to the next event.
    193223     */
    194224    STAM_COUNTER_INC(&pVM->tm.s.StatPollMiss);
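
A worked example of the rescaling above: at a catch-up percentage of 100 the virtual-sync clock runs at 2x, so a timer due in 10ms of sync-clock time fires after only 5ms of TMCLOCK_VIRTUAL time:

    #include <assert.h>
    #include <stdint.h>
    int main(void)
    {
        uint64_t i64Delta2 = 10000000;               /* 10ms on the sync clock */
        uint32_t u32Pct    = 100;                    /* catching up at 2x */
        i64Delta2 = i64Delta2 * 100 / (100 + u32Pct);
        assert(i64Delta2 == 5000000);                /* 5ms of virtual time */
        return 0;
    }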
     
    359389                    Assert(!pTimer->offPrev);
    360390                    Assert(!pTimer->offNext);
     391                    AssertMsg(      pTimer->enmClock != TMCLOCK_VIRTUAL_SYNC
     392                              ||    pTimer->CTXALLSUFF(pVM)->tm.s.fVirtualSyncTicking
     393                              ||    u64Expire >= pTimer->CTXALLSUFF(pVM)->tm.s.u64VirtualSync,
     394                              ("%RU64 < %RU64 %s\n", u64Expire, pTimer->CTXALLSUFF(pVM)->tm.s.u64VirtualSync, R3STRING(pTimer->pszDesc)));
    361395                    pTimer->u64Expire = u64Expire;
    362396                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
  • trunk/src/VBox/VMM/VMMAll/TMAllVirtual.cpp

    r2248 r2283  
    9696{
    9797    uint64_t u64;
    98     if (pVM->tm.s.fVirtualTicking)
     98    if (RT_LIKELY(pVM->tm.s.fVirtualTicking))
    9999    {
    100100        STAM_COUNTER_INC(&pVM->tm.s.StatVirtualGet);
     
    108108            &&  (   pVM->tm.s.CTXALLSUFF(paTimerQueues)[TMCLOCK_VIRTUAL].u64Expire <= u64
    109109                 || (   pVM->tm.s.fVirtualSyncTicking
    110                      && pVM->tm.s.CTXALLSUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire <= u64 - pVM->tm.s.u64VirtualSyncOffset
     110                     && pVM->tm.s.CTXALLSUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire <= u64 - pVM->tm.s.offVirtualSync
    111111                    )
    112112                )
     
    114114        {
    115115            VM_FF_SET(pVM, VM_FF_TIMER);
     116            STAM_COUNTER_INC(&pVM->tm.s.StatVirtualGetSetFF);
    116117#ifdef IN_RING3
    117118            REMR3NotifyTimerPending(pVM);
     
    190191            VMR3NotifyFF(pVM, true);
    191192#endif
     193            STAM_COUNTER_INC(&pVM->tm.s.StatVirtualGetSyncSetFF);
    192194        }
    193195
     
    210212         *
    211213         * Assuming nano second virtual time, we can simply ignore any intervals which has
    212          * any of the upper 32 bits set. This will have the nice sideeffect of allowing us
    213          * to use (faster) 32-bit math.
     214         * any of the upper 32 bits set.
    214215         */
    215         AssertCompile(TMCLOCK_FREQ_VIRTUAL <= 2000000000); /* (assumes low 32-bit >= 2 seconds) */
    216         uint64_t u64Offset = pVM->tm.s.u64VirtualSyncOffset;
     216        AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
     217        uint64_t off = pVM->tm.s.offVirtualSync;
    217218        if (pVM->tm.s.fVirtualSyncCatchUp)
    218219        {
     
    221222            if (RT_LIKELY(!(u64Delta >> 32)))
    222223            {
    223                 uint32_t u32Sub = ASMDivU64ByU32RetU32(ASMMult2xU32RetU64((uint32_t)u64Delta, pVM->tm.s.u32VirtualSyncCatchUpPercentage),
    224                                                        100);
    225                 if (u64Offset > u32Sub)
     224                uint64_t u64Sub = ASMMultU64ByU32DivByU32(u64Delta, pVM->tm.s.u32VirtualSyncCatchUpPercentage, 100);
     225                if (off > u64Sub + pVM->tm.s.offVirtualSyncGivenUp)
    226226                {
    227                     u64Offset -= u32Sub;
    228                     ASMAtomicXchgU64(&pVM->tm.s.u64VirtualSyncOffset, u64Offset);
     227                    off -= u64Sub;
     228                    ASMAtomicXchgU64(&pVM->tm.s.offVirtualSync, off);
    229229                    pVM->tm.s.u64VirtualSyncCatchUpPrev = u64;
     230                    Log4(("TM: %RU64/%RU64: sub %RU64\n", u64 - off, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp, u64Sub));
    230231                }
    231232                else
    232233                {
    233234                    /* we've completely caught up. */
    234                     u64Offset = 0;
    235                     ASMAtomicXchgU64(&pVM->tm.s.u64VirtualSyncOffset, 0);
     235                    STAM_PROFILE_ADV_STOP(&pVM->tm.s.StatVirtualSyncCatchup, c);
     236                    off = pVM->tm.s.offVirtualSyncGivenUp;
     237                    ASMAtomicXchgU64(&pVM->tm.s.offVirtualSync, off);
    236238                    ASMAtomicXchgBool(&pVM->tm.s.fVirtualSyncCatchUp, false);
    237239                    pVM->tm.s.u64VirtualSyncCatchUpPrev = u64;
     240                    Log4(("TM: %RU64/0: caught up\n", u64));
    238241                }
    239242            }
     
    252255         * set the timer pending flag.
    253256         */
    254         u64 -= u64Offset;
     257        u64 -= off;
    255258        const uint64_t u64Expire = pVM->tm.s.CTXALLSUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire;
    256259        if (u64 >= u64Expire)
     
    266269                VMR3NotifyFF(pVM, true);
    267270#endif
     271                STAM_COUNTER_INC(&pVM->tm.s.StatVirtualGetSyncSetFF);
     272                Log4(("TM: %RU64/%RU64: exp tmr=>ff\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
    268273            }
     274            else
     275                Log4(("TM: %RU64/%RU64: exp tmr\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
    269276        }
    270277    }
     
    283290TMDECL(uint64_t) TMVirtualSyncGetLag(PVM pVM)
    284291{
    285     return pVM->tm.s.u64VirtualSyncOffset;
     292    return pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp;
    286293}
    287294
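
An example of the two-offset accounting behind the new TMVirtualSyncGetLag: if the sync clock trails the virtual clock by 90ms but 60ms of that was abandoned by a give-up, the lag still being chased is 30ms:

    #include <assert.h>
    #include <stdint.h>
    int main(void)
    {
        uint64_t offVirtualSync        = UINT64_C(90000000);    /* total offset */
        uint64_t offVirtualSyncGivenUp = UINT64_C(60000000);    /* abandoned part */
        assert(offVirtualSync - offVirtualSyncGivenUp == UINT64_C(30000000));
        return 0;
    }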
  • trunk/src/VBox/VMM/VMMInternal.h

    r1210 r2283  
    238238    uint32_t                    u32Padding0; /**< Alignment padding. */
    239239#endif
     240    /** The timestamp of the previous yield. (nano) */
     241    uint64_t                    u64LastYield;
    240242
    241243    /** @name CallHost
  • trunk/src/VBox/VMM/testcase/tstVMStructGC.cpp

    r2270 r2283  
    598598    GEN_CHECK_OFF(TM, u64Virtual);
    599599    GEN_CHECK_OFF(TM, u64VirtualWarpDriveStart);
    600     GEN_CHECK_OFF(TM, u64VirtualSyncOffset);
     600    GEN_CHECK_OFF(TM, u64VirtualSync);
     601    GEN_CHECK_OFF(TM, offVirtualSync);
     602    GEN_CHECK_OFF(TM, offVirtualSyncGivenUp);
    601603    GEN_CHECK_OFF(TM, u64VirtualSyncCatchUpPrev);
    602     GEN_CHECK_OFF(TM, u64VirtualSync);
    603604    GEN_CHECK_OFF(TM, u32VirtualSyncCatchUpPercentage);
    604605    GEN_CHECK_OFF(TM, u32VirtualSyncScheduleSlack);