VirtualBox

Changeset 20050 in vbox for trunk/src/VBox/VMM


Timestamp: May 26, 2009 5:12:12 PM (16 years ago)
Author: vboxsync
Message: TM: new state and more logging (gee).
Location: trunk/src/VBox/VMM
Files: 4 edited

  • trunk/src/VBox/VMM/TM.cpp (r19820 → r20050)

    @@ -396 +396 @@
         else
             CPUMR3SetCR4Feature(pVM, 0, ~X86_CR4_TSD);
    -    LogRel(("TM: cTSCTicksPerSecond=%#RX64 (%RU64) fTSCVirtualized=%RTbool fTSCUseRealTSC=%RTbool\n"
    +    LogRel(("TM: cTSCTicksPerSecond=%#RX64 (%'RU64) fTSCVirtualized=%RTbool fTSCUseRealTSC=%RTbool\n"
                 "TM: fMaybeUseOffsettedHostTSC=%RTbool TSCTiedToExecution=%RTbool TSCNotTiedToHalt=%RTbool\n",
                 pVM->tm.s.cTSCTicksPerSecond, pVM->tm.s.cTSCTicksPerSecond, pVM->tm.s.fTSCVirtualized, pVM->tm.s.fTSCUseRealTSC,

    @@ -447 +447 @@
             if (    (iPeriod > 0 && u64 <= pVM->tm.s.aVirtualSyncCatchUpPeriods[iPeriod - 1].u64Start) \
                 ||  u64 >= pVM->tm.s.u64VirtualSyncCatchUpGiveUpThreshold) \
    -            return VMSetError(pVM, VERR_INVALID_PARAMETER, RT_SRC_POS, N_("Configuration error: Invalid start of period #" #iPeriod ": %RU64"), u64); \
    +            return VMSetError(pVM, VERR_INVALID_PARAMETER, RT_SRC_POS, N_("Configuration error: Invalid start of period #" #iPeriod ": %'RU64"), u64); \
             pVM->tm.s.aVirtualSyncCatchUpPeriods[iPeriod].u64Start = u64; \
             rc = CFGMR3QueryU32(pCfgHandle, "CatchUpPrecentage" #iPeriod, &pVM->tm.s.aVirtualSyncCatchUpPeriods[iPeriod].u32Percentage); \

    @@ -542 +542 @@
         STAM_REL_REG_USED(pVM,(void*)&pVM->tm.s.VirtualGetRawDataR0.c1nsSteps,STAMTYPE_U32, "/TM/R0/1nsSteps",                     STAMUNIT_OCCURENCES, "Virtual time 1ns steps (due to TSC / GIP variations).");
         STAM_REL_REG_USED(pVM,(void*)&pVM->tm.s.VirtualGetRawDataR0.cBadPrev, STAMTYPE_U32, "/TM/R0/cBadPrev",                     STAMUNIT_OCCURENCES, "Times the previous virtual time was considered erratic (shouldn't ever happen).");
    -    STAM_REL_REG_USED(pVM,(void*)&pVM->tm.s.VirtualGetRawDataRC.c1nsSteps,STAMTYPE_U32, "/TM/GC/1nsSteps",                     STAMUNIT_OCCURENCES, "Virtual time 1ns steps (due to TSC / GIP variations).");
    -    STAM_REL_REG_USED(pVM,(void*)&pVM->tm.s.VirtualGetRawDataRC.cBadPrev, STAMTYPE_U32, "/TM/GC/cBadPrev",                     STAMUNIT_OCCURENCES, "Times the previous virtual time was considered erratic (shouldn't ever happen).");
    +    STAM_REL_REG_USED(pVM,(void*)&pVM->tm.s.VirtualGetRawDataRC.c1nsSteps,STAMTYPE_U32, "/TM/RC/1nsSteps",                     STAMUNIT_OCCURENCES, "Virtual time 1ns steps (due to TSC / GIP variations).");
    +    STAM_REL_REG_USED(pVM,(void*)&pVM->tm.s.VirtualGetRawDataRC.cBadPrev, STAMTYPE_U32, "/TM/RC/cBadPrev",                     STAMUNIT_OCCURENCES, "Times the previous virtual time was considered erratic (shouldn't ever happen).");
         STAM_REL_REG(     pVM,(void*)&pVM->tm.s.offVirtualSync,               STAMTYPE_U64, "/TM/VirtualSync/CurrentOffset",               STAMUNIT_NS, "The current offset. (subtract GivenUp to get the lag)");
         STAM_REL_REG_USED(pVM,(void*)&pVM->tm.s.offVirtualSyncGivenUp,        STAMTYPE_U64, "/TM/VirtualSync/GivenUp",                     STAMUNIT_NS, "Nanoseconds of the 'CurrentOffset' that's been given up and won't ever be attemted caught up with.");

    @@ -552 +552 @@
         STAM_REG_USED(pVM,(void *)&pVM->tm.s.VirtualGetRawDataR0.cExpired,    STAMTYPE_U32, "/TM/R0/cExpired",                     STAMUNIT_OCCURENCES, "Times the TSC interval expired (overlaps 1ns steps).");
         STAM_REG_USED(pVM,(void *)&pVM->tm.s.VirtualGetRawDataR0.cUpdateRaces,STAMTYPE_U32, "/TM/R0/cUpdateRaces",                 STAMUNIT_OCCURENCES, "Thread races when updating the previous timestamp.");
    -    STAM_REG_USED(pVM,(void *)&pVM->tm.s.VirtualGetRawDataRC.cExpired,    STAMTYPE_U32, "/TM/GC/cExpired",                     STAMUNIT_OCCURENCES, "Times the TSC interval expired (overlaps 1ns steps).");
    -    STAM_REG_USED(pVM,(void *)&pVM->tm.s.VirtualGetRawDataRC.cUpdateRaces,STAMTYPE_U32, "/TM/GC/cUpdateRaces",                 STAMUNIT_OCCURENCES, "Thread races when updating the previous timestamp.");
    +    STAM_REG_USED(pVM,(void *)&pVM->tm.s.VirtualGetRawDataRC.cExpired,    STAMTYPE_U32, "/TM/RC/cExpired",                     STAMUNIT_OCCURENCES, "Times the TSC interval expired (overlaps 1ns steps).");
    +    STAM_REG_USED(pVM,(void *)&pVM->tm.s.VirtualGetRawDataRC.cUpdateRaces,STAMTYPE_U32, "/TM/RC/cUpdateRaces",                 STAMUNIT_OCCURENCES, "Thread races when updating the previous timestamp.");
         STAM_REG(pVM, &pVM->tm.s.StatDoQueues,                            STAMTYPE_PROFILE, "/TM/DoQueues",                    STAMUNIT_TICKS_PER_CALL, "Profiling timer TMR3TimerQueuesDo.");
         STAM_REG(pVM, &pVM->tm.s.aStatDoQueues[TMCLOCK_VIRTUAL],      STAMTYPE_PROFILE_ADV, "/TM/DoQueues/Virtual",            STAMUNIT_TICKS_PER_CALL, "Time spent on the virtual clock queue.");

    @@ -968 +968 @@
                 ASMAtomicWriteU64((uint64_t volatile *)&pVM->tm.s.offVirtualSync, offNew);
                 ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncCatchUp, false);
    -            LogRel(("TM: Aborting catch-up attempt on reset with a %RU64 ns lag on reset; new total: %RU64 ns\n", offNew - offOld, offNew));
    +            LogRel(("TM: Aborting catch-up attempt on reset with a %'RU64 ns lag on reset; new total: %'RU64 ns\n", offNew - offOld, offNew));
             }
         }

    @@ -1098 +1098 @@
         if (u64Hz != TMCLOCK_FREQ_VIRTUAL)
         {
    -        AssertMsgFailed(("The virtual clock frequency differs! Saved: %RU64 Binary: %RU64\n",
    +        AssertMsgFailed(("The virtual clock frequency differs! Saved: %'RU64 Binary: %'RU64\n",
                              u64Hz, TMCLOCK_FREQ_VIRTUAL));
             return VERR_SSM_VIRTUAL_CLOCK_HZ;

    @@ -1126 +1126 @@
         if (u64Hz != TMCLOCK_FREQ_REAL)
         {
    -        AssertMsgFailed(("The real clock frequency differs! Saved: %RU64 Binary: %RU64\n",
    +        AssertMsgFailed(("The real clock frequency differs! Saved: %'RU64 Binary: %'RU64\n",
                              u64Hz, TMCLOCK_FREQ_REAL));
             return VERR_SSM_VIRTUAL_CLOCK_HZ; /* missleading... */

    @@ -1149 +1149 @@
             pVM->tm.s.cTSCTicksPerSecond = u64Hz;

    -    LogRel(("TM: cTSCTicksPerSecond=%#RX64 (%RU64) fTSCVirtualized=%RTbool fTSCUseRealTSC=%RTbool (state load)\n",
    +    LogRel(("TM: cTSCTicksPerSecond=%#RX64 (%'RU64) fTSCVirtualized=%RTbool fTSCUseRealTSC=%RTbool (state load)\n",
                 pVM->tm.s.cTSCTicksPerSecond, pVM->tm.s.cTSCTicksPerSecond, pVM->tm.s.fTSCVirtualized, pVM->tm.s.fTSCUseRealTSC));

    @@ -1386 +1386 @@
             {
                 case TMTIMERSTATE_STOPPED:
    -            case TMTIMERSTATE_EXPIRED:
    +            case TMTIMERSTATE_EXPIRED_DELIVER:
                     break;

    @@ -1407 +1407 @@
                  * This shouldn't happen as the caller should make sure there are no races.
                  */
    +            case TMTIMERSTATE_EXPIRED_GET_UNLINK:
                 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
                 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:

    @@ -1803 +1804 @@
                   pTimer, tmTimerState(pTimer->enmState), pTimer->enmClock, pTimer->enmType, pTimer->u64Expire, u64Now, pTimer->pszDesc));
             bool fRc;
    -        TM_TRY_SET_STATE(pTimer, TMTIMERSTATE_EXPIRED, TMTIMERSTATE_ACTIVE, fRc);
    +        TM_TRY_SET_STATE(pTimer, TMTIMERSTATE_EXPIRED_GET_UNLINK, TMTIMERSTATE_ACTIVE, fRc);
             if (fRc)
             {

    @@ -1822 +1823 @@
                 pTimer->offPrev = 0;

    -
                 /* fire */
    +            TM_SET_STATE(pTimer, TMTIMERSTATE_EXPIRED_DELIVER);
     //            tmUnlock(pVM);
                 switch (pTimer->enmType)

    @@ -1843 +1844 @@

                 /* change the state if it wasn't changed already in the handler. */
    -            TM_TRY_SET_STATE(pTimer, TMTIMERSTATE_STOPPED, TMTIMERSTATE_EXPIRED, fRc);
    +            TM_TRY_SET_STATE(pTimer, TMTIMERSTATE_STOPPED, TMTIMERSTATE_EXPIRED_DELIVER, fRc);
                 Log2(("tmR3TimerQueueRun: new state %s\n", tmTimerState(pTimer->enmState)));
             }

    @@ -1902 +1903 @@
         {
             /* Calc 'now'. */
    -        bool     fStopCatchup  = false;
    -        bool     fUpdateStuff  = false;
    -        uint64_t off           = pVM->tm.s.offVirtualSync;
    +        bool        fStopCatchup   = false;
    +        bool        fUpdateStuff   = false;
    +        uint64_t    off            = pVM->tm.s.offVirtualSync;
             if (pVM->tm.s.fVirtualSyncCatchUp)
             {

    @@ -1914 +1915 @@
                     {
                         off -= u64Sub;
    -                    Log4(("TM: %RU64/%RU64: sub %RU64 (run)\n", u64VirtualNow - off, off - offSyncGivenUp, u64Sub));
    +                    Log4(("TM: %'RU64/-%'8RU64: sub %'RU64 [tmR3TimerQueueRunVirtualSync]\n", u64VirtualNow - off, off - offSyncGivenUp, u64Sub));
                     }
                     else

    @@ -1921 +1922 @@
                         fStopCatchup = true;
                         off = offSyncGivenUp;
    -                    Log4(("TM: %RU64/0: caught up (run)\n", u64VirtualNow));
                     }
                 }

    @@ -1935 +1935 @@
                 ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSync, u64Now);
                 ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncTicking, false);
    -            Log4(("TM: %RU64/%RU64: exp tmr (run)\n", u64Now, u64VirtualNow - u64Now - offSyncGivenUp));
    +            Log4(("TM: %'RU64/-%'8RU64: exp tmr [tmR3TimerQueueRunVirtualSync]\n", u64Now, u64VirtualNow - u64Now - offSyncGivenUp));
             }
             else if (fUpdateStuff)

    @@ -1942 +1942 @@
                 ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev, u64VirtualNow);
                 if (fStopCatchup)
    +            {
                     ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncCatchUp, false);
    +                Log4(("TM: %'RU64/0: caught up [tmR3TimerQueueRunVirtualSync]\n", u64VirtualNow));
    +            }
             }
         }

    @@ -1970 +1973 @@
                   pTimer, tmTimerState(pTimer->enmState), pTimer->enmClock, pTimer->enmType, pTimer->u64Expire, u64Now, pTimer->pszDesc));
             bool fRc;
    -        TM_TRY_SET_STATE(pTimer, TMTIMERSTATE_EXPIRED, TMTIMERSTATE_ACTIVE, fRc);
    +        TM_TRY_SET_STATE(pTimer, TMTIMERSTATE_EXPIRED_GET_UNLINK, TMTIMERSTATE_ACTIVE, fRc);
             if (fRc)
             {

    @@ -1989 +1992 @@
                 /* advance the clock - don't permit timers to be out of order or armed in the 'past'. */
     #ifdef VBOX_STRICT
    -            AssertMsg(pTimer->u64Expire >= u64Prev, ("%RU64 < %RU64 %s\n", pTimer->u64Expire, u64Prev, pTimer->pszDesc));
    +            AssertMsg(pTimer->u64Expire >= u64Prev, ("%'RU64 < %'RU64 %s\n", pTimer->u64Expire, u64Prev, pTimer->pszDesc));
                 u64Prev = pTimer->u64Expire;
     #endif

    @@ -1996 +1999 @@

                 /* fire */
    +            TM_SET_STATE(pTimer, TMTIMERSTATE_EXPIRED_DELIVER);
                 switch (pTimer->enmType)
                 {

    @@ -2008 +2012 @@

                 /* change the state if it wasn't changed already in the handler. */
    -            TM_TRY_SET_STATE(pTimer, TMTIMERSTATE_STOPPED, TMTIMERSTATE_EXPIRED, fRc);
    +            TM_TRY_SET_STATE(pTimer, TMTIMERSTATE_STOPPED, TMTIMERSTATE_EXPIRED_DELIVER, fRc);
                 Log2(("tmR3TimerQueueRun: new state %s\n", tmTimerState(pTimer->enmState)));
             }

    @@ -2025 +2029 @@
             const uint64_t u64VirtualNow2 = TMVirtualGetNoCheck(pVM);
             Assert(u64VirtualNow2 >= u64VirtualNow);
    -        AssertMsg(pVM->tm.s.u64VirtualSync >= u64Now, ("%RU64 < %RU64\n", pVM->tm.s.u64VirtualSync, u64Now));
    +        AssertMsg(pVM->tm.s.u64VirtualSync >= u64Now, ("%'RU64 < %'RU64\n", pVM->tm.s.u64VirtualSync, u64Now));
             const uint64_t offSlack = pVM->tm.s.u64VirtualSync - u64Now;
             STAM_STATS({

    @@ -2068 +2072 @@
                     STAM_PROFILE_ADV_STOP(&pVM->tm.s.StatVirtualSyncCatchup, c);
                     ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncCatchUp, false);
    -                Log4(("TM: %RU64/%RU64: caught up\n", u64VirtualNow2 - offNew, offLag));
    +                Log4(("TM: %'RU64/-%'8RU64: caught up [pt]\n", u64VirtualNow2 - offNew, offLag));
                 }
                 else if (offLag <= pVM->tm.s.u64VirtualSyncCatchUpGiveUpThreshold)

    @@ -2081 +2085 @@
                         STAM_COUNTER_INC(&pVM->tm.s.aStatVirtualSyncCatchupAdjust[i]);
                         ASMAtomicWriteU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage, pVM->tm.s.aVirtualSyncCatchUpPeriods[i].u32Percentage);
    -                    Log4(("TM: %RU64/%RU64: adj %u%%\n", u64VirtualNow2 - offNew, offLag, pVM->tm.s.u32VirtualSyncCatchUpPercentage));
    +                    Log4(("TM: %'RU64/%'8RU64: adj %u%%\n", u64VirtualNow2 - offNew, offLag, pVM->tm.s.u32VirtualSyncCatchUpPercentage));
                     }
                     pVM->tm.s.u64VirtualSyncCatchUpPrev = u64VirtualNow2;

    @@ -2092 +2096 @@
                     ASMAtomicWriteU64((uint64_t volatile *)&pVM->tm.s.offVirtualSyncGivenUp, offNew);
                     ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncCatchUp, false);
    -                Log4(("TM: %RU64/%RU64: give up %u%%\n", u64VirtualNow2 - offNew, offLag, pVM->tm.s.u32VirtualSyncCatchUpPercentage));
    -                LogRel(("TM: Giving up catch-up attempt at a %RU64 ns lag; new total: %RU64 ns\n", offLag, offNew));
    +                Log4(("TM: %'RU64/%'8RU64: give up %u%%\n", u64VirtualNow2 - offNew, offLag, pVM->tm.s.u32VirtualSyncCatchUpPercentage));
    +                LogRel(("TM: Giving up catch-up attempt at a %'RU64 ns lag; new total: %'RU64 ns\n", offLag, offNew));
                 }
             }

    @@ -2109 +2113 @@
                     ASMAtomicWriteU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage, pVM->tm.s.aVirtualSyncCatchUpPeriods[i].u32Percentage);
                     ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncCatchUp, true);
    -                Log4(("TM: %RU64/%RU64: catch-up %u%%\n", u64VirtualNow2 - offNew, offLag, pVM->tm.s.u32VirtualSyncCatchUpPercentage));
    +                Log4(("TM: %'RU64/%'8RU64: catch-up %u%%\n", u64VirtualNow2 - offNew, offLag, pVM->tm.s.u32VirtualSyncCatchUpPercentage));
                 }
                 else

    @@ -2116 +2120 @@
                     STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGiveUpBeforeStarting);
                     ASMAtomicWriteU64((uint64_t volatile *)&pVM->tm.s.offVirtualSyncGivenUp, offNew);
    -                Log4(("TM: %RU64/%RU64: give up\n", u64VirtualNow2 - offNew, offLag));
    -                LogRel(("TM: Not bothering to attempt catching up a %RU64 ns lag; new total: %RU64\n", offLag, offNew));
    +                Log4(("TM: %'RU64/%'8RU64: give up\n", u64VirtualNow2 - offNew, offLag));
    +                LogRel(("TM: Not bothering to attempt catching up a %'RU64 ns lag; new total: %'RU64\n", offLag, offNew));
                 }
             }

    @@ -2231 +2235 @@
                 return SSMR3PutU64(pSSM, pTimer->u64Expire);

    -        case TMTIMERSTATE_EXPIRED:
    +        case TMTIMERSTATE_EXPIRED_GET_UNLINK:
    +        case TMTIMERSTATE_EXPIRED_DELIVER:
             case TMTIMERSTATE_DESTROY:
             case TMTIMERSTATE_FREE:
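
Most of the logging changes in TM.cpp (and in the files below) replace %RU64 with %'RU64. In IPRT's format engine, which backs LogRel/Log4 as well as RTStrPrintf, the apostrophe flag groups the digits of large values, so tick counts and nanosecond lags become readable at a glance in the release log. A minimal sketch of the flag's effect, assuming only the IPRT string API (the exact separator character is the formatter's choice):

    #include <iprt/string.h>   /* RTStrPrintf and the IPRT %RU64 format extensions. */

    static void tmShowGroupingFlag(uint64_t cTicksPerSec)
    {
        char szPlain[64], szGrouped[64];
        RTStrPrintf(szPlain,   sizeof(szPlain),   "%RU64",  cTicksPerSec);  /* e.g. "2672159546"    */
        RTStrPrintf(szGrouped, sizeof(szGrouped), "%'RU64", cTicksPerSec);  /* e.g. "2 672 159 546" */
    }

The same file also renames the raw-mode context statistics nodes from /TM/GC/* to /TM/RC/*, matching the GC-to-RC (guest context to raw-mode context) naming convention used elsewhere in the VMM.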
  • trunk/src/VBox/VMM/TMInternal.h (r19820 → r20050)

    @@ -70 +70 @@
         /** Timer is active. */
         TMTIMERSTATE_ACTIVE,
    -    /** Timer is expired, is being delivered. */
    -    TMTIMERSTATE_EXPIRED,
    +    /** Timer is expired, getting expire and unlinking. */
    +    TMTIMERSTATE_EXPIRED_GET_UNLINK,
    +    /** Timer is expired and is being delivered. */
    +    TMTIMERSTATE_EXPIRED_DELIVER,

         /** Timer is stopped but still in the active list.
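
This header hunk is the heart of the changeset: the single TMTIMERSTATE_EXPIRED state is split in two, separating the instant a timer is claimed and unlinked from the active list (EXPIRED_GET_UNLINK) from the window in which its callback is being delivered (EXPIRED_DELIVER). Below is a self-contained sketch of that expiry path using C11 atomics in place of the TM_TRY_SET_STATE/TM_SET_STATE macros; the state names mirror the diff, everything else is illustrative rather than VirtualBox API:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative stand-ins for the TM timer states touched by this changeset. */
    typedef enum { STOPPED, ACTIVE, EXPIRED_GET_UNLINK, EXPIRED_DELIVER } TimerState;

    /* Mirrors the guarantee TM_TRY_SET_STATE needs: the transition succeeds
     * only if the state is still the one the caller observed (no lost races). */
    static bool trySetState(_Atomic TimerState *penmState, TimerState enmNew, TimerState enmOld)
    {
        return atomic_compare_exchange_strong(penmState, &enmOld, enmNew);
    }

    int main(void)
    {
        _Atomic TimerState enmState = ACTIVE;

        /* tmR3TimerQueueRun: claim the expired timer... */
        if (trySetState(&enmState, EXPIRED_GET_UNLINK, ACTIVE))
        {
            /* ...read the expire time and take it off the active list... */
            atomic_store(&enmState, EXPIRED_DELIVER);         /* ...then fire the callback. */
            /* ...invoke the timer callback here... */
            trySetState(&enmState, STOPPED, EXPIRED_DELIVER); /* No-op if the callback re-armed it. */
        }
        printf("final state: %d\n", (int)enmState);           /* 0 == STOPPED */
        return 0;
    }

The payoff shows up in the switch statements of TMAll.cpp below: a racing TMTimerSet or TMTimerStop can now distinguish a timer that is still being unlinked (EXPIRED_GET_UNLINK, retry or "shouldn't get here") from one whose callback is running (EXPIRED_DELIVER, treated like a stopped timer).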
  • trunk/src/VBox/VMM/VMMAll/TMAll.cpp (r19821 → r20050)

    @@ -466 +466 @@
     #endif
             }
    -        LogFlow(("TMTimerPoll: expire1=%RU64 <= now=%RU64\n", u64Expire1, u64Now));
    +        LogFlow(("TMTimerPoll: expire1=%'RU64 <= now=%'RU64\n", u64Expire1, u64Now));
             return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtual);
         }

    @@ -514 +514 @@

                     STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
    -                LogFlow(("TMTimerPoll: expire2=%RU64 <= now=%RU64\n", u64Expire2, u64Now));
    +                LogFlow(("TMTimerPoll: expire2=%'RU64 <= now=%'RU64\n", u64Expire2, u64Now));
                     return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
                 }

    @@ -612 +612 @@
             }
             STAM_COUNTER_INC(&pVM->tm.s.StatPollVirtualSync);
    -        LogFlow(("TMTimerPoll: expire2=%RU64 <= now=%RU64\n", u64Expire2, u64Now));
    +        LogFlow(("TMTimerPoll: expire2=%'RU64 <= now=%'RU64\n", u64Expire2, u64Now));
             return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
         }

    @@ -740 +740 @@
              */
             TMTIMERSTATE    enmState = pTimer->enmState;
    -        Log2(("TMTimerSet: %p:{.enmState=%s, .pszDesc='%s'} cRetries=%d u64Expire=%llu\n",
    +        Log2(("TMTimerSet: %p:{.enmState=%s, .pszDesc='%s'} cRetries=%d u64Expire=%'RU64\n",
                   pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), cRetries, u64Expire));
             switch (enmState)
             {
    -            case TMTIMERSTATE_EXPIRED:
    +            case TMTIMERSTATE_EXPIRED_DELIVER:
                 case TMTIMERSTATE_STOPPED:
                     if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))

    @@ -753 +753 @@
                                   ||    pTimer->CTX_SUFF(pVM)->tm.s.fVirtualSyncTicking
                                   ||    u64Expire >= pTimer->CTX_SUFF(pVM)->tm.s.u64VirtualSync,
    -                              ("%RU64 < %RU64 %s\n", u64Expire, pTimer->CTX_SUFF(pVM)->tm.s.u64VirtualSync, R3STRING(pTimer->pszDesc)));
    +                              ("%'RU64 < %'RU64 %s\n", u64Expire, pTimer->CTX_SUFF(pVM)->tm.s.u64VirtualSync, R3STRING(pTimer->pszDesc)));
                         pTimer->u64Expire = u64Expire;
                         TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);

    @@ -799 +799 @@


    +            case TMTIMERSTATE_EXPIRED_GET_UNLINK:
                 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
                 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:

    @@ -953 +954 @@
             switch (enmState)
             {
    -            case TMTIMERSTATE_EXPIRED:
    +            case TMTIMERSTATE_EXPIRED_DELIVER:
                     //AssertMsgFailed(("You don't stop an expired timer dude!\n"));
                     return VERR_INVALID_PARAMETER;

    @@ -989 +990 @@
                     break;

    +            case TMTIMERSTATE_EXPIRED_GET_UNLINK:
                 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
                 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:

    @@ -1051 +1053 @@
                 return ~(uint64_t)0;
         }
    -    //Log2(("TMTimerGet: returns %llu (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
    +    //Log2(("TMTimerGet: returns %'RU64 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
         //      u64, pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
         return u64;

    @@ -1330 +1332 @@
             switch (enmState)
             {
    -            case TMTIMERSTATE_EXPIRED:
    +            case TMTIMERSTATE_EXPIRED_GET_UNLINK:
    +            case TMTIMERSTATE_EXPIRED_DELIVER:
                 case TMTIMERSTATE_STOPPED:
                 case TMTIMERSTATE_PENDING_STOP:

    @@ -1341 +1344 @@
                 case TMTIMERSTATE_PENDING_RESCHEDULE:
                 case TMTIMERSTATE_PENDING_SCHEDULE:
    -                Log2(("TMTimerGetExpire: returns %llu (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
    +                Log2(("TMTimerGetExpire: returns %'RU64 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
                           pTimer->u64Expire, pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
                     return pTimer->u64Expire;

    @@ -1388 +1391 @@
         {
             case TMTIMERSTATE_STOPPED:
    -        case TMTIMERSTATE_EXPIRED:
    +        case TMTIMERSTATE_EXPIRED_GET_UNLINK:
    +        case TMTIMERSTATE_EXPIRED_DELIVER:
             case TMTIMERSTATE_PENDING_STOP:
             case TMTIMERSTATE_PENDING_STOP_SCHEDULE:

    @@ -1436 +1440 @@
            CASE( 1,STOPPED);
            CASE( 2,ACTIVE);
    -       CASE( 3,EXPIRED);
    -       CASE( 4,PENDING_STOP);
    -       CASE( 5,PENDING_STOP_SCHEDULE);
    -       CASE( 6,PENDING_SCHEDULE_SET_EXPIRE);
    -       CASE( 7,PENDING_SCHEDULE);
    -       CASE( 8,PENDING_RESCHEDULE_SET_EXPIRE);
    -       CASE( 9,PENDING_RESCHEDULE);
    -       CASE(10,DESTROY);
    -       CASE(11,FREE);
    +       CASE( 3,EXPIRED_GET_UNLINK);
    +       CASE( 4,EXPIRED_DELIVER);
    +       CASE( 5,PENDING_STOP);
    +       CASE( 6,PENDING_STOP_SCHEDULE);
    +       CASE( 7,PENDING_SCHEDULE_SET_EXPIRE);
    +       CASE( 8,PENDING_SCHEDULE);
    +       CASE( 9,PENDING_RESCHEDULE_SET_EXPIRE);
    +       CASE(10,PENDING_RESCHEDULE);
    +       CASE(11,DESTROY);
    +       CASE(12,FREE);
            default:
                AssertMsgFailed(("Invalid state enmState=%d\n", enmState));

    @@ -1597 +1602 @@
                 case TMTIMERSTATE_STOPPED:
                 case TMTIMERSTATE_ACTIVE:
    -            case TMTIMERSTATE_EXPIRED:
    +            case TMTIMERSTATE_EXPIRED_GET_UNLINK:
    +            case TMTIMERSTATE_EXPIRED_DELIVER:
                 default:
                     AssertMsgFailed(("Timer (%p) in the scheduling list has an invalid state %s (%d)!",

    @@ -1623 +1629 @@
          */
        int32_t offNext = ASMAtomicXchgS32(&pQueue->offSchedule, 0);
    -   Log2(("tmTimerQueueSchedule: pQueue=%p:{.enmClock=%d, offNext=%RI32}\n", pQueue, pQueue->enmClock, offNext));
    +   Log2(("tmTimerQueueSchedule: pQueue=%p:{.enmClock=%d, offNext=%RI32, .u64Expired=%'RU64}\n", pQueue, pQueue->enmClock, offNext, pQueue->u64Expire));
        if (!offNext)
            return;

    @@ -1644 +1650 @@
            Log2(("tmTimerQueueSchedule: %p: new %s\n", pTimer, tmTimerState(pTimer->enmState)));
        } /* foreach timer in current schedule batch. */
    +   Log2(("tmTimerQueueSchedule: u64Expired=%'RU64\n", pQueue->u64Expire));
     }

    @@ -1720 +1727 @@
                 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
                 case TMTIMERSTATE_STOPPED:
    -            case TMTIMERSTATE_EXPIRED:
    +            case TMTIMERSTATE_EXPIRED_DELIVER:
                 {
                     Assert(!pCur->offNext);

    @@ -1740 +1747 @@

                 /* shouldn't get here! */
    +            case TMTIMERSTATE_EXPIRED_GET_UNLINK:
                 case TMTIMERSTATE_DESTROY:
                 default:
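
The renumbered CASE table in tmTimerState shows the cost of inserting states mid-enum: each name is pinned to its expected ordinal, so adding EXPIRED_GET_UNLINK and EXPIRED_DELIVER after ACTIVE shifts every later entry by one. A stand-alone sketch of that pinned name-table pattern (hypothetical names; the real macro lives inside tmTimerState and presumably checks the ordinal at build time):

    #include <assert.h>
    #include <stdio.h>

    /* Illustrative mirror of the renumbered enum; the leading placeholder is
     * assumed so that STOPPED lands on ordinal 1 as the CASE table expects. */
    typedef enum {
        INVALID = 0,
        STOPPED, ACTIVE,
        EXPIRED_GET_UNLINK, EXPIRED_DELIVER,
        PENDING_STOP, PENDING_STOP_SCHEDULE,
        PENDING_SCHEDULE_SET_EXPIRE, PENDING_SCHEDULE,
        PENDING_RESCHEDULE_SET_EXPIRE, PENDING_RESCHEDULE,
        DESTROY, FREE
    } TimerState;

    static const char *timerStateName(TimerState enmState)
    {
        /* Pin each name to its ordinal; a mismatch trips the assertion, which is
         * what forces the wholesale renumbering whenever a state is inserted. */
    #define CASE(num, state) case state: assert((num) == state); return #state
        switch (enmState)
        {
            CASE( 1, STOPPED);
            CASE( 2, ACTIVE);
            CASE( 3, EXPIRED_GET_UNLINK);
            CASE( 4, EXPIRED_DELIVER);
            CASE( 5, PENDING_STOP);
            CASE( 6, PENDING_STOP_SCHEDULE);
            CASE( 7, PENDING_SCHEDULE_SET_EXPIRE);
            CASE( 8, PENDING_SCHEDULE);
            CASE( 9, PENDING_RESCHEDULE_SET_EXPIRE);
            CASE(10, PENDING_RESCHEDULE);
            CASE(11, DESTROY);
            CASE(12, FREE);
            default: return "UNKNOWN";
        }
    #undef CASE
    }

    int main(void)
    {
        printf("%s\n", timerStateName(EXPIRED_DELIVER)); /* prints EXPIRED_DELIVER */
        return 0;
    }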
  • trunk/src/VBox/VMM/VMMAll/TMAllVirtual.cpp (r19810 → r20050)

    @@ -419 +419 @@
             {
                 off -= u64Sub;
    -            Log4(("TM: %RU64/%RU64: sub %RU32\n", u64 - off, off - pVM->tm.s.offVirtualSyncGivenUp, u64Sub));
    +            Log4(("TM: %'RU64/-%'8RU64: sub %RU32 [vsghcul]\n", u64 - off, off - pVM->tm.s.offVirtualSyncGivenUp, u64Sub));
             }
             else

    @@ -427 +427 @@
                 off = pVM->tm.s.offVirtualSyncGivenUp;
                 fStop = true;
    -            Log4(("TM: %RU64/0: caught up\n", u64));
    +            Log4(("TM: %'RU64/0: caught up [vsghcul]\n", u64));
             }
         }

    @@ -465 +465 @@
             VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
             Log5(("TMAllVirtual(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
    -        Log4(("TM: %RU64/%RU64: exp tmr=>ff\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
    +        Log4(("TM: %'RU64/-%'8RU64: exp tmr=>ff [vsghcul]\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
             tmVirtualSyncUnlock(pVM);

    @@ -477 +477 @@
         STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLocked);

    +    Log6(("tmVirtualSyncGetHandleCatchUpLocked -> %'RU64\n", u64));
         return u64;
     }

    @@ -498 +499 @@
             tmVirtualSyncUnlock(pVM);
             STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLocked);
    +        Log6(("tmVirtualSyncGetLocked -> %'RU64 [stopped]\n", u64));
             return u64;
         }

    @@ -526 +528 @@
             PVMCPU pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu];
             VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
    -        Log5(("TMAllVirtual(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
    -        Log4(("TM: %RU64/%RU64: exp tmr=>ff\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
    +        Log5(("TMAllVirtual(%u): FF: %d -> 1\n", __LINE__, !!VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
    +        Log4(("TM: %'RU64/-%'8RU64: exp tmr=>ff [vsgl]\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
             tmVirtualSyncUnlock(pVM);

    @@ -538 +540 @@
         }
         STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLocked);
    +    Log6(("tmVirtualSyncGetLocked -> %'RU64\n", u64));
         return u64;
     }

    @@ -596 +599 @@
                     {
                         STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLockless);
    +                    Log6(("tmVirtualSyncGetEx -> %'RU64 [lockless]\n", off));
                         return off;
                     }

    @@ -607 +611 @@
             {
                 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLockless);
    +            Log6(("tmVirtualSyncGetEx -> %'RU64 [lockless/stopped]\n", off));
                 return off;
             }

    @@ -645 +650 @@
                     && cOuterTries > 0)
                     continue;
    +            Log6(("tmVirtualSyncGetEx -> %'RU64 [stopped]\n", off));
                 return off;
             }

    @@ -668 +674 @@
                         {
                             off -= u64Sub;
    -                        Log4(("TM: %RU64/%RU64: sub %RU32 (NoLock)\n", u64 - off, pVM->tm.s.offVirtualSync - offGivenUp, u64Sub));
    +                        Log4(("TM: %'RU64/-%'8RU64: sub %RU32 [NoLock]\n", u64 - off, pVM->tm.s.offVirtualSync - offGivenUp, u64Sub));
                         }
                         else

    @@ -675 +681 @@
                             STAM_PROFILE_ADV_STOP(&pVM->tm.s.StatVirtualSyncCatchup, c);
                             off = offGivenUp;
    -                        Log4(("TM: %RU64/0: caught up\n", u64));
    +                        Log4(("TM: %'RU64/0: caught up [NoLock]\n", u64));
                         }
                     }

    @@ -719 +725 @@
     #endif
                 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetSetFF);
    -            Log4(("TM: %RU64/%RU64: exp tmr=>ff (NoLock)\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
    +            Log4(("TM: %'RU64/-%'8RU64: exp tmr=>ff [NoLock]\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
             }
             else
    -            Log4(("TM: %RU64/%RU64: exp tmr (NoLock)\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
    +            Log4(("TM: %'RU64/-%'8RU64: exp tmr [NoLock]\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
             STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetExpired);
         }

    @@ -728 +734 @@
    +    Log6(("tmVirtualSyncGetEx -> %'RU64\n", u64));
         return u64;
     }
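
The retagged Log4 lines above ([vsghcul], [vsgl], [NoLock]) all trace the same virtual-sync catch-up arithmetic: while catching up, the virtual-sync clock runs a configured percentage faster than the virtual clock, shaving the accumulated lag until it is absorbed or only the given-up portion remains. A hedged, self-contained sketch of one such step; the names and the exact rounding are illustrative, not the VirtualBox implementation:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* One catch-up step: credit the guest clock with u32Percentage% extra time
     * for this slice and shrink the lag ('off') accordingly. */
    static uint64_t catchUpStep(uint64_t off, uint64_t offGivenUp,
                                uint64_t cNsElapsed, uint32_t u32Percentage,
                                bool *pfCaughtUp)
    {
        uint64_t u64Sub = cNsElapsed * u32Percentage / 100;  /* extra time earned */
        if (off > u64Sub + offGivenUp)
        {
            *pfCaughtUp = false;
            return off - u64Sub;          /* still behind: the lag shrinks */
        }
        *pfCaughtUp = true;               /* lag fully absorbed */
        return offGivenUp;                /* only the given-up part remains */
    }

    int main(void)
    {
        bool fCaughtUp;
        uint64_t off = catchUpStep(1500, 0, 10000, 10, &fCaughtUp);
        printf("off=%llu caughtUp=%d\n", (unsigned long long)off, fCaughtUp);  /* off=500 caughtUp=0 */
        return 0;
    }

In the new trace format the second field is the remaining lag, printed minus-prefixed with a fixed width of 8 (%'8RU64) so that consecutive catch-up traces line up in the log.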