VirtualBox

Changeset 2869 in vbox for trunk/src/VBox


Timestamp:
May 25, 2007 1:15:39 PM
Author:
vboxsync
Message:

Create a specialized version of the RTTimeNanoTS code in timesup.cpp for calculating the virtual time. I hope this will eliminate the w32_2 trouble and related issues seen on the black box and my laptop.
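For context, the heart of such a specialized reader is converting a raw TSC delta into nanoseconds using the per-CPU factors published in the GIP, which is the arithmetic the new tmVirtualGetRawNanoTS() below performs. A minimal sketch of that conversion, with a hypothetical GIPSAMPLE struct standing in for the real SUPGIPCPU fields:

    #include <stdint.h>

    /* Hypothetical stand-in for the SUPGIPCPU fields used by the changeset. */
    typedef struct GIPSAMPLE
    {
        uint64_t u64NanoTS;             /* nanosecond timestamp at the last GIP update */
        uint64_t u64TSC;                /* TSC value captured at that same update */
        uint32_t u32UpdateIntervalTSC;  /* TSC ticks per GIP update interval */
        uint32_t u32UpdateIntervalNS;   /* nanoseconds per GIP update interval */
    } GIPSAMPLE;

    /* ns = base_ns + (tsc - base_tsc) * interval_ns / interval_tsc, with the
       delta capped at one update interval just like the changeset does. */
    static uint64_t gipSampleToNanoTS(const GIPSAMPLE *pSample, uint64_t uTscNow)
    {
        uint64_t u64Delta = uTscNow - pSample->u64TSC;
        if (u64Delta > pSample->u32UpdateIntervalTSC)
            u64Delta = pSample->u32UpdateIntervalTSC; /* interval expired; cap it */
        return pSample->u64NanoTS
             + u64Delta * pSample->u32UpdateIntervalNS / pSample->u32UpdateIntervalTSC;
    }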

Location:
trunk/src/VBox/VMM
Files:
4 edited

Legend:

Unmodified (space prefix)
Added (prefixed "+")
Removed (prefixed "-")
  • trunk/src/VBox/VMM/TM.cpp

r2861 → r2869

     /*
-     * We indirectly - thru RTTimeNanoTS and RTTimeMilliTS - use the global
-     * info page (GIP) for both the virtual and the real clock. By mapping
-     * the GIP into guest context we can get just as accurate time even there.
-     * All that's required is that the g_pSUPGlobalInfoPage symbol is available
-     * to the GC Runtime.
+     * We directly use the GIP to calculate the virtual time. We map
+     * the GIP into the guest context so we can do this calculation there
+     * as well and save costly world switches.
      */
     pVM->tm.s.pvGIPR3 = (void *)g_pSUPGlobalInfoPage;
     
     MMR3HyperReserve(pVM, PAGE_SIZE, "fence", NULL);

+    /* Check assumptions made in TMAllVirtual.cpp about the GIP update interval. */
+    if (    g_pSUPGlobalInfoPage->u32Magic == SUPGLOBALINFOPAGE_MAGIC
+        &&  g_pSUPGlobalInfoPage->u32UpdateIntervalNS >= 250000000 /* 0.25s */)
+        return VMSetError(pVM, VERR_INTERNAL_ERROR, RT_SRC_POS,
+                          N_("The GIP update interval is too big. u32UpdateIntervalNS=%RU32 (u32UpdateHz=%RU32)\n"),
+                          g_pSUPGlobalInfoPage->u32UpdateIntervalNS, g_pSUPGlobalInfoPage->u32UpdateHz);

     /*
     
         return rc;

+    /*
+     * Register statistics.
+     */
+    STAM_REL_REG_USED(pVM, (void *)&pVM->tm.s.c1nsVirtualRawSteps,   STAMTYPE_U32, "/TM/1nsSteps",    STAMUNIT_OCCURENCES,      "Virtual time 1ns steps (due to TSC / GIP variations)");
+    STAM_REL_REG_USED(pVM, (void *)&pVM->tm.s.cVirtualRawBadRawPrev, STAMTYPE_U32, "/TM/BadPrevTime", STAMUNIT_OCCURENCES,      "Times the previous virtual time was considered erratic (shouldn't ever happen).");
+
 #ifdef VBOX_WITH_STATISTICS
-    /*
-     * Register statistics.
-     */
     STAM_REG(pVM, &pVM->tm.s.StatDoQueues,          STAMTYPE_PROFILE,       "/TM/DoQueues",         STAMUNIT_TICKS_PER_CALL,    "Profiling timer TMR3TimerQueuesDo.");
     STAM_REG(pVM, &pVM->tm.s.StatDoQueuesSchedule,  STAMTYPE_PROFILE_ADV,   "/TM/DoQueues/Schedule",STAMUNIT_TICKS_PER_CALL,    "The scheduling part.");
     
             if (tmR3HasFixedTSC())
                 /* Sleep a bit to get a more reliable CpuHz value. */
-                RTThreadSleep(32);             
+                RTThreadSleep(32);
             else
             {
     
             "            fVirtualSyncCatchUp=%RTbool (prev=%RTbool)\n",
             u64Now,
-            u64Max, 
+            u64Max,
             pNext->u64Expire,
             pVM->tm.s.u64VirtualSync,
     
             pVM->tm.s.u64VirtualSyncStoppedTS,
             pVM->tm.s.u32VirtualSyncCatchUpPercentage,
-            pVM->tm.s.fVirtualSyncTicking, fWasTicking, 
+            pVM->tm.s.fVirtualSyncTicking, fWasTicking,
             pVM->tm.s.fVirtualSyncCatchUp, fWasInCatchup));
     Assert(u64Now <= u64VirtualNow - pVM->tm.s.offVirtualSyncGivenUp);
     
                 /* don't bother */
 if (offLag & BIT64(63)) //debugging - remove.
-    LogRel(("TM: offLag is negative! offLag=%RI64 (%#RX64) offNew=%#RX64 u64Elapsed=%#RX64 offSlack=%#RX64 u64VirtualNow2=%#RX64 u64VirtualNow=%#RX64 u64VirtualSync=%#RX64 offVirtualSyncGivenUp=%#RX64 u64Now=%#RX64 u64Max=%#RX64\n", 
+    LogRel(("TM: offLag is negative! offLag=%RI64 (%#RX64) offNew=%#RX64 u64Elapsed=%#RX64 offSlack=%#RX64 u64VirtualNow2=%#RX64 u64VirtualNow=%#RX64 u64VirtualSync=%#RX64 offVirtualSyncGivenUp=%#RX64 u64Now=%#RX64 u64Max=%#RX64\n",
             offLag, offLag, offNew, u64Elapsed, offSlack, u64VirtualNow2, u64VirtualNow, pVM->tm.s.u64VirtualSync, pVM->tm.s.offVirtualSyncGivenUp, u64Now, u64Max));
                 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGiveUpBeforeStarting);
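A note on the new sanity check above: the monotonicity logic added in TMAllVirtual.cpp treats a backwards step as benign only when it is smaller than two GIP update intervals, and that window has to stay small next to the 1s "recent call" range. The snippet below is a rough, hypothetical illustration of the margins at the rejected 0.25s limit, not VirtualBox code:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t u32UpdateIntervalNS = 250000000;  /* 0.25s: the first value TM.cpp rejects */
        uint64_t cnsBenignBackStep   = 2 * (uint64_t)u32UpdateIntervalNS;
        uint64_t cnsRecentWindow     = 1000000000; /* the 1s "recent call" fast path */

        /* At the rejected limit the benign back-step window is already half the
           recent-call window, so the delta classification would start misfiring. */
        printf("benign back-step window: %llu ns (%.0f%% of the 1s window)\n",
               (unsigned long long)cnsBenignBackStep,
               100.0 * (double)cnsBenignBackStep / (double)cnsRecentWindow);
        return 0;
    }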
  • trunk/src/VBox/VMM/TMInternal.h

r2861 → r2869

      * Only valid when fVirtualWarpDrive is set. */
     uint64_t                    u64VirtualWarpDriveStart;
+    /** The previously returned nanosecond TS.
+     * This handles TSC drift on SMP systems and expired update intervals.
+     * The valid range is u64NanoTS to u64NanoTS + 1000000000 (i.e. 1 sec). */
+    uint64_t volatile           u64VirtualRawPrev;
+    /** The number of times we've had to resort to 1ns walking. */
+    uint32_t volatile           c1nsVirtualRawSteps;
+    /** Number of times u64VirtualRawPrev has been considered bad. */
+    uint32_t volatile           cVirtualRawBadRawPrev;

     /** The guest virtual timer synchronous time when fVirtualSyncTicking is cleared. */
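The new u64VirtualRawPrev field drives a compare-exchange loop that keeps returned timestamps from going backwards. A simplified sketch of that pattern, collapsing the changeset's three delta cases into a single monotonicity rule and using C11 atomics in place of IPRT's ASMAtomic* API:

    #include <stdatomic.h>
    #include <stdint.h>

    /* Stand-in for pVM->tm.s.u64VirtualRawPrev. */
    static _Atomic uint64_t g_u64Prev;

    /* Return u64New, bumped to one past the previous return value whenever it
       would otherwise move backwards, so callers never see time regress. */
    static uint64_t monotonicReturn(uint64_t u64New)
    {
        uint64_t u64Prev = atomic_load(&g_u64Prev);
        for (;;)
        {
            if (u64New <= u64Prev)
                u64New = u64Prev + 1;   /* the "1ns stepping" case */
            /* On failure u64Prev is reloaded with the racing writer's value. */
            if (atomic_compare_exchange_weak(&g_u64Prev, &u64Prev, u64New))
                return u64New;
        }
    }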
  • trunk/src/VBox/VMM/VMMAll/TMAllVirtual.cpp

r2861 → r2869



+/**
+ * This is (mostly) the same as rtTimeNanoTSInternal() except
+ * for the two globals which live in TM.
+ *
+ * @returns Nanosecond timestamp.
+ * @param   pVM     The VM handle.
+ */
+static uint64_t tmVirtualGetRawNanoTS(PVM pVM)
+{
+    uint64_t    u64Delta;
+    uint32_t    u32NanoTSFactor0;
+    uint64_t    u64TSC;
+    uint64_t    u64NanoTS;
+    uint32_t    u32UpdateIntervalTSC;
+
+    /*
+     * Read the GIP data.
+     */
+    for (;;)
+    {
+        uint32_t u32TransactionId;
+        PCSUPGLOBALINFOPAGE pGip = g_pSUPGlobalInfoPage;
+#ifdef IN_RING3
+        if (RT_UNLIKELY(!pGip || pGip->u32Magic != SUPGLOBALINFOPAGE_MAGIC))
+            return RTTimeSystemNanoTS();
+#endif
+
+        if (pGip->u32Mode != SUPGIPMODE_ASYNC_TSC)
+        {
+            u32TransactionId = pGip->aCPUs[0].u32TransactionId;
+#ifdef __L4__
+            Assert((u32TransactionId & 1) == 0);
+#endif
+            u32UpdateIntervalTSC = pGip->aCPUs[0].u32UpdateIntervalTSC;
+            u64NanoTS = pGip->aCPUs[0].u64NanoTS;
+            u64TSC = pGip->aCPUs[0].u64TSC;
+            u32NanoTSFactor0 = pGip->u32UpdateIntervalNS;
+            u64Delta = ASMReadTSC();
+            if (RT_UNLIKELY(    pGip->aCPUs[0].u32TransactionId != u32TransactionId
+                            ||  (u32TransactionId & 1)))
+                continue;
+        }
+        else
+        {
+            /* SUPGIPMODE_ASYNC_TSC */
+            PCSUPGIPCPU pGipCpu;
+
+            uint8_t u8ApicId = ASMGetApicId();
+            if (RT_LIKELY(u8ApicId < RT_ELEMENTS(pGip->aCPUs)))
+                pGipCpu = &pGip->aCPUs[u8ApicId];
+            else
+            {
+                AssertMsgFailed(("%x\n", u8ApicId));
+                pGipCpu = &pGip->aCPUs[0];
+            }
+
+            u32TransactionId = pGipCpu->u32TransactionId;
+#ifdef __L4__
+            Assert((u32TransactionId & 1) == 0);
+#endif
+            u32UpdateIntervalTSC = pGipCpu->u32UpdateIntervalTSC;
+            u64NanoTS = pGipCpu->u64NanoTS;
+            u64TSC = pGipCpu->u64TSC;
+            u32NanoTSFactor0 = pGip->u32UpdateIntervalNS;
+            u64Delta = ASMReadTSC();
+            if (RT_UNLIKELY(u8ApicId != ASMGetApicId()))
+                continue;
+            if (RT_UNLIKELY(    pGipCpu->u32TransactionId != u32TransactionId
+                            ||  (u32TransactionId & 1)))
+                continue;
+        }
+        break;
+    }
+
+    /*
+     * Calc NanoTS delta.
+     */
+    u64Delta -= u64TSC;
+    if (u64Delta > u32UpdateIntervalTSC)
+    {
+        /*
+         * We've expired the interval, cap it. If we're here for the 2nd
+         * time without any GIP update in between, the checks against
+         * pVM->tm.s.u64VirtualRawPrev below will force 1ns stepping.
+         */
+        u64Delta = u32UpdateIntervalTSC;
+    }
+#if !defined(_MSC_VER) || defined(__AMD64__) /* GCC makes very pretty code from these two inline calls, while MSC cannot. */
+    u64Delta = ASMMult2xU32RetU64((uint32_t)u64Delta, u32NanoTSFactor0);
+    u64Delta = ASMDivU64ByU32RetU32(u64Delta, u32UpdateIntervalTSC);
+#else
+    __asm
+    {
+        mov     eax, dword ptr [u64Delta]
+        mul     dword ptr [u32NanoTSFactor0]
+        div     dword ptr [u32UpdateIntervalTSC]
+        mov     dword ptr [u64Delta], eax
+        xor     edx, edx
+        mov     dword ptr [u64Delta + 4], edx
+    }
+#endif
+
+    /*
+     * Calculate the time and compare it with the previously returned value.
+     *
+     * Since this function is called *very* frequently when the VM is running
+     * and then mostly on EMT, we can restrict the valid range of the delta
+     * (-1s to 2*GipUpdates) and simplify/optimize the default path.
+     */
+    u64NanoTS += u64Delta;
+    uint64_t u64PrevNanoTS = ASMAtomicReadU64(&pVM->tm.s.u64VirtualRawPrev);
+    uint64_t u64DeltaPrev = u64NanoTS - u64PrevNanoTS;
+    if (RT_LIKELY(u64DeltaPrev < 1000000000 /* 1s */))
+        /* frequent - less than 1s since last call. */;
+    else if (   (int64_t)u64DeltaPrev < 0
+             && (int64_t)u64DeltaPrev + u32NanoTSFactor0 * 2 > 0)
+    {
+        /* occasional - u64NanoTS is in the 'past' relative to previous returns. */
+        ASMAtomicIncU32(&pVM->tm.s.c1nsVirtualRawSteps);
+        u64NanoTS = u64PrevNanoTS + 1;
+    }
+    else if (u64PrevNanoTS)
+    {
+        /* Something has gone bust; a negative offset is really bad. */
+        ASMAtomicIncU32(&pVM->tm.s.cVirtualRawBadRawPrev);
+        if ((int64_t)u64DeltaPrev < 0)
+            LogRel(("TM: u64DeltaPrev=%RI64 u64PrevNanoTS=0x%016RX64 u64NanoTS=0x%016RX64 u64Delta=%#RX64\n",
+                    u64DeltaPrev, u64PrevNanoTS, u64NanoTS, u64Delta));
+        else
+            Log(("TM: u64DeltaPrev=%RI64 u64PrevNanoTS=0x%016RX64 u64NanoTS=0x%016RX64 u64Delta=%#RX64 (debugging?)\n",
+                 u64DeltaPrev, u64PrevNanoTS, u64NanoTS, u64Delta));
+#ifdef DEBUG_bird
+        AssertMsgFailed(("u64DeltaPrev=%RI64 u64PrevNanoTS=0x%016RX64 u64NanoTS=0x%016RX64 u64Delta=%#RX64\n",
+                         u64DeltaPrev, u64PrevNanoTS, u64NanoTS, u64Delta));
+#endif
+    }
+    /* else: We're resuming (see TMVirtualResume). */
+    if (RT_LIKELY(ASMAtomicCmpXchgU64(&pVM->tm.s.u64VirtualRawPrev, u64NanoTS, u64PrevNanoTS)))
+        return u64NanoTS;
+
+    /*
+     * Attempt updating the previous value, provided we're still ahead of it.
+     *
+     * There is no point in recalculating u64NanoTS because we got preempted
+     * or raced somebody while the GIP was updated, since these are events
+     * that might occur at any point in the return path as well.
+     */
+    for (int cTries = 100;;)
+    {
+        u64PrevNanoTS = ASMAtomicReadU64(&pVM->tm.s.u64VirtualRawPrev);
+        if (u64PrevNanoTS >= u64NanoTS)
+            break;
+        if (ASMAtomicCmpXchgU64(&pVM->tm.s.u64VirtualRawPrev, u64NanoTS, u64PrevNanoTS))
+            break;
+        AssertBreak(--cTries > 0, );
+    }
+
+    return u64NanoTS;
+}
+
+

 /**
     
      * warp drive has been enabled.
      */
-    uint64_t u64 = RTTimeNanoTS();
+    uint64_t u64 = tmVirtualGetRawNanoTS(pVM);
     u64 -= pVM->tm.s.u64VirtualWarpDriveStart;
     u64 *= pVM->tm.s.u32VirtualWarpDrivePercentage;
     
     /*
      * Now we apply the virtual time offset.
-     * (Which is the negate RTTimeNanoTS() value for when the virtual machine
-     * started if it had been running continuously without any suspends.)
+     * (Which is the negated tmVirtualGetRawNanoTS() value for when the virtual
+     * machine started if it had been running continuously without any suspends.)
      */
     u64 -= pVM->tm.s.u64VirtualOffset;
     
 {
     if (RT_LIKELY(!pVM->tm.s.fVirtualWarpDrive))
-        return RTTimeNanoTS() - pVM->tm.s.u64VirtualOffset;
+        return tmVirtualGetRawNanoTS(pVM) - pVM->tm.s.u64VirtualOffset;
     return tmVirtualGetRawNonNormal(pVM);
 }
     
          *  - We might be on a different CPU whose TSC isn't quite in sync with the
          *    other CPUs in the system.
-         *  - RTTimeNanoTS() is returning slightly different values in GC, R0 and R3 because
-         *    of the static variable it uses with the previous read time.
          *  - Another thread is racing us and we might have been preempted while inside
          *    this function.
     
     {
         STAM_COUNTER_INC(&pVM->tm.s.StatVirtualResume);
-        pVM->tm.s.u64VirtualWarpDriveStart = RTTimeNanoTS();
+        pVM->tm.s.u64VirtualRawPrev = 0;
+        pVM->tm.s.u64VirtualWarpDriveStart = tmVirtualGetRawNanoTS(pVM);
         pVM->tm.s.u64VirtualOffset = pVM->tm.s.u64VirtualWarpDriveStart - pVM->tm.s.u64Virtual;
         pVM->tm.s.fVirtualTicking = true;
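One detail worth calling out in tmVirtualGetRawNanoTS() above is the scaling step: because the TSC delta is capped at one update interval, a 32x32-bit multiply widened to 64 bits followed by a 64/32-bit divide can never overflow, which is what ASMMult2xU32RetU64/ASMDivU64ByU32RetU32 (and the MSC inline-assembly fallback) rely on. A standalone sketch of the same pattern in plain C:

    #include <stdint.h>

    /* Scale a capped TSC delta into nanoseconds: widen the 32-bit operands to
       a 64-bit product, then divide. Because u32Delta <= u32IntervalTsc, the
       quotient is at most u32IntervalNs and fits back into 32 bits. */
    static uint32_t scaleTscDeltaToNs(uint32_t u32Delta,
                                      uint32_t u32IntervalNs,
                                      uint32_t u32IntervalTsc)
    {
        uint64_t u64Product = (uint64_t)u32Delta * u32IntervalNs;
        return (uint32_t)(u64Product / u32IntervalTsc);
    }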
  • trunk/src/VBox/VMM/testcase/tstVMStructGC.cpp

r2283 → r2869

     GEN_CHECK_OFF(TM, u64VirtualOffset);
     GEN_CHECK_OFF(TM, u64Virtual);
+    GEN_CHECK_OFF(TM, u64VirtualRawPrev);
+    GEN_CHECK_OFF(TM, c1nsVirtualRawSteps);
+    GEN_CHECK_OFF(TM, cVirtualRawBadRawPrev);
     GEN_CHECK_OFF(TM, u64VirtualWarpDriveStart);
     GEN_CHECK_OFF(TM, u64VirtualSync);
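For reference, GEN_CHECK_OFF entries like these feed tstVMStructGC, which compares structure member offsets as seen by the host and guest-context compilers. A minimal, hypothetical sketch of that kind of offsetof-based layout check in standard C:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* A toy structure; the real testcase walks the actual TM members. */
    typedef struct TMSKETCH
    {
        uint64_t u64Virtual;
        uint64_t u64VirtualRawPrev;
        uint32_t c1nsVirtualRawSteps;
        uint32_t cVirtualRawBadRawPrev;
    } TMSKETCH;

    /* Emit "type member offset" so runs built with two different compilers
       can be diffed; a mismatch means they disagree on the struct layout. */
    #define CHECK_OFF(type, member) \
        printf("%s %s %u\n", #type, #member, (unsigned)offsetof(type, member))

    int main(void)
    {
        CHECK_OFF(TMSKETCH, u64Virtual);
        CHECK_OFF(TMSKETCH, u64VirtualRawPrev);
        CHECK_OFF(TMSKETCH, c1nsVirtualRawSteps);
        CHECK_OFF(TMSKETCH, cVirtualRawBadRawPrev);
        return 0;
    }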
