VirtualBox

Changeset 92857 in vbox for trunk


Ignore:
Timestamp:
Dec 10, 2021 12:43:03 AM (3 years ago)
Author:
vboxsync
Message:

IPRT/timer-r0drv-nt.cpp: Implemented RTTimerChangeInterval, improved high-res accuracy by using KeQueryInterruptTimePrecise, fixed potentially (unlikely) incorrect master DPC pointer when rescheduling omni timers, and adjusted the catchup logic.

File:
1 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/Runtime/r0drv/nt/timer-r0drv-nt.cpp

    r92825 r92857  
    102102    /** User argument. */
    103103    void                   *pvUser;
     104
     105    /** @name Periodic scheduling / RTTimerChangeInterval.
     106     *  @{  */
     107    /** Spinlock protecting the u64NanoInterval, iMasterTick, uNtStartTime,
     108     *  uNtDueTime and (at least for updating) fSuspended. */
     109    KSPIN_LOCK              Spinlock;
    104110    /** The timer interval. 0 if one-shot. */
    105     uint64_t                u64NanoInterval;
     111    uint64_t volatile       u64NanoInterval;
     112    /** The the current master tick.  This does not necessarily follow that of
     113     *  the subtimer, as RTTimerChangeInterval may cause it to reset. */
     114    uint64_t volatile       iMasterTick;
    106115#ifdef RTR0TIMER_NT_MANUAL_RE_ARM
    107116    /** The desired NT time of the first tick. */
    108     uint64_t                uNtStartTime;
    109 #endif
     117    uint64_t volatile       uNtStartTime;
     118    /** The current due time (absolute interrupt time). */
     119    uint64_t volatile       uNtDueTime;
     120#endif
     121    /** @} */
     122
    110123    /** The NT timer object. */
    111124    KTIMER                  NtTimer;
     
    125138
    126139#ifdef RTR0TIMER_NT_MANUAL_RE_ARM
     140
    127141/**
    128142 * Get current NT interrupt time.
     
    147161# endif
    148162}
     163
     164/**
     165 * Get current NT interrupt time, high resolution variant.
     166 * @return High resolution NT interrupt time
     167 */
     168static uint64_t rtTimerNtQueryInterruptTimeHighRes(void)
     169{
     170    if (g_pfnrtKeQueryInterruptTimePrecise)
     171    {
     172        ULONG64 uQpcIgnored;
     173        return g_pfnrtKeQueryInterruptTimePrecise(&uQpcIgnored);
     174    }
     175    return rtTimerNtQueryInterruptTime();
     176}
     177
    149178#endif /* RTR0TIMER_NT_MANUAL_RE_ARM */
    150179
    151180
    152181/**
     182 * Worker for rtTimerNtRearmInternval that calculates the next due time.
     183 *
     184 * @returns The next due time (relative, so always negative).
     185 * @param   uNtNow                  The current time.
     186 * @param   uNtStartTime            The start time of the timer.
     187 * @param   iTick                   The next tick number (zero being @a uNtStartTime).
     188 * @param   cNtInterval             The timer interval in NT ticks.
     189 * @param   cNtNegDueSaftyMargin    The due time safety margin in negative NT
     190 *                                  ticks.
     191 * @param   cNtMinNegInterval       The minium interval to use when in catchup
     192 *                                  mode, also negative NT ticks.
     193 */
     194DECLINLINE(int64_t) rtTimerNtCalcNextDueTime(uint64_t uNtNow, uint64_t uNtStartTime, uint64_t iTick, uint64_t cNtInterval,
     195                                             int32_t const cNtNegDueSaftyMargin, int32_t const cNtMinNegInterval)
     196{
     197    /* Calculate the actual time elapsed since timer start: */
     198    int64_t iDueTime = uNtNow - uNtStartTime;
     199    if (iDueTime < 0)
     200        iDueTime = 0;
     201
     202    /* Now calculate the nominal time since timer start for the next tick: */
     203    uint64_t const uNtNextRelStart = iTick * cNtInterval;
     204
     205    /* Calulate now much time we have to the next tick: */
     206    iDueTime -= uNtNextRelStart;
     207
     208    /* If we haven't already overshot the due time, including some safety margin, we're good: */
     209    if (iDueTime < cNtNegDueSaftyMargin)
     210        return iDueTime;
     211
     212    /* Okay, we've overshot it and are in catchup mode: */
     213    if (iDueTime < (int64_t)cNtInterval)
     214        iDueTime = -(int64_t)(cNtInterval / 2); /* double time */
     215    else if (iDueTime < (int64_t)(cNtInterval * 4))
     216        iDueTime = -(int64_t)(cNtInterval / 4); /* quadruple time */
     217    else
     218        return cNtMinNegInterval;
     219
     220    /* Make sure we don't try intervals smaller than the minimum specified by the caller: */
     221    if (iDueTime > cNtMinNegInterval)
     222        iDueTime = cNtMinNegInterval;
     223    return iDueTime;
     224}
     225
     226/**
    153227 * Manually re-arms an internval timer.
    154228 *
    155229 * Turns out NT doesn't necessarily do a very good job at re-arming timers
    156  * accurately.
     230 * accurately, this is in part due to KeSetTimerEx API taking the interval in
     231 * milliseconds.
    157232 *
    158233 * @param   pTimer              The timer.
    159  * @param   iTick               The current timer tick.
    160  */
    161 DECLINLINE(void) rtTimerNtRearmInternval(PRTTIMER pTimer, uint64_t iTick)
     234 * @param   pMasterDpc          The master timer DPC for passing to KeSetTimerEx
     235 *                              in low-resolution mode.  Ignored for high-res.
     236 */
     237static void rtTimerNtRearmInternval(PRTTIMER pTimer, PKDPC pMasterDpc)
    162238{
    163239#ifdef RTR0TIMER_NT_MANUAL_RE_ARM
    164240    Assert(pTimer->u64NanoInterval);
    165241
    166     uint64_t uNtNext = (iTick * pTimer->u64NanoInterval) / 100 - 10; /* 1us fudge */
    167     LARGE_INTEGER DueTime;
    168     DueTime.QuadPart = rtTimerNtQueryInterruptTime() - pTimer->uNtStartTime;
    169     if (DueTime.QuadPart < 0)
    170         DueTime.QuadPart = 0;
    171     if ((uint64_t)DueTime.QuadPart < uNtNext)
    172         DueTime.QuadPart -= uNtNext;
    173     else
    174         DueTime.QuadPart = -2500; /* 0.25ms */
    175 
     242    /*
     243     * For simplicity we acquire the spinlock for the whole operation.
     244     * This should be perfectly fine as it doesn't change the IRQL.
     245     */
     246    Assert(KeGetCurrentIrql() >= DISPATCH_LEVEL);
     247    KeAcquireSpinLockAtDpcLevel(&pTimer->Spinlock);
     248
     249    /*
     250     * Make sure it wasn't suspended
     251     */
     252    if (!ASMAtomicUoReadBool(&pTimer->fSuspended))
     253    {
     254        uint64_t const cNtInterval  = ASMAtomicUoReadU64(&pTimer->u64NanoInterval) / 100;
     255        uint64_t const uNtStartTime = ASMAtomicUoReadU64(&pTimer->uNtStartTime);
     256        uint64_t const iTick        = ++pTimer->iMasterTick;
     257
     258        /*
     259         * Calculate the deadline for the next timer tick and arm the timer.
     260         * We always use a relative tick, i.e. negative DueTime value.  This is
     261         * crucial for the the high resolution API as it will bugcheck otherwise.
     262         */
     263        int64_t  iDueTime;
     264        uint64_t uNtNow;
    176265# ifdef RTR0TIMER_NT_HIGH_RES
    177     if (pTimer->pHighResTimer)
    178         g_pfnrtExSetTimer(pTimer->pHighResTimer, DueTime.QuadPart, 0, NULL);
    179     else
     266        if (pTimer->pHighResTimer)
     267        {
     268            /* Must use highres time here. */
     269            uNtNow   = rtTimerNtQueryInterruptTimeHighRes();
     270            iDueTime = rtTimerNtCalcNextDueTime(uNtNow, uNtStartTime, iTick, cNtInterval,
     271                                                -100 /* 10us safety */, -2000 /* 200us min interval*/);
     272            g_pfnrtExSetTimer(pTimer->pHighResTimer, iDueTime, 0, NULL);
     273        }
     274        else
    180275# endif
    181         KeSetTimerEx(&pTimer->NtTimer, DueTime, 0, &pTimer->aSubTimers[0].NtDpc);
     276        {
     277            /* Expect interrupt time and timers to expire at the same time, so
     278               don't use high res time api here. */
     279            uNtNow   = rtTimerNtQueryInterruptTime();
     280            iDueTime = rtTimerNtCalcNextDueTime(uNtNow, uNtStartTime, iTick, cNtInterval,
     281                                                -100 /* 10us safety */, -2500 /* 250us min interval*/); /** @todo use max interval here */
     282            LARGE_INTEGER DueTime;
     283            DueTime.QuadPart = iDueTime;
     284            KeSetTimerEx(&pTimer->NtTimer, DueTime, 0, pMasterDpc);
     285        }
     286
     287        pTimer->uNtDueTime = uNtNow + -iDueTime;
     288    }
     289
     290    KeReleaseSpinLockFromDpcLevel(&pTimer->Spinlock);
    182291#else
    183     RT_NOREF(pTimer, iTick);
     292    RT_NOREF(pTimer, iTick, pMasterDpc);
    184293#endif
    185294}
     
    205314            ASMAtomicWriteBool(&pTimer->fSuspended, true);
    206315        uint64_t iTick = ++pTimer->aSubTimers[0].iTick;
    207         if (pTimer->u64NanoInterval)
    208             rtTimerNtRearmInternval(pTimer, iTick);
     316
    209317        pTimer->pfnTimer(pTimer, pTimer->pvUser, iTick);
     318
     319        /* We re-arm the timer after calling pfnTimer, as it may stop the timer
     320           or change the interval, which would mean doing extra work. */
     321        if (!pTimer->fSuspended && pTimer->u64NanoInterval)
     322            rtTimerNtRearmInternval(pTimer, &pTimer->aSubTimers[0].NtDpc);
    210323
    211324        ASMAtomicWriteHandle(&pTimer->aSubTimers[0].hActiveThread, NIL_RTNATIVETHREAD);
     
    322435static void rtTimerNtOmniMasterCallbackWorker(PRTTIMER pTimer, PRTTIMERNTSUBTIMER pSubTimer, int iCpuSelf)
    323436{
    324 #ifdef RT_STRICT
    325     if (KeGetCurrentIrql() < DISPATCH_LEVEL)
    326         RTAssertMsg2Weak("rtTimerNtOmniMasterCallback: Irql=%d expected >=%d\n", KeGetCurrentIrql(), DISPATCH_LEVEL);
    327     if (pSubTimer - &pTimer->aSubTimers[0] != iCpuSelf)
    328         RTAssertMsg2Weak("rtTimerNtOmniMasterCallback: iCpuSelf=%d pSubTimer=%p / %d\n", iCpuSelf, pSubTimer, pSubTimer - &pTimer->aSubTimers[0]);
    329 #endif
    330 
    331437    /*
    332438     * Check that we haven't been suspended before scheduling the other DPCs
    333439     * and doing the callout.
    334440     */
    335     if (    !ASMAtomicUoReadBool(&pTimer->fSuspended)
    336         &&  pTimer->u32Magic == RTTIMER_MAGIC)
    337     {
    338         RTCPUSET    OnlineSet;
     441    if (   !ASMAtomicUoReadBool(&pTimer->fSuspended)
     442        && pTimer->u32Magic == RTTIMER_MAGIC)
     443    {
     444        RTCPUSET OnlineSet;
    339445        RTMpGetOnlineSet(&OnlineSet);
    340446
     
    347453             */
    348454            for (int iCpu = 0; iCpu < RTCPUSET_MAX_CPUS; iCpu++)
    349                 if (    RTCpuSetIsMemberByIndex(&OnlineSet, iCpu)
    350                     &&  iCpuSelf != iCpu)
     455                if (   RTCpuSetIsMemberByIndex(&OnlineSet, iCpu)
     456                    && iCpuSelf != iCpu)
    351457                    KeInsertQueueDpc(&pTimer->aSubTimers[iCpu].NtDpc, 0, 0);
    352458
    353             uint64_t iTick = ++pSubTimer->iTick;
    354             rtTimerNtRearmInternval(pTimer, iTick);
    355             pTimer->pfnTimer(pTimer, pTimer->pvUser, iTick);
     459            pTimer->pfnTimer(pTimer, pTimer->pvUser, ++pSubTimer->iTick);
     460
     461            /* We re-arm the timer after calling pfnTimer, as it may stop the timer
     462               or change the interval, which would mean doing extra work. */
     463            if (!pTimer->fSuspended && pTimer->u64NanoInterval)
     464                rtTimerNtRearmInternval(pTimer, &pSubTimer->NtDpc);
    356465        }
    357466        else
     
    367476
    368477            for (int iCpu = 0; iCpu < RTCPUSET_MAX_CPUS; iCpu++)
    369                 if (    RTCpuSetIsMemberByIndex(&OnlineSet, iCpu)
    370                     &&  iCpuSelf != iCpu)
     478                if (   RTCpuSetIsMemberByIndex(&OnlineSet, iCpu)
     479                    && iCpuSelf != iCpu)
    371480                    if (!KeInsertQueueDpc(&pTimer->aSubTimers[iCpu].NtDpc, 0, 0))
    372481                        ASMAtomicDecS32(&pTimer->cOmniSuspendCountDown); /* already queued and counted. */
     
    395504    PRTTIMERNTSUBTIMER const pSubTimer = (PRTTIMERNTSUBTIMER)pvUser;
    396505    PRTTIMER const           pTimer    = pSubTimer->pParent;
    397     int const                iCpuSelf  = RTMpCpuIdToSetIndex(RTMpCpuId());
     506    RTCPUID                  idCpu     = RTMpCpuId();
     507    int const                iCpuSelf  = RTMpCpuIdToSetIndex(idCpu);
    398508
    399509    AssertPtr(pTimer);
     
    401511    if (KeGetCurrentIrql() < DISPATCH_LEVEL)
    402512        RTAssertMsg2Weak("rtTimerNtOmniMasterCallback: Irql=%d expected >=%d\n", KeGetCurrentIrql(), DISPATCH_LEVEL);
     513    /* We must be called on the master CPU or the tick variable goes south. */
    403514    if (pSubTimer - &pTimer->aSubTimers[0] != iCpuSelf)
    404515        RTAssertMsg2Weak("rtTimerNtOmniMasterCallback: iCpuSelf=%d pSubTimer=%p / %d\n", iCpuSelf, pSubTimer, pSubTimer - &pTimer->aSubTimers[0]);
     516    if (pTimer->idCpu != idCpu)
     517        RTAssertMsg2Weak("rtTimerNtOmniMasterCallback: pTimer->idCpu=%d vs idCpu=%d\n", pTimer->idCpu, idCpu);
    405518#endif
    406519
     
    446559    AssertReturn(pTimer->u32Magic == RTTIMER_MAGIC, VERR_INVALID_HANDLE);
    447560
    448     if (!ASMAtomicUoReadBool(&pTimer->fSuspended))
     561    /*
     562     * The operation is protected by the spinlock.
     563     */
     564    KIRQL bSavedIrql;
     565    KeAcquireSpinLock(&pTimer->Spinlock, &bSavedIrql);
     566
     567    /*
     568     * Check the state.
     569     */
     570    if (ASMAtomicUoReadBool(&pTimer->fSuspended))
     571    { /* likely */ }
     572    else
     573    {
     574        KeReleaseSpinLock(&pTimer->Spinlock, bSavedIrql);
    449575        return VERR_TIMER_ACTIVE;
    450     if (   pTimer->fSpecificCpu
    451         && !RTMpIsCpuOnline(pTimer->idCpu))
     576    }
     577    if (   !pTimer->fSpecificCpu
     578        || RTMpIsCpuOnline(pTimer->idCpu))
     579    { /* likely */ }
     580    else
     581    {
     582        KeReleaseSpinLock(&pTimer->Spinlock, bSavedIrql);
    452583        return VERR_CPU_OFFLINE;
    453 
    454     /*
    455      * Start the timer.
    456      */
    457     PKDPC pMasterDpc = pTimer->fOmniTimer
    458                      ? &pTimer->aSubTimers[RTMpCpuIdToSetIndex(pTimer->idCpu)].NtDpc
    459                      : &pTimer->aSubTimers[0].NtDpc;
    460 
     584    }
     585
     586    /*
     587     * Do the starting.
     588     */
    461589#ifndef RTR0TIMER_NT_MANUAL_RE_ARM
     590    /* Calculate the interval time: */
    462591    uint64_t u64Interval = pTimer->u64NanoInterval / 1000000; /* This is ms, believe it or not. */
    463592    ULONG ulInterval = (ULONG)u64Interval;
     
    468597#endif
    469598
     599    /* Translate u64First to a DueTime: */
    470600    LARGE_INTEGER DueTime;
    471601    DueTime.QuadPart = -(int64_t)(u64First / 100); /* Relative, NT time. */
    472602    if (!DueTime.QuadPart)
    473         DueTime.QuadPart = -1;
    474 
     603        DueTime.QuadPart = -10; /* 1us */
     604
     605    /* Reset tick counters: */
    475606    unsigned cSubTimers = pTimer->fOmniTimer ? pTimer->cSubTimers : 1;
    476607    for (unsigned iCpu = 0; iCpu < cSubTimers; iCpu++)
    477608        pTimer->aSubTimers[iCpu].iTick = 0;
     609    pTimer->iMasterTick = 0;
     610
     611    /* Update timer state: */
    478612#ifdef RTR0TIMER_NT_MANUAL_RE_ARM
    479     pTimer->uNtStartTime = rtTimerNtQueryInterruptTime() + u64First / 100;
     613    pTimer->uNtStartTime = rtTimerNtQueryInterruptTime() + -DueTime.QuadPart;
    480614#endif
    481615    ASMAtomicWriteS32(&pTimer->cOmniSuspendCountDown, 0);
    482616    ASMAtomicWriteBool(&pTimer->fSuspended, false);
     617
     618    /*
     619     * Finally start the NT timer.
     620     *
     621     * We do this without holding the spinlock to err on the side of
     622     * caution in case ExSetTimer or KeSetTimerEx ever should have the idea
     623     * of running the callback before returning.
     624     */
     625    KeReleaseSpinLock(&pTimer->Spinlock, bSavedIrql);
    483626
    484627#ifdef RTR0TIMER_NT_HIGH_RES
     
    494637#endif
    495638    {
     639        PKDPC const pMasterDpc = &pTimer->aSubTimers[pTimer->fOmniTimer ? RTMpCpuIdToSetIndex(pTimer->idCpu) : 0].NtDpc;
    496640#ifdef RTR0TIMER_NT_MANUAL_RE_ARM
    497641        KeSetTimerEx(&pTimer->NtTimer, DueTime, 0, pMasterDpc);
     
    511655 * @param   pTimer      The active timer.
    512656 */
    513 static void rtTimerNtStopWorker(PRTTIMER pTimer)
    514 {
    515     /*
    516      * Just cancel the timer, dequeue the DPCs and flush them (if this is supported).
    517      */
    518     ASMAtomicWriteBool(&pTimer->fSuspended, true);
    519 
     657static int rtTimerNtStopWorker(PRTTIMER pTimer)
     658{
     659    /*
     660     * Update the state from with the spinlock context.
     661     */
     662    KIRQL bSavedIrql;
     663    KeAcquireSpinLock(&pTimer->Spinlock, &bSavedIrql);
     664
     665    bool const fWasSuspended = ASMAtomicXchgBool(&pTimer->fSuspended, true);
     666
     667    KeReleaseSpinLock(&pTimer->Spinlock, bSavedIrql);
     668    if (!fWasSuspended)
     669    {
     670        /*
     671         * We should cacnel the timer and dequeue DPCs.
     672         */
    520673#ifdef RTR0TIMER_NT_HIGH_RES
    521     if (pTimer->pHighResTimer)
    522         g_pfnrtExCancelTimer(pTimer->pHighResTimer, NULL);
    523     else
    524 #endif
    525         KeCancelTimer(&pTimer->NtTimer);
    526 
    527     for (RTCPUID iCpu = 0; iCpu < pTimer->cSubTimers; iCpu++)
    528         KeRemoveQueueDpc(&pTimer->aSubTimers[iCpu].NtDpc);
     674        if (pTimer->pHighResTimer)
     675            g_pfnrtExCancelTimer(pTimer->pHighResTimer, NULL);
     676        else
     677#endif
     678            KeCancelTimer(&pTimer->NtTimer);
     679
     680        for (RTCPUID iCpu = 0; iCpu < pTimer->cSubTimers; iCpu++)
     681            KeRemoveQueueDpc(&pTimer->aSubTimers[iCpu].NtDpc);
     682        return VINF_SUCCESS;
     683    }
     684    return VERR_TIMER_SUSPENDED;
    529685}
    530686
     
    538694    AssertReturn(pTimer->u32Magic == RTTIMER_MAGIC, VERR_INVALID_HANDLE);
    539695
    540     if (ASMAtomicUoReadBool(&pTimer->fSuspended))
    541         return VERR_TIMER_SUSPENDED;
    542 
    543696    /*
    544697     * Call the worker we share with RTTimerDestroy.
    545698     */
    546     rtTimerNtStopWorker(pTimer);
    547     return VINF_SUCCESS;
     699    return rtTimerNtStopWorker(pTimer);
    548700}
    549701
     
    553705    AssertPtrReturn(pTimer, VERR_INVALID_HANDLE);
    554706    AssertReturn(pTimer->u32Magic == RTTIMER_MAGIC, VERR_INVALID_HANDLE);
    555     RT_NOREF1(u64NanoInterval);
    556 
    557     return VERR_NOT_SUPPORTED;
     707
     708    /*
     709     * We do all the state changes while holding the spinlock.
     710     */
     711    int   rc = VINF_SUCCESS;
     712    KIRQL bSavedIrql;
     713    KeAcquireSpinLock(&pTimer->Spinlock, &bSavedIrql);
     714
     715    /*
     716     * When the timer isn't running, this is an simple job:
     717     */
     718    if (!ASMAtomicUoReadBool(&pTimer->fSuspended))
     719        pTimer->u64NanoInterval = u64NanoInterval;
     720    else
     721    {
     722        /*
     723         * We only implement changing the interval in RTR0TIMER_NT_MANUAL_RE_ARM
     724         * mode right now. We typically let the new interval take effect after
     725         * the next timer callback, unless that's too far ahead.
     726         */
     727#ifdef RTR0TIMER_NT_MANUAL_RE_ARM
     728        pTimer->u64NanoInterval  = u64NanoInterval;
     729        pTimer->iMasterTick      = 0;
     730# ifdef RTR0TIMER_NT_HIGH_RES
     731        uint64_t const uNtNow = pTimer->pHighResTimer ? rtTimerNtQueryInterruptTimeHighRes() : rtTimerNtQueryInterruptTime();
     732# else
     733        uint64_t const uNtNow = rtTimerNtQueryInterruptTime();
     734# endif
     735        if (uNtNow >= pTimer->uNtDueTime)
     736            pTimer->uNtStartTime = uNtNow;
     737        else
     738        {
     739            pTimer->uNtStartTime = pTimer->uNtDueTime;
     740
     741            /*
     742             * Re-arm the timer if the next DueTime is both more than 1.25 new
     743             * intervals and at least 0.5 ms ahead.
     744             */
     745            uint64_t cNtToNext = pTimer->uNtDueTime - uNtNow;
     746            if (   cNtToNext >= RT_NS_1MS / 2 / 100 /* 0.5 ms */
     747                && cNtToNext * 100 > u64NanoInterval + u64NanoInterval / 4)
     748            {
     749                pTimer->uNtStartTime = pTimer->uNtDueTime = uNtNow + u64NanoInterval / 100;
     750# ifdef RTR0TIMER_NT_HIGH_RES
     751                if (pTimer->pHighResTimer)
     752                    g_pfnrtExSetTimer(pTimer->pHighResTimer, -(int64_t)u64NanoInterval / 100, 0, NULL);
     753                else
     754# endif
     755                {
     756                    LARGE_INTEGER DueTime;
     757                    DueTime.QuadPart = -(int64_t)u64NanoInterval / 100;
     758                    KeSetTimerEx(&pTimer->NtTimer, DueTime, 0,
     759                                 &pTimer->aSubTimers[pTimer->fOmniTimer ? RTMpCpuIdToSetIndex(pTimer->idCpu) : 0].NtDpc);
     760                }
     761            }
     762        }
     763#else
     764        rc = VERR_NOT_SUPPORTED;
     765#endif
     766    }
     767
     768    KeReleaseSpinLock(&pTimer->Spinlock, bSavedIrql);
     769
     770    return rc;
    558771}
    559772
     
    574787
    575788    /*
    576      * Invalidate the timer, stop it if it's running and finally
    577      * free up the memory.
     789     * Invalidate the timer, stop it if it's running and finally free up the memory.
    578790     */
    579791    ASMAtomicWriteU32(&pTimer->u32Magic, ~RTTIMER_MAGIC);
    580     if (!ASMAtomicUoReadBool(&pTimer->fSuspended))
    581         rtTimerNtStopWorker(pTimer);
     792    rtTimerNtStopWorker(pTimer);
    582793
    583794#ifdef RTR0TIMER_NT_HIGH_RES
     
    650861    pTimer->pfnTimer = pfnTimer;
    651862    pTimer->pvUser = pvUser;
     863    KeInitializeSpinLock(&pTimer->Spinlock);
    652864    pTimer->u64NanoInterval = u64NanoInterval;
    653865
     
    684896             *       called on.
    685897             */
    686             pTimer->idCpu = NIL_RTCPUID;
     898            pTimer->iMasterTick = 0;
     899            pTimer->idCpu       = NIL_RTCPUID;
    687900            for (unsigned iCpu = 0; iCpu < cSubTimers && RT_SUCCESS(rc); iCpu++)
    688901            {
    689                 pTimer->aSubTimers[iCpu].iTick = 0;
     902                pTimer->aSubTimers[iCpu].iTick   = 0;
    690903                pTimer->aSubTimers[iCpu].pParent = pTimer;
    691904
     
    715928             * if requested to do so.
    716929             */
    717             pTimer->aSubTimers[0].iTick = 0;
     930            pTimer->iMasterTick           = 0;
     931            pTimer->aSubTimers[0].iTick   = 0;
    718932            pTimer->aSubTimers[0].pParent = pTimer;
    719933
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette