VirtualBox

Changeset 37414 in vbox for trunk/src/VBox/VMM


Ignore:
Timestamp:
Jun 10, 2011 3:53:59 PM (14 years ago)
Author:
vboxsync
svn:sync-xref-src-repo-rev:
72213
Message:

TM: Added TMTimerLock, TMTimerUnlock and TMTimerIsLockOwner for locking the virtual sync clock to avoid races.

Location:
trunk/src/VBox/VMM
Files:
4 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/VMM/VMMAll/TMAll.cpp

    r37324 r37414  
    802802{
    803803    return (PTMTIMERRC)MMHyperCCToRC(pTimer->CTX_SUFF(pVM), pTimer);
     804}
     805
     806
     807/**
     808 * Locks the timer clock.
     809 *
     810 * @returns VINF_SUCCESS on success, @a rcBusy if busy, and VERR_NOT_SUPPORTED
     811 *          if the clock does not have a lock.
     812 * @param   pTimer              The timer which clock lock we wish to take.
     813 * @param   rcBusy              What to return in ring-0 and raw-mode context
     814 *                              if the lock is busy.
     815 *
     816 * @remarks Currently only supported on timers using the virtual sync clock.
     817 */
     818VMMDECL(int) TMTimerLock(PTMTIMER pTimer, int rcBusy)
     819{
     820    AssertPtr(pTimer);
     821    AssertReturn(pTimer->enmClock == TMCLOCK_VIRTUAL_SYNC, VERR_NOT_SUPPORTED);
     822    return PDMCritSectEnter(&pTimer->CTX_SUFF(pVM)->tm.s.VirtualSyncLock, rcBusy);
     823}
     824
     825
     826/**
     827 * Unlocks a timer clock locked by TMTimerLock.
     828 *
     829 * @param   pTimer              The timer which clock to unlock.
     830 */
     831VMMDECL(void) TMTimerUnlock(PTMTIMER pTimer)
     832{
     833    AssertPtr(pTimer);
     834    AssertReturnVoid(pTimer->enmClock == TMCLOCK_VIRTUAL_SYNC);
     835    PDMCritSectLeave(&pTimer->CTX_SUFF(pVM)->tm.s.VirtualSyncLock);
     836}
     837
     838
     839/**
     840 * Checks if the current thread owns the timer clock lock.
     841 *
     842 * @returns @c true if it's the owner, @c false if not.
     843 * @param   pTimer              The timer handle.
     844 */
     845VMMDECL(bool) TMTimerIsLockOwner(PTMTIMER pTimer)
     846{
     847    AssertPtr(pTimer);
     848    AssertReturn(pTimer->enmClock == TMCLOCK_VIRTUAL_SYNC, false);
     849    return PDMCritSectIsOwner(&pTimer->CTX_SUFF(pVM)->tm.s.VirtualSyncLock);
    804850}
    805851
  • trunk/src/VBox/VMM/VMMR0/VMMR0.def

    r37321 r37414  
    5050    TMTimerGetFreq
    5151    TMTimerIsActive
     52    TMTimerIsLockOwner
     53    TMTimerLock
    5254    TMTimerSet
    5355    TMTimerSetRelative
     
    5759    TMTimerSetFrequencyHint
    5860    TMTimerStop
     61    TMTimerUnlock
    5962    VMMGetSvnRev
    6063    vmmR0LoggerFlush
  • trunk/src/VBox/VMM/VMMR3/TM.cpp

    r37358 r37414  
    130130#include <VBox/vmm/pdmapi.h>
    131131#include <VBox/vmm/iom.h>
     132#include <VBox/vmm/dbgftrace.h>
    132133#include "TMInternal.h"
    133134#include <VBox/vmm/vm.h>
     
    20762077        STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncRunStoppedAlready);
    20772078        u64Now = pVM->tm.s.u64VirtualSync;
    2078 #ifdef DEBUG_bird
     2079#if 0 //def DEBUG_bird
    20792080        Assert(u64Now <= pNext->u64Expire);
    20802081#endif
     2082        if (u64Now > pNext->u64Expire)
     2083            DBGFTRACE_U64_TAG2(pVM, u64Now - pNext->u64Expire, "stopped after timer expired", pNext->pszDesc);
    20812084    }
    20822085    else
     
    21792182
    21802183            /* advance the clock - don't permit timers to be out of order or armed in the 'past'. */
     2184            if (pTimer->u64Expire < u64Prev)
     2185                DBGFTRACE_U64_TAG2(pVM, u64Prev - pTimer->u64Expire, "timer expires in the past", pNext->pszDesc);
    21812186#ifdef DEBUG_bird
    21822187#ifdef VBOX_STRICT
    2183             AssertMsg(pTimer->u64Expire >= u64Prev, ("%'RU64 < %'RU64 %s\n", pTimer->u64Expire, u64Prev, pTimer->pszDesc));
     2188            //AssertMsg(pTimer->u64Expire >= u64Prev, ("%'RU64 < %'RU64 %s\n", pTimer->u64Expire, u64Prev, pTimer->pszDesc));
    21842189            u64Prev = pTimer->u64Expire;
    21852190#endif
     
    21902195            /* fire */
    21912196            TM_SET_STATE(pTimer, TMTIMERSTATE_EXPIRED_DELIVER);
     2197            DBGFTRACE_U64_TAG(pVM, pVM->tm.s.u64VirtualSync, pTimer->pszDesc);
    21922198            switch (pTimer->enmType)
    21932199            {
     
    22302236        Assert(u64VirtualNow2 >= u64VirtualNow);
    22312237#ifdef DEBUG_bird
    2232         AssertMsg(pVM->tm.s.u64VirtualSync >= u64Now, ("%'RU64 < %'RU64\n", pVM->tm.s.u64VirtualSync, u64Now));
     2238        //AssertMsg(pVM->tm.s.u64VirtualSync >= u64Now, ("%'RU64 < %'RU64\n", pVM->tm.s.u64VirtualSync, u64Now));
    22332239#endif
     2240        if (pVM->tm.s.u64VirtualSync < u64Now)
     2241            DBGFTRACE_U64_TAG(pVM, u64Now - pVM->tm.s.u64VirtualSync, "u64VirtualSync < u64Now");
     2242
    22342243        const uint64_t offSlack = pVM->tm.s.u64VirtualSync - u64Now;
    22352244        STAM_STATS({
     
    22752284                ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncCatchUp, false);
    22762285                Log4(("TM: %'RU64/-%'8RU64: caught up [pt]\n", u64VirtualNow2 - offNew, offLag));
     2286                DBGFTRACE_U64_TAG(pVM, u64VirtualNow2 - offNew, "vs caught up [pt]");
    22772287            }
    22782288            else if (offLag <= pVM->tm.s.u64VirtualSyncCatchUpGiveUpThreshold)
     
    22882298                    ASMAtomicWriteU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage, pVM->tm.s.aVirtualSyncCatchUpPeriods[i].u32Percentage);
    22892299                    Log4(("TM: %'RU64/%'8RU64: adj %u%%\n", u64VirtualNow2 - offNew, offLag, pVM->tm.s.u32VirtualSyncCatchUpPercentage));
     2300                    DBGFTRACE_U64_TAG(pVM, pVM->tm.s.u32VirtualSyncCatchUpPercentage, "vs adj");
    22902301                }
     2302                else
     2303                    DBGFTRACE_U64_TAG(pVM, u64VirtualNow2 - offNew, "vs resuming catch up");
    22912304                pVM->tm.s.u64VirtualSyncCatchUpPrev = u64VirtualNow2;
    22922305            }
     
    23002313                Log4(("TM: %'RU64/%'8RU64: give up %u%%\n", u64VirtualNow2 - offNew, offLag, pVM->tm.s.u32VirtualSyncCatchUpPercentage));
    23012314                LogRel(("TM: Giving up catch-up attempt at a %'RU64 ns lag; new total: %'RU64 ns\n", offLag, offNew));
     2315                DBGFTRACE_U64_TAG(pVM, u64VirtualNow2 - offNew, "vs gave up");
    23022316            }
    23032317        }
     
    23162330                ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncCatchUp, true);
    23172331                Log4(("TM: %'RU64/%'8RU64: catch-up %u%%\n", u64VirtualNow2 - offNew, offLag, pVM->tm.s.u32VirtualSyncCatchUpPercentage));
     2332                DBGFTRACE_U64_TAG(pVM, pVM->tm.s.u32VirtualSyncCatchUpPercentage, "vs start");
    23182333            }
    23192334            else
     
    23242339                Log4(("TM: %'RU64/%'8RU64: give up\n", u64VirtualNow2 - offNew, offLag));
    23252340                LogRel(("TM: Not bothering to attempt catching up a %'RU64 ns lag; new total: %'RU64\n", offLag, offNew));
     2341                DBGFTRACE_U64_TAG(pVM, u64VirtualNow2 - offNew, "vs don't bother");
    23262342            }
    23272343        }
     2344        else
     2345            DBGFTRACE_U64_TAG(pVM, u64VirtualNow2 - offNew, "vs resuming");
    23282346
    23292347        /*
  • trunk/src/VBox/VMM/VMMRC/VMMRC.def

    r35335 r37414  
    4646    TMTimerGetFreq
    4747    TMTimerIsActive
     48    TMTimerIsLockOwner
     49    TMTimerLock
    4850    TMTimerSet
    4951    TMTimerSetRelative
     
    5355    TMTimerSetFrequencyHint
    5456    TMTimerStop
     57    TMTimerUnlock
    5558    TRPMGCHandlerGeneric
    5659    TRPMGCHandlerInterupt
Note: See TracChangeset for help on using the changeset viewer.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette