VirtualBox

Changeset 21540 in vbox for trunk/src/VBox/Runtime


Ignore:
Timestamp:
Jul 13, 2009 2:51:23 PM (16 years ago)
Author:
vboxsync
svn:sync-xref-src-repo-rev:
50013
Message:

IPRT: RTSemSpinMutex implementation.

Location:
trunk/src/VBox/Runtime
Files:
2 edited
3 copied

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/Runtime/Makefile.kmk

    r21502 r21540  
    133133        win/amd64/ASMGetDR6.asm \
    134134        win/amd64/ASMGetDR7.asm \
    135         common/asm/ASMMultU64ByU32DivByU32.asm
     135        common/asm/ASMMultU64ByU32DivByU32.asm \
     136        common/asm/ASMNopPause.asm
    136137
    137138#
     
    304305        r3/test.cpp \
    305306        r3/testi.cpp \
    306         r3/tcp.cpp
     307        r3/tcp.cpp \
     308        r3/generic/semspinmutex-r3-generic.cpp
    307309
    308310#if1of ($(KBUILD_TARGET_ARCH),amd64 x86)
     
    10921094        r0drv/alloc-r0drv.cpp \
    10931095        r0drv/initterm-r0drv.cpp \
     1096        r0drv/generic/semspinmutex-r0drv-generic.c \
    10941097        VBox/log-vbox.cpp \
    10951098        VBox/strformat-vbox.cpp
  • trunk/src/VBox/Runtime/include/internal/magics.h

    r20360 r21540  
    9191/** Dead magic for the mutex semaphore structure. */
    9292#define RTSEMMUTEX_MAGIC_DEAD       0x20010511
     93/** Magic for the spinning mutex semaphore structure. (Natsume Soseki) */
     94#define RTSEMSPINMUTEX_MAGIC        0x18670209
     95/** Dead magic value for RTSEMSPINMUTEXINTERNAL::u32Magic. */
     96#define RTSEMSPINMUTEX_MAGIC_DEAD   0x19161209
    9397/** RTSEMRWINTERNAL::u32Magic value. (Kosuke Fujishima) */
    9498#define RTSEMRW_MAGIC               0x19640707
  • trunk/src/VBox/Runtime/r0drv/generic/semspinmutex-r0drv-generic.c

    r21515 r21540  
    11/* $Id$ */
    22/** @file
    3  * IPRT - Critical Section, Generic.
     3 * IPRT - Spinning Mutex Semaphores, Ring-0 Driver, Generic.
    44 */
    55
    66/*
    7  * Copyright (C) 2006-2007 Sun Microsystems, Inc.
     7 * Copyright (C) 2009 Sun Microsystems, Inc.
    88 *
    99 * This file is part of VirtualBox Open Source Edition (OSE), as
     
    3333*   Header Files                                                               *
    3434*******************************************************************************/
    35 #include <iprt/critsect.h>
     35#ifdef RT_OS_WINDOWS
     36# include "../nt/the-nt-kernel.h"
     37#endif
    3638#include "internal/iprt.h"
    3739
    3840#include <iprt/semaphore.h>
     41#include <iprt/asm.h>
     42#include <iprt/assert.h>
     43#include <iprt/err.h>
     44#include <iprt/mem.h>
    3945#include <iprt/thread.h>
    40 #include <iprt/assert.h>
    41 #include <iprt/asm.h>
    42 #include <iprt/err.h>
    43 #include "internal/thread.h"
    44 #include "internal/strict.h"
    45 
    46 
    47 /* in strict mode we're redefining this, so undefine it now for the implementation. */
    48 #undef RTCritSectEnter
    49 #undef RTCritSectTryEnter
    50 #undef RTCritSectEnterMultiple
    51 
    52 
     46#include "internal/magics.h"
     47
     48
     49/*******************************************************************************
     50*   Structures and Typedefs                                                    *
     51*******************************************************************************/
    5352/**
    54  * Initialize a critical section.
    55  */
    56 RTDECL(int) RTCritSectInit(PRTCRITSECT pCritSect)
    57 {
    58     return RTCritSectInitEx(pCritSect, 0);
    59 }
    60 RT_EXPORT_SYMBOL(RTCritSectInit);
    61 
     53 * Saved state information.
     54 */
     55typedef struct RTSEMSPINMUTEXSTATE
     56{
     57    /** Saved flags register. */
     58    RTCCUINTREG             fSavedFlags;
     59    /** Preemption state.  */
     60    RTTHREADPREEMPTSTATE    PreemptState;
     61    /** Whether to spin or sleep. */
     62    bool                    fSpin;
     63    /** Whether the flags have been saved. */
     64    bool                    fValidFlags;
     65} RTSEMSPINMUTEXSTATE;
    6266
    6367/**
    64  * Initialize a critical section.
    65  *
    66  * @returns iprt status code.
    67  * @param   pCritSect   Pointer to the critical section structure.
    68  * @param   fFlags      Flags, any combination of the RTCRITSECT_FLAGS \#defines.
    69  */
    70 RTDECL(int) RTCritSectInitEx(PRTCRITSECT pCritSect, uint32_t fFlags)
    71 {
    72     /*
    73      * Initialize the structure and
    74      */
    75     pCritSect->u32Magic             = RTCRITSECT_MAGIC;
    76     pCritSect->fFlags               = fFlags;
    77     pCritSect->cNestings            = 0;
    78     pCritSect->cLockers             = -1;
    79     pCritSect->NativeThreadOwner    = NIL_RTNATIVETHREAD;
    80     pCritSect->Strict.ThreadOwner   = NIL_RTTHREAD;
    81     pCritSect->Strict.pszEnterFile  = NULL;
    82     pCritSect->Strict.u32EnterLine  = 0;
    83     pCritSect->Strict.uEnterId      = 0;
    84     int rc = RTSemEventCreate(&pCritSect->EventSem);
     68 * Spinning mutex semaphore.
     69 */
     70typedef struct RTSEMSPINMUTEXINTERNAL
     71{
     72    /** Magic value (RTSEMSPINMUTEX_MAGIC).
     73     * RTSEMSPINMUTEX_MAGIC is the value of an initialized & operational semaphore. */
     74    uint32_t volatile       u32Magic;
     75    /** Flags. This is a combination of RTSEMSPINMUTEX_FLAGS_XXX and
     76     *  RTSEMSPINMUTEX_INT_FLAGS_XXX. */
     77    uint32_t volatile       fFlags;
     78    /** The owner thread.
     79     * This is NIL if the semaphore is not owned by anyone. */
     80    RTNATIVETHREAD volatile hOwner;
     81    /** Number of threads waiting for the lock. */
     82    int32_t volatile        cLockers;
     83    /** The semaphore to block on. */
     84    RTSEMEVENT              hEventSem;
     85    /** Saved state information of the owner.
     86     * This will be restored by RTSemSpinMutexRelease. */
     87    RTSEMSPINMUTEXSTATE     SavedState;
     88} RTSEMSPINMUTEXINTERNAL;
     89
     90
     91/*******************************************************************************
     92*   Defined Constants And Macros                                               *
     93*******************************************************************************/
     94//#define RTSEMSPINMUTEX_INT_FLAGS_MUST
     95
     96/** Validates the handle, returning if invalid. */
     97#define RTSEMSPINMUTEX_VALIDATE_RETURN(pThis) \
     98    do \
     99    { \
     100        uint32_t u32Magic; \
     101        AssertPtr(pThis); \
     102        u32Magic = (pThis)->u32Magic; \
     103        if (u32Magic != RTSEMSPINMUTEX_MAGIC) \
     104        { \
     105            AssertMsgFailed(("u32Magic=%#x pThis=%p\n", u32Magic, pThis)); \
     106            return u32Magic == RTSEMSPINMUTEX_MAGIC_DEAD ? VERR_SEM_DESTROYED : VERR_INVALID_HANDLE; \
     107        } \
     108    } while (0)
     109
     110
     111RTDECL(int) RTSemSpinMutexCreate(PRTSEMSPINMUTEX phSpinMtx, uint32_t fFlags)
     112{
     113    RTSEMSPINMUTEXINTERNAL *pThis;
     114    int                     rc;
     115
     116    AssertReturn(!(fFlags & ~RTSEMSPINMUTEX_FLAGS_VALID_MASK), VERR_INVALID_PARAMETER);
     117    AssertPtr(phSpinMtx);
     118
     119    /*
     120     * Allocate and initialize the structure.
     121     */
     122    pThis = (RTSEMSPINMUTEXINTERNAL *)RTMemAllocZ(sizeof(*pThis));
     123    if (!pThis)
     124        return VERR_NO_MEMORY;
     125    pThis->u32Magic  = RTSEMSPINMUTEX_MAGIC;
     126    pThis->fFlags    = fFlags;
     127    pThis->hOwner    = NIL_RTNATIVETHREAD;
     128    pThis->cLockers  = -1;
     129    rc = RTSemEventCreate(&pThis->hEventSem);
    85130    if (RT_SUCCESS(rc))
     131    {
     132        *phSpinMtx = pThis;
    86133        return VINF_SUCCESS;
    87 
    88     AssertRC(rc);
    89     pCritSect->EventSem = NULL;
    90     pCritSect->u32Magic = (uint32_t)rc;
     134    }
     135
     136    RTMemFree(pThis);
    91137    return rc;
    92138}
    93 RT_EXPORT_SYMBOL(RTCritSectInitEx);
     139RT_EXPORT_SYMBOL(RTSemSpinMutexCreate);
    94140
    95141
    96142/**
    97  * Enter multiple critical sections.
    98  *
    99  * This function will enter ALL the specified critical sections before returning.
    100  *
    101  * @returns VINF_SUCCESS on success.
    102  * @returns VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
    103  * @returns VERR_SEM_DESTROYED if RTCritSectDelete was called while waiting.
    104  * @param   cCritSects      Number of critical sections in the array.
    105  * @param   papCritSects    Array of critical section pointers.
    106  *
    107  * @remark  Please note that this function will not necessarily come out favourable in a
    108  *          fight with other threads which are using the normal RTCritSectEnter() function.
    109  *          Therefore, avoid having to enter multiple critical sections!
    110  */
    111 RTDECL(int) RTCritSectEnterMultiple(unsigned cCritSects, PRTCRITSECT *papCritSects)
    112 #ifdef RTCRITSECT_STRICT
    113 {
    114     return RTCritSectEnterMultipleDebug(cCritSects, papCritSects, __FILE__, __LINE__, 0);
    115 }
    116 RTDECL(int) RTCritSectEnterMultipleDebug(unsigned cCritSects, PRTCRITSECT *papCritSects, const char *pszFile, unsigned uLine, RTUINTPTR uId)
    117 #endif /* RTCRITSECT_STRICT */
    118 {
    119     Assert(cCritSects > 0);
    120     Assert(VALID_PTR(papCritSects));
    121 
    122     /*
    123      * Try get them all.
    124      */
    125     int rc = VERR_INVALID_PARAMETER;
    126     unsigned i;
    127     for (i = 0; i < cCritSects; i++)
    128     {
    129 #ifdef RTCRITSECT_STRICT
    130         rc = RTCritSectTryEnterDebug(papCritSects[i], pszFile, uLine, uId);
     143 * Helper for RTSemSpinMutexTryRequest and RTSemSpinMutexRequest.
     144 *
     145 * This will check the current context and see if it's usable for
     146 * taking the semaphore, i.e. whether we can spin or sleep here.
     146 *
     147 * @returns VINF_SUCCESS or VERR_SEM_BAD_CONTEXT.
     148 * @param   pState      Output structure.
     149 */
     150static int rtSemSpinMutexEnter(RTSEMSPINMUTEXSTATE *pState, RTSEMSPINMUTEXINTERNAL *pThis)
     151{
     152    /** @todo Later #1: When entering in interrupt context and we're not able to
     153     *        wake up threads from it, we could try switch the lock into pure
     154     *        spinlock mode. This would require that there are no other threads
     155     *        currently waiting on it and that the RTSEMSPINMUTEX_FLAGS_IRQ_SAFE
     156     *        flag is set.
     157     *
     158     *        Later #2: Similarly, it is possible to turn on the
     159     *        RTSEMSPINMUTEX_FLAGS_IRQ_SAFE at run time if we manage to grab the
     160     *        semaphore ownership at interrupt time. We might want to try delay the
     161     *        RTSEMSPINMUTEX_FLAGS_IRQ_SAFE even, since we're fine if we get it...
     162     */
     163
     164#ifdef RT_OS_WINDOWS
     165    /*
     166     * NT: IRQL <= DISPATCH_LEVEL for waking up threads; IRQL < DISPATCH_LEVEL for sleeping.
     167     */
     168    pState->PreemptState.uchOldIrql = KeGetCurrentIrql();
     169    if (pState->PreemptState.uchOldIrql > DISPATCH_LEVEL)
     170        return VERR_SEM_BAD_CONTEXT;
     171
     172    if (pState->PreemptState.uchOldIrql >= DISPATCH_LEVEL)
     173        pState->fSpin = true;
     174    else
     175    {
     176        pState->fSpin = false;
     177        KeRaiseIrql(DISPATCH_LEVEL, &pState->PreemptState.uchOldIrql);
     178        Assert(pState->PreemptState.uchOldIrql < DISPATCH_LEVEL);
     179    }
     180
     181#elif defined(RT_OS_LINUX) || defined(RT_OS_OS2) || defined(RT_OS_SOLARIS)
     182    /*
     183     * OSes on which RTSemEventSignal can be called from any context.
     184     */
     185    pState->fSpin = !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
     186    if (RTThreadIsInInterrupt(NIL_RTTHREAD))
     187    {
     188        if (!(pThis->fFlags & RTSEMSPINMUTEX_FLAGS_IRQ_SAFE))
     189            return VERR_SEM_BAD_CONTEXT;
     190        pState->fSpin = true;
     191    }
     192    RTThreadPreemptDisable(&pState->PreemptState);
     193
     194#else /* PORTME: Check for context where we cannot wake up threads. */
     195    /*
     196     * Default: ASSUME thread can be woken up from all context except interrupt.
     197     *          ASSUME that we can go to sleep if preemption is enabled.
     198     */
     199    if (RTThreadIsInInterrupt(NIL_RTTHREAD))
     200        return VERR_SEM_BAD_CONTEXT;
     201    pState->fSpin = !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
     202    RTThreadPreemptDisable(&pState->PreemptState);
     203#endif
     204
     205    /*
     206     * Disable interrupts if necessary.
     207     */
     208    pState->fValidFlags = !!(pThis->fFlags & RTSEMSPINMUTEX_FLAGS_IRQ_SAFE);
     209    if (pState->fValidFlags)
     210        pState->fSavedFlags = ASMIntDisableFlags();
     211    else
     212        pState->fSavedFlags = 0;
     213
     214    return VINF_SUCCESS;
     215}
     216
     217
     218/**
     219 * Helper for RTSemSpinMutexTryRequest, RTSemSpinMutexRequest and
     220 * RTSemSpinMutexRelease.
     221 *
     222 * @param  pState
     223 */
     224DECL_FORCE_INLINE(void) rtSemSpinMutexLeave(RTSEMSPINMUTEXSTATE *pState)
     225{
     226    /*
     227     * Restore the interrupt flag.
     228     */
     229    if (pState->fValidFlags)
     230        ASMSetFlags(pState->fSavedFlags);
     231
     232#ifdef RT_OS_WINDOWS
     233    /*
     234     * NT: Lower the IRQL if we raised it.
     235     */
     236    if (pState->PreemptState.uchOldIrql < DISPATCH_LEVEL)
     237        KeLowerIrql(pState->PreemptState.uchOldIrql);
    131238#else
    132         rc = RTCritSectTryEnter(papCritSects[i]);
     239    /*
     240     * Default: Restore preemption.
     241     */
     242    RTThreadPreemptRestore(&pState->PreemptState);
    133243#endif
    134         if (RT_FAILURE(rc))
    135             break;
    136     }
     244}
     245
     246
     247RTDECL(int) RTSemSpinMutexTryRequest(RTSEMSPINMUTEX hSpinMtx)
     248{
     249    RTSEMSPINMUTEXINTERNAL *pThis = hSpinMtx;
     250    RTNATIVETHREAD          hSelf = RTThreadNativeSelf();
     251    RTSEMSPINMUTEXSTATE     State;
     252    bool                    fRc;
     253    int                     rc;
     254
     255    RTSEMSPINMUTEX_VALIDATE_RETURN(pThis);
     256
     257    /*
     258     * Check context, disable preemption and save flags if necessary.
     259     */
     260    rc = rtSemSpinMutexEnter(&State, pThis);
     261    if (RT_FAILURE(rc))
     262        return rc;
     263
     264    /*
     265     * Try take the ownership.
     266     */
     267    ASMAtomicCmpXchgHandle(&pThis->hOwner, hSelf, NIL_RTNATIVETHREAD, fRc);
     268    if (!fRc)
     269    {
     270        /* Busy, too bad. Check for attempts at nested access. */
     271        int rc = VERR_SEM_BUSY;
     272        if (RT_UNLIKELY(pThis->hOwner == hSelf))
     273        {
     274            AssertMsgFailed(("%p attempt at nested access\n", pThis));
     275            rc = VERR_SEM_NESTED;
     276        }
     277
     278        rtSemSpinMutexLeave(&State);
     279        return rc;
     280    }
     281
     282    /*
     283     * We're the semaphore owner.
     284     */
     285    ASMAtomicIncS32(&pThis->cLockers);
     286    pThis->SavedState = State;
     287    return VINF_SUCCESS;
     288}
     289RT_EXPORT_SYMBOL(RTSemSpinMutexTryRequest);
     290
     291
     292RTDECL(int) RTSemSpinMutexRequest(RTSEMSPINMUTEX hSpinMtx)
     293{
     294    RTSEMSPINMUTEXINTERNAL *pThis = hSpinMtx;
     295    RTNATIVETHREAD          hSelf = RTThreadNativeSelf();
     296    RTSEMSPINMUTEXSTATE     State;
     297    bool                    fRc;
     298    int                     rc;
     299
     300    RTSEMSPINMUTEX_VALIDATE_RETURN(pThis);
     301
     302    /*
     303     * Check context, disable preemption and save flags if necessary.
     304     */
     305    rc = rtSemSpinMutexEnter(&State, pThis);
      306    if (RT_FAILURE(rc))
      307        return rc;
    139308
    140309    /*
    141      * The retry loop.
    142      */
    143     for (unsigned cTries = 0; ; cTries++)
    144     {
     310     * Try take the ownership.
     311     */
     312    ASMAtomicIncS32(&pThis->cLockers);
     313    ASMAtomicCmpXchgHandle(&pThis->hOwner, hSelf, NIL_RTNATIVETHREAD, fRc);
     314    if (!fRc)
     315    {
     316        uint32_t cSpins;
     317
    145318        /*
    146          * We've failed, release any locks we might have gotten. ('i' is the lock that failed btw.)
     319         * It's busy. Check if it's an attempt at nested access.
    147320         */
    148         unsigned j = i;
    149         while (j-- > 0)
     321        if (RT_UNLIKELY(pThis->hOwner == hSelf))
    150322        {
    151             int rc2 = RTCritSectLeave(papCritSects[j]);
    152             AssertRC(rc2);
      323            AssertMsgFailed(("%p attempt at nested access\n", pThis));
     324            rtSemSpinMutexLeave(&State);
     325            return VERR_SEM_NESTED;
    153326        }
    154         if (rc != VERR_SEM_BUSY)
    155             return rc;
    156327
    157328        /*
    158          * Try prevent any theoretical synchronous races with other threads.
     329         * Ok, we have to wait.
    159330         */
    160         Assert(cTries < 1000000);
    161         if (cTries > 10000)
    162             RTThreadSleep(cTries % 3);
    163 
    164         /*
    165          * Wait on the one we failed to get.
    166          */
    167 #ifdef RTCRITSECT_STRICT
    168         rc = RTCritSectEnterDebug(papCritSects[i], pszFile, uLine, uId);
    169 #else
    170         rc = RTCritSectEnter(papCritSects[i]);
    171 #endif
    172         if (RT_FAILURE(rc))
    173             return rc;
    174 
    175         /*
    176          * Try take the others.
    177          */
    178         for (j = 0; j < cCritSects; j++)
     331        for (cSpins = 0;; cSpins++)
    179332        {
    180             if (j != i)
     333            ASMAtomicCmpXchgHandle(&pThis->hOwner, hSelf, NIL_RTNATIVETHREAD, fRc);
     334            if (fRc)
     335                break;
     336
     337            if (RT_UNLIKELY(pThis->u32Magic != RTSEMSPINMUTEX_MAGIC))
    181338            {
    182 #ifdef RTCRITSECT_STRICT
    183                 rc = RTCritSectTryEnterDebug(papCritSects[j], pszFile, uLine, uId);
    184 #else
    185                 rc = RTCritSectTryEnter(papCritSects[j]);
    186 #endif
    187                 if (RT_FAILURE(rc))
    188                     break;
     339                rtSemSpinMutexLeave(&State);
     340                return VERR_SEM_DESTROYED;
     341            }
     342
     343            if (    State.fSpin
     344                ||  (cSpins & 15) != 15 /* spin a bit everytime we wake up. */)
     345                ASMNopPause();
     346            else
     347            {
     348                rtSemSpinMutexLeave(&State);
     349
     350                rc = RTSemEventWait(pThis->hEventSem, RT_INDEFINITE_WAIT);
     351                ASMCompilerBarrier();
     352                if (RT_SUCCESS(rc))
     353                    AssertReturn(pThis->u32Magic == RTSEMSPINMUTEX_MAGIC, VERR_SEM_DESTROYED);
     354                else if (rc == VERR_INTERRUPTED)
     355                    AssertRC(rc);       /* shouldn't happen */
     356                else
     357                {
     358                    AssertRC(rc);
     359                    return rc;
     360                }
     361
     362                rc = rtSemSpinMutexEnter(&State, pThis);
     363                AssertRCReturn(rc, rc);
    189364            }
    190365        }
    191         if (RT_SUCCESS(rc))
    192             return rc;
    193 
    194         /*
    195          * We failed.
    196          */
    197         if (i > j)
    198         {
    199             int rc2 = RTCritSectLeave(papCritSects[i]);
    200             AssertRC(rc2);
    201         }
    202         i = j;
    203     }
    204 }
    205 RT_EXPORT_SYMBOL(RTCritSectEnterMultiple);
    206 
    207 
    208 /**
    209  * Try enter a critical section.
    210  *
    211  * @returns VINF_SUCCESS on success.
    212  * @returns VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
    213  * @returns VERR_SEM_DESTROYED if RTCritSectDelete was called while waiting.
    214  * @param   pCritSect   The critical section.
    215  */
    216 RTDECL(int) RTCritSectTryEnter(PRTCRITSECT pCritSect)
    217 #ifdef RTCRITSECT_STRICT
    218 {
    219     return RTCritSectTryEnterDebug(pCritSect, __FILE__, __LINE__, 0);
    220 }
    221 RTDECL(int) RTCritSectTryEnterDebug(PRTCRITSECT pCritSect, const char *pszFile, unsigned uLine, RTUINTPTR uId)
    222 #endif /* RTCRITSECT_STRICT */
    223 {
    224     Assert(pCritSect);
    225     Assert(pCritSect->u32Magic == RTCRITSECT_MAGIC);
    226     RTNATIVETHREAD  NativeThreadSelf = RTThreadNativeSelf();
    227 #ifdef RTCRITSECT_STRICT
    228     RTTHREAD        ThreadSelf = RTThreadSelf();
    229     if (ThreadSelf == NIL_RTTHREAD)
    230         RTThreadAdopt(RTTHREADTYPE_DEFAULT, 0, NULL, &ThreadSelf);
    231 #endif
    232 
    233     /*
    234      * Try take the lock. (cLockers is -1 if it's free)
    235      */
    236     if (!ASMAtomicCmpXchgS32(&pCritSect->cLockers, 0, -1))
    237     {
    238         /*
    239          * Somebody is owning it (or will be soon). Perhaps it's us?
    240          */
    241         if (pCritSect->NativeThreadOwner == NativeThreadSelf)
    242         {
    243             if (!(pCritSect->fFlags & RTCRITSECT_FLAGS_NO_NESTING))
    244             {
    245                 ASMAtomicIncS32(&pCritSect->cLockers);
    246                 pCritSect->cNestings++;
    247                 return VINF_SUCCESS;
    248             }
    249             AssertMsgFailed(("Nested entry of critsect %p\n", pCritSect));
    250             return VERR_SEM_NESTED;
    251         }
    252         return VERR_SEM_BUSY;
    253     }
    254 
    255     /*
    256      * First time
    257      */
    258     pCritSect->cNestings = 1;
    259     ASMAtomicWriteHandle(&pCritSect->NativeThreadOwner, NativeThreadSelf);
    260 #ifdef RTCRITSECT_STRICT
    261     pCritSect->Strict.pszEnterFile = pszFile;
    262     pCritSect->Strict.u32EnterLine = uLine;
    263     pCritSect->Strict.uEnterId     = uId;
    264     ASMAtomicWriteHandle(&pCritSect->Strict.ThreadOwner, ThreadSelf);
    265 #endif
    266 
     366    }
     367
     368    /*
     369     * We're the semaphore owner.
     370     */
     371    pThis->SavedState = State;
    267372    return VINF_SUCCESS;
    268373}
    269 RT_EXPORT_SYMBOL(RTCritSectTryEnter);
    270 
    271 
    272 /**
    273  * Enter a critical section.
    274  *
    275  * @returns VINF_SUCCESS on success.
    276  * @returns VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
    277  * @returns VERR_SEM_DESTROYED if RTCritSectDelete was called while waiting.
    278  * @param   pCritSect   The critical section.
    279  */
    280 RTDECL(int) RTCritSectEnter(PRTCRITSECT pCritSect)
    281 #ifdef RTCRITSECT_STRICT
    282 {
    283     return RTCritSectEnterDebug(pCritSect, __FILE__, __LINE__, 0);
    284 }
    285 RTDECL(int) RTCritSectEnterDebug(PRTCRITSECT pCritSect, const char *pszFile, unsigned uLine, RTUINTPTR uId)
    286 #endif /* RTCRITSECT_STRICT */
    287 {
    288     Assert(pCritSect);
    289     Assert(pCritSect->u32Magic == RTCRITSECT_MAGIC);
    290     RTNATIVETHREAD  NativeThreadSelf = RTThreadNativeSelf();
    291 #ifdef RTCRITSECT_STRICT
    292     RTTHREAD        ThreadSelf = RTThreadSelf();
    293     if (ThreadSelf == NIL_RTTHREAD)
    294         RTThreadAdopt(RTTHREADTYPE_DEFAULT, 0, NULL, &ThreadSelf);
    295 #endif
    296 
    297     /** If the critical section has already been destroyed, then inform the caller. */
    298     if (pCritSect->u32Magic != RTCRITSECT_MAGIC)
    299         return VERR_SEM_DESTROYED;
    300 
    301     /*
    302      * Increment the waiter counter.
    303      * This becomes 0 when the section is free.
    304      */
    305     if (ASMAtomicIncS32(&pCritSect->cLockers) > 0)
    306     {
    307         /*
    308          * Nested?
    309          */
    310         if (pCritSect->NativeThreadOwner == NativeThreadSelf)
    311         {
    312             if (!(pCritSect->fFlags & RTCRITSECT_FLAGS_NO_NESTING))
    313             {
    314                 pCritSect->cNestings++;
    315                 return VINF_SUCCESS;
    316             }
    317             else
    318             {
    319                 AssertMsgFailed(("Nested entry of critsect %p\n", pCritSect));
    320                 ASMAtomicDecS32(&pCritSect->cLockers);
    321                 return VERR_SEM_NESTED;
    322             }
    323         }
    324 
    325         for (;;)
    326         {
    327 #ifdef RTCRITSECT_STRICT
    328             RTThreadBlocking(ThreadSelf, RTTHREADSTATE_CRITSECT, (uintptr_t)pCritSect, pszFile, uLine, uId);
    329 #endif
    330             int rc = RTSemEventWait(pCritSect->EventSem, RT_INDEFINITE_WAIT);
    331 #ifdef RTCRITSECT_STRICT
    332             RTThreadUnblocked(ThreadSelf, RTTHREADSTATE_CRITSECT);
    333 #endif
    334             if (pCritSect->u32Magic != RTCRITSECT_MAGIC)
    335                 return VERR_SEM_DESTROYED;
    336             if (rc == VINF_SUCCESS)
    337                 break;
    338             AssertMsg(rc == VERR_TIMEOUT || rc == VERR_INTERRUPTED, ("rc=%Rrc\n", rc));
    339         }
    340         AssertMsg(pCritSect->NativeThreadOwner == NIL_RTNATIVETHREAD, ("pCritSect->NativeThreadOwner=%p\n", pCritSect->NativeThreadOwner));
    341     }
    342 
    343     /*
    344      * First time
    345      */
    346     pCritSect->cNestings = 1;
    347     ASMAtomicWriteHandle(&pCritSect->NativeThreadOwner, NativeThreadSelf);
    348 #ifdef RTCRITSECT_STRICT
    349     pCritSect->Strict.pszEnterFile = pszFile;
    350     pCritSect->Strict.u32EnterLine = uLine;
    351     pCritSect->Strict.uEnterId     = uId;
    352     ASMAtomicWriteHandle(&pCritSect->Strict.ThreadOwner, ThreadSelf);
    353     RTThreadWriteLockInc(ThreadSelf);
    354 #endif
    355 
     374RT_EXPORT_SYMBOL(RTSemSpinMutexRequest);
     375
     376
     377RTDECL(int) RTSemSpinMutexRelease(RTSEMSPINMUTEX hSpinMtx)
     378{
     379    RTSEMSPINMUTEXINTERNAL *pThis = hSpinMtx;
     380    RTNATIVETHREAD          hSelf = RTThreadNativeSelf();
     381    int32_t                 cLockers;
     382    RTSEMSPINMUTEXSTATE     State;
     383    bool                    fRc;
     384
     385    RTSEMSPINMUTEX_VALIDATE_RETURN(pThis);
     386
     387    /*
     388     * Get the saved state and try release the semaphore.
     389     */
     390    State = pThis->SavedState;
     391    ASMCompilerBarrier();
     392    ASMAtomicCmpXchgHandle(&pThis->hOwner, NIL_RTNATIVETHREAD, hSelf, fRc);
     393    AssertReturn(fRc, VERR_NOT_OWNER);
     394
     395    cLockers = ASMAtomicDecS32(&pThis->cLockers);
             rtSemSpinMutexLeave(&State); /* restore the IRQL / interrupt-flag / preemption state saved at request time */
     396    if (cLockers > 0)
     397    {
     398        int rc = RTSemEventSignal(pThis->hEventSem);
     399        AssertReleaseMsg(RT_SUCCESS(rc), ("RTSemEventSignal -> %Rrc\n", rc));
     400    }
    356401    return VINF_SUCCESS;
    357402}
    358 RT_EXPORT_SYMBOL(RTCritSectEnter);
    359 
    360 
    361 /**
    362  * Leave a critical section.
    363  *
    364  * @returns VINF_SUCCESS.
    365  * @param   pCritSect   The critical section.
    366  */
    367 RTDECL(int) RTCritSectLeave(PRTCRITSECT pCritSect)
    368 {
    369     /*
    370      * Assert ownership and so on.
    371      */
    372     Assert(pCritSect);
    373     Assert(pCritSect->u32Magic == RTCRITSECT_MAGIC);
    374     Assert(pCritSect->cNestings > 0);
    375     Assert(pCritSect->cLockers >= 0);
    376     Assert(pCritSect->NativeThreadOwner == RTThreadNativeSelf());
    377 
    378     /*
    379      * Decrement nestings, if <= 0 when we'll release the critsec.
    380      */
    381     pCritSect->cNestings--;
    382     if (pCritSect->cNestings > 0)
    383         ASMAtomicDecS32(&pCritSect->cLockers);
    384     else
    385     {
    386         /*
    387          * Set owner to zero.
    388          * Decrement waiters, if >= 0 then we have to wake one of them up.
    389          */
    390 #ifdef RTCRITSECT_STRICT
    391         if (pCritSect->Strict.ThreadOwner != NIL_RTTHREAD) /* May happen for PDMCritSects when entering GC/R0. */
    392             RTThreadWriteLockDec(pCritSect->Strict.ThreadOwner);
    393         ASMAtomicWriteHandle(&pCritSect->Strict.ThreadOwner, NIL_RTTHREAD);
    394 #endif
    395         ASMAtomicWriteHandle(&pCritSect->NativeThreadOwner, NIL_RTNATIVETHREAD);
    396         if (ASMAtomicDecS32(&pCritSect->cLockers) >= 0)
    397         {
    398             int rc = RTSemEventSignal(pCritSect->EventSem);
    399             AssertReleaseMsg(RT_SUCCESS(rc), ("RTSemEventSignal -> %Rrc\n", rc));
    400         }
    401     }
    402     return VINF_SUCCESS;
    403 }
    404 RT_EXPORT_SYMBOL(RTCritSectLeave);
    405 
    406 
    407 /**
    408  * Leave multiple critical sections.
    409  *
    410  * @returns VINF_SUCCESS.
    411  * @param   cCritSects      Number of critical sections in the array.
    412  * @param   papCritSects    Array of critical section pointers.
    413  */
    414 RTDECL(int) RTCritSectLeaveMultiple(unsigned cCritSects, PRTCRITSECT *papCritSects)
    415 {
    416     int rc = VINF_SUCCESS;
    417     for (unsigned i = 0; i < cCritSects; i++)
    418     {
    419         int rc2 = RTCritSectLeave(papCritSects[i]);
    420         if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
    421             rc = rc2;
    422     }
     403RT_EXPORT_SYMBOL(RTSemSpinMutexRelease);
     404
     405
     406RTDECL(int) RTSemSpinMutexDestroy(RTSEMSPINMUTEX hSpinMtx)
     407{
     408    RTSEMSPINMUTEXINTERNAL *pThis;
     409    RTSEMEVENT              hEventSem;
     410    int                     rc;
     411
     412    if (hSpinMtx == NIL_RTSEMSPINMUTEX)
     413        return VINF_SUCCESS;
     414    pThis = hSpinMtx;
     415    RTSEMSPINMUTEX_VALIDATE_RETURN(pThis);
     416
     417    /* No destruction races allowed! */
     418    AssertMsg(   pThis->cLockers  == -1
     419              && pThis->hOwner    == NIL_RTNATIVETHREAD,
     420              ("pThis=%p cLockers=%d hOwner=%p\n", pThis, pThis->cLockers, pThis->hOwner));
     421
     422    /*
     423     * Invalidate the structure, free the mutex and free the structure.
     424     */
     425    ASMAtomicWriteU32(&pThis->u32Magic, RTSEMSPINMUTEX_MAGIC_DEAD);
     426    hEventSem        = pThis->hEventSem;
     427    pThis->hEventSem = NIL_RTSEMEVENT;
     428    rc = RTSemEventDestroy(hEventSem); AssertRC(rc);
     429
     430    RTMemFree(pThis);
    423431    return rc;
    424432}
    425 RT_EXPORT_SYMBOL(RTCritSectLeaveMultiple);
    426 
    427 
    428 #ifndef RTCRITSECT_STRICT
    429 RTDECL(int) RTCritSectEnterDebug(PRTCRITSECT pCritSect, const char *pszFile, unsigned uLine, RTUINTPTR uId)
    430 {
    431     return RTCritSectEnter(pCritSect);
    432 }
    433 
    434 RTDECL(int) RTCritSectTryEnterDebug(PRTCRITSECT pCritSect, const char *pszFile, unsigned uLine, RTUINTPTR uId)
    435 {
    436     return RTCritSectTryEnter(pCritSect);
    437 }
    438 
    439 RTDECL(int) RTCritSectEnterMultipleDebug(unsigned cCritSects, PRTCRITSECT *papCritSects, const char *pszFile, unsigned uLine, RTUINTPTR uId)
    440 {
    441     return RTCritSectEnterMultiple(cCritSects, papCritSects);
    442 }
    443 #endif /* RT_STRICT */
    444 RT_EXPORT_SYMBOL(RTCritSectEnterDebug);
    445 RT_EXPORT_SYMBOL(RTCritSectTryEnterDebug);
    446 RT_EXPORT_SYMBOL(RTCritSectEnterMultipleDebug);
    447 
    448 
    449 /**
    450  * Deletes a critical section.
    451  *
    452  * @returns VINF_SUCCESS.
    453  * @param   pCritSect   The critical section.
    454  */
    455 RTDECL(int) RTCritSectDelete(PRTCRITSECT pCritSect)
    456 {
    457     /*
    458      * Assert free waiters and so on.
    459      */
    460     Assert(pCritSect);
    461     Assert(pCritSect->u32Magic == RTCRITSECT_MAGIC);
    462     Assert(pCritSect->cNestings == 0);
    463     Assert(pCritSect->cLockers == -1);
    464     Assert(pCritSect->NativeThreadOwner == NIL_RTNATIVETHREAD);
    465 
    466     /*
    467      * Invalidate the structure and free the mutex.
    468      * In case someone is waiting we'll signal the semaphore cLockers + 1 times.
    469      */
    470     ASMAtomicWriteU32(&pCritSect->u32Magic, ~RTCRITSECT_MAGIC);
    471     pCritSect->fFlags           = 0;
    472     pCritSect->cNestings        = 0;
    473     pCritSect->NativeThreadOwner= NIL_RTNATIVETHREAD;
    474     RTSEMEVENT EventSem = pCritSect->EventSem;
    475     pCritSect->EventSem         = NULL;
    476     while (pCritSect->cLockers-- >= 0)
    477         RTSemEventSignal(EventSem);
    478     ASMAtomicWriteS32(&pCritSect->cLockers, -1);
    479     int rc = RTSemEventDestroy(EventSem);
    480     AssertRC(rc);
    481 
    482     return rc;
    483 }
    484 RT_EXPORT_SYMBOL(RTCritSectDelete);
    485 
     433RT_EXPORT_SYMBOL(RTSemSpinMutexDestroy);
     434
  • trunk/src/VBox/Runtime/r0drv/generic/semspinmutex-r0drv-generic.cpp

    r21515 r21540  
    11/* $Id$ */
    22/** @file
    3  * IPRT - Critical Section, Generic.
     3 * IPRT - Spinning Mutex Semaphores, Ring-0 Driver, Generic.
    44 */
    55
    66/*
    7  * Copyright (C) 2006-2007 Sun Microsystems, Inc.
     7 * Copyright (C) 2009 Sun Microsystems, Inc.
    88 *
    99 * This file is part of VirtualBox Open Source Edition (OSE), as
     
    3333*   Header Files                                                               *
    3434*******************************************************************************/
    35 #include <iprt/critsect.h>
     35#ifdef RT_OS_WINDOWS
     36# include "../nt/the-nt-kernel.h"
     37#endif
    3638#include "internal/iprt.h"
    3739
     
    4143#include <iprt/asm.h>
    4244#include <iprt/err.h>
    43 #include "internal/thread.h"
    44 #include "internal/strict.h"
    45 
    46 
    47 /* in strict mode we're redefining this, so undefine it now for the implementation. */
    48 #undef RTCritSectEnter
    49 #undef RTCritSectTryEnter
    50 #undef RTCritSectEnterMultiple
    51 
    52 
    53 /**
    54  * Initialize a critical section.
    55  */
    56 RTDECL(int) RTCritSectInit(PRTCRITSECT pCritSect)
    57 {
    58     return RTCritSectInitEx(pCritSect, 0);
    59 }
    60 RT_EXPORT_SYMBOL(RTCritSectInit);
    61 
    62 
    63 /**
    64  * Initialize a critical section.
    65  *
    66  * @returns iprt status code.
    67  * @param   pCritSect   Pointer to the critical section structure.
    68  * @param   fFlags      Flags, any combination of the RTCRITSECT_FLAGS \#defines.
    69  */
    70 RTDECL(int) RTCritSectInitEx(PRTCRITSECT pCritSect, uint32_t fFlags)
    71 {
    72     /*
    73      * Initialize the structure and
    74      */
    75     pCritSect->u32Magic             = RTCRITSECT_MAGIC;
    76     pCritSect->fFlags               = fFlags;
    77     pCritSect->cNestings            = 0;
    78     pCritSect->cLockers             = -1;
    79     pCritSect->NativeThreadOwner    = NIL_RTNATIVETHREAD;
    80     pCritSect->Strict.ThreadOwner   = NIL_RTTHREAD;
    81     pCritSect->Strict.pszEnterFile  = NULL;
    82     pCritSect->Strict.u32EnterLine  = 0;
    83     pCritSect->Strict.uEnterId      = 0;
    84     int rc = RTSemEventCreate(&pCritSect->EventSem);
      45#include "internal/magics.h"
     46
     47
     48/*******************************************************************************
     49*   Structures and Typedefs                                                    *
     50*******************************************************************************/
     51typedef struct RTSEMSPINMUTEXINTERNAL
     52{
      53    /** Magic value (RTSEMSPINMUTEX_MAGIC)
      54     * RTSEMSPINMUTEX_MAGIC is the value of an initialized & operational mutex. */
     55    uint32_t volatile       u32Magic;
     56    /** Number of lockers.
     57     * -1 if the section is free. */
     58    int32_t volatile        cLockers;
     59    /** The owner thread. */
     60    RTNATIVETHREAD volatile hOwner;
     61    /** Flags. This is a combination of RTSEMSPINMUTEX_FLAGS_XXX and
     62     *  RTSEMSPINMUTEX_INT_FLAGS_XXX. */
     63    uint32_t volatile       fFlags;
     64    /** The semaphore to block on. */
     65    RTSEMEVENT              hEventSem;
     66    /** Saved flags register of the owner. (AMD64 & x86) */
     67    RTCCUINTREG             fSavedFlags;
     68    /** The preemption disable state of the owner. */
     69    RTTHREADPREEMPTSTATE    PreemptState;
     70} RTSEMSPINMUTEXINTERNAL;
     71
     72
     73/*******************************************************************************
     74*   Defined Constants And Macros                                               *
     75*******************************************************************************/
     76//#define RTSEMSPINMUTEX_INT_FLAGS_MUST
     77
     78/** Validates the handle, returning if invalid. */
     79#define RTSEMSPINMUTEX_VALIDATE_RETURN(pThis) \
     80    do \
     81    { \
     82        AssertPtr(pThis); \
     83        uint32_t u32Magic = (pThis)->u32Magic; \
     84        if (u32Magic != RTSEMSPINMUTEX_MAGIC) \
     85        { \
     86            AssertMsgFailed(("u32Magic=%#x pThis=%p\n", u32Magic, pThis)); \
     87            return u32Magic == RTSEMSPINMUTEX_MAGIC_DEAD ? VERR_SEM_DESTROYED : VERR_INVALID_HANDLE; \
     88        } \
     89    } while (0)
     90
     91
     92RTDECL(int) RTSemSpinMutexCreate(PRTSEMSPINMUTEX phSpinMtx, uint32_t fFlags)
     93{
     94    AssertReturn(!(fFlags & ~(RTSEMSPINMUTEX_FLAGS_VALID_MASK)), VERR_INVALID_PARAMETER);
     95    AssertPtr(phSpinMtx);
     96
     97    /*
     98     * Allocate and initialize the structure.
     99     */
     100    RTSEMSPINMUTEXINTERNAL *pThis = (RTSEMSPINMUTEXINTERNAL *)RTMemAllocZ(sizeof(*pThis));
     101    if (!pThis)
     102        return VERR_NO_MEMORY;
     103    pThis->u32Magic             = RTSEMSPINMUTEX_MAGIC;
     104    pThis->fFlags               = fFlags;
     105    pThis->cLockers             = -1;
      106    pThis->hOwner               = NIL_RTNATIVETHREAD;
     107    int rc = RTSemEventCreate(&pThis->hEventSem);
    85108    if (RT_SUCCESS(rc))
     109    {
     110        *phSpinMtx = pThis;
    86111        return VINF_SUCCESS;
    87 
    88     AssertRC(rc);
    89     pCritSect->EventSem = NULL;
    90     pCritSect->u32Magic = (uint32_t)rc;
     112    }
     113
     114    RTMemFree(pThis);
    91115    return rc;
    92116}
    93 RT_EXPORT_SYMBOL(RTCritSectInitEx);
    94 
    95 
    96 /**
    97  * Enter multiple critical sections.
    98  *
    99  * This function will enter ALL the specified critical sections before returning.
    100  *
    101  * @returns VINF_SUCCESS on success.
    102  * @returns VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
    103  * @returns VERR_SEM_DESTROYED if RTCritSectDelete was called while waiting.
    104  * @param   cCritSects      Number of critical sections in the array.
    105  * @param   papCritSects    Array of critical section pointers.
    106  *
    107  * @remark  Please note that this function will not necessarily come out favourable in a
    108  *          fight with other threads which are using the normal RTCritSectEnter() function.
    109  *          Therefore, avoid having to enter multiple critical sections!
    110  */
    111 RTDECL(int) RTCritSectEnterMultiple(unsigned cCritSects, PRTCRITSECT *papCritSects)
    112 #ifdef RTCRITSECT_STRICT
    113 {
    114     return RTCritSectEnterMultipleDebug(cCritSects, papCritSects, __FILE__, __LINE__, 0);
    115 }
    116 RTDECL(int) RTCritSectEnterMultipleDebug(unsigned cCritSects, PRTCRITSECT *papCritSects, const char *pszFile, unsigned uLine, RTUINTPTR uId)
    117 #endif /* RTCRITSECT_STRICT */
    118 {
    119     Assert(cCritSects > 0);
    120     Assert(VALID_PTR(papCritSects));
    121 
    122     /*
    123      * Try get them all.
    124      */
    125     int rc = VERR_INVALID_PARAMETER;
    126     unsigned i;
    127     for (i = 0; i < cCritSects; i++)
    128     {
    129 #ifdef RTCRITSECT_STRICT
    130         rc = RTCritSectTryEnterDebug(papCritSects[i], pszFile, uLine, uId);
     117RT_EXPORT_SYMBOL(RTSemSpinMutexCreate);
     118
     119
     120
     121RTDECL(int) RTSemSpinMutexTryRequest(RTSEMSPINMUTEX hSpinMtx)
     122{
     123    RTSEMSPINMUTEXINTERNAL *pThis       = hSpinMtx;
     124    RTNATIVETHREAD          hSelf       = RTThreadNativeSelf();
     125    RTCCUINTREG             fSavedFlags = 0;
     126    RTTHREADPREEMPTSTATE    PreemptState;
     127    uint32_t                fFlags;
     128    RTSEMSPINMUTEX_VALIDATE_RETURN(pThis);
     129
     130    /*
     131     * Check context, disable preemption and save flags if necessary.
     132     */
     133#ifdef RT_OS_WINDOWS
     134    /* NT: IRQL <= DISPATCH_LEVEL for waking up threads. */
     135    PreemptState.uchOldIrql = KeGetCurrentIrql();
     136    if (PreemptState.uchOldIrql > DISPATCH_LEVEL)
     137        return VERR_SEM_BAD_CONTEXT; /** @todo Can be optimized by switching the thing into spinlock mode. But later. */
     138    if (PreemptState.uchOldIrql < DISPATCH_LEVEL)
     139        KeRaiseIrql(DISPATCH_LEVEL, &PreemptState.uchOldIrql);
     140#else /* PORTME: Check for context where we cannot wake up threads. */
     141    RTThreadPreemptDisable(&PreemptState);
     142#endif
      143    fFlags = pThis->fFlags;
     144    if (fFlags & RTSEMSPINMUTEX_FLAGS_IRQ_SAFE)
     145        fSavedFlags = ASMIntDisableFlags();
     146
     147    /*
     148     * Try take the lock. (cLockers is -1 if it's free)
     149     */
     150    if (!ASMAtomicCmpXchgS32(&pThis->cLockers, 0, -1))
     151    {
     152        /* busy, too bad. */
     153        int rc = VERR_SEM_BUSY;
      154        if (RT_UNLIKELY(pThis->hOwner == hSelf))
      155        {
      156            AssertMsgFailed(("%p attempt at nested access\n", pThis));
      157            rc = VERR_SEM_NESTED;
     158        }
     159
     160        /*
     161         * Restore preemption and flags.
     162         */
     163        if (fFlags & RTSEMSPINMUTEX_FLAGS_IRQ_SAFE)
     164            ASMSetFlags(fSavedFlags);
     165#ifdef RT_OS_WINDOWS
     166        if (PreemptState.uchOldIrql < DISPATCH_LEVEL)
     167            KeLowerIrql(PreemptState.uchOldIrql);
    131168#else
    132         rc = RTCritSectTryEnter(papCritSects[i]);
      169        RTThreadPreemptRestore(&PreemptState);
    133170#endif
    134         if (RT_FAILURE(rc))
    135             break;
    136     }
    137     if (RT_SUCCESS(rc))
    138171        return rc;
    139 
    140     /*
    141      * The retry loop.
    142      */
    143     for (unsigned cTries = 0; ; cTries++)
    144     {
    145         /*
    146          * We've failed, release any locks we might have gotten. ('i' is the lock that failed btw.)
    147          */
    148         unsigned j = i;
    149         while (j-- > 0)
    150         {
    151             int rc2 = RTCritSectLeave(papCritSects[j]);
    152             AssertRC(rc2);
    153         }
    154         if (rc != VERR_SEM_BUSY)
    155             return rc;
    156 
    157         /*
    158          * Try prevent any theoretical synchronous races with other threads.
    159          */
    160         Assert(cTries < 1000000);
    161         if (cTries > 10000)
    162             RTThreadSleep(cTries % 3);
    163 
    164         /*
    165          * Wait on the one we failed to get.
    166          */
    167 #ifdef RTCRITSECT_STRICT
    168         rc = RTCritSectEnterDebug(papCritSects[i], pszFile, uLine, uId);
    169 #else
    170         rc = RTCritSectEnter(papCritSects[i]);
    171 #endif
    172         if (RT_FAILURE(rc))
    173             return rc;
    174 
    175         /*
    176          * Try take the others.
    177          */
    178         for (j = 0; j < cCritSects; j++)
    179         {
    180             if (j != i)
    181             {
    182 #ifdef RTCRITSECT_STRICT
    183                 rc = RTCritSectTryEnterDebug(papCritSects[j], pszFile, uLine, uId);
    184 #else
    185                 rc = RTCritSectTryEnter(papCritSects[j]);
    186 #endif
    187                 if (RT_FAILURE(rc))
    188                     break;
    189             }
    190         }
    191         if (RT_SUCCESS(rc))
    192             return rc;
    193 
    194         /*
    195          * We failed.
    196          */
    197         if (i > j)
    198         {
    199             int rc2 = RTCritSectLeave(papCritSects[i]);
    200             AssertRC(rc2);
    201         }
    202         i = j;
    203     }
    204 }
    205 RT_EXPORT_SYMBOL(RTCritSectEnterMultiple);
    206 
    207 
    208 /**
    209  * Try enter a critical section.
    210  *
    211  * @returns VINF_SUCCESS on success.
    212  * @returns VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
    213  * @returns VERR_SEM_DESTROYED if RTCritSectDelete was called while waiting.
    214  * @param   pCritSect   The critical section.
    215  */
    216 RTDECL(int) RTCritSectTryEnter(PRTCRITSECT pCritSect)
    217 #ifdef RTCRITSECT_STRICT
    218 {
    219     return RTCritSectTryEnterDebug(pCritSect, __FILE__, __LINE__, 0);
    220 }
    221 RTDECL(int) RTCritSectTryEnterDebug(PRTCRITSECT pCritSect, const char *pszFile, unsigned uLine, RTUINTPTR uId)
    222 #endif /* RTCRITSECT_STRICT */
     172    }
     173
     174    /*
     175     * We've got the lock.
     176     */
     177    ASMAtomicWriteHandle(&pThis->hOwner, hSelf);
     178    pThis->fSavedFlags = fSavedFlags;
     179    pThis->PreemptState = PreemptState;
     180
     181    return VINF_SUCCESS;
     182}
     183RT_EXPORT_SYMBOL(RTSemSpinMutexTryRequest);
     184
     185
     186RTDECL(int) RTSemSpinMutexRequest(PRTCRITSECT pCritSect)
    223187{
    224188    Assert(pCritSect);
    225189    Assert(pCritSect->u32Magic == RTCRITSECT_MAGIC);
    226190    RTNATIVETHREAD  NativeThreadSelf = RTThreadNativeSelf();
    227 #ifdef RTCRITSECT_STRICT
    228     RTTHREAD        ThreadSelf = RTThreadSelf();
    229     if (ThreadSelf == NIL_RTTHREAD)
    230         RTThreadAdopt(RTTHREADTYPE_DEFAULT, 0, NULL, &ThreadSelf);
    231 #endif
    232 
    233     /*
    234      * Try take the lock. (cLockers is -1 if it's free)
    235      */
    236     if (!ASMAtomicCmpXchgS32(&pCritSect->cLockers, 0, -1))
    237     {
    238         /*
    239          * Somebody is owning it (or will be soon). Perhaps it's us?
    240          */
    241         if (pCritSect->NativeThreadOwner == NativeThreadSelf)
    242         {
    243             if (!(pCritSect->fFlags & RTCRITSECT_FLAGS_NO_NESTING))
    244             {
    245                 ASMAtomicIncS32(&pCritSect->cLockers);
    246                 pCritSect->cNestings++;
    247                 return VINF_SUCCESS;
    248             }
    249             AssertMsgFailed(("Nested entry of critsect %p\n", pCritSect));
    250             return VERR_SEM_NESTED;
    251         }
    252         return VERR_SEM_BUSY;
    253     }
    254 
    255     /*
    256      * First time
    257      */
    258     pCritSect->cNestings = 1;
    259     ASMAtomicWriteHandle(&pCritSect->NativeThreadOwner, NativeThreadSelf);
    260 #ifdef RTCRITSECT_STRICT
    261     pCritSect->Strict.pszEnterFile = pszFile;
    262     pCritSect->Strict.u32EnterLine = uLine;
    263     pCritSect->Strict.uEnterId     = uId;
    264     ASMAtomicWriteHandle(&pCritSect->Strict.ThreadOwner, ThreadSelf);
    265 #endif
    266 
    267     return VINF_SUCCESS;
    268 }
    269 RT_EXPORT_SYMBOL(RTCritSectTryEnter);
    270 
    271 
    272 /**
    273  * Enter a critical section.
    274  *
    275  * @returns VINF_SUCCESS on success.
    276  * @returns VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
    277  * @returns VERR_SEM_DESTROYED if RTCritSectDelete was called while waiting.
    278  * @param   pCritSect   The critical section.
    279  */
    280 RTDECL(int) RTCritSectEnter(PRTCRITSECT pCritSect)
    281 #ifdef RTCRITSECT_STRICT
    282 {
    283     return RTCritSectEnterDebug(pCritSect, __FILE__, __LINE__, 0);
    284 }
    285 RTDECL(int) RTCritSectEnterDebug(PRTCRITSECT pCritSect, const char *pszFile, unsigned uLine, RTUINTPTR uId)
    286 #endif /* RTCRITSECT_STRICT */
    287 {
    288     Assert(pCritSect);
    289     Assert(pCritSect->u32Magic == RTCRITSECT_MAGIC);
    290     RTNATIVETHREAD  NativeThreadSelf = RTThreadNativeSelf();
    291 #ifdef RTCRITSECT_STRICT
    292     RTTHREAD        ThreadSelf = RTThreadSelf();
    293     if (ThreadSelf == NIL_RTTHREAD)
    294         RTThreadAdopt(RTTHREADTYPE_DEFAULT, 0, NULL, &ThreadSelf);
    295 #endif
    296191
    297192    /** If the critical section has already been destroyed, then inform the caller. */
     
    325220        for (;;)
    326221        {
    327 #ifdef RTCRITSECT_STRICT
    328             RTThreadBlocking(ThreadSelf, RTTHREADSTATE_CRITSECT, (uintptr_t)pCritSect, pszFile, uLine, uId);
    329 #endif
    330222            int rc = RTSemEventWait(pCritSect->EventSem, RT_INDEFINITE_WAIT);
    331 #ifdef RTCRITSECT_STRICT
    332             RTThreadUnblocked(ThreadSelf, RTTHREADSTATE_CRITSECT);
    333 #endif
    334223            if (pCritSect->u32Magic != RTCRITSECT_MAGIC)
    335224                return VERR_SEM_DESTROYED;
     
    346235    pCritSect->cNestings = 1;
    347236    ASMAtomicWriteHandle(&pCritSect->NativeThreadOwner, NativeThreadSelf);
    348 #ifdef RTCRITSECT_STRICT
    349     pCritSect->Strict.pszEnterFile = pszFile;
    350     pCritSect->Strict.u32EnterLine = uLine;
    351     pCritSect->Strict.uEnterId     = uId;
    352     ASMAtomicWriteHandle(&pCritSect->Strict.ThreadOwner, ThreadSelf);
    353     RTThreadWriteLockInc(ThreadSelf);
    354 #endif
    355237
    356238    return VINF_SUCCESS;
    357239}
    358 RT_EXPORT_SYMBOL(RTCritSectEnter);
     240RT_EXPORT_SYMBOL(RTSemSpinMutexRequest);
    359241
    360242
     
    365247 * @param   pCritSect   The critical section.
    366248 */
    367 RTDECL(int) RTCritSectLeave(PRTCRITSECT pCritSect)
     249RTDECL(int) RTSemSpinMutexRelease(PRTCRITSECT pCritSect)
    368250{
    369251    /*
     
    388270         * Decrement waiters, if >= 0 then we have to wake one of them up.
    389271         */
    390 #ifdef RTCRITSECT_STRICT
    391         if (pCritSect->Strict.ThreadOwner != NIL_RTTHREAD) /* May happen for PDMCritSects when entering GC/R0. */
    392             RTThreadWriteLockDec(pCritSect->Strict.ThreadOwner);
    393         ASMAtomicWriteHandle(&pCritSect->Strict.ThreadOwner, NIL_RTTHREAD);
    394 #endif
    395272        ASMAtomicWriteHandle(&pCritSect->NativeThreadOwner, NIL_RTNATIVETHREAD);
    396273        if (ASMAtomicDecS32(&pCritSect->cLockers) >= 0)
     
    402279    return VINF_SUCCESS;
    403280}
    404 RT_EXPORT_SYMBOL(RTCritSectLeave);
    405 
    406 
    407 /**
    408  * Leave multiple critical sections.
    409  *
    410  * @returns VINF_SUCCESS.
    411  * @param   cCritSects      Number of critical sections in the array.
    412  * @param   papCritSects    Array of critical section pointers.
    413  */
    414 RTDECL(int) RTCritSectLeaveMultiple(unsigned cCritSects, PRTCRITSECT *papCritSects)
    415 {
    416     int rc = VINF_SUCCESS;
    417     for (unsigned i = 0; i < cCritSects; i++)
    418     {
    419         int rc2 = RTCritSectLeave(papCritSects[i]);
    420         if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
    421             rc = rc2;
    422     }
    423     return rc;
    424 }
    425 RT_EXPORT_SYMBOL(RTCritSectLeaveMultiple);
    426 
    427 
    428 #ifndef RTCRITSECT_STRICT
    429 RTDECL(int) RTCritSectEnterDebug(PRTCRITSECT pCritSect, const char *pszFile, unsigned uLine, RTUINTPTR uId)
    430 {
    431     return RTCritSectEnter(pCritSect);
    432 }
    433 
    434 RTDECL(int) RTCritSectTryEnterDebug(PRTCRITSECT pCritSect, const char *pszFile, unsigned uLine, RTUINTPTR uId)
    435 {
    436     return RTCritSectTryEnter(pCritSect);
    437 }
    438 
    439 RTDECL(int) RTCritSectEnterMultipleDebug(unsigned cCritSects, PRTCRITSECT *papCritSects, const char *pszFile, unsigned uLine, RTUINTPTR uId)
    440 {
    441     return RTCritSectEnterMultiple(cCritSects, papCritSects);
    442 }
    443 #endif /* RT_STRICT */
    444 RT_EXPORT_SYMBOL(RTCritSectEnterDebug);
    445 RT_EXPORT_SYMBOL(RTCritSectTryEnterDebug);
    446 RT_EXPORT_SYMBOL(RTCritSectEnterMultipleDebug);
    447 
    448 
    449 /**
    450  * Deletes a critical section.
    451  *
    452  * @returns VINF_SUCCESS.
    453  * @param   pCritSect   The critical section.
    454  */
    455 RTDECL(int) RTCritSectDelete(PRTCRITSECT pCritSect)
    456 {
    457     /*
    458      * Assert free waiters and so on.
    459      */
    460     Assert(pCritSect);
    461     Assert(pCritSect->u32Magic == RTCRITSECT_MAGIC);
    462     Assert(pCritSect->cNestings == 0);
    463     Assert(pCritSect->cLockers == -1);
    464     Assert(pCritSect->NativeThreadOwner == NIL_RTNATIVETHREAD);
    465 
    466     /*
    467      * Invalidate the structure and free the mutex.
    468      * In case someone is waiting we'll signal the semaphore cLockers + 1 times.
    469      */
    470     ASMAtomicWriteU32(&pCritSect->u32Magic, ~RTCRITSECT_MAGIC);
    471     pCritSect->fFlags           = 0;
    472     pCritSect->cNestings        = 0;
    473     pCritSect->NativeThreadOwner= NIL_RTNATIVETHREAD;
    474     RTSEMEVENT EventSem = pCritSect->EventSem;
    475     pCritSect->EventSem         = NULL;
    476     while (pCritSect->cLockers-- >= 0)
    477         RTSemEventSignal(EventSem);
    478     ASMAtomicWriteS32(&pCritSect->cLockers, -1);
    479     int rc = RTSemEventDestroy(EventSem);
    480     AssertRC(rc);
    481 
    482     return rc;
    483 }
    484 RT_EXPORT_SYMBOL(RTCritSectDelete);
    485 
     281RT_EXPORT_SYMBOL(RTSemSpinMutexRelease);
     282
  • trunk/src/VBox/Runtime/r3/generic/semspinmutex-r3-generic.cpp

    r21533 r21540  
    11/* $Id$ */
    22/** @file
    3  * IPRT - Fast Mutex, Generic.
     3 * IPRT - Spinning Mutex Semaphores, Ring-3, Generic.
    44 */
    55
    66/*
    7  * Copyright (C) 2006-2007 Sun Microsystems, Inc.
     7 * Copyright (C) 2006-2009 Sun Microsystems, Inc.
    88 *
    99 * This file is part of VirtualBox Open Source Edition (OSE), as
     
    3838#include <iprt/alloc.h>
    3939#include <iprt/err.h>
     40#include <iprt/assert.h>
    4041#include <iprt/critsect.h>
    4142
    4243
    4344
    44 RTDECL(int) RTSemFastMutexCreate(PRTSEMFASTMUTEX pMutexSem)
     45RTDECL(int) RTSemSpinMutexCreate(PRTSEMSPINMUTEX phSpinMtx, uint32_t fFlags)
    4546{
     47    AssertReturn(!(fFlags & ~RTSEMSPINMUTEX_FLAGS_VALID_MASK), VERR_INVALID_PARAMETER);
     48    AssertPtr(phSpinMtx);
     49
    4650    PRTCRITSECT pCritSect = (PRTCRITSECT)RTMemAlloc(sizeof(RTCRITSECT));
    4751    if (!pCritSect)
     
    5054    if (RT_SUCCESS(rc))
    5155    {
    52         /** @todo pCritSect->fFlags |= RTCRITSECT_FLAGS_NO_NESTING; */
    53         *pMutexSem = (RTSEMFASTMUTEX)pCritSect;
     56        pCritSect->fFlags |= RTCRITSECT_FLAGS_NO_NESTING;
     57        *phSpinMtx = (RTSEMSPINMUTEX)pCritSect;
    5458    }
    5559    else
     
    5761    return rc;
    5862}
    59 RT_EXPORT_SYMBOL(RTSemFastMutexCreate);
     63RT_EXPORT_SYMBOL(RTSemSpinMutexCreate);
    6064
    6165
    62 RTDECL(int) RTSemFastMutexDestroy(RTSEMFASTMUTEX MutexSem)
     66RTDECL(int) RTSemSpinMutexDestroy(RTSEMSPINMUTEX hSpinMtx)
    6367{
    64     if (MutexSem == NIL_RTSEMFASTMUTEX)
     68    if (hSpinMtx == NIL_RTSEMSPINMUTEX)
    6569        return VERR_INVALID_PARAMETER;
    66     PRTCRITSECT pCritSect = (PRTCRITSECT)MutexSem;
     70    PRTCRITSECT pCritSect = (PRTCRITSECT)hSpinMtx;
    6771    int rc = RTCritSectDelete(pCritSect);
    6872    if (RT_SUCCESS(rc))
     
    7074    return rc;
    7175}
    72 RT_EXPORT_SYMBOL(RTSemFastMutexDestroy);
     76RT_EXPORT_SYMBOL(RTSemSpinMutexDestroy);
    7377
    7478
    75 RTDECL(int) RTSemFastMutexRequest(RTSEMFASTMUTEX MutexSem)
     79RTDECL(int) RTSemSpinMutexTryRequest(RTSEMSPINMUTEX hSpinMtx)
    7680{
    77     return RTCritSectEnter((PRTCRITSECT)MutexSem);
     81    return RTCritSectTryEnter((PRTCRITSECT)hSpinMtx);
     82
    7883}
    79 RT_EXPORT_SYMBOL(RTSemFastMutexRequest);
     84RT_EXPORT_SYMBOL(RTSemSpinMutexTryRequest);
    8085
    8186
    82 RTDECL(int) RTSemFastMutexRelease(RTSEMFASTMUTEX MutexSem)
     87RTDECL(int) RTSemSpinMutexRequest(RTSEMSPINMUTEX hSpinMtx)
    8388{
    84     return RTCritSectLeave((PRTCRITSECT)MutexSem);
     89    return RTCritSectEnter((PRTCRITSECT)hSpinMtx);
    8590}
    86 RT_EXPORT_SYMBOL(RTSemFastMutexRelease);
     91RT_EXPORT_SYMBOL(RTSemSpinMutexRequest);
    8792
     93
     94RTDECL(int) RTSemSpinMutexRelease(RTSEMSPINMUTEX hSpinMtx)
     95{
     96    return RTCritSectLeave((PRTCRITSECT)hSpinMtx);
     97}
     98RT_EXPORT_SYMBOL(RTSemSpinMutexRelease);
     99
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette