VirtualBox

Changeset 101788 in vbox for trunk/src


Ignore:
Timestamp:
Nov 4, 2023 8:11:04 PM (19 months ago)
Author:
vboxsync
svn:sync-xref-src-repo-rev:
159884
Message:

libs/xpcom: Remove the disabled !_PR_HAVE_ATOMIC_OPS fallback code, since all of our platforms define _PR_HAVE_ATOMIC_OPS, bugref:10545

File:
1 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/libs/xpcom18a4/nsprpub/pr/src/misc/pratom.c

    r101762 r101788  
    4343#include "pratom.h"
    4444#include "primpl.h"
    45 
    46 #include <string.h>
    47 
    48 /*
    49  * The following is a fallback implementation that emulates
    50  * atomic operations for platforms without atomic operations.
    51  * If a platform has atomic operations, it should define the
    52  * macro _PR_HAVE_ATOMIC_OPS, and the following will not be
    53  * compiled in.
    54  */
    55 
#if !defined(_PR_HAVE_ATOMIC_OPS)

#if defined(_PR_PTHREADS) && !defined(_PR_DCETHREADS)
/*
 * PR_AtomicDecrement() is used in NSPR's thread-specific data
 * destructor.  Because thread-specific data destructors may be
 * invoked after a PR_Cleanup() call, we need an implementation
 * of the atomic routines that doesn't need NSPR to be initialized.
 */

/*
 * We use a set of locks for all the emulated atomic operations.
 * By hashing on the address of the integer to be locked the
 * contention between multiple threads should be lessened.
 *
 * The number of atomic locks can be set by the environment variable
 * NSPR_ATOMIC_HASH_LOCKS
 */

/*
 * lock counts should be a power of 2
 */
#define DEFAULT_ATOMIC_LOCKS    16      /* should be in sync with the number of initializers
                                           below */
#define MAX_ATOMIC_LOCKS                (4 * 1024)

/*
 * Statically initialized lock table.  Used from startup (before
 * _PR_MD_INIT_ATOMIC runs) and kept as the fallback if allocating a
 * custom-sized table fails.
 */
static pthread_mutex_t static_atomic_locks[DEFAULT_ATOMIC_LOCKS] = {
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER };

#ifdef DEBUG
/* Per-bucket lock-acquisition counters, maintained only in debug builds. */
static PRInt32 static_hash_lock_counts[DEFAULT_ATOMIC_LOCKS];
static PRInt32 *hash_lock_counts = static_hash_lock_counts;
#endif

/* Current table; _PR_MD_INIT_ATOMIC may repoint these at a heap table. */
static PRUint32 num_atomic_locks = DEFAULT_ATOMIC_LOCKS;
static pthread_mutex_t *atomic_locks = static_atomic_locks;
static PRUint32 atomic_hash_mask = DEFAULT_ATOMIC_LOCKS - 1;  /* valid because the count is a power of 2 */

/*
 * Map an integer's address to a lock bucket: XOR two shifted copies of
 * the pointer bits, then mask down to the (power-of-two) table size.
 */
#define _PR_HASH_FOR_LOCK(ptr)                                  \
                ((PRUint32) (((PRUptrdiff) (ptr) >> 2)  ^       \
                                        ((PRUptrdiff) (ptr) >> 8)) &    \
                                        atomic_hash_mask)
    106 void _PR_MD_INIT_ATOMIC()
    107 {
    108 char *eval;
    109 int index;
    110 
    111 
    112         PR_ASSERT(PR_FloorLog2(MAX_ATOMIC_LOCKS) ==
    113                                                 PR_CeilingLog2(MAX_ATOMIC_LOCKS));
    114 
    115         PR_ASSERT(PR_FloorLog2(DEFAULT_ATOMIC_LOCKS) ==
    116                                                         PR_CeilingLog2(DEFAULT_ATOMIC_LOCKS));
    117 
    118         if (((eval = getenv("NSPR_ATOMIC_HASH_LOCKS")) != NULL)  &&
    119                 ((num_atomic_locks = atoi(eval)) != DEFAULT_ATOMIC_LOCKS)) {
    120 
    121                 if (num_atomic_locks > MAX_ATOMIC_LOCKS)
    122                         num_atomic_locks = MAX_ATOMIC_LOCKS;
    123                 else {
    124                         num_atomic_locks = PR_FloorLog2(num_atomic_locks);
    125                         num_atomic_locks = 1L << num_atomic_locks;
    126                 }
    127                 atomic_locks = (pthread_mutex_t *) PR_Malloc(sizeof(pthread_mutex_t) *
    128                                                 num_atomic_locks);
    129                 if (atomic_locks) {
    130                         for (index = 0; index < num_atomic_locks; index++) {
    131                                 if (pthread_mutex_init(&atomic_locks[index], NULL)) {
    132                                                 PR_DELETE(atomic_locks);
    133                                                 atomic_locks = NULL;
    134                                                 break;
    135                                 }
    136                         }
    137                 }
    138 #ifdef DEBUG
    139                 if (atomic_locks) {
    140                         hash_lock_counts = PR_CALLOC(num_atomic_locks * sizeof(PRInt32));
    141                         if (hash_lock_counts == NULL) {
    142                                 PR_DELETE(atomic_locks);
    143                                 atomic_locks = NULL;
    144                         }
    145                 }
    146 #endif
    147                 if (atomic_locks == NULL) {
    148                         /*
    149                          *      Use statically allocated locks
    150                          */
    151                         atomic_locks = static_atomic_locks;
    152                         num_atomic_locks = DEFAULT_ATOMIC_LOCKS;
    153         #ifdef DEBUG
    154                         hash_lock_counts = static_hash_lock_counts;
    155         #endif
    156                 }
    157                 atomic_hash_mask = num_atomic_locks - 1;
    158         }
    159         PR_ASSERT(PR_FloorLog2(num_atomic_locks) ==
    160                                                                 PR_CeilingLog2(num_atomic_locks));
    161 }
    162 
    163 PRInt32
    164 _PR_MD_ATOMIC_INCREMENT(PRInt32 *val)
    165 {
    166     PRInt32 rv;
    167     PRInt32 idx = _PR_HASH_FOR_LOCK(val);
    168 
    169     pthread_mutex_lock(&atomic_locks[idx]);
    170     rv = ++(*val);
    171 #ifdef DEBUG
    172     hash_lock_counts[idx]++;
    173 #endif
    174     pthread_mutex_unlock(&atomic_locks[idx]);
    175     return rv;
    176 }
    177 
    178 PRInt32
    179 _PR_MD_ATOMIC_ADD(PRInt32 *ptr, PRInt32 val)
    180 {
    181     PRInt32 rv;
    182     PRInt32 idx = _PR_HASH_FOR_LOCK(ptr);
    183 
    184     pthread_mutex_lock(&atomic_locks[idx]);
    185     rv = ((*ptr) += val);
    186 #ifdef DEBUG
    187     hash_lock_counts[idx]++;
    188 #endif
    189     pthread_mutex_unlock(&atomic_locks[idx]);
    190     return rv;
    191 }
    192 
    193 PRInt32
    194 _PR_MD_ATOMIC_DECREMENT(PRInt32 *val)
    195 {
    196     PRInt32 rv;
    197     PRInt32 idx = _PR_HASH_FOR_LOCK(val);
    198 
    199     pthread_mutex_lock(&atomic_locks[idx]);
    200     rv = --(*val);
    201 #ifdef DEBUG
    202     hash_lock_counts[idx]++;
    203 #endif
    204     pthread_mutex_unlock(&atomic_locks[idx]);
    205     return rv;
    206 }
    207 
    208 PRInt32
    209 _PR_MD_ATOMIC_SET(PRInt32 *val, PRInt32 newval)
    210 {
    211     PRInt32 rv;
    212     PRInt32 idx = _PR_HASH_FOR_LOCK(val);
    213 
    214     pthread_mutex_lock(&atomic_locks[idx]);
    215     rv = *val;
    216     *val = newval;
    217 #ifdef DEBUG
    218     hash_lock_counts[idx]++;
    219 #endif
    220     pthread_mutex_unlock(&atomic_locks[idx]);
    221     return rv;
    222 }
#else  /* _PR_PTHREADS && !_PR_DCETHREADS */
/*
 * We use a single lock for all the emulated atomic operations.
 * The lock contention should be acceptable.
 */
/* Created on demand by _PR_MD_INIT_ATOMIC(). */
static PRLock *atomic_lock = NULL;
    229 void _PR_MD_INIT_ATOMIC(void)
    230 {
    231     if (atomic_lock == NULL) {
    232         atomic_lock = PR_NewLock();
    233     }
    234 }
    235 
    236 PRInt32
    237 _PR_MD_ATOMIC_INCREMENT(PRInt32 *val)
    238 {
    239     PRInt32 rv;
    240 
    241     if (!_pr_initialized) {
    242         _PR_ImplicitInitialization();
    243     }
    244     PR_Lock(atomic_lock);
    245     rv = ++(*val);
    246     PR_Unlock(atomic_lock);
    247     return rv;
    248 }
    249 
    250 PRInt32
    251 _PR_MD_ATOMIC_ADD(PRInt32 *ptr, PRInt32 val)
    252 {
    253     PRInt32 rv;
    254 
    255     if (!_pr_initialized) {
    256         _PR_ImplicitInitialization();
    257     }
    258     PR_Lock(atomic_lock);
    259     rv = ((*ptr) += val);
    260     PR_Unlock(atomic_lock);
    261     return rv;
    262 }
    263 
    264 PRInt32
    265 _PR_MD_ATOMIC_DECREMENT(PRInt32 *val)
    266 {
    267     PRInt32 rv;
    268 
    269     if (!_pr_initialized) {
    270         _PR_ImplicitInitialization();
    271     }
    272     PR_Lock(atomic_lock);
    273     rv = --(*val);
    274     PR_Unlock(atomic_lock);
    275     return rv;
    276 }
    277 
    278 PRInt32
    279 _PR_MD_ATOMIC_SET(PRInt32 *val, PRInt32 newval)
    280 {
    281     PRInt32 rv;
    282 
    283     if (!_pr_initialized) {
    284         _PR_ImplicitInitialization();
    285     }
    286     PR_Lock(atomic_lock);
    287     rv = *val;
    288     *val = newval;
    289     PR_Unlock(atomic_lock);
    290     return rv;
    291 }
    292 #endif  /* _PR_PTHREADS && !_PR_DCETHREADS */
    293 
    294 #endif  /* !_PR_HAVE_ATOMIC_OPS */
    29545
    29646void _PR_InitAtomic(void)
Note: See TracChangeset for help on using the changeset viewer.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette