VirtualBox

source: vbox/trunk/src/VBox/Runtime/generic/semrw-lockless-generic.cpp@ 27105

Last change on this file since 27105 was 25908, checked in by vboxsync, 15 years ago

RTSemRWIsReadOwner: For assertion in main.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 34.0 KB
Line 
1/* $Id: semrw-lockless-generic.cpp 25908 2010-01-18 22:07:28Z vboxsync $ */
2/** @file
3 * IPRT Testcase - RTSemXRoads, generic implementation.
4 */
5
6/*
7 * Copyright (C) 2009 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 *
26 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
27 * Clara, CA 95054 USA or visit http://www.sun.com if you need
28 * additional information or have any questions.
29 */
30
31
32/*******************************************************************************
33* Header Files *
34*******************************************************************************/
35#define RTASSERT_QUIET
36#include <iprt/semaphore.h>
37#include "internal/iprt.h"
38
39#include <iprt/asm.h>
40#include <iprt/assert.h>
41#include <iprt/err.h>
42#include <iprt/lockvalidator.h>
43#include <iprt/mem.h>
44#include <iprt/thread.h>
45
46#include "internal/magics.h"
47#include "internal/strict.h"
48
49
50/*******************************************************************************
51* Structures and Typedefs *
52*******************************************************************************/
/**
 * Internal state of the lockless read-write semaphore.
 */
typedef struct RTSEMRWINTERNAL
{
    /** Magic value (RTSEMRW_MAGIC). */
    uint32_t volatile   u32Magic;
    uint32_t            u32Padding; /**< alignment padding. */
    /** The state variable.
     * All accesses are atomic and the bits are defined like this:
     *      Bits 0..14  - cReads; number of active/queued readers.
     *      Bit 15      - Unused.
     *      Bits 16..30 - cWrites; number of active/queued writers.
     *      Bit 31      - fDirection; 0=Read, 1=Write.
     *      Bits 32..46 - cWaitingReads; readers waiting for a direction change.
     *      Bit 47      - Unused.
     *      Bits 48..62 - Unused (reserved for a waiting-writer count).
     *      Bit 63      - Unused.
     */
    uint64_t volatile   u64State;
    /** The write owner (NIL_RTNATIVETHREAD when nobody owns it exclusively). */
    RTNATIVETHREAD volatile hNativeWriter;
    /** The number of read (shared) recursions made by the current writer. */
    uint32_t volatile   cWriterReads;
    /** The number of write recursions made by the current writer (1 = held once). */
    uint32_t volatile   cWriteRecursions;

    /** What the writer threads are blocking on. */
    RTSEMEVENT          hEvtWrite;
    /** What the read threads are blocking on when waiting for the writer to
     * finish. */
    RTSEMEVENTMULTI     hEvtRead;
    /** Indicates whether hEvtRead needs resetting. */
    bool volatile       fNeedReset;

#ifdef RTSEMRW_STRICT
    /** The validator record for the writer. */
    RTLOCKVALRECEXCL    ValidatorWrite;
    /** The validator record for the readers. */
    RTLOCKVALRECSHRD    ValidatorRead;
#endif
} RTSEMRWINTERNAL;
92
93
/*******************************************************************************
*   Defined Constants And Macros                                               *
*******************************************************************************/
/** Width in bits of each counter field inside u64State. */
#define RTSEMRW_CNT_BITS            15
/** Mask for one (unshifted) counter field. */
#define RTSEMRW_CNT_MASK            UINT64_C(0x00007fff)

/** Shift/mask for the active/queued reader count (bits 0..14). */
#define RTSEMRW_CNT_RD_SHIFT        0
#define RTSEMRW_CNT_RD_MASK         (RTSEMRW_CNT_MASK << RTSEMRW_CNT_RD_SHIFT)
/** Shift/mask for the active/queued writer count (bits 16..30). */
#define RTSEMRW_CNT_WR_SHIFT        16
#define RTSEMRW_CNT_WR_MASK         (RTSEMRW_CNT_MASK << RTSEMRW_CNT_WR_SHIFT)
/** Shift/mask/values for the direction flag (bit 31). */
#define RTSEMRW_DIR_SHIFT           31
#define RTSEMRW_DIR_MASK            RT_BIT_64(RTSEMRW_DIR_SHIFT)
#define RTSEMRW_DIR_READ            UINT64_C(0)
#define RTSEMRW_DIR_WRITE           UINT64_C(1)

/** Shift/mask for the number of readers waiting for a direction change
 *  (bits 32..46). */
#define RTSEMRW_WAIT_CNT_RD_SHIFT   32
#define RTSEMRW_WAIT_CNT_RD_MASK    (RTSEMRW_CNT_MASK << RTSEMRW_WAIT_CNT_RD_SHIFT)
/* No waiting-writer count is needed: writers queue on hEvtWrite instead. */
//#define RTSEMRW_WAIT_CNT_WR_SHIFT   48
//#define RTSEMRW_WAIT_CNT_WR_MASK    (RTSEMRW_CNT_MASK << RTSEMRW_WAIT_CNT_WR_SHIFT)
114
115#undef RTSemRWCreate
116RTDECL(int) RTSemRWCreate(PRTSEMRW phRWSem)
117{
118 return RTSemRWCreateEx(phRWSem, 0 /*fFlags*/, NIL_RTLOCKVALCLASS, RTLOCKVAL_SUB_CLASS_NONE, "RTSemRW");
119}
120RT_EXPORT_SYMBOL(RTSemRWCreate);
121
122
123RTDECL(int) RTSemRWCreateEx(PRTSEMRW phRWSem, uint32_t fFlags,
124 RTLOCKVALCLASS hClass, uint32_t uSubClass, const char *pszNameFmt, ...)
125{
126 AssertReturn(!(fFlags & ~RTSEMRW_FLAGS_NO_LOCK_VAL), VERR_INVALID_PARAMETER);
127
128 RTSEMRWINTERNAL *pThis = (RTSEMRWINTERNAL *)RTMemAlloc(sizeof(*pThis));
129 if (!pThis)
130 return VERR_NO_MEMORY;
131
132 int rc = RTSemEventMultiCreate(&pThis->hEvtRead);
133 if (RT_SUCCESS(rc))
134 {
135 rc = RTSemEventCreate(&pThis->hEvtWrite);
136 if (RT_SUCCESS(rc))
137 {
138 pThis->u32Magic = RTSEMRW_MAGIC;
139 pThis->u32Padding = 0;
140 pThis->u64State = 0;
141 pThis->hNativeWriter = NIL_RTNATIVETHREAD;
142 pThis->cWriterReads = 0;
143 pThis->cWriteRecursions = 0;
144 pThis->fNeedReset = false;
145#ifdef RTSEMRW_STRICT
146 bool const fLVEnabled = !(fFlags & RTSEMRW_FLAGS_NO_LOCK_VAL);
147 if (!pszNameFmt)
148 {
149 static uint32_t volatile s_iSemRWAnon = 0;
150 uint32_t i = ASMAtomicIncU32(&s_iSemRWAnon) - 1;
151 RTLockValidatorRecExclInit(&pThis->ValidatorWrite, hClass, uSubClass, pThis,
152 fLVEnabled, "RTSemRW-%u", i);
153 RTLockValidatorRecSharedInit(&pThis->ValidatorRead, hClass, uSubClass, pThis,
154 false /*fSignaller*/, fLVEnabled, "RTSemRW-%u", i);
155 }
156 else
157 {
158 va_list va;
159 va_start(va, pszNameFmt);
160 RTLockValidatorRecExclInitV(&pThis->ValidatorWrite, hClass, uSubClass, pThis,
161 fLVEnabled, pszNameFmt, va);
162 va_end(va);
163 va_start(va, pszNameFmt);
164 RTLockValidatorRecSharedInitV(&pThis->ValidatorRead, hClass, uSubClass, pThis,
165 false /*fSignaller*/, fLVEnabled, pszNameFmt, va);
166 va_end(va);
167 }
168 RTLockValidatorRecMakeSiblings(&pThis->ValidatorWrite.Core, &pThis->ValidatorRead.Core);
169#endif
170
171 *phRWSem = pThis;
172 return VINF_SUCCESS;
173 }
174 RTSemEventMultiDestroy(pThis->hEvtRead);
175 }
176 return rc;
177}
178RT_EXPORT_SYMBOL(RTSemRWCreateEx);
179
180
/**
 * Destroys a read-write semaphore created by RTSemRWCreate or RTSemRWCreateEx.
 *
 * @returns VINF_SUCCESS (NIL handles are quietly ignored);
 *          VERR_INVALID_HANDLE on a bad or concurrently destroyed handle.
 * @param   hRWSem      The read-write semaphore handle.
 */
RTDECL(int) RTSemRWDestroy(RTSEMRW hRWSem)
{
    /*
     * Validate input.
     */
    RTSEMRWINTERNAL *pThis = hRWSem;
    if (pThis == NIL_RTSEMRW)
        return VINF_SUCCESS;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertReturn(pThis->u32Magic == RTSEMRW_MAGIC, VERR_INVALID_HANDLE);
    /* Nobody should hold or be queued on the lock at destruction time. */
    Assert(!(ASMAtomicReadU64(&pThis->u64State) & (RTSEMRW_CNT_RD_MASK | RTSEMRW_CNT_WR_MASK)));

    /*
     * Invalidate the object and free up the resources.
     */
    /* The CmpXchg ensures only one concurrent destroyer wins; the rest get
       VERR_INVALID_HANDLE. The magic is dead before the events go away. */
    AssertReturn(ASMAtomicCmpXchgU32(&pThis->u32Magic, ~RTSEMRW_MAGIC, RTSEMRW_MAGIC), VERR_INVALID_HANDLE);

    RTSEMEVENTMULTI hEvtRead;
    ASMAtomicXchgHandle(&pThis->hEvtRead, NIL_RTSEMEVENTMULTI, &hEvtRead);
    int rc = RTSemEventMultiDestroy(hEvtRead);
    AssertRC(rc);

    RTSEMEVENT hEvtWrite;
    ASMAtomicXchgHandle(&pThis->hEvtWrite, NIL_RTSEMEVENT, &hEvtWrite);
    rc = RTSemEventDestroy(hEvtWrite);
    AssertRC(rc);

#ifdef RTSEMRW_STRICT
    RTLockValidatorRecSharedDelete(&pThis->ValidatorRead);
    RTLockValidatorRecExclDelete(&pThis->ValidatorWrite);
#endif
    RTMemFree(pThis);
    return VINF_SUCCESS;
}
RT_EXPORT_SYMBOL(RTSemRWDestroy);
216
217
/**
 * Changes the lock validator sub-class of the semaphore.
 *
 * @returns The old sub-class on success; RTLOCKVAL_SUB_CLASS_INVALID on an
 *          invalid handle or in non-strict builds (no validator present).
 * @param   hRWSem      The read-write semaphore handle.
 * @param   uSubClass   The new sub-class value.
 */
RTDECL(uint32_t) RTSemRWSetSubClass(RTSEMRW hRWSem, uint32_t uSubClass)
{
#ifdef RTSEMRW_STRICT
    /*
     * Validate handle.
     */
    struct RTSEMRWINTERNAL *pThis = hRWSem;
    AssertPtrReturn(pThis, RTLOCKVAL_SUB_CLASS_INVALID);
    AssertReturn(pThis->u32Magic == RTSEMRW_MAGIC, RTLOCKVAL_SUB_CLASS_INVALID);

    /* Keep the shared (read) and exclusive (write) records in sync. */
    RTLockValidatorRecSharedSetSubClass(&pThis->ValidatorRead, uSubClass);
    return RTLockValidatorRecExclSetSubClass(&pThis->ValidatorWrite, uSubClass);
#else
    return RTLOCKVAL_SUB_CLASS_INVALID;
#endif
}
RT_EXPORT_SYMBOL(RTSemRWSetSubClass);
235
236
/**
 * Worker for acquiring a read (shared) lock.
 *
 * @returns IPRT status code: VINF_SUCCESS, VERR_TIMEOUT, VERR_SEM_DESTROYED,
 *          a wait/interruption status, or a handle validation failure.
 * @param   hRWSem          The read-write semaphore handle (NIL returns
 *                          VINF_SUCCESS).
 * @param   cMillies        Timeout in milliseconds; 0 means don't wait.
 * @param   fInterruptible  Whether the wait may be interrupted by signals.
 * @param   pSrcPos         Lock validator source position (strict builds).
 */
static int rtSemRWRequestRead(RTSEMRW hRWSem, RTMSINTERVAL cMillies, bool fInterruptible, PCRTLOCKVALSRCPOS pSrcPos)
{
    /*
     * Validate input.
     */
    RTSEMRWINTERNAL *pThis = hRWSem;
    if (pThis == NIL_RTSEMRW)
        return VINF_SUCCESS;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertReturn(pThis->u32Magic == RTSEMRW_MAGIC, VERR_INVALID_HANDLE);

#ifdef RTSEMRW_STRICT
    RTTHREAD hThreadSelf = RTThreadSelfAutoAdopt();
    if (cMillies > 0)
    {
        int             rc9;
        RTNATIVETHREAD  hNativeWriter;
        ASMAtomicUoReadHandle(&pThis->hNativeWriter, &hNativeWriter);
        /* NOTE(review): compares an RTNATIVETHREAD against NIL_RTTHREAD;
           presumably both NILs share the same bit pattern -- confirm. */
        if (hNativeWriter != NIL_RTTHREAD && hNativeWriter == RTThreadNativeSelf())
            rc9 = RTLockValidatorRecExclCheckOrder(&pThis->ValidatorWrite, hThreadSelf, pSrcPos, cMillies);
        else
            rc9 = RTLockValidatorRecSharedCheckOrder(&pThis->ValidatorRead, hThreadSelf, pSrcPos, cMillies);
        if (RT_FAILURE(rc9))
            return rc9;
    }
#endif

    /*
     * Get cracking...
     */
    uint64_t u64State    = ASMAtomicReadU64(&pThis->u64State);
    uint64_t u64OldState = u64State;

    for (;;)
    {
        if ((u64State & RTSEMRW_DIR_MASK) == (RTSEMRW_DIR_READ << RTSEMRW_DIR_SHIFT))
        {
            /* It flows in the right direction, try follow it before it changes. */
            uint64_t c = (u64State & RTSEMRW_CNT_RD_MASK) >> RTSEMRW_CNT_RD_SHIFT;
            c++;
            Assert(c < RTSEMRW_CNT_MASK / 2);
            u64State &= ~RTSEMRW_CNT_RD_MASK;
            u64State |= c << RTSEMRW_CNT_RD_SHIFT;
            if (ASMAtomicCmpXchgU64(&pThis->u64State, u64State, u64OldState))
            {
#ifdef RTSEMRW_STRICT
                RTLockValidatorRecSharedAddOwner(&pThis->ValidatorRead, hThreadSelf, pSrcPos);
#endif
                break;
            }
        }
        else if ((u64State & (RTSEMRW_CNT_RD_MASK | RTSEMRW_CNT_WR_MASK)) == 0)
        {
            /* Wrong direction, but we're alone here and can simply try switch the direction. */
            u64State &= ~(RTSEMRW_CNT_RD_MASK | RTSEMRW_CNT_WR_MASK | RTSEMRW_DIR_MASK);
            u64State |= (UINT64_C(1) << RTSEMRW_CNT_RD_SHIFT) | (RTSEMRW_DIR_READ << RTSEMRW_DIR_SHIFT);
            if (ASMAtomicCmpXchgU64(&pThis->u64State, u64State, u64OldState))
            {
                Assert(!pThis->fNeedReset);
#ifdef RTSEMRW_STRICT
                RTLockValidatorRecSharedAddOwner(&pThis->ValidatorRead, hThreadSelf, pSrcPos);
#endif
                break;
            }
        }
        else
        {
            /* Is the writer perhaps doing a read recursion? */
            RTNATIVETHREAD hNativeSelf = RTThreadNativeSelf();
            RTNATIVETHREAD hNativeWriter;
            ASMAtomicUoReadHandle(&pThis->hNativeWriter, &hNativeWriter);
            if (hNativeSelf == hNativeWriter)
            {
#ifdef RTSEMRW_STRICT
                int rc9 = RTLockValidatorRecExclRecursionMixed(&pThis->ValidatorWrite, &pThis->ValidatorRead.Core, pSrcPos);
                if (RT_FAILURE(rc9))
                    return rc9;
#endif
                /* Writer doing a nested read: just count it, no state change. */
                Assert(pThis->cWriterReads < UINT32_MAX / 2);
                ASMAtomicIncU32(&pThis->cWriterReads);
                return VINF_SUCCESS; /* don't break! */
            }

            /* If the timeout is 0, return already. */
            if (!cMillies)
                return VERR_TIMEOUT;

            /* Add ourselves to the queue and wait for the direction to change. */
            uint64_t c = (u64State & RTSEMRW_CNT_RD_MASK) >> RTSEMRW_CNT_RD_SHIFT;
            c++;
            Assert(c < RTSEMRW_CNT_MASK / 2);

            uint64_t cWait = (u64State & RTSEMRW_WAIT_CNT_RD_MASK) >> RTSEMRW_WAIT_CNT_RD_SHIFT;
            cWait++;
            Assert(cWait <= c);
            Assert(cWait < RTSEMRW_CNT_MASK / 2);

            u64State &= ~(RTSEMRW_CNT_RD_MASK | RTSEMRW_WAIT_CNT_RD_MASK);
            u64State |= (c << RTSEMRW_CNT_RD_SHIFT) | (cWait << RTSEMRW_WAIT_CNT_RD_SHIFT);

            if (ASMAtomicCmpXchgU64(&pThis->u64State, u64State, u64OldState))
            {
                for (uint32_t iLoop = 0; ; iLoop++)
                {
                    int rc;
                    /* Note: the braces below belong to BOTH preprocessor
                       branches - in strict builds they are guarded by the
                       RT_SUCCESS(rc) check, otherwise they run always. */
#ifdef RTSEMRW_STRICT
                    rc = RTLockValidatorRecSharedCheckBlocking(&pThis->ValidatorRead, hThreadSelf, pSrcPos, true,
                                                               cMillies, RTTHREADSTATE_RW_READ, false);
                    if (RT_SUCCESS(rc))
#else
                    RTTHREAD hThreadSelf = RTThreadSelf();
                    RTThreadBlocking(hThreadSelf, RTTHREADSTATE_RW_READ, false);
#endif
                    {
                        if (fInterruptible)
                            rc = RTSemEventMultiWaitNoResume(pThis->hEvtRead, cMillies);
                        else
                            rc = RTSemEventMultiWait(pThis->hEvtRead, cMillies);
                        RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_READ);
                        if (pThis->u32Magic != RTSEMRW_MAGIC)
                            return VERR_SEM_DESTROYED;
                    }
                    if (RT_FAILURE(rc))
                    {
                        /* Decrement the counts and return the error. */
                        for (;;)
                        {
                            u64OldState = u64State = ASMAtomicReadU64(&pThis->u64State);
                            c = (u64State & RTSEMRW_CNT_RD_MASK) >> RTSEMRW_CNT_RD_SHIFT; Assert(c > 0);
                            c--;
                            cWait = (u64State & RTSEMRW_WAIT_CNT_RD_MASK) >> RTSEMRW_WAIT_CNT_RD_SHIFT; Assert(cWait > 0);
                            cWait--;
                            u64State &= ~(RTSEMRW_CNT_RD_MASK | RTSEMRW_WAIT_CNT_RD_MASK);
                            u64State |= (c << RTSEMRW_CNT_RD_SHIFT) | (cWait << RTSEMRW_WAIT_CNT_RD_SHIFT);
                            if (ASMAtomicCmpXchgU64(&pThis->u64State, u64State, u64OldState))
                                break;
                        }
                        return rc;
                    }

                    /* Woken up: direction must have flipped to read, otherwise
                       it was a spurious/early wakeup and we wait again. */
                    Assert(pThis->fNeedReset);
                    u64State = ASMAtomicReadU64(&pThis->u64State);
                    if ((u64State & RTSEMRW_DIR_MASK) == (RTSEMRW_DIR_READ << RTSEMRW_DIR_SHIFT))
                        break;
                    AssertMsg(iLoop < 1, ("%u\n", iLoop));
                }

                /* Decrement the wait count and maybe reset the semaphore (if we're last). */
                for (;;)
                {
                    u64OldState = u64State;

                    cWait = (u64State & RTSEMRW_WAIT_CNT_RD_MASK) >> RTSEMRW_WAIT_CNT_RD_SHIFT;
                    Assert(cWait > 0);
                    cWait--;
                    u64State &= ~RTSEMRW_WAIT_CNT_RD_MASK;
                    u64State |= cWait << RTSEMRW_WAIT_CNT_RD_SHIFT;

                    if (ASMAtomicCmpXchgU64(&pThis->u64State, u64State, u64OldState))
                    {
                        if (cWait == 0)
                        {
                            /* Last waiter resets the multi-event for the next round. */
                            if (ASMAtomicXchgBool(&pThis->fNeedReset, false))
                            {
                                int rc = RTSemEventMultiReset(pThis->hEvtRead);
                                AssertRCReturn(rc, rc);
                            }
                        }
                        break;
                    }
                    u64State = ASMAtomicReadU64(&pThis->u64State);
                }

#ifdef RTSEMRW_STRICT
                RTLockValidatorRecSharedAddOwner(&pThis->ValidatorRead, hThreadSelf, pSrcPos);
#endif
                break;
            }
        }

        if (pThis->u32Magic != RTSEMRW_MAGIC)
            return VERR_SEM_DESTROYED;

        /* CAS raced - back off briefly and retry with fresh state. */
        ASMNopPause();
        u64State = ASMAtomicReadU64(&pThis->u64State);
        u64OldState = u64State;
    }

    /* got it! */
    Assert((ASMAtomicReadU64(&pThis->u64State) & RTSEMRW_DIR_MASK) == (RTSEMRW_DIR_READ << RTSEMRW_DIR_SHIFT));
    return VINF_SUCCESS;

}
430
431
432#undef RTSemRWRequestRead
433RTDECL(int) RTSemRWRequestRead(RTSEMRW hRWSem, RTMSINTERVAL cMillies)
434{
435#ifndef RTSEMRW_STRICT
436 return rtSemRWRequestRead(hRWSem, cMillies, false, NULL);
437#else
438 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
439 return rtSemRWRequestRead(hRWSem, cMillies, false, &SrcPos);
440#endif
441}
442RT_EXPORT_SYMBOL(RTSemRWRequestRead);
443
444
445RTDECL(int) RTSemRWRequestReadDebug(RTSEMRW hRWSem, RTMSINTERVAL cMillies, RTHCUINTPTR uId, RT_SRC_POS_DECL)
446{
447 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
448 return rtSemRWRequestRead(hRWSem, cMillies, false, &SrcPos);
449}
450RT_EXPORT_SYMBOL(RTSemRWRequestReadDebug);
451
452
453#undef RTSemRWRequestReadNoResume
454RTDECL(int) RTSemRWRequestReadNoResume(RTSEMRW hRWSem, RTMSINTERVAL cMillies)
455{
456#ifndef RTSEMRW_STRICT
457 return rtSemRWRequestRead(hRWSem, cMillies, true, NULL);
458#else
459 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
460 return rtSemRWRequestRead(hRWSem, cMillies, true, &SrcPos);
461#endif
462}
463RT_EXPORT_SYMBOL(RTSemRWRequestReadNoResume);
464
465
466RTDECL(int) RTSemRWRequestReadNoResumeDebug(RTSEMRW hRWSem, RTMSINTERVAL cMillies, RTHCUINTPTR uId, RT_SRC_POS_DECL)
467{
468 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
469 return rtSemRWRequestRead(hRWSem, cMillies, true, &SrcPos);
470}
471RT_EXPORT_SYMBOL(RTSemRWRequestReadNoResumeDebug);
472
473
474
475RTDECL(int) RTSemRWReleaseRead(RTSEMRW hRWSem)
476{
477 /*
478 * Validate handle.
479 */
480 RTSEMRWINTERNAL *pThis = hRWSem;
481 AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
482 AssertReturn(pThis->u32Magic == RTSEMRW_MAGIC, VERR_INVALID_HANDLE);
483
484 /*
485 * Check the direction and take action accordingly.
486 */
487 uint64_t u64State = ASMAtomicReadU64(&pThis->u64State);
488 uint64_t u64OldState = u64State;
489 if ((u64State & RTSEMRW_DIR_MASK) == (RTSEMRW_DIR_READ << RTSEMRW_DIR_SHIFT))
490 {
491#ifdef RTSEMRW_STRICT
492 int rc9 = RTLockValidatorRecSharedCheckAndRelease(&pThis->ValidatorRead, NIL_RTTHREAD);
493 if (RT_FAILURE(rc9))
494 return rc9;
495#endif
496 for (;;)
497 {
498 uint64_t c = (u64State & RTSEMRW_CNT_RD_MASK) >> RTSEMRW_CNT_RD_SHIFT;
499 AssertReturn(c > 0, VERR_NOT_OWNER);
500 c--;
501
502 if ( c > 0
503 || (u64State & RTSEMRW_CNT_RD_MASK) == 0)
504 {
505 /* Don't change the direction. */
506 u64State &= ~RTSEMRW_CNT_RD_MASK;
507 u64State |= c << RTSEMRW_CNT_RD_SHIFT;
508 if (ASMAtomicCmpXchgU64(&pThis->u64State, u64State, u64OldState))
509 break;
510 }
511 else
512 {
513 /* Reverse the direction and signal the reader threads. */
514 u64State &= ~(RTSEMRW_CNT_RD_MASK | RTSEMRW_DIR_MASK);
515 u64State |= RTSEMRW_DIR_WRITE << RTSEMRW_DIR_SHIFT;
516 if (ASMAtomicCmpXchgU64(&pThis->u64State, u64State, u64OldState))
517 {
518 int rc = RTSemEventSignal(pThis->hEvtWrite);
519 AssertRC(rc);
520 break;
521 }
522 }
523
524 ASMNopPause();
525 u64State = ASMAtomicReadU64(&pThis->u64State);
526 u64OldState = u64State;
527 }
528 }
529 else
530 {
531 RTNATIVETHREAD hNativeSelf = RTThreadNativeSelf();
532 RTNATIVETHREAD hNativeWriter;
533 ASMAtomicUoReadHandle(&pThis->hNativeWriter, &hNativeWriter);
534 AssertReturn(hNativeSelf == hNativeWriter, VERR_NOT_OWNER);
535 AssertReturn(pThis->cWriterReads > 0, VERR_NOT_OWNER);
536#ifdef RTSEMRW_STRICT
537 int rc = RTLockValidatorRecExclUnwindMixed(&pThis->ValidatorWrite, &pThis->ValidatorRead.Core);
538 if (RT_FAILURE(rc))
539 return rc;
540#endif
541 ASMAtomicDecU32(&pThis->cWriterReads);
542 }
543
544 return VINF_SUCCESS;
545}
546RT_EXPORT_SYMBOL(RTSemRWReleaseRead);
547
548
549DECL_FORCE_INLINE(int) rtSemRWRequestWrite(RTSEMRW hRWSem, RTMSINTERVAL cMillies, bool fInterruptible, PCRTLOCKVALSRCPOS pSrcPos)
550{
551 /*
552 * Validate input.
553 */
554 RTSEMRWINTERNAL *pThis = hRWSem;
555 if (pThis == NIL_RTSEMRW)
556 return VINF_SUCCESS;
557 AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
558 AssertReturn(pThis->u32Magic == RTSEMRW_MAGIC, VERR_INVALID_HANDLE);
559
560#ifdef RTSEMRW_STRICT
561 RTTHREAD hThreadSelf = NIL_RTTHREAD;
562 if (cMillies)
563 {
564 hThreadSelf = RTThreadSelfAutoAdopt();
565 int rc9 = RTLockValidatorRecExclCheckOrder(&pThis->ValidatorWrite, hThreadSelf, pSrcPos, cMillies);
566 if (RT_FAILURE(rc9))
567 return rc9;
568 }
569#endif
570
571 /*
572 * Check if we're already the owner and just recursing.
573 */
574 RTNATIVETHREAD hNativeSelf = RTThreadNativeSelf();
575 RTNATIVETHREAD hNativeWriter;
576 ASMAtomicUoReadHandle(&pThis->hNativeWriter, &hNativeWriter);
577 if (hNativeSelf == hNativeWriter)
578 {
579 Assert((ASMAtomicReadU64(&pThis->u64State) & RTSEMRW_DIR_MASK) == (RTSEMRW_DIR_WRITE << RTSEMRW_DIR_SHIFT));
580#ifdef RTSEMRW_STRICT
581 int rc9 = RTLockValidatorRecExclRecursion(&pThis->ValidatorWrite, pSrcPos);
582 if (RT_FAILURE(rc9))
583 return rc9;
584#endif
585 Assert(pThis->cWriteRecursions < UINT32_MAX / 2);
586 ASMAtomicIncU32(&pThis->cWriteRecursions);
587 return VINF_SUCCESS;
588 }
589
590 /*
591 * Get cracking.
592 */
593 uint64_t u64State = ASMAtomicReadU64(&pThis->u64State);
594 uint64_t u64OldState = u64State;
595
596 for (;;)
597 {
598 if ( (u64State & RTSEMRW_DIR_MASK) == (RTSEMRW_DIR_WRITE << RTSEMRW_DIR_SHIFT)
599 || (u64State & (RTSEMRW_CNT_RD_MASK | RTSEMRW_CNT_WR_MASK)) != 0)
600 {
601 /* It flows in the right direction, try follow it before it changes. */
602 uint64_t c = (u64State & RTSEMRW_CNT_WR_MASK) >> RTSEMRW_CNT_WR_SHIFT;
603 c++;
604 Assert(c < RTSEMRW_CNT_MASK / 2);
605 u64State &= ~RTSEMRW_CNT_WR_MASK;
606 u64State |= c << RTSEMRW_CNT_WR_SHIFT;
607 if (ASMAtomicCmpXchgU64(&pThis->u64State, u64State, u64OldState))
608 break;
609 }
610 else if ((u64State & (RTSEMRW_CNT_RD_MASK | RTSEMRW_CNT_WR_MASK)) == 0)
611 {
612 /* Wrong direction, but we're alone here and can simply try switch the direction. */
613 u64State &= ~(RTSEMRW_CNT_RD_MASK | RTSEMRW_CNT_WR_MASK | RTSEMRW_DIR_MASK);
614 u64State |= (UINT64_C(1) << RTSEMRW_CNT_WR_SHIFT) | (RTSEMRW_DIR_WRITE << RTSEMRW_DIR_SHIFT);
615 if (ASMAtomicCmpXchgU64(&pThis->u64State, u64State, u64OldState))
616 break;
617 }
618 else if (!cMillies)
619 /* Wrong direction and we're not supposed to wait, just return. */
620 return VERR_TIMEOUT;
621 else
622 {
623 /* Add ourselves to the write count and break out to do the wait. */
624 uint64_t c = (u64State & RTSEMRW_CNT_WR_MASK) >> RTSEMRW_CNT_WR_SHIFT;
625 c++;
626 Assert(c < RTSEMRW_CNT_MASK / 2);
627 u64State &= ~RTSEMRW_CNT_WR_MASK;
628 u64State |= c << RTSEMRW_CNT_WR_SHIFT;
629 if (ASMAtomicCmpXchgU64(&pThis->u64State, u64State, u64OldState))
630 break;
631 }
632
633 if (pThis->u32Magic != RTSEMRW_MAGIC)
634 return VERR_SEM_DESTROYED;
635
636 ASMNopPause();
637 u64State = ASMAtomicReadU64(&pThis->u64State);
638 u64OldState = u64State;
639 }
640
641 /*
642 * If we're in write mode now try grab the ownership. Play fair if there
643 * are threads already waiting.
644 */
645 bool fDone = (u64State & RTSEMRW_DIR_MASK) == (RTSEMRW_DIR_WRITE << RTSEMRW_DIR_SHIFT)
646 && ( ((u64State & RTSEMRW_CNT_WR_MASK) >> RTSEMRW_CNT_WR_SHIFT) == 1
647 || cMillies == 0);
648 if (fDone)
649 ASMAtomicCmpXchgHandle(&pThis->hNativeWriter, hNativeSelf, NIL_RTNATIVETHREAD, fDone);
650 if (!fDone)
651 {
652 /*
653 * Wait for our turn.
654 */
655 for (uint32_t iLoop = 0; ; iLoop++)
656 {
657 int rc;
658#ifdef RTSEMRW_STRICT
659 if (cMillies)
660 {
661 if (hThreadSelf == NIL_RTTHREAD)
662 hThreadSelf = RTThreadSelfAutoAdopt();
663 rc = RTLockValidatorRecExclCheckBlocking(&pThis->ValidatorWrite, hThreadSelf, pSrcPos, true,
664 cMillies, RTTHREADSTATE_RW_WRITE, false);
665 }
666 else
667 rc = VINF_SUCCESS;
668 if (RT_SUCCESS(rc))
669#else
670 RTTHREAD hThreadSelf = RTThreadSelf();
671 RTThreadBlocking(hThreadSelf, RTTHREADSTATE_RW_WRITE, false);
672#endif
673 {
674 if (fInterruptible)
675 rc = RTSemEventWaitNoResume(pThis->hEvtWrite, cMillies);
676 else
677 rc = RTSemEventWait(pThis->hEvtWrite, cMillies);
678 RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_WRITE);
679 if (pThis->u32Magic != RTSEMRW_MAGIC)
680 return VERR_SEM_DESTROYED;
681 }
682 if (RT_FAILURE(rc))
683 {
684 /* Decrement the counts and return the error. */
685 for (;;)
686 {
687 u64OldState = u64State = ASMAtomicReadU64(&pThis->u64State);
688 uint64_t c = (u64State & RTSEMRW_CNT_WR_MASK) >> RTSEMRW_CNT_WR_SHIFT; Assert(c > 0);
689 c--;
690 u64State &= ~RTSEMRW_CNT_WR_MASK;
691 u64State |= c << RTSEMRW_CNT_WR_SHIFT;
692 if (ASMAtomicCmpXchgU64(&pThis->u64State, u64State, u64OldState))
693 break;
694 }
695 return rc;
696 }
697
698 u64State = ASMAtomicReadU64(&pThis->u64State);
699 if ((u64State & RTSEMRW_DIR_MASK) == (RTSEMRW_DIR_WRITE << RTSEMRW_DIR_SHIFT))
700 {
701 ASMAtomicCmpXchgHandle(&pThis->hNativeWriter, hNativeSelf, NIL_RTNATIVETHREAD, fDone);
702 if (fDone)
703 break;
704 }
705 AssertMsg(iLoop < 1000, ("%u\n", iLoop)); /* may loop a few times here... */
706 }
707 }
708
709 /*
710 * Got it!
711 */
712 Assert((ASMAtomicReadU64(&pThis->u64State) & RTSEMRW_DIR_MASK) == (RTSEMRW_DIR_WRITE << RTSEMRW_DIR_SHIFT));
713 ASMAtomicWriteU32(&pThis->cWriteRecursions, 1);
714 Assert(pThis->cWriterReads == 0);
715#ifdef RTSEMRW_STRICT
716 RTLockValidatorRecExclSetOwner(&pThis->ValidatorWrite, hThreadSelf, pSrcPos, true);
717#endif
718
719 return VINF_SUCCESS;
720}
721
722
723#undef RTSemRWRequestWrite
724RTDECL(int) RTSemRWRequestWrite(RTSEMRW hRWSem, RTMSINTERVAL cMillies)
725{
726#ifndef RTSEMRW_STRICT
727 return rtSemRWRequestWrite(hRWSem, cMillies, false, NULL);
728#else
729 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
730 return rtSemRWRequestWrite(hRWSem, cMillies, false, &SrcPos);
731#endif
732}
733RT_EXPORT_SYMBOL(RTSemRWRequestWrite);
734
735
736RTDECL(int) RTSemRWRequestWriteDebug(RTSEMRW hRWSem, RTMSINTERVAL cMillies, RTHCUINTPTR uId, RT_SRC_POS_DECL)
737{
738 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
739 return rtSemRWRequestWrite(hRWSem, cMillies, false, &SrcPos);
740}
741RT_EXPORT_SYMBOL(RTSemRWRequestWriteDebug);
742
743
744#undef RTSemRWRequestWriteNoResume
745RTDECL(int) RTSemRWRequestWriteNoResume(RTSEMRW hRWSem, RTMSINTERVAL cMillies)
746{
747#ifndef RTSEMRW_STRICT
748 return rtSemRWRequestWrite(hRWSem, cMillies, true, NULL);
749#else
750 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
751 return rtSemRWRequestWrite(hRWSem, cMillies, true, &SrcPos);
752#endif
753}
754RT_EXPORT_SYMBOL(RTSemRWRequestWriteNoResume);
755
756
757RTDECL(int) RTSemRWRequestWriteNoResumeDebug(RTSEMRW hRWSem, RTMSINTERVAL cMillies, RTHCUINTPTR uId, RT_SRC_POS_DECL)
758{
759 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
760 return rtSemRWRequestWrite(hRWSem, cMillies, true, &SrcPos);
761}
762RT_EXPORT_SYMBOL(RTSemRWRequestWriteNoResumeDebug);
763
764
/**
 * Releases the write (exclusive) lock, unwinding one recursion level.
 *
 * On the final release the ownership is dropped and either the next waiting
 * writer is woken or, if readers are queued, the direction is reversed and
 * all waiting readers are released.
 *
 * @returns VINF_SUCCESS, VERR_NOT_OWNER, VERR_WRONG_ORDER,
 *          VERR_SEM_DESTROYED, VERR_INVALID_HANDLE or a validator status.
 * @param   hRWSem      The read-write semaphore handle.
 */
RTDECL(int) RTSemRWReleaseWrite(RTSEMRW hRWSem)
{

    /*
     * Validate handle.
     */
    struct RTSEMRWINTERNAL *pThis = hRWSem;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertReturn(pThis->u32Magic == RTSEMRW_MAGIC, VERR_INVALID_HANDLE);

    /* Only the registered writer may release. */
    RTNATIVETHREAD hNativeSelf = RTThreadNativeSelf();
    RTNATIVETHREAD hNativeWriter;
    ASMAtomicUoReadHandle(&pThis->hNativeWriter, &hNativeWriter);
    AssertReturn(hNativeSelf == hNativeWriter, VERR_NOT_OWNER);

    /*
     * Unwind a recursion.
     */
    if (pThis->cWriteRecursions == 1)
    {
        AssertReturn(pThis->cWriterReads == 0, VERR_WRONG_ORDER); /* (must release all read recursions before the final write.) */
#ifdef RTSEMRW_STRICT
        int rc9 = RTLockValidatorRecExclReleaseOwner(&pThis->ValidatorWrite, true);
        if (RT_FAILURE(rc9))
            return rc9;
#endif
        /*
         * Update the state.
         */
        /* Ownership must be dropped before the state CAS so a woken writer
           can claim the NIL slot. */
        ASMAtomicWriteU32(&pThis->cWriteRecursions, 0);
        ASMAtomicWriteHandle(&pThis->hNativeWriter, NIL_RTNATIVETHREAD);

        for (;;)
        {
            uint64_t u64State    = ASMAtomicReadU64(&pThis->u64State);
            uint64_t u64OldState = u64State;

            uint64_t c = (u64State & RTSEMRW_CNT_WR_MASK) >> RTSEMRW_CNT_WR_SHIFT;
            Assert(c > 0);
            c--;

            if (   c > 0
                || (u64State & RTSEMRW_CNT_RD_MASK) == 0)
            {
                /* Don't change the direction, wake up the next writer if any. */
                u64State &= ~RTSEMRW_CNT_WR_MASK;
                u64State |= c << RTSEMRW_CNT_WR_SHIFT;
                if (ASMAtomicCmpXchgU64(&pThis->u64State, u64State, u64OldState))
                {
                    if (c > 0)
                    {
                        int rc = RTSemEventSignal(pThis->hEvtWrite);
                        AssertRC(rc);
                    }
                    break;
                }
            }
            else
            {
                /* Reverse the direction and signal the reader threads. */
                u64State &= ~(RTSEMRW_CNT_WR_MASK | RTSEMRW_DIR_MASK);
                u64State |= RTSEMRW_DIR_READ << RTSEMRW_DIR_SHIFT;
                if (ASMAtomicCmpXchgU64(&pThis->u64State, u64State, u64OldState))
                {
                    /* The last reader to leave will reset hEvtRead (fNeedReset). */
                    Assert(!pThis->fNeedReset);
                    ASMAtomicWriteBool(&pThis->fNeedReset, true);
                    int rc = RTSemEventMultiSignal(pThis->hEvtRead);
                    AssertRC(rc);
                    break;
                }
            }

            ASMNopPause();
            if (pThis->u32Magic != RTSEMRW_MAGIC)
                return VERR_SEM_DESTROYED;
        }
    }
    else
    {
        /* Nested release: just drop one recursion level. */
        Assert(pThis->cWriteRecursions != 0);
#ifdef RTSEMRW_STRICT
        int rc9 = RTLockValidatorRecExclUnwind(&pThis->ValidatorWrite);
        if (RT_FAILURE(rc9))
            return rc9;
#endif
        ASMAtomicDecU32(&pThis->cWriteRecursions);
    }

    return VINF_SUCCESS;
}
RT_EXPORT_SYMBOL(RTSemRWReleaseWrite);
856
857
858RTDECL(bool) RTSemRWIsWriteOwner(RTSEMRW hRWSem)
859{
860 /*
861 * Validate handle.
862 */
863 struct RTSEMRWINTERNAL *pThis = hRWSem;
864 AssertPtrReturn(pThis, false);
865 AssertReturn(pThis->u32Magic == RTSEMRW_MAGIC, false);
866
867 /*
868 * Check ownership.
869 */
870 RTNATIVETHREAD hNativeSelf = RTThreadNativeSelf();
871 RTNATIVETHREAD hNativeWriter;
872 ASMAtomicUoReadHandle(&pThis->hNativeWriter, &hNativeWriter);
873 return hNativeWriter == hNativeSelf;
874}
875RT_EXPORT_SYMBOL(RTSemRWIsWriteOwner);
876
877
878RTDECL(bool) RTSemRWIsReadOwner(RTSEMRW hRWSem, bool fWannaHear)
879{
880 /*
881 * Validate handle.
882 */
883 struct RTSEMRWINTERNAL *pThis = hRWSem;
884 AssertPtrReturn(pThis, false);
885 AssertReturn(pThis->u32Magic == RTSEMRW_MAGIC, false);
886
887 /*
888 * Inspect the state.
889 */
890 uint64_t u64State = ASMAtomicReadU64(&pThis->u64State);
891 if ((u64State & RTSEMRW_DIR_MASK) == (RTSEMRW_DIR_WRITE << RTSEMRW_DIR_SHIFT))
892 {
893 /*
894 * It's in write mode, so we can only be a reader if we're also the
895 * current writer.
896 */
897 RTNATIVETHREAD hNativeSelf = RTThreadNativeSelf();
898 RTNATIVETHREAD hWriter;
899 ASMAtomicUoReadHandle(&pThis->hWriter, &hWriter);
900 return hWriter == hNativeSelf;
901 }
902
903 /*
904 * Read mode. If there are no current readers, then we cannot be a reader.
905 */
906 if (!(u64State & RTSEMRW_CNT_RD_MASK))
907 return false;
908
909#ifdef RTSEMRW_STRICT
910 /*
911 * Ask the lock validator.
912 */
913 return RTLockValidatorRecSharedIsOwner(&pThis->ValidatorRead, NIL_RTTHREAD);
914#else
915 /*
916 * Ok, we don't know, just tell the caller what he want to hear.
917 */
918 return fWannaHear;
919#endif
920}
921RT_EXPORT_SYMBOL(RTSemRWIsReadOwner);
922
923
924RTDECL(uint32_t) RTSemRWGetWriteRecursion(RTSEMRW hRWSem)
925{
926 /*
927 * Validate handle.
928 */
929 struct RTSEMRWINTERNAL *pThis = hRWSem;
930 AssertPtrReturn(pThis, 0);
931 AssertReturn(pThis->u32Magic == RTSEMRW_MAGIC, 0);
932
933 /*
934 * Return the requested data.
935 */
936 return pThis->cWriteRecursions;
937}
938RT_EXPORT_SYMBOL(RTSemRWGetWriteRecursion);
939
940
941RTDECL(uint32_t) RTSemRWGetWriterReadRecursion(RTSEMRW hRWSem)
942{
943 /*
944 * Validate handle.
945 */
946 struct RTSEMRWINTERNAL *pThis = hRWSem;
947 AssertPtrReturn(pThis, 0);
948 AssertReturn(pThis->u32Magic == RTSEMRW_MAGIC, 0);
949
950 /*
951 * Return the requested data.
952 */
953 return pThis->cWriterReads;
954}
955RT_EXPORT_SYMBOL(RTSemRWGetWriterReadRecursion);
956
957
958RTDECL(uint32_t) RTSemRWGetReadCount(RTSEMRW hRWSem)
959{
960 /*
961 * Validate input.
962 */
963 struct RTSEMRWINTERNAL *pThis = hRWSem;
964 AssertPtrReturn(pThis, 0);
965 AssertMsgReturn(pThis->u32Magic == RTSEMRW_MAGIC,
966 ("pThis=%p u32Magic=%#x\n", pThis, pThis->u32Magic),
967 0);
968
969 /*
970 * Return the requested data.
971 */
972 uint64_t u64State = ASMAtomicReadU64(&pThis->u64State);
973 if ((u64State & RTSEMRW_DIR_MASK) != (RTSEMRW_DIR_READ << RTSEMRW_DIR_SHIFT))
974 return 0;
975 return (u64State & RTSEMRW_CNT_RD_MASK) >> RTSEMRW_CNT_RD_SHIFT;
976}
977RT_EXPORT_SYMBOL(RTSemRWGetReadCount);
978
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette