VirtualBox

source: vbox/trunk/src/VBox/Runtime/generic/semrw-lockless-generic.cpp@30801

Last change on this file since 30801 was 28800, checked in by vboxsync, 15 years ago

Automated rebranding to Oracle copyright/license strings via filemuncher

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 33.8 KB
1/* $Id: semrw-lockless-generic.cpp 28800 2010-04-27 08:22:32Z vboxsync $ */
2/** @file
3 * IPRT - Read/Write Semaphore, Generic, Lockless Variant.
4 */
5
6/*
7 * Copyright (C) 2009 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 */
26
27
28/*******************************************************************************
29* Header Files *
30*******************************************************************************/
31#define RTASSERT_QUIET
32#include <iprt/semaphore.h>
33#include "internal/iprt.h"
34
35#include <iprt/asm.h>
36#include <iprt/assert.h>
37#include <iprt/err.h>
38#include <iprt/lockvalidator.h>
39#include <iprt/mem.h>
40#include <iprt/thread.h>
41
42#include "internal/magics.h"
43#include "internal/strict.h"
44
45
46/*******************************************************************************
47* Structures and Typedefs *
48*******************************************************************************/
49typedef struct RTSEMRWINTERNAL
50{
51 /** Magic value (RTSEMRW_MAGIC). */
52 uint32_t volatile u32Magic;
53 uint32_t u32Padding; /**< alignment padding.*/
54 /** The state variable.
55 * All accesses are atomic and its bits are defined like this (a decoding example follows the mask definitions below):
56 * Bits 0..14 - cReads.
57 * Bit 15 - Unused.
58 * Bits 16..30 - cWrites.
59 * Bit 31 - fDirection; 0=Read, 1=Write.
60 * Bits 32..46 - cWaitingReads
61 * Bit 47 - Unused.
62 * Bits 48..62 - cWaitingWrites
63 * Bit 63 - Unused.
64 */
65 uint64_t volatile u64State;
66 /** The write owner. */
67 RTNATIVETHREAD volatile hNativeWriter;
68 /** The number of reads made by the current writer. */
69 uint32_t volatile cWriterReads;
70 /** The number of recursions made by the current writer. */
71 uint32_t volatile cWriteRecursions;
72
73 /** What the writer threads are blocking on. */
74 RTSEMEVENT hEvtWrite;
75 /** What the read threads are blocking on when waiting for the writer to
76 * finish. */
77 RTSEMEVENTMULTI hEvtRead;
78 /** Indicates whether hEvtRead needs resetting. */
79 bool volatile fNeedReset;
80
81#ifdef RTSEMRW_STRICT
82 /** The validator record for the writer. */
83 RTLOCKVALRECEXCL ValidatorWrite;
84 /** The validator record for the readers. */
85 RTLOCKVALRECSHRD ValidatorRead;
86#endif
87} RTSEMRWINTERNAL;
88
89
90/*******************************************************************************
91* Defined Constants And Macros *
92*******************************************************************************/
93#define RTSEMRW_CNT_BITS 15
94#define RTSEMRW_CNT_MASK UINT64_C(0x00007fff)
95
96#define RTSEMRW_CNT_RD_SHIFT 0
97#define RTSEMRW_CNT_RD_MASK (RTSEMRW_CNT_MASK << RTSEMRW_CNT_RD_SHIFT)
98#define RTSEMRW_CNT_WR_SHIFT 16
99#define RTSEMRW_CNT_WR_MASK (RTSEMRW_CNT_MASK << RTSEMRW_CNT_WR_SHIFT)
100#define RTSEMRW_DIR_SHIFT 31
101#define RTSEMRW_DIR_MASK RT_BIT_64(RTSEMRW_DIR_SHIFT)
102#define RTSEMRW_DIR_READ UINT64_C(0)
103#define RTSEMRW_DIR_WRITE UINT64_C(1)
104
105#define RTSEMRW_WAIT_CNT_RD_SHIFT 32
106#define RTSEMRW_WAIT_CNT_RD_MASK (RTSEMRW_CNT_MASK << RTSEMRW_WAIT_CNT_RD_SHIFT)
107//#define RTSEMRW_WAIT_CNT_WR_SHIFT 48
108//#define RTSEMRW_WAIT_CNT_WR_MASK (RTSEMRW_CNT_MASK << RTSEMRW_WAIT_CNT_WR_SHIFT)
109
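/* Example: decoding the u64State word using the masks above.  This is an
 * illustrative sketch only (pThis stands for any valid RTSEMRWINTERNAL
 * pointer); the functions below do the same thing inline.
 * @code
 *      uint64_t u64State      = ASMAtomicReadU64(&pThis->u64State);
 *      uint64_t cReads        = (u64State & RTSEMRW_CNT_RD_MASK)      >> RTSEMRW_CNT_RD_SHIFT;
 *      uint64_t cWrites       = (u64State & RTSEMRW_CNT_WR_MASK)      >> RTSEMRW_CNT_WR_SHIFT;
 *      uint64_t cWaitingReads = (u64State & RTSEMRW_WAIT_CNT_RD_MASK) >> RTSEMRW_WAIT_CNT_RD_SHIFT;
 *      bool     fWriteMode    = (u64State & RTSEMRW_DIR_MASK) == (RTSEMRW_DIR_WRITE << RTSEMRW_DIR_SHIFT);
 * @endcode
 */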
110
111#undef RTSemRWCreate
112RTDECL(int) RTSemRWCreate(PRTSEMRW phRWSem)
113{
114 return RTSemRWCreateEx(phRWSem, 0 /*fFlags*/, NIL_RTLOCKVALCLASS, RTLOCKVAL_SUB_CLASS_NONE, "RTSemRW");
115}
116RT_EXPORT_SYMBOL(RTSemRWCreate);
117
118
119RTDECL(int) RTSemRWCreateEx(PRTSEMRW phRWSem, uint32_t fFlags,
120 RTLOCKVALCLASS hClass, uint32_t uSubClass, const char *pszNameFmt, ...)
121{
122 AssertReturn(!(fFlags & ~RTSEMRW_FLAGS_NO_LOCK_VAL), VERR_INVALID_PARAMETER);
123
124 RTSEMRWINTERNAL *pThis = (RTSEMRWINTERNAL *)RTMemAlloc(sizeof(*pThis));
125 if (!pThis)
126 return VERR_NO_MEMORY;
127
128 int rc = RTSemEventMultiCreate(&pThis->hEvtRead);
129 if (RT_SUCCESS(rc))
130 {
131 rc = RTSemEventCreate(&pThis->hEvtWrite);
132 if (RT_SUCCESS(rc))
133 {
134 pThis->u32Magic = RTSEMRW_MAGIC;
135 pThis->u32Padding = 0;
136 pThis->u64State = 0;
137 pThis->hNativeWriter = NIL_RTNATIVETHREAD;
138 pThis->cWriterReads = 0;
139 pThis->cWriteRecursions = 0;
140 pThis->fNeedReset = false;
141#ifdef RTSEMRW_STRICT
142 bool const fLVEnabled = !(fFlags & RTSEMRW_FLAGS_NO_LOCK_VAL);
143 if (!pszNameFmt)
144 {
145 static uint32_t volatile s_iSemRWAnon = 0;
146 uint32_t i = ASMAtomicIncU32(&s_iSemRWAnon) - 1;
147 RTLockValidatorRecExclInit(&pThis->ValidatorWrite, hClass, uSubClass, pThis,
148 fLVEnabled, "RTSemRW-%u", i);
149 RTLockValidatorRecSharedInit(&pThis->ValidatorRead, hClass, uSubClass, pThis,
150 false /*fSignaller*/, fLVEnabled, "RTSemRW-%u", i);
151 }
152 else
153 {
154 va_list va;
155 va_start(va, pszNameFmt);
156 RTLockValidatorRecExclInitV(&pThis->ValidatorWrite, hClass, uSubClass, pThis,
157 fLVEnabled, pszNameFmt, va);
158 va_end(va);
159 va_start(va, pszNameFmt);
160 RTLockValidatorRecSharedInitV(&pThis->ValidatorRead, hClass, uSubClass, pThis,
161 false /*fSignaller*/, fLVEnabled, pszNameFmt, va);
162 va_end(va);
163 }
164 RTLockValidatorRecMakeSiblings(&pThis->ValidatorWrite.Core, &pThis->ValidatorRead.Core);
165#endif
166
167 *phRWSem = pThis;
168 return VINF_SUCCESS;
169 }
170 RTSemEventMultiDestroy(pThis->hEvtRead);
171 }
172 return rc;
173}
174RT_EXPORT_SYMBOL(RTSemRWCreateEx);
175
176
177RTDECL(int) RTSemRWDestroy(RTSEMRW hRWSem)
178{
179 /*
180 * Validate input.
181 */
182 RTSEMRWINTERNAL *pThis = hRWSem;
183 if (pThis == NIL_RTSEMRW)
184 return VINF_SUCCESS;
185 AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
186 AssertReturn(pThis->u32Magic == RTSEMRW_MAGIC, VERR_INVALID_HANDLE);
187 Assert(!(ASMAtomicReadU64(&pThis->u64State) & (RTSEMRW_CNT_RD_MASK | RTSEMRW_CNT_WR_MASK)));
188
189 /*
190 * Invalidate the object and free up the resources.
191 */
192 AssertReturn(ASMAtomicCmpXchgU32(&pThis->u32Magic, ~RTSEMRW_MAGIC, RTSEMRW_MAGIC), VERR_INVALID_HANDLE);
193
194 RTSEMEVENTMULTI hEvtRead;
195 ASMAtomicXchgHandle(&pThis->hEvtRead, NIL_RTSEMEVENTMULTI, &hEvtRead);
196 int rc = RTSemEventMultiDestroy(hEvtRead);
197 AssertRC(rc);
198
199 RTSEMEVENT hEvtWrite;
200 ASMAtomicXchgHandle(&pThis->hEvtWrite, NIL_RTSEMEVENT, &hEvtWrite);
201 rc = RTSemEventDestroy(hEvtWrite);
202 AssertRC(rc);
203
204#ifdef RTSEMRW_STRICT
205 RTLockValidatorRecSharedDelete(&pThis->ValidatorRead);
206 RTLockValidatorRecExclDelete(&pThis->ValidatorWrite);
207#endif
208 RTMemFree(pThis);
209 return VINF_SUCCESS;
210}
211RT_EXPORT_SYMBOL(RTSemRWDestroy);
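/* Example: typical lifetime of a semaphore created and destroyed by the two
 * functions above.  Illustrative sketch only; the "MyCfgLock" name and the
 * surrounding error handling are made up for the example.
 * @code
 *      RTSEMRW hRWSem;
 *      int rc = RTSemRWCreateEx(&hRWSem, 0, NIL_RTLOCKVALCLASS,
 *                               RTLOCKVAL_SUB_CLASS_NONE, "MyCfgLock");
 *      if (RT_SUCCESS(rc))
 *      {
 *          // ... hand hRWSem to the reader and writer threads ...
 *
 *          rc = RTSemRWDestroy(hRWSem);    // all users must be done by now
 *          AssertRC(rc);
 *          hRWSem = NIL_RTSEMRW;
 *      }
 * @endcode
 */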
212
213
214RTDECL(uint32_t) RTSemRWSetSubClass(RTSEMRW hRWSem, uint32_t uSubClass)
215{
216#ifdef RTSEMRW_STRICT
217 /*
218 * Validate handle.
219 */
220 struct RTSEMRWINTERNAL *pThis = hRWSem;
221 AssertPtrReturn(pThis, RTLOCKVAL_SUB_CLASS_INVALID);
222 AssertReturn(pThis->u32Magic == RTSEMRW_MAGIC, RTLOCKVAL_SUB_CLASS_INVALID);
223
224 RTLockValidatorRecSharedSetSubClass(&pThis->ValidatorRead, uSubClass);
225 return RTLockValidatorRecExclSetSubClass(&pThis->ValidatorWrite, uSubClass);
226#else
227 return RTLOCKVAL_SUB_CLASS_INVALID;
228#endif
229}
230RT_EXPORT_SYMBOL(RTSemRWSetSubClass);
231
232
233static int rtSemRWRequestRead(RTSEMRW hRWSem, RTMSINTERVAL cMillies, bool fInterruptible, PCRTLOCKVALSRCPOS pSrcPos)
234{
235 /*
236 * Validate input.
237 */
238 RTSEMRWINTERNAL *pThis = hRWSem;
239 if (pThis == NIL_RTSEMRW)
240 return VINF_SUCCESS;
241 AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
242 AssertReturn(pThis->u32Magic == RTSEMRW_MAGIC, VERR_INVALID_HANDLE);
243
244#ifdef RTSEMRW_STRICT
245 RTTHREAD hThreadSelf = RTThreadSelfAutoAdopt();
246 if (cMillies > 0)
247 {
248 int rc9;
249 RTNATIVETHREAD hNativeWriter;
250 ASMAtomicUoReadHandle(&pThis->hNativeWriter, &hNativeWriter);
251 if (hNativeWriter != NIL_RTTHREAD && hNativeWriter == RTThreadNativeSelf())
252 rc9 = RTLockValidatorRecExclCheckOrder(&pThis->ValidatorWrite, hThreadSelf, pSrcPos, cMillies);
253 else
254 rc9 = RTLockValidatorRecSharedCheckOrder(&pThis->ValidatorRead, hThreadSelf, pSrcPos, cMillies);
255 if (RT_FAILURE(rc9))
256 return rc9;
257 }
258#endif
259
260 /*
261 * Get cracking...
262 */
263 uint64_t u64State = ASMAtomicReadU64(&pThis->u64State);
264 uint64_t u64OldState = u64State;
265
266 for (;;)
267 {
268 if ((u64State & RTSEMRW_DIR_MASK) == (RTSEMRW_DIR_READ << RTSEMRW_DIR_SHIFT))
269 {
270 /* It flows in the right direction, try to follow it before it changes. */
271 uint64_t c = (u64State & RTSEMRW_CNT_RD_MASK) >> RTSEMRW_CNT_RD_SHIFT;
272 c++;
273 Assert(c < RTSEMRW_CNT_MASK / 2);
274 u64State &= ~RTSEMRW_CNT_RD_MASK;
275 u64State |= c << RTSEMRW_CNT_RD_SHIFT;
276 if (ASMAtomicCmpXchgU64(&pThis->u64State, u64State, u64OldState))
277 {
278#ifdef RTSEMRW_STRICT
279 RTLockValidatorRecSharedAddOwner(&pThis->ValidatorRead, hThreadSelf, pSrcPos);
280#endif
281 break;
282 }
283 }
284 else if ((u64State & (RTSEMRW_CNT_RD_MASK | RTSEMRW_CNT_WR_MASK)) == 0)
285 {
286 /* Wrong direction, but we're alone here and can simply try to switch the direction. */
287 u64State &= ~(RTSEMRW_CNT_RD_MASK | RTSEMRW_CNT_WR_MASK | RTSEMRW_DIR_MASK);
288 u64State |= (UINT64_C(1) << RTSEMRW_CNT_RD_SHIFT) | (RTSEMRW_DIR_READ << RTSEMRW_DIR_SHIFT);
289 if (ASMAtomicCmpXchgU64(&pThis->u64State, u64State, u64OldState))
290 {
291 Assert(!pThis->fNeedReset);
292#ifdef RTSEMRW_STRICT
293 RTLockValidatorRecSharedAddOwner(&pThis->ValidatorRead, hThreadSelf, pSrcPos);
294#endif
295 break;
296 }
297 }
298 else
299 {
300 /* Is the writer perhaps doing a read recursion? */
301 RTNATIVETHREAD hNativeSelf = RTThreadNativeSelf();
302 RTNATIVETHREAD hNativeWriter;
303 ASMAtomicUoReadHandle(&pThis->hNativeWriter, &hNativeWriter);
304 if (hNativeSelf == hNativeWriter)
305 {
306#ifdef RTSEMRW_STRICT
307 int rc9 = RTLockValidatorRecExclRecursionMixed(&pThis->ValidatorWrite, &pThis->ValidatorRead.Core, pSrcPos);
308 if (RT_FAILURE(rc9))
309 return rc9;
310#endif
311 Assert(pThis->cWriterReads < UINT32_MAX / 2);
312 ASMAtomicIncU32(&pThis->cWriterReads);
313 return VINF_SUCCESS; /* don't break! */
314 }
315
316 /* If the timeout is 0, return immediately. */
317 if (!cMillies)
318 return VERR_TIMEOUT;
319
320 /* Add ourselves to the queue and wait for the direction to change. */
321 uint64_t c = (u64State & RTSEMRW_CNT_RD_MASK) >> RTSEMRW_CNT_RD_SHIFT;
322 c++;
323 Assert(c < RTSEMRW_CNT_MASK / 2);
324
325 uint64_t cWait = (u64State & RTSEMRW_WAIT_CNT_RD_MASK) >> RTSEMRW_WAIT_CNT_RD_SHIFT;
326 cWait++;
327 Assert(cWait <= c);
328 Assert(cWait < RTSEMRW_CNT_MASK / 2);
329
330 u64State &= ~(RTSEMRW_CNT_RD_MASK | RTSEMRW_WAIT_CNT_RD_MASK);
331 u64State |= (c << RTSEMRW_CNT_RD_SHIFT) | (cWait << RTSEMRW_WAIT_CNT_RD_SHIFT);
332
333 if (ASMAtomicCmpXchgU64(&pThis->u64State, u64State, u64OldState))
334 {
335 for (uint32_t iLoop = 0; ; iLoop++)
336 {
337 int rc;
338#ifdef RTSEMRW_STRICT
339 rc = RTLockValidatorRecSharedCheckBlocking(&pThis->ValidatorRead, hThreadSelf, pSrcPos, true,
340 cMillies, RTTHREADSTATE_RW_READ, false);
341 if (RT_SUCCESS(rc))
342#else
343 RTTHREAD hThreadSelf = RTThreadSelf();
344 RTThreadBlocking(hThreadSelf, RTTHREADSTATE_RW_READ, false);
345#endif
346 {
347 if (fInterruptible)
348 rc = RTSemEventMultiWaitNoResume(pThis->hEvtRead, cMillies);
349 else
350 rc = RTSemEventMultiWait(pThis->hEvtRead, cMillies);
351 RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_READ);
352 if (pThis->u32Magic != RTSEMRW_MAGIC)
353 return VERR_SEM_DESTROYED;
354 }
355 if (RT_FAILURE(rc))
356 {
357 /* Decrement the counts and return the error. */
358 for (;;)
359 {
360 u64OldState = u64State = ASMAtomicReadU64(&pThis->u64State);
361 c = (u64State & RTSEMRW_CNT_RD_MASK) >> RTSEMRW_CNT_RD_SHIFT; Assert(c > 0);
362 c--;
363 cWait = (u64State & RTSEMRW_WAIT_CNT_RD_MASK) >> RTSEMRW_WAIT_CNT_RD_SHIFT; Assert(cWait > 0);
364 cWait--;
365 u64State &= ~(RTSEMRW_CNT_RD_MASK | RTSEMRW_WAIT_CNT_RD_MASK);
366 u64State |= (c << RTSEMRW_CNT_RD_SHIFT) | (cWait << RTSEMRW_WAIT_CNT_RD_SHIFT);
367 if (ASMAtomicCmpXchgU64(&pThis->u64State, u64State, u64OldState))
368 break;
369 }
370 return rc;
371 }
372
373 Assert(pThis->fNeedReset);
374 u64State = ASMAtomicReadU64(&pThis->u64State);
375 if ((u64State & RTSEMRW_DIR_MASK) == (RTSEMRW_DIR_READ << RTSEMRW_DIR_SHIFT))
376 break;
377 AssertMsg(iLoop < 1, ("%u\n", iLoop));
378 }
379
380 /* Decrement the wait count and maybe reset the semaphore (if we're last). */
381 for (;;)
382 {
383 u64OldState = u64State;
384
385 cWait = (u64State & RTSEMRW_WAIT_CNT_RD_MASK) >> RTSEMRW_WAIT_CNT_RD_SHIFT;
386 Assert(cWait > 0);
387 cWait--;
388 u64State &= ~RTSEMRW_WAIT_CNT_RD_MASK;
389 u64State |= cWait << RTSEMRW_WAIT_CNT_RD_SHIFT;
390
391 if (ASMAtomicCmpXchgU64(&pThis->u64State, u64State, u64OldState))
392 {
393 if (cWait == 0)
394 {
395 if (ASMAtomicXchgBool(&pThis->fNeedReset, false))
396 {
397 int rc = RTSemEventMultiReset(pThis->hEvtRead);
398 AssertRCReturn(rc, rc);
399 }
400 }
401 break;
402 }
403 u64State = ASMAtomicReadU64(&pThis->u64State);
404 }
405
406#ifdef RTSEMRW_STRICT
407 RTLockValidatorRecSharedAddOwner(&pThis->ValidatorRead, hThreadSelf, pSrcPos);
408#endif
409 break;
410 }
411 }
412
413 if (pThis->u32Magic != RTSEMRW_MAGIC)
414 return VERR_SEM_DESTROYED;
415
416 ASMNopPause();
417 u64State = ASMAtomicReadU64(&pThis->u64State);
418 u64OldState = u64State;
419 }
420
421 /* got it! */
422 Assert((ASMAtomicReadU64(&pThis->u64State) & RTSEMRW_DIR_MASK) == (RTSEMRW_DIR_READ << RTSEMRW_DIR_SHIFT));
423 return VINF_SUCCESS;
424
425}
426
427
428#undef RTSemRWRequestRead
429RTDECL(int) RTSemRWRequestRead(RTSEMRW hRWSem, RTMSINTERVAL cMillies)
430{
431#ifndef RTSEMRW_STRICT
432 return rtSemRWRequestRead(hRWSem, cMillies, false, NULL);
433#else
434 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
435 return rtSemRWRequestRead(hRWSem, cMillies, false, &SrcPos);
436#endif
437}
438RT_EXPORT_SYMBOL(RTSemRWRequestRead);
439
440
441RTDECL(int) RTSemRWRequestReadDebug(RTSEMRW hRWSem, RTMSINTERVAL cMillies, RTHCUINTPTR uId, RT_SRC_POS_DECL)
442{
443 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
444 return rtSemRWRequestRead(hRWSem, cMillies, false, &SrcPos);
445}
446RT_EXPORT_SYMBOL(RTSemRWRequestReadDebug);
447
448
449#undef RTSemRWRequestReadNoResume
450RTDECL(int) RTSemRWRequestReadNoResume(RTSEMRW hRWSem, RTMSINTERVAL cMillies)
451{
452#ifndef RTSEMRW_STRICT
453 return rtSemRWRequestRead(hRWSem, cMillies, true, NULL);
454#else
455 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
456 return rtSemRWRequestRead(hRWSem, cMillies, true, &SrcPos);
457#endif
458}
459RT_EXPORT_SYMBOL(RTSemRWRequestReadNoResume);
460
461
462RTDECL(int) RTSemRWRequestReadNoResumeDebug(RTSEMRW hRWSem, RTMSINTERVAL cMillies, RTHCUINTPTR uId, RT_SRC_POS_DECL)
463{
464 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
465 return rtSemRWRequestRead(hRWSem, cMillies, true, &SrcPos);
466}
467RT_EXPORT_SYMBOL(RTSemRWRequestReadNoResumeDebug);
468
469
470
471RTDECL(int) RTSemRWReleaseRead(RTSEMRW hRWSem)
472{
473 /*
474 * Validate handle.
475 */
476 RTSEMRWINTERNAL *pThis = hRWSem;
477 AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
478 AssertReturn(pThis->u32Magic == RTSEMRW_MAGIC, VERR_INVALID_HANDLE);
479
480 /*
481 * Check the direction and take action accordingly.
482 */
483 uint64_t u64State = ASMAtomicReadU64(&pThis->u64State);
484 uint64_t u64OldState = u64State;
485 if ((u64State & RTSEMRW_DIR_MASK) == (RTSEMRW_DIR_READ << RTSEMRW_DIR_SHIFT))
486 {
487#ifdef RTSEMRW_STRICT
488 int rc9 = RTLockValidatorRecSharedCheckAndRelease(&pThis->ValidatorRead, NIL_RTTHREAD);
489 if (RT_FAILURE(rc9))
490 return rc9;
491#endif
492 for (;;)
493 {
494 uint64_t c = (u64State & RTSEMRW_CNT_RD_MASK) >> RTSEMRW_CNT_RD_SHIFT;
495 AssertReturn(c > 0, VERR_NOT_OWNER);
496 c--;
497
498 if ( c > 0
499 || (u64State & RTSEMRW_CNT_WR_MASK) == 0)
500 {
501 /* Don't change the direction. */
502 u64State &= ~RTSEMRW_CNT_RD_MASK;
503 u64State |= c << RTSEMRW_CNT_RD_SHIFT;
504 if (ASMAtomicCmpXchgU64(&pThis->u64State, u64State, u64OldState))
505 break;
506 }
507 else
508 {
509 /* Reverse the direction and signal the writer threads. */
510 u64State &= ~(RTSEMRW_CNT_RD_MASK | RTSEMRW_DIR_MASK);
511 u64State |= RTSEMRW_DIR_WRITE << RTSEMRW_DIR_SHIFT;
512 if (ASMAtomicCmpXchgU64(&pThis->u64State, u64State, u64OldState))
513 {
514 int rc = RTSemEventSignal(pThis->hEvtWrite);
515 AssertRC(rc);
516 break;
517 }
518 }
519
520 ASMNopPause();
521 u64State = ASMAtomicReadU64(&pThis->u64State);
522 u64OldState = u64State;
523 }
524 }
525 else
526 {
527 RTNATIVETHREAD hNativeSelf = RTThreadNativeSelf();
528 RTNATIVETHREAD hNativeWriter;
529 ASMAtomicUoReadHandle(&pThis->hNativeWriter, &hNativeWriter);
530 AssertReturn(hNativeSelf == hNativeWriter, VERR_NOT_OWNER);
531 AssertReturn(pThis->cWriterReads > 0, VERR_NOT_OWNER);
532#ifdef RTSEMRW_STRICT
533 int rc = RTLockValidatorRecExclUnwindMixed(&pThis->ValidatorWrite, &pThis->ValidatorRead.Core);
534 if (RT_FAILURE(rc))
535 return rc;
536#endif
537 ASMAtomicDecU32(&pThis->cWriterReads);
538 }
539
540 return VINF_SUCCESS;
541}
542RT_EXPORT_SYMBOL(RTSemRWReleaseRead);
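/* Example: a reader entering and leaving the section guarded by the
 * request/release pair implemented above.  Illustrative sketch; hRWSem and
 * the shared data are hypothetical.
 * @code
 *      int rc = RTSemRWRequestRead(hRWSem, RT_INDEFINITE_WAIT);
 *      if (RT_SUCCESS(rc))
 *      {
 *          // ... read-only access to the shared data ...
 *          rc = RTSemRWReleaseRead(hRWSem);
 *          AssertRC(rc);
 *      }
 * @endcode
 * With a finite timeout the request can also fail with VERR_TIMEOUT (and the
 * NoResume variants with VERR_INTERRUPTED), which the caller must handle.
 */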
543
544
545DECL_FORCE_INLINE(int) rtSemRWRequestWrite(RTSEMRW hRWSem, RTMSINTERVAL cMillies, bool fInterruptible, PCRTLOCKVALSRCPOS pSrcPos)
546{
547 /*
548 * Validate input.
549 */
550 RTSEMRWINTERNAL *pThis = hRWSem;
551 if (pThis == NIL_RTSEMRW)
552 return VINF_SUCCESS;
553 AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
554 AssertReturn(pThis->u32Magic == RTSEMRW_MAGIC, VERR_INVALID_HANDLE);
555
556#ifdef RTSEMRW_STRICT
557 RTTHREAD hThreadSelf = NIL_RTTHREAD;
558 if (cMillies)
559 {
560 hThreadSelf = RTThreadSelfAutoAdopt();
561 int rc9 = RTLockValidatorRecExclCheckOrder(&pThis->ValidatorWrite, hThreadSelf, pSrcPos, cMillies);
562 if (RT_FAILURE(rc9))
563 return rc9;
564 }
565#endif
566
567 /*
568 * Check if we're already the owner and just recursing.
569 */
570 RTNATIVETHREAD hNativeSelf = RTThreadNativeSelf();
571 RTNATIVETHREAD hNativeWriter;
572 ASMAtomicUoReadHandle(&pThis->hNativeWriter, &hNativeWriter);
573 if (hNativeSelf == hNativeWriter)
574 {
575 Assert((ASMAtomicReadU64(&pThis->u64State) & RTSEMRW_DIR_MASK) == (RTSEMRW_DIR_WRITE << RTSEMRW_DIR_SHIFT));
576#ifdef RTSEMRW_STRICT
577 int rc9 = RTLockValidatorRecExclRecursion(&pThis->ValidatorWrite, pSrcPos);
578 if (RT_FAILURE(rc9))
579 return rc9;
580#endif
581 Assert(pThis->cWriteRecursions < UINT32_MAX / 2);
582 ASMAtomicIncU32(&pThis->cWriteRecursions);
583 return VINF_SUCCESS;
584 }
585
586 /*
587 * Get cracking.
588 */
589 uint64_t u64State = ASMAtomicReadU64(&pThis->u64State);
590 uint64_t u64OldState = u64State;
591
592 for (;;)
593 {
594 if ( (u64State & RTSEMRW_DIR_MASK) == (RTSEMRW_DIR_WRITE << RTSEMRW_DIR_SHIFT)
595 || (u64State & (RTSEMRW_CNT_RD_MASK | RTSEMRW_CNT_WR_MASK)) != 0)
596 {
597 /* It flows in the right direction, try to follow it before it changes. */
598 uint64_t c = (u64State & RTSEMRW_CNT_WR_MASK) >> RTSEMRW_CNT_WR_SHIFT;
599 c++;
600 Assert(c < RTSEMRW_CNT_MASK / 2);
601 u64State &= ~RTSEMRW_CNT_WR_MASK;
602 u64State |= c << RTSEMRW_CNT_WR_SHIFT;
603 if (ASMAtomicCmpXchgU64(&pThis->u64State, u64State, u64OldState))
604 break;
605 }
606 else if ((u64State & (RTSEMRW_CNT_RD_MASK | RTSEMRW_CNT_WR_MASK)) == 0)
607 {
608 /* Wrong direction, but we're alone here and can simply try to switch the direction. */
609 u64State &= ~(RTSEMRW_CNT_RD_MASK | RTSEMRW_CNT_WR_MASK | RTSEMRW_DIR_MASK);
610 u64State |= (UINT64_C(1) << RTSEMRW_CNT_WR_SHIFT) | (RTSEMRW_DIR_WRITE << RTSEMRW_DIR_SHIFT);
611 if (ASMAtomicCmpXchgU64(&pThis->u64State, u64State, u64OldState))
612 break;
613 }
614 else if (!cMillies)
615 /* Wrong direction and we're not supposed to wait, just return. */
616 return VERR_TIMEOUT;
617 else
618 {
619 /* Add ourselves to the write count and break out to do the wait. */
620 uint64_t c = (u64State & RTSEMRW_CNT_WR_MASK) >> RTSEMRW_CNT_WR_SHIFT;
621 c++;
622 Assert(c < RTSEMRW_CNT_MASK / 2);
623 u64State &= ~RTSEMRW_CNT_WR_MASK;
624 u64State |= c << RTSEMRW_CNT_WR_SHIFT;
625 if (ASMAtomicCmpXchgU64(&pThis->u64State, u64State, u64OldState))
626 break;
627 }
628
629 if (pThis->u32Magic != RTSEMRW_MAGIC)
630 return VERR_SEM_DESTROYED;
631
632 ASMNopPause();
633 u64State = ASMAtomicReadU64(&pThis->u64State);
634 u64OldState = u64State;
635 }
636
637 /*
638 * If we're in write mode now try grab the ownership. Play fair if there
639 * are threads already waiting.
640 */
641 bool fDone = (u64State & RTSEMRW_DIR_MASK) == (RTSEMRW_DIR_WRITE << RTSEMRW_DIR_SHIFT)
642 && ( ((u64State & RTSEMRW_CNT_WR_MASK) >> RTSEMRW_CNT_WR_SHIFT) == 1
643 || cMillies == 0);
644 if (fDone)
645 ASMAtomicCmpXchgHandle(&pThis->hNativeWriter, hNativeSelf, NIL_RTNATIVETHREAD, fDone);
646 if (!fDone)
647 {
648 /*
649 * Wait for our turn.
650 */
651 for (uint32_t iLoop = 0; ; iLoop++)
652 {
653 int rc;
654#ifdef RTSEMRW_STRICT
655 if (cMillies)
656 {
657 if (hThreadSelf == NIL_RTTHREAD)
658 hThreadSelf = RTThreadSelfAutoAdopt();
659 rc = RTLockValidatorRecExclCheckBlocking(&pThis->ValidatorWrite, hThreadSelf, pSrcPos, true,
660 cMillies, RTTHREADSTATE_RW_WRITE, false);
661 }
662 else
663 rc = VINF_SUCCESS;
664 if (RT_SUCCESS(rc))
665#else
666 RTTHREAD hThreadSelf = RTThreadSelf();
667 RTThreadBlocking(hThreadSelf, RTTHREADSTATE_RW_WRITE, false);
668#endif
669 {
670 if (fInterruptible)
671 rc = RTSemEventWaitNoResume(pThis->hEvtWrite, cMillies);
672 else
673 rc = RTSemEventWait(pThis->hEvtWrite, cMillies);
674 RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_WRITE);
675 if (pThis->u32Magic != RTSEMRW_MAGIC)
676 return VERR_SEM_DESTROYED;
677 }
678 if (RT_FAILURE(rc))
679 {
680 /* Decrement the counts and return the error. */
681 for (;;)
682 {
683 u64OldState = u64State = ASMAtomicReadU64(&pThis->u64State);
684 uint64_t c = (u64State & RTSEMRW_CNT_WR_MASK) >> RTSEMRW_CNT_WR_SHIFT; Assert(c > 0);
685 c--;
686 u64State &= ~RTSEMRW_CNT_WR_MASK;
687 u64State |= c << RTSEMRW_CNT_WR_SHIFT;
688 if (ASMAtomicCmpXchgU64(&pThis->u64State, u64State, u64OldState))
689 break;
690 }
691 return rc;
692 }
693
694 u64State = ASMAtomicReadU64(&pThis->u64State);
695 if ((u64State & RTSEMRW_DIR_MASK) == (RTSEMRW_DIR_WRITE << RTSEMRW_DIR_SHIFT))
696 {
697 ASMAtomicCmpXchgHandle(&pThis->hNativeWriter, hNativeSelf, NIL_RTNATIVETHREAD, fDone);
698 if (fDone)
699 break;
700 }
701 AssertMsg(iLoop < 1000, ("%u\n", iLoop)); /* may loop a few times here... */
702 }
703 }
704
705 /*
706 * Got it!
707 */
708 Assert((ASMAtomicReadU64(&pThis->u64State) & RTSEMRW_DIR_MASK) == (RTSEMRW_DIR_WRITE << RTSEMRW_DIR_SHIFT));
709 ASMAtomicWriteU32(&pThis->cWriteRecursions, 1);
710 Assert(pThis->cWriterReads == 0);
711#ifdef RTSEMRW_STRICT
712 RTLockValidatorRecExclSetOwner(&pThis->ValidatorWrite, hThreadSelf, pSrcPos, true);
713#endif
714
715 return VINF_SUCCESS;
716}
717
718
719#undef RTSemRWRequestWrite
720RTDECL(int) RTSemRWRequestWrite(RTSEMRW hRWSem, RTMSINTERVAL cMillies)
721{
722#ifndef RTSEMRW_STRICT
723 return rtSemRWRequestWrite(hRWSem, cMillies, false, NULL);
724#else
725 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
726 return rtSemRWRequestWrite(hRWSem, cMillies, false, &SrcPos);
727#endif
728}
729RT_EXPORT_SYMBOL(RTSemRWRequestWrite);
730
731
732RTDECL(int) RTSemRWRequestWriteDebug(RTSEMRW hRWSem, RTMSINTERVAL cMillies, RTHCUINTPTR uId, RT_SRC_POS_DECL)
733{
734 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
735 return rtSemRWRequestWrite(hRWSem, cMillies, false, &SrcPos);
736}
737RT_EXPORT_SYMBOL(RTSemRWRequestWriteDebug);
738
739
740#undef RTSemRWRequestWriteNoResume
741RTDECL(int) RTSemRWRequestWriteNoResume(RTSEMRW hRWSem, RTMSINTERVAL cMillies)
742{
743#ifndef RTSEMRW_STRICT
744 return rtSemRWRequestWrite(hRWSem, cMillies, true, NULL);
745#else
746 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
747 return rtSemRWRequestWrite(hRWSem, cMillies, true, &SrcPos);
748#endif
749}
750RT_EXPORT_SYMBOL(RTSemRWRequestWriteNoResume);
751
752
753RTDECL(int) RTSemRWRequestWriteNoResumeDebug(RTSEMRW hRWSem, RTMSINTERVAL cMillies, RTHCUINTPTR uId, RT_SRC_POS_DECL)
754{
755 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
756 return rtSemRWRequestWrite(hRWSem, cMillies, true, &SrcPos);
757}
758RT_EXPORT_SYMBOL(RTSemRWRequestWriteNoResumeDebug);
759
760
761RTDECL(int) RTSemRWReleaseWrite(RTSEMRW hRWSem)
762{
763
764 /*
765 * Validate handle.
766 */
767 struct RTSEMRWINTERNAL *pThis = hRWSem;
768 AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
769 AssertReturn(pThis->u32Magic == RTSEMRW_MAGIC, VERR_INVALID_HANDLE);
770
771 RTNATIVETHREAD hNativeSelf = RTThreadNativeSelf();
772 RTNATIVETHREAD hNativeWriter;
773 ASMAtomicUoReadHandle(&pThis->hNativeWriter, &hNativeWriter);
774 AssertReturn(hNativeSelf == hNativeWriter, VERR_NOT_OWNER);
775
776 /*
777 * Unwind a recursion.
778 */
779 if (pThis->cWriteRecursions == 1)
780 {
781 AssertReturn(pThis->cWriterReads == 0, VERR_WRONG_ORDER); /* (must release all read recursions before the final write.) */
782#ifdef RTSEMRW_STRICT
783 int rc9 = RTLockValidatorRecExclReleaseOwner(&pThis->ValidatorWrite, true);
784 if (RT_FAILURE(rc9))
785 return rc9;
786#endif
787 /*
788 * Update the state.
789 */
790 ASMAtomicWriteU32(&pThis->cWriteRecursions, 0);
791 ASMAtomicWriteHandle(&pThis->hNativeWriter, NIL_RTNATIVETHREAD);
792
793 for (;;)
794 {
795 uint64_t u64State = ASMAtomicReadU64(&pThis->u64State);
796 uint64_t u64OldState = u64State;
797
798 uint64_t c = (u64State & RTSEMRW_CNT_WR_MASK) >> RTSEMRW_CNT_WR_SHIFT;
799 Assert(c > 0);
800 c--;
801
802 if ( c > 0
803 || (u64State & RTSEMRW_CNT_RD_MASK) == 0)
804 {
805 /* Don't change the direction, wake up the next writer if any. */
806 u64State &= ~RTSEMRW_CNT_WR_MASK;
807 u64State |= c << RTSEMRW_CNT_WR_SHIFT;
808 if (ASMAtomicCmpXchgU64(&pThis->u64State, u64State, u64OldState))
809 {
810 if (c > 0)
811 {
812 int rc = RTSemEventSignal(pThis->hEvtWrite);
813 AssertRC(rc);
814 }
815 break;
816 }
817 }
818 else
819 {
820 /* Reverse the direction and signal the reader threads. */
821 u64State &= ~(RTSEMRW_CNT_WR_MASK | RTSEMRW_DIR_MASK);
822 u64State |= RTSEMRW_DIR_READ << RTSEMRW_DIR_SHIFT;
823 if (ASMAtomicCmpXchgU64(&pThis->u64State, u64State, u64OldState))
824 {
825 Assert(!pThis->fNeedReset);
826 ASMAtomicWriteBool(&pThis->fNeedReset, true);
827 int rc = RTSemEventMultiSignal(pThis->hEvtRead);
828 AssertRC(rc);
829 break;
830 }
831 }
832
833 ASMNopPause();
834 if (pThis->u32Magic != RTSEMRW_MAGIC)
835 return VERR_SEM_DESTROYED;
836 }
837 }
838 else
839 {
840 Assert(pThis->cWriteRecursions != 0);
841#ifdef RTSEMRW_STRICT
842 int rc9 = RTLockValidatorRecExclUnwind(&pThis->ValidatorWrite);
843 if (RT_FAILURE(rc9))
844 return rc9;
845#endif
846 ASMAtomicDecU32(&pThis->cWriteRecursions);
847 }
848
849 return VINF_SUCCESS;
850}
851RT_EXPORT_SYMBOL(RTSemRWReleaseWrite);
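/* Example: exclusive access through the write lock, including a read
 * recursion by the writer (tracked by cWriterReads).  Illustrative sketch;
 * hRWSem and the timeout value are hypothetical.
 * @code
 *      int rc = RTSemRWRequestWrite(hRWSem, 30000);    // wait up to 30 seconds
 *      if (RT_SUCCESS(rc))
 *      {
 *          // ... modify the shared data ...
 *
 *          rc = RTSemRWRequestRead(hRWSem, 0);         // read recursion by the writer, won't block
 *          if (RT_SUCCESS(rc))
 *              RTSemRWReleaseRead(hRWSem);             // must be undone before the final release
 *
 *          rc = RTSemRWReleaseWrite(hRWSem);
 *          AssertRC(rc);
 *      }
 *      else
 *      {
 *          // rc is VERR_TIMEOUT, VERR_SEM_DESTROYED or a lock validator error
 *      }
 * @endcode
 */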
852
853
854RTDECL(bool) RTSemRWIsWriteOwner(RTSEMRW hRWSem)
855{
856 /*
857 * Validate handle.
858 */
859 struct RTSEMRWINTERNAL *pThis = hRWSem;
860 AssertPtrReturn(pThis, false);
861 AssertReturn(pThis->u32Magic == RTSEMRW_MAGIC, false);
862
863 /*
864 * Check ownership.
865 */
866 RTNATIVETHREAD hNativeSelf = RTThreadNativeSelf();
867 RTNATIVETHREAD hNativeWriter;
868 ASMAtomicUoReadHandle(&pThis->hNativeWriter, &hNativeWriter);
869 return hNativeWriter == hNativeSelf;
870}
871RT_EXPORT_SYMBOL(RTSemRWIsWriteOwner);
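/* Example: the usual way to use RTSemRWIsWriteOwner is to assert locking
 * preconditions in helpers that expect the caller to hold the write lock.
 * Illustrative sketch; myUpdateWorker is a hypothetical function.
 * @code
 *      static void myUpdateWorker(RTSEMRW hRWSem)
 *      {
 *          Assert(RTSemRWIsWriteOwner(hRWSem));
 *          // ... touch the data protected by hRWSem ...
 *      }
 * @endcode
 */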
872
873
874RTDECL(bool) RTSemRWIsReadOwner(RTSEMRW hRWSem, bool fWannaHear)
875{
876 /*
877 * Validate handle.
878 */
879 struct RTSEMRWINTERNAL *pThis = hRWSem;
880 AssertPtrReturn(pThis, false);
881 AssertReturn(pThis->u32Magic == RTSEMRW_MAGIC, false);
882
883 /*
884 * Inspect the state.
885 */
886 uint64_t u64State = ASMAtomicReadU64(&pThis->u64State);
887 if ((u64State & RTSEMRW_DIR_MASK) == (RTSEMRW_DIR_WRITE << RTSEMRW_DIR_SHIFT))
888 {
889 /*
890 * It's in write mode, so we can only be a reader if we're also the
891 * current writer.
892 */
893 RTNATIVETHREAD hNativeSelf = RTThreadNativeSelf();
894 RTNATIVETHREAD hNativeWriter;
895 ASMAtomicUoReadHandle(&pThis->hNativeWriter, &hNativeWriter);
896 return hNativeWriter == hNativeSelf;
897 }
898
899 /*
900 * Read mode. If there are no current readers, then we cannot be a reader.
901 */
902 if (!(u64State & RTSEMRW_CNT_RD_MASK))
903 return false;
904
905#ifdef RTSEMRW_STRICT
906 /*
907 * Ask the lock validator.
908 */
909 return RTLockValidatorRecSharedIsOwner(&pThis->ValidatorRead, NIL_RTTHREAD);
910#else
911 /*
912 * Ok, we don't know, just tell the caller what he wants to hear.
913 */
914 return fWannaHear;
915#endif
916}
917RT_EXPORT_SYMBOL(RTSemRWIsReadOwner);
918
919
920RTDECL(uint32_t) RTSemRWGetWriteRecursion(RTSEMRW hRWSem)
921{
922 /*
923 * Validate handle.
924 */
925 struct RTSEMRWINTERNAL *pThis = hRWSem;
926 AssertPtrReturn(pThis, 0);
927 AssertReturn(pThis->u32Magic == RTSEMRW_MAGIC, 0);
928
929 /*
930 * Return the requested data.
931 */
932 return pThis->cWriteRecursions;
933}
934RT_EXPORT_SYMBOL(RTSemRWGetWriteRecursion);
935
936
937RTDECL(uint32_t) RTSemRWGetWriterReadRecursion(RTSEMRW hRWSem)
938{
939 /*
940 * Validate handle.
941 */
942 struct RTSEMRWINTERNAL *pThis = hRWSem;
943 AssertPtrReturn(pThis, 0);
944 AssertReturn(pThis->u32Magic == RTSEMRW_MAGIC, 0);
945
946 /*
947 * Return the requested data.
948 */
949 return pThis->cWriterReads;
950}
951RT_EXPORT_SYMBOL(RTSemRWGetWriterReadRecursion);
952
953
954RTDECL(uint32_t) RTSemRWGetReadCount(RTSEMRW hRWSem)
955{
956 /*
957 * Validate input.
958 */
959 struct RTSEMRWINTERNAL *pThis = hRWSem;
960 AssertPtrReturn(pThis, 0);
961 AssertMsgReturn(pThis->u32Magic == RTSEMRW_MAGIC,
962 ("pThis=%p u32Magic=%#x\n", pThis, pThis->u32Magic),
963 0);
964
965 /*
966 * Return the requested data.
967 */
968 uint64_t u64State = ASMAtomicReadU64(&pThis->u64State);
969 if ((u64State & RTSEMRW_DIR_MASK) != (RTSEMRW_DIR_READ << RTSEMRW_DIR_SHIFT))
970 return 0;
971 return (u64State & RTSEMRW_CNT_RD_MASK) >> RTSEMRW_CNT_RD_SHIFT;
972}
973RT_EXPORT_SYMBOL(RTSemRWGetReadCount);
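/* Example: the three query functions above are handy when debugging locking
 * problems.  Illustrative sketch; hRWSem is hypothetical and the values are
 * only snapshots that may be stale by the time they are printed.
 * @code
 *      RTPrintf("rw-sem %p: readers=%u write-recursions=%u writer-reads=%u\n",
 *               hRWSem,
 *               RTSemRWGetReadCount(hRWSem),
 *               RTSemRWGetWriteRecursion(hRWSem),
 *               RTSemRWGetWriterReadRecursion(hRWSem));
 * @endcode
 */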
974