VirtualBox

source: vbox/trunk/src/VBox/Runtime/common/misc/lockvalidator.cpp@ 25607

Last change on this file since 25607 was 25607, checked in by vboxsync, 15 years ago

iprt,pdmcritsect: Shortening and cleaning up the lock validator structure names.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 57.4 KB
Line 
1/* $Id: lockvalidator.cpp 25607 2009-12-31 13:21:39Z vboxsync $ */
2/** @file
3 * IPRT - Lock Validator.
4 */
5
6/*
7 * Copyright (C) 2009 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 *
26 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
27 * Clara, CA 95054 USA or visit http://www.sun.com if you need
28 * additional information or have any questions.
29 */
30
31/*******************************************************************************
32* Header Files *
33*******************************************************************************/
34#include <iprt/lockvalidator.h>
35#include "internal/iprt.h"
36
37#include <iprt/asm.h>
38#include <iprt/assert.h>
39#include <iprt/err.h>
40#include <iprt/mem.h>
41#include <iprt/once.h>
42#include <iprt/semaphore.h>
43#include <iprt/string.h>
44#include <iprt/thread.h>
45
46#include "internal/lockvalidator.h"
47#include "internal/magics.h"
48#include "internal/thread.h"
49
50
51/*******************************************************************************
52* Structures and Typedefs *
53*******************************************************************************/
/**
 * Deadlock detection stack entry.
 *
 * One frame of the walk performed by the deadlock detector; an array of
 * these forms RTLOCKVALIDATORDDSTACK.
 */
typedef struct RTLOCKVALIDATORDDENTRY
{
    /** The current record. */
    PRTLOCKVALRECUNION pRec;
    /** The current entry number if pRec is a shared one. */
    uint32_t iEntry;
    /** The thread state of the thread we followed to get to pFirstSibling.
     * This is only used for validating a deadlock stack. */
    RTTHREADSTATE enmState;
    /** The thread we followed to get to pFirstSibling.
     * This is only used for validating a deadlock stack. */
    PRTTHREADINT pThread;
    /** What pThread is waiting on, i.e. where we entered the circular list of
     * siblings.  This is used for validating a deadlock stack as well as
     * terminating the sibling walk. */
    PRTLOCKVALRECUNION pFirstSibling;
} RTLOCKVALIDATORDDENTRY;
74
75
/**
 * Deadlock detection stack.
 *
 * Fixed-depth stack of RTLOCKVALIDATORDDENTRY frames used while walking
 * the wait graph (max depth 32).
 */
typedef struct RTLOCKVALIDATORDDSTACK
{
    /** The number of stack entries in use. */
    uint32_t c;
    /** The stack entries. */
    RTLOCKVALIDATORDDENTRY a[32];
} RTLOCKVALIDATORDDSTACK;
/** Pointer to a deadlock detection stack. */
typedef RTLOCKVALIDATORDDSTACK *PRTLOCKVALIDATORDDSTACK;
88
89
/*******************************************************************************
*   Global Variables                                                           *
*******************************************************************************/
/** Serializing object destruction and deadlock detection.
 * NS: RTLOCKVALIDATORREC* and RTTHREADINT destruction.
 * EW: Deadlock detection.
 * Created lazily by RTLockValidatorRecExclInit. */
static RTSEMXROADS          g_hLockValidatorXRoads = NIL_RTSEMXROADS;
/** Whether the lock validator is enabled or disabled.
 * Only applies to new locks. */
static bool volatile        g_fLockValidatorEnabled = true;
/** Set if the lock validator is quiet (no assertion messages).
 * Defaults to noisy in strict builds, quiet otherwise. */
#ifdef RT_STRICT
static bool volatile        g_fLockValidatorQuiet = false;
#else
static bool volatile        g_fLockValidatorQuiet = true;
#endif
/** Set if the lock validator may panic (RTAssertPanic).
 * Defaults to on in strict builds, off otherwise. */
#ifdef RT_STRICT
static bool volatile        g_fLockValidatorMayPanic = true;
#else
static bool volatile        g_fLockValidatorMayPanic = false;
#endif
113
114
115/** Wrapper around ASMAtomicReadPtr. */
116DECL_FORCE_INLINE(PRTLOCKVALRECUNION) rtLockValidatorReadRecUnionPtr(PRTLOCKVALRECUNION volatile *ppRec)
117{
118 return (PRTLOCKVALRECUNION)ASMAtomicReadPtr((void * volatile *)ppRec);
119}
120
121
122/** Wrapper around ASMAtomicWritePtr. */
123DECL_FORCE_INLINE(void) rtLockValidatorWriteRecUnionPtr(PRTLOCKVALRECUNION volatile *ppRec, PRTLOCKVALRECUNION pRecNew)
124{
125 ASMAtomicWritePtr((void * volatile *)ppRec, pRecNew);
126}
127
128
129/** Wrapper around ASMAtomicReadPtr. */
130DECL_FORCE_INLINE(PRTTHREADINT) rtLockValidatorReadThreadHandle(RTTHREAD volatile *phThread)
131{
132 return (PRTTHREADINT)ASMAtomicReadPtr((void * volatile *)phThread);
133}
134
135
136/** Wrapper around ASMAtomicUoReadPtr. */
137DECL_FORCE_INLINE(PRTLOCKVALRECSHRDOWN) rtLockValidatorUoReadSharedOwner(PRTLOCKVALRECSHRDOWN volatile *ppOwner)
138{
139 return (PRTLOCKVALRECSHRDOWN)ASMAtomicUoReadPtr((void * volatile *)ppOwner);
140}
141
142
143/**
144 * Reads a volatile thread handle field and returns the thread name.
145 *
146 * @returns Thread name (read only).
147 * @param phThread The thread handle field.
148 */
149static const char *rtLockValidatorNameThreadHandle(RTTHREAD volatile *phThread)
150{
151 PRTTHREADINT pThread = rtLockValidatorReadThreadHandle(phThread);
152 if (!pThread)
153 return "<NIL>";
154 if (!VALID_PTR(pThread))
155 return "<INVALID>";
156 if (pThread->u32Magic != RTTHREADINT_MAGIC)
157 return "<BAD-THREAD-MAGIC>";
158 return pThread->szName;
159}
160
161
162/**
163 * Launch a simple assertion like complaint w/ panic.
164 *
165 * @param RT_SRC_POS_DECL Where from.
166 * @param pszWhat What we're complaining about.
167 * @param ... Format arguments.
168 */
169static void rtLockValidatorComplain(RT_SRC_POS_DECL, const char *pszWhat, ...)
170{
171 if (!ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
172 {
173 RTAssertMsg1Weak("RTLockValidator", iLine, pszFile, pszFunction);
174 va_list va;
175 va_start(va, pszWhat);
176 RTAssertMsg2WeakV(pszWhat, va);
177 va_end(va);
178 }
179 if (!ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
180 RTAssertPanic();
181}
182
183
/**
 * Describes the lock.
 *
 * Appends a one-line description of pRec to the current assertion message,
 * formatted according to the record type (exclusive, shared, shared-owner).
 * Does nothing if pRec is invalid or the validator is in quiet mode.
 *
 * @param   pszPrefix           Message prefix.
 * @param   pRec                The lock record we're working on.
 * @param   pszSuffix           Message suffix.
 */
static void rtLockValidatorComplainAboutLock(const char *pszPrefix, PRTLOCKVALRECUNION pRec, const char *pszSuffix)
{
    if (   VALID_PTR(pRec)
        && !ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
    {
        switch (pRec->Core.u32Magic)
        {
            case RTLOCKVALRECEXCL_MAGIC:
                RTAssertMsg2AddWeak("%s%p %s xrec=%p own=%s nest=%u pos={%Rbn(%u) %Rfn %p}%s", pszPrefix,
                                    pRec->Excl.hLock, pRec->Excl.pszName, pRec,
                                    rtLockValidatorNameThreadHandle(&pRec->Excl.hThread), pRec->Excl.cRecursion,
                                    pRec->Excl.SrcPos.pszFile, pRec->Excl.SrcPos.uLine, pRec->Excl.SrcPos.pszFunction, pRec->Excl.SrcPos.uId,
                                    pszSuffix);
                break;

            case RTLOCKVALRECSHRD_MAGIC:
                RTAssertMsg2AddWeak("%s%p %s srec=%p%s", pszPrefix,
                                    pRec->Shared.hLock, pRec->Shared.pszName, pRec,
                                    pszSuffix);
                break;

            case RTLOCKVALRECSHRDOWN_MAGIC:
            {
                /* A shared-owner record; describe via its parent shared record
                   when that is still valid, otherwise flag it as bad. */
                PRTLOCKVALRECSHRD pShared = pRec->SharedOwn.pSharedRec;
                if (    VALID_PTR(pShared)
                    &&  pShared->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC)
                    RTAssertMsg2AddWeak("%s%p %s srec=%p trec=%p thr=%s nest=%u pos={%Rbn(%u) %Rfn %p}%s", pszPrefix,
                                        pShared->hLock, pShared->pszName, pShared,
                                        pRec, rtLockValidatorNameThreadHandle(&pRec->SharedOwn.hThread), pRec->SharedOwn.cRecursion,
                                        pRec->SharedOwn.SrcPos.pszFile, pRec->SharedOwn.SrcPos.uLine, pRec->SharedOwn.SrcPos.pszFunction, pRec->SharedOwn.SrcPos.uId,
                                        pszSuffix);
                else
                    RTAssertMsg2AddWeak("%sbad srec=%p trec=%p thr=%s nest=%u pos={%Rbn(%u) %Rfn %p}%s", pszPrefix,
                                        pShared,
                                        pRec, rtLockValidatorNameThreadHandle(&pRec->SharedOwn.hThread), pRec->SharedOwn.cRecursion,
                                        pRec->SharedOwn.SrcPos.pszFile, pRec->SharedOwn.SrcPos.uLine, pRec->SharedOwn.SrcPos.pszFunction, pRec->SharedOwn.SrcPos.uId,
                                        pszSuffix);
                break;
            }

            default:
                RTAssertMsg2AddWeak("%spRec=%p u32Magic=%#x (bad)%s", pszPrefix, pRec, pRec->Core.u32Magic, pszSuffix);
                break;
        }
    }
}
237
238
239/**
240 * Launch the initial complaint.
241 *
242 * @param pszWhat What we're complaining about.
243 * @param pSrcPos Where we are complaining from, as it were.
244 * @param pThreadSelf The calling thread.
245 * @param pRec The main lock involved. Can be NULL.
246 */
247static void rtLockValidatorComplainFirst(const char *pszWhat, PCRTLOCKVALSRCPOS pSrcPos, PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION pRec)
248{
249 if (!ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
250 {
251 RTAssertMsg1Weak("RTLockValidator", pSrcPos->uLine, pSrcPos->pszFile, pSrcPos->pszFunction);
252 if (pSrcPos->uId)
253 RTAssertMsg2Weak("%s [uId=%p thrd=%s]\n", pszWhat, pSrcPos->uId, VALID_PTR(pThreadSelf) ? pThreadSelf->szName : "<NIL>");
254 else
255 RTAssertMsg2Weak("%s\n", pszWhat, pSrcPos->uId);
256 rtLockValidatorComplainAboutLock("Lock: ", pRec, "\n");
257 }
258}
259
260
261/**
262 * Continue bitching.
263 *
264 * @param pszFormat Format string.
265 * @param ... Format arguments.
266 */
267static void rtLockValidatorComplainMore(const char *pszFormat, ...)
268{
269 if (!ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
270 {
271 va_list va;
272 va_start(va, pszFormat);
273 RTAssertMsg2AddWeakV(pszFormat, va);
274 va_end(va);
275 }
276}
277
278
279/**
280 * Raise a panic if enabled.
281 */
282static void rtLockValidatorComplainPanic(void)
283{
284 if (ASMAtomicUoReadBool(&g_fLockValidatorMayPanic))
285 RTAssertPanic();
286}
287
288
/**
 * Copy a source position record.
 *
 * Uses unordered atomic writes per field; concurrent readers may thus see a
 * mix of old and new fields, which is acceptable for diagnostic data.
 *
 * @param   pDst                The destination.
 * @param   pSrc                The source.
 */
DECL_FORCE_INLINE(void) rtLockValidatorCopySrcPos(PRTLOCKVALSRCPOS pDst, PCRTLOCKVALSRCPOS pSrc)
{
    ASMAtomicUoWriteU32(&pDst->uLine, pSrc->uLine);
    ASMAtomicUoWritePtr((void * volatile *)&pDst->pszFile,     pSrc->pszFile);
    ASMAtomicUoWritePtr((void * volatile *)&pDst->pszFunction, pSrc->pszFunction);
    ASMAtomicUoWritePtr((void * volatile *)&pDst->uId,         (void *)pSrc->uId);
}
302
303
304/**
305 * Init a source position record.
306 *
307 * @param pSrcPos The source position record.
308 */
309DECL_FORCE_INLINE(void) rtLockValidatorInitSrcPos(PRTLOCKVALSRCPOS pSrcPos)
310{
311 pSrcPos->pszFile = NULL;
312 pSrcPos->pszFunction = NULL;
313 pSrcPos->uId = 0;
314 pSrcPos->uLine = 0;
315#if HC_ARCH_BITS == 64
316 pSrcPos->u32Padding = 0;
317#endif
318}
319
320
321/**
322 * Serializes destruction of RTLOCKVALIDATORREC* and RTTHREADINT structures.
323 */
324DECLHIDDEN(void) rtLockValidatorSerializeDestructEnter(void)
325{
326 RTSEMXROADS hXRoads = g_hLockValidatorXRoads;
327 if (hXRoads != NIL_RTSEMXROADS)
328 RTSemXRoadsNSEnter(hXRoads);
329}
330
331
332/**
333 * Call after rtLockValidatorSerializeDestructEnter.
334 */
335DECLHIDDEN(void) rtLockValidatorSerializeDestructLeave(void)
336{
337 RTSEMXROADS hXRoads = g_hLockValidatorXRoads;
338 if (hXRoads != NIL_RTSEMXROADS)
339 RTSemXRoadsNSLeave(hXRoads);
340}
341
342
343/**
344 * Serializes deadlock detection against destruction of the objects being
345 * inspected.
346 */
347DECLINLINE(void) rtLockValidatorSerializeDetectionEnter(void)
348{
349 RTSEMXROADS hXRoads = g_hLockValidatorXRoads;
350 if (hXRoads != NIL_RTSEMXROADS)
351 RTSemXRoadsEWEnter(hXRoads);
352}
353
354
355/**
356 * Call after rtLockValidatorSerializeDetectionEnter.
357 */
358DECLHIDDEN(void) rtLockValidatorSerializeDetectionLeave(void)
359{
360 RTSEMXROADS hXRoads = g_hLockValidatorXRoads;
361 if (hXRoads != NIL_RTSEMXROADS)
362 RTSemXRoadsEWLeave(hXRoads);
363}
364
365
366/**
367 * Unlinks all siblings.
368 *
369 * This is used during record deletion and assumes no races.
370 *
371 * @param pCore One of the siblings.
372 */
373static void rtLockValidatorUnlinkAllSiblings(PRTLOCKVALRECCORE pCore)
374{
375 /* ASSUMES sibling destruction doesn't involve any races and that all
376 related records are to be disposed off now. */
377 PRTLOCKVALRECUNION pSibling = (PRTLOCKVALRECUNION)pCore;
378 while (pSibling)
379 {
380 PRTLOCKVALRECUNION volatile *ppCoreNext;
381 switch (pSibling->Core.u32Magic)
382 {
383 case RTLOCKVALRECEXCL_MAGIC:
384 case RTLOCKVALRECEXCL_MAGIC_DEAD:
385 ppCoreNext = &pSibling->Excl.pSibling;
386 break;
387
388 case RTLOCKVALRECSHRD_MAGIC:
389 case RTLOCKVALRECSHRD_MAGIC_DEAD:
390 ppCoreNext = &pSibling->Shared.pSibling;
391 break;
392
393 default:
394 AssertFailed();
395 ppCoreNext = NULL;
396 break;
397 }
398 if (RT_UNLIKELY(ppCoreNext))
399 break;
400 pSibling = (PRTLOCKVALRECUNION)ASMAtomicXchgPtr((void * volatile *)ppCoreNext, NULL);
401 }
402}
403
404
/**
 * Initializes an exclusive lock validator record.
 *
 * Also lazily creates the global crossroads semaphore used to serialize
 * deadlock detection against record destruction (best effort: a failed
 * creation simply leaves the semaphore NIL and serialization disabled).
 *
 * @param   pRec                The record to initialize.
 * @param   hClass              The lock class (may be NIL).
 * @param   uSubClass           The sub-class.
 * @param   pszName             The lock name (not copied).
 * @param   hLock               The lock handle this record covers.
 */
RTDECL(void) RTLockValidatorRecExclInit(PRTLOCKVALRECEXCL pRec, RTLOCKVALIDATORCLASS hClass,
                                        uint32_t uSubClass, const char *pszName, void *hLock)
{
    pRec->Core.u32Magic = RTLOCKVALRECEXCL_MAGIC;
    pRec->fEnabled      = RTLockValidatorIsEnabled();
    pRec->afReserved[0] = 0;
    pRec->afReserved[1] = 0;
    pRec->afReserved[2] = 0;
    rtLockValidatorInitSrcPos(&pRec->SrcPos);
    pRec->hThread       = NIL_RTTHREAD;
    pRec->pDown         = NULL;
    pRec->hClass        = hClass;
    pRec->uSubClass     = uSubClass;
    pRec->cRecursion    = 0;
    pRec->hLock         = hLock;
    pRec->pszName       = pszName;
    pRec->pSibling      = NULL;

    /* Lazily initialize the crossroads semaphore.  The CmpXchg guard ensures
       only one thread attempts the creation at a time. */
    static uint32_t volatile s_fInitializing = false;
    if (RT_UNLIKELY(    g_hLockValidatorXRoads == NIL_RTSEMXROADS
                    &&  ASMAtomicCmpXchgU32(&s_fInitializing, true, false)))
    {
        RTSEMXROADS hXRoads;
        int rc = RTSemXRoadsCreate(&hXRoads);
        if (RT_SUCCESS(rc))
            ASMAtomicWriteHandle(&g_hLockValidatorXRoads, hXRoads);
        ASMAtomicWriteU32(&s_fInitializing, false);
    }
}
435
436
437RTDECL(int) RTLockValidatorRecExclCreate(PRTLOCKVALRECEXCL *ppRec, RTLOCKVALIDATORCLASS hClass,
438 uint32_t uSubClass, const char *pszName, void *pvLock)
439{
440 PRTLOCKVALRECEXCL pRec;
441 *ppRec = pRec = (PRTLOCKVALRECEXCL)RTMemAlloc(sizeof(*pRec));
442 if (!pRec)
443 return VERR_NO_MEMORY;
444
445 RTLockValidatorRecExclInit(pRec, hClass, uSubClass, pszName, pvLock);
446
447 return VINF_SUCCESS;
448}
449
450
/**
 * Uninitializes an exclusive lock validator record.
 *
 * Marks the record dead and unlinks siblings while holding the destruction
 * side of the crossroads, so concurrent deadlock detection never sees a
 * half-destroyed record.
 *
 * @param   pRec                The record to delete.
 */
RTDECL(void) RTLockValidatorRecExclDelete(PRTLOCKVALRECEXCL pRec)
{
    Assert(pRec->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC);

    rtLockValidatorSerializeDestructEnter();

    /* Kill the magic first so readers bail out, then clear the handles. */
    ASMAtomicWriteU32(&pRec->Core.u32Magic, RTLOCKVALRECEXCL_MAGIC_DEAD);
    ASMAtomicWriteHandle(&pRec->hThread, NIL_RTTHREAD);
    ASMAtomicWriteHandle(&pRec->hClass, NIL_RTLOCKVALIDATORCLASS);
    if (pRec->pSibling)
        rtLockValidatorUnlinkAllSiblings(&pRec->Core);
    rtLockValidatorSerializeDestructLeave();
}
464
465
466RTDECL(void) RTLockValidatorRecExclDestroy(PRTLOCKVALRECEXCL *ppRec)
467{
468 PRTLOCKVALRECEXCL pRec = *ppRec;
469 *ppRec = NULL;
470 if (pRec)
471 {
472 RTLockValidatorRecExclDelete(pRec);
473 RTMemFree(pRec);
474 }
475}
476
477
478RTDECL(void) RTLockValidatorRecSharedInit(PRTLOCKVALRECSHRD pRec, RTLOCKVALIDATORCLASS hClass,
479 uint32_t uSubClass, const char *pszName, void *hLock)
480{
481 pRec->Core.u32Magic = RTLOCKVALRECSHRD_MAGIC;
482 pRec->uSubClass = uSubClass;
483 pRec->hClass = hClass;
484 pRec->hLock = hLock;
485 pRec->pszName = pszName;
486 pRec->fEnabled = RTLockValidatorIsEnabled();
487 pRec->pSibling = NULL;
488
489 /* the table */
490 pRec->cEntries = 0;
491 pRec->iLastEntry = 0;
492 pRec->cAllocated = 0;
493 pRec->fReallocating = false;
494 pRec->afPadding[0] = false;
495 pRec->afPadding[1] = false;
496 pRec->papOwners = NULL;
497#if HC_ARCH_BITS == 32
498 pRec->u32Alignment = UINT32_MAX;
499#endif
500}
501
502
503RTDECL(void) RTLockValidatorRecSharedDelete(PRTLOCKVALRECSHRD pRec)
504{
505 Assert(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC);
506
507 /*
508 * Flip it into table realloc mode and take the destruction lock.
509 */
510 rtLockValidatorSerializeDestructEnter();
511 while (!ASMAtomicCmpXchgBool(&pRec->fReallocating, true, false))
512 {
513 rtLockValidatorSerializeDestructLeave();
514
515 rtLockValidatorSerializeDetectionEnter();
516 rtLockValidatorSerializeDetectionLeave();
517
518 rtLockValidatorSerializeDestructEnter();
519 }
520
521 ASMAtomicWriteU32(&pRec->Core.u32Magic, RTLOCKVALRECSHRD_MAGIC_DEAD);
522 ASMAtomicUoWriteHandle(&pRec->hClass, NIL_RTLOCKVALIDATORCLASS);
523 if (pRec->papOwners)
524 {
525 PRTLOCKVALRECSHRDOWN volatile *papOwners = pRec->papOwners;
526 ASMAtomicUoWritePtr((void * volatile *)&pRec->papOwners, NULL);
527 ASMAtomicUoWriteU32(&pRec->cAllocated, 0);
528
529 RTMemFree((void *)pRec->papOwners);
530 }
531 if (pRec->pSibling)
532 rtLockValidatorUnlinkAllSiblings(&pRec->Core);
533 ASMAtomicWriteBool(&pRec->fReallocating, false);
534
535 rtLockValidatorSerializeDestructLeave();
536}
537
538
/**
 * Locates a thread in a shared lock record.
 *
 * @returns Pointer to the thread record on success, NULL on failure.
 * @param   pShared             The shared lock record.
 * @param   hThread             The thread to find.
 * @param   piEntry             Where to optionally return the table index.
 */
DECLINLINE(PRTLOCKVALRECSHRDOWN)
rtLockValidatorSharedRecFindThread(PRTLOCKVALRECSHRD pShared, RTTHREAD hThread, uint32_t *piEntry)
{
    /* Hold the detection side of the crossroads so the owner table cannot be
       reallocated or freed under us while scanning. */
    rtLockValidatorSerializeDetectionEnter();
    if (pShared->papOwners)
    {
        PRTLOCKVALRECSHRDOWN volatile *papOwners = pShared->papOwners;
        uint32_t const                 cMax      = pShared->cAllocated;
        for (uint32_t iEntry = 0; iEntry < cMax; iEntry++)
        {
            PRTLOCKVALRECSHRDOWN pEntry = rtLockValidatorUoReadSharedOwner(&papOwners[iEntry]);
            if (pEntry && pEntry->hThread == hThread)
            {
                rtLockValidatorSerializeDetectionLeave();
                if (piEntry)
                    *piEntry = iEntry;
                return pEntry;
            }
        }
    }
    rtLockValidatorSerializeDetectionLeave();
    return NULL;
}
570
571
572/**
573 * Allocates and initializes a thread entry for the shared lock record.
574 *
575 * @returns The new thread entry.
576 * @param pShared The shared lock record.
577 * @param hThread The thread handle.
578 * @param pSrcPos The source position.
579 */
580DECLINLINE(PRTLOCKVALRECSHRDOWN)
581rtLockValidatorSharedRecAllocThread(PRTLOCKVALRECSHRD pRead, RTTHREAD hThread, PCRTLOCKVALSRCPOS pSrcPos)
582{
583 PRTLOCKVALRECSHRDOWN pEntry;
584
585 pEntry = (PRTLOCKVALRECSHRDOWN)RTMemAlloc(sizeof(RTLOCKVALRECSHRDOWN));
586 if (pEntry)
587 {
588 pEntry->Core.u32Magic = RTLOCKVALRECSHRDOWN_MAGIC;
589 pEntry->cRecursion = 1;
590 pEntry->hThread = hThread;
591 pEntry->pDown = NULL;
592 pEntry->pSharedRec = pRead;
593#if HC_ARCH_BITS == 32
594 pEntry->pvReserved = NULL;
595#endif
596 if (pSrcPos)
597 pEntry->SrcPos = *pSrcPos;
598 else
599 rtLockValidatorInitSrcPos(&pEntry->SrcPos);
600 }
601
602 return pEntry;
603}
604
605/**
606 * Frees a thread entry allocated by rtLockValidatorSharedRecAllocThread.
607 *
608 * @param pEntry The thread entry.
609 */
610DECLINLINE(void) rtLockValidatorSharedRecFreeThread(PRTLOCKVALRECSHRDOWN pEntry)
611{
612 if (pEntry)
613 {
614 rtLockValidatorSerializeDestructEnter();
615 ASMAtomicWriteU32(&pEntry->Core.u32Magic, RTLOCKVALRECSHRDOWN_MAGIC_DEAD);
616 ASMAtomicWriteHandle(&pEntry->hThread, NIL_RTTHREAD);
617 rtLockValidatorSerializeDestructLeave();
618
619 RTMemFree(pEntry);
620 }
621}
622
623
/**
 * Make more room in the table.
 *
 * Caller enters holding the detection lock; on success it is re-taken, on
 * failure it has been dropped (see retval notes).
 *
 * @retval  true on success
 * @retval  false if we're out of memory or running into a bad race condition
 *          (probably a bug somewhere).  No longer holding the lock.
 *
 * @param   pShared             The shared lock record.
 */
static bool rtLockValidatorSharedRecMakeRoom(PRTLOCKVALRECSHRD pShared)
{
    for (unsigned i = 0; i < 1000; i++)
    {
        /*
         * Switch to the other data access direction.
         */
        rtLockValidatorSerializeDetectionLeave();
        if (i >= 10)
        {
            /* These asserts fire on the 10th and 100th spin — apparently a
               deliberate breadcrumb that we are spinning unusually long.
               NOTE(review): confirm this is intentional and not a mangled
               condition. */
            Assert(i != 10 && i != 100);
            /* NOTE(review): the boolean argument sleeps 0ms (yield) while
               10 <= i < 100 and 1ms once i >= 100 — confirm `i >= 100` was
               not meant to be a millisecond count. */
            RTThreadSleep(i >= 100);
        }
        rtLockValidatorSerializeDestructEnter();

        /*
         * Try grab the privilege to reallocating the table.
         */
        if (    pShared->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
            &&  ASMAtomicCmpXchgBool(&pShared->fReallocating, true, false))
        {
            uint32_t cAllocated = pShared->cAllocated;
            if (cAllocated < pShared->cEntries)
            {
                /*
                 * Ok, still not enough space.  Reallocate the table.
                 */
#if 0  /** @todo enable this after making sure growing works flawlessly. */
                uint32_t cInc = RT_ALIGN_32(pShared->cEntries - cAllocated, 16);
#else
                uint32_t cInc = RT_ALIGN_32(pShared->cEntries - cAllocated, 1);
#endif
                PRTLOCKVALRECSHRDOWN *papOwners;
                papOwners = (PRTLOCKVALRECSHRDOWN *)RTMemRealloc((void *)pShared->papOwners,
                                                                 (cAllocated + cInc) * sizeof(void *));
                if (!papOwners)
                {
                    ASMAtomicWriteBool(&pShared->fReallocating, false);
                    rtLockValidatorSerializeDestructLeave();
                    /* RTMemRealloc will assert */
                    return false;
                }

                /* NULL the new tail entries before publishing the table. */
                while (cInc-- > 0)
                {
                    papOwners[cAllocated] = NULL;
                    cAllocated++;
                }

                ASMAtomicWritePtr((void * volatile *)&pShared->papOwners, papOwners);
                ASMAtomicWriteU32(&pShared->cAllocated, cAllocated);
            }
            ASMAtomicWriteBool(&pShared->fReallocating, false);
        }
        rtLockValidatorSerializeDestructLeave();

        /* Re-enter the detection side and re-check; the record may have been
           destroyed while we held neither direction. */
        rtLockValidatorSerializeDetectionEnter();
        if (RT_UNLIKELY(pShared->Core.u32Magic != RTLOCKVALRECSHRD_MAGIC))
            break;

        if (pShared->cAllocated >= pShared->cEntries)
            return true;
    }

    rtLockValidatorSerializeDetectionLeave();
    AssertFailed(); /* too many iterations or destroyed while racing. */
    return false;
}
701
702
/**
 * Adds a thread entry to a shared lock record.
 *
 * @returns true on success, false on serious race or if we're out of memory.
 * @param   pShared             The shared lock record.
 * @param   pEntry              The thread entry.
 */
DECLINLINE(bool) rtLockValidatorSharedRecAddThread(PRTLOCKVALRECSHRD pShared, PRTLOCKVALRECSHRDOWN pEntry)
{
    rtLockValidatorSerializeDetectionEnter();
    if (RT_LIKELY(pShared->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC)) /* paranoia */
    {
        /* Reserve a slot up front; grow the table if the count exceeds the
           allocation.  NOTE(review): cEntries is not decremented again on the
           MakeRoom failure path — confirm whether that is acceptable since the
           record is likely being torn down in that case. */
        if (    ASMAtomicIncU32(&pShared->cEntries) > pShared->cAllocated /** @todo add fudge */
            &&  !rtLockValidatorSharedRecMakeRoom(pShared))
            return false; /* the worker leave the lock */

        /* Claim the first free (NULL) slot; retry a few times since other
           threads race us for slots. */
        PRTLOCKVALRECSHRDOWN volatile *papOwners = pShared->papOwners;
        uint32_t const                 cMax      = pShared->cAllocated;
        for (unsigned i = 0; i < 100; i++)
        {
            for (uint32_t iEntry = 0; iEntry < cMax; iEntry++)
            {
                if (ASMAtomicCmpXchgPtr((void * volatile *)&papOwners[iEntry], pEntry, NULL))
                {
                    rtLockValidatorSerializeDetectionLeave();
                    return true;
                }
            }
            Assert(i != 25);
        }
        AssertFailed();
    }
    rtLockValidatorSerializeDetectionLeave();
    return false;
}
738
739
/**
 * Remove a thread entry from a shared lock record and free it.
 *
 * @param   pShared             The shared lock record.
 * @param   pEntry              The thread entry to remove.
 * @param   iEntry              The last known index.
 */
DECLINLINE(void) rtLockValidatorSharedRecRemoveAndFree(PRTLOCKVALRECSHRD pShared, PRTLOCKVALRECSHRDOWN pEntry,
                                                       uint32_t iEntry)
{
    /*
     * Remove it from the table.
     */
    rtLockValidatorSerializeDetectionEnter();
    AssertReturnVoidStmt(pShared->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, rtLockValidatorSerializeDetectionLeave());
    /* Fast path: clear the slot at the cached index; fall back to a full scan
       if the entry moved (e.g. the table was reallocated). */
    if (RT_UNLIKELY(    iEntry >= pShared->cAllocated
                    ||  !ASMAtomicCmpXchgPtr((void * volatile *)&pShared->papOwners[iEntry], NULL, pEntry)))
    {
        /* this shouldn't happen yet... */
        AssertFailed();
        PRTLOCKVALRECSHRDOWN volatile *papOwners = pShared->papOwners;
        uint32_t const                 cMax      = pShared->cAllocated;
        for (iEntry = 0; iEntry < cMax; iEntry++)
            if (ASMAtomicCmpXchgPtr((void * volatile *)&papOwners[iEntry], NULL, pEntry))
               break;
        AssertReturnVoidStmt(iEntry < cMax, rtLockValidatorSerializeDetectionLeave());
    }
    /* The count must never go negative (bit 31 set would mean underflow). */
    uint32_t cNow = ASMAtomicDecU32(&pShared->cEntries);
    Assert(!(cNow & RT_BIT_32(31))); NOREF(cNow);
    rtLockValidatorSerializeDetectionLeave();

    /*
     * Successfully removed, now free it.
     */
    rtLockValidatorSharedRecFreeThread(pEntry);
}
776
777
778RTDECL(int) RTLockValidatorRecMakeSiblings(PRTLOCKVALRECCORE pRec1, PRTLOCKVALRECCORE pRec2)
779{
780 /*
781 * Validate input.
782 */
783 PRTLOCKVALRECUNION p1 = (PRTLOCKVALRECUNION)pRec1;
784 PRTLOCKVALRECUNION p2 = (PRTLOCKVALRECUNION)pRec2;
785
786 AssertPtrReturn(p1, VERR_SEM_LV_INVALID_PARAMETER);
787 AssertReturn( p1->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC
788 || p1->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
789 , VERR_SEM_LV_INVALID_PARAMETER);
790
791 AssertPtrReturn(p2, VERR_SEM_LV_INVALID_PARAMETER);
792 AssertReturn( p2->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC
793 || p2->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
794 , VERR_SEM_LV_INVALID_PARAMETER);
795
796 /*
797 * Link them (circular list).
798 */
799 if ( p1->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC
800 && p2->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC)
801 {
802 p1->Excl.pSibling = p2;
803 p2->Shared.pSibling = p1;
804 }
805 else if ( p1->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
806 && p2->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC)
807 {
808 p1->Shared.pSibling = p2;
809 p2->Excl.pSibling = p1;
810 }
811 else
812 AssertFailedReturn(VERR_SEM_LV_INVALID_PARAMETER); /* unsupported mix */
813
814 return VINF_SUCCESS;
815}
816
817
818RTDECL(int) RTLockValidatorCheckOrder(PRTLOCKVALRECEXCL pRec, RTTHREAD hThread, PCRTLOCKVALSRCPOS pSrcPos)
819{
820 AssertReturn(pRec->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
821 if (!pRec->fEnabled)
822 return VINF_SUCCESS;
823
824 /*
825 * Check it locks we're currently holding.
826 */
827 /** @todo later */
828
829 /*
830 * If missing order rules, add them.
831 */
832
833 return VINF_SUCCESS;
834}
835
836
837RTDECL(int) RTLockValidatorCheckAndRelease(PRTLOCKVALRECEXCL pRec)
838{
839 AssertReturn(pRec->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
840 if (!pRec->fEnabled)
841 return VINF_SUCCESS;
842 AssertReturn(pRec->hThread != NIL_RTTHREAD, VERR_SEM_LV_INVALID_PARAMETER);
843
844 RTLockValidatorUnsetOwner(pRec);
845 return VINF_SUCCESS;
846}
847
848
849RTDECL(int) RTLockValidatorCheckAndReleaseReadOwner(PRTLOCKVALRECSHRD pRead, RTTHREAD hThread)
850{
851 AssertReturn(pRead->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
852 if (!pRead->fEnabled)
853 return VINF_SUCCESS;
854 AssertReturn(hThread != NIL_RTTHREAD, VERR_SEM_LV_INVALID_PARAMETER);
855
856 /*
857 * Locate the entry for this thread in the table.
858 */
859 uint32_t iEntry = 0;
860 PRTLOCKVALRECSHRDOWN pEntry = rtLockValidatorSharedRecFindThread(pRead, hThread, &iEntry);
861 AssertReturn(pEntry, VERR_SEM_LV_NOT_OWNER);
862
863 /*
864 * Check the release order.
865 */
866 if (pRead->hClass != NIL_RTLOCKVALIDATORCLASS)
867 {
868 /** @todo order validation */
869 }
870
871 /*
872 * Release the ownership or unwind a level of recursion.
873 */
874 Assert(pEntry->cRecursion > 0);
875 if (pEntry->cRecursion > 1)
876 pEntry->cRecursion--;
877 else
878 rtLockValidatorSharedRecRemoveAndFree(pRead, pEntry, iEntry);
879
880 return VINF_SUCCESS;
881}
882
883
884RTDECL(int) RTLockValidatorRecordRecursion(PRTLOCKVALRECEXCL pRec, PCRTLOCKVALSRCPOS pSrcPos)
885{
886 AssertReturn(pRec->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
887 if (!pRec->fEnabled)
888 return VINF_SUCCESS;
889 AssertReturn(pRec->hThread != NIL_RTTHREAD, VERR_SEM_LV_INVALID_PARAMETER);
890
891 Assert(pRec->cRecursion < _1M);
892 pRec->cRecursion++;
893
894 return VINF_SUCCESS;
895}
896
897
898RTDECL(int) RTLockValidatorUnwindRecursion(PRTLOCKVALRECEXCL pRec)
899{
900 AssertReturn(pRec->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
901 if (!pRec->fEnabled)
902 return VINF_SUCCESS;
903 AssertReturn(pRec->hThread != NIL_RTTHREAD, VERR_SEM_LV_INVALID_PARAMETER);
904 AssertReturn(pRec->cRecursion > 0, VERR_SEM_LV_INVALID_PARAMETER);
905
906 Assert(pRec->cRecursion);
907 pRec->cRecursion--;
908 return VINF_SUCCESS;
909}
910
911
912RTDECL(int) RTLockValidatorRecordReadWriteRecursion(PRTLOCKVALRECEXCL pWrite, PRTLOCKVALRECSHRD pRead, PCRTLOCKVALSRCPOS pSrcPos)
913{
914 AssertReturn(pWrite->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
915 AssertReturn(pRead->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
916 AssertReturn(pRead->fEnabled == pWrite->fEnabled, VERR_SEM_LV_INVALID_PARAMETER);
917 if (!pWrite->fEnabled)
918 return VINF_SUCCESS;
919 AssertReturn(pWrite->hThread != NIL_RTTHREAD, VERR_SEM_LV_INVALID_PARAMETER);
920 AssertReturn(pWrite->cRecursion > 0, VERR_SEM_LV_INVALID_PARAMETER);
921
922 Assert(pWrite->cRecursion < _1M);
923 pWrite->cRecursion++;
924
925 return VINF_SUCCESS;
926}
927
928
929RTDECL(int) RTLockValidatorUnwindReadWriteRecursion(PRTLOCKVALRECEXCL pWrite, PRTLOCKVALRECSHRD pRead)
930{
931 AssertReturn(pWrite->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
932 AssertReturn(pRead->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
933 AssertReturn(pRead->fEnabled == pWrite->fEnabled, VERR_SEM_LV_INVALID_PARAMETER);
934 if (!pWrite->fEnabled)
935 return VINF_SUCCESS;
936 AssertReturn(pWrite->hThread != NIL_RTTHREAD, VERR_SEM_LV_INVALID_PARAMETER);
937 AssertReturn(pWrite->cRecursion > 0, VERR_SEM_LV_INVALID_PARAMETER);
938
939 Assert(pWrite->cRecursion);
940 pWrite->cRecursion--;
941 return VINF_SUCCESS;
942}
943
944
945RTDECL(RTTHREAD) RTLockValidatorSetOwner(PRTLOCKVALRECEXCL pRec, RTTHREAD hThread, PCRTLOCKVALSRCPOS pSrcPos)
946{
947 AssertReturn(pRec->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, NIL_RTTHREAD);
948 if (!pRec->fEnabled)
949 return VINF_SUCCESS;
950 if (hThread == NIL_RTTHREAD)
951 {
952 hThread = RTThreadSelfAutoAdopt();
953 AssertReturn(hThread != NIL_RTTHREAD, hThread);
954 }
955
956 ASMAtomicIncS32(&hThread->LockValidator.cWriteLocks);
957
958 if (pRec->hThread == hThread)
959 pRec->cRecursion++;
960 else
961 {
962 Assert(pRec->hThread == NIL_RTTHREAD);
963
964 /*
965 * Update the record.
966 */
967 rtLockValidatorCopySrcPos(&pRec->SrcPos, pSrcPos);
968 ASMAtomicUoWriteU32(&pRec->cRecursion, 1);
969 ASMAtomicWriteHandle(&pRec->hThread, hThread);
970
971 /*
972 * Push the lock onto the lock stack.
973 */
974 /** @todo push it onto the per-thread lock stack. */
975 }
976
977 return hThread;
978}
979
980
981RTDECL(RTTHREAD) RTLockValidatorUnsetOwner(PRTLOCKVALRECEXCL pRec)
982{
983 AssertReturn(pRec->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, NIL_RTTHREAD);
984 if (!pRec->fEnabled)
985 return VINF_SUCCESS;
986 RTTHREADINT *pThread = pRec->hThread;
987 AssertReturn(pThread != NIL_RTTHREAD, pThread);
988
989 ASMAtomicDecS32(&pThread->LockValidator.cWriteLocks);
990
991 if (ASMAtomicDecU32(&pRec->cRecursion) == 0)
992 {
993 /*
994 * Pop (remove) the lock.
995 */
996 /** @todo remove it from the per-thread stack/whatever. */
997
998 /*
999 * Update the record.
1000 */
1001 ASMAtomicWriteHandle(&pRec->hThread, NIL_RTTHREAD);
1002 }
1003
1004 return pThread;
1005}
1006
1007
1008RTDECL(void) RTLockValidatorAddReadOwner(PRTLOCKVALRECSHRD pRead, RTTHREAD hThread, PCRTLOCKVALSRCPOS pSrcPos)
1009{
1010 AssertReturnVoid(pRead->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC);
1011 if (!pRead->fEnabled)
1012 return;
1013 AssertReturnVoid(hThread != NIL_RTTHREAD);
1014
1015 /*
1016 * Recursive?
1017 *
1018 * Note! This code can be optimized to try avoid scanning the table on
1019 * insert. However, that's annoying work that makes the code big,
1020 * so it can wait til later sometime.
1021 */
1022 PRTLOCKVALRECSHRDOWN pEntry = rtLockValidatorSharedRecFindThread(pRead, hThread, NULL);
1023 if (pEntry)
1024 {
1025 pEntry->cRecursion++;
1026 return;
1027 }
1028
1029 /*
1030 * Allocate a new thread entry and insert it into the table.
1031 */
1032 pEntry = rtLockValidatorSharedRecAllocThread(pRead, hThread, pSrcPos);
1033 if ( pEntry
1034 && !rtLockValidatorSharedRecAddThread(pRead, pEntry))
1035 rtLockValidatorSharedRecFreeThread(pEntry);
1036}
1037
1038
1039RTDECL(void) RTLockValidatorRemoveReadOwner(PRTLOCKVALRECSHRD pRead, RTTHREAD hThread)
1040{
1041 AssertReturnVoid(pRead->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC);
1042 if (!pRead->fEnabled)
1043 return;
1044 AssertReturnVoid(hThread != NIL_RTTHREAD);
1045
1046 /*
1047 * Find the entry hope it's a recursive one.
1048 */
1049 uint32_t iEntry;
1050 PRTLOCKVALRECSHRDOWN pEntry = rtLockValidatorSharedRecFindThread(pRead, hThread, &iEntry);
1051 AssertReturnVoid(pEntry);
1052 if (pEntry->cRecursion > 1)
1053 pEntry->cRecursion--;
1054 else
1055 rtLockValidatorSharedRecRemoveAndFree(pRead, pEntry, iEntry);
1056}
1057
1058
1059RTDECL(int32_t) RTLockValidatorWriteLockGetCount(RTTHREAD Thread)
1060{
1061 if (Thread == NIL_RTTHREAD)
1062 return 0;
1063
1064 PRTTHREADINT pThread = rtThreadGet(Thread);
1065 if (!pThread)
1066 return VERR_INVALID_HANDLE;
1067 int32_t cWriteLocks = ASMAtomicReadS32(&pThread->LockValidator.cWriteLocks);
1068 rtThreadRelease(pThread);
1069 return cWriteLocks;
1070}
1071RT_EXPORT_SYMBOL(RTLockValidatorWriteLockGetCount);
1072
1073
1074RTDECL(void) RTLockValidatorWriteLockInc(RTTHREAD Thread)
1075{
1076 PRTTHREADINT pThread = rtThreadGet(Thread);
1077 AssertReturnVoid(pThread);
1078 ASMAtomicIncS32(&pThread->LockValidator.cWriteLocks);
1079 rtThreadRelease(pThread);
1080}
1081RT_EXPORT_SYMBOL(RTLockValidatorWriteLockInc);
1082
1083
1084RTDECL(void) RTLockValidatorWriteLockDec(RTTHREAD Thread)
1085{
1086 PRTTHREADINT pThread = rtThreadGet(Thread);
1087 AssertReturnVoid(pThread);
1088 ASMAtomicDecS32(&pThread->LockValidator.cWriteLocks);
1089 rtThreadRelease(pThread);
1090}
1091RT_EXPORT_SYMBOL(RTLockValidatorWriteLockDec);
1092
1093
1094RTDECL(int32_t) RTLockValidatorReadLockGetCount(RTTHREAD Thread)
1095{
1096 if (Thread == NIL_RTTHREAD)
1097 return 0;
1098
1099 PRTTHREADINT pThread = rtThreadGet(Thread);
1100 if (!pThread)
1101 return VERR_INVALID_HANDLE;
1102 int32_t cReadLocks = ASMAtomicReadS32(&pThread->LockValidator.cReadLocks);
1103 rtThreadRelease(pThread);
1104 return cReadLocks;
1105}
1106RT_EXPORT_SYMBOL(RTLockValidatorReadLockGetCount);
1107
1108
1109RTDECL(void) RTLockValidatorReadLockInc(RTTHREAD Thread)
1110{
1111 PRTTHREADINT pThread = rtThreadGet(Thread);
1112 Assert(pThread);
1113 ASMAtomicIncS32(&pThread->LockValidator.cReadLocks);
1114 rtThreadRelease(pThread);
1115}
1116RT_EXPORT_SYMBOL(RTLockValidatorReadLockInc);
1117
1118
1119RTDECL(void) RTLockValidatorReadLockDec(RTTHREAD Thread)
1120{
1121 PRTTHREADINT pThread = rtThreadGet(Thread);
1122 Assert(pThread);
1123 ASMAtomicDecS32(&pThread->LockValidator.cReadLocks);
1124 rtThreadRelease(pThread);
1125}
1126RT_EXPORT_SYMBOL(RTLockValidatorReadLockDec);
1127
1128
1129/**
1130 * Verifies the deadlock stack before calling it a deadlock.
1131 *
1132 * @retval VERR_SEM_LV_DEADLOCK if it's a deadlock.
1133 * @retval VERR_TRY_AGAIN if something changed.
1134 *
1135 * @param pStack The deadlock detection stack.
1136 */
1137static int rtLockValidatorDdVerifyDeadlock(PRTLOCKVALIDATORDDSTACK pStack)
1138{
1139 uint32_t const c = pStack->c;
1140 for (uint32_t iPass = 0; iPass < 3; iPass++)
1141 {
1142 for (uint32_t i = 1; i < c; i++)
1143 {
1144 PRTTHREADINT pThread = pStack->a[i].pThread;
1145 if (pThread->u32Magic != RTTHREADINT_MAGIC)
1146 return VERR_TRY_AGAIN;
1147 if (rtThreadGetState(pThread) != pStack->a[i].enmState)
1148 return VERR_TRY_AGAIN;
1149 if (rtLockValidatorReadRecUnionPtr(&pThread->LockValidator.pRec) != pStack->a[i].pFirstSibling)
1150 return VERR_TRY_AGAIN;
1151 }
1152 RTThreadYield();
1153 }
1154
1155 return VERR_SEM_LV_DEADLOCK;
1156}
1157
1158
1159/**
1160 * Checks for stack cycles caused by another deadlock before returning.
1161 *
1162 * @retval VINF_SUCCESS if the stack is simply too small.
1163 * @retval VERR_SEM_LV_EXISTING_DEADLOCK if a cycle was detected.
1164 *
1165 * @param pStack The deadlock detection stack.
1166 */
1167static int rtLockValidatorDdHandleStackOverflow(PRTLOCKVALIDATORDDSTACK pStack)
1168{
1169 for (size_t i = 0; i < RT_ELEMENTS(pStack->a) - 1; i++)
1170 {
1171 PRTTHREADINT pThread = pStack->a[i].pThread;
1172 for (size_t j = i + 1; j < RT_ELEMENTS(pStack->a); j++)
1173 if (pStack->a[j].pThread == pThread)
1174 return VERR_SEM_LV_EXISTING_DEADLOCK;
1175 }
1176 static bool volatile s_fComplained = false;
1177 if (!s_fComplained)
1178 {
1179 s_fComplained = true;
1180 rtLockValidatorComplain(RT_SRC_POS, "lock validator stack is too small! (%zu entries)\n", RT_ELEMENTS(pStack->a));
1181 }
1182 return VINF_SUCCESS;
1183}
1184
1185
1186/**
1187 * Worker for rtLockValidatorDoDeadlockCheck that checks if there is more work
1188 * to be done during unwind.
1189 *
1190 * @returns true if there is more work left for this lock, false if not.
1191 * @param pRec The current record.
1192 * @param iEntry The current index.
1193 * @param pFirstSibling The first record we examined.
1194 */
1195DECL_FORCE_INLINE(bool) rtLockValidatorDdMoreWorkLeft(PRTLOCKVALRECUNION pRec, uint32_t iEntry, PRTLOCKVALRECUNION pFirstSibling)
1196{
1197 PRTLOCKVALRECUNION pSibling;
1198
1199 switch (pRec->Core.u32Magic)
1200 {
1201 case RTLOCKVALRECEXCL_MAGIC:
1202 pSibling = pRec->Excl.pSibling;
1203 break;
1204
1205 case RTLOCKVALRECSHRD_MAGIC:
1206 if (iEntry + 1 < pRec->Shared.cAllocated)
1207 return true;
1208 pSibling = pRec->Excl.pSibling;
1209 break;
1210
1211 default:
1212 return false;
1213 }
1214 return pSibling != NULL
1215 && pSibling != pFirstSibling;
1216}
1217
1218
1219/**
1220 * Worker for rtLockValidatorDeadlockDetection that does the actual deadlock
1221 * detection.
1222 *
1223 * @retval VINF_SUCCESS
1224 * @retval VERR_SEM_LV_DEADLOCK
1225 * @retval VERR_SEM_LV_EXISTING_DEADLOCK
1226 * @retval VERR_TRY_AGAIN
1227 *
1228 * @param pStack The stack to use.
1229 * @param pOriginalRec The original record.
1230 * @param pThreadSelf The calling thread.
1231 */
1232static int rtLockValidatorDdDoDetection(PRTLOCKVALIDATORDDSTACK pStack, PRTLOCKVALRECUNION const pOriginalRec,
1233 PRTTHREADINT const pThreadSelf)
1234{
1235 pStack->c = 0;
1236
1237 /* We could use a single RTLOCKVALIDATORDDENTRY variable here, but the
1238 compiler may make a better job of it when using individual variables. */
1239 PRTLOCKVALRECUNION pRec = pOriginalRec;
1240 PRTLOCKVALRECUNION pFirstSibling = pOriginalRec;
1241 uint32_t iEntry = UINT32_MAX;
1242 PRTTHREADINT pThread = NIL_RTTHREAD;
1243 RTTHREADSTATE enmState = RTTHREADSTATE_RUNNING;
1244 for (;;)
1245 {
1246 /*
1247 * Process the current record.
1248 */
1249 /* Find the next relevan owner thread. */
1250 PRTTHREADINT pNextThread;
1251 switch (pRec->Core.u32Magic)
1252 {
1253 case RTLOCKVALRECEXCL_MAGIC:
1254 Assert(iEntry == UINT32_MAX);
1255 pNextThread = rtLockValidatorReadThreadHandle(&pRec->Excl.hThread);
1256 if ( pNextThread
1257 && !RTTHREAD_IS_SLEEPING(pNextThread->enmState)
1258 && pNextThread != pThreadSelf)
1259 pNextThread = NIL_RTTHREAD;
1260
1261 if ( pNextThread == NIL_RTTHREAD
1262 && pRec->Excl.pSibling
1263 && pRec->Excl.pSibling != pFirstSibling)
1264 {
1265 pRec = pRec->Excl.pSibling;
1266 continue;
1267 }
1268 break;
1269
1270 case RTLOCKVALRECSHRD_MAGIC:
1271 /* Skip to the next sibling if same side. ASSUMES reader priority. */
1272 /** @todo The read side of a read-write lock is problematic if
1273 * the implementation prioritizes writers over readers because
1274 * that means we should could deadlock against current readers
1275 * if a writer showed up. If the RW sem implementation is
1276 * wrapping some native API, it's not so easy to detect when we
1277 * should do this and when we shouldn't. Checking when we
1278 * shouldn't is subject to wakeup scheduling and cannot easily
1279 * be made reliable.
1280 *
1281 * At the moment we circumvent all this mess by declaring that
1282 * readers has priority. This is TRUE on linux, but probably
1283 * isn't on Solaris and FreeBSD. */
1284 if ( pRec == pFirstSibling
1285 && pRec->Shared.pSibling != NULL
1286 && pRec->Shared.pSibling != pFirstSibling)
1287 {
1288 pRec = pRec->Shared.pSibling;
1289 Assert(iEntry == UINT32_MAX);
1290 continue;
1291 }
1292
1293 /* Scan the owner table for blocked owners. */
1294 if (ASMAtomicUoReadU32(&pRec->Shared.cEntries) > 0)
1295 {
1296 uint32_t cAllocated = ASMAtomicUoReadU32(&pRec->Shared.cAllocated);
1297 PRTLOCKVALRECSHRDOWN volatile *papOwners = pRec->Shared.papOwners;
1298 while (++iEntry < cAllocated)
1299 {
1300 PRTLOCKVALRECSHRDOWN pEntry = rtLockValidatorUoReadSharedOwner(&papOwners[iEntry]);
1301 if ( pEntry
1302 && pEntry->Core.u32Magic == RTLOCKVALRECSHRDOWN_MAGIC)
1303 {
1304 pNextThread = rtLockValidatorReadThreadHandle(&pEntry->hThread);
1305 if (pNextThread)
1306 {
1307 if ( pNextThread->u32Magic == RTTHREADINT_MAGIC
1308 && RTTHREAD_IS_SLEEPING(pNextThread->enmState))
1309 break;
1310 pNextThread = NIL_RTTHREAD;
1311 }
1312 }
1313 else
1314 Assert(!pEntry || pEntry->Core.u32Magic == RTLOCKVALRECSHRDOWN_MAGIC_DEAD);
1315 }
1316 if (pNextThread == NIL_RTTHREAD)
1317 break;
1318 }
1319
1320 /* Advance to the next sibling, if any. */
1321 if ( pRec->Shared.pSibling != NULL
1322 && pRec->Shared.pSibling != pFirstSibling)
1323 {
1324 pRec = pRec->Shared.pSibling;
1325 iEntry = UINT32_MAX;
1326 continue;
1327 }
1328 break;
1329
1330 case RTLOCKVALRECEXCL_MAGIC_DEAD:
1331 case RTLOCKVALRECSHRD_MAGIC_DEAD:
1332 pNextThread = NIL_RTTHREAD;
1333 break;
1334
1335 case RTLOCKVALRECSHRDOWN_MAGIC:
1336 case RTLOCKVALRECSHRDOWN_MAGIC_DEAD:
1337 default:
1338 AssertMsgFailed(("%p: %#x\n", pRec, pRec->Core));
1339 pNextThread = NIL_RTTHREAD;
1340 break;
1341 }
1342
1343 /* If we found a thread, check if it is still waiting for something. */
1344 RTTHREADSTATE enmNextState = RTTHREADSTATE_RUNNING;
1345 PRTLOCKVALRECUNION pNextRec = NULL;
1346 if ( pNextThread != NIL_RTTHREAD
1347 && RT_LIKELY(pNextThread->u32Magic == RTTHREADINT_MAGIC))
1348 {
1349 do
1350 {
1351 enmNextState = rtThreadGetState(pNextThread);
1352 if ( !RTTHREAD_IS_SLEEPING(enmNextState)
1353 && pNextThread != pThreadSelf)
1354 break;
1355 pNextRec = rtLockValidatorReadRecUnionPtr(&pNextThread->LockValidator.pRec);
1356 if (RT_LIKELY( !pNextRec
1357 || enmNextState == rtThreadGetState(pNextThread)))
1358 break;
1359 pNextRec = NULL;
1360 } while (pNextThread->u32Magic == RTTHREADINT_MAGIC);
1361 }
1362 if (pNextRec)
1363 {
1364 /*
1365 * Recurse and check for deadlock.
1366 */
1367 uint32_t i = pStack->c;
1368 if (RT_UNLIKELY(i >= RT_ELEMENTS(pStack->a)))
1369 return rtLockValidatorDdHandleStackOverflow(pStack);
1370
1371 pStack->c++;
1372 pStack->a[i].pRec = pRec;
1373 pStack->a[i].iEntry = iEntry;
1374 pStack->a[i].enmState = enmState;
1375 pStack->a[i].pThread = pThread;
1376 pStack->a[i].pFirstSibling = pFirstSibling;
1377
1378 if (RT_UNLIKELY(pNextThread == pThreadSelf))
1379 return rtLockValidatorDdVerifyDeadlock(pStack);
1380
1381 pRec = pNextRec;
1382 pFirstSibling = pNextRec;
1383 iEntry = UINT32_MAX;
1384 enmState = enmNextState;
1385 pThread = pNextThread;
1386 }
1387 else if (RT_LIKELY(!pNextThread))
1388 {
1389 /*
1390 * No deadlock here, unwind the stack and deal with any unfinished
1391 * business there.
1392 */
1393 uint32_t i = pStack->c;
1394 for (;;)
1395 {
1396 /* pop */
1397 if (i == 0)
1398 return VINF_SUCCESS;
1399 i--;
1400
1401 /* examine it. */
1402 pRec = pStack->a[i].pRec;
1403 pFirstSibling = pStack->a[i].pFirstSibling;
1404 iEntry = pStack->a[i].iEntry;
1405 if (rtLockValidatorDdMoreWorkLeft(pRec, iEntry, pFirstSibling))
1406 {
1407 enmState = pStack->a[i].enmState;
1408 pThread = pStack->a[i].pThread;
1409 break;
1410 }
1411 }
1412 }
1413 /* else: see if there is another thread to check for this lock. */
1414 }
1415}
1416
1417
1418/**
1419 * Check for the simple no-deadlock case.
1420 *
1421 * @returns true if no deadlock, false if further investigation is required.
1422 *
1423 * @param pOriginalRec The original record.
1424 */
1425DECLINLINE(int) rtLockValidatorIsSimpleNoDeadlockCase(PRTLOCKVALRECUNION pOriginalRec)
1426{
1427 if ( pOriginalRec->Excl.Core.u32Magic == RTLOCKVALRECEXCL_MAGIC
1428 && !pOriginalRec->Excl.pSibling)
1429 {
1430 PRTTHREADINT pThread = rtLockValidatorReadThreadHandle(&pOriginalRec->Excl.hThread);
1431 if ( !pThread
1432 || pThread->u32Magic != RTTHREADINT_MAGIC)
1433 return true;
1434 RTTHREADSTATE enmState = rtThreadGetState(pThread);
1435 if (!RTTHREAD_IS_SLEEPING(enmState))
1436 return true;
1437 }
1438 return false;
1439}
1440
1441
1442/**
1443 * Worker for rtLockValidatorDeadlockDetection that bitches about a deadlock.
1444 *
1445 * @param pStack The chain of locks causing the deadlock.
1446 * @param pRec The record relating to the current thread's lock
1447 * operation.
1448 * @param pThreadSelf This thread.
1449 * @param pSrcPos Where we are going to deadlock.
1450 * @param rc The return code.
1451 */
1452static void rcLockValidatorDoDeadlockComplaining(PRTLOCKVALIDATORDDSTACK pStack, PRTLOCKVALRECUNION pRec,
1453 PRTTHREADINT pThreadSelf, PCRTLOCKVALSRCPOS pSrcPos, int rc)
1454{
1455 if (!ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
1456 {
1457 rtLockValidatorComplainFirst( rc == VERR_SEM_LV_DEADLOCK
1458 ? "Detected deadlock!"
1459 : rc == VERR_SEM_LV_EXISTING_DEADLOCK
1460 ? "Found existing deadlock!"
1461 : "!unexpected rc!",
1462 pSrcPos,
1463 pThreadSelf,
1464 pStack->a[0].pRec != pRec ? pRec : NULL);
1465 rtLockValidatorComplainMore("---- start of deadlock chain - %u entries ----\n", pStack->c);
1466 for (uint32_t i = 0; i < pStack->c; i++)
1467 {
1468 char szPrefix[24];
1469 RTStrPrintf(szPrefix, sizeof(szPrefix), "#%02u: ", i);
1470 PRTLOCKVALRECSHRDOWN pSharedOwn = NULL;
1471 if (pStack->a[i].pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC)
1472 pSharedOwn = pStack->a[i].pRec->Shared.papOwners[pStack->a[i].iEntry];
1473 if (VALID_PTR(pSharedOwn) && pSharedOwn->Core.u32Magic == RTLOCKVALRECSHRDOWN_MAGIC)
1474 rtLockValidatorComplainAboutLock(szPrefix, (PRTLOCKVALRECUNION)pSharedOwn, "\n");
1475 else
1476 rtLockValidatorComplainAboutLock(szPrefix, pStack->a[i].pRec, "\n");
1477 }
1478 rtLockValidatorComplainMore("---- end of deadlock chain ----\n");
1479 }
1480
1481 rtLockValidatorComplainPanic();
1482}
1483
1484
1485/**
1486 * Perform deadlock detection.
1487 *
1488 * @retval VINF_SUCCESS
1489 * @retval VERR_SEM_LV_DEADLOCK
1490 * @retval VERR_SEM_LV_EXISTING_DEADLOCK
1491 *
1492 * @param pRec The record relating to the current thread's lock
1493 * operation.
1494 * @param pThreadSelf The current thread.
1495 * @param pSrcPos The position of the current lock operation.
1496 */
1497static int rtLockValidatorDeadlockDetection(PRTLOCKVALRECUNION pRec, PRTTHREADINT pThreadSelf, PCRTLOCKVALSRCPOS pSrcPos)
1498{
1499#ifdef DEBUG_bird
1500 RTLOCKVALIDATORDDSTACK Stack;
1501 int rc = rtLockValidatorDdDoDetection(&Stack, pRec, pThreadSelf);
1502 if (RT_SUCCESS(rc))
1503 return VINF_SUCCESS;
1504
1505 if (rc == VERR_TRY_AGAIN)
1506 {
1507 for (uint32_t iLoop = 0; ; iLoop++)
1508 {
1509 rc = rtLockValidatorDdDoDetection(&Stack, pRec, pThreadSelf);
1510 if (RT_SUCCESS_NP(rc))
1511 return VINF_SUCCESS;
1512 if (rc != VERR_TRY_AGAIN)
1513 break;
1514 RTThreadYield();
1515 if (iLoop >= 3)
1516 return VINF_SUCCESS;
1517 }
1518 }
1519
1520 rcLockValidatorDoDeadlockComplaining(&Stack, pRec, pThreadSelf, pSrcPos, rc);
1521 return rc;
1522#else
1523 return VINF_SUCCESS;
1524#endif
1525}
1526
1527
1528
/**
 * Checks an upcoming blocking write-lock operation of a read-write lock.
 *
 * Currently this only catches attempts at upgrading a read lock to a write
 * lock (which would deadlock against ourselves); actual lock order checking
 * is not implemented yet (note the unused fRecursiveOk parameter).
 *
 * @returns VINF_SUCCESS, VERR_SEM_LV_UPGRADE or VERR_SEM_LV_INVALID_PARAMETER.
 * @param   pWrite          The write lock record.
 * @param   pRead           The read lock record of the same lock.
 * @param   hThread         The calling thread.
 * @param   enmState        The sleep state the thread is about to enter.
 * @param   fRecursiveOk    Whether recursion is permitted (currently unused).
 * @param   pSrcPos         The source position of the operation.
 */
RTDECL(int) RTLockValidatorCheckWriteOrderBlocking(PRTLOCKVALRECEXCL pWrite, PRTLOCKVALRECSHRD pRead,
                                                   RTTHREAD hThread, RTTHREADSTATE enmState, bool fRecursiveOk,
                                                   PCRTLOCKVALSRCPOS pSrcPos)
{
    /*
     * Fend off wild life.
     */
    PRTLOCKVALRECUNION pWriteU = (PRTLOCKVALRECUNION)pWrite; /* (avoid breaking aliasing rules) */
    AssertPtrReturn(pWriteU, VERR_SEM_LV_INVALID_PARAMETER);
    AssertReturn(pWriteU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    PRTLOCKVALRECUNION pReadU = (PRTLOCKVALRECUNION)pRead;
    AssertPtrReturn(pRead, VERR_SEM_LV_INVALID_PARAMETER);
    AssertReturn(pReadU->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    AssertReturn(pReadU->Shared.fEnabled == pWriteU->Excl.fEnabled, VERR_SEM_LV_INVALID_PARAMETER);
    if (!pWriteU->Excl.fEnabled)
        return VINF_SUCCESS;
    AssertReturn(RTTHREAD_IS_SLEEPING(enmState), VERR_SEM_LV_INVALID_PARAMETER);
    PRTTHREADINT pThread = hThread;
    AssertPtrReturn(pThread, VERR_SEM_LV_INVALID_PARAMETER);
    AssertReturn(pThread->u32Magic == RTTHREADINT_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    RTTHREADSTATE enmThreadState = rtThreadGetState(pThread);
    AssertReturn(   enmThreadState == RTTHREADSTATE_RUNNING
                 || enmThreadState == RTTHREADSTATE_TERMINATED   /* rtThreadRemove uses locks too */
                 || enmThreadState == RTTHREADSTATE_INITIALIZING /* rtThreadInsert uses locks too */
                 , VERR_SEM_LV_INVALID_PARAMETER);

    /*
     * Check for attempts at doing a read upgrade.
     */
    PRTLOCKVALRECSHRDOWN pEntry = rtLockValidatorSharedRecFindThread(&pReadU->Shared, hThread, NULL);
    if (pEntry)
    {
        rtLockValidatorComplainFirst("Read lock upgrade", pSrcPos, pThread, (PRTLOCKVALRECUNION)pEntry);
        rtLockValidatorComplainPanic();
        return VERR_SEM_LV_UPGRADE;
    }

    /** @todo Implement lock order validation here. */

    return VINF_SUCCESS;
}
1570
1571
/**
 * Checks an upcoming blocking read-lock operation of a read-write lock.
 *
 * Only parameter validation is implemented so far; actual lock order checking
 * is still missing (note the unused fRecursiveOk and pSrcPos parameters).
 *
 * @returns VINF_SUCCESS or VERR_SEM_LV_INVALID_PARAMETER.
 * @param   pRead           The read lock record.
 * @param   pWrite          The write lock record of the same lock.
 * @param   hThread         The calling thread.
 * @param   enmState        The sleep state the thread is about to enter.
 * @param   fRecursiveOk    Whether recursion is permitted (currently unused).
 * @param   pSrcPos         The source position of the operation (currently
 *                          unused).
 */
RTDECL(int) RTLockValidatorCheckReadOrderBlocking(PRTLOCKVALRECSHRD pRead, PRTLOCKVALRECEXCL pWrite,
                                                  RTTHREAD hThread, RTTHREADSTATE enmState, bool fRecursiveOk,
                                                  PCRTLOCKVALSRCPOS pSrcPos)
{
    /*
     * Fend off wild life.
     */
    AssertPtrReturn(pRead, VERR_SEM_LV_INVALID_PARAMETER);
    AssertReturn(pRead->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    AssertPtrReturn(pWrite, VERR_SEM_LV_INVALID_PARAMETER);
    AssertReturn(pWrite->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    AssertReturn(pRead->fEnabled == pWrite->fEnabled, VERR_SEM_LV_INVALID_PARAMETER);
    if (!pRead->fEnabled)
        return VINF_SUCCESS;
    AssertReturn(RTTHREAD_IS_SLEEPING(enmState), VERR_SEM_LV_INVALID_PARAMETER);
    PRTTHREADINT pThread = hThread;
    AssertPtrReturn(pThread, VERR_SEM_LV_INVALID_PARAMETER);
    AssertReturn(pThread->u32Magic == RTTHREADINT_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    RTTHREADSTATE enmThreadState = rtThreadGetState(pThread);
    AssertReturn(   enmThreadState == RTTHREADSTATE_RUNNING
                 || enmThreadState == RTTHREADSTATE_TERMINATED   /* rtThreadRemove uses locks too */
                 || enmThreadState == RTTHREADSTATE_INITIALIZING /* rtThreadInsert uses locks too */
                 , VERR_SEM_LV_INVALID_PARAMETER);
    /* The caller must not already be the write owner (debug-build check only). */
    Assert(pWrite->hThread != pThread);

    /** @todo Implement lock order validation here. */
    return VINF_SUCCESS;
}
1600
1601
/**
 * Validates and records an upcoming blocking operation on an exclusive lock,
 * then runs deadlock detection.
 *
 * @returns VINF_SUCCESS, VERR_SEM_LV_NESTED, VERR_SEM_LV_DEADLOCK,
 *          VERR_SEM_LV_EXISTING_DEADLOCK or VERR_SEM_LV_INVALID_PARAMETER.
 * @param   pRec            The lock record the thread is about to block on.
 * @param   hThread         The calling thread.
 * @param   enmState        The sleep state the thread is about to enter.
 * @param   fRecursiveOk    Whether recursive entry by the owner is allowed.
 * @param   pSrcPos         The source position of the operation.
 */
RTDECL(int) RTLockValidatorCheckBlocking(PRTLOCKVALRECEXCL pRec, RTTHREAD hThread,
                                         RTTHREADSTATE enmState, bool fRecursiveOk,
                                         PCRTLOCKVALSRCPOS pSrcPos)
{
    /*
     * Fend off wild life.
     */
    PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
    AssertPtrReturn(pRecU, VERR_SEM_LV_INVALID_PARAMETER);
    AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    if (!pRecU->Excl.fEnabled)
        return VINF_SUCCESS;
    AssertReturn(RTTHREAD_IS_SLEEPING(enmState), VERR_SEM_LV_INVALID_PARAMETER);
    PRTTHREADINT pThreadSelf = hThread;
    AssertPtrReturn(pThreadSelf, VERR_SEM_LV_INVALID_PARAMETER);
    AssertReturn(pThreadSelf->u32Magic == RTTHREADINT_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    RTTHREADSTATE enmThreadState = rtThreadGetState(pThreadSelf);
    AssertReturn(   enmThreadState == RTTHREADSTATE_RUNNING
                 || enmThreadState == RTTHREADSTATE_TERMINATED   /* rtThreadRemove uses locks too */
                 || enmThreadState == RTTHREADSTATE_INITIALIZING /* rtThreadInsert uses locks too */
                 , VERR_SEM_LV_INVALID_PARAMETER);

    /*
     * Record the location and everything before changing the state and
     * performing deadlock detection.
     */
    rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pRec, pRecU);
    rtLockValidatorCopySrcPos(&pThreadSelf->LockValidator.SrcPos, pSrcPos);

    /*
     * Don't do deadlock detection if we're recursing.
     *
     * On some hosts we don't do recursion accounting our selves and there
     * isn't any other place to check for this. semmutex-win.cpp for instance.
     */
    if (rtLockValidatorReadThreadHandle(&pRecU->Excl.hThread) == pThreadSelf)
    {
        if (fRecursiveOk)
            return VINF_SUCCESS;
        rtLockValidatorComplainFirst("Recursion not allowed", pSrcPos, pThreadSelf, pRecU);
        rtLockValidatorComplainPanic();
        return VERR_SEM_LV_NESTED;
    }

    /*
     * Perform deadlock detection.
     */
    if (rtLockValidatorIsSimpleNoDeadlockCase(pRecU))
        return VINF_SUCCESS;
    return rtLockValidatorDeadlockDetection(pRecU, pThreadSelf, pSrcPos);
}
RT_EXPORT_SYMBOL(RTLockValidatorCheckBlocking);
1654
1655
/**
 * Enables or disables the lock validator globally.
 *
 * @returns The previous setting.
 * @param   fEnabled    The new setting.
 */
RTDECL(bool) RTLockValidatorSetEnabled(bool fEnabled)
{
    return ASMAtomicXchgBool(&g_fLockValidatorEnabled, fEnabled);
}
RT_EXPORT_SYMBOL(RTLockValidatorSetEnabled);
1661
1662
/**
 * Checks whether the lock validator is globally enabled.
 *
 * @returns The current setting.
 */
RTDECL(bool) RTLockValidatorIsEnabled(void)
{
    return ASMAtomicUoReadBool(&g_fLockValidatorEnabled);
}
RT_EXPORT_SYMBOL(RTLockValidatorIsEnabled);
1668
1669
/**
 * Enables or disables quiet mode (suppresses complaint output).
 *
 * @returns The previous setting.
 * @param   fQuiet      The new setting.
 */
RTDECL(bool) RTLockValidatorSetQuiet(bool fQuiet)
{
    return ASMAtomicXchgBool(&g_fLockValidatorQuiet, fQuiet);
}
RT_EXPORT_SYMBOL(RTLockValidatorSetQuiet);
1675
1676
/**
 * Checks whether the lock validator is in quiet mode.
 *
 * @returns The current setting.
 */
RTDECL(bool) RTLockValidatorAreQuiet(void)
{
    return ASMAtomicUoReadBool(&g_fLockValidatorQuiet);
}
RT_EXPORT_SYMBOL(RTLockValidatorAreQuiet);
1682
1683
/**
 * Enables or disables panicking on validation failures.
 *
 * @returns The previous setting.
 * @param   fMayPanic   The new setting.
 */
RTDECL(bool) RTLockValidatorSetMayPanic(bool fMayPanic)
{
    return ASMAtomicXchgBool(&g_fLockValidatorMayPanic, fMayPanic);
}
RT_EXPORT_SYMBOL(RTLockValidatorSetMayPanic);
1689
1690
/**
 * Checks whether the lock validator may panic on validation failures.
 *
 * @returns The current setting.
 */
RTDECL(bool) RTLockValidatorMayPanic(void)
{
    return ASMAtomicUoReadBool(&g_fLockValidatorMayPanic);
}
RT_EXPORT_SYMBOL(RTLockValidatorMayPanic);
1696
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette