VirtualBox

source: vbox/trunk/src/VBox/Runtime/common/misc/lockvalidator.cpp@28117

Last change on this file since 28117 was 27292, checked in by vboxsync on 2010-03-11:

Runtime/lockvalidator: free the right record

1/* $Id: lockvalidator.cpp 27292 2010-03-11 16:46:48Z vboxsync $ */
2/** @file
3 * IPRT - Lock Validator.
4 */
5
6/*
7 * Copyright (C) 2009-2010 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 *
26 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
27 * Clara, CA 95054 USA or visit http://www.sun.com if you need
28 * additional information or have any questions.
29 */
30
31/*******************************************************************************
32* Header Files *
33*******************************************************************************/
34#include <iprt/lockvalidator.h>
35#include "internal/iprt.h"
36
37#include <iprt/asm.h>
38#include <iprt/assert.h>
39#include <iprt/env.h>
40#include <iprt/err.h>
41#include <iprt/mem.h>
42#include <iprt/once.h>
43#include <iprt/semaphore.h>
44#include <iprt/string.h>
45#include <iprt/thread.h>
46
47#include "internal/lockvalidator.h"
48#include "internal/magics.h"
49#include "internal/thread.h"
50
51/*******************************************************************************
52* Defined Constants And Macros *
53*******************************************************************************/
54/** Macro that asserts that a pointer is aligned correctly.
55 * Only used when fighting bugs. */
56#if 1
57# define RTLOCKVAL_ASSERT_PTR_ALIGN(p) \
58 AssertMsg(!((uintptr_t)(p) & (sizeof(uintptr_t) - 1)), ("%p\n", (p)));
59#else
60# define RTLOCKVAL_ASSERT_PTR_ALIGN(p) do { } while (0)
61#endif
62
63/** Hashes the class handle (pointer) into an apPriorLocksHash index. */
64#define RTLOCKVALCLASS_HASH(hClass) \
65 ( ((uintptr_t)(hClass) >> 6 ) \
66 % ( RT_SIZEOFMEMB(RTLOCKVALCLASSINT, apPriorLocksHash) \
67 / sizeof(PRTLOCKVALCLASSREF)) )
68
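/* A sketch of what RTLOCKVALCLASS_HASH boils down to (the function name is
 * the editor's, not IPRT's): shift the class pointer right by six bits to
 * drop low address bits that carry little entropy for heap allocations, then
 * reduce modulo the size of the apPriorLocksHash table declared further down
 * (17 pointer-sized entries, so modulo 17). */
#if 0
static unsigned exampleClassHashIndex(void *hClass)
{
    return (unsigned)(((uintptr_t)hClass >> 6) % 17); /* 17 == RT_ELEMENTS(apPriorLocksHash) */
}
#endif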
69/** The max value for RTLOCKVALCLASSINT::cRefs. */
70#define RTLOCKVALCLASS_MAX_REFS UINT32_C(0xffff0000)
71/** The max value for RTLOCKVALCLASSREF::cLookups. */
72#define RTLOCKVALCLASSREF_MAX_LOOKUPS UINT32_C(0xfffe0000)
73/** The absolute max value for RTLOCKVALCLASSREF::cLookups at which it will
74 * be set back to RTLOCKVALCLASSREF_MAX_LOOKUPS. */
75#define RTLOCKVALCLASSREF_MAX_LOOKUPS_FIX UINT32_C(0xffff0000)
76
77
78/** @def RTLOCKVAL_WITH_RECURSION_RECORDS
79 * Enable recursion records. */
80#if defined(IN_RING3) || defined(DOXYGEN_RUNNING)
81# define RTLOCKVAL_WITH_RECURSION_RECORDS 1
82#endif
83
84/** @def RTLOCKVAL_WITH_VERBOSE_DUMPS
85 * Enables some extra verbosity in the lock dumping. */
86#if defined(DOXYGEN_RUNNING)
87# define RTLOCKVAL_WITH_VERBOSE_DUMPS
88#endif
89
90/** @def RTLOCKVAL_WITH_CLASS_HASH_STATS
91 * Enables collection of prior class hash lookup statistics, dumping them when
92 * complaining about the class. */
93#if defined(DEBUG) || defined(DOXYGEN_RUNNING)
94# define RTLOCKVAL_WITH_CLASS_HASH_STATS
95#endif
96
97
98/*******************************************************************************
99* Structures and Typedefs *
100*******************************************************************************/
101/**
102 * Deadlock detection stack entry.
103 */
104typedef struct RTLOCKVALDDENTRY
105{
106 /** The current record. */
107 PRTLOCKVALRECUNION pRec;
108 /** The current entry number if pRec is a shared one. */
109 uint32_t iEntry;
110 /** The thread state of the thread we followed to get to pFirstSibling.
111 * This is only used for validating a deadlock stack. */
112 RTTHREADSTATE enmState;
113 /** The thread we followed to get to pFirstSibling.
114 * This is only used for validating a deadlock stack. */
115 PRTTHREADINT pThread;
116 /** What pThread is waiting on, i.e. where we entered the circular list of
117 * siblings. This is used for validating a deadlock stack as well as
118 * terminating the sibling walk. */
119 PRTLOCKVALRECUNION pFirstSibling;
120} RTLOCKVALDDENTRY;
121
122
123/**
124 * Deadlock detection stack.
125 */
126typedef struct RTLOCKVALDDSTACK
127{
128 /** The number of stack entries. */
129 uint32_t c;
130 /** The stack entries. */
131 RTLOCKVALDDENTRY a[32];
132} RTLOCKVALDDSTACK;
133/** Pointer to a deadlock detection stack. */
134typedef RTLOCKVALDDSTACK *PRTLOCKVALDDSTACK;
135
136
137/**
138 * Reference to another class.
139 */
140typedef struct RTLOCKVALCLASSREF
141{
142 /** The class. */
143 RTLOCKVALCLASS hClass;
144 /** The number of lookups of this class. */
145 uint32_t volatile cLookups;
146 /** Indicates whether the entry was added automatically during order checking
147 * (true) or manually via the API (false). */
148 bool fAutodidacticism;
149 /** Reserved / explicit alignment padding. */
150 bool afReserved[3];
151} RTLOCKVALCLASSREF;
152/** Pointer to a class reference. */
153typedef RTLOCKVALCLASSREF *PRTLOCKVALCLASSREF;
154
155
156/** Pointer to a chunk of class references. */
157typedef struct RTLOCKVALCLASSREFCHUNK *PRTLOCKVALCLASSREFCHUNK;
158/**
159 * Chunk of class references.
160 */
161typedef struct RTLOCKVALCLASSREFCHUNK
162{
163 /** Array of refs. */
164#if 0 /** @todo for testing allocation of new chunks. */
165 RTLOCKVALCLASSREF aRefs[ARCH_BITS == 32 ? 10 : 8];
166#else
167 RTLOCKVALCLASSREF aRefs[2];
168#endif
169 /** Pointer to the next chunk. */
170 PRTLOCKVALCLASSREFCHUNK volatile pNext;
171} RTLOCKVALCLASSREFCHUNK;
172
173
174/**
175 * Lock class.
176 */
177typedef struct RTLOCKVALCLASSINT
178{
179 /** AVL node core. */
180 AVLLU32NODECORE Core;
181 /** Magic value (RTLOCKVALCLASS_MAGIC). */
182 uint32_t volatile u32Magic;
183 /** Reference counter. See RTLOCKVALCLASS_MAX_REFS. */
184 uint32_t volatile cRefs;
185 /** Whether the class is allowed to teach itself new locking order rules. */
186 bool fAutodidact;
187 /** Whether to allow recursion. */
188 bool fRecursionOk;
189 /** Strict release order. */
190 bool fStrictReleaseOrder;
191 /** Whether this class is in the tree. */
192 bool fInTree;
193 /** Donate a reference to the next retainer. This is a hack to make
194 * RTLockValidatorClassCreateUnique work. */
195 bool volatile fDonateRefToNextRetainer;
196 /** Reserved future use / explicit alignment. */
197 bool afReserved[3];
198 /** The minimum wait interval for which we do deadlock detection
199 * (milliseconds). */
200 RTMSINTERVAL cMsMinDeadlock;
201 /** The minimum wait interval for which we do order checks (milliseconds). */
202 RTMSINTERVAL cMsMinOrder;
203 /** More padding. */
204 uint32_t au32Reserved[ARCH_BITS == 32 ? 5 : 2];
205 /** Classes that may be taken prior to this one.
206 * This is a linked list where each node contains a chunk of locks so that we
207 * reduce the number of allocations as well as localize the data. */
208 RTLOCKVALCLASSREFCHUNK PriorLocks;
209 /** Hash table containing frequently encountered prior locks. */
210 PRTLOCKVALCLASSREF apPriorLocksHash[17];
211 /** Class name. (Allocated after the end of the block as usual.) */
212 char const *pszName;
213 /** Where this class was created.
214 * This is mainly used for finding automatically created lock classes.
215 * @remarks The strings are stored after this structure so we won't crash
216 * if the class lives longer than the module (dll/so/dylib) that
217 * spawned it. */
218 RTLOCKVALSRCPOS CreatePos;
219#ifdef RTLOCKVAL_WITH_CLASS_HASH_STATS
220 /** Hash hits. */
221 uint32_t volatile cHashHits;
222 /** Hash misses. */
223 uint32_t volatile cHashMisses;
224#endif
225} RTLOCKVALCLASSINT;
226AssertCompileSize(AVLLU32NODECORE, ARCH_BITS == 32 ? 20 : 32);
227AssertCompileMemberOffset(RTLOCKVALCLASSINT, PriorLocks, 64);
228
229
230/*******************************************************************************
231* Global Variables *
232*******************************************************************************/
233/** Serializing object destruction and deadlock detection.
234 *
235 * This makes sure that none of the memory examined by the deadlock detection
236 * code will become invalid (reused for other purposes or made not present)
237 * while the detection is in progress.
238 *
239 * NS: RTLOCKVALREC*, RTTHREADINT and RTLOCKVALRECSHRD::papOwners destruction.
240 * EW: Deadlock detection and some related activities.
241 */
242static RTSEMXROADS g_hLockValidatorXRoads = NIL_RTSEMXROADS;
243/** Serializing class tree insert and lookups. */
244static RTSEMRW g_hLockValClassTreeRWLock = NIL_RTSEMRW;
245/** Class tree. */
246static PAVLLU32NODECORE g_LockValClassTree = NULL;
247/** Critical section serializing the teaching new rules to the classes. */
248static RTCRITSECT g_LockValClassTeachCS;
249
250/** Whether the lock validator is enabled or disabled.
251 * Only applies to new locks. */
252static bool volatile g_fLockValidatorEnabled = true;
253/** Set if the lock validator is quiet. */
254#ifdef RT_STRICT
255static bool volatile g_fLockValidatorQuiet = false;
256#else
257static bool volatile g_fLockValidatorQuiet = true;
258#endif
259/** Set if the lock validator may panic. */
260#ifdef RT_STRICT
261static bool volatile g_fLockValidatorMayPanic = true;
262#else
263static bool volatile g_fLockValidatorMayPanic = false;
264#endif
265/** Whether to return an error status on wrong locking order. */
266static bool volatile g_fLockValSoftWrongOrder = false;
267
268
269/*******************************************************************************
270* Internal Functions *
271*******************************************************************************/
272static void rtLockValidatorClassDestroy(RTLOCKVALCLASSINT *pClass);
273static uint32_t rtLockValidatorStackDepth(PRTTHREADINT pThread);
274
275
276/**
277 * Lazy initialization of the lock validator globals.
278 */
279static void rtLockValidatorLazyInit(void)
280{
281 static uint32_t volatile s_fInitializing = false;
282 if (ASMAtomicCmpXchgU32(&s_fInitializing, true, false))
283 {
284 /*
285 * The locks.
286 */
287 if (!RTCritSectIsInitialized(&g_LockValClassTeachCS))
288 RTCritSectInitEx(&g_LockValClassTeachCS, RTCRITSECT_FLAGS_NO_LOCK_VAL, NIL_RTLOCKVALCLASS,
289 RTLOCKVAL_SUB_CLASS_ANY, "RTLockVal-Teach");
290
291 if (g_hLockValClassTreeRWLock == NIL_RTSEMRW)
292 {
293 RTSEMRW hSemRW;
294 int rc = RTSemRWCreateEx(&hSemRW, RTSEMRW_FLAGS_NO_LOCK_VAL, NIL_RTLOCKVALCLASS, RTLOCKVAL_SUB_CLASS_ANY, "RTLockVal-Tree");
295 if (RT_SUCCESS(rc))
296 ASMAtomicWriteHandle(&g_hLockValClassTreeRWLock, hSemRW);
297 }
298
299 if (g_hLockValidatorXRoads == NIL_RTSEMXROADS)
300 {
301 RTSEMXROADS hXRoads;
302 int rc = RTSemXRoadsCreate(&hXRoads);
303 if (RT_SUCCESS(rc))
304 ASMAtomicWriteHandle(&g_hLockValidatorXRoads, hXRoads);
305 }
306
307#ifdef IN_RING3
308 /*
309 * Check the environment for our config variables.
310 */
311 if (RTEnvExist("IPRT_LOCK_VALIDATOR_ENABLED"))
312 ASMAtomicWriteBool(&g_fLockValidatorEnabled, true);
313 if (RTEnvExist("IPRT_LOCK_VALIDATOR_DISABLED"))
314 ASMAtomicWriteBool(&g_fLockValidatorEnabled, false);
315
316 if (RTEnvExist("IPRT_LOCK_VALIDATOR_MAY_PANIC"))
317 ASMAtomicWriteBool(&g_fLockValidatorMayPanic, true);
318 if (RTEnvExist("IPRT_LOCK_VALIDATOR_MAY_NOT_PANIC"))
319 ASMAtomicWriteBool(&g_fLockValidatorMayPanic, false);
320
321 if (RTEnvExist("IPRT_LOCK_VALIDATOR_NOT_QUIET"))
322 ASMAtomicWriteBool(&g_fLockValidatorQuiet, false);
323 if (RTEnvExist("IPRT_LOCK_VALIDATOR_QUIET"))
324 ASMAtomicWriteBool(&g_fLockValidatorQuiet, true);
325
326 if (RTEnvExist("IPRT_LOCK_VALIDATOR_STRICT_ORDER"))
327 ASMAtomicWriteBool(&g_fLockValSoftWrongOrder, false);
328 if (RTEnvExist("IPRT_LOCK_VALIDATOR_SOFT_ORDER"))
329 ASMAtomicWriteBool(&g_fLockValSoftWrongOrder, true);
330#endif
331
332 /*
333 * Register cleanup
334 */
335 /** @todo register some cleanup callback if we care. */
336
337 ASMAtomicWriteU32(&s_fInitializing, false);
338 }
339}
340
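/* Usage sketch: configuring the validator from inside the process.  The
 * variables are sampled only once, by rtLockValidatorLazyInit() above, so
 * this has to run before the first lock validator activity.  Note that
 * RTEnvExist() only tests for existence, so the value does not matter.
 * RTEnvSet() is the real IPRT API; the function name is illustrative. */
#if 0
static void exampleEnableNoisyLockValidator(void)
{
    RTEnvSet("IPRT_LOCK_VALIDATOR_ENABLED",   "1"); /* any value works */
    RTEnvSet("IPRT_LOCK_VALIDATOR_NOT_QUIET", "1");
    RTEnvSet("IPRT_LOCK_VALIDATOR_MAY_PANIC", "1");
}
#endif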
341
342
343/** Wrapper around ASMAtomicReadPtr. */
344DECL_FORCE_INLINE(PRTLOCKVALRECUNION) rtLockValidatorReadRecUnionPtr(PRTLOCKVALRECUNION volatile *ppRec)
345{
346 PRTLOCKVALRECUNION p = (PRTLOCKVALRECUNION)ASMAtomicReadPtr((void * volatile *)ppRec);
347 RTLOCKVAL_ASSERT_PTR_ALIGN(p);
348 return p;
349}
350
351
352/** Wrapper around ASMAtomicWritePtr. */
353DECL_FORCE_INLINE(void) rtLockValidatorWriteRecUnionPtr(PRTLOCKVALRECUNION volatile *ppRec, PRTLOCKVALRECUNION pRecNew)
354{
355 RTLOCKVAL_ASSERT_PTR_ALIGN(pRecNew);
356 ASMAtomicWritePtr((void * volatile *)ppRec, pRecNew);
357}
358
359
360/** Wrapper around ASMAtomicReadPtr. */
361DECL_FORCE_INLINE(PRTTHREADINT) rtLockValidatorReadThreadHandle(RTTHREAD volatile *phThread)
362{
363 PRTTHREADINT p = (PRTTHREADINT)ASMAtomicReadPtr((void * volatile *)phThread);
364 RTLOCKVAL_ASSERT_PTR_ALIGN(p);
365 return p;
366}
367
368
369/** Wrapper around ASMAtomicUoReadPtr. */
370DECL_FORCE_INLINE(PRTLOCKVALRECSHRDOWN) rtLockValidatorUoReadSharedOwner(PRTLOCKVALRECSHRDOWN volatile *ppOwner)
371{
372 PRTLOCKVALRECSHRDOWN p = (PRTLOCKVALRECSHRDOWN)ASMAtomicUoReadPtr((void * volatile *)ppOwner);
373 RTLOCKVAL_ASSERT_PTR_ALIGN(p);
374 return p;
375}
376
377
378/**
379 * Reads a volatile thread handle field and returns the thread name.
380 *
381 * @returns Thread name (read only).
382 * @param phThread The thread handle field.
383 */
384static const char *rtLockValidatorNameThreadHandle(RTTHREAD volatile *phThread)
385{
386 PRTTHREADINT pThread = rtLockValidatorReadThreadHandle(phThread);
387 if (!pThread)
388 return "<NIL>";
389 if (!VALID_PTR(pThread))
390 return "<INVALID>";
391 if (pThread->u32Magic != RTTHREADINT_MAGIC)
392 return "<BAD-THREAD-MAGIC>";
393 return pThread->szName;
394}
395
396
397/**
398 * Launch a simple assertion like complaint w/ panic.
399 *
400 * @param pszFile Where from - file.
401 * @param iLine Where from - line.
402 * @param pszFunction Where from - function.
403 * @param pszWhat What we're complaining about.
404 * @param ... Format arguments.
405 */
406static void rtLockValComplain(RT_SRC_POS_DECL, const char *pszWhat, ...)
407{
408 if (!ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
409 {
410 RTAssertMsg1Weak("RTLockValidator", iLine, pszFile, pszFunction);
411 va_list va;
412 va_start(va, pszWhat);
413 RTAssertMsg2WeakV(pszWhat, va);
414 va_end(va);
415 }
416 if (!ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
417 RTAssertPanic();
418}
419
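/* Usage sketch: the complaint helper takes the standard IPRT source position
 * triple plus printf-style arguments, e.g. (u32Magic is illustrative): */
#if 0
rtLockValComplain(RT_SRC_POS, "unexpected magic %#x\n", u32Magic);
#endif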
420
421/**
422 * Describes the class.
423 *
424 * @param pszPrefix Message prefix.
425 * @param pClass The class to complain about.
426 * @param uSubClass My sub-class.
427 * @param fVerbose Verbose description including relations to other
428 * classes.
429 */
430static void rtLockValComplainAboutClass(const char *pszPrefix, RTLOCKVALCLASSINT *pClass, uint32_t uSubClass, bool fVerbose)
431{
432 if (ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
433 return;
434
435 /* Stringify the sub-class. */
436 const char *pszSubClass;
437 char szSubClass[32];
438 if (uSubClass < RTLOCKVAL_SUB_CLASS_USER)
439 switch (uSubClass)
440 {
441 case RTLOCKVAL_SUB_CLASS_NONE: pszSubClass = "none"; break;
442 case RTLOCKVAL_SUB_CLASS_ANY: pszSubClass = "any"; break;
443 default:
444 RTStrPrintf(szSubClass, sizeof(szSubClass), "invl-%u", uSubClass);
445 pszSubClass = szSubClass;
446 break;
447 }
448 else
449 {
450 RTStrPrintf(szSubClass, sizeof(szSubClass), "%u", uSubClass);
451 pszSubClass = szSubClass;
452 }
453
454 /* Validate the class pointer. */
455 if (!VALID_PTR(pClass))
456 {
457 RTAssertMsg2AddWeak("%sbad class=%p sub-class=%s\n", pszPrefix, pClass, pszSubClass);
458 return;
459 }
460 if (pClass->u32Magic != RTLOCKVALCLASS_MAGIC)
461 {
462 RTAssertMsg2AddWeak("%sbad class=%p magic=%#x sub-class=%s\n", pszPrefix, pClass, pClass->u32Magic, pszSubClass);
463 return;
464 }
465
466 /* OK, dump the class info. */
467 RTAssertMsg2AddWeak("%sclass=%p %s created={%Rbn(%u) %Rfn %p} sub-class=%s\n", pszPrefix,
468 pClass,
469 pClass->pszName,
470 pClass->CreatePos.pszFile,
471 pClass->CreatePos.uLine,
472 pClass->CreatePos.pszFunction,
473 pClass->CreatePos.uId,
474 pszSubClass);
475 if (fVerbose)
476 {
477 uint32_t i = 0;
478 uint32_t cPrinted = 0;
479 for (PRTLOCKVALCLASSREFCHUNK pChunk = &pClass->PriorLocks; pChunk; pChunk = pChunk->pNext)
480 for (unsigned j = 0; j < RT_ELEMENTS(pChunk->aRefs); j++, i++)
481 {
482 RTLOCKVALCLASSINT *pCurClass = pChunk->aRefs[j].hClass;
483 if (pCurClass != NIL_RTLOCKVALCLASS)
484 {
485 RTAssertMsg2AddWeak("%s%s #%02u: %s, %s, %u lookup%s\n", pszPrefix,
486 cPrinted == 0
487 ? "Prior:"
488 : " ",
489 i,
490 pCurClass->pszName,
491 pChunk->aRefs[j].fAutodidacticism
492 ? "autodidactic"
493 : "manually ",
494 pChunk->aRefs[j].cLookups,
495 pChunk->aRefs[j].cLookups != 1 ? "s" : "");
496 cPrinted++;
497 }
498 }
499 if (!cPrinted)
500 RTAssertMsg2AddWeak("%sPrior: none\n", pszPrefix);
501#ifdef RTLOCKVAL_WITH_CLASS_HASH_STATS
502 RTAssertMsg2AddWeak("%sHash Stats: %u hits, %u misses\n", pszPrefix, pClass->cHashHits, pClass->cHashMisses);
503#endif
504 }
505 else
506 {
507 uint32_t cPrinted = 0;
508 for (PRTLOCKVALCLASSREFCHUNK pChunk = &pClass->PriorLocks; pChunk; pChunk = pChunk->pNext)
509 for (unsigned j = 0; j < RT_ELEMENTS(pChunk->aRefs); j++)
510 {
511 RTLOCKVALCLASSINT *pCurClass = pChunk->aRefs[j].hClass;
512 if (pCurClass != NIL_RTLOCKVALCLASS)
513 {
514 if ((cPrinted % 10) == 0)
515 RTAssertMsg2AddWeak("%sPrior classes: %s%s", pszPrefix, pCurClass->pszName,
516 pChunk->aRefs[j].fAutodidacticism ? "*" : "");
517 else if ((cPrinted % 10) != 9)
518 RTAssertMsg2AddWeak(", %s%s", pCurClass->pszName,
519 pChunk->aRefs[j].fAutodidacticism ? "*" : "");
520 else
521 RTAssertMsg2AddWeak(", %s%s\n", pCurClass->pszName,
522 pChunk->aRefs[j].fAutodidacticism ? "*" : "");
523 cPrinted++;
524 }
525 }
526 if (!cPrinted)
527 RTAssertMsg2AddWeak("%sPrior classes: none\n", pszPrefix);
528 else if ((cPrinted % 10) != 0)
529 RTAssertMsg2AddWeak("\n");
530 }
531}
532
533
534/**
535 * Helper for getting the class name.
536 * @returns Class name string.
537 * @param pClass The class.
538 */
539static const char *rtLockValComplainGetClassName(RTLOCKVALCLASSINT *pClass)
540{
541 if (!pClass)
542 return "<nil-class>";
543 if (!VALID_PTR(pClass))
544 return "<bad-class-ptr>";
545 if (pClass->u32Magic != RTLOCKVALCLASS_MAGIC)
546 return "<bad-class-magic>";
547 if (!pClass->pszName)
548 return "<no-class-name>";
549 return pClass->pszName;
550}
551
552/**
553 * Formats the sub-class.
554 *
555 * @returns Stringified sub-class.
556 * @param uSubClass The name.
557 * @param pszBuf Buffer that is big enough.
558 */
559static const char *rtLockValComplainGetSubClassName(uint32_t uSubClass, char *pszBuf)
560{
561 if (uSubClass < RTLOCKVAL_SUB_CLASS_USER)
562 switch (uSubClass)
563 {
564 case RTLOCKVAL_SUB_CLASS_NONE: return "none";
565 case RTLOCKVAL_SUB_CLASS_ANY: return "any";
566 default:
567 RTStrPrintf(pszBuf, 32, "invl-%u", uSubClass);
568 break;
569 }
570 else
571 RTStrPrintf(pszBuf, 32, "%x", uSubClass);
572 return pszBuf;
573}
574
575
576/**
577 * Helper for rtLockValComplainAboutLock.
578 */
579DECL_FORCE_INLINE(void) rtLockValComplainAboutLockHlp(const char *pszPrefix, PRTLOCKVALRECUNION pRec, const char *pszSuffix,
580 uint32_t u32Magic, PCRTLOCKVALSRCPOS pSrcPos, uint32_t cRecursion,
581 const char *pszFrameType)
582{
583 char szBuf[32];
584 switch (u32Magic)
585 {
586 case RTLOCKVALRECEXCL_MAGIC:
587#ifdef RTLOCKVAL_WITH_VERBOSE_DUMPS
588 RTAssertMsg2AddWeak("%s%p %s xrec=%p own=%s r=%u cls=%s/%s pos={%Rbn(%u) %Rfn %p} [x%s]%s", pszPrefix,
589 pRec->Excl.hLock, pRec->Excl.szName, pRec,
590 rtLockValidatorNameThreadHandle(&pRec->Excl.hThread), cRecursion,
591 rtLockValComplainGetClassName(pRec->Excl.hClass),
592 rtLockValComplainGetSubClassName(pRec->Excl.uSubClass, szBuf),
593 pSrcPos->pszFile, pSrcPos->uLine, pSrcPos->pszFunction, pSrcPos->uId,
594 pszFrameType, pszSuffix);
595#else
596 RTAssertMsg2AddWeak("%s%p %s own=%s r=%u cls=%s/%s pos={%Rbn(%u) %Rfn %p} [x%s]%s", pszPrefix,
597 pRec->Excl.hLock, pRec->Excl.szName,
598 rtLockValidatorNameThreadHandle(&pRec->Excl.hThread), cRecursion,
599 rtLockValComplainGetClassName(pRec->Excl.hClass),
600 rtLockValComplainGetSubClassName(pRec->Excl.uSubClass, szBuf),
601 pSrcPos->pszFile, pSrcPos->uLine, pSrcPos->pszFunction, pSrcPos->uId,
602 pszFrameType, pszSuffix);
603#endif
604 break;
605
606 case RTLOCKVALRECSHRD_MAGIC:
607 RTAssertMsg2AddWeak("%ss %p %s srec=%p cls=%s/%s [s%s]%s", pszPrefix,
608 pRec->Shared.hLock, pRec->Shared.szName, pRec,
609 rtLockValComplainGetClassName(pRec->Shared.hClass),
610 rtLockValComplainGetSubClassName(pRec->Shared.uSubClass, szBuf),
611 pszFrameType, pszSuffix);
612 break;
613
614 case RTLOCKVALRECSHRDOWN_MAGIC:
615 {
616 PRTLOCKVALRECSHRD pShared = pRec->ShrdOwner.pSharedRec;
617 if ( VALID_PTR(pShared)
618 && pShared->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC)
619#ifdef RTLOCKVAL_WITH_VERBOSE_DUMPS
620 RTAssertMsg2AddWeak("%s%p %s srec=%p trec=%p own=%s r=%u cls=%s/%s pos={%Rbn(%u) %Rfn %p} [o%s]%s", pszPrefix,
621 pShared->hLock, pShared->szName, pShared,
622 pRec, rtLockValidatorNameThreadHandle(&pRec->ShrdOwner.hThread), cRecursion,
623 rtLockValComplainGetClassName(pShared->hClass),
624 rtLockValComplainGetSubClassName(pShared->uSubClass, szBuf),
625 pSrcPos->pszFile, pSrcPos->uLine, pSrcPos->pszFunction, pSrcPos->uId,
626 pszFrameType, pszSuffix);
627#else
628 RTAssertMsg2AddWeak("%s%p %s own=%s r=%u cls=%s/%s pos={%Rbn(%u) %Rfn %p} [o%s]%s", pszPrefix,
629 pShared->hLock, pShared->szName,
630 rtLockValidatorNameThreadHandle(&pRec->ShrdOwner.hThread), cRecursion,
631 rtLockValComplainGetClassName(pShared->hClass),
632 rtLockValComplainGetSubClassName(pShared->uSubClass, szBuf),
633 pSrcPos->pszFile, pSrcPos->uLine, pSrcPos->pszFunction, pSrcPos->uId,
634 pszFrameType, pszSuffix);
635#endif
636 else
637 RTAssertMsg2AddWeak("%sbad srec=%p trec=%p own=%s r=%u pos={%Rbn(%u) %Rfn %p} [x%s]%s", pszPrefix,
638 pShared,
639 pRec, rtLockValidatorNameThreadHandle(&pRec->ShrdOwner.hThread), cRecursion,
640 pSrcPos->pszFile, pSrcPos->uLine, pSrcPos->pszFunction, pSrcPos->uId,
641 pszFrameType, pszSuffix);
642 break;
643 }
644
645 default:
646 AssertMsgFailed(("%#x\n", u32Magic));
647 }
648}
649
650
651/**
652 * Describes the lock.
653 *
654 * @param pszPrefix Message prefix.
655 * @param pRec The lock record we're working on.
656 * @param pszSuffix Message suffix.
657 */
658static void rtLockValComplainAboutLock(const char *pszPrefix, PRTLOCKVALRECUNION pRec, const char *pszSuffix)
659{
660#ifdef RTLOCKVAL_WITH_RECURSION_RECORDS
661# define FIX_REC(r) 1
662#else
663# define FIX_REC(r) (r)
664#endif
665 if ( VALID_PTR(pRec)
666 && !ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
667 {
668 switch (pRec->Core.u32Magic)
669 {
670 case RTLOCKVALRECEXCL_MAGIC:
671 rtLockValComplainAboutLockHlp(pszPrefix, pRec, pszSuffix, RTLOCKVALRECEXCL_MAGIC,
672 &pRec->Excl.SrcPos, FIX_REC(pRec->Excl.cRecursion), "");
673 break;
674
675 case RTLOCKVALRECSHRD_MAGIC:
676 rtLockValComplainAboutLockHlp(pszPrefix, pRec, pszSuffix, RTLOCKVALRECSHRD_MAGIC, NULL, 0, "");
677 break;
678
679 case RTLOCKVALRECSHRDOWN_MAGIC:
680 rtLockValComplainAboutLockHlp(pszPrefix, pRec, pszSuffix, RTLOCKVALRECSHRDOWN_MAGIC,
681 &pRec->ShrdOwner.SrcPos, FIX_REC(pRec->ShrdOwner.cRecursion), "");
682 break;
683
684 case RTLOCKVALRECNEST_MAGIC:
685 {
686 PRTLOCKVALRECUNION pRealRec = pRec->Nest.pRec;
687 uint32_t u32Magic;
688 if ( VALID_PTR(pRealRec)
689 && ( (u32Magic = pRealRec->Core.u32Magic) == RTLOCKVALRECEXCL_MAGIC
690 || u32Magic == RTLOCKVALRECSHRD_MAGIC
691 || u32Magic == RTLOCKVALRECSHRDOWN_MAGIC)
692 )
693 rtLockValComplainAboutLockHlp(pszPrefix, pRealRec, pszSuffix, u32Magic,
694 &pRec->Nest.SrcPos, pRec->Nest.cRecursion, "/r");
695 else
696 RTAssertMsg2AddWeak("%sbad rrec=%p nrec=%p r=%u pos={%Rbn(%u) %Rfn %p}%s", pszPrefix,
697 pRealRec, pRec, pRec->Nest.cRecursion,
698 pRec->Nest.SrcPos.pszFile, pRec->Nest.SrcPos.uLine, pRec->Nest.SrcPos.pszFunction, pRec->Nest.SrcPos.uId,
699 pszSuffix);
700 break;
701 }
702
703 default:
704 RTAssertMsg2AddWeak("%spRec=%p u32Magic=%#x (bad)%s", pszPrefix, pRec, pRec->Core.u32Magic, pszSuffix);
705 break;
706 }
707 }
708#undef FIX_REC
709}
710
711
712/**
713 * Dump the lock stack.
714 *
715 * @param pThread The thread whose lock stack we're dumping.
716 * @param cchIndent The indentation in chars.
717 * @param cMinFrames The minimum number of frames to consider
718 * dumping.
719 * @param pHighlightRec Record that should be marked specially in the
720 * dump.
721 */
722static void rtLockValComplainAboutLockStack(PRTTHREADINT pThread, unsigned cchIndent, uint32_t cMinFrames,
723 PRTLOCKVALRECUNION pHighlightRec)
724{
725 if ( VALID_PTR(pThread)
726 && !ASMAtomicUoReadBool(&g_fLockValidatorQuiet)
727 && pThread->u32Magic == RTTHREADINT_MAGIC
728 )
729 {
730 uint32_t cEntries = rtLockValidatorStackDepth(pThread);
731 if (cEntries >= cMinFrames)
732 {
733 RTAssertMsg2AddWeak("%*s---- start of lock stack for %p %s - %u entr%s ----\n", cchIndent, "",
734 pThread, pThread->szName, cEntries, cEntries == 1 ? "y" : "ies");
735 PRTLOCKVALRECUNION pCur = rtLockValidatorReadRecUnionPtr(&pThread->LockValidator.pStackTop);
736 for (uint32_t i = 0; VALID_PTR(pCur); i++)
737 {
738 char szPrefix[80];
739 RTStrPrintf(szPrefix, sizeof(szPrefix), "%*s#%02u: ", cchIndent, "", i);
740 rtLockValComplainAboutLock(szPrefix, pCur, pHighlightRec != pCur ? "\n" : " (*)\n");
741 switch (pCur->Core.u32Magic)
742 {
743 case RTLOCKVALRECEXCL_MAGIC: pCur = rtLockValidatorReadRecUnionPtr(&pCur->Excl.pDown); break;
744 case RTLOCKVALRECSHRDOWN_MAGIC: pCur = rtLockValidatorReadRecUnionPtr(&pCur->ShrdOwner.pDown); break;
745 case RTLOCKVALRECNEST_MAGIC: pCur = rtLockValidatorReadRecUnionPtr(&pCur->Nest.pDown); break;
746 default:
747 RTAssertMsg2AddWeak("%*s<bad stack frame>\n", cchIndent, "");
748 pCur = NULL;
749 break;
750 }
751 }
752 RTAssertMsg2AddWeak("%*s---- end of lock stack ----\n", cchIndent, "");
753 }
754 }
755}
756
757
758/**
759 * Launch the initial complaint.
760 *
761 * @param pszWhat What we're complaining about.
762 * @param pSrcPos Where we are complaining from, as it were.
763 * @param pThreadSelf The calling thread.
764 * @param pRec The main lock involved. Can be NULL.
765 * @param fDumpStack Whether to dump the lock stack (true) or not
766 * (false).
767 */
768static void rtLockValComplainFirst(const char *pszWhat, PCRTLOCKVALSRCPOS pSrcPos, PRTTHREADINT pThreadSelf,
769 PRTLOCKVALRECUNION pRec, bool fDumpStack)
770{
771 if (!ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
772 {
773 ASMCompilerBarrier(); /* paranoia */
774 RTAssertMsg1Weak("RTLockValidator", pSrcPos ? pSrcPos->uLine : 0, pSrcPos ? pSrcPos->pszFile : NULL, pSrcPos ? pSrcPos->pszFunction : NULL);
775 if (pSrcPos && pSrcPos->uId)
776 RTAssertMsg2Weak("%s [uId=%p thrd=%s]\n", pszWhat, pSrcPos->uId, VALID_PTR(pThreadSelf) ? pThreadSelf->szName : "<NIL>");
777 else
778 RTAssertMsg2Weak("%s [thrd=%s]\n", pszWhat, VALID_PTR(pThreadSelf) ? pThreadSelf->szName : "<NIL>");
779 rtLockValComplainAboutLock("Lock: ", pRec, "\n");
780 if (fDumpStack)
781 rtLockValComplainAboutLockStack(pThreadSelf, 0, 1, pRec);
782 }
783}
784
785
786/**
787 * Continue bitching.
788 *
789 * @param pszFormat Format string.
790 * @param ... Format arguments.
791 */
792static void rtLockValComplainMore(const char *pszFormat, ...)
793{
794 if (!ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
795 {
796 va_list va;
797 va_start(va, pszFormat);
798 RTAssertMsg2AddWeakV(pszFormat, va);
799 va_end(va);
800 }
801}
802
803
804/**
805 * Raise a panic if enabled.
806 */
807static void rtLockValComplainPanic(void)
808{
809 if (ASMAtomicUoReadBool(&g_fLockValidatorMayPanic))
810 RTAssertPanic();
811}
812
813
814/**
815 * Copy a source position record.
816 *
817 * @param pDst The destination.
818 * @param pSrc The source. Can be NULL.
819 */
820DECL_FORCE_INLINE(void) rtLockValidatorSrcPosCopy(PRTLOCKVALSRCPOS pDst, PCRTLOCKVALSRCPOS pSrc)
821{
822 if (pSrc)
823 {
824 ASMAtomicUoWriteU32(&pDst->uLine, pSrc->uLine);
825 ASMAtomicUoWritePtr((void * volatile *)&pDst->pszFile, pSrc->pszFile);
826 ASMAtomicUoWritePtr((void * volatile *)&pDst->pszFunction, pSrc->pszFunction);
827 ASMAtomicUoWritePtr((void * volatile *)&pDst->uId, (void *)pSrc->uId);
828 }
829 else
830 {
831 ASMAtomicUoWriteU32(&pDst->uLine, 0);
832 ASMAtomicUoWritePtr((void * volatile *)&pDst->pszFile, NULL);
833 ASMAtomicUoWritePtr((void * volatile *)&pDst->pszFunction, NULL);
834 ASMAtomicUoWritePtr((void * volatile *)&pDst->uId, 0);
835 }
836}
837
838
839/**
840 * Init a source position record.
841 *
842 * @param pSrcPos The source position record.
843 */
844DECL_FORCE_INLINE(void) rtLockValidatorSrcPosInit(PRTLOCKVALSRCPOS pSrcPos)
845{
846 pSrcPos->pszFile = NULL;
847 pSrcPos->pszFunction = NULL;
848 pSrcPos->uId = 0;
849 pSrcPos->uLine = 0;
850#if HC_ARCH_BITS == 64
851 pSrcPos->u32Padding = 0;
852#endif
853}
854
855
856/* sdbm:
857 This algorithm was created for sdbm (a public-domain reimplementation of
858 ndbm) database library. it was found to do well in scrambling bits,
859 causing better distribution of the keys and fewer splits. it also happens
860 to be a good general hashing function with good distribution. the actual
861 function is hash(i) = hash(i - 1) * 65599 + str[i]; what is included below
862 is the faster version used in gawk. [there is even a faster, duff-device
863 version] the magic constant 65599 was picked out of thin air while
864 experimenting with different constants, and turns out to be a prime.
865 this is one of the algorithms used in berkeley db (see sleepycat) and
866 elsewhere. */
867DECL_FORCE_INLINE(uint32_t) sdbm(const char *str, uint32_t hash)
868{
869 uint8_t *pu8 = (uint8_t *)str;
870 int c;
871
872 while ((c = *pu8++))
873 hash = c + (hash << 6) + (hash << 16) - hash;
874
875 return hash;
876}
877
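/* Usage sketch: chaining sdbm() over several inputs the same way
 * rtLockValidatorSrcPosHash() below does - feeding the previous result back
 * in as the seed folds all the strings into one 32-bit hash.  Assumes both
 * strings are non-NULL; the function name is illustrative. */
#if 0
static uint32_t exampleHashFileAndFunction(const char *pszFile, const char *pszFunction, uint32_t uLine)
{
    uint32_t uHash = sdbm(pszFile, 0);  /* hash the file name first */
    uHash = sdbm(pszFunction, uHash);   /* fold in the function name */
    return uHash + uLine;               /* finally add the line number */
}
#endif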
878
879/**
880 * Hashes the specified source position.
881 *
882 * @returns Hash.
883 * @param pSrcPos The source position record.
884 */
885static uint32_t rtLockValidatorSrcPosHash(PCRTLOCKVALSRCPOS pSrcPos)
886{
887 uint32_t uHash;
888 if ( ( pSrcPos->pszFile
889 || pSrcPos->pszFunction)
890 && pSrcPos->uLine != 0)
891 {
892 uHash = 0;
893 if (pSrcPos->pszFile)
894 uHash = sdbm(pSrcPos->pszFile, uHash);
895 if (pSrcPos->pszFunction)
896 uHash = sdbm(pSrcPos->pszFunction, uHash);
897 uHash += pSrcPos->uLine;
898 }
899 else
900 {
901 Assert(pSrcPos->uId);
902 uHash = (uint32_t)pSrcPos->uId;
903 }
904
905 return uHash;
906}
907
908
909/**
910 * Compares two source positions.
911 *
912 * @returns 0 if equal, < 0 if pSrcPos1 is smaller than pSrcPos2, > 0
913 * otherwise.
914 * @param pSrcPos1 The first source position.
915 * @param pSrcPos2 The second source position.
916 */
917static int rtLockValidatorSrcPosCompare(PCRTLOCKVALSRCPOS pSrcPos1, PCRTLOCKVALSRCPOS pSrcPos2)
918{
919 if (pSrcPos1->uLine != pSrcPos2->uLine)
920 return pSrcPos1->uLine < pSrcPos2->uLine ? -1 : 1;
921
922 int iDiff = RTStrCmp(pSrcPos1->pszFile, pSrcPos2->pszFile);
923 if (iDiff != 0)
924 return iDiff;
925
926 iDiff = RTStrCmp(pSrcPos1->pszFunction, pSrcPos2->pszFunction);
927 if (iDiff != 0)
928 return iDiff;
929
930 if (pSrcPos1->uId != pSrcPos2->uId)
931 return pSrcPos1->uId < pSrcPos2->uId ? -1 : 1;
932 return 0;
933}
934
935
936
937/**
938 * Serializes destruction of RTLOCKVALREC* and RTTHREADINT structures.
939 */
940DECLHIDDEN(void) rtLockValidatorSerializeDestructEnter(void)
941{
942 RTSEMXROADS hXRoads = g_hLockValidatorXRoads;
943 if (hXRoads != NIL_RTSEMXROADS)
944 RTSemXRoadsNSEnter(hXRoads);
945}
946
947
948/**
949 * Call after rtLockValidatorSerializeDestructEnter.
950 */
951DECLHIDDEN(void) rtLockValidatorSerializeDestructLeave(void)
952{
953 RTSEMXROADS hXRoads = g_hLockValidatorXRoads;
954 if (hXRoads != NIL_RTSEMXROADS)
955 RTSemXRoadsNSLeave(hXRoads);
956}
957
958
959/**
960 * Serializes deadlock detection against destruction of the objects being
961 * inspected.
962 */
963DECLINLINE(void) rtLockValidatorSerializeDetectionEnter(void)
964{
965 RTSEMXROADS hXRoads = g_hLockValidatorXRoads;
966 if (hXRoads != NIL_RTSEMXROADS)
967 RTSemXRoadsEWEnter(hXRoads);
968}
969
970
971/**
972 * Call after rtLockValidatorSerializeDetectionEnter.
973 */
974DECLHIDDEN(void) rtLockValidatorSerializeDetectionLeave(void)
975{
976 RTSEMXROADS hXRoads = g_hLockValidatorXRoads;
977 if (hXRoads != NIL_RTSEMXROADS)
978 RTSemXRoadsEWLeave(hXRoads);
979}
980
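/* Editor's sketch of the intended pairing (cf. the NS/EW note on
 * g_hLockValidatorXRoads above): destruction code brackets record
 * invalidation with the NS enter/leave pair while deadlock detection
 * brackets its graph walk with the EW pair, so the crossroads semaphore
 * keeps the two from overlapping.  The function and elided steps are
 * illustrative. */
#if 0
static void exampleDestroyRecord(PRTLOCKVALRECEXCL pRec)
{
    rtLockValidatorSerializeDestructEnter();
    /* ... invalidate pRec->Core.u32Magic, unlink from stacks and siblings ... */
    rtLockValidatorSerializeDestructLeave();
    RTMemFree(pRec); /* the record is unreachable by any detector now */
}
#endif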
981
982/**
983 * Initializes the per thread lock validator data.
984 *
985 * @param pPerThread The data.
986 */
987DECLHIDDEN(void) rtLockValidatorInitPerThread(RTLOCKVALPERTHREAD *pPerThread)
988{
989 pPerThread->bmFreeShrdOwners = UINT32_MAX;
990
991 /* ASSUMES the rest has already been zeroed. */
992 Assert(pPerThread->pRec == NULL);
993 Assert(pPerThread->cWriteLocks == 0);
994 Assert(pPerThread->cReadLocks == 0);
995 Assert(pPerThread->fInValidator == false);
996 Assert(pPerThread->pStackTop == NULL);
997}
998
999
1000/**
1001 * Delete the per thread lock validator data.
1002 *
1003 * @param pPerThread The data.
1004 */
1005DECLHIDDEN(void) rtLockValidatorDeletePerThread(RTLOCKVALPERTHREAD *pPerThread)
1006{
1007 /*
1008 * Check that the thread doesn't own any locks at this time.
1009 */
1010 if (pPerThread->pStackTop)
1011 {
1012 rtLockValComplainFirst("Thread terminating owning locks!", NULL,
1013 RT_FROM_MEMBER(pPerThread, RTTHREADINT, LockValidator),
1014 pPerThread->pStackTop, true);
1015 rtLockValComplainPanic();
1016 }
1017
1018 /*
1019 * Free the recursion records.
1020 */
1021 PRTLOCKVALRECNEST pCur = pPerThread->pFreeNestRecs;
1022 pPerThread->pFreeNestRecs = NULL;
1023 while (pCur)
1024 {
1025 PRTLOCKVALRECNEST pNext = pCur->pNextFree;
1026 RTMemFree(pCur);
1027 pCur = pNext;
1028 }
1029}
1030
1031RTDECL(int) RTLockValidatorClassCreateEx(PRTLOCKVALCLASS phClass, PCRTLOCKVALSRCPOS pSrcPos,
1032 bool fAutodidact, bool fRecursionOk, bool fStrictReleaseOrder,
1033 RTMSINTERVAL cMsMinDeadlock, RTMSINTERVAL cMsMinOrder,
1034 const char *pszNameFmt, ...)
1035{
1036 va_list va;
1037 va_start(va, pszNameFmt);
1038 int rc = RTLockValidatorClassCreateExV(phClass, pSrcPos, fAutodidact, fRecursionOk, fStrictReleaseOrder,
1039 cMsMinDeadlock, cMsMinOrder, pszNameFmt, va);
1040 va_end(va);
1041 return rc;
1042}
1043
1044
1045RTDECL(int) RTLockValidatorClassCreateExV(PRTLOCKVALCLASS phClass, PCRTLOCKVALSRCPOS pSrcPos,
1046 bool fAutodidact, bool fRecursionOk, bool fStrictReleaseOrder,
1047 RTMSINTERVAL cMsMinDeadlock, RTMSINTERVAL cMsMinOrder,
1048 const char *pszNameFmt, va_list va)
1049{
1050 Assert(cMsMinDeadlock >= 1);
1051 Assert(cMsMinOrder >= 1);
1052 AssertPtr(pSrcPos);
1053
1054 /*
1055 * Format the name and calc its length.
1056 */
1057 size_t cbName;
1058 char szName[32];
1059 if (pszNameFmt && *pszNameFmt)
1060 cbName = RTStrPrintfV(szName, sizeof(szName), pszNameFmt, va) + 1;
1061 else
1062 {
1063 static uint32_t volatile s_cAnonymous = 0;
1064 uint32_t i = ASMAtomicIncU32(&s_cAnonymous);
1065 cbName = RTStrPrintf(szName, sizeof(szName), "anon-%u", i - 1) + 1;
1066 }
1067
1068 /*
1069 * Figure out the file and function name lengths and allocate memory for
1070 * it all.
1071 */
1072 size_t const cbFile = pSrcPos->pszFile ? strlen(pSrcPos->pszFile) + 1 : 0;
1073 size_t const cbFunction = pSrcPos->pszFunction ? strlen(pSrcPos->pszFunction) + 1 : 0;
1074 RTLOCKVALCLASSINT *pThis = (RTLOCKVALCLASSINT *)RTMemAlloc(sizeof(*pThis) + cbFile + cbFunction + cbName);
1075 if (!pThis)
1076 return VERR_NO_MEMORY;
1077
1078 /*
1079 * Initialize the class data.
1080 */
1081 pThis->Core.Key = rtLockValidatorSrcPosHash(pSrcPos);
1082 pThis->Core.uchHeight = 0;
1083 pThis->Core.pLeft = NULL;
1084 pThis->Core.pRight = NULL;
1085 pThis->Core.pList = NULL;
1086 pThis->u32Magic = RTLOCKVALCLASS_MAGIC;
1087 pThis->cRefs = 1;
1088 pThis->fAutodidact = fAutodidact;
1089 pThis->fRecursionOk = fRecursionOk;
1090 pThis->fStrictReleaseOrder = fStrictReleaseOrder;
1091 pThis->fInTree = false;
1092 pThis->fDonateRefToNextRetainer = false;
1093 pThis->afReserved[0] = false;
1094 pThis->afReserved[1] = false;
1095 pThis->afReserved[2] = false;
1096 pThis->cMsMinDeadlock = cMsMinDeadlock;
1097 pThis->cMsMinOrder = cMsMinOrder;
1098 for (unsigned i = 0; i < RT_ELEMENTS(pThis->au32Reserved); i++)
1099 pThis->au32Reserved[i] = 0;
1100 for (unsigned i = 0; i < RT_ELEMENTS(pThis->PriorLocks.aRefs); i++)
1101 {
1102 pThis->PriorLocks.aRefs[i].hClass = NIL_RTLOCKVALCLASS;
1103 pThis->PriorLocks.aRefs[i].cLookups = 0;
1104 pThis->PriorLocks.aRefs[i].fAutodidacticism = false;
1105 pThis->PriorLocks.aRefs[i].afReserved[0] = false;
1106 pThis->PriorLocks.aRefs[i].afReserved[1] = false;
1107 pThis->PriorLocks.aRefs[i].afReserved[2] = false;
1108 }
1109 pThis->PriorLocks.pNext = NULL;
1110 for (unsigned i = 0; i < RT_ELEMENTS(pThis->apPriorLocksHash); i++)
1111 pThis->apPriorLocksHash[i] = NULL;
1112 char *pszDst = (char *)(pThis + 1);
1113 pThis->pszName = (char *)memcpy(pszDst, szName, cbName);
1114 pszDst += cbName;
1115 rtLockValidatorSrcPosCopy(&pThis->CreatePos, pSrcPos);
1116 pThis->CreatePos.pszFile = pSrcPos->pszFile ? (char *)memcpy(pszDst, pSrcPos->pszFile, cbFile) : NULL;
1117 pszDst += cbFile;
1118 pThis->CreatePos.pszFunction = pSrcPos->pszFunction ? (char *)memcpy(pszDst, pSrcPos->pszFunction, cbFunction) : NULL;
1119 Assert(rtLockValidatorSrcPosHash(&pThis->CreatePos) == pThis->Core.Key);
1120#ifdef RTLOCKVAL_WITH_CLASS_HASH_STATS
1121 pThis->cHashHits = 0;
1122 pThis->cHashMisses = 0;
1123#endif
1124
1125 *phClass = pThis;
1126 return VINF_SUCCESS;
1127}
1128
1129
1130RTDECL(int) RTLockValidatorClassCreate(PRTLOCKVALCLASS phClass, bool fAutodidact, RT_SRC_POS_DECL, const char *pszNameFmt, ...)
1131{
1132 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_POS_NO_ID();
1133 va_list va;
1134 va_start(va, pszNameFmt);
1135 int rc = RTLockValidatorClassCreateExV(phClass, &SrcPos,
1136 fAutodidact, true /*fRecursionOk*/, false /*fStrictReleaseOrder*/,
1137 1 /*cMsMinDeadlock*/, 1 /*cMsMinOrder*/,
1138 pszNameFmt, va);
1139 va_end(va);
1140 return rc;
1141}
1142
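/* Usage sketch: creating a plain, manually taught class with the defaults
 * RTLockValidatorClassCreate() fills in above (recursion OK, soft release
 * order, 1 ms deadlock/order thresholds).  The function and class names are
 * illustrative. */
#if 0
static int exampleCreateClass(PRTLOCKVALCLASS phClass)
{
    return RTLockValidatorClassCreate(phClass, false /*fAutodidact*/,
                                      RT_SRC_POS, "example-class");
}
#endif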
1143
1144/**
1145 * Creates a new lock validator class with a reference that is consumed by the
1146 * first call to RTLockValidatorClassRetain.
1147 *
1148 * This is tailored for use in the parameter list of a semaphore constructor.
1149 *
1150 * @returns Class handle with a reference that is automatically consumed by the
1151 * first retainer. NIL_RTLOCKVALCLASS if we run into trouble.
1152 *
1153 * @param pszFile The source position of the call, file.
1154 * @param iLine The source position of the call, line.
1155 * @param pszFunction The source position of the call, function.
1156 * @param pszNameFmt Class name format string, optional (NULL). Max
1157 * length is 32 bytes.
1158 * @param ... Format string arguments.
1159 */
1160RTDECL(RTLOCKVALCLASS) RTLockValidatorClassCreateUnique(RT_SRC_POS_DECL, const char *pszNameFmt, ...)
1161{
1162 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_POS_NO_ID();
1163 RTLOCKVALCLASSINT *pClass;
1164 va_list va;
1165 va_start(va, pszNameFmt);
1166 int rc = RTLockValidatorClassCreateExV(&pClass, &SrcPos,
1167 true /*fAutodidact*/, true /*fRecursionOk*/, false /*fStrictReleaseOrder*/,
1168 1 /*cMsMinDeadlock*/, 1 /*cMsMinOrder*/,
1169 pszNameFmt, va);
1170 va_end(va);
1171 if (RT_FAILURE(rc))
1172 return NIL_RTLOCKVALCLASS;
1173 ASMAtomicWriteBool(&pClass->fDonateRefToNextRetainer, true); /* see rtLockValidatorClassRetain */
1174 return pClass;
1175}
1176
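/* Usage sketch: the constructor-parameter pattern this function is tailored
 * for.  The returned handle carries a donated reference which the semaphore
 * code consumes on its first retain, so the caller needs no explicit
 * release on success.  RTSemMutexCreateEx() is the real IPRT API; the
 * function name is illustrative. */
#if 0
static int exampleCreateMutexWithUniqueClass(PRTSEMMUTEX phMtx)
{
    return RTSemMutexCreateEx(phMtx, 0 /*fFlags*/,
                              RTLockValidatorClassCreateUnique(RT_SRC_POS, "my-mutex"),
                              RTLOCKVAL_SUB_CLASS_NONE, "my-mutex");
}
#endif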
1177
1178/**
1179 * Internal class retainer.
1180 * @returns The new reference count.
1181 * @param pClass The class.
1182 */
1183DECL_FORCE_INLINE(uint32_t) rtLockValidatorClassRetain(RTLOCKVALCLASSINT *pClass)
1184{
1185 uint32_t cRefs = ASMAtomicIncU32(&pClass->cRefs);
1186 if (cRefs > RTLOCKVALCLASS_MAX_REFS)
1187 ASMAtomicWriteU32(&pClass->cRefs, RTLOCKVALCLASS_MAX_REFS);
1188 else if ( cRefs == 2
1189 && ASMAtomicXchgBool(&pClass->fDonateRefToNextRetainer, false))
1190 cRefs = ASMAtomicDecU32(&pClass->cRefs);
1191 return cRefs;
1192}
1193
1194
1195/**
1196 * Validates and retains a lock validator class.
1197 *
1198 * @returns @a hClass on success, NIL_RTLOCKVALCLASS on failure.
1199 * @param hClass The class handle. NIL_RTLOCKVALCLASS is ok.
1200 */
1201DECL_FORCE_INLINE(RTLOCKVALCLASS) rtLockValidatorClassValidateAndRetain(RTLOCKVALCLASS hClass)
1202{
1203 if (hClass == NIL_RTLOCKVALCLASS)
1204 return hClass;
1205 AssertPtrReturn(hClass, NIL_RTLOCKVALCLASS);
1206 AssertReturn(hClass->u32Magic == RTLOCKVALCLASS_MAGIC, NIL_RTLOCKVALCLASS);
1207 rtLockValidatorClassRetain(hClass);
1208 return hClass;
1209}
1210
1211
1212/**
1213 * Internal class releaser.
1214 * @returns The new reference count.
1215 * @param pClass The class.
1216 */
1217DECLINLINE(uint32_t) rtLockValidatorClassRelease(RTLOCKVALCLASSINT *pClass)
1218{
1219 uint32_t cRefs = ASMAtomicDecU32(&pClass->cRefs);
1220 if (cRefs + 1 == RTLOCKVALCLASS_MAX_REFS)
1221 ASMAtomicWriteU32(&pClass->cRefs, RTLOCKVALCLASS_MAX_REFS);
1222 else if (!cRefs)
1223 rtLockValidatorClassDestroy(pClass);
1224 return cRefs;
1225}
1226
1227
1228/**
1229 * Destroys a class once there are no more references to it.
1230 *
1231 * @param pClass The class.
1232 */
1233static void rtLockValidatorClassDestroy(RTLOCKVALCLASSINT *pClass)
1234{
1235 AssertReturnVoid(!pClass->fInTree);
1236 ASMAtomicWriteU32(&pClass->u32Magic, RTLOCKVALCLASS_MAGIC_DEAD);
1237
1238 PRTLOCKVALCLASSREFCHUNK pChunk = &pClass->PriorLocks;
1239 while (pChunk)
1240 {
1241 for (uint32_t i = 0; i < RT_ELEMENTS(pChunk->aRefs); i++)
1242 {
1243 RTLOCKVALCLASSINT *pClass2 = pChunk->aRefs[i].hClass;
1244 if (pClass2 != NIL_RTLOCKVALCLASS)
1245 {
1246 pChunk->aRefs[i].hClass = NIL_RTLOCKVALCLASS;
1247 rtLockValidatorClassRelease(pClass2);
1248 }
1249 }
1250
1251 PRTLOCKVALCLASSREFCHUNK pNext = pChunk->pNext;
1252 pChunk->pNext = NULL;
1253 if (pChunk != &pClass->PriorLocks)
1254 RTMemFree(pChunk);
1255 pChunk = pNext;
1256 }
1257
1258 RTMemFree(pClass);
1259}
1260
1261
1262RTDECL(RTLOCKVALCLASS) RTLockValidatorClassFindForSrcPos(PRTLOCKVALSRCPOS pSrcPos)
1263{
1264 if (g_hLockValClassTreeRWLock == NIL_RTSEMRW)
1265 rtLockValidatorLazyInit();
1266 int rcLock = RTSemRWRequestRead(g_hLockValClassTreeRWLock, RT_INDEFINITE_WAIT);
1267
1268 uint32_t uSrcPosHash = rtLockValidatorSrcPosHash(pSrcPos);
1269 RTLOCKVALCLASSINT *pClass = (RTLOCKVALCLASSINT *)RTAvllU32Get(&g_LockValClassTree, uSrcPosHash);
1270 while (pClass)
1271 {
1272 if (rtLockValidatorSrcPosCompare(&pClass->CreatePos, pSrcPos) == 0)
1273 break;
1274 pClass = (RTLOCKVALCLASSINT *)pClass->Core.pList;
1275 }
1276
1277 if (RT_SUCCESS(rcLock))
1278 RTSemRWReleaseRead(g_hLockValClassTreeRWLock);
1279 return pClass;
1280}
1281
1282
1283RTDECL(RTLOCKVALCLASS) RTLockValidatorClassForSrcPos(RT_SRC_POS_DECL, const char *pszNameFmt, ...)
1284{
1285 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_POS_NO_ID();
1286 RTLOCKVALCLASS hClass = RTLockValidatorClassFindForSrcPos(&SrcPos);
1287 if (hClass == NIL_RTLOCKVALCLASS)
1288 {
1289 /*
1290 * Create a new class and insert it into the tree.
1291 */
1292 va_list va;
1293 va_start(va, pszNameFmt);
1294 int rc = RTLockValidatorClassCreateExV(&hClass, &SrcPos,
1295 true /*fAutodidact*/, true /*fRecursionOk*/, false /*fStrictReleaseOrder*/,
1296 1 /*cMsMinDeadlock*/, 1 /*cMsMinOrder*/,
1297 pszNameFmt, va);
1298 va_end(va);
1299 if (RT_SUCCESS(rc))
1300 {
1301 if (g_hLockValClassTreeRWLock == NIL_RTSEMRW)
1302 rtLockValidatorLazyInit();
1303 int rcLock = RTSemRWRequestWrite(g_hLockValClassTreeRWLock, RT_INDEFINITE_WAIT);
1304
1305 Assert(!hClass->fInTree);
1306 hClass->fInTree = RTAvllU32Insert(&g_LockValClassTree, &hClass->Core);
1307 Assert(hClass->fInTree);
1308
1309 if (RT_SUCCESS(rcLock))
1310 RTSemRWReleaseWrite(g_hLockValClassTreeRWLock);
1311 return hClass;
1312 }
1313 }
1314 return hClass;
1315}
1316
1317
1318RTDECL(uint32_t) RTLockValidatorClassRetain(RTLOCKVALCLASS hClass)
1319{
1320 RTLOCKVALCLASSINT *pClass = hClass;
1321 AssertPtrReturn(pClass, UINT32_MAX);
1322 AssertReturn(pClass->u32Magic == RTLOCKVALCLASS_MAGIC, UINT32_MAX);
1323 return rtLockValidatorClassRetain(pClass);
1324}
1325
1326
1327RTDECL(uint32_t) RTLockValidatorClassRelease(RTLOCKVALCLASS hClass)
1328{
1329 RTLOCKVALCLASSINT *pClass = hClass;
1330 if (pClass == NIL_RTLOCKVALCLASS)
1331 return 0;
1332 AssertPtrReturn(pClass, UINT32_MAX);
1333 AssertReturn(pClass->u32Magic == RTLOCKVALCLASS_MAGIC, UINT32_MAX);
1334 return rtLockValidatorClassRelease(pClass);
1335}
1336
1337
1338/**
1339 * Worker for rtLockValidatorClassIsPriorClass that does a linear search thru
1340 * all the chunks for @a pPriorClass.
1341 *
1342 * @returns true / false.
1343 * @param pClass The class to search.
1344 * @param pPriorClass The class to search for.
1345 */
1346static bool rtLockValidatorClassIsPriorClassByLinearSearch(RTLOCKVALCLASSINT *pClass, RTLOCKVALCLASSINT *pPriorClass)
1347{
1348 for (PRTLOCKVALCLASSREFCHUNK pChunk = &pClass->PriorLocks; pChunk; pChunk = pChunk->pNext)
1349 for (uint32_t i = 0; i < RT_ELEMENTS(pChunk->aRefs); i++)
1350 {
1351 if (pChunk->aRefs[i].hClass == pPriorClass)
1352 {
1353 uint32_t cLookups = ASMAtomicIncU32(&pChunk->aRefs[i].cLookups);
1354 if (RT_UNLIKELY(cLookups >= RTLOCKVALCLASSREF_MAX_LOOKUPS_FIX))
1355 {
1356 ASMAtomicWriteU32(&pChunk->aRefs[i].cLookups, RTLOCKVALCLASSREF_MAX_LOOKUPS);
1357 cLookups = RTLOCKVALCLASSREF_MAX_LOOKUPS;
1358 }
1359
1360 /* update the hash table entry. */
1361 PRTLOCKVALCLASSREF *ppHashEntry = &pClass->apPriorLocksHash[RTLOCKVALCLASS_HASH(pPriorClass)];
1362 if ( !(*ppHashEntry)
1363 || (*ppHashEntry)->cLookups + 128 < cLookups)
1364 ASMAtomicWritePtr((void * volatile *)ppHashEntry, &pChunk->aRefs[i]);
1365
1366#ifdef RTLOCKVAL_WITH_CLASS_HASH_STATS
1367 ASMAtomicIncU32(&pClass->cHashMisses);
1368#endif
1369 return true;
1370 }
1371 }
1372
1373 return false;
1374}
1375
1376
1377/**
1378 * Checks if @a pPriorClass is a known prior class.
1379 *
1380 * @returns true / false.
1381 * @param pClass The class to search.
1382 * @param pPriorClass The class to search for.
1383 */
1384DECL_FORCE_INLINE(bool) rtLockValidatorClassIsPriorClass(RTLOCKVALCLASSINT *pClass, RTLOCKVALCLASSINT *pPriorClass)
1385{
1386 /*
1387 * Hash lookup here.
1388 */
1389 PRTLOCKVALCLASSREF pRef = pClass->apPriorLocksHash[RTLOCKVALCLASS_HASH(pPriorClass)];
1390 if ( pRef
1391 && pRef->hClass == pPriorClass)
1392 {
1393 uint32_t cLookups = ASMAtomicIncU32(&pRef->cLookups);
1394 if (RT_UNLIKELY(cLookups >= RTLOCKVALCLASSREF_MAX_LOOKUPS_FIX))
1395 ASMAtomicWriteU32(&pRef->cLookups, RTLOCKVALCLASSREF_MAX_LOOKUPS);
1396#ifdef RTLOCKVAL_WITH_CLASS_HASH_STATS
1397 ASMAtomicIncU32(&pClass->cHashHits);
1398#endif
1399 return true;
1400 }
1401
1402 return rtLockValidatorClassIsPriorClassByLinearSearch(pClass, pPriorClass);
1403}
1404
1405
1406/**
1407 * Adds a class to the prior list.
1408 *
1409 * @returns VINF_SUCCESS, VERR_NO_MEMORY or VERR_SEM_LV_WRONG_ORDER.
1410 * @param pClass The class to work on.
1411 * @param pPriorClass The class to add.
1412 * @param fAutodidacticism Whether we're teaching ourselves (true) or
1413 * somebody is teaching us via the API (false).
1414 * @param pSrcPos Where this rule was added (optional).
1415 */
1416static int rtLockValidatorClassAddPriorClass(RTLOCKVALCLASSINT *pClass, RTLOCKVALCLASSINT *pPriorClass,
1417 bool fAutodidacticism, PCRTLOCKVALSRCPOS pSrcPos)
1418{
1419 if (!RTCritSectIsInitialized(&g_LockValClassTeachCS))
1420 rtLockValidatorLazyInit();
1421 int rcLock = RTCritSectEnter(&g_LockValClassTeachCS);
1422
1423 /*
1424 * Check that there are no conflicts (no assert since we might race each other).
1425 */
1426 int rc = VERR_SEM_LV_INTERNAL_ERROR;
1427 if (!rtLockValidatorClassIsPriorClass(pPriorClass, pClass))
1428 {
1429 if (!rtLockValidatorClassIsPriorClass(pClass, pPriorClass))
1430 {
1431 /*
1432 * Scan the table for a free entry, allocating a new chunk if necessary.
1433 */
1434 for (PRTLOCKVALCLASSREFCHUNK pChunk = &pClass->PriorLocks; ; pChunk = pChunk->pNext)
1435 {
1436 bool fDone = false;
1437 for (uint32_t i = 0; i < RT_ELEMENTS(pChunk->aRefs); i++)
1438 {
1439 ASMAtomicCmpXchgHandle(&pChunk->aRefs[i].hClass, pPriorClass, NIL_RTLOCKVALCLASS, fDone);
1440 if (fDone)
1441 {
1442 pChunk->aRefs[i].fAutodidacticism = fAutodidacticism;
1443 rtLockValidatorClassRetain(pPriorClass);
1444 rc = VINF_SUCCESS;
1445 break;
1446 }
1447 }
1448 if (fDone)
1449 break;
1450
1451 /* If no more chunks, allocate a new one and insert the class before linking it. */
1452 if (!pChunk->pNext)
1453 {
1454 PRTLOCKVALCLASSREFCHUNK pNew = (PRTLOCKVALCLASSREFCHUNK)RTMemAlloc(sizeof(*pNew));
1455 if (!pNew)
1456 {
1457 rc = VERR_NO_MEMORY;
1458 break;
1459 }
1460 pNew->pNext = NULL;
1461 for (uint32_t i = 0; i < RT_ELEMENTS(pNew->aRefs); i++)
1462 {
1463 pNew->aRefs[i].hClass = NIL_RTLOCKVALCLASS;
1464 pNew->aRefs[i].cLookups = 0;
1465 pNew->aRefs[i].fAutodidacticism = false;
1466 pNew->aRefs[i].afReserved[0] = false;
1467 pNew->aRefs[i].afReserved[1] = false;
1468 pNew->aRefs[i].afReserved[2] = false;
1469 }
1470
1471 pNew->aRefs[0].hClass = pPriorClass;
1472 pNew->aRefs[0].fAutodidacticism = fAutodidacticism;
1473
1474 ASMAtomicWritePtr((void * volatile *)&pChunk->pNext, pNew);
1475 rtLockValidatorClassRetain(pPriorClass);
1476 rc = VINF_SUCCESS;
1477 break;
1478 }
1479 } /* chunk loop */
1480 }
1481 else
1482 rc = VINF_SUCCESS;
1483 }
1484 else
1485 rc = !g_fLockValSoftWrongOrder ? VERR_SEM_LV_WRONG_ORDER : VINF_SUCCESS;
1486
1487 if (RT_SUCCESS(rcLock))
1488 RTCritSectLeave(&g_LockValClassTeachCS);
1489 return rc;
1490}
1491
1492
1493RTDECL(int) RTLockValidatorClassAddPriorClass(RTLOCKVALCLASS hClass, RTLOCKVALCLASS hPriorClass)
1494{
1495 RTLOCKVALCLASSINT *pClass = hClass;
1496 AssertPtrReturn(pClass, VERR_INVALID_HANDLE);
1497 AssertReturn(pClass->u32Magic == RTLOCKVALCLASS_MAGIC, VERR_INVALID_HANDLE);
1498
1499 RTLOCKVALCLASSINT *pPriorClass = hPriorClass;
1500 AssertPtrReturn(pPriorClass, VERR_INVALID_HANDLE);
1501 AssertReturn(pPriorClass->u32Magic == RTLOCKVALCLASS_MAGIC, VERR_INVALID_HANDLE);
1502
1503 return rtLockValidatorClassAddPriorClass(pClass, pPriorClass, false /*fAutodidacticism*/, NULL);
1504}
1505
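/* Usage sketch: teaching lock order manually.  After this call a thread may
 * take a lock of hClassInner while holding one of hClassOuter, while the
 * reverse order triggers a VERR_SEM_LV_WRONG_ORDER complaint (unless soft
 * order is enabled).  The two handles are assumed to come from
 * RTLockValidatorClassCreate(). */
#if 0
int rc = RTLockValidatorClassAddPriorClass(hClassInner /*taken last*/, hClassOuter /*taken first*/);
AssertRC(rc);
#endif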
1506
1507RTDECL(int) RTLockValidatorClassEnforceStrictReleaseOrder(RTLOCKVALCLASS hClass, bool fEnabled)
1508{
1509 RTLOCKVALCLASSINT *pClass = hClass;
1510 AssertPtrReturn(pClass, VERR_INVALID_HANDLE);
1511 AssertReturn(pClass->u32Magic == RTLOCKVALCLASS_MAGIC, VERR_INVALID_HANDLE);
1512
1513 ASMAtomicWriteBool(&pClass->fStrictReleaseOrder, fEnabled);
1514 return VINF_SUCCESS;
1515}
1516
1517
1518/**
1519 * Unlinks all siblings.
1520 *
1521 * This is used during record deletion and assumes no races.
1522 *
1523 * @param pCore One of the siblings.
1524 */
1525static void rtLockValidatorUnlinkAllSiblings(PRTLOCKVALRECCORE pCore)
1526{
1527 /* ASSUMES sibling destruction doesn't involve any races and that all
1528 related records are to be disposed of now. */
1529 PRTLOCKVALRECUNION pSibling = (PRTLOCKVALRECUNION)pCore;
1530 while (pSibling)
1531 {
1532 PRTLOCKVALRECUNION volatile *ppCoreNext;
1533 switch (pSibling->Core.u32Magic)
1534 {
1535 case RTLOCKVALRECEXCL_MAGIC:
1536 case RTLOCKVALRECEXCL_MAGIC_DEAD:
1537 ppCoreNext = &pSibling->Excl.pSibling;
1538 break;
1539
1540 case RTLOCKVALRECSHRD_MAGIC:
1541 case RTLOCKVALRECSHRD_MAGIC_DEAD:
1542 ppCoreNext = &pSibling->Shared.pSibling;
1543 break;
1544
1545 default:
1546 AssertFailed();
1547 ppCoreNext = NULL;
1548 break;
1549 }
1550 if (RT_UNLIKELY(!ppCoreNext))
1551 break;
1552 pSibling = (PRTLOCKVALRECUNION)ASMAtomicXchgPtr((void * volatile *)ppCoreNext, NULL);
1553 }
1554}
1555
1556
1557RTDECL(int) RTLockValidatorRecMakeSiblings(PRTLOCKVALRECCORE pRec1, PRTLOCKVALRECCORE pRec2)
1558{
1559 /*
1560 * Validate input.
1561 */
1562 PRTLOCKVALRECUNION p1 = (PRTLOCKVALRECUNION)pRec1;
1563 PRTLOCKVALRECUNION p2 = (PRTLOCKVALRECUNION)pRec2;
1564
1565 AssertPtrReturn(p1, VERR_SEM_LV_INVALID_PARAMETER);
1566 AssertReturn( p1->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC
1567 || p1->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
1568 , VERR_SEM_LV_INVALID_PARAMETER);
1569
1570 AssertPtrReturn(p2, VERR_SEM_LV_INVALID_PARAMETER);
1571 AssertReturn( p2->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC
1572 || p2->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
1573 , VERR_SEM_LV_INVALID_PARAMETER);
1574
1575 /*
1576 * Link them (circular list).
1577 */
1578 if ( p1->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC
1579 && p2->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC)
1580 {
1581 p1->Excl.pSibling = p2;
1582 p2->Shared.pSibling = p1;
1583 }
1584 else if ( p1->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
1585 && p2->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC)
1586 {
1587 p1->Shared.pSibling = p2;
1588 p2->Excl.pSibling = p1;
1589 }
1590 else
1591 AssertFailedReturn(VERR_SEM_LV_INVALID_PARAMETER); /* unsupported mix */
1592
1593 return VINF_SUCCESS;
1594}
1595
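/* Usage sketch: a read/write semaphore keeps one exclusive record for the
 * writer and one shared record for the readers; linking the two as siblings
 * lets the validator treat them as facets of the same lock.  WriteRec and
 * ReadRec are assumed to be already initialized members of an illustrative
 * semaphore structure. */
#if 0
int rc = RTLockValidatorRecMakeSiblings(&pThis->WriteRec.Core, &pThis->ReadRec.Core);
AssertRC(rc);
#endif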
1596
1597/**
1598 * Gets the lock name for the given record.
1599 *
1600 * @returns Read-only lock name.
1601 * @param pRec The lock record.
1602 */
1603DECL_FORCE_INLINE(const char *) rtLockValidatorRecName(PRTLOCKVALRECUNION pRec)
1604{
1605 switch (pRec->Core.u32Magic)
1606 {
1607 case RTLOCKVALRECEXCL_MAGIC:
1608 return pRec->Excl.szName;
1609 case RTLOCKVALRECSHRD_MAGIC:
1610 return pRec->Shared.szName;
1611 case RTLOCKVALRECSHRDOWN_MAGIC:
1612 return pRec->ShrdOwner.pSharedRec ? pRec->ShrdOwner.pSharedRec->szName : "orphaned";
1613 case RTLOCKVALRECNEST_MAGIC:
1614 pRec = rtLockValidatorReadRecUnionPtr(&pRec->Nest.pRec);
1615 if (VALID_PTR(pRec))
1616 {
1617 switch (pRec->Core.u32Magic)
1618 {
1619 case RTLOCKVALRECEXCL_MAGIC:
1620 return pRec->Excl.szName;
1621 case RTLOCKVALRECSHRD_MAGIC:
1622 return pRec->Shared.szName;
1623 case RTLOCKVALRECSHRDOWN_MAGIC:
1624 return pRec->ShrdOwner.pSharedRec ? pRec->ShrdOwner.pSharedRec->szName : "orphaned";
1625 default:
1626 return "unknown-nested";
1627 }
1628 }
1629 return "orphaned-nested";
1630 default:
1631 return "unknown";
1632 }
1633}
1634
1635
1636/**
1637 * Gets the class for this locking record.
1638 *
1639 * @returns Pointer to the class or NIL_RTLOCKVALCLASS.
1640 * @param pRec The lock validator record.
1641 */
1642DECLINLINE(RTLOCKVALCLASSINT *) rtLockValidatorRecGetClass(PRTLOCKVALRECUNION pRec)
1643{
1644 switch (pRec->Core.u32Magic)
1645 {
1646 case RTLOCKVALRECEXCL_MAGIC:
1647 return pRec->Excl.hClass;
1648
1649 case RTLOCKVALRECSHRD_MAGIC:
1650 return pRec->Shared.hClass;
1651
1652 case RTLOCKVALRECSHRDOWN_MAGIC:
1653 {
1654 PRTLOCKVALRECSHRD pSharedRec = pRec->ShrdOwner.pSharedRec;
1655 if (RT_LIKELY( VALID_PTR(pSharedRec)
1656 && pSharedRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC))
1657 return pSharedRec->hClass;
1658 return NIL_RTLOCKVALCLASS;
1659 }
1660
1661 case RTLOCKVALRECNEST_MAGIC:
1662 {
1663 PRTLOCKVALRECUNION pRealRec = pRec->Nest.pRec;
1664 if (VALID_PTR(pRealRec))
1665 {
1666 switch (pRealRec->Core.u32Magic)
1667 {
1668 case RTLOCKVALRECEXCL_MAGIC:
1669 return pRealRec->Excl.hClass;
1670
1671 case RTLOCKVALRECSHRDOWN_MAGIC:
1672 {
1673 PRTLOCKVALRECSHRD pSharedRec = pRealRec->ShrdOwner.pSharedRec;
1674 if (RT_LIKELY( VALID_PTR(pSharedRec)
1675 && pSharedRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC))
1676 return pSharedRec->hClass;
1677 break;
1678 }
1679
1680 default:
1681 AssertMsgFailed(("%p %p %#x\n", pRec, pRealRec, pRealRec->Core.u32Magic));
1682 break;
1683 }
1684 }
1685 return NIL_RTLOCKVALCLASS;
1686 }
1687
1688 default:
1689 AssertMsgFailed(("%#x\n", pRec->Core.u32Magic));
1690 return NIL_RTLOCKVALCLASS;
1691 }
1692}
1693
1694
1695/**
1696 * Gets the class for this locking record and the pointer to the one below it in
1697 * the stack.
1698 *
1699 * @returns Pointer to the class or NIL_RTLOCKVALCLASS.
1700 * @param pRec The lock validator record.
1701 * @param puSubClass Where to return the sub-class.
1702 * @param ppDown Where to return the pointer to the record below.
1703 */
1704DECL_FORCE_INLINE(RTLOCKVALCLASSINT *)
1705rtLockValidatorRecGetClassesAndDown(PRTLOCKVALRECUNION pRec, uint32_t *puSubClass, PRTLOCKVALRECUNION *ppDown)
1706{
1707 switch (pRec->Core.u32Magic)
1708 {
1709 case RTLOCKVALRECEXCL_MAGIC:
1710 *ppDown = pRec->Excl.pDown;
1711 *puSubClass = pRec->Excl.uSubClass;
1712 return pRec->Excl.hClass;
1713
1714 case RTLOCKVALRECSHRD_MAGIC:
1715 *ppDown = NULL;
1716 *puSubClass = pRec->Shared.uSubClass;
1717 return pRec->Shared.hClass;
1718
1719 case RTLOCKVALRECSHRDOWN_MAGIC:
1720 {
1721 *ppDown = pRec->ShrdOwner.pDown;
1722
1723 PRTLOCKVALRECSHRD pSharedRec = pRec->ShrdOwner.pSharedRec;
1724 if (RT_LIKELY( VALID_PTR(pSharedRec)
1725 && pSharedRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC))
1726 {
1727 *puSubClass = pSharedRec->uSubClass;
1728 return pSharedRec->hClass;
1729 }
1730 *puSubClass = RTLOCKVAL_SUB_CLASS_NONE;
1731 return NIL_RTLOCKVALCLASS;
1732 }
1733
1734 case RTLOCKVALRECNEST_MAGIC:
1735 {
1736 *ppDown = pRec->Nest.pDown;
1737
1738 PRTLOCKVALRECUNION pRealRec = pRec->Nest.pRec;
1739 if (VALID_PTR(pRealRec))
1740 {
1741 switch (pRealRec->Core.u32Magic)
1742 {
1743 case RTLOCKVALRECEXCL_MAGIC:
1744 *puSubClass = pRealRec->Excl.uSubClass;
1745 return pRealRec->Excl.hClass;
1746
1747 case RTLOCKVALRECSHRDOWN_MAGIC:
1748 {
1749 PRTLOCKVALRECSHRD pSharedRec = pRealRec->ShrdOwner.pSharedRec;
1750 if (RT_LIKELY( VALID_PTR(pSharedRec)
1751 && pSharedRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC))
1752 {
1753 *puSubClass = pSharedRec->uSubClass;
1754 return pSharedRec->hClass;
1755 }
1756 break;
1757 }
1758
1759 default:
1760 AssertMsgFailed(("%p %p %#x\n", pRec, pRealRec, pRealRec->Core.u32Magic));
1761 break;
1762 }
1763 }
1764 *puSubClass = RTLOCKVAL_SUB_CLASS_NONE;
1765 return NIL_RTLOCKVALCLASS;
1766 }
1767
1768 default:
1769 AssertMsgFailed(("%#x\n", pRec->Core.u32Magic));
1770 *ppDown = NULL;
1771 *puSubClass = RTLOCKVAL_SUB_CLASS_NONE;
1772 return NIL_RTLOCKVALCLASS;
1773 }
1774}
1775
1776
1777/**
1778 * Gets the sub-class for a lock record.
1779 *
1780 * @returns the sub-class.
1781 * @param pRec The lock validator record.
1782 */
1783DECLINLINE(uint32_t) rtLockValidatorRecGetSubClass(PRTLOCKVALRECUNION pRec)
1784{
1785 switch (pRec->Core.u32Magic)
1786 {
1787 case RTLOCKVALRECEXCL_MAGIC:
1788 return pRec->Excl.uSubClass;
1789
1790 case RTLOCKVALRECSHRD_MAGIC:
1791 return pRec->Shared.uSubClass;
1792
1793 case RTLOCKVALRECSHRDOWN_MAGIC:
1794 {
1795 PRTLOCKVALRECSHRD pSharedRec = pRec->ShrdOwner.pSharedRec;
1796 if (RT_LIKELY( VALID_PTR(pSharedRec)
1797 && pSharedRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC))
1798 return pSharedRec->uSubClass;
1799 return RTLOCKVAL_SUB_CLASS_NONE;
1800 }
1801
1802 case RTLOCKVALRECNEST_MAGIC:
1803 {
1804 PRTLOCKVALRECUNION pRealRec = pRec->Nest.pRec;
1805 if (VALID_PTR(pRealRec))
1806 {
1807 switch (pRealRec->Core.u32Magic)
1808 {
1809 case RTLOCKVALRECEXCL_MAGIC:
1810                        return pRealRec->Excl.uSubClass;
1811
1812 case RTLOCKVALRECSHRDOWN_MAGIC:
1813 {
1814 PRTLOCKVALRECSHRD pSharedRec = pRealRec->ShrdOwner.pSharedRec;
1815 if (RT_LIKELY( VALID_PTR(pSharedRec)
1816 && pSharedRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC))
1817 return pSharedRec->uSubClass;
1818 break;
1819 }
1820
1821 default:
1822 AssertMsgFailed(("%p %p %#x\n", pRec, pRealRec, pRealRec->Core.u32Magic));
1823 break;
1824 }
1825 }
1826 return RTLOCKVAL_SUB_CLASS_NONE;
1827 }
1828
1829 default:
1830 AssertMsgFailed(("%#x\n", pRec->Core.u32Magic));
1831 return RTLOCKVAL_SUB_CLASS_NONE;
1832 }
1833}
1834
1835
1836
1837
1838/**
1839 * Calculates the depth of a lock stack.
1840 *
1841 * @returns Number of stack frames.
1842 * @param pThread The thread.
1843 */
1844static uint32_t rtLockValidatorStackDepth(PRTTHREADINT pThread)
1845{
1846 uint32_t cEntries = 0;
1847 PRTLOCKVALRECUNION pCur = rtLockValidatorReadRecUnionPtr(&pThread->LockValidator.pStackTop);
1848 while (VALID_PTR(pCur))
1849 {
1850 switch (pCur->Core.u32Magic)
1851 {
1852 case RTLOCKVALRECEXCL_MAGIC:
1853 pCur = rtLockValidatorReadRecUnionPtr(&pCur->Excl.pDown);
1854 break;
1855
1856 case RTLOCKVALRECSHRDOWN_MAGIC:
1857 pCur = rtLockValidatorReadRecUnionPtr(&pCur->ShrdOwner.pDown);
1858 break;
1859
1860 case RTLOCKVALRECNEST_MAGIC:
1861 pCur = rtLockValidatorReadRecUnionPtr(&pCur->Nest.pDown);
1862 break;
1863
1864 default:
1865 AssertMsgFailedReturn(("%#x\n", pCur->Core.u32Magic), cEntries);
1866 }
1867 cEntries++;
1868 }
1869 return cEntries;
1870}
1871
1872
1873/**
1874 * Checks if the stack contains @a pRec.
1875 *
1876 * @returns true / false.
1877 * @param pThreadSelf The current thread.
1878 * @param pRec The lock record.
1879 */
1880static bool rtLockValidatorStackContainsRec(PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION pRec)
1881{
1882 PRTLOCKVALRECUNION pCur = pThreadSelf->LockValidator.pStackTop;
1883 while (pCur)
1884 {
1885 AssertPtrReturn(pCur, false);
1886 if (pCur == pRec)
1887 return true;
1888 switch (pCur->Core.u32Magic)
1889 {
1890 case RTLOCKVALRECEXCL_MAGIC:
1891 Assert(pCur->Excl.cRecursion >= 1);
1892 pCur = pCur->Excl.pDown;
1893 break;
1894
1895 case RTLOCKVALRECSHRDOWN_MAGIC:
1896 Assert(pCur->ShrdOwner.cRecursion >= 1);
1897 pCur = pCur->ShrdOwner.pDown;
1898 break;
1899
1900 case RTLOCKVALRECNEST_MAGIC:
1901 Assert(pCur->Nest.cRecursion > 1);
1902 pCur = pCur->Nest.pDown;
1903 break;
1904
1905 default:
1906 AssertMsgFailedReturn(("%#x\n", pCur->Core.u32Magic), false);
1907 }
1908 }
1909 return false;
1910}
1911
1912
1913/**
1914 * Pushes a lock record onto the stack.
1915 *
1916 * @param pThreadSelf The current thread.
1917 * @param pRec The lock record.
1918 */
1919static void rtLockValidatorStackPush(PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION pRec)
1920{
1921 Assert(pThreadSelf == RTThreadSelf());
1922 Assert(!rtLockValidatorStackContainsRec(pThreadSelf, pRec));
1923
1924 switch (pRec->Core.u32Magic)
1925 {
1926 case RTLOCKVALRECEXCL_MAGIC:
1927 Assert(pRec->Excl.cRecursion == 1);
1928 Assert(pRec->Excl.pDown == NULL);
1929 rtLockValidatorWriteRecUnionPtr(&pRec->Excl.pDown, pThreadSelf->LockValidator.pStackTop);
1930 break;
1931
1932 case RTLOCKVALRECSHRDOWN_MAGIC:
1933 Assert(pRec->ShrdOwner.cRecursion == 1);
1934 Assert(pRec->ShrdOwner.pDown == NULL);
1935 rtLockValidatorWriteRecUnionPtr(&pRec->ShrdOwner.pDown, pThreadSelf->LockValidator.pStackTop);
1936 break;
1937
1938 default:
1939 AssertMsgFailedReturnVoid(("%#x\n", pRec->Core.u32Magic));
1940 }
1941 rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pStackTop, pRec);
1942}
1943
1944
1945/**
1946 * Pops a lock record off the stack.
1947 *
1948 * @param pThreadSelf The current thread.
1949 * @param pRec The lock record.
1950 */
1951static void rtLockValidatorStackPop(PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION pRec)
1952{
1953 Assert(pThreadSelf == RTThreadSelf());
1954
1955 PRTLOCKVALRECUNION pDown;
1956 switch (pRec->Core.u32Magic)
1957 {
1958 case RTLOCKVALRECEXCL_MAGIC:
1959 Assert(pRec->Excl.cRecursion == 0);
1960 pDown = pRec->Excl.pDown;
1961 rtLockValidatorWriteRecUnionPtr(&pRec->Excl.pDown, NULL); /* lazy bird */
1962 break;
1963
1964 case RTLOCKVALRECSHRDOWN_MAGIC:
1965 Assert(pRec->ShrdOwner.cRecursion == 0);
1966 pDown = pRec->ShrdOwner.pDown;
1967 rtLockValidatorWriteRecUnionPtr(&pRec->ShrdOwner.pDown, NULL);
1968 break;
1969
1970 default:
1971 AssertMsgFailedReturnVoid(("%#x\n", pRec->Core.u32Magic));
1972 }
1973 if (pThreadSelf->LockValidator.pStackTop == pRec)
1974 rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pStackTop, pDown);
1975 else
1976 {
1977 /* Find the pointer to our record and unlink ourselves. */
1978 PRTLOCKVALRECUNION pCur = pThreadSelf->LockValidator.pStackTop;
1979 while (pCur)
1980 {
1981 PRTLOCKVALRECUNION volatile *ppDown;
1982 switch (pCur->Core.u32Magic)
1983 {
1984 case RTLOCKVALRECEXCL_MAGIC:
1985 Assert(pCur->Excl.cRecursion >= 1);
1986 ppDown = &pCur->Excl.pDown;
1987 break;
1988
1989 case RTLOCKVALRECSHRDOWN_MAGIC:
1990 Assert(pCur->ShrdOwner.cRecursion >= 1);
1991 ppDown = &pCur->ShrdOwner.pDown;
1992 break;
1993
1994 case RTLOCKVALRECNEST_MAGIC:
1995 Assert(pCur->Nest.cRecursion >= 1);
1996 ppDown = &pCur->Nest.pDown;
1997 break;
1998
1999 default:
2000 AssertMsgFailedReturnVoid(("%#x\n", pCur->Core.u32Magic));
2001 }
2002 pCur = *ppDown;
2003 if (pCur == pRec)
2004 {
2005 rtLockValidatorWriteRecUnionPtr(ppDown, pDown);
2006 return;
2007 }
2008 }
2009 AssertMsgFailed(("%p %p\n", pRec, pThreadSelf));
2010 }
2011}
2012
2013
2014/**
2015 * Creates and pushes a lock recursion record onto the stack.
2016 *
2017 * @param pThreadSelf The current thread.
2018 * @param pRec The lock record.
2019 * @param pSrcPos Where the recursion occurred.
2020 */
2021static void rtLockValidatorStackPushRecursion(PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION pRec, PCRTLOCKVALSRCPOS pSrcPos)
2022{
2023 Assert(pThreadSelf == RTThreadSelf());
2024 Assert(rtLockValidatorStackContainsRec(pThreadSelf, pRec));
2025
2026#ifdef RTLOCKVAL_WITH_RECURSION_RECORDS
2027 /*
2028 * Allocate a new recursion record
2029 */
2030 PRTLOCKVALRECNEST pRecursionRec = pThreadSelf->LockValidator.pFreeNestRecs;
2031 if (pRecursionRec)
2032 pThreadSelf->LockValidator.pFreeNestRecs = pRecursionRec->pNextFree;
2033 else
2034 {
2035 pRecursionRec = (PRTLOCKVALRECNEST)RTMemAlloc(sizeof(*pRecursionRec));
2036 if (!pRecursionRec)
2037 return;
2038 }
2039
2040 /*
2041 * Initialize it.
2042 */
2043 switch (pRec->Core.u32Magic)
2044 {
2045 case RTLOCKVALRECEXCL_MAGIC:
2046 pRecursionRec->cRecursion = pRec->Excl.cRecursion;
2047 break;
2048
2049 case RTLOCKVALRECSHRDOWN_MAGIC:
2050 pRecursionRec->cRecursion = pRec->ShrdOwner.cRecursion;
2051 break;
2052
2053 default:
2054 AssertMsgFailed(("%#x\n", pRec->Core.u32Magic));
2055 rtLockValidatorSerializeDestructEnter();
2056 rtLockValidatorSerializeDestructLeave();
2057 RTMemFree(pRecursionRec);
2058 return;
2059 }
2060 Assert(pRecursionRec->cRecursion > 1);
2061 pRecursionRec->pRec = pRec;
2062 pRecursionRec->pDown = NULL;
2063 pRecursionRec->pNextFree = NULL;
2064 rtLockValidatorSrcPosCopy(&pRecursionRec->SrcPos, pSrcPos);
2065 pRecursionRec->Core.u32Magic = RTLOCKVALRECNEST_MAGIC;
2066
2067 /*
2068 * Link it.
2069 */
2070 pRecursionRec->pDown = pThreadSelf->LockValidator.pStackTop;
2071 rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pStackTop, (PRTLOCKVALRECUNION)pRecursionRec);
2072#endif /* RTLOCKVAL_WITH_RECURSION_RECORDS */
2073}
2074
2075
2076/**
2077 * Pops a lock recursion record off the stack.
2078 *
2079 * @param pThreadSelf The current thread.
2080 * @param pRec The lock record.
2081 */
2082static void rtLockValidatorStackPopRecursion(PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION pRec)
2083{
2084 Assert(pThreadSelf == RTThreadSelf());
2085 Assert(rtLockValidatorStackContainsRec(pThreadSelf, pRec));
2086
2087 uint32_t cRecursion;
2088 switch (pRec->Core.u32Magic)
2089 {
2090 case RTLOCKVALRECEXCL_MAGIC: cRecursion = pRec->Excl.cRecursion; break;
2091 case RTLOCKVALRECSHRDOWN_MAGIC: cRecursion = pRec->ShrdOwner.cRecursion; break;
2092 default: AssertMsgFailedReturnVoid(("%#x\n", pRec->Core.u32Magic));
2093 }
2094 Assert(cRecursion >= 1);
2095
2096#ifdef RTLOCKVAL_WITH_RECURSION_RECORDS
2097 /*
2098 * Pop the recursion record.
2099 */
2100 PRTLOCKVALRECUNION pNest = pThreadSelf->LockValidator.pStackTop;
2101 if ( pNest != NULL
2102 && pNest->Core.u32Magic == RTLOCKVALRECNEST_MAGIC
2103 && pNest->Nest.pRec == pRec
2104 )
2105 {
2106 Assert(pNest->Nest.cRecursion == cRecursion + 1);
2107 rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pStackTop, pNest->Nest.pDown);
2108 }
2109 else
2110 {
2111 /* Find the record above ours. */
2112 PRTLOCKVALRECUNION volatile *ppDown = NULL;
2113 for (;;)
2114 {
2115 AssertMsgReturnVoid(pNest, ("%p %p\n", pRec, pThreadSelf));
2116 switch (pNest->Core.u32Magic)
2117 {
2118 case RTLOCKVALRECEXCL_MAGIC:
2119 ppDown = &pNest->Excl.pDown;
2120 pNest = *ppDown;
2121 continue;
2122 case RTLOCKVALRECSHRDOWN_MAGIC:
2123 ppDown = &pNest->ShrdOwner.pDown;
2124 pNest = *ppDown;
2125 continue;
2126 case RTLOCKVALRECNEST_MAGIC:
2127 if (pNest->Nest.pRec == pRec)
2128 break;
2129 ppDown = &pNest->Nest.pDown;
2130 pNest = *ppDown;
2131 continue;
2132 default:
2133 AssertMsgFailedReturnVoid(("%#x\n", pNest->Core.u32Magic));
2134 }
2135 break; /* ugly */
2136 }
2137 Assert(pNest->Nest.cRecursion == cRecursion + 1);
2138 rtLockValidatorWriteRecUnionPtr(ppDown, pNest->Nest.pDown);
2139 }
2140
2141 /*
2142 * Invalidate and free the record.
2143 */
2144    ASMAtomicWriteU32(&pNest->Core.u32Magic, RTLOCKVALRECNEST_MAGIC_DEAD);
2145 rtLockValidatorWriteRecUnionPtr(&pNest->Nest.pDown, NULL);
2146 rtLockValidatorWriteRecUnionPtr(&pNest->Nest.pRec, NULL);
2147 pNest->Nest.cRecursion = 0;
2148 pNest->Nest.pNextFree = pThreadSelf->LockValidator.pFreeNestRecs;
2149 pThreadSelf->LockValidator.pFreeNestRecs = &pNest->Nest;
2150#endif /* RTLOCKVAL_WITH_RECURSION_RECORDS */
2151}
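
/* For illustration (a hypothetical sequence, not code from this file): if a
 * thread enters lock A, then lock B, then re-enters A, the per-thread lock
 * stack maintained by the push/pop routines above looks like this:
 *
 *      pStackTop -> [NEST: pRec=&A, cRecursion=2]
 *                -> [B:    cRecursion=1]
 *                -> [A:    cRecursion=2]
 *                -> NULL
 *
 * rtLockValidatorStackPushRecursion puts the NEST frame on top while A stays
 * where it was originally pushed; rtLockValidatorStackPopRecursion unlinks
 * the NEST frame again and recycles it via the pFreeNestRecs free list.
 */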
2152
2153
2154/**
2155 * Helper for rtLockValidatorStackCheckLockingOrder that does the complaining
2156 * and returns VERR_SEM_LV_WRONG_ORDER.
2157 */
2158static int rtLockValidatorStackWrongOrder(const char *pszWhat, PCRTLOCKVALSRCPOS pSrcPos, PRTTHREADINT pThreadSelf,
2159 PRTLOCKVALRECUNION pRec1, PRTLOCKVALRECUNION pRec2,
2160 RTLOCKVALCLASSINT *pClass1, RTLOCKVALCLASSINT *pClass2)
2161
2162
2163{
2164 rtLockValComplainFirst(pszWhat, pSrcPos, pThreadSelf, pRec1, false);
2165 rtLockValComplainAboutLock("Other lock: ", pRec2, "\n");
2166 rtLockValComplainAboutClass("My class: ", pClass1, rtLockValidatorRecGetSubClass(pRec1), true /*fVerbose*/);
2167 rtLockValComplainAboutClass("Other class: ", pClass2, rtLockValidatorRecGetSubClass(pRec2), true /*fVerbose*/);
2168 rtLockValComplainAboutLockStack(pThreadSelf, 0, 0, pRec2);
2169 rtLockValComplainPanic();
2170 return !g_fLockValSoftWrongOrder ? VERR_SEM_LV_WRONG_ORDER : VINF_SUCCESS;
2171}
2172
2173
2174/**
2175 * Checks if the sub-class order is ok or not.
2176 *
2177 * Used to deal with two locks from the same class.
2178 *
2179 * @returns true if ok, false if not.
2180 * @param uSubClass1 The sub-class of the lock that is being
2181 * considered.
2182 * @param uSubClass2 The sub-class of the lock that is already being
2183 * held.
2184 */
2185DECL_FORCE_INLINE(bool) rtLockValidatorIsSubClassOrderOk(uint32_t uSubClass1, uint32_t uSubClass2)
2186{
2187 if (uSubClass1 > uSubClass2)
2188 {
2189 /* NONE kills ANY. */
2190 if (uSubClass2 == RTLOCKVAL_SUB_CLASS_NONE)
2191 return false;
2192 return true;
2193 }
2194
2195 /* ANY counters all USER values. (uSubClass1 == NONE only if they are equal) */
2196 AssertCompile(RTLOCKVAL_SUB_CLASS_ANY > RTLOCKVAL_SUB_CLASS_NONE);
2197 if (uSubClass1 == RTLOCKVAL_SUB_CLASS_ANY)
2198 return true;
2199 return false;
2200}
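
/* A few concrete verdicts from the rule above, assuming the usual ordering
 * of the constants (NONE < ANY < USER; the scenarios are illustrative):
 *
 *      taking (uSubClass1)    held (uSubClass2)    result
 *      USER + 1               USER                 ok  (ascending order)
 *      USER                   USER + 1             bad (descending order)
 *      USER                   NONE                 bad (NONE defines no order)
 *      ANY                    USER                 ok  (ANY always passes)
 *      NONE                   NONE                 bad (equal and not ANY)
 */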
2201
2202
2203/**
2204 * Checks if the class and sub-class lock order is ok.
2205 *
2206 * @returns true if ok, false if not.
2207 * @param pClass1 The class of the lock that is being considered.
2208 * @param uSubClass1 The sub-class that goes with @a pClass1.
2209 * @param pClass2 The class of the lock that is already being
2210 * held.
2211 * @param uSubClass2 The sub-class that goes with @a pClass2.
2212 */
2213DECL_FORCE_INLINE(bool) rtLockValidatorIsClassOrderOk(RTLOCKVALCLASSINT *pClass1, uint32_t uSubClass1,
2214 RTLOCKVALCLASSINT *pClass2, uint32_t uSubClass2)
2215{
2216 if (pClass1 == pClass2)
2217 return rtLockValidatorIsSubClassOrderOk(uSubClass1, uSubClass2);
2218 return rtLockValidatorClassIsPriorClass(pClass1, pClass2);
2219}
2220
2221
2222/**
2223 * Checks the locking order, part two.
2224 *
2225 * @returns VINF_SUCCESS, VERR_SEM_LV_WRONG_ORDER or VERR_SEM_LV_INTERNAL_ERROR.
2226 * @param pClass The lock class.
2227 * @param uSubClass The lock sub-class.
2228 * @param pThreadSelf The current thread.
2229 * @param pRec The lock record.
2230 * @param pSrcPos The source position of the locking operation.
2231 */
2232static int rtLockValidatorStackCheckLockingOrder2(RTLOCKVALCLASSINT * const pClass, uint32_t const uSubClass,
2233 PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION const pRec,
2234 PCRTLOCKVALSRCPOS const pSrcPos,
2235 RTLOCKVALCLASSINT * const pFirstBadClass,
2236 PRTLOCKVALRECUNION const pFirstBadRec,
2237 PRTLOCKVALRECUNION const pFirstBadDown)
2238{
2239 /*
2240 * Something went wrong, pCur is pointing to where.
2241 */
2242 if ( pClass == pFirstBadClass
2243 || rtLockValidatorClassIsPriorClass(pFirstBadClass, pClass))
2244 return rtLockValidatorStackWrongOrder("Wrong locking order!", pSrcPos, pThreadSelf,
2245 pRec, pFirstBadRec, pClass, pFirstBadClass);
2246 if (!pClass->fAutodidact)
2247 return rtLockValidatorStackWrongOrder("Wrong locking order! (unknown)", pSrcPos, pThreadSelf,
2248 pRec, pFirstBadRec, pClass, pFirstBadClass);
2249
2250 /*
2251 * This class is an autodidact, so we have to check out the rest of the stack
2252 * for direct violations.
2253 */
2254 uint32_t cNewRules = 1;
2255 PRTLOCKVALRECUNION pCur = pFirstBadDown;
2256 while (pCur)
2257 {
2258 AssertPtrReturn(pCur, VERR_SEM_LV_INTERNAL_ERROR);
2259
2260 if (pCur->Core.u32Magic == RTLOCKVALRECNEST_MAGIC)
2261 pCur = pCur->Nest.pDown;
2262 else
2263 {
2264 PRTLOCKVALRECUNION pDown;
2265 uint32_t uPriorSubClass;
2266 RTLOCKVALCLASSINT *pPriorClass = rtLockValidatorRecGetClassesAndDown(pCur, &uPriorSubClass, &pDown);
2267 if (pPriorClass != NIL_RTLOCKVALCLASS)
2268 {
2269 AssertPtrReturn(pPriorClass, VERR_SEM_LV_INTERNAL_ERROR);
2270 AssertReturn(pPriorClass->u32Magic == RTLOCKVALCLASS_MAGIC, VERR_SEM_LV_INTERNAL_ERROR);
2271 if (!rtLockValidatorIsClassOrderOk(pClass, uSubClass, pPriorClass, uPriorSubClass))
2272 {
2273 if ( pClass == pPriorClass
2274 || rtLockValidatorClassIsPriorClass(pPriorClass, pClass))
2275 return rtLockValidatorStackWrongOrder("Wrong locking order! (more than one)", pSrcPos, pThreadSelf,
2276 pRec, pCur, pClass, pPriorClass);
2277 cNewRules++;
2278 }
2279 }
2280 pCur = pDown;
2281 }
2282 }
2283
2284 if (cNewRules == 1)
2285 {
2286 /*
2287 * Special case the simple operation, hoping that it will be a
2288 * frequent case.
2289 */
2290 int rc = rtLockValidatorClassAddPriorClass(pClass, pFirstBadClass, true /*fAutodidacticism*/, pSrcPos);
2291 if (rc == VERR_SEM_LV_WRONG_ORDER)
2292 return rtLockValidatorStackWrongOrder("Wrong locking order! (race)", pSrcPos, pThreadSelf,
2293 pRec, pFirstBadRec, pClass, pFirstBadClass);
2294 Assert(RT_SUCCESS(rc) || rc == VERR_NO_MEMORY);
2295 }
2296 else
2297 {
2298 /*
2299 * We may be adding more than one rule, so we have to take the lock
2300 * before starting to add the rules. This means we have to check
2301 * the state after taking it since we might be racing someone adding
2302 * a conflicting rule.
2303 */
2304 if (!RTCritSectIsInitialized(&g_LockValClassTeachCS))
2305 rtLockValidatorLazyInit();
2306 int rcLock = RTCritSectEnter(&g_LockValClassTeachCS);
2307
2308 /* Check */
2309 pCur = pFirstBadRec;
2310 while (pCur)
2311 {
2312 if (pCur->Core.u32Magic == RTLOCKVALRECNEST_MAGIC)
2313 pCur = pCur->Nest.pDown;
2314 else
2315 {
2316 uint32_t uPriorSubClass;
2317 PRTLOCKVALRECUNION pDown;
2318 RTLOCKVALCLASSINT *pPriorClass = rtLockValidatorRecGetClassesAndDown(pCur, &uPriorSubClass, &pDown);
2319 if (pPriorClass != NIL_RTLOCKVALCLASS)
2320 {
2321 if (!rtLockValidatorIsClassOrderOk(pClass, uSubClass, pPriorClass, uPriorSubClass))
2322 {
2323 if ( pClass == pPriorClass
2324 || rtLockValidatorClassIsPriorClass(pPriorClass, pClass))
2325 {
2326 if (RT_SUCCESS(rcLock))
2327 RTCritSectLeave(&g_LockValClassTeachCS);
2328 return rtLockValidatorStackWrongOrder("Wrong locking order! (2nd)", pSrcPos, pThreadSelf,
2329 pRec, pCur, pClass, pPriorClass);
2330 }
2331 }
2332 }
2333 pCur = pDown;
2334 }
2335 }
2336
2337 /* Iterate the stack yet again, adding new rules this time. */
2338 pCur = pFirstBadRec;
2339 while (pCur)
2340 {
2341 if (pCur->Core.u32Magic == RTLOCKVALRECNEST_MAGIC)
2342 pCur = pCur->Nest.pDown;
2343 else
2344 {
2345 uint32_t uPriorSubClass;
2346 PRTLOCKVALRECUNION pDown;
2347 RTLOCKVALCLASSINT *pPriorClass = rtLockValidatorRecGetClassesAndDown(pCur, &uPriorSubClass, &pDown);
2348 if (pPriorClass != NIL_RTLOCKVALCLASS)
2349 {
2350 if (!rtLockValidatorIsClassOrderOk(pClass, uSubClass, pPriorClass, uPriorSubClass))
2351 {
2352 Assert( pClass != pPriorClass
2353 && !rtLockValidatorClassIsPriorClass(pPriorClass, pClass));
2354 int rc = rtLockValidatorClassAddPriorClass(pClass, pPriorClass, true /*fAutodidacticism*/, pSrcPos);
2355 if (RT_FAILURE(rc))
2356 {
2357 Assert(rc == VERR_NO_MEMORY);
2358 break;
2359 }
2360 Assert(rtLockValidatorClassIsPriorClass(pClass, pPriorClass));
2361 }
2362 }
2363 pCur = pDown;
2364 }
2365 }
2366
2367 if (RT_SUCCESS(rcLock))
2368 RTCritSectLeave(&g_LockValClassTeachCS);
2369 }
2370
2371 return VINF_SUCCESS;
2372}
2373
2374
2375
2376/**
2377 * Checks the locking order.
2378 *
2379 * @returns VINF_SUCCESS, VERR_SEM_LV_WRONG_ORDER or VERR_SEM_LV_INTERNAL_ERROR.
2380 * @param pClass The lock class.
2381 * @param uSubClass The lock sub-class.
2382 * @param pThreadSelf The current thread.
2383 * @param pRec The lock record.
2384 * @param pSrcPos The source position of the locking operation.
2385 */
2386static int rtLockValidatorStackCheckLockingOrder(RTLOCKVALCLASSINT * const pClass, uint32_t const uSubClass,
2387 PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION const pRec,
2388 PCRTLOCKVALSRCPOS pSrcPos)
2389{
2390 /*
2391 * Some internal paranoia first.
2392 */
2393 AssertPtr(pClass);
2394 Assert(pClass->u32Magic == RTLOCKVALCLASS_MAGIC);
2395 AssertPtr(pThreadSelf);
2396 Assert(pThreadSelf->u32Magic == RTTHREADINT_MAGIC);
2397 AssertPtr(pRec);
2398 AssertPtrNull(pSrcPos);
2399
2400 /*
2401 * Walk the stack, delegate problems to a worker routine.
2402 */
2403 PRTLOCKVALRECUNION pCur = pThreadSelf->LockValidator.pStackTop;
2404 if (!pCur)
2405 return VINF_SUCCESS;
2406
2407 for (;;)
2408 {
2409 AssertPtrReturn(pCur, VERR_SEM_LV_INTERNAL_ERROR);
2410
2411 if (pCur->Core.u32Magic == RTLOCKVALRECNEST_MAGIC)
2412 pCur = pCur->Nest.pDown;
2413 else
2414 {
2415 uint32_t uPriorSubClass;
2416 PRTLOCKVALRECUNION pDown;
2417 RTLOCKVALCLASSINT *pPriorClass = rtLockValidatorRecGetClassesAndDown(pCur, &uPriorSubClass, &pDown);
2418 if (pPriorClass != NIL_RTLOCKVALCLASS)
2419 {
2420 AssertPtrReturn(pPriorClass, VERR_SEM_LV_INTERNAL_ERROR);
2421 AssertReturn(pPriorClass->u32Magic == RTLOCKVALCLASS_MAGIC, VERR_SEM_LV_INTERNAL_ERROR);
2422 if (RT_UNLIKELY(!rtLockValidatorIsClassOrderOk(pClass, uSubClass, pPriorClass, uPriorSubClass)))
2423 return rtLockValidatorStackCheckLockingOrder2(pClass, uSubClass, pThreadSelf, pRec, pSrcPos,
2424 pPriorClass, pCur, pDown);
2425 }
2426 pCur = pDown;
2427 }
2428 if (!pCur)
2429 return VINF_SUCCESS;
2430 }
2431}
2432
2433
2434/**
2435 * Check that the lock record is the topmost one on the stack, complain and fail
2436 * if it isn't.
2437 *
2438 * @returns VINF_SUCCESS, VERR_SEM_LV_WRONG_RELEASE_ORDER or
2439 * VERR_SEM_LV_INVALID_PARAMETER.
2440 * @param pThreadSelf The current thread.
2441 * @param pRec The record.
2442 */
2443static int rtLockValidatorStackCheckReleaseOrder(PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION pRec)
2444{
2445 AssertReturn(pThreadSelf != NIL_RTTHREAD, VERR_SEM_LV_INVALID_PARAMETER);
2446 Assert(pThreadSelf == RTThreadSelf());
2447
2448 PRTLOCKVALRECUNION pTop = pThreadSelf->LockValidator.pStackTop;
2449 if (RT_LIKELY( pTop == pRec
2450 || ( pTop
2451 && pTop->Core.u32Magic == RTLOCKVALRECNEST_MAGIC
2452 && pTop->Nest.pRec == pRec) ))
2453 return VINF_SUCCESS;
2454
2455#ifdef RTLOCKVAL_WITH_RECURSION_RECORDS
2456 /* Look for a recursion record so the right frame is dumped and marked. */
2457 while (pTop)
2458 {
2459 if (pTop->Core.u32Magic == RTLOCKVALRECNEST_MAGIC)
2460 {
2461 if (pTop->Nest.pRec == pRec)
2462 {
2463 pRec = pTop;
2464 break;
2465 }
2466 pTop = pTop->Nest.pDown;
2467 }
2468 else if (pTop->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC)
2469 pTop = pTop->Excl.pDown;
2470 else if (pTop->Core.u32Magic == RTLOCKVALRECSHRDOWN_MAGIC)
2471 pTop = pTop->ShrdOwner.pDown;
2472 else
2473 break;
2474 }
2475#endif
2476
2477 rtLockValComplainFirst("Wrong release order!", NULL, pThreadSelf, pRec, true);
2478 rtLockValComplainPanic();
2479 return !g_fLockValSoftWrongOrder ? VERR_SEM_LV_WRONG_RELEASE_ORDER : VINF_SUCCESS;
2480}
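
/* With fStrictReleaseOrder set on the class, releases must unwind the lock
 * stack in strict LIFO order; an illustrative violation the check above
 * catches:
 *
 *      enter A;
 *      enter B;
 *      leave A;   <- B is the top of the lock stack, not A, so this fails
 *                    with VERR_SEM_LV_WRONG_RELEASE_ORDER.
 */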
2481
2482
2483/**
2484 * Checks if all owners are blocked - shared record operated in signaller mode.
2485 *
2486 * @returns true / false accordingly.
2487 * @param pRec The record.
2488 * @param pThreadSelf The current thread.
2489 */
2490DECL_FORCE_INLINE(bool) rtLockValidatorDdAreAllThreadsBlocked(PRTLOCKVALRECSHRD pRec, PRTTHREADINT pThreadSelf)
2491{
2492 PRTLOCKVALRECSHRDOWN volatile *papOwners = pRec->papOwners;
2493 uint32_t cAllocated = pRec->cAllocated;
2494 uint32_t cEntries = ASMAtomicUoReadU32(&pRec->cEntries);
2495 if (cEntries == 0)
2496 return false;
2497
2498 for (uint32_t i = 0; i < cAllocated; i++)
2499 {
2500 PRTLOCKVALRECSHRDOWN pEntry = rtLockValidatorUoReadSharedOwner(&papOwners[i]);
2501 if ( pEntry
2502 && pEntry->Core.u32Magic == RTLOCKVALRECSHRDOWN_MAGIC)
2503 {
2504 PRTTHREADINT pCurThread = rtLockValidatorReadThreadHandle(&pEntry->hThread);
2505 if (!pCurThread)
2506 return false;
2507 if (pCurThread->u32Magic != RTTHREADINT_MAGIC)
2508 return false;
2509 if ( !RTTHREAD_IS_SLEEPING(rtThreadGetState(pCurThread))
2510 && pCurThread != pThreadSelf)
2511 return false;
2512 if (--cEntries == 0)
2513 break;
2514 }
2515 else
2516 Assert(!pEntry || pEntry->Core.u32Magic == RTLOCKVALRECSHRDOWN_MAGIC_DEAD);
2517 }
2518
2519 return true;
2520}
2521
2522
2523/**
2524 * Verifies the deadlock stack before calling it a deadlock.
2525 *
2526 * @retval VERR_SEM_LV_DEADLOCK if it's a deadlock.
2527 * @retval VERR_SEM_LV_ILLEGAL_UPGRADE if it's a deadlock on the same lock.
2528 * @retval VERR_TRY_AGAIN if something changed.
2529 *
2530 * @param pStack The deadlock detection stack.
2531 * @param pThreadSelf The current thread.
2532 */
2533static int rtLockValidatorDdVerifyDeadlock(PRTLOCKVALDDSTACK pStack, PRTTHREADINT pThreadSelf)
2534{
2535 uint32_t const c = pStack->c;
2536 for (uint32_t iPass = 0; iPass < 3; iPass++)
2537 {
2538 for (uint32_t i = 1; i < c; i++)
2539 {
2540 PRTTHREADINT pThread = pStack->a[i].pThread;
2541 if (pThread->u32Magic != RTTHREADINT_MAGIC)
2542 return VERR_TRY_AGAIN;
2543 if (rtThreadGetState(pThread) != pStack->a[i].enmState)
2544 return VERR_TRY_AGAIN;
2545 if (rtLockValidatorReadRecUnionPtr(&pThread->LockValidator.pRec) != pStack->a[i].pFirstSibling)
2546 return VERR_TRY_AGAIN;
2547 /* ASSUMES the signaller records won't have siblings! */
2548 PRTLOCKVALRECUNION pRec = pStack->a[i].pRec;
2549 if ( pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
2550 && pRec->Shared.fSignaller
2551 && !rtLockValidatorDdAreAllThreadsBlocked(&pRec->Shared, pThreadSelf))
2552 return VERR_TRY_AGAIN;
2553 }
2554 RTThreadYield();
2555 }
2556
2557 if (c == 1)
2558 return VERR_SEM_LV_ILLEGAL_UPGRADE;
2559 return VERR_SEM_LV_DEADLOCK;
2560}
2561
2562
2563/**
2564 * Checks for stack cycles caused by another deadlock before returning.
2565 *
2566 * @retval VINF_SUCCESS if the stack is simply too small.
2567 * @retval VERR_SEM_LV_EXISTING_DEADLOCK if a cycle was detected.
2568 *
2569 * @param pStack The deadlock detection stack.
2570 */
2571static int rtLockValidatorDdHandleStackOverflow(PRTLOCKVALDDSTACK pStack)
2572{
2573 for (size_t i = 0; i < RT_ELEMENTS(pStack->a) - 1; i++)
2574 {
2575 PRTTHREADINT pThread = pStack->a[i].pThread;
2576 for (size_t j = i + 1; j < RT_ELEMENTS(pStack->a); j++)
2577 if (pStack->a[j].pThread == pThread)
2578 return VERR_SEM_LV_EXISTING_DEADLOCK;
2579 }
2580 static bool volatile s_fComplained = false;
2581 if (!s_fComplained)
2582 {
2583 s_fComplained = true;
2584 rtLockValComplain(RT_SRC_POS, "lock validator stack is too small! (%zu entries)\n", RT_ELEMENTS(pStack->a));
2585 }
2586 return VINF_SUCCESS;
2587}
2588
2589
2590/**
2591 * Worker for rtLockValidatorDeadlockDetection that does the actual deadlock
2592 * detection.
2593 *
2594 * @retval VINF_SUCCESS
2595 * @retval VERR_SEM_LV_DEADLOCK
2596 * @retval VERR_SEM_LV_EXISTING_DEADLOCK
2597 * @retval VERR_SEM_LV_ILLEGAL_UPGRADE
2598 * @retval VERR_TRY_AGAIN
2599 *
2600 * @param pStack The stack to use.
2601 * @param pOriginalRec The original record.
2602 * @param pThreadSelf The calling thread.
2603 */
2604static int rtLockValidatorDdDoDetection(PRTLOCKVALDDSTACK pStack, PRTLOCKVALRECUNION const pOriginalRec,
2605 PRTTHREADINT const pThreadSelf)
2606{
2607 pStack->c = 0;
2608
2609 /* We could use a single RTLOCKVALDDENTRY variable here, but the
2610       compiler may do a better job of it when using individual variables. */
2611 PRTLOCKVALRECUNION pRec = pOriginalRec;
2612 PRTLOCKVALRECUNION pFirstSibling = pOriginalRec;
2613 uint32_t iEntry = UINT32_MAX;
2614 PRTTHREADINT pThread = NIL_RTTHREAD;
2615 RTTHREADSTATE enmState = RTTHREADSTATE_RUNNING;
2616 for (uint32_t iLoop = 0; ; iLoop++)
2617 {
2618 /*
2619 * Process the current record.
2620 */
2621 RTLOCKVAL_ASSERT_PTR_ALIGN(pRec);
2622
2623 /* Find the next relevant owner thread and record. */
2624 PRTLOCKVALRECUNION pNextRec = NULL;
2625 RTTHREADSTATE enmNextState = RTTHREADSTATE_RUNNING;
2626 PRTTHREADINT pNextThread = NIL_RTTHREAD;
2627 switch (pRec->Core.u32Magic)
2628 {
2629 case RTLOCKVALRECEXCL_MAGIC:
2630 Assert(iEntry == UINT32_MAX);
2631 for (;;)
2632 {
2633 pNextThread = rtLockValidatorReadThreadHandle(&pRec->Excl.hThread);
2634 if ( !pNextThread
2635 || pNextThread->u32Magic != RTTHREADINT_MAGIC)
2636 break;
2637 enmNextState = rtThreadGetState(pNextThread);
2638 if ( !RTTHREAD_IS_SLEEPING(enmNextState)
2639 && pNextThread != pThreadSelf)
2640 break;
2641 pNextRec = rtLockValidatorReadRecUnionPtr(&pNextThread->LockValidator.pRec);
2642 if (RT_LIKELY( !pNextRec
2643 || enmNextState == rtThreadGetState(pNextThread)))
2644 break;
2645 pNextRec = NULL;
2646 }
2647 if (!pNextRec)
2648 {
2649 pRec = pRec->Excl.pSibling;
2650 if ( pRec
2651 && pRec != pFirstSibling)
2652 continue;
2653 pNextThread = NIL_RTTHREAD;
2654 }
2655 break;
2656
2657 case RTLOCKVALRECSHRD_MAGIC:
2658 if (!pRec->Shared.fSignaller)
2659 {
2660 /* Skip to the next sibling if same side. ASSUMES reader priority. */
2661 /** @todo The read side of a read-write lock is problematic if
2662 * the implementation prioritizes writers over readers because
2663                 * that means we could deadlock against current readers
2664 * if a writer showed up. If the RW sem implementation is
2665 * wrapping some native API, it's not so easy to detect when we
2666 * should do this and when we shouldn't. Checking when we
2667 * shouldn't is subject to wakeup scheduling and cannot easily
2668 * be made reliable.
2669 *
2670 * At the moment we circumvent all this mess by declaring that
2671                 * readers have priority. This is TRUE on Linux, but probably
2672 * isn't on Solaris and FreeBSD. */
2673 if ( pRec == pFirstSibling
2674 && pRec->Shared.pSibling != NULL
2675 && pRec->Shared.pSibling != pFirstSibling)
2676 {
2677 pRec = pRec->Shared.pSibling;
2678 Assert(iEntry == UINT32_MAX);
2679 continue;
2680 }
2681 }
2682
2683 /* Scan the owner table for blocked owners. */
2684 if ( ASMAtomicUoReadU32(&pRec->Shared.cEntries) > 0
2685 && ( !pRec->Shared.fSignaller
2686 || iEntry != UINT32_MAX
2687 || rtLockValidatorDdAreAllThreadsBlocked(&pRec->Shared, pThreadSelf)
2688 )
2689 )
2690 {
2691 uint32_t cAllocated = pRec->Shared.cAllocated;
2692 PRTLOCKVALRECSHRDOWN volatile *papOwners = pRec->Shared.papOwners;
2693 while (++iEntry < cAllocated)
2694 {
2695 PRTLOCKVALRECSHRDOWN pEntry = rtLockValidatorUoReadSharedOwner(&papOwners[iEntry]);
2696 if (pEntry)
2697 {
2698 for (;;)
2699 {
2700 if (pEntry->Core.u32Magic != RTLOCKVALRECSHRDOWN_MAGIC)
2701 break;
2702 pNextThread = rtLockValidatorReadThreadHandle(&pEntry->hThread);
2703 if ( !pNextThread
2704 || pNextThread->u32Magic != RTTHREADINT_MAGIC)
2705 break;
2706 enmNextState = rtThreadGetState(pNextThread);
2707 if ( !RTTHREAD_IS_SLEEPING(enmNextState)
2708 && pNextThread != pThreadSelf)
2709 break;
2710 pNextRec = rtLockValidatorReadRecUnionPtr(&pNextThread->LockValidator.pRec);
2711 if (RT_LIKELY( !pNextRec
2712 || enmNextState == rtThreadGetState(pNextThread)))
2713 break;
2714 pNextRec = NULL;
2715 }
2716 if (pNextRec)
2717 break;
2718 }
2719 else
2720 Assert(!pEntry || pEntry->Core.u32Magic == RTLOCKVALRECSHRDOWN_MAGIC_DEAD);
2721 }
2722 if (pNextRec)
2723 break;
2724 pNextThread = NIL_RTTHREAD;
2725 }
2726
2727 /* Advance to the next sibling, if any. */
2728 pRec = pRec->Shared.pSibling;
2729 if ( pRec != NULL
2730 && pRec != pFirstSibling)
2731 {
2732 iEntry = UINT32_MAX;
2733 continue;
2734 }
2735 break;
2736
2737 case RTLOCKVALRECEXCL_MAGIC_DEAD:
2738 case RTLOCKVALRECSHRD_MAGIC_DEAD:
2739 break;
2740
2741 case RTLOCKVALRECSHRDOWN_MAGIC:
2742 case RTLOCKVALRECSHRDOWN_MAGIC_DEAD:
2743 default:
2744                AssertMsgFailed(("%p: %#x\n", pRec, pRec->Core.u32Magic));
2745 break;
2746 }
2747
2748 if (pNextRec)
2749 {
2750 /*
2751 * Recurse and check for deadlock.
2752 */
2753 uint32_t i = pStack->c;
2754 if (RT_UNLIKELY(i >= RT_ELEMENTS(pStack->a)))
2755 return rtLockValidatorDdHandleStackOverflow(pStack);
2756
2757 pStack->c++;
2758 pStack->a[i].pRec = pRec;
2759 pStack->a[i].iEntry = iEntry;
2760 pStack->a[i].enmState = enmState;
2761 pStack->a[i].pThread = pThread;
2762 pStack->a[i].pFirstSibling = pFirstSibling;
2763
2764 if (RT_UNLIKELY( pNextThread == pThreadSelf
2765 && ( i != 0
2766 || pRec->Core.u32Magic != RTLOCKVALRECSHRD_MAGIC
2767 || !pRec->Shared.fSignaller) /* ASSUMES signaller records have no siblings. */
2768 )
2769 )
2770 return rtLockValidatorDdVerifyDeadlock(pStack, pThreadSelf);
2771
2772 pRec = pNextRec;
2773 pFirstSibling = pNextRec;
2774 iEntry = UINT32_MAX;
2775 enmState = enmNextState;
2776 pThread = pNextThread;
2777 }
2778 else
2779 {
2780 /*
2781 * No deadlock here, unwind the stack and deal with any unfinished
2782 * business there.
2783 */
2784 uint32_t i = pStack->c;
2785 for (;;)
2786 {
2787 /* pop */
2788 if (i == 0)
2789 return VINF_SUCCESS;
2790 i--;
2791 pRec = pStack->a[i].pRec;
2792 iEntry = pStack->a[i].iEntry;
2793
2794 /* Examine it. */
2795 uint32_t u32Magic = pRec->Core.u32Magic;
2796 if (u32Magic == RTLOCKVALRECEXCL_MAGIC)
2797 pRec = pRec->Excl.pSibling;
2798 else if (u32Magic == RTLOCKVALRECSHRD_MAGIC)
2799 {
2800 if (iEntry + 1 < pRec->Shared.cAllocated)
2801 break; /* continue processing this record. */
2802 pRec = pRec->Shared.pSibling;
2803 }
2804 else
2805 {
2806 Assert( u32Magic == RTLOCKVALRECEXCL_MAGIC_DEAD
2807 || u32Magic == RTLOCKVALRECSHRD_MAGIC_DEAD);
2808 continue;
2809 }
2810
2811 /* Any next record to advance to? */
2812 if ( !pRec
2813 || pRec == pStack->a[i].pFirstSibling)
2814 continue;
2815 iEntry = UINT32_MAX;
2816 break;
2817 }
2818
2819 /* Restore the rest of the state and update the stack. */
2820 pFirstSibling = pStack->a[i].pFirstSibling;
2821 enmState = pStack->a[i].enmState;
2822 pThread = pStack->a[i].pThread;
2823 pStack->c = i;
2824 }
2825
2826 Assert(iLoop != 1000000);
2827 }
2828}
2829
2830
2831/**
2832 * Check for the simple no-deadlock case.
2833 *
2834 * @returns true if no deadlock, false if further investigation is required.
2835 *
2836 * @param pOriginalRec The original record.
2837 */
2838DECLINLINE(bool) rtLockValidatorIsSimpleNoDeadlockCase(PRTLOCKVALRECUNION pOriginalRec)
2839{
2840 if ( pOriginalRec->Excl.Core.u32Magic == RTLOCKVALRECEXCL_MAGIC
2841 && !pOriginalRec->Excl.pSibling)
2842 {
2843 PRTTHREADINT pThread = rtLockValidatorReadThreadHandle(&pOriginalRec->Excl.hThread);
2844 if ( !pThread
2845 || pThread->u32Magic != RTTHREADINT_MAGIC)
2846 return true;
2847 RTTHREADSTATE enmState = rtThreadGetState(pThread);
2848 if (!RTTHREAD_IS_SLEEPING(enmState))
2849 return true;
2850 }
2851 return false;
2852}
2853
2854
2855/**
2856 * Worker for rtLockValidatorDeadlockDetection that complains about a deadlock.
2857 *
2858 * @param pStack The chain of locks causing the deadlock.
2859 * @param pRec The record relating to the current thread's lock
2860 * operation.
2861 * @param pThreadSelf This thread.
2862 * @param pSrcPos Where we are going to deadlock.
2863 * @param rc The return code.
2864 */
2865static void rcLockValidatorDoDeadlockComplaining(PRTLOCKVALDDSTACK pStack, PRTLOCKVALRECUNION pRec,
2866 PRTTHREADINT pThreadSelf, PCRTLOCKVALSRCPOS pSrcPos, int rc)
2867{
2868 if (!ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
2869 {
2870 const char *pszWhat;
2871 switch (rc)
2872 {
2873 case VERR_SEM_LV_DEADLOCK: pszWhat = "Detected deadlock!"; break;
2874 case VERR_SEM_LV_EXISTING_DEADLOCK: pszWhat = "Found existing deadlock!"; break;
2875 case VERR_SEM_LV_ILLEGAL_UPGRADE: pszWhat = "Illegal lock upgrade!"; break;
2876 default: AssertFailed(); pszWhat = "!unexpected rc!"; break;
2877 }
2878 rtLockValComplainFirst(pszWhat, pSrcPos, pThreadSelf, pStack->a[0].pRec != pRec ? pRec : NULL, true);
2879 rtLockValComplainMore("---- start of deadlock chain - %u entries ----\n", pStack->c);
2880 for (uint32_t i = 0; i < pStack->c; i++)
2881 {
2882 char szPrefix[24];
2883 RTStrPrintf(szPrefix, sizeof(szPrefix), "#%02u: ", i);
2884 PRTLOCKVALRECUNION pShrdOwner = NULL;
2885 if (pStack->a[i].pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC)
2886 pShrdOwner = (PRTLOCKVALRECUNION)pStack->a[i].pRec->Shared.papOwners[pStack->a[i].iEntry];
2887 if (VALID_PTR(pShrdOwner) && pShrdOwner->Core.u32Magic == RTLOCKVALRECSHRDOWN_MAGIC)
2888 {
2889 rtLockValComplainAboutLock(szPrefix, pShrdOwner, "\n");
2890 rtLockValComplainAboutLockStack(pShrdOwner->ShrdOwner.hThread, 5, 2, pShrdOwner);
2891 }
2892 else
2893 {
2894 rtLockValComplainAboutLock(szPrefix, pStack->a[i].pRec, "\n");
2895 if (pStack->a[i].pRec->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC)
2896 rtLockValComplainAboutLockStack(pStack->a[i].pRec->Excl.hThread, 5, 2, pStack->a[i].pRec);
2897 }
2898 }
2899 rtLockValComplainMore("---- end of deadlock chain ----\n");
2900 }
2901
2902 rtLockValComplainPanic();
2903}
2904
2905
2906/**
2907 * Perform deadlock detection.
2908 *
2909 * @retval VINF_SUCCESS
2910 * @retval VERR_SEM_LV_DEADLOCK
2911 * @retval VERR_SEM_LV_EXISTING_DEADLOCK
2912 * @retval VERR_SEM_LV_ILLEGAL_UPGRADE
2913 *
2914 * @param pRec The record relating to the current thread's lock
2915 * operation.
2916 * @param pThreadSelf The current thread.
2917 * @param pSrcPos The position of the current lock operation.
2918 */
2919static int rtLockValidatorDeadlockDetection(PRTLOCKVALRECUNION pRec, PRTTHREADINT pThreadSelf, PCRTLOCKVALSRCPOS pSrcPos)
2920{
2921 RTLOCKVALDDSTACK Stack;
2922 int rc = rtLockValidatorDdDoDetection(&Stack, pRec, pThreadSelf);
2923 if (RT_SUCCESS(rc))
2924 return VINF_SUCCESS;
2925
2926 if (rc == VERR_TRY_AGAIN)
2927 {
2928 for (uint32_t iLoop = 0; ; iLoop++)
2929 {
2930 rc = rtLockValidatorDdDoDetection(&Stack, pRec, pThreadSelf);
2931 if (RT_SUCCESS_NP(rc))
2932 return VINF_SUCCESS;
2933 if (rc != VERR_TRY_AGAIN)
2934 break;
2935 RTThreadYield();
2936 if (iLoop >= 3)
2937 return VINF_SUCCESS;
2938 }
2939 }
2940
2941 rcLockValidatorDoDeadlockComplaining(&Stack, pRec, pThreadSelf, pSrcPos, rc);
2942 return rc;
2943}
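
/* The classic pattern the detection above catches is the ABBA deadlock; a
 * minimal sketch using the critical section API (hypothetical callers, not
 * code from this file):
 *
 *      Thread 1:                     Thread 2:
 *          RTCritSectEnter(&A);          RTCritSectEnter(&B);
 *          RTCritSectEnter(&B);          RTCritSectEnter(&A);
 *
 * When thread 2 blocks on A, rtLockValidatorDdDoDetection walks from A's
 * record to its owner (thread 1), from thread 1 to the record it is waiting
 * on (B), and from B's owner back to thread 2 - a cycle - so the wait fails
 * with VERR_SEM_LV_DEADLOCK instead of hanging.
 */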
2944
2945
2946RTDECL(void) RTLockValidatorRecExclInitV(PRTLOCKVALRECEXCL pRec, RTLOCKVALCLASS hClass, uint32_t uSubClass,
2947 void *hLock, bool fEnabled, const char *pszNameFmt, va_list va)
2948{
2949 RTLOCKVAL_ASSERT_PTR_ALIGN(pRec);
2950 RTLOCKVAL_ASSERT_PTR_ALIGN(hLock);
2951 Assert( uSubClass >= RTLOCKVAL_SUB_CLASS_USER
2952 || uSubClass == RTLOCKVAL_SUB_CLASS_NONE
2953 || uSubClass == RTLOCKVAL_SUB_CLASS_ANY);
2954
2955 pRec->Core.u32Magic = RTLOCKVALRECEXCL_MAGIC;
2956 pRec->fEnabled = fEnabled && RTLockValidatorIsEnabled();
2957 pRec->afReserved[0] = 0;
2958 pRec->afReserved[1] = 0;
2959 pRec->afReserved[2] = 0;
2960 rtLockValidatorSrcPosInit(&pRec->SrcPos);
2961 pRec->hThread = NIL_RTTHREAD;
2962 pRec->pDown = NULL;
2963 pRec->hClass = rtLockValidatorClassValidateAndRetain(hClass);
2964 pRec->uSubClass = uSubClass;
2965 pRec->cRecursion = 0;
2966 pRec->hLock = hLock;
2967 pRec->pSibling = NULL;
2968 if (pszNameFmt)
2969 RTStrPrintfV(pRec->szName, sizeof(pRec->szName), pszNameFmt, va);
2970 else
2971 {
2972 static uint32_t volatile s_cAnonymous = 0;
2973 uint32_t i = ASMAtomicIncU32(&s_cAnonymous) - 1;
2974 RTStrPrintf(pRec->szName, sizeof(pRec->szName), "anon-excl-%u", i);
2975 }
2976
2977 /* Lazy initialization. */
2978 if (RT_UNLIKELY(g_hLockValidatorXRoads == NIL_RTSEMXROADS))
2979 rtLockValidatorLazyInit();
2980}
2981
2982
2983RTDECL(void) RTLockValidatorRecExclInit(PRTLOCKVALRECEXCL pRec, RTLOCKVALCLASS hClass, uint32_t uSubClass,
2984 void *hLock, bool fEnabled, const char *pszNameFmt, ...)
2985{
2986 va_list va;
2987 va_start(va, pszNameFmt);
2988 RTLockValidatorRecExclInitV(pRec, hClass, uSubClass, hLock, fEnabled, pszNameFmt, va);
2989 va_end(va);
2990}
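
/* A minimal sketch of how a lock implementation might embed and initialize
 * an exclusive record; MYFASTMUTEX and its fields are illustrative, not an
 * IPRT type:
 *
 *      typedef struct MYFASTMUTEX
 *      {
 *          uint32_t            u32Magic;
 *          RTSEMFASTMUTEX      hNative;
 *          RTLOCKVALRECEXCL    ValidatorRec;
 *      } MYFASTMUTEX;
 *
 *      rc = RTSemFastMutexCreate(&pThis->hNative);
 *      if (RT_SUCCESS(rc))
 *          RTLockValidatorRecExclInit(&pThis->ValidatorRec, NIL_RTLOCKVALCLASS,
 *                                     RTLOCKVAL_SUB_CLASS_NONE, pThis,
 *                                     true, "mymutex-%p", pThis);
 *
 * Passing NIL_RTLOCKVALCLASS leaves the record classless; the fEnabled=true
 * argument is still subject to the global RTLockValidatorIsEnabled() state.
 */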
2991
2992
2993RTDECL(int) RTLockValidatorRecExclCreateV(PRTLOCKVALRECEXCL *ppRec, RTLOCKVALCLASS hClass,
2994 uint32_t uSubClass, void *pvLock, bool fEnabled,
2995 const char *pszNameFmt, va_list va)
2996{
2997 PRTLOCKVALRECEXCL pRec;
2998 *ppRec = pRec = (PRTLOCKVALRECEXCL)RTMemAlloc(sizeof(*pRec));
2999 if (!pRec)
3000 return VERR_NO_MEMORY;
3001 RTLockValidatorRecExclInitV(pRec, hClass, uSubClass, pvLock, fEnabled, pszNameFmt, va);
3002 return VINF_SUCCESS;
3003}
3004
3005
3006RTDECL(int) RTLockValidatorRecExclCreate(PRTLOCKVALRECEXCL *ppRec, RTLOCKVALCLASS hClass,
3007 uint32_t uSubClass, void *pvLock, bool fEnabled,
3008 const char *pszNameFmt, ...)
3009{
3010 va_list va;
3011 va_start(va, pszNameFmt);
3012 int rc = RTLockValidatorRecExclCreateV(ppRec, hClass, uSubClass, pvLock, fEnabled, pszNameFmt, va);
3013 va_end(va);
3014 return rc;
3015}
3016
3017
3018RTDECL(void) RTLockValidatorRecExclDelete(PRTLOCKVALRECEXCL pRec)
3019{
3020 Assert(pRec->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC);
3021
3022 rtLockValidatorSerializeDestructEnter();
3023
3024 ASMAtomicWriteU32(&pRec->Core.u32Magic, RTLOCKVALRECEXCL_MAGIC_DEAD);
3025 ASMAtomicWriteHandle(&pRec->hThread, NIL_RTTHREAD);
3026 RTLOCKVALCLASS hClass;
3027 ASMAtomicXchgHandle(&pRec->hClass, NIL_RTLOCKVALCLASS, &hClass);
3028 if (pRec->pSibling)
3029 rtLockValidatorUnlinkAllSiblings(&pRec->Core);
3030 rtLockValidatorSerializeDestructLeave();
3031 if (hClass != NIL_RTLOCKVALCLASS)
3032 RTLockValidatorClassRelease(hClass);
3033}
3034
3035
3036RTDECL(void) RTLockValidatorRecExclDestroy(PRTLOCKVALRECEXCL *ppRec)
3037{
3038 PRTLOCKVALRECEXCL pRec = *ppRec;
3039 *ppRec = NULL;
3040 if (pRec)
3041 {
3042 RTLockValidatorRecExclDelete(pRec);
3043 RTMemFree(pRec);
3044 }
3045}
3046
3047
3048RTDECL(uint32_t) RTLockValidatorRecExclSetSubClass(PRTLOCKVALRECEXCL pRec, uint32_t uSubClass)
3049{
3050 AssertPtrReturn(pRec, RTLOCKVAL_SUB_CLASS_INVALID);
3051 AssertReturn(pRec->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, RTLOCKVAL_SUB_CLASS_INVALID);
3052 AssertReturn( uSubClass >= RTLOCKVAL_SUB_CLASS_USER
3053 || uSubClass == RTLOCKVAL_SUB_CLASS_NONE
3054 || uSubClass == RTLOCKVAL_SUB_CLASS_ANY,
3055 RTLOCKVAL_SUB_CLASS_INVALID);
3056 return ASMAtomicXchgU32(&pRec->uSubClass, uSubClass);
3057}
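
/* Sub-classes make same-class nesting legal; a sketch with two illustrative
 * wrapper mutexes sharing one lock class:
 *
 *      RTLockValidatorRecExclSetSubClass(&pOuter->ValidatorRec, RTLOCKVAL_SUB_CLASS_USER);
 *      RTLockValidatorRecExclSetSubClass(&pInner->ValidatorRec, RTLOCKVAL_SUB_CLASS_USER + 1);
 *
 * Taking pInner while holding pOuter now passes the sub-class order check
 * (ascending), while taking them in the opposite order gets flagged.
 */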
3058
3059
3060RTDECL(void) RTLockValidatorRecExclSetOwner(PRTLOCKVALRECEXCL pRec, RTTHREAD hThreadSelf,
3061 PCRTLOCKVALSRCPOS pSrcPos, bool fFirstRecursion)
3062{
3063 PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
3064 AssertReturnVoid(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC);
3065 if (!pRecU->Excl.fEnabled)
3066 return;
3067 if (hThreadSelf == NIL_RTTHREAD)
3068 {
3069 hThreadSelf = RTThreadSelfAutoAdopt();
3070 AssertReturnVoid(hThreadSelf != NIL_RTTHREAD);
3071 }
3072 AssertReturnVoid(hThreadSelf->u32Magic == RTTHREADINT_MAGIC);
3073 Assert(hThreadSelf == RTThreadSelf());
3074
3075 ASMAtomicIncS32(&hThreadSelf->LockValidator.cWriteLocks);
3076
3077 if (pRecU->Excl.hThread == hThreadSelf)
3078 {
3079 Assert(!fFirstRecursion);
3080 pRecU->Excl.cRecursion++;
3081 rtLockValidatorStackPushRecursion(hThreadSelf, pRecU, pSrcPos);
3082 }
3083 else
3084 {
3085 Assert(pRecU->Excl.hThread == NIL_RTTHREAD);
3086
3087 rtLockValidatorSrcPosCopy(&pRecU->Excl.SrcPos, pSrcPos);
3088 ASMAtomicUoWriteU32(&pRecU->Excl.cRecursion, 1);
3089 ASMAtomicWriteHandle(&pRecU->Excl.hThread, hThreadSelf);
3090
3091 rtLockValidatorStackPush(hThreadSelf, pRecU);
3092 }
3093}
3094
3095
3096/**
3097 * Internal worker for RTLockValidatorRecExclReleaseOwner and
3098 * RTLockValidatorRecExclReleaseOwnerUnchecked.
3099 */
3100static void rtLockValidatorRecExclReleaseOwnerUnchecked(PRTLOCKVALRECUNION pRec, bool fFinalRecursion)
3101{
3102 RTTHREADINT *pThread = pRec->Excl.hThread;
3103 AssertReturnVoid(pThread != NIL_RTTHREAD);
3104 Assert(pThread == RTThreadSelf());
3105
3106 ASMAtomicDecS32(&pThread->LockValidator.cWriteLocks);
3107 uint32_t c = ASMAtomicDecU32(&pRec->Excl.cRecursion);
3108 if (c == 0)
3109 {
3110 rtLockValidatorStackPop(pThread, pRec);
3111 ASMAtomicWriteHandle(&pRec->Excl.hThread, NIL_RTTHREAD);
3112 }
3113 else
3114 {
3115 Assert(c < UINT32_C(0xffff0000));
3116 Assert(!fFinalRecursion);
3117 rtLockValidatorStackPopRecursion(pThread, pRec);
3118 }
3119}
3120
3121RTDECL(int) RTLockValidatorRecExclReleaseOwner(PRTLOCKVALRECEXCL pRec, bool fFinalRecursion)
3122{
3123 PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
3124 AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
3125 if (!pRecU->Excl.fEnabled)
3126 return VINF_SUCCESS;
3127
3128 /*
3129 * Check the release order.
3130 */
3131 if ( pRecU->Excl.hClass != NIL_RTLOCKVALCLASS
3132 && pRecU->Excl.hClass->fStrictReleaseOrder
3133 && pRecU->Excl.hClass->cMsMinOrder != RT_INDEFINITE_WAIT
3134 )
3135 {
3136 int rc = rtLockValidatorStackCheckReleaseOrder(pRecU->Excl.hThread, pRecU);
3137 if (RT_FAILURE(rc))
3138 return rc;
3139 }
3140
3141 /*
3142 * Join paths with RTLockValidatorRecExclReleaseOwnerUnchecked.
3143 */
3144 rtLockValidatorRecExclReleaseOwnerUnchecked(pRecU, fFinalRecursion);
3145 return VINF_SUCCESS;
3146}
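
/* In a lock wrapper the owner calls pair up around the native lock; a sketch
 * for the hypothetical MYFASTMUTEX above (NIL_RTTHREAD makes the validator
 * resolve and auto-adopt the calling thread):
 *
 *      RTSemFastMutexRequest(pThis->hNative);
 *      RTLockValidatorRecExclSetOwner(&pThis->ValidatorRec, NIL_RTTHREAD,
 *                                     NULL, true);
 *      ... the critical section ...
 *      RTLockValidatorRecExclReleaseOwner(&pThis->ValidatorRec, false);
 *      RTSemFastMutexRelease(pThis->hNative);
 */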
3147
3148
3149RTDECL(void) RTLockValidatorRecExclReleaseOwnerUnchecked(PRTLOCKVALRECEXCL pRec)
3150{
3151 PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
3152 AssertReturnVoid(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC);
3153 if (pRecU->Excl.fEnabled)
3154 rtLockValidatorRecExclReleaseOwnerUnchecked(pRecU, false);
3155}
3156
3157
3158RTDECL(int) RTLockValidatorRecExclRecursion(PRTLOCKVALRECEXCL pRec, PCRTLOCKVALSRCPOS pSrcPos)
3159{
3160 PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
3161 AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
3162 if (!pRecU->Excl.fEnabled)
3163 return VINF_SUCCESS;
3164 AssertReturn(pRecU->Excl.hThread != NIL_RTTHREAD, VERR_SEM_LV_INVALID_PARAMETER);
3165 AssertReturn(pRecU->Excl.cRecursion > 0, VERR_SEM_LV_INVALID_PARAMETER);
3166
3167 if ( pRecU->Excl.hClass != NIL_RTLOCKVALCLASS
3168 && !pRecU->Excl.hClass->fRecursionOk)
3169 {
3170 rtLockValComplainFirst("Recursion not allowed by the class!",
3171 pSrcPos, pRecU->Excl.hThread, (PRTLOCKVALRECUNION)pRec, true);
3172 rtLockValComplainPanic();
3173 return VERR_SEM_LV_NESTED;
3174 }
3175
3176 Assert(pRecU->Excl.cRecursion < _1M);
3177 pRecU->Excl.cRecursion++;
3178 rtLockValidatorStackPushRecursion(pRecU->Excl.hThread, pRecU, pSrcPos);
3179 return VINF_SUCCESS;
3180}
3181
3182
3183RTDECL(int) RTLockValidatorRecExclUnwind(PRTLOCKVALRECEXCL pRec)
3184{
3185 PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
3186 AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
3187 if (!pRecU->Excl.fEnabled)
3188 return VINF_SUCCESS;
3189 AssertReturn(pRecU->Excl.hThread != NIL_RTTHREAD, VERR_SEM_LV_INVALID_PARAMETER);
3190 Assert(pRecU->Excl.hThread == RTThreadSelf());
3191 AssertReturn(pRecU->Excl.cRecursion > 1, VERR_SEM_LV_INVALID_PARAMETER);
3192
3193 /*
3194 * Check the release order.
3195 */
3196 if ( pRecU->Excl.hClass != NIL_RTLOCKVALCLASS
3197 && pRecU->Excl.hClass->fStrictReleaseOrder
3198 && pRecU->Excl.hClass->cMsMinOrder != RT_INDEFINITE_WAIT
3199 )
3200 {
3201 int rc = rtLockValidatorStackCheckReleaseOrder(pRecU->Excl.hThread, pRecU);
3202 if (RT_FAILURE(rc))
3203 return rc;
3204 }
3205
3206 /*
3207 * Perform the unwind.
3208 */
3209 pRecU->Excl.cRecursion--;
3210 rtLockValidatorStackPopRecursion(pRecU->Excl.hThread, pRecU);
3211 return VINF_SUCCESS;
3212}
3213
3214
3215RTDECL(int) RTLockValidatorRecExclRecursionMixed(PRTLOCKVALRECEXCL pRec, PRTLOCKVALRECCORE pRecMixed, PCRTLOCKVALSRCPOS pSrcPos)
3216{
3217 PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
3218 AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
3219 PRTLOCKVALRECUNION pRecMixedU = (PRTLOCKVALRECUNION)pRecMixed;
3220 AssertReturn( pRecMixedU->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
3221 || pRecMixedU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC
3222 , VERR_SEM_LV_INVALID_PARAMETER);
3223 if (!pRecU->Excl.fEnabled)
3224 return VINF_SUCCESS;
3225 Assert(pRecU->Excl.hThread == RTThreadSelf());
3226 AssertReturn(pRecU->Excl.hThread != NIL_RTTHREAD, VERR_SEM_LV_INVALID_PARAMETER);
3227 AssertReturn(pRecU->Excl.cRecursion > 0, VERR_SEM_LV_INVALID_PARAMETER);
3228
3229 if ( pRecU->Excl.hClass != NIL_RTLOCKVALCLASS
3230 && !pRecU->Excl.hClass->fRecursionOk)
3231 {
3232 rtLockValComplainFirst("Mixed recursion not allowed by the class!",
3233 pSrcPos, pRecU->Excl.hThread, (PRTLOCKVALRECUNION)pRec, true);
3234 rtLockValComplainPanic();
3235 return VERR_SEM_LV_NESTED;
3236 }
3237
3238 Assert(pRecU->Excl.cRecursion < _1M);
3239 pRecU->Excl.cRecursion++;
3240 rtLockValidatorStackPushRecursion(pRecU->Excl.hThread, pRecU, pSrcPos);
3241
3242 return VINF_SUCCESS;
3243}
3244
3245
3246RTDECL(int) RTLockValidatorRecExclUnwindMixed(PRTLOCKVALRECEXCL pRec, PRTLOCKVALRECCORE pRecMixed)
3247{
3248 PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
3249 AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
3250 PRTLOCKVALRECUNION pRecMixedU = (PRTLOCKVALRECUNION)pRecMixed;
3251 AssertReturn( pRecMixedU->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
3252 || pRecMixedU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC
3253 , VERR_SEM_LV_INVALID_PARAMETER);
3254 if (!pRecU->Excl.fEnabled)
3255 return VINF_SUCCESS;
3256 Assert(pRecU->Excl.hThread == RTThreadSelf());
3257 AssertReturn(pRecU->Excl.hThread != NIL_RTTHREAD, VERR_SEM_LV_INVALID_PARAMETER);
3258 AssertReturn(pRecU->Excl.cRecursion > 1, VERR_SEM_LV_INVALID_PARAMETER);
3259
3260 /*
3261 * Check the release order.
3262 */
3263 if ( pRecU->Excl.hClass != NIL_RTLOCKVALCLASS
3264 && pRecU->Excl.hClass->fStrictReleaseOrder
3265 && pRecU->Excl.hClass->cMsMinOrder != RT_INDEFINITE_WAIT
3266 )
3267 {
3268 int rc = rtLockValidatorStackCheckReleaseOrder(pRecU->Excl.hThread, pRecU);
3269 if (RT_FAILURE(rc))
3270 return rc;
3271 }
3272
3273 /*
3274 * Perform the unwind.
3275 */
3276 pRecU->Excl.cRecursion--;
3277 rtLockValidatorStackPopRecursion(pRecU->Excl.hThread, pRecU);
3278 return VINF_SUCCESS;
3279}
3280
3281
3282RTDECL(int) RTLockValidatorRecExclCheckOrder(PRTLOCKVALRECEXCL pRec, RTTHREAD hThreadSelf,
3283 PCRTLOCKVALSRCPOS pSrcPos, RTMSINTERVAL cMillies)
3284{
3285 /*
3286 * Validate and adjust input. Quit early if order validation is disabled.
3287 */
3288 PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
3289 AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
3290 if ( !pRecU->Excl.fEnabled
3291 || pRecU->Excl.hClass == NIL_RTLOCKVALCLASS
3292 || pRecU->Excl.hClass->cMsMinOrder == RT_INDEFINITE_WAIT
3293 || pRecU->Excl.hClass->cMsMinOrder > cMillies)
3294 return VINF_SUCCESS;
3295
3296 if (hThreadSelf == NIL_RTTHREAD)
3297 {
3298 hThreadSelf = RTThreadSelfAutoAdopt();
3299 AssertReturn(hThreadSelf != NIL_RTTHREAD, VERR_SEM_LV_INTERNAL_ERROR);
3300 }
3301 AssertReturn(hThreadSelf->u32Magic == RTTHREADINT_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
3302 Assert(hThreadSelf == RTThreadSelf());
3303
3304 /*
3305 * Detect recursion as it isn't subject to order restrictions.
3306 */
3307 if (pRec->hThread == hThreadSelf)
3308 return VINF_SUCCESS;
3309
3310 return rtLockValidatorStackCheckLockingOrder(pRecU->Excl.hClass, pRecU->Excl.uSubClass, hThreadSelf, pRecU, pSrcPos);
3311}
3312
3313
3314RTDECL(int) RTLockValidatorRecExclCheckBlocking(PRTLOCKVALRECEXCL pRec, RTTHREAD hThreadSelf,
3315 PCRTLOCKVALSRCPOS pSrcPos, bool fRecursiveOk, RTMSINTERVAL cMillies,
3316 RTTHREADSTATE enmSleepState, bool fReallySleeping)
3317{
3318 /*
3319 * Fend off wild life.
3320 */
3321 PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
3322 AssertPtrReturn(pRecU, VERR_SEM_LV_INVALID_PARAMETER);
3323 AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
3324 if (!pRec->fEnabled)
3325 return VINF_SUCCESS;
3326
3327 PRTTHREADINT pThreadSelf = hThreadSelf;
3328 AssertPtrReturn(pThreadSelf, VERR_SEM_LV_INVALID_PARAMETER);
3329 AssertReturn(pThreadSelf->u32Magic == RTTHREADINT_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
3330 Assert(pThreadSelf == RTThreadSelf());
3331
3332 AssertReturn(RTTHREAD_IS_SLEEPING(enmSleepState), VERR_SEM_LV_INVALID_PARAMETER);
3333
3334 RTTHREADSTATE enmThreadState = rtThreadGetState(pThreadSelf);
3335 if (RT_UNLIKELY(enmThreadState != RTTHREADSTATE_RUNNING))
3336 {
3337 AssertReturn( enmThreadState == RTTHREADSTATE_TERMINATED /* rtThreadRemove uses locks too */
3338 || enmThreadState == RTTHREADSTATE_INITIALIZING /* rtThreadInsert uses locks too */
3339 , VERR_SEM_LV_INVALID_PARAMETER);
3340 enmSleepState = enmThreadState;
3341 }
3342
3343 /*
3344 * Record the location.
3345 */
3346 rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pRec, pRecU);
3347 rtLockValidatorSrcPosCopy(&pThreadSelf->LockValidator.SrcPos, pSrcPos);
3348 ASMAtomicWriteBool(&pThreadSelf->LockValidator.fInValidator, true);
3349 pThreadSelf->LockValidator.enmRecState = enmSleepState;
3350 rtThreadSetState(pThreadSelf, enmSleepState);
3351
3352 /*
3353 * Don't do deadlock detection if we're recursing.
3354 *
3355     * On some hosts we don't do recursion accounting ourselves and there
3356 * isn't any other place to check for this.
3357 */
3358 int rc = VINF_SUCCESS;
3359 if (rtLockValidatorReadThreadHandle(&pRecU->Excl.hThread) == pThreadSelf)
3360 {
3361 if ( !fRecursiveOk
3362 || ( pRecU->Excl.hClass != NIL_RTLOCKVALCLASS
3363 && !pRecU->Excl.hClass->fRecursionOk))
3364 {
3365 rtLockValComplainFirst("Recursion not allowed!", pSrcPos, pThreadSelf, pRecU, true);
3366 rtLockValComplainPanic();
3367 rc = VERR_SEM_LV_NESTED;
3368 }
3369 }
3370 /*
3371 * Perform deadlock detection.
3372 */
3373 else if ( pRecU->Excl.hClass != NIL_RTLOCKVALCLASS
3374 && ( pRecU->Excl.hClass->cMsMinDeadlock > cMillies
3375 || pRecU->Excl.hClass->cMsMinDeadlock > RT_INDEFINITE_WAIT))
3376 rc = VINF_SUCCESS;
3377 else if (!rtLockValidatorIsSimpleNoDeadlockCase(pRecU))
3378 rc = rtLockValidatorDeadlockDetection(pRecU, pThreadSelf, pSrcPos);
3379
3380 if (RT_SUCCESS(rc))
3381 ASMAtomicWriteBool(&pThreadSelf->fReallySleeping, fReallySleeping);
3382 else
3383 {
3384 rtThreadSetState(pThreadSelf, enmThreadState);
3385 rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pRec, NULL);
3386 }
3387 ASMAtomicWriteBool(&pThreadSelf->LockValidator.fInValidator, false);
3388 return rc;
3389}
3390RT_EXPORT_SYMBOL(RTLockValidatorRecExclCheckBlocking);
3391
3392
3393RTDECL(int) RTLockValidatorRecExclCheckOrderAndBlocking(PRTLOCKVALRECEXCL pRec, RTTHREAD hThreadSelf,
3394 PCRTLOCKVALSRCPOS pSrcPos, bool fRecursiveOk, RTMSINTERVAL cMillies,
3395 RTTHREADSTATE enmSleepState, bool fReallySleeping)
3396{
3397 int rc = RTLockValidatorRecExclCheckOrder(pRec, hThreadSelf, pSrcPos, cMillies);
3398 if (RT_SUCCESS(rc))
3399 rc = RTLockValidatorRecExclCheckBlocking(pRec, hThreadSelf, pSrcPos, fRecursiveOk, cMillies,
3400 enmSleepState, fReallySleeping);
3401 return rc;
3402}
3403RT_EXPORT_SYMBOL(RTLockValidatorRecExclCheckOrderAndBlocking);
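
/* A sleeping lock would call the combined order/blocking check right before
 * its native wait; a sketch where mymutexNativeWait stands in for whatever
 * primitive the lock wraps:
 *
 *      int rc = RTLockValidatorRecExclCheckOrderAndBlocking(&pThis->ValidatorRec,
 *                                                           hThreadSelf, pSrcPos,
 *                                                           true, cMillies,
 *                                                           RTTHREADSTATE_MUTEX,
 *                                                           true);
 *      if (RT_SUCCESS(rc))
 *      {
 *          rc = mymutexNativeWait(pThis, cMillies);
 *          RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_MUTEX);
 *      }
 *
 * The two 'true' arguments are fRecursiveOk and fReallySleeping; any
 * VERR_SEM_LV_* status means the sleep would deadlock or break the lock
 * order, and the caller should fail the request instead of blocking.
 */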
3404
3405
3406RTDECL(void) RTLockValidatorRecSharedInitV(PRTLOCKVALRECSHRD pRec, RTLOCKVALCLASS hClass, uint32_t uSubClass,
3407 void *hLock, bool fSignaller, bool fEnabled, const char *pszNameFmt, va_list va)
3408{
3409 RTLOCKVAL_ASSERT_PTR_ALIGN(pRec);
3410 RTLOCKVAL_ASSERT_PTR_ALIGN(hLock);
3411 Assert( uSubClass >= RTLOCKVAL_SUB_CLASS_USER
3412 || uSubClass == RTLOCKVAL_SUB_CLASS_NONE
3413 || uSubClass == RTLOCKVAL_SUB_CLASS_ANY);
3414
3415 pRec->Core.u32Magic = RTLOCKVALRECSHRD_MAGIC;
3416 pRec->uSubClass = uSubClass;
3417 pRec->hClass = rtLockValidatorClassValidateAndRetain(hClass);
3418 pRec->hLock = hLock;
3419 pRec->fEnabled = fEnabled && RTLockValidatorIsEnabled();
3420 pRec->fSignaller = fSignaller;
3421 pRec->pSibling = NULL;
3422
3423 /* the table */
3424 pRec->cEntries = 0;
3425 pRec->iLastEntry = 0;
3426 pRec->cAllocated = 0;
3427 pRec->fReallocating = false;
3428 pRec->fPadding = false;
3429 pRec->papOwners = NULL;
3430
3431 /* the name */
3432 if (pszNameFmt)
3433 RTStrPrintfV(pRec->szName, sizeof(pRec->szName), pszNameFmt, va);
3434 else
3435 {
3436 static uint32_t volatile s_cAnonymous = 0;
3437 uint32_t i = ASMAtomicIncU32(&s_cAnonymous) - 1;
3438 RTStrPrintf(pRec->szName, sizeof(pRec->szName), "anon-shrd-%u", i);
3439 }
3440}
3441
3442
3443RTDECL(void) RTLockValidatorRecSharedInit(PRTLOCKVALRECSHRD pRec, RTLOCKVALCLASS hClass, uint32_t uSubClass,
3444 void *hLock, bool fSignaller, bool fEnabled, const char *pszNameFmt, ...)
3445{
3446 va_list va;
3447 va_start(va, pszNameFmt);
3448 RTLockValidatorRecSharedInitV(pRec, hClass, uSubClass, hLock, fSignaller, fEnabled, pszNameFmt, va);
3449 va_end(va);
3450}
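/** @remarks Illustration only: how a hypothetical read-write lock wrapper
 * could embed and initialize a shared-ownership record. The MYRWLOCK type is
 * made up; the call itself matches the signature above.
 *
 * @code
 * typedef struct MYRWLOCK
 * {
 *     RTLOCKVALRECSHRD ValidatorRead;   // one record shared by all readers
 *     // ... native lock data ...
 * } MYRWLOCK;
 *
 * static void myRwLockInit(MYRWLOCK *pThis)
 * {
 *     RTLockValidatorRecSharedInit(&pThis->ValidatorRead, NIL_RTLOCKVALCLASS,
 *                                  RTLOCKVAL_SUB_CLASS_NONE, pThis /*hLock*/,
 *                                  false /*fSignaller*/, true /*fEnabled*/,
 *                                  "myrw-%p", pThis);
 * }
 * @endcode
 */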
3451
3452
3453RTDECL(void) RTLockValidatorRecSharedDelete(PRTLOCKVALRECSHRD pRec)
3454{
3455 Assert(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC);
3456
3457 /*
3458 * Flip it into table realloc mode and take the destruction lock.
3459 */
3460 rtLockValidatorSerializeDestructEnter();
3461 while (!ASMAtomicCmpXchgBool(&pRec->fReallocating, true, false))
3462 {
3463 rtLockValidatorSerializeDestructLeave();
3464
3465 rtLockValidatorSerializeDetectionEnter();
3466 rtLockValidatorSerializeDetectionLeave();
3467
3468 rtLockValidatorSerializeDestructEnter();
3469 }
3470
3471 ASMAtomicWriteU32(&pRec->Core.u32Magic, RTLOCKVALRECSHRD_MAGIC_DEAD);
3472 RTLOCKVALCLASS hClass;
3473 ASMAtomicXchgHandle(&pRec->hClass, NIL_RTLOCKVALCLASS, &hClass);
3474 if (pRec->papOwners)
3475 {
3476 PRTLOCKVALRECSHRDOWN volatile *papOwners = pRec->papOwners;
3477 ASMAtomicUoWritePtr((void * volatile *)&pRec->papOwners, NULL);
3478 ASMAtomicUoWriteU32(&pRec->cAllocated, 0);
3479
3480 RTMemFree((void *)pRec->papOwners);
3481 }
3482 if (pRec->pSibling)
3483 rtLockValidatorUnlinkAllSiblings(&pRec->Core);
3484 ASMAtomicWriteBool(&pRec->fReallocating, false);
3485
3486 rtLockValidatorSerializeDestructLeave();
3487
3488 if (hClass != NIL_RTLOCKVALCLASS)
3489 RTLockValidatorClassRelease(hClass);
3490}
3491
3492
3493RTDECL(uint32_t) RTLockValidatorRecSharedSetSubClass(PRTLOCKVALRECSHRD pRec, uint32_t uSubClass)
3494{
3495 AssertPtrReturn(pRec, RTLOCKVAL_SUB_CLASS_INVALID);
3496 AssertReturn(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, RTLOCKVAL_SUB_CLASS_INVALID);
3497 AssertReturn( uSubClass >= RTLOCKVAL_SUB_CLASS_USER
3498 || uSubClass == RTLOCKVAL_SUB_CLASS_NONE
3499 || uSubClass == RTLOCKVAL_SUB_CLASS_ANY,
3500 RTLOCKVAL_SUB_CLASS_INVALID);
3501 return ASMAtomicXchgU32(&pRec->uSubClass, uSubClass);
3502}
3503
3504
3505/**
3506 * Locates an owner (thread) in a shared lock record.
3507 *
3508 * @returns Pointer to the owner entry on success, NULL on failure.
3509 * @param pShared The shared lock record.
3510 * @param hThread The thread (owner) to find.
3511 * @param piEntry Where to return the index of the entry in the
3512 * owner table. Optional.
3513 */
3514DECLINLINE(PRTLOCKVALRECUNION)
3515rtLockValidatorRecSharedFindOwner(PRTLOCKVALRECSHRD pShared, RTTHREAD hThread, uint32_t *piEntry)
3516{
3517 rtLockValidatorSerializeDetectionEnter();
3518
3519 PRTLOCKVALRECSHRDOWN volatile *papOwners = pShared->papOwners;
3520 if (papOwners)
3521 {
3522 uint32_t const cMax = pShared->cAllocated;
3523 for (uint32_t iEntry = 0; iEntry < cMax; iEntry++)
3524 {
3525 PRTLOCKVALRECUNION pEntry = (PRTLOCKVALRECUNION)rtLockValidatorUoReadSharedOwner(&papOwners[iEntry]);
3526 if (pEntry && pEntry->ShrdOwner.hThread == hThread)
3527 {
3528 rtLockValidatorSerializeDetectionLeave();
3529 if (piEntry)
3530 *piEntry = iEntry;
3531 return pEntry;
3532 }
3533 }
3534 }
3535
3536 rtLockValidatorSerializeDetectionLeave();
3537 return NULL;
3538}
3539
3540
3541RTDECL(int) RTLockValidatorRecSharedCheckOrder(PRTLOCKVALRECSHRD pRec, RTTHREAD hThreadSelf,
3542 PCRTLOCKVALSRCPOS pSrcPos, RTMSINTERVAL cMillies)
3543{
3544 /*
3545 * Validate and adjust input. Quit early if order validation is disabled.
3546 */
3547 PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
3548 AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
3549 if ( !pRecU->Shared.fEnabled
3550 || pRecU->Shared.hClass == NIL_RTLOCKVALCLASS
3551 || pRecU->Shared.hClass->cMsMinOrder == RT_INDEFINITE_WAIT
3552 || pRecU->Shared.hClass->cMsMinOrder > cMillies
3553 )
3554 return VINF_SUCCESS;
3555
3556 if (hThreadSelf == NIL_RTTHREAD)
3557 {
3558 hThreadSelf = RTThreadSelfAutoAdopt();
3559 AssertReturn(hThreadSelf != NIL_RTTHREAD, VERR_SEM_LV_INTERNAL_ERROR);
3560 }
3561 AssertReturn(hThreadSelf->u32Magic == RTTHREADINT_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
3562 Assert(hThreadSelf == RTThreadSelf());
3563
3564 /*
3565 * Detect recursion as it isn't subject to order restrictions.
3566 */
3567 PRTLOCKVALRECUNION pEntry = rtLockValidatorRecSharedFindOwner(&pRecU->Shared, hThreadSelf, NULL);
3568 if (pEntry)
3569 return VINF_SUCCESS;
3570
3571 return rtLockValidatorStackCheckLockingOrder(pRecU->Shared.hClass, pRecU->Shared.uSubClass, hThreadSelf, pRecU, pSrcPos);
3572}
3573
3574
3575RTDECL(int) RTLockValidatorRecSharedCheckBlocking(PRTLOCKVALRECSHRD pRec, RTTHREAD hThreadSelf,
3576 PCRTLOCKVALSRCPOS pSrcPos, bool fRecursiveOk, RTMSINTERVAL cMillies,
3577 RTTHREADSTATE enmSleepState, bool fReallySleeping)
3578{
3579 /*
3580 * Fend off wild life.
3581 */
3582 PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
3583 AssertPtrReturn(pRecU, VERR_SEM_LV_INVALID_PARAMETER);
3584 AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
3585 if (!pRecU->Shared.fEnabled)
3586 return VINF_SUCCESS;
3587
3588 PRTTHREADINT pThreadSelf = hThreadSelf;
3589 AssertPtrReturn(pThreadSelf, VERR_SEM_LV_INVALID_PARAMETER);
3590 AssertReturn(pThreadSelf->u32Magic == RTTHREADINT_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
3591 Assert(pThreadSelf == RTThreadSelf());
3592
3593 AssertReturn(RTTHREAD_IS_SLEEPING(enmSleepState), VERR_SEM_LV_INVALID_PARAMETER);
3594
3595 RTTHREADSTATE enmThreadState = rtThreadGetState(pThreadSelf);
3596 if (RT_UNLIKELY(enmThreadState != RTTHREADSTATE_RUNNING))
3597 {
3598 AssertReturn( enmThreadState == RTTHREADSTATE_TERMINATED /* rtThreadRemove uses locks too */
3599 || enmThreadState == RTTHREADSTATE_INITIALIZING /* rtThreadInsert uses locks too */
3600 , VERR_SEM_LV_INVALID_PARAMETER);
3601 enmSleepState = enmThreadState;
3602 }
3603
3604 /*
3605 * Record the location.
3606 */
3607 rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pRec, pRecU);
3608 rtLockValidatorSrcPosCopy(&pThreadSelf->LockValidator.SrcPos, pSrcPos);
3609 ASMAtomicWriteBool(&pThreadSelf->LockValidator.fInValidator, true);
3610 pThreadSelf->LockValidator.enmRecState = enmSleepState;
3611 rtThreadSetState(pThreadSelf, enmSleepState);
3612
3613 /*
3614 * Don't do deadlock detection if we're recursing.
3615 */
3616 int rc = VINF_SUCCESS;
3617 PRTLOCKVALRECUNION pEntry = !pRecU->Shared.fSignaller
3618 ? rtLockValidatorRecSharedFindOwner(&pRecU->Shared, pThreadSelf, NULL)
3619 : NULL;
3620 if (pEntry)
3621 {
3622 if ( !fRecursiveOk
3623 || ( pRec->hClass
3624 && !pRec->hClass->fRecursionOk)
3625 )
3626 {
3627 rtLockValComplainFirst("Recursion not allowed!", pSrcPos, pThreadSelf, pRecU, true);
3628 rtLockValComplainPanic();
3629 rc = VERR_SEM_LV_NESTED;
3630 }
3631 }
3632 /*
3633 * Perform deadlock detection.
3634 */
3635 else if ( pRec->hClass
3636 && ( pRec->hClass->cMsMinDeadlock == RT_INDEFINITE_WAIT
3637 || pRec->hClass->cMsMinDeadlock > cMillies))
3638 rc = VINF_SUCCESS;
3639 else if (!rtLockValidatorIsSimpleNoDeadlockCase(pRecU))
3640 rc = rtLockValidatorDeadlockDetection(pRecU, pThreadSelf, pSrcPos);
3641
3642 if (RT_SUCCESS(rc))
3643 ASMAtomicWriteBool(&pThreadSelf->fReallySleeping, fReallySleeping);
3644 else
3645 {
3646 rtThreadSetState(pThreadSelf, enmThreadState);
3647 rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pRec, NULL);
3648 }
3649 ASMAtomicWriteBool(&pThreadSelf->LockValidator.fInValidator, false);
3650 return rc;
3651}
3652RT_EXPORT_SYMBOL(RTLockValidatorRecSharedCheckBlocking);
3653
3654
3655RTDECL(int) RTLockValidatorRecSharedCheckOrderAndBlocking(PRTLOCKVALRECSHRD pRec, RTTHREAD hThreadSelf,
3656 PCRTLOCKVALSRCPOS pSrcPos, bool fRecursiveOk, RTMSINTERVAL cMillies,
3657 RTTHREADSTATE enmSleepState, bool fReallySleeping)
3658{
3659 int rc = RTLockValidatorRecSharedCheckOrder(pRec, hThreadSelf, pSrcPos, cMillies);
3660 if (RT_SUCCESS(rc))
3661 rc = RTLockValidatorRecSharedCheckBlocking(pRec, hThreadSelf, pSrcPos, fRecursiveOk, cMillies,
3662 enmSleepState, fReallySleeping);
3663 return rc;
3664}
3665RT_EXPORT_SYMBOL(RTLockValidatorRecSharedCheckOrderAndBlocking);
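/** @remarks Sketch of a read-lock acquisition path for the hypothetical
 * MYRWLOCK above. RTTHREADSTATE_RW_READ and RTThreadUnblocked() are assumed
 * to be the declarations from iprt/thread.h; myRwLockWaitForReadNative() is
 * made up.
 *
 * @code
 * static int myRwLockEnterRead(MYRWLOCK *pThis, RTMSINTERVAL cMillies)
 * {
 *     RTTHREAD hThreadSelf = RTThreadSelf();
 *     int rc = RTLockValidatorRecSharedCheckOrderAndBlocking(&pThis->ValidatorRead, hThreadSelf,
 *                                                            NULL /*pSrcPos*/, true /*fRecursiveOk*/,
 *                                                            cMillies, RTTHREADSTATE_RW_READ,
 *                                                            true /*fReallySleeping*/);
 *     if (RT_FAILURE(rc))
 *         return rc;
 *
 *     rc = myRwLockWaitForReadNative(pThis, cMillies);
 *     RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_READ);
 *     if (RT_SUCCESS(rc))
 *         RTLockValidatorRecSharedAddOwner(&pThis->ValidatorRead, hThreadSelf, NULL /*pSrcPos*/);
 *     return rc;
 * }
 * @endcode
 */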
3666
3667
3668/**
3669 * Allocates and initializes an owner entry for the shared lock record.
3670 *
3671 * @returns The new owner entry.
3672 * @param pRec The shared lock record.
3673 * @param pThreadSelf The calling thread and owner. Used for record
3674 * initialization and allocation.
3675 * @param pSrcPos The source position.
3676 */
3677DECLINLINE(PRTLOCKVALRECUNION)
3678rtLockValidatorRecSharedAllocOwner(PRTLOCKVALRECSHRD pRec, PRTTHREADINT pThreadSelf, PCRTLOCKVALSRCPOS pSrcPos)
3679{
3680 PRTLOCKVALRECUNION pEntry;
3681
3682 /*
3683 * Check if the thread has any statically allocated records we can easily
3684 * make use of.
3685 */
3686 unsigned iEntry = ASMBitFirstSetU32(ASMAtomicUoReadU32(&pThreadSelf->LockValidator.bmFreeShrdOwners));
3687 if ( iEntry > 0
3688 && ASMAtomicBitTestAndClear(&pThreadSelf->LockValidator.bmFreeShrdOwners, iEntry - 1))
3689 {
3690 pEntry = (PRTLOCKVALRECUNION)&pThreadSelf->LockValidator.aShrdOwners[iEntry - 1];
3691 Assert(!pEntry->ShrdOwner.fReserved);
3692 pEntry->ShrdOwner.fStaticAlloc = true;
3693 rtThreadGet(pThreadSelf);
3694 }
3695 else
3696 {
3697 pEntry = (PRTLOCKVALRECUNION)RTMemAlloc(sizeof(RTLOCKVALRECSHRDOWN));
3698 if (RT_UNLIKELY(!pEntry))
3699 return NULL;
3700 pEntry->ShrdOwner.fStaticAlloc = false;
3701 }
3702
3703 pEntry->Core.u32Magic = RTLOCKVALRECSHRDOWN_MAGIC;
3704 pEntry->ShrdOwner.cRecursion = 1;
3705 pEntry->ShrdOwner.fReserved = true;
3706 pEntry->ShrdOwner.hThread = pThreadSelf;
3707 pEntry->ShrdOwner.pDown = NULL;
3708 pEntry->ShrdOwner.pSharedRec = pRec;
3709#if HC_ARCH_BITS == 32
3710 pEntry->ShrdOwner.pvReserved = NULL;
3711#endif
3712 if (pSrcPos)
3713 pEntry->ShrdOwner.SrcPos = *pSrcPos;
3714 else
3715 rtLockValidatorSrcPosInit(&pEntry->ShrdOwner.SrcPos);
3716 return pEntry;
3717}
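/** @remarks The static-slot fast path above is a small lock-free bitmap
 * allocator: a set bit in bmFreeShrdOwners means "slot free". Stripped of the
 * surrounding record logic, the claiming pattern looks like this (sketch; the
 * retry loop is added here for generality, while the code above simply falls
 * back to RTMemAlloc after one attempt):
 *
 * @code
 * // Claim a free slot; returns the slot index or -1 if none are free.
 * static int32_t myClaimSlot(uint32_t volatile *pbmFree)
 * {
 *     for (;;)
 *     {
 *         unsigned iBit = ASMBitFirstSetU32(ASMAtomicUoReadU32(pbmFree)); // 1-based, 0 = none set
 *         if (iBit == 0)
 *             return -1;
 *         if (ASMAtomicBitTestAndClear(pbmFree, iBit - 1))
 *             return (int32_t)(iBit - 1);  // won the race for this slot
 *         // lost the race; rescan
 *     }
 * }
 * @endcode
 *
 * Releasing a slot is a single ASMAtomicBitSet, as done in
 * rtLockValidatorRecSharedFreeOwner below.
 */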
3718
3719
3720/**
3721 * Frees an owner entry allocated by rtLockValidatorRecSharedAllocOwner.
3722 *
3723 * @param pEntry The owner entry.
3724 */
3725DECLINLINE(void) rtLockValidatorRecSharedFreeOwner(PRTLOCKVALRECSHRDOWN pEntry)
3726{
3727 if (pEntry)
3728 {
3729 Assert(pEntry->Core.u32Magic == RTLOCKVALRECSHRDOWN_MAGIC);
3730 ASMAtomicWriteU32(&pEntry->Core.u32Magic, RTLOCKVALRECSHRDOWN_MAGIC_DEAD);
3731
3732 PRTTHREADINT pThread;
3733 ASMAtomicXchgHandle(&pEntry->hThread, NIL_RTTHREAD, &pThread);
3734
3735 Assert(pEntry->fReserved);
3736 pEntry->fReserved = false;
3737
3738 if (pEntry->fStaticAlloc)
3739 {
3740 AssertPtrReturnVoid(pThread);
3741 AssertReturnVoid(pThread->u32Magic == RTTHREADINT_MAGIC);
3742
3743 uintptr_t iEntry = pEntry - &pThread->LockValidator.aShrdOwners[0];
3744 AssertReleaseReturnVoid(iEntry < RT_ELEMENTS(pThread->LockValidator.aShrdOwners));
3745
3746 Assert(!ASMBitTest(&pThread->LockValidator.bmFreeShrdOwners, iEntry));
3747 ASMAtomicBitSet(&pThread->LockValidator.bmFreeShrdOwners, iEntry);
3748
3749 rtThreadRelease(pThread);
3750 }
3751 else
3752 {
3753 rtLockValidatorSerializeDestructEnter();
3754 rtLockValidatorSerializeDestructLeave();
3755
3756 RTMemFree(pEntry);
3757 }
3758 }
3759}
3760
3761
3762/**
3763 * Make more room in the table.
3764 *
3765 * @retval true on success.
3766 * @retval false if we're out of memory or have run into a bad race condition
3767 * (probably a bug somewhere); the detection lock is no longer held then.
3768 *
3769 * @param pShared The shared lock record.
3770 */
3771static bool rtLockValidatorRecSharedMakeRoom(PRTLOCKVALRECSHRD pShared)
3772{
3773 for (unsigned i = 0; i < 1000; i++)
3774 {
3775 /*
3776 * Switch to the other data access direction.
3777 */
3778 rtLockValidatorSerializeDetectionLeave();
3779 if (i >= 10)
3780 {
3781 Assert(i != 10 && i != 100); /* debug canary: trips at iterations 10 and 100 to flag unusual contention */
3782 RTThreadSleep(i >= 100); /* 0 ms (yield) while i < 100, 1 ms thereafter */
3783 }
3784 rtLockValidatorSerializeDestructEnter();
3785
3786 /*
3787 * Try to grab the privilege of reallocating the table.
3788 */
3789 if ( pShared->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
3790 && ASMAtomicCmpXchgBool(&pShared->fReallocating, true, false))
3791 {
3792 uint32_t cAllocated = pShared->cAllocated;
3793 if (cAllocated < pShared->cEntries)
3794 {
3795 /*
3796 * Ok, still not enough space. Reallocate the table.
3797 */
3798#if 0 /** @todo enable this after making sure growing works flawlessly. */
3799 uint32_t cInc = RT_ALIGN_32(pShared->cEntries - cAllocated, 16);
3800#else
3801 uint32_t cInc = RT_ALIGN_32(pShared->cEntries - cAllocated, 1);
3802#endif
3803 PRTLOCKVALRECSHRDOWN *papOwners;
3804 papOwners = (PRTLOCKVALRECSHRDOWN *)RTMemRealloc((void *)pShared->papOwners,
3805 (cAllocated + cInc) * sizeof(void *));
3806 if (!papOwners)
3807 {
3808 ASMAtomicWriteBool(&pShared->fReallocating, false);
3809 rtLockValidatorSerializeDestructLeave();
3810 /* RTMemRealloc will assert */
3811 return false;
3812 }
3813
3814 while (cInc-- > 0)
3815 {
3816 papOwners[cAllocated] = NULL;
3817 cAllocated++;
3818 }
3819
3820 ASMAtomicWritePtr((void * volatile *)&pShared->papOwners, papOwners);
3821 ASMAtomicWriteU32(&pShared->cAllocated, cAllocated);
3822 }
3823 ASMAtomicWriteBool(&pShared->fReallocating, false);
3824 }
3825 rtLockValidatorSerializeDestructLeave();
3826
3827 rtLockValidatorSerializeDetectionEnter();
3828 if (RT_UNLIKELY(pShared->Core.u32Magic != RTLOCKVALRECSHRD_MAGIC))
3829 break;
3830
3831 if (pShared->cAllocated >= pShared->cEntries)
3832 return true;
3833 }
3834
3835 rtLockValidatorSerializeDetectionLeave();
3836 AssertFailed(); /* too many iterations or destroyed while racing. */
3837 return false;
3838}
3839
3840
3841/**
3842 * Adds an owner entry to a shared lock record.
3843 *
3844 * @returns true on success, false on a serious race or if we're out of memory.
3845 * @param pShared The shared lock record.
3846 * @param pEntry The owner entry.
3847 */
3848DECLINLINE(bool) rtLockValidatorRecSharedAddOwner(PRTLOCKVALRECSHRD pShared, PRTLOCKVALRECSHRDOWN pEntry)
3849{
3850 rtLockValidatorSerializeDetectionEnter();
3851 if (RT_LIKELY(pShared->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC)) /* paranoia */
3852 {
3853 if ( ASMAtomicIncU32(&pShared->cEntries) > pShared->cAllocated /** @todo add fudge */
3854 && !rtLockValidatorRecSharedMakeRoom(pShared))
3855 return false; /* the worker has already left the detection lock */
3856
3857 PRTLOCKVALRECSHRDOWN volatile *papOwners = pShared->papOwners;
3858 uint32_t const cMax = pShared->cAllocated;
3859 for (unsigned i = 0; i < 100; i++)
3860 {
3861 for (uint32_t iEntry = 0; iEntry < cMax; iEntry++)
3862 {
3863 if (ASMAtomicCmpXchgPtr((void * volatile *)&papOwners[iEntry], pEntry, NULL))
3864 {
3865 rtLockValidatorSerializeDetectionLeave();
3866 return true;
3867 }
3868 }
3869 Assert(i != 25); /* debug canary: trips after 25 failed passes over the table */
3870 }
3871 AssertFailed();
3872 }
3873 rtLockValidatorSerializeDetectionLeave();
3874 return false;
3875}
3876
3877
3878/**
3879 * Remove an owner entry from a shared lock record and free it.
3880 *
3881 * @param pShared The shared lock record.
3882 * @param pEntry The owner entry to remove.
3883 * @param iEntry The last known index.
3884 */
3885DECLINLINE(void) rtLockValidatorRecSharedRemoveAndFreeOwner(PRTLOCKVALRECSHRD pShared, PRTLOCKVALRECSHRDOWN pEntry,
3886 uint32_t iEntry)
3887{
3888 /*
3889 * Remove it from the table.
3890 */
3891 rtLockValidatorSerializeDetectionEnter();
3892 AssertReturnVoidStmt(pShared->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, rtLockValidatorSerializeDetectionLeave());
3893 if (RT_UNLIKELY( iEntry >= pShared->cAllocated
3894 || !ASMAtomicCmpXchgPtr((void * volatile *)&pShared->papOwners[iEntry], NULL, pEntry)))
3895 {
3896 /* This shouldn't happen; fall back to scanning the whole table. */
3897 AssertFailed();
3898 PRTLOCKVALRECSHRDOWN volatile *papOwners = pShared->papOwners;
3899 uint32_t const cMax = pShared->cAllocated;
3900 for (iEntry = 0; iEntry < cMax; iEntry++)
3901 if (ASMAtomicCmpXchgPtr((void * volatile *)&papOwners[iEntry], NULL, pEntry))
3902 break;
3903 AssertReturnVoidStmt(iEntry < cMax, rtLockValidatorSerializeDetectionLeave());
3904 }
3905 uint32_t cNow = ASMAtomicDecU32(&pShared->cEntries);
3906 Assert(!(cNow & RT_BIT_32(31))); NOREF(cNow);
3907 rtLockValidatorSerializeDetectionLeave();
3908
3909 /*
3910 * Successfully removed, now free it.
3911 */
3912 rtLockValidatorRecSharedFreeOwner(pEntry);
3913}
3914
3915
3916RTDECL(void) RTLockValidatorRecSharedResetOwner(PRTLOCKVALRECSHRD pRec, RTTHREAD hThread, PCRTLOCKVALSRCPOS pSrcPos)
3917{
3918 AssertReturnVoid(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC);
3919 if (!pRec->fEnabled)
3920 return;
3921 AssertReturnVoid(hThread == NIL_RTTHREAD || hThread->u32Magic == RTTHREADINT_MAGIC);
3922 AssertReturnVoid(pRec->fSignaller);
3923
3924 /*
3925 * Free all current owners.
3926 */
3927 rtLockValidatorSerializeDetectionEnter();
3928 while (ASMAtomicUoReadU32(&pRec->cEntries) > 0)
3929 {
3930 AssertReturnVoidStmt(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, rtLockValidatorSerializeDetectionLeave());
3931 uint32_t iEntry = 0;
3932 uint32_t cEntries = pRec->cAllocated;
3933 PRTLOCKVALRECSHRDOWN volatile *papEntries = pRec->papOwners;
3934 while (iEntry < cEntries)
3935 {
3936 PRTLOCKVALRECSHRDOWN pEntry = (PRTLOCKVALRECSHRDOWN)ASMAtomicXchgPtr((void * volatile *)&papEntries[iEntry], NULL);
3937 if (pEntry)
3938 {
3939 ASMAtomicDecU32(&pRec->cEntries);
3940 rtLockValidatorSerializeDetectionLeave();
3941
3942 rtLockValidatorRecSharedFreeOwner(pEntry);
3943
3944 rtLockValidatorSerializeDetectionEnter();
3945 if (ASMAtomicUoReadU32(&pRec->cEntries) == 0)
3946 break;
3947 cEntries = pRec->cAllocated;
3948 papEntries = pRec->papOwners;
3949 }
3950 iEntry++;
3951 }
3952 }
3953 rtLockValidatorSerializeDetectionLeave();
3954
3955 if (hThread != NIL_RTTHREAD)
3956 {
3957 /*
3958 * Allocate a new owner entry and insert it into the table.
3959 */
3960 PRTLOCKVALRECUNION pEntry = rtLockValidatorRecSharedAllocOwner(pRec, hThread, pSrcPos);
3961 if ( pEntry
3962 && !rtLockValidatorRecSharedAddOwner(pRec, &pEntry->ShrdOwner))
3963 rtLockValidatorRecSharedFreeOwner(&pEntry->ShrdOwner);
3964 }
3965}
3966RT_EXPORT_SYMBOL(RTLockValidatorRecSharedResetOwner);
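/** @remarks For signaller records (fSignaller = true at init), this is the
 * natural way for an event-semaphore style implementation to (re)designate
 * the one thread allowed to signal. Sketch with a made-up MYEVENT wrapper:
 *
 * @code
 * typedef struct MYEVENT
 * {
 *     RTLOCKVALRECSHRD SignallerRec;
 *     // ... native event data ...
 * } MYEVENT;
 *
 * static void myEventSetSignaller(MYEVENT *pThis, RTTHREAD hThread)
 * {
 *     // Drops all currently registered signallers and records hThread
 *     // (or nobody, when NIL_RTTHREAD) as the sole legitimate signaller.
 *     RTLockValidatorRecSharedResetOwner(&pThis->SignallerRec, hThread, NULL /*pSrcPos*/);
 * }
 * @endcode
 *
 * RTLockValidatorRecSharedCheckSignaller() further down is the check the
 * signalling path would then perform against the same record.
 */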
3967
3968
3969RTDECL(void) RTLockValidatorRecSharedAddOwner(PRTLOCKVALRECSHRD pRec, RTTHREAD hThread, PCRTLOCKVALSRCPOS pSrcPos)
3970{
3971 AssertReturnVoid(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC);
3972 if (!pRec->fEnabled)
3973 return;
3974 if (hThread == NIL_RTTHREAD)
3975 {
3976 hThread = RTThreadSelfAutoAdopt();
3977 AssertReturnVoid(hThread != NIL_RTTHREAD);
3978 }
3979 AssertReturnVoid(hThread->u32Magic == RTTHREADINT_MAGIC);
3980
3981 /*
3982 * Recursive?
3983 *
3984 * Note! This code could be optimized to avoid scanning the table on
3985 * insert. However, that's annoying work that makes the code big,
3986 * so it can wait till later.
3987 */
3988 PRTLOCKVALRECUNION pEntry = rtLockValidatorRecSharedFindOwner(pRec, hThread, NULL);
3989 if (pEntry)
3990 {
3991 Assert(!pRec->fSignaller);
3992 pEntry->ShrdOwner.cRecursion++;
3993 rtLockValidatorStackPushRecursion(hThread, pEntry, pSrcPos);
3994 return;
3995 }
3996
3997 /*
3998 * Allocate a new owner entry and insert it into the table.
3999 */
4000 pEntry = rtLockValidatorRecSharedAllocOwner(pRec, hThread, pSrcPos);
4001 if (pEntry)
4002 {
4003 if (rtLockValidatorRecSharedAddOwner(pRec, &pEntry->ShrdOwner))
4004 {
4005 if (!pRec->fSignaller)
4006 rtLockValidatorStackPush(hThread, pEntry);
4007 }
4008 else
4009 rtLockValidatorRecSharedFreeOwner(&pEntry->ShrdOwner);
4010 }
4011}
4012RT_EXPORT_SYMBOL(RTLockValidatorRecSharedAddOwner);
4013
4014
4015RTDECL(void) RTLockValidatorRecSharedRemoveOwner(PRTLOCKVALRECSHRD pRec, RTTHREAD hThread)
4016{
4017 AssertReturnVoid(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC);
4018 if (!pRec->fEnabled)
4019 return;
4020 if (hThread == NIL_RTTHREAD)
4021 {
4022 hThread = RTThreadSelfAutoAdopt();
4023 AssertReturnVoid(hThread != NIL_RTTHREAD);
4024 }
4025 AssertReturnVoid(hThread->u32Magic == RTTHREADINT_MAGIC);
4026
4027 /*
4028 * Find the entry and hope it's a recursive one.
4029 */
4030 uint32_t iEntry = UINT32_MAX; /* shuts up gcc */
4031 PRTLOCKVALRECUNION pEntry = rtLockValidatorRecSharedFindOwner(pRec, hThread, &iEntry);
4032 AssertReturnVoid(pEntry);
4033 AssertReturnVoid(pEntry->ShrdOwner.cRecursion > 0);
4034
4035 uint32_t c = --pEntry->ShrdOwner.cRecursion;
4036 if (c == 0)
4037 {
4038 if (!pRec->fSignaller)
4039 rtLockValidatorStackPop(hThread, (PRTLOCKVALRECUNION)pEntry);
4040 rtLockValidatorRecSharedRemoveAndFreeOwner(pRec, &pEntry->ShrdOwner, iEntry);
4041 }
4042 else
4043 {
4044 Assert(!pRec->fSignaller);
4045 rtLockValidatorStackPopRecursion(hThread, pEntry);
4046 }
4047}
4048RT_EXPORT_SYMBOL(RTLockValidatorRecSharedRemoveOwner);
4049
4050
4051RTDECL(bool) RTLockValidatorRecSharedIsOwner(PRTLOCKVALRECSHRD pRec, RTTHREAD hThread)
4052{
4053 /* Validate and resolve input. */
4054 AssertReturn(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, false);
4055 if (!pRec->fEnabled)
4056 return false;
4057 if (hThread == NIL_RTTHREAD)
4058 {
4059 hThread = RTThreadSelfAutoAdopt();
4060 AssertReturn(hThread != NIL_RTTHREAD, false);
4061 }
4062 AssertReturn(hThread->u32Magic == RTTHREADINT_MAGIC, false);
4063
4064 /* Do the job. */
4065 PRTLOCKVALRECUNION pEntry = rtLockValidatorRecSharedFindOwner(pRec, hThread, NULL);
4066 return pEntry != NULL;
4067}
4068RT_EXPORT_SYMBOL(RTLockValidatorRecSharedIsOwner);
4069
4070
4071RTDECL(int) RTLockValidatorRecSharedCheckAndRelease(PRTLOCKVALRECSHRD pRec, RTTHREAD hThreadSelf)
4072{
4073 AssertReturn(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
4074 if (!pRec->fEnabled)
4075 return VINF_SUCCESS;
4076 if (hThreadSelf == NIL_RTTHREAD)
4077 {
4078 hThreadSelf = RTThreadSelfAutoAdopt();
4079 AssertReturn(hThreadSelf != NIL_RTTHREAD, VERR_SEM_LV_INTERNAL_ERROR);
4080 }
4081 Assert(hThreadSelf == RTThreadSelf());
4082 AssertReturn(hThreadSelf->u32Magic == RTTHREADINT_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
4083
4084 /*
4085 * Locate the entry for this thread in the table.
4086 */
4087 uint32_t iEntry = 0;
4088 PRTLOCKVALRECUNION pEntry = rtLockValidatorRecSharedFindOwner(pRec, hThreadSelf, &iEntry);
4089 if (RT_UNLIKELY(!pEntry))
4090 {
4091 rtLockValComplainFirst("Not owner (shared)!", NULL, hThreadSelf, (PRTLOCKVALRECUNION)pRec, true);
4092 rtLockValComplainPanic();
4093 return VERR_SEM_LV_NOT_OWNER;
4094 }
4095
4096 /*
4097 * Check the release order.
4098 */
4099 if ( pRec->hClass != NIL_RTLOCKVALCLASS
4100 && pRec->hClass->fStrictReleaseOrder
4101 && pRec->hClass->cMsMinOrder != RT_INDEFINITE_WAIT
4102 )
4103 {
4104 int rc = rtLockValidatorStackCheckReleaseOrder(hThreadSelf, (PRTLOCKVALRECUNION)pEntry);
4105 if (RT_FAILURE(rc))
4106 return rc;
4107 }
4108
4109 /*
4110 * Release the ownership or unwind a level of recursion.
4111 */
4112 Assert(pEntry->ShrdOwner.cRecursion > 0);
4113 uint32_t c = --pEntry->ShrdOwner.cRecursion;
4114 if (c == 0)
4115 {
4116 rtLockValidatorStackPop(hThreadSelf, pEntry);
4117 rtLockValidatorRecSharedRemoveAndFreeOwner(pRec, &pEntry->ShrdOwner, iEntry);
4118 }
4119 else
4120 rtLockValidatorStackPopRecursion(hThreadSelf, pEntry);
4121
4122 return VINF_SUCCESS;
4123}
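/** @remarks Matching read-release sketch for the hypothetical MYRWLOCK above:
 * validate and unregister the ownership first, then release the native lock.
 *
 * @code
 * static int myRwLockLeaveRead(MYRWLOCK *pThis)
 * {
 *     int rc = RTLockValidatorRecSharedCheckAndRelease(&pThis->ValidatorRead, NIL_RTTHREAD);
 *     if (RT_FAILURE(rc))
 *         return rc;                            // not an owner or bad release order
 *     return myRwLockReleaseReadNative(pThis);  // hypothetical native release
 * }
 * @endcode
 */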
4124
4125
4126RTDECL(int) RTLockValidatorRecSharedCheckSignaller(PRTLOCKVALRECSHRD pRec, RTTHREAD hThreadSelf)
4127{
4128 AssertReturn(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
4129 if (!pRec->fEnabled)
4130 return VINF_SUCCESS;
4131 if (hThreadSelf == NIL_RTTHREAD)
4132 {
4133 hThreadSelf = RTThreadSelfAutoAdopt();
4134 AssertReturn(hThreadSelf != NIL_RTTHREAD, VERR_SEM_LV_INTERNAL_ERROR);
4135 }
4136 Assert(hThreadSelf == RTThreadSelf());
4137 AssertReturn(hThreadSelf->u32Magic == RTTHREADINT_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
4138
4139 /*
4140 * Locate the entry for this thread in the table.
4141 */
4142 uint32_t iEntry = 0;
4143 PRTLOCKVALRECUNION pEntry = rtLockValidatorRecSharedFindOwner(pRec, hThreadSelf, &iEntry);
4144 if (RT_UNLIKELY(!pEntry))
4145 {
4146 rtLockValComplainFirst("Invalid signaller!", NULL, hThreadSelf, (PRTLOCKVALRECUNION)pRec, true);
4147 rtLockValComplainPanic();
4148 return VERR_SEM_LV_NOT_SIGNALLER;
4149 }
4150 return VINF_SUCCESS;
4151}
4152
4153
4154RTDECL(int32_t) RTLockValidatorWriteLockGetCount(RTTHREAD Thread)
4155{
4156 if (Thread == NIL_RTTHREAD)
4157 return 0;
4158
4159 PRTTHREADINT pThread = rtThreadGet(Thread);
4160 if (!pThread)
4161 return VERR_INVALID_HANDLE;
4162 int32_t cWriteLocks = ASMAtomicReadS32(&pThread->LockValidator.cWriteLocks);
4163 rtThreadRelease(pThread);
4164 return cWriteLocks;
4165}
4166RT_EXPORT_SYMBOL(RTLockValidatorWriteLockGetCount);
4167
4168
4169RTDECL(void) RTLockValidatorWriteLockInc(RTTHREAD Thread)
4170{
4171 PRTTHREADINT pThread = rtThreadGet(Thread);
4172 AssertReturnVoid(pThread);
4173 ASMAtomicIncS32(&pThread->LockValidator.cWriteLocks);
4174 rtThreadRelease(pThread);
4175}
4176RT_EXPORT_SYMBOL(RTLockValidatorWriteLockInc);
4177
4178
4179RTDECL(void) RTLockValidatorWriteLockDec(RTTHREAD Thread)
4180{
4181 PRTTHREADINT pThread = rtThreadGet(Thread);
4182 AssertReturnVoid(pThread);
4183 ASMAtomicDecS32(&pThread->LockValidator.cWriteLocks);
4184 rtThreadRelease(pThread);
4185}
4186RT_EXPORT_SYMBOL(RTLockValidatorWriteLockDec);
4187
4188
4189RTDECL(int32_t) RTLockValidatorReadLockGetCount(RTTHREAD Thread)
4190{
4191 if (Thread == NIL_RTTHREAD)
4192 return 0;
4193
4194 PRTTHREADINT pThread = rtThreadGet(Thread);
4195 if (!pThread)
4196 return VERR_INVALID_HANDLE;
4197 int32_t cReadLocks = ASMAtomicReadS32(&pThread->LockValidator.cReadLocks);
4198 rtThreadRelease(pThread);
4199 return cReadLocks;
4200}
4201RT_EXPORT_SYMBOL(RTLockValidatorReadLockGetCount);
4202
4203
4204RTDECL(void) RTLockValidatorReadLockInc(RTTHREAD Thread)
4205{
4206 PRTTHREADINT pThread = rtThreadGet(Thread);
4207 AssertReturnVoid(pThread);
4208 ASMAtomicIncS32(&pThread->LockValidator.cReadLocks);
4209 rtThreadRelease(pThread);
4210}
4211RT_EXPORT_SYMBOL(RTLockValidatorReadLockInc);
4212
4213
4214RTDECL(void) RTLockValidatorReadLockDec(RTTHREAD Thread)
4215{
4216 PRTTHREADINT pThread = rtThreadGet(Thread);
4217 AssertReturnVoid(pThread);
4218 ASMAtomicDecS32(&pThread->LockValidator.cReadLocks);
4219 rtThreadRelease(pThread);
4220}
4221RT_EXPORT_SYMBOL(RTLockValidatorReadLockDec);
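/** @remarks These four counters are handy for locks that don't carry full
 * validator records: bracketing the native operations keeps
 * RTLockValidatorReadLockGetCount() / RTLockValidatorWriteLockGetCount()
 * accurate for assertions. Sketch (the myRawLock* calls are hypothetical):
 *
 * @code
 * myRawLockAcquireShared(pThis);
 * RTLockValidatorReadLockInc(RTThreadSelf());
 * // ... critical section ...
 * RTLockValidatorReadLockDec(RTThreadSelf());
 * myRawLockReleaseShared(pThis);
 * @endcode
 */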
4222
4223
4224RTDECL(void *) RTLockValidatorQueryBlocking(RTTHREAD hThread)
4225{
4226 void *pvLock = NULL;
4227 PRTTHREADINT pThread = rtThreadGet(hThread);
4228 if (pThread)
4229 {
4230 RTTHREADSTATE enmState = rtThreadGetState(pThread);
4231 if (RTTHREAD_IS_SLEEPING(enmState))
4232 {
4233 rtLockValidatorSerializeDetectionEnter();
4234
4235 enmState = rtThreadGetState(pThread);
4236 if (RTTHREAD_IS_SLEEPING(enmState))
4237 {
4238 PRTLOCKVALRECUNION pRec = rtLockValidatorReadRecUnionPtr(&pThread->LockValidator.pRec);
4239 if (pRec)
4240 {
4241 switch (pRec->Core.u32Magic)
4242 {
4243 case RTLOCKVALRECEXCL_MAGIC:
4244 pvLock = pRec->Excl.hLock;
4245 break;
4246
4247 case RTLOCKVALRECSHRDOWN_MAGIC:
4248 pRec = (PRTLOCKVALRECUNION)pRec->ShrdOwner.pSharedRec;
4249 if (!pRec || pRec->Core.u32Magic != RTLOCKVALRECSHRD_MAGIC)
4250 break;
4251 case RTLOCKVALRECSHRD_MAGIC: /* fall thru from above */
4252 pvLock = pRec->Shared.hLock;
4253 break;
4254 }
4255 if (RTThreadGetState(pThread) != enmState)
4256 pvLock = NULL;
4257 }
4258 }
4259
4260 rtLockValidatorSerializeDetectionLeave();
4261 }
4262 rtThreadRelease(pThread);
4263 }
4264 return pvLock;
4265}
4266RT_EXPORT_SYMBOL(RTLockValidatorQueryBlocking);
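/** @remarks Diagnostic sketch: dump what a thread is currently blocked on.
 * RTThreadGetName() and RTAssertMsg2() are assumed to be the usual
 * iprt/thread.h and iprt/assert.h declarations.
 *
 * @code
 * static void myDumpBlockedThread(RTTHREAD hThread)
 * {
 *     void *pvLock = RTLockValidatorQueryBlocking(hThread);
 *     if (pvLock)
 *         RTAssertMsg2("thread %s is blocked on lock %p\n", RTThreadGetName(hThread), pvLock);
 * }
 * @endcode
 */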
4267
4268
4269RTDECL(bool) RTLockValidatorIsBlockedThreadInValidator(RTTHREAD hThread)
4270{
4271 bool fRet = false;
4272 PRTTHREADINT pThread = rtThreadGet(hThread);
4273 if (pThread)
4274 {
4275 fRet = ASMAtomicReadBool(&pThread->LockValidator.fInValidator);
4276 rtThreadRelease(pThread);
4277 }
4278 return fRet;
4279}
4280RT_EXPORT_SYMBOL(RTLockValidatorIsBlockedThreadInValidator);
4281
4282
4283RTDECL(bool) RTLockValidatorSetEnabled(bool fEnabled)
4284{
4285 return ASMAtomicXchgBool(&g_fLockValidatorEnabled, fEnabled);
4286}
4287RT_EXPORT_SYMBOL(RTLockValidatorSetEnabled);
4288
4289
4290RTDECL(bool) RTLockValidatorIsEnabled(void)
4291{
4292 return ASMAtomicUoReadBool(&g_fLockValidatorEnabled);
4293}
4294RT_EXPORT_SYMBOL(RTLockValidatorIsEnabled);
4295
4296
4297RTDECL(bool) RTLockValidatorSetQuiet(bool fQuiet)
4298{
4299 return ASMAtomicXchgBool(&g_fLockValidatorQuiet, fQuiet);
4300}
4301RT_EXPORT_SYMBOL(RTLockValidatorSetQuiet);
4302
4303
4304RTDECL(bool) RTLockValidatorIsQuiet(void)
4305{
4306 return ASMAtomicUoReadBool(&g_fLockValidatorQuiet);
4307}
4308RT_EXPORT_SYMBOL(RTLockValidatorIsQuiet);
4309
4310
4311RTDECL(bool) RTLockValidatorSetMayPanic(bool fMayPanic)
4312{
4313 return ASMAtomicXchgBool(&g_fLockValidatorMayPanic, fMayPanic);
4314}
4315RT_EXPORT_SYMBOL(RTLockValidatorSetMayPanic);
4316
4317
4318RTDECL(bool) RTLockValidatorMayPanic(void)
4319{
4320 return ASMAtomicUoReadBool(&g_fLockValidatorMayPanic);
4321}
4322RT_EXPORT_SYMBOL(RTLockValidatorMayPanic);
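/** @remarks Typical process-initialization sketch combining the switches
 * above (the chosen values are purely illustrative):
 *
 * @code
 * RTLockValidatorSetEnabled(true);    // turn checking on
 * RTLockValidatorSetQuiet(false);     // complain about violations
 * RTLockValidatorSetMayPanic(false);  // report, but don't assert/panic
 * @endcode
 */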
4323