VirtualBox

source: vbox/trunk/src/VBox/Runtime/common/misc/lockvalidator.cpp@ 57546

Last change on this file since 57546 was 57358, checked in by vboxsync, 10 years ago

*: scm cleanup run.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 159.8 KB
Line 
1/* $Id: lockvalidator.cpp 57358 2015-08-14 15:16:38Z vboxsync $ */
2/** @file
3 * IPRT - Lock Validator.
4 */
5
6/*
7 * Copyright (C) 2009-2015 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 */
26
27
28/*********************************************************************************************************************************
29* Header Files *
30*********************************************************************************************************************************/
31#include <iprt/lockvalidator.h>
32#include "internal/iprt.h"
33
34#include <iprt/asm.h>
35#include <iprt/assert.h>
36#include <iprt/env.h>
37#include <iprt/err.h>
38#include <iprt/mem.h>
39#include <iprt/once.h>
40#include <iprt/semaphore.h>
41#include <iprt/string.h>
42#include <iprt/thread.h>
43
44#include "internal/lockvalidator.h"
45#include "internal/magics.h"
46#include "internal/strhash.h"
47#include "internal/thread.h"
48
49
50/*********************************************************************************************************************************
51* Defined Constants And Macros *
52*********************************************************************************************************************************/
/** Macro that asserts that a pointer is aligned correctly.
 * Only used when fighting bugs.
 * @note  No trailing semicolon in the expansion: call sites supply their own,
 *        which keeps both branches of this \#if consistent and avoids stray
 *        empty statements after if-bodies. */
#if 1
# define RTLOCKVAL_ASSERT_PTR_ALIGN(p) \
    AssertMsg(!((uintptr_t)(p) & (sizeof(uintptr_t) - 1)), ("%p\n", (p)))
#else
# define RTLOCKVAL_ASSERT_PTR_ALIGN(p) do { } while (0)
#endif
61
/** Hashes the class handle (pointer) into an apPriorLocksHash index.
 * The low 6 bits are shifted out first — presumably constant due to heap
 * allocation alignment and thus carrying no information (TODO confirm) —
 * then the result is reduced modulo the hash table size. */
#define RTLOCKVALCLASS_HASH(hClass) \
    (   ((uintptr_t)(hClass) >> 6 ) \
      % (  RT_SIZEOFMEMB(RTLOCKVALCLASSINT, apPriorLocksHash) \
         / sizeof(PRTLOCKVALCLASSREF)) )

/** The max value for RTLOCKVALCLASSINT::cRefs. */
#define RTLOCKVALCLASS_MAX_REFS             UINT32_C(0xffff0000)
/** The max value for RTLOCKVALCLASSREF::cLookups. */
#define RTLOCKVALCLASSREF_MAX_LOOKUPS       UINT32_C(0xfffe0000)
/** The absolute max value for RTLOCKVALCLASSREF::cLookups at which it will
 * be set back to RTLOCKVALCLASSREF_MAX_LOOKUPS.
 * (Leaves headroom so concurrent unordered increments cannot wrap.) */
#define RTLOCKVALCLASSREF_MAX_LOOKUPS_FIX   UINT32_C(0xffff0000)


/** @def RTLOCKVAL_WITH_RECURSION_RECORDS
 * Enable recursion records.  Ring-3 only (and doxygen). */
#if defined(IN_RING3) || defined(DOXYGEN_RUNNING)
# define RTLOCKVAL_WITH_RECURSION_RECORDS  1
#endif

/** @def RTLOCKVAL_WITH_VERBOSE_DUMPS
 * Enables some extra verbosity in the lock dumping.
 * Note: never defined in real builds — only visible to doxygen. */
#if defined(DOXYGEN_RUNNING)
# define RTLOCKVAL_WITH_VERBOSE_DUMPS
#endif

/** @def RTLOCKVAL_WITH_CLASS_HASH_STATS
 * Enables collection prior class hash lookup statistics, dumping them when
 * complaining about the class. */
#if defined(DEBUG) || defined(DOXYGEN_RUNNING)
# define RTLOCKVAL_WITH_CLASS_HASH_STATS
#endif
95
96
97/*********************************************************************************************************************************
98* Structures and Typedefs *
99*********************************************************************************************************************************/
/**
 * Deadlock detection stack entry.
 *
 * One frame of the graph walk performed by the deadlock detector; see
 * RTLOCKVALDDSTACK for the containing stack.
 */
typedef struct RTLOCKVALDDENTRY
{
    /** The current record. */
    PRTLOCKVALRECUNION      pRec;
    /** The current entry number if pRec is a shared one. */
    uint32_t                iEntry;
    /** The thread state of the thread we followed to get to pFirstSibling.
     * This is only used for validating a deadlock stack. */
    RTTHREADSTATE           enmState;
    /** The thread we followed to get to pFirstSibling.
     * This is only used for validating a deadlock stack. */
    PRTTHREADINT            pThread;
    /** What pThread is waiting on, i.e. where we entered the circular list of
     * siblings.  This is used for validating a deadlock stack as well as
     * terminating the sibling walk. */
    PRTLOCKVALRECUNION      pFirstSibling;
} RTLOCKVALDDENTRY;
120
121
/**
 * Deadlock detection stack.
 *
 * Fixed depth (32 frames); a walk deeper than this cannot be represented.
 */
typedef struct RTLOCKVALDDSTACK
{
    /** The number stack entries. */
    uint32_t                c;
    /** The stack entries. */
    RTLOCKVALDDENTRY        a[32];
} RTLOCKVALDDSTACK;
/** Pointer to a deadlock detection stack. */
typedef RTLOCKVALDDSTACK *PRTLOCKVALDDSTACK;
134
135
/**
 * Reference to another class.
 *
 * Element of the prior-lock lists/hash in RTLOCKVALCLASSINT.
 */
typedef struct RTLOCKVALCLASSREF
{
    /** The class. */
    RTLOCKVALCLASS          hClass;
    /** The number of lookups of this class.
     * See RTLOCKVALCLASSREF_MAX_LOOKUPS / _FIX for the capping scheme. */
    uint32_t volatile       cLookups;
    /** Indicates whether the entry was added automatically during order checking
     * (true) or manually via the API (false). */
    bool                    fAutodidacticism;
    /** Reserved / explicit alignment padding. */
    bool                    afReserved[3];
} RTLOCKVALCLASSREF;
/** Pointer to a class reference. */
typedef RTLOCKVALCLASSREF *PRTLOCKVALCLASSREF;
153
154
/** Pointer to a chunk of class references. */
typedef struct RTLOCKVALCLASSREFCHUNK *PRTLOCKVALCLASSREFCHUNK;
/**
 * Chunk of class references.
 *
 * Chunks form a singly linked list (pNext) so references can be appended
 * without reallocating existing entries.
 */
typedef struct RTLOCKVALCLASSREFCHUNK
{
    /** Array of refs. */
#if 0 /** @todo for testing allocation of new chunks. */
    RTLOCKVALCLASSREF   aRefs[ARCH_BITS == 32 ? 10 : 8];
#else
    RTLOCKVALCLASSREF   aRefs[2];
#endif
    /** Pointer to the next chunk. */
    PRTLOCKVALCLASSREFCHUNK volatile pNext;
} RTLOCKVALCLASSREFCHUNK;
171
172
/**
 * Lock class.
 *
 * Internal representation behind the opaque RTLOCKVALCLASS handle.  Kept in
 * the g_LockValClassTree AVL tree (keyed on the Core member) and reference
 * counted.  The layout is checked by the AssertCompile statements below:
 * PriorLocks must sit at offset 64.
 */
typedef struct RTLOCKVALCLASSINT
{
    /** AVL node core. */
    AVLLU32NODECORE         Core;
    /** Magic value (RTLOCKVALCLASS_MAGIC). */
    uint32_t volatile       u32Magic;
    /** Reference counter.  See RTLOCKVALCLASS_MAX_REFS. */
    uint32_t volatile       cRefs;
    /** Whether the class is allowed to teach it self new locking order rules. */
    bool                    fAutodidact;
    /** Whether to allow recursion. */
    bool                    fRecursionOk;
    /** Strict release order. */
    bool                    fStrictReleaseOrder;
    /** Whether this class is in the tree. */
    bool                    fInTree;
    /** Donate a reference to the next retainer.  This is a hack to make
     *  RTLockValidatorClassCreateUnique work. */
    bool volatile           fDonateRefToNextRetainer;
    /** Reserved future use / explicit alignment. */
    bool                    afReserved[3];
    /** The minimum wait interval for which we do deadlock detection
     *  (milliseconds). */
    RTMSINTERVAL            cMsMinDeadlock;
    /** The minimum wait interval for which we do order checks (milliseconds). */
    RTMSINTERVAL            cMsMinOrder;
    /** More padding. */
    uint32_t                au32Reserved[ARCH_BITS == 32 ? 5 : 2];
    /** Classes that may be taken prior to this one.
     * This is a linked list where each node contains a chunk of locks so that we
     * reduce the number of allocations as well as localize the data. */
    RTLOCKVALCLASSREFCHUNK  PriorLocks;
    /** Hash table containing frequently encountered prior locks.
     * Indexed via RTLOCKVALCLASS_HASH(). */
    PRTLOCKVALCLASSREF      apPriorLocksHash[17];
    /** Class name.  (Allocated after the end of the block as usual.) */
    char const             *pszName;
    /** Where this class was created.
     *  This is mainly used for finding automatically created lock classes.
     *  @remarks The strings are stored after this structure so we won't crash
     *           if the class lives longer than the module (dll/so/dylib) that
     *           spawned it. */
    RTLOCKVALSRCPOS         CreatePos;
#ifdef RTLOCKVAL_WITH_CLASS_HASH_STATS
    /** Hash hits. */
    uint32_t volatile       cHashHits;
    /** Hash misses. */
    uint32_t volatile       cHashMisses;
#endif
} RTLOCKVALCLASSINT;
AssertCompileSize(AVLLU32NODECORE, ARCH_BITS == 32 ? 20 : 32);
AssertCompileMemberOffset(RTLOCKVALCLASSINT, PriorLocks, 64);
227
228
229/*********************************************************************************************************************************
230* Global Variables *
231*********************************************************************************************************************************/
/** Serializing object destruction and deadlock detection.
 *
 * This makes sure that none of the memory examined by the deadlock detection
 * code will become invalid (reused for other purposes or made not present)
 * while the detection is in progress.
 *
 * NS: RTLOCKVALREC*, RTTHREADINT and RTLOCKVALDRECSHRD::papOwners destruction.
 * EW: Deadlock detection and some related activities.
 *
 * Created lazily by rtLockValidatorLazyInit(). */
static RTSEMXROADS          g_hLockValidatorXRoads   = NIL_RTSEMXROADS;
/** Serializing class tree insert and lookups.
 * Created lazily by rtLockValidatorLazyInit(). */
static RTSEMRW              g_hLockValClassTreeRWLock= NIL_RTSEMRW;
/** Class tree. */
static PAVLLU32NODECORE     g_LockValClassTree       = NULL;
/** Critical section serializing the teaching new rules to the classes. */
static RTCRITSECT           g_LockValClassTeachCS;

/** Whether the lock validator is enabled or disabled.
 * Only applies to new locks.  */
static bool volatile        g_fLockValidatorEnabled  = true;
/** Set if the lock validator is quiet.
 * Defaults depend on RT_STRICT: noisy in strict builds, quiet otherwise. */
#ifdef RT_STRICT
static bool volatile        g_fLockValidatorQuiet    = false;
#else
static bool volatile        g_fLockValidatorQuiet    = true;
#endif
/** Set if the lock validator may panic.
 * Defaults depend on RT_STRICT: may panic in strict builds only. */
#ifdef RT_STRICT
static bool volatile        g_fLockValidatorMayPanic = true;
#else
static bool volatile        g_fLockValidatorMayPanic = false;
#endif
/** Whether to return an error status on wrong locking order. */
static bool volatile        g_fLockValSoftWrongOrder = false;
266
267
268/*********************************************************************************************************************************
269* Internal Functions *
270*********************************************************************************************************************************/
271static void rtLockValidatorClassDestroy(RTLOCKVALCLASSINT *pClass);
272static uint32_t rtLockValidatorStackDepth(PRTTHREADINT pThread);
273
274
/**
 * Lazy initialization of the lock validator globals.
 *
 * Creates the class-teaching critsect, the class-tree RW semaphore and the
 * destruction/detection crossroads on first use, and (ring-3 only) applies
 * IPRT_LOCK_VALIDATOR_* environment overrides.
 *
 * @note  s_fInitializing is a non-reentrancy guard, not a run-once flag: it
 *        is cleared again at the end, and each sub-init is individually
 *        guarded by a NIL/initialized check, so a loser of the CmpXchg race
 *        may return before initialization completes — callers must therefore
 *        tolerate NIL handles (and the users of these globals do check).
 */
static void rtLockValidatorLazyInit(void)
{
    static uint32_t volatile s_fInitializing = false;
    /* Only one thread at a time performs the work; concurrent callers return. */
    if (ASMAtomicCmpXchgU32(&s_fInitializing, true, false))
    {
        /*
         * The locks.
         */
        if (!RTCritSectIsInitialized(&g_LockValClassTeachCS))
            RTCritSectInitEx(&g_LockValClassTeachCS, RTCRITSECT_FLAGS_NO_LOCK_VAL, NIL_RTLOCKVALCLASS,
                             RTLOCKVAL_SUB_CLASS_ANY, "RTLockVal-Teach");

        if (g_hLockValClassTreeRWLock == NIL_RTSEMRW)
        {
            RTSEMRW hSemRW;
            int rc = RTSemRWCreateEx(&hSemRW, RTSEMRW_FLAGS_NO_LOCK_VAL, NIL_RTLOCKVALCLASS, RTLOCKVAL_SUB_CLASS_ANY, "RTLockVal-Tree");
            if (RT_SUCCESS(rc))
                ASMAtomicWriteHandle(&g_hLockValClassTreeRWLock, hSemRW);
        }

        if (g_hLockValidatorXRoads == NIL_RTSEMXROADS)
        {
            RTSEMXROADS hXRoads;
            int rc = RTSemXRoadsCreate(&hXRoads);
            if (RT_SUCCESS(rc))
                ASMAtomicWriteHandle(&g_hLockValidatorXRoads, hXRoads);
        }

#ifdef IN_RING3
        /*
         * Check the environment for our config variables.
         * Note the ordering: for each pair the second (negative) variable is
         * checked last and therefore wins if both are set.
         */
        if (RTEnvExist("IPRT_LOCK_VALIDATOR_ENABLED"))
            ASMAtomicWriteBool(&g_fLockValidatorEnabled, true);
        if (RTEnvExist("IPRT_LOCK_VALIDATOR_DISABLED"))
            ASMAtomicWriteBool(&g_fLockValidatorEnabled, false);

        if (RTEnvExist("IPRT_LOCK_VALIDATOR_MAY_PANIC"))
            ASMAtomicWriteBool(&g_fLockValidatorMayPanic, true);
        if (RTEnvExist("IPRT_LOCK_VALIDATOR_MAY_NOT_PANIC"))
            ASMAtomicWriteBool(&g_fLockValidatorMayPanic, false);

        if (RTEnvExist("IPRT_LOCK_VALIDATOR_NOT_QUIET"))
            ASMAtomicWriteBool(&g_fLockValidatorQuiet, false);
        if (RTEnvExist("IPRT_LOCK_VALIDATOR_QUIET"))
            ASMAtomicWriteBool(&g_fLockValidatorQuiet, true);

        if (RTEnvExist("IPRT_LOCK_VALIDATOR_STRICT_ORDER"))
            ASMAtomicWriteBool(&g_fLockValSoftWrongOrder, false);
        if (RTEnvExist("IPRT_LOCK_VALIDATOR_SOFT_ORDER"))
            ASMAtomicWriteBool(&g_fLockValSoftWrongOrder, true);
#endif

        /*
         * Register cleanup
         */
        /** @todo register some cleanup callback if we care. */

        /* Release the guard so a later caller can retry anything that failed. */
        ASMAtomicWriteU32(&s_fInitializing, false);
    }
}
339
340
341
342/** Wrapper around ASMAtomicReadPtr. */
343DECL_FORCE_INLINE(PRTLOCKVALRECUNION) rtLockValidatorReadRecUnionPtr(PRTLOCKVALRECUNION volatile *ppRec)
344{
345 PRTLOCKVALRECUNION p = ASMAtomicReadPtrT(ppRec, PRTLOCKVALRECUNION);
346 RTLOCKVAL_ASSERT_PTR_ALIGN(p);
347 return p;
348}
349
350
351/** Wrapper around ASMAtomicWritePtr. */
352DECL_FORCE_INLINE(void) rtLockValidatorWriteRecUnionPtr(PRTLOCKVALRECUNION volatile *ppRec, PRTLOCKVALRECUNION pRecNew)
353{
354 RTLOCKVAL_ASSERT_PTR_ALIGN(pRecNew);
355 ASMAtomicWritePtr(ppRec, pRecNew);
356}
357
358
359/** Wrapper around ASMAtomicReadPtr. */
360DECL_FORCE_INLINE(PRTTHREADINT) rtLockValidatorReadThreadHandle(RTTHREAD volatile *phThread)
361{
362 PRTTHREADINT p = ASMAtomicReadPtrT(phThread, PRTTHREADINT);
363 RTLOCKVAL_ASSERT_PTR_ALIGN(p);
364 return p;
365}
366
367
368/** Wrapper around ASMAtomicUoReadPtr. */
369DECL_FORCE_INLINE(PRTLOCKVALRECSHRDOWN) rtLockValidatorUoReadSharedOwner(PRTLOCKVALRECSHRDOWN volatile *ppOwner)
370{
371 PRTLOCKVALRECSHRDOWN p = ASMAtomicUoReadPtrT(ppOwner, PRTLOCKVALRECSHRDOWN);
372 RTLOCKVAL_ASSERT_PTR_ALIGN(p);
373 return p;
374}
375
376
377/**
378 * Reads a volatile thread handle field and returns the thread name.
379 *
380 * @returns Thread name (read only).
381 * @param phThread The thread handle field.
382 */
383static const char *rtLockValidatorNameThreadHandle(RTTHREAD volatile *phThread)
384{
385 PRTTHREADINT pThread = rtLockValidatorReadThreadHandle(phThread);
386 if (!pThread)
387 return "<NIL>";
388 if (!VALID_PTR(pThread))
389 return "<INVALID>";
390 if (pThread->u32Magic != RTTHREADINT_MAGIC)
391 return "<BAD-THREAD-MAGIC>";
392 return pThread->szName;
393}
394
395
396/**
397 * Launch a simple assertion like complaint w/ panic.
398 *
399 * @param pszFile Where from - file.
400 * @param iLine Where from - line.
401 * @param pszFunction Where from - function.
402 * @param pszWhat What we're complaining about.
403 * @param ... Format arguments.
404 */
405static void rtLockValComplain(RT_SRC_POS_DECL, const char *pszWhat, ...)
406{
407 if (!ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
408 {
409 RTAssertMsg1Weak("RTLockValidator", iLine, pszFile, pszFunction);
410 va_list va;
411 va_start(va, pszWhat);
412 RTAssertMsg2WeakV(pszWhat, va);
413 va_end(va);
414 }
415 if (!ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
416 RTAssertPanic();
417}
418
419
/**
 * Describes the class.
 *
 * Appends the description to the current assertion message via
 * RTAssertMsg2AddWeak.  Does nothing in quiet mode.  Tolerates bad class
 * pointers/magics by printing a short diagnostic instead.
 *
 * @param   pszPrefix           Message prefix.
 * @param   pClass              The class to complain about.
 * @param   uSubClass           My sub-class.
 * @param   fVerbose            Verbose description including relations to other
 *                              classes (one prior class per line with lookup
 *                              counts), otherwise a compact comma-separated
 *                              list, ten names per line.
 */
static void rtLockValComplainAboutClass(const char *pszPrefix, RTLOCKVALCLASSINT *pClass, uint32_t uSubClass, bool fVerbose)
{
    if (ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
        return;

    /* Stringify the sub-class. */
    const char *pszSubClass;
    char        szSubClass[32];
    if (uSubClass < RTLOCKVAL_SUB_CLASS_USER)
        switch (uSubClass)
        {
            case RTLOCKVAL_SUB_CLASS_NONE: pszSubClass = "none"; break;
            case RTLOCKVAL_SUB_CLASS_ANY:  pszSubClass = "any"; break;
            default:
                RTStrPrintf(szSubClass, sizeof(szSubClass), "invl-%u", uSubClass);
                pszSubClass = szSubClass;
                break;
        }
    else
    {
        RTStrPrintf(szSubClass, sizeof(szSubClass), "%u", uSubClass);
        pszSubClass = szSubClass;
    }

    /* Validate the class pointer before dereferencing anything. */
    if (!VALID_PTR(pClass))
    {
        RTAssertMsg2AddWeak("%sbad class=%p sub-class=%s\n", pszPrefix, pClass, pszSubClass);
        return;
    }
    if (pClass->u32Magic != RTLOCKVALCLASS_MAGIC)
    {
        RTAssertMsg2AddWeak("%sbad class=%p magic=%#x sub-class=%s\n", pszPrefix, pClass, pClass->u32Magic, pszSubClass);
        return;
    }

    /* OK, dump the class info. */
    RTAssertMsg2AddWeak("%sclass=%p %s created={%Rbn(%u) %Rfn %p} sub-class=%s\n", pszPrefix,
                        pClass,
                        pClass->pszName,
                        pClass->CreatePos.pszFile,
                        pClass->CreatePos.uLine,
                        pClass->CreatePos.pszFunction,
                        pClass->CreatePos.uId,
                        pszSubClass);
    if (fVerbose)
    {
        /* One prior class per line, walking all chunks of the PriorLocks list. */
        uint32_t i        = 0;
        uint32_t cPrinted = 0;
        for (PRTLOCKVALCLASSREFCHUNK pChunk = &pClass->PriorLocks; pChunk; pChunk = pChunk->pNext)
            for (unsigned j = 0; j < RT_ELEMENTS(pChunk->aRefs); j++, i++)
            {
                RTLOCKVALCLASSINT *pCurClass = pChunk->aRefs[j].hClass;
                if (pCurClass != NIL_RTLOCKVALCLASS)
                {
                    RTAssertMsg2AddWeak("%s%s #%02u: %s, %s, %u lookup%s\n", pszPrefix,
                                        cPrinted == 0
                                        ? "Prior:"
                                        : "      ",
                                        i,
                                        pCurClass->pszName,
                                        pChunk->aRefs[j].fAutodidacticism
                                        ? "autodidactic"
                                        : "manually    ",
                                        pChunk->aRefs[j].cLookups,
                                        pChunk->aRefs[j].cLookups != 1 ? "s" : "");
                    cPrinted++;
                }
            }
        if (!cPrinted)
            RTAssertMsg2AddWeak("%sPrior: none\n", pszPrefix);
#ifdef RTLOCKVAL_WITH_CLASS_HASH_STATS
        RTAssertMsg2AddWeak("%sHash Stats: %u hits, %u misses\n", pszPrefix, pClass->cHashHits, pClass->cHashMisses);
#endif
    }
    else
    {
        /* Compact form: comma separated, ten names per line, '*' marking
           automatically learned (autodidactic) entries. */
        uint32_t cPrinted = 0;
        for (PRTLOCKVALCLASSREFCHUNK pChunk = &pClass->PriorLocks; pChunk; pChunk = pChunk->pNext)
            for (unsigned j = 0; j < RT_ELEMENTS(pChunk->aRefs); j++)
            {
                RTLOCKVALCLASSINT *pCurClass = pChunk->aRefs[j].hClass;
                if (pCurClass != NIL_RTLOCKVALCLASS)
                {
                    if ((cPrinted % 10) == 0)
                        RTAssertMsg2AddWeak("%sPrior classes: %s%s", pszPrefix, pCurClass->pszName,
                                            pChunk->aRefs[j].fAutodidacticism ? "*" : "");
                    else if ((cPrinted % 10) != 9)
                        RTAssertMsg2AddWeak(", %s%s", pCurClass->pszName,
                                            pChunk->aRefs[j].fAutodidacticism ? "*" : "");
                    else
                        RTAssertMsg2AddWeak(", %s%s\n", pCurClass->pszName,
                                            pChunk->aRefs[j].fAutodidacticism ? "*" : "");
                    cPrinted++;
                }
            }
        if (!cPrinted)
            RTAssertMsg2AddWeak("%sPrior classes: none\n", pszPrefix);
        else if ((cPrinted % 10) != 0)
            RTAssertMsg2AddWeak("\n"); /* close an unterminated line */
    }
}
531
532
533/**
534 * Helper for getting the class name.
535 * @returns Class name string.
536 * @param pClass The class.
537 */
538static const char *rtLockValComplainGetClassName(RTLOCKVALCLASSINT *pClass)
539{
540 if (!pClass)
541 return "<nil-class>";
542 if (!VALID_PTR(pClass))
543 return "<bad-class-ptr>";
544 if (pClass->u32Magic != RTLOCKVALCLASS_MAGIC)
545 return "<bad-class-magic>";
546 if (!pClass->pszName)
547 return "<no-class-name>";
548 return pClass->pszName;
549}
550
551/**
552 * Formats the sub-class.
553 *
554 * @returns Stringified sub-class.
555 * @param uSubClass The name.
556 * @param pszBuf Buffer that is big enough.
557 */
558static const char *rtLockValComplainGetSubClassName(uint32_t uSubClass, char *pszBuf)
559{
560 if (uSubClass < RTLOCKVAL_SUB_CLASS_USER)
561 switch (uSubClass)
562 {
563 case RTLOCKVAL_SUB_CLASS_NONE: return "none";
564 case RTLOCKVAL_SUB_CLASS_ANY: return "any";
565 default:
566 RTStrPrintf(pszBuf, 32, "invl-%u", uSubClass);
567 break;
568 }
569 else
570 RTStrPrintf(pszBuf, 32, "%x", uSubClass);
571 return pszBuf;
572}
573
574
575/**
576 * Helper for rtLockValComplainAboutLock.
577 */
578DECL_FORCE_INLINE(void) rtLockValComplainAboutLockHlp(const char *pszPrefix, PRTLOCKVALRECUNION pRec, const char *pszSuffix,
579 uint32_t u32Magic, PCRTLOCKVALSRCPOS pSrcPos, uint32_t cRecursion,
580 const char *pszFrameType)
581{
582 char szBuf[32];
583 switch (u32Magic)
584 {
585 case RTLOCKVALRECEXCL_MAGIC:
586#ifdef RTLOCKVAL_WITH_VERBOSE_DUMPS
587 RTAssertMsg2AddWeak("%s%p %s xrec=%p own=%s r=%u cls=%s/%s pos={%Rbn(%u) %Rfn %p} [x%s]%s", pszPrefix,
588 pRec->Excl.hLock, pRec->Excl.pszName, pRec,
589 rtLockValidatorNameThreadHandle(&pRec->Excl.hThread), cRecursion,
590 rtLockValComplainGetClassName(pRec->Excl.hClass),
591 rtLockValComplainGetSubClassName(pRec->Excl.uSubClass, szBuf),
592 pSrcPos->pszFile, pSrcPos->uLine, pSrcPos->pszFunction, pSrcPos->uId,
593 pszFrameType, pszSuffix);
594#else
595 RTAssertMsg2AddWeak("%s%p %s own=%s r=%u cls=%s/%s pos={%Rbn(%u) %Rfn %p} [x%s]%s", pszPrefix,
596 pRec->Excl.hLock, pRec->Excl.szName,
597 rtLockValidatorNameThreadHandle(&pRec->Excl.hThread), cRecursion,
598 rtLockValComplainGetClassName(pRec->Excl.hClass),
599 rtLockValComplainGetSubClassName(pRec->Excl.uSubClass, szBuf),
600 pSrcPos->pszFile, pSrcPos->uLine, pSrcPos->pszFunction, pSrcPos->uId,
601 pszFrameType, pszSuffix);
602#endif
603 break;
604
605 case RTLOCKVALRECSHRD_MAGIC:
606 RTAssertMsg2AddWeak("%ss %p %s srec=%p cls=%s/%s [s%s]%s", pszPrefix,
607 pRec->Shared.hLock, pRec->Shared.szName, pRec,
608 rtLockValComplainGetClassName(pRec->Shared.hClass),
609 rtLockValComplainGetSubClassName(pRec->Shared.uSubClass, szBuf),
610 pszFrameType, pszSuffix);
611 break;
612
613 case RTLOCKVALRECSHRDOWN_MAGIC:
614 {
615 PRTLOCKVALRECSHRD pShared = pRec->ShrdOwner.pSharedRec;
616 if ( VALID_PTR(pShared)
617 && pShared->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC)
618#ifdef RTLOCKVAL_WITH_VERBOSE_DUMPS
619 RTAssertMsg2AddWeak("%s%p %s srec=%p trec=%p own=%s r=%u cls=%s/%s pos={%Rbn(%u) %Rfn %p} [o%s]%s", pszPrefix,
620 pShared->hLock, pShared->pszName, pShared,
621 pRec, rtLockValidatorNameThreadHandle(&pRec->ShrdOwner.hThread), cRecursion,
622 rtLockValComplainGetClassName(pShared->hClass),
623 rtLockValComplainGetSubClassName(pShared->uSubClass, szBuf),
624 pSrcPos->pszFile, pSrcPos->uLine, pSrcPos->pszFunction, pSrcPos->uId,
625 pszSuffix2, pszSuffix);
626#else
627 RTAssertMsg2AddWeak("%s%p %s own=%s r=%u cls=%s/%s pos={%Rbn(%u) %Rfn %p} [o%s]%s", pszPrefix,
628 pShared->hLock, pShared->szName,
629 rtLockValidatorNameThreadHandle(&pRec->ShrdOwner.hThread), cRecursion,
630 rtLockValComplainGetClassName(pShared->hClass),
631 rtLockValComplainGetSubClassName(pShared->uSubClass, szBuf),
632 pSrcPos->pszFile, pSrcPos->uLine, pSrcPos->pszFunction, pSrcPos->uId,
633 pszFrameType, pszSuffix);
634#endif
635 else
636 RTAssertMsg2AddWeak("%sbad srec=%p trec=%p own=%s r=%u pos={%Rbn(%u) %Rfn %p} [x%s]%s", pszPrefix,
637 pShared,
638 pRec, rtLockValidatorNameThreadHandle(&pRec->ShrdOwner.hThread), cRecursion,
639 pSrcPos->pszFile, pSrcPos->uLine, pSrcPos->pszFunction, pSrcPos->uId,
640 pszFrameType, pszSuffix);
641 break;
642 }
643
644 default:
645 AssertMsgFailed(("%#x\n", u32Magic));
646 }
647}
648
649
/**
 * Describes the lock.
 *
 * Dispatches on the record type magic and delegates the formatting to
 * rtLockValComplainAboutLockHlp.  Nested (recursion) records are unwrapped to
 * the inner record first.  Does nothing for invalid pointers or in quiet mode.
 *
 * @param   pszPrefix           Message prefix.
 * @param   pRec                The lock record we're working on.
 * @param   pszSuffix           Message suffix.
 */
static void rtLockValComplainAboutLock(const char *pszPrefix, PRTLOCKVALRECUNION pRec, const char *pszSuffix)
{
/* With recursion records enabled the per-record recursion counter is not
   maintained, so always display 1; otherwise show the real counter. */
#ifdef RTLOCKVAL_WITH_RECURSION_RECORDS
# define FIX_REC(r)     1
#else
# define FIX_REC(r)     (r)
#endif
    if (   VALID_PTR(pRec)
        && !ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
    {
        switch (pRec->Core.u32Magic)
        {
            case RTLOCKVALRECEXCL_MAGIC:
                rtLockValComplainAboutLockHlp(pszPrefix, pRec, pszSuffix, RTLOCKVALRECEXCL_MAGIC,
                                              &pRec->Excl.SrcPos, FIX_REC(pRec->Excl.cRecursion), "");
                break;

            case RTLOCKVALRECSHRD_MAGIC:
                /* Plain shared record: no source position or recursion count. */
                rtLockValComplainAboutLockHlp(pszPrefix, pRec, pszSuffix, RTLOCKVALRECSHRD_MAGIC, NULL, 0, "");
                break;

            case RTLOCKVALRECSHRDOWN_MAGIC:
                rtLockValComplainAboutLockHlp(pszPrefix, pRec, pszSuffix, RTLOCKVALRECSHRDOWN_MAGIC,
                                              &pRec->ShrdOwner.SrcPos, FIX_REC(pRec->ShrdOwner.cRecursion), "");
                break;

            case RTLOCKVALRECNEST_MAGIC:
            {
                /* Nested record: validate and unwrap the inner record, dump it
                   with the nest's own source position and recursion count and a
                   "/r" frame tag. */
                PRTLOCKVALRECUNION pRealRec = pRec->Nest.pRec;
                uint32_t u32Magic;
                if (   VALID_PTR(pRealRec)
                    && (   (u32Magic = pRealRec->Core.u32Magic) == RTLOCKVALRECEXCL_MAGIC
                        || u32Magic == RTLOCKVALRECSHRD_MAGIC
                        || u32Magic == RTLOCKVALRECSHRDOWN_MAGIC)
                   )
                    rtLockValComplainAboutLockHlp(pszPrefix, pRealRec, pszSuffix, u32Magic,
                                                  &pRec->Nest.SrcPos, pRec->Nest.cRecursion, "/r");
                else
                    RTAssertMsg2AddWeak("%sbad rrec=%p nrec=%p r=%u pos={%Rbn(%u) %Rfn %p}%s", pszPrefix,
                                        pRealRec, pRec, pRec->Nest.cRecursion,
                                        pRec->Nest.SrcPos.pszFile, pRec->Nest.SrcPos.uLine, pRec->Nest.SrcPos.pszFunction, pRec->Nest.SrcPos.uId,
                                        pszSuffix);
                break;
            }

            default:
                RTAssertMsg2AddWeak("%spRec=%p u32Magic=%#x (bad)%s", pszPrefix, pRec, pRec->Core.u32Magic, pszSuffix);
                break;
        }
    }
#undef FIX_REC
}
709
710
/**
 * Dump the lock stack.
 *
 * Walks the thread's lock stack from pStackTop following the per-record pDown
 * links, printing one line per frame via rtLockValComplainAboutLock.  Does
 * nothing in quiet mode, for invalid thread pointers, or when the stack is
 * shallower than cMinFrames.
 *
 * @param   pThread             The thread which lock stack we're gonna dump.
 * @param   cchIndent           The indentation in chars.
 * @param   cMinFrames          The minimum number of frames to consider
 *                              dumping.
 * @param   pHighightRec        Record that should be marked specially in the
 *                              dump (with a trailing "(*)").
 */
static void rtLockValComplainAboutLockStack(PRTTHREADINT pThread, unsigned cchIndent, uint32_t cMinFrames,
                                            PRTLOCKVALRECUNION pHighightRec)
{
    if (   VALID_PTR(pThread)
        && !ASMAtomicUoReadBool(&g_fLockValidatorQuiet)
        && pThread->u32Magic == RTTHREADINT_MAGIC
       )
    {
        uint32_t cEntries = rtLockValidatorStackDepth(pThread);
        if (cEntries >= cMinFrames)
        {
            RTAssertMsg2AddWeak("%*s---- start of lock stack for %p %s - %u entr%s ----\n", cchIndent, "",
                                pThread, pThread->szName, cEntries, cEntries == 1 ? "y" : "ies");
            PRTLOCKVALRECUNION pCur = rtLockValidatorReadRecUnionPtr(&pThread->LockValidator.pStackTop);
            for (uint32_t i = 0; VALID_PTR(pCur); i++)
            {
                char szPrefix[80];
                RTStrPrintf(szPrefix, sizeof(szPrefix), "%*s#%02u: ", cchIndent, "", i);
                rtLockValComplainAboutLock(szPrefix, pCur, pHighightRec != pCur ? "\n" : " (*)\n");
                /* Advance to the next frame; the pDown member lives at a
                   type-specific offset, hence the switch. */
                switch (pCur->Core.u32Magic)
                {
                    case RTLOCKVALRECEXCL_MAGIC:    pCur = rtLockValidatorReadRecUnionPtr(&pCur->Excl.pDown);      break;
                    case RTLOCKVALRECSHRDOWN_MAGIC: pCur = rtLockValidatorReadRecUnionPtr(&pCur->ShrdOwner.pDown); break;
                    case RTLOCKVALRECNEST_MAGIC:    pCur = rtLockValidatorReadRecUnionPtr(&pCur->Nest.pDown);      break;
                    default:
                        RTAssertMsg2AddWeak("%*s<bad stack frame>\n", cchIndent, "");
                        pCur = NULL; /* terminate the walk on corruption */
                        break;
                }
            }
            RTAssertMsg2AddWeak("%*s---- end of lock stack ----\n", cchIndent, "");
        }
    }
}
755
756
757/**
758 * Launch the initial complaint.
759 *
760 * @param pszWhat What we're complaining about.
761 * @param pSrcPos Where we are complaining from, as it were.
762 * @param pThreadSelf The calling thread.
763 * @param pRec The main lock involved. Can be NULL.
764 * @param fDumpStack Whether to dump the lock stack (true) or not
765 * (false).
766 */
767static void rtLockValComplainFirst(const char *pszWhat, PCRTLOCKVALSRCPOS pSrcPos, PRTTHREADINT pThreadSelf,
768 PRTLOCKVALRECUNION pRec, bool fDumpStack)
769{
770 if (!ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
771 {
772 ASMCompilerBarrier(); /* paranoia */
773 RTAssertMsg1Weak("RTLockValidator", pSrcPos ? pSrcPos->uLine : 0, pSrcPos ? pSrcPos->pszFile : NULL, pSrcPos ? pSrcPos->pszFunction : NULL);
774 if (pSrcPos && pSrcPos->uId)
775 RTAssertMsg2Weak("%s [uId=%p thrd=%s]\n", pszWhat, pSrcPos->uId, VALID_PTR(pThreadSelf) ? pThreadSelf->szName : "<NIL>");
776 else
777 RTAssertMsg2Weak("%s [thrd=%s]\n", pszWhat, VALID_PTR(pThreadSelf) ? pThreadSelf->szName : "<NIL>");
778 rtLockValComplainAboutLock("Lock: ", pRec, "\n");
779 if (fDumpStack)
780 rtLockValComplainAboutLockStack(pThreadSelf, 0, 1, pRec);
781 }
782}
783
784
785/**
786 * Continue bitching.
787 *
788 * @param pszFormat Format string.
789 * @param ... Format arguments.
790 */
791static void rtLockValComplainMore(const char *pszFormat, ...)
792{
793 if (!ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
794 {
795 va_list va;
796 va_start(va, pszFormat);
797 RTAssertMsg2AddWeakV(pszFormat, va);
798 va_end(va);
799 }
800}
801
802
803/**
804 * Raise a panic if enabled.
805 */
806static void rtLockValComplainPanic(void)
807{
808 if (ASMAtomicUoReadBool(&g_fLockValidatorMayPanic))
809 RTAssertPanic();
810}
811
812
/**
 * Copy a source position record.
 *
 * Each member is written with an unordered atomic store, so a concurrent
 * reader never sees a torn individual field (though it may observe a mix of
 * old and new values across different members).
 *
 * @param   pDst                The destination.
 * @param   pSrc                The source.  Can be NULL, in which case pDst
 *                              is cleared.
 */
DECL_FORCE_INLINE(void) rtLockValidatorSrcPosCopy(PRTLOCKVALSRCPOS pDst, PCRTLOCKVALSRCPOS pSrc)
{
    if (pSrc)
    {
        ASMAtomicUoWriteU32(&pDst->uLine,                           pSrc->uLine);
        ASMAtomicUoWritePtr(&pDst->pszFile,                         pSrc->pszFile);
        ASMAtomicUoWritePtr(&pDst->pszFunction,                     pSrc->pszFunction);
        /* uId is an integer field; cast to a pointer slot for the atomic write. */
        ASMAtomicUoWritePtr((void * volatile *)&pDst->uId,          (void *)pSrc->uId);
    }
    else
    {
        ASMAtomicUoWriteU32(&pDst->uLine,                           0);
        ASMAtomicUoWriteNullPtr(&pDst->pszFile);
        ASMAtomicUoWriteNullPtr(&pDst->pszFunction);
        ASMAtomicUoWritePtr(&pDst->uId,                             (RTHCUINTPTR)0);
    }
}
836
837
838/**
839 * Init a source position record.
840 *
841 * @param pSrcPos The source position record.
842 */
843DECL_FORCE_INLINE(void) rtLockValidatorSrcPosInit(PRTLOCKVALSRCPOS pSrcPos)
844{
845 pSrcPos->pszFile = NULL;
846 pSrcPos->pszFunction = NULL;
847 pSrcPos->uId = 0;
848 pSrcPos->uLine = 0;
849#if HC_ARCH_BITS == 64
850 pSrcPos->u32Padding = 0;
851#endif
852}
853
854
855/**
856 * Hashes the specified source position.
857 *
858 * @returns Hash.
859 * @param pSrcPos The source position record.
860 */
861static uint32_t rtLockValidatorSrcPosHash(PCRTLOCKVALSRCPOS pSrcPos)
862{
863 uint32_t uHash;
864 if ( ( pSrcPos->pszFile
865 || pSrcPos->pszFunction)
866 && pSrcPos->uLine != 0)
867 {
868 uHash = 0;
869 if (pSrcPos->pszFile)
870 uHash = sdbmInc(pSrcPos->pszFile, uHash);
871 if (pSrcPos->pszFunction)
872 uHash = sdbmInc(pSrcPos->pszFunction, uHash);
873 uHash += pSrcPos->uLine;
874 }
875 else
876 {
877 Assert(pSrcPos->uId);
878 uHash = (uint32_t)pSrcPos->uId;
879 }
880
881 return uHash;
882}
883
884
885/**
886 * Compares two source positions.
887 *
888 * @returns 0 if equal, < 0 if pSrcPos1 is smaller than pSrcPos2, > 0 if
889 * otherwise.
890 * @param pSrcPos1 The first source position.
891 * @param pSrcPos2 The second source position.
892 */
893static int rtLockValidatorSrcPosCompare(PCRTLOCKVALSRCPOS pSrcPos1, PCRTLOCKVALSRCPOS pSrcPos2)
894{
895 if (pSrcPos1->uLine != pSrcPos2->uLine)
896 return pSrcPos1->uLine < pSrcPos2->uLine ? -1 : 1;
897
898 int iDiff = RTStrCmp(pSrcPos1->pszFile, pSrcPos2->pszFile);
899 if (iDiff != 0)
900 return iDiff;
901
902 iDiff = RTStrCmp(pSrcPos1->pszFunction, pSrcPos2->pszFunction);
903 if (iDiff != 0)
904 return iDiff;
905
906 if (pSrcPos1->uId != pSrcPos2->uId)
907 return pSrcPos1->uId < pSrcPos2->uId ? -1 : 1;
908 return 0;
909}
910
911
912
913/**
914 * Serializes destruction of RTLOCKVALREC* and RTTHREADINT structures.
915 */
916DECLHIDDEN(void) rtLockValidatorSerializeDestructEnter(void)
917{
918 RTSEMXROADS hXRoads = g_hLockValidatorXRoads;
919 if (hXRoads != NIL_RTSEMXROADS)
920 RTSemXRoadsNSEnter(hXRoads);
921}
922
923
924/**
925 * Call after rtLockValidatorSerializeDestructEnter.
926 */
927DECLHIDDEN(void) rtLockValidatorSerializeDestructLeave(void)
928{
929 RTSEMXROADS hXRoads = g_hLockValidatorXRoads;
930 if (hXRoads != NIL_RTSEMXROADS)
931 RTSemXRoadsNSLeave(hXRoads);
932}
933
934
935/**
936 * Serializes deadlock detection against destruction of the objects being
937 * inspected.
938 */
939DECLINLINE(void) rtLockValidatorSerializeDetectionEnter(void)
940{
941 RTSEMXROADS hXRoads = g_hLockValidatorXRoads;
942 if (hXRoads != NIL_RTSEMXROADS)
943 RTSemXRoadsEWEnter(hXRoads);
944}
945
946
947/**
948 * Call after rtLockValidatorSerializeDetectionEnter.
949 */
950DECLHIDDEN(void) rtLockValidatorSerializeDetectionLeave(void)
951{
952 RTSEMXROADS hXRoads = g_hLockValidatorXRoads;
953 if (hXRoads != NIL_RTSEMXROADS)
954 RTSemXRoadsEWLeave(hXRoads);
955}
956
957
958/**
959 * Initializes the per thread lock validator data.
960 *
961 * @param pPerThread The data.
962 */
963DECLHIDDEN(void) rtLockValidatorInitPerThread(RTLOCKVALPERTHREAD *pPerThread)
964{
965 pPerThread->bmFreeShrdOwners = UINT32_MAX;
966
967 /* ASSUMES the rest has already been zeroed. */
968 Assert(pPerThread->pRec == NULL);
969 Assert(pPerThread->cWriteLocks == 0);
970 Assert(pPerThread->cReadLocks == 0);
971 Assert(pPerThread->fInValidator == false);
972 Assert(pPerThread->pStackTop == NULL);
973}
974
975
976/**
977 * Delete the per thread lock validator data.
978 *
979 * @param pPerThread The data.
980 */
981DECLHIDDEN(void) rtLockValidatorDeletePerThread(RTLOCKVALPERTHREAD *pPerThread)
982{
983 /*
984 * Check that the thread doesn't own any locks at this time.
985 */
986 if (pPerThread->pStackTop)
987 {
988 rtLockValComplainFirst("Thread terminating owning locks!", NULL,
989 RT_FROM_MEMBER(pPerThread, RTTHREADINT, LockValidator),
990 pPerThread->pStackTop, true);
991 rtLockValComplainPanic();
992 }
993
994 /*
995 * Free the recursion records.
996 */
997 PRTLOCKVALRECNEST pCur = pPerThread->pFreeNestRecs;
998 pPerThread->pFreeNestRecs = NULL;
999 while (pCur)
1000 {
1001 PRTLOCKVALRECNEST pNext = pCur->pNextFree;
1002 RTMemFree(pCur);
1003 pCur = pNext;
1004 }
1005}
1006
1007RTDECL(int) RTLockValidatorClassCreateEx(PRTLOCKVALCLASS phClass, PCRTLOCKVALSRCPOS pSrcPos,
1008 bool fAutodidact, bool fRecursionOk, bool fStrictReleaseOrder,
1009 RTMSINTERVAL cMsMinDeadlock, RTMSINTERVAL cMsMinOrder,
1010 const char *pszNameFmt, ...)
1011{
1012 va_list va;
1013 va_start(va, pszNameFmt);
1014 int rc = RTLockValidatorClassCreateExV(phClass, pSrcPos, fAutodidact, fRecursionOk, fStrictReleaseOrder,
1015 cMsMinDeadlock, cMsMinOrder, pszNameFmt, va);
1016 va_end(va);
1017 return rc;
1018}
1019
1020
1021RTDECL(int) RTLockValidatorClassCreateExV(PRTLOCKVALCLASS phClass, PCRTLOCKVALSRCPOS pSrcPos,
1022 bool fAutodidact, bool fRecursionOk, bool fStrictReleaseOrder,
1023 RTMSINTERVAL cMsMinDeadlock, RTMSINTERVAL cMsMinOrder,
1024 const char *pszNameFmt, va_list va)
1025{
1026 Assert(cMsMinDeadlock >= 1);
1027 Assert(cMsMinOrder >= 1);
1028 AssertPtr(pSrcPos);
1029
1030 /*
1031 * Format the name and calc its length.
1032 */
1033 size_t cbName;
1034 char szName[32];
1035 if (pszNameFmt && *pszNameFmt)
1036 cbName = RTStrPrintfV(szName, sizeof(szName), pszNameFmt, va) + 1;
1037 else
1038 {
1039 static uint32_t volatile s_cAnonymous = 0;
1040 uint32_t i = ASMAtomicIncU32(&s_cAnonymous);
1041 cbName = RTStrPrintf(szName, sizeof(szName), "anon-%u", i - 1) + 1;
1042 }
1043
1044 /*
1045 * Figure out the file and function name lengths and allocate memory for
1046 * it all.
1047 */
1048 size_t const cbFile = pSrcPos->pszFile ? strlen(pSrcPos->pszFile) + 1 : 0;
1049 size_t const cbFunction = pSrcPos->pszFile ? strlen(pSrcPos->pszFunction) + 1 : 0;
1050 RTLOCKVALCLASSINT *pThis = (RTLOCKVALCLASSINT *)RTMemAllocVar(sizeof(*pThis) + cbFile + cbFunction + cbName);
1051 if (!pThis)
1052 return VERR_NO_MEMORY;
1053
1054 /*
1055 * Initialize the class data.
1056 */
1057 pThis->Core.Key = rtLockValidatorSrcPosHash(pSrcPos);
1058 pThis->Core.uchHeight = 0;
1059 pThis->Core.pLeft = NULL;
1060 pThis->Core.pRight = NULL;
1061 pThis->Core.pList = NULL;
1062 pThis->u32Magic = RTLOCKVALCLASS_MAGIC;
1063 pThis->cRefs = 1;
1064 pThis->fAutodidact = fAutodidact;
1065 pThis->fRecursionOk = fRecursionOk;
1066 pThis->fStrictReleaseOrder = fStrictReleaseOrder;
1067 pThis->fInTree = false;
1068 pThis->fDonateRefToNextRetainer = false;
1069 pThis->afReserved[0] = false;
1070 pThis->afReserved[1] = false;
1071 pThis->afReserved[2] = false;
1072 pThis->cMsMinDeadlock = cMsMinDeadlock;
1073 pThis->cMsMinOrder = cMsMinOrder;
1074 for (unsigned i = 0; i < RT_ELEMENTS(pThis->au32Reserved); i++)
1075 pThis->au32Reserved[i] = 0;
1076 for (unsigned i = 0; i < RT_ELEMENTS(pThis->PriorLocks.aRefs); i++)
1077 {
1078 pThis->PriorLocks.aRefs[i].hClass = NIL_RTLOCKVALCLASS;
1079 pThis->PriorLocks.aRefs[i].cLookups = 0;
1080 pThis->PriorLocks.aRefs[i].fAutodidacticism = false;
1081 pThis->PriorLocks.aRefs[i].afReserved[0] = false;
1082 pThis->PriorLocks.aRefs[i].afReserved[1] = false;
1083 pThis->PriorLocks.aRefs[i].afReserved[2] = false;
1084 }
1085 pThis->PriorLocks.pNext = NULL;
1086 for (unsigned i = 0; i < RT_ELEMENTS(pThis->apPriorLocksHash); i++)
1087 pThis->apPriorLocksHash[i] = NULL;
1088 char *pszDst = (char *)(pThis + 1);
1089 pThis->pszName = (char *)memcpy(pszDst, szName, cbName);
1090 pszDst += cbName;
1091 rtLockValidatorSrcPosCopy(&pThis->CreatePos, pSrcPos);
1092 pThis->CreatePos.pszFile = pSrcPos->pszFile ? (char *)memcpy(pszDst, pSrcPos->pszFile, cbFile) : NULL;
1093 pszDst += cbFile;
1094 pThis->CreatePos.pszFunction= pSrcPos->pszFunction ? (char *)memcpy(pszDst, pSrcPos->pszFunction, cbFunction) : NULL;
1095 Assert(rtLockValidatorSrcPosHash(&pThis->CreatePos) == pThis->Core.Key);
1096#ifdef RTLOCKVAL_WITH_CLASS_HASH_STATS
1097 pThis->cHashHits = 0;
1098 pThis->cHashMisses = 0;
1099#endif
1100
1101 *phClass = pThis;
1102 return VINF_SUCCESS;
1103}
1104
1105
1106RTDECL(int) RTLockValidatorClassCreate(PRTLOCKVALCLASS phClass, bool fAutodidact, RT_SRC_POS_DECL, const char *pszNameFmt, ...)
1107{
1108 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_POS_NO_ID();
1109 va_list va;
1110 va_start(va, pszNameFmt);
1111 int rc = RTLockValidatorClassCreateExV(phClass, &SrcPos,
1112 fAutodidact, true /*fRecursionOk*/, false /*fStrictReleaseOrder*/,
1113 1 /*cMsMinDeadlock*/, 1 /*cMsMinOrder*/,
1114 pszNameFmt, va);
1115 va_end(va);
1116 return rc;
1117}
1118
1119
1120/**
1121 * Creates a new lock validator class with a reference that is consumed by the
1122 * first call to RTLockValidatorClassRetain.
1123 *
1124 * This is tailored for use in the parameter list of a semaphore constructor.
1125 *
1126 * @returns Class handle with a reference that is automatically consumed by the
1127 * first retainer. NIL_RTLOCKVALCLASS if we run into trouble.
1128 *
1129 * @param pszFile The source position of the call, file.
1130 * @param iLine The source position of the call, line.
1131 * @param pszFunction The source position of the call, function.
1132 * @param pszNameFmt Class name format string, optional (NULL). Max
1133 * length is 32 bytes.
1134 * @param ... Format string arguments.
1135 */
1136RTDECL(RTLOCKVALCLASS) RTLockValidatorClassCreateUnique(RT_SRC_POS_DECL, const char *pszNameFmt, ...)
1137{
1138 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_POS_NO_ID();
1139 RTLOCKVALCLASSINT *pClass;
1140 va_list va;
1141 va_start(va, pszNameFmt);
1142 int rc = RTLockValidatorClassCreateExV(&pClass, &SrcPos,
1143 true /*fAutodidact*/, true /*fRecursionOk*/, false /*fStrictReleaseOrder*/,
1144 1 /*cMsMinDeadlock*/, 1 /*cMsMinOrder*/,
1145 pszNameFmt, va);
1146 va_end(va);
1147 if (RT_FAILURE(rc))
1148 return NIL_RTLOCKVALCLASS;
1149 ASMAtomicWriteBool(&pClass->fDonateRefToNextRetainer, true); /* see rtLockValidatorClassRetain */
1150 return pClass;
1151}
1152
1153
1154/**
1155 * Internal class retainer.
1156 * @returns The new reference count.
1157 * @param pClass The class.
1158 */
1159DECL_FORCE_INLINE(uint32_t) rtLockValidatorClassRetain(RTLOCKVALCLASSINT *pClass)
1160{
1161 uint32_t cRefs = ASMAtomicIncU32(&pClass->cRefs);
1162 if (cRefs > RTLOCKVALCLASS_MAX_REFS)
1163 ASMAtomicWriteU32(&pClass->cRefs, RTLOCKVALCLASS_MAX_REFS);
1164 else if ( cRefs == 2
1165 && ASMAtomicXchgBool(&pClass->fDonateRefToNextRetainer, false))
1166 cRefs = ASMAtomicDecU32(&pClass->cRefs);
1167 return cRefs;
1168}
1169
1170
1171/**
1172 * Validates and retains a lock validator class.
1173 *
1174 * @returns @a hClass on success, NIL_RTLOCKVALCLASS on failure.
1175 * @param hClass The class handle. NIL_RTLOCKVALCLASS is ok.
1176 */
1177DECL_FORCE_INLINE(RTLOCKVALCLASS) rtLockValidatorClassValidateAndRetain(RTLOCKVALCLASS hClass)
1178{
1179 if (hClass == NIL_RTLOCKVALCLASS)
1180 return hClass;
1181 AssertPtrReturn(hClass, NIL_RTLOCKVALCLASS);
1182 AssertReturn(hClass->u32Magic == RTLOCKVALCLASS_MAGIC, NIL_RTLOCKVALCLASS);
1183 rtLockValidatorClassRetain(hClass);
1184 return hClass;
1185}
1186
1187
1188/**
1189 * Internal class releaser.
1190 * @returns The new reference count.
1191 * @param pClass The class.
1192 */
1193DECLINLINE(uint32_t) rtLockValidatorClassRelease(RTLOCKVALCLASSINT *pClass)
1194{
1195 uint32_t cRefs = ASMAtomicDecU32(&pClass->cRefs);
1196 if (cRefs + 1 == RTLOCKVALCLASS_MAX_REFS)
1197 ASMAtomicWriteU32(&pClass->cRefs, RTLOCKVALCLASS_MAX_REFS);
1198 else if (!cRefs)
1199 rtLockValidatorClassDestroy(pClass);
1200 return cRefs;
1201}
1202
1203
1204/**
1205 * Destroys a class once there are not more references to it.
1206 *
1207 * @param Class The class.
1208 */
1209static void rtLockValidatorClassDestroy(RTLOCKVALCLASSINT *pClass)
1210{
1211 AssertReturnVoid(!pClass->fInTree);
1212 ASMAtomicWriteU32(&pClass->u32Magic, RTLOCKVALCLASS_MAGIC_DEAD);
1213
1214 PRTLOCKVALCLASSREFCHUNK pChunk = &pClass->PriorLocks;
1215 while (pChunk)
1216 {
1217 for (uint32_t i = 0; i < RT_ELEMENTS(pChunk->aRefs); i++)
1218 {
1219 RTLOCKVALCLASSINT *pClass2 = pChunk->aRefs[i].hClass;
1220 if (pClass2 != NIL_RTLOCKVALCLASS)
1221 {
1222 pChunk->aRefs[i].hClass = NIL_RTLOCKVALCLASS;
1223 rtLockValidatorClassRelease(pClass2);
1224 }
1225 }
1226
1227 PRTLOCKVALCLASSREFCHUNK pNext = pChunk->pNext;
1228 pChunk->pNext = NULL;
1229 if (pChunk != &pClass->PriorLocks)
1230 RTMemFree(pChunk);
1231 pChunk = pNext;
1232 }
1233
1234 RTMemFree(pClass);
1235}
1236
1237
1238RTDECL(RTLOCKVALCLASS) RTLockValidatorClassFindForSrcPos(PRTLOCKVALSRCPOS pSrcPos)
1239{
1240 if (g_hLockValClassTreeRWLock == NIL_RTSEMRW)
1241 rtLockValidatorLazyInit();
1242 int rcLock = RTSemRWRequestRead(g_hLockValClassTreeRWLock, RT_INDEFINITE_WAIT);
1243
1244 uint32_t uSrcPosHash = rtLockValidatorSrcPosHash(pSrcPos);
1245 RTLOCKVALCLASSINT *pClass = (RTLOCKVALCLASSINT *)RTAvllU32Get(&g_LockValClassTree, uSrcPosHash);
1246 while (pClass)
1247 {
1248 if (rtLockValidatorSrcPosCompare(&pClass->CreatePos, pSrcPos) == 0)
1249 break;
1250 pClass = (RTLOCKVALCLASSINT *)pClass->Core.pList;
1251 }
1252
1253 if (RT_SUCCESS(rcLock))
1254 RTSemRWReleaseRead(g_hLockValClassTreeRWLock);
1255 return pClass;
1256}
1257
1258
1259RTDECL(RTLOCKVALCLASS) RTLockValidatorClassForSrcPos(RT_SRC_POS_DECL, const char *pszNameFmt, ...)
1260{
1261 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_POS_NO_ID();
1262 RTLOCKVALCLASS hClass = RTLockValidatorClassFindForSrcPos(&SrcPos);
1263 if (hClass == NIL_RTLOCKVALCLASS)
1264 {
1265 /*
1266 * Create a new class and insert it into the tree.
1267 */
1268 va_list va;
1269 va_start(va, pszNameFmt);
1270 int rc = RTLockValidatorClassCreateExV(&hClass, &SrcPos,
1271 true /*fAutodidact*/, true /*fRecursionOk*/, false /*fStrictReleaseOrder*/,
1272 1 /*cMsMinDeadlock*/, 1 /*cMsMinOrder*/,
1273 pszNameFmt, va);
1274 va_end(va);
1275 if (RT_SUCCESS(rc))
1276 {
1277 if (g_hLockValClassTreeRWLock == NIL_RTSEMRW)
1278 rtLockValidatorLazyInit();
1279 int rcLock = RTSemRWRequestWrite(g_hLockValClassTreeRWLock, RT_INDEFINITE_WAIT);
1280
1281 Assert(!hClass->fInTree);
1282 hClass->fInTree = RTAvllU32Insert(&g_LockValClassTree, &hClass->Core);
1283 Assert(hClass->fInTree);
1284
1285 if (RT_SUCCESS(rcLock))
1286 RTSemRWReleaseWrite(g_hLockValClassTreeRWLock);
1287 return hClass;
1288 }
1289 }
1290 return hClass;
1291}
1292
1293
/**
 * Retains a reference to a lock validator class.
 *
 * @returns The new reference count; UINT32_MAX on an invalid handle.
 * @param   hClass              The class handle (NIL is not acceptable here).
 */
RTDECL(uint32_t) RTLockValidatorClassRetain(RTLOCKVALCLASS hClass)
{
    RTLOCKVALCLASSINT *pClass = hClass;
    AssertPtrReturn(pClass, UINT32_MAX);
    AssertReturn(pClass->u32Magic == RTLOCKVALCLASS_MAGIC, UINT32_MAX);
    return rtLockValidatorClassRetain(pClass);
}
1301
1302
/**
 * Releases a reference to a lock validator class.
 *
 * @returns The new reference count (0 for NIL); UINT32_MAX on an invalid
 *          handle.  Destroys the class when the count reaches zero.
 * @param   hClass              The class handle.  NIL is quietly ignored.
 */
RTDECL(uint32_t) RTLockValidatorClassRelease(RTLOCKVALCLASS hClass)
{
    RTLOCKVALCLASSINT *pClass = hClass;
    if (pClass == NIL_RTLOCKVALCLASS)
        return 0;
    AssertPtrReturn(pClass, UINT32_MAX);
    AssertReturn(pClass->u32Magic == RTLOCKVALCLASS_MAGIC, UINT32_MAX);
    return rtLockValidatorClassRelease(pClass);
}
1312
1313
1314/**
1315 * Worker for rtLockValidatorClassIsPriorClass that does a linear search thru
1316 * all the chunks for @a pPriorClass.
1317 *
1318 * @returns true / false.
1319 * @param pClass The class to search.
1320 * @param pPriorClass The class to search for.
1321 */
1322static bool rtLockValidatorClassIsPriorClassByLinearSearch(RTLOCKVALCLASSINT *pClass, RTLOCKVALCLASSINT *pPriorClass)
1323{
1324 for (PRTLOCKVALCLASSREFCHUNK pChunk = &pClass->PriorLocks; pChunk; pChunk = pChunk->pNext)
1325 for (uint32_t i = 0; i < RT_ELEMENTS(pChunk->aRefs); i++)
1326 {
1327 if (pChunk->aRefs[i].hClass == pPriorClass)
1328 {
1329 uint32_t cLookups = ASMAtomicIncU32(&pChunk->aRefs[i].cLookups);
1330 if (RT_UNLIKELY(cLookups >= RTLOCKVALCLASSREF_MAX_LOOKUPS_FIX))
1331 {
1332 ASMAtomicWriteU32(&pChunk->aRefs[i].cLookups, RTLOCKVALCLASSREF_MAX_LOOKUPS);
1333 cLookups = RTLOCKVALCLASSREF_MAX_LOOKUPS;
1334 }
1335
1336 /* update the hash table entry. */
1337 PRTLOCKVALCLASSREF *ppHashEntry = &pClass->apPriorLocksHash[RTLOCKVALCLASS_HASH(pPriorClass)];
1338 if ( !(*ppHashEntry)
1339 || (*ppHashEntry)->cLookups + 128 < cLookups)
1340 ASMAtomicWritePtr(ppHashEntry, &pChunk->aRefs[i]);
1341
1342#ifdef RTLOCKVAL_WITH_CLASS_HASH_STATS
1343 ASMAtomicIncU32(&pClass->cHashMisses);
1344#endif
1345 return true;
1346 }
1347 }
1348
1349 return false;
1350}
1351
1352
1353/**
1354 * Checks if @a pPriorClass is a known prior class.
1355 *
1356 * @returns true / false.
1357 * @param pClass The class to search.
1358 * @param pPriorClass The class to search for.
1359 */
1360DECL_FORCE_INLINE(bool) rtLockValidatorClassIsPriorClass(RTLOCKVALCLASSINT *pClass, RTLOCKVALCLASSINT *pPriorClass)
1361{
1362 /*
1363 * Hash lookup here.
1364 */
1365 PRTLOCKVALCLASSREF pRef = pClass->apPriorLocksHash[RTLOCKVALCLASS_HASH(pPriorClass)];
1366 if ( pRef
1367 && pRef->hClass == pPriorClass)
1368 {
1369 uint32_t cLookups = ASMAtomicIncU32(&pRef->cLookups);
1370 if (RT_UNLIKELY(cLookups >= RTLOCKVALCLASSREF_MAX_LOOKUPS_FIX))
1371 ASMAtomicWriteU32(&pRef->cLookups, RTLOCKVALCLASSREF_MAX_LOOKUPS);
1372#ifdef RTLOCKVAL_WITH_CLASS_HASH_STATS
1373 ASMAtomicIncU32(&pClass->cHashHits);
1374#endif
1375 return true;
1376 }
1377
1378 return rtLockValidatorClassIsPriorClassByLinearSearch(pClass, pPriorClass);
1379}
1380
1381
1382/**
1383 * Adds a class to the prior list.
1384 *
1385 * @returns VINF_SUCCESS, VERR_NO_MEMORY or VERR_SEM_LV_WRONG_ORDER.
1386 * @param pClass The class to work on.
1387 * @param pPriorClass The class to add.
1388 * @param fAutodidacticism Whether we're teaching ourselves (true) or
1389 * somebody is teaching us via the API (false).
1390 * @param pSrcPos Where this rule was added (optional).
1391 */
1392static int rtLockValidatorClassAddPriorClass(RTLOCKVALCLASSINT *pClass, RTLOCKVALCLASSINT *pPriorClass,
1393 bool fAutodidacticism, PCRTLOCKVALSRCPOS pSrcPos)
1394{
1395 NOREF(pSrcPos);
1396 if (!RTCritSectIsInitialized(&g_LockValClassTeachCS))
1397 rtLockValidatorLazyInit();
1398 int rcLock = RTCritSectEnter(&g_LockValClassTeachCS);
1399
1400 /*
1401 * Check that there are no conflict (no assert since we might race each other).
1402 */
1403 int rc = VERR_SEM_LV_INTERNAL_ERROR;
1404 if (!rtLockValidatorClassIsPriorClass(pPriorClass, pClass))
1405 {
1406 if (!rtLockValidatorClassIsPriorClass(pClass, pPriorClass))
1407 {
1408 /*
1409 * Scan the table for a free entry, allocating a new chunk if necessary.
1410 */
1411 for (PRTLOCKVALCLASSREFCHUNK pChunk = &pClass->PriorLocks; ; pChunk = pChunk->pNext)
1412 {
1413 bool fDone = false;
1414 for (uint32_t i = 0; i < RT_ELEMENTS(pChunk->aRefs); i++)
1415 {
1416 ASMAtomicCmpXchgHandle(&pChunk->aRefs[i].hClass, pPriorClass, NIL_RTLOCKVALCLASS, fDone);
1417 if (fDone)
1418 {
1419 pChunk->aRefs[i].fAutodidacticism = fAutodidacticism;
1420 rtLockValidatorClassRetain(pPriorClass);
1421 rc = VINF_SUCCESS;
1422 break;
1423 }
1424 }
1425 if (fDone)
1426 break;
1427
1428 /* If no more chunks, allocate a new one and insert the class before linking it. */
1429 if (!pChunk->pNext)
1430 {
1431 PRTLOCKVALCLASSREFCHUNK pNew = (PRTLOCKVALCLASSREFCHUNK)RTMemAlloc(sizeof(*pNew));
1432 if (!pNew)
1433 {
1434 rc = VERR_NO_MEMORY;
1435 break;
1436 }
1437 pNew->pNext = NULL;
1438 for (uint32_t i = 0; i < RT_ELEMENTS(pNew->aRefs); i++)
1439 {
1440 pNew->aRefs[i].hClass = NIL_RTLOCKVALCLASS;
1441 pNew->aRefs[i].cLookups = 0;
1442 pNew->aRefs[i].fAutodidacticism = false;
1443 pNew->aRefs[i].afReserved[0] = false;
1444 pNew->aRefs[i].afReserved[1] = false;
1445 pNew->aRefs[i].afReserved[2] = false;
1446 }
1447
1448 pNew->aRefs[0].hClass = pPriorClass;
1449 pNew->aRefs[0].fAutodidacticism = fAutodidacticism;
1450
1451 ASMAtomicWritePtr(&pChunk->pNext, pNew);
1452 rtLockValidatorClassRetain(pPriorClass);
1453 rc = VINF_SUCCESS;
1454 break;
1455 }
1456 } /* chunk loop */
1457 }
1458 else
1459 rc = VINF_SUCCESS;
1460 }
1461 else
1462 rc = !g_fLockValSoftWrongOrder ? VERR_SEM_LV_WRONG_ORDER : VINF_SUCCESS;
1463
1464 if (RT_SUCCESS(rcLock))
1465 RTCritSectLeave(&g_LockValClassTeachCS);
1466 return rc;
1467}
1468
1469
/**
 * Teaches the validator that @a hPriorClass must be acquired before @a hClass.
 *
 * @returns VINF_SUCCESS, VERR_INVALID_HANDLE, VERR_NO_MEMORY or
 *          VERR_SEM_LV_WRONG_ORDER.
 * @param   hClass              The class being taught.
 * @param   hPriorClass         The class to be acquired first.
 */
RTDECL(int) RTLockValidatorClassAddPriorClass(RTLOCKVALCLASS hClass, RTLOCKVALCLASS hPriorClass)
{
    RTLOCKVALCLASSINT *pClass = hClass;
    AssertPtrReturn(pClass, VERR_INVALID_HANDLE);
    AssertReturn(pClass->u32Magic == RTLOCKVALCLASS_MAGIC, VERR_INVALID_HANDLE);

    RTLOCKVALCLASSINT *pPriorClass = hPriorClass;
    AssertPtrReturn(pPriorClass, VERR_INVALID_HANDLE);
    AssertReturn(pPriorClass->u32Magic == RTLOCKVALCLASS_MAGIC, VERR_INVALID_HANDLE);

    return rtLockValidatorClassAddPriorClass(pClass, pPriorClass, false /*fAutodidacticism*/, NULL);
}
1482
1483
/**
 * Enables or disables strict release order enforcement for a class.
 *
 * @returns VINF_SUCCESS or VERR_INVALID_HANDLE.
 * @param   hClass              The class handle.
 * @param   fEnabled            Whether locks of this class must be released in
 *                              reverse acquisition order.
 */
RTDECL(int) RTLockValidatorClassEnforceStrictReleaseOrder(RTLOCKVALCLASS hClass, bool fEnabled)
{
    RTLOCKVALCLASSINT *pClass = hClass;
    AssertPtrReturn(pClass, VERR_INVALID_HANDLE);
    AssertReturn(pClass->u32Magic == RTLOCKVALCLASS_MAGIC, VERR_INVALID_HANDLE);

    ASMAtomicWriteBool(&pClass->fStrictReleaseOrder, fEnabled);
    return VINF_SUCCESS;
}
1493
1494
1495/**
1496 * Unlinks all siblings.
1497 *
1498 * This is used during record deletion and assumes no races.
1499 *
1500 * @param pCore One of the siblings.
1501 */
1502static void rtLockValidatorUnlinkAllSiblings(PRTLOCKVALRECCORE pCore)
1503{
1504 /* ASSUMES sibling destruction doesn't involve any races and that all
1505 related records are to be disposed off now. */
1506 PRTLOCKVALRECUNION pSibling = (PRTLOCKVALRECUNION)pCore;
1507 while (pSibling)
1508 {
1509 PRTLOCKVALRECUNION volatile *ppCoreNext;
1510 switch (pSibling->Core.u32Magic)
1511 {
1512 case RTLOCKVALRECEXCL_MAGIC:
1513 case RTLOCKVALRECEXCL_MAGIC_DEAD:
1514 ppCoreNext = &pSibling->Excl.pSibling;
1515 break;
1516
1517 case RTLOCKVALRECSHRD_MAGIC:
1518 case RTLOCKVALRECSHRD_MAGIC_DEAD:
1519 ppCoreNext = &pSibling->Shared.pSibling;
1520 break;
1521
1522 default:
1523 AssertFailed();
1524 ppCoreNext = NULL;
1525 break;
1526 }
1527 if (RT_UNLIKELY(ppCoreNext))
1528 break;
1529 pSibling = ASMAtomicXchgPtrT(ppCoreNext, NULL, PRTLOCKVALRECUNION);
1530 }
1531}
1532
1533
1534RTDECL(int) RTLockValidatorRecMakeSiblings(PRTLOCKVALRECCORE pRec1, PRTLOCKVALRECCORE pRec2)
1535{
1536 /*
1537 * Validate input.
1538 */
1539 PRTLOCKVALRECUNION p1 = (PRTLOCKVALRECUNION)pRec1;
1540 PRTLOCKVALRECUNION p2 = (PRTLOCKVALRECUNION)pRec2;
1541
1542 AssertPtrReturn(p1, VERR_SEM_LV_INVALID_PARAMETER);
1543 AssertReturn( p1->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC
1544 || p1->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
1545 , VERR_SEM_LV_INVALID_PARAMETER);
1546
1547 AssertPtrReturn(p2, VERR_SEM_LV_INVALID_PARAMETER);
1548 AssertReturn( p2->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC
1549 || p2->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
1550 , VERR_SEM_LV_INVALID_PARAMETER);
1551
1552 /*
1553 * Link them (circular list).
1554 */
1555 if ( p1->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC
1556 && p2->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC)
1557 {
1558 p1->Excl.pSibling = p2;
1559 p2->Shared.pSibling = p1;
1560 }
1561 else if ( p1->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
1562 && p2->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC)
1563 {
1564 p1->Shared.pSibling = p2;
1565 p2->Excl.pSibling = p1;
1566 }
1567 else
1568 AssertFailedReturn(VERR_SEM_LV_INVALID_PARAMETER); /* unsupported mix */
1569
1570 return VINF_SUCCESS;
1571}
1572
1573
1574/**
1575 * Gets the lock name for the given record.
1576 *
1577 * @returns Read-only lock name.
1578 * @param pRec The lock record.
1579 */
1580DECL_FORCE_INLINE(const char *) rtLockValidatorRecName(PRTLOCKVALRECUNION pRec)
1581{
1582 switch (pRec->Core.u32Magic)
1583 {
1584 case RTLOCKVALRECEXCL_MAGIC:
1585 return pRec->Excl.szName;
1586 case RTLOCKVALRECSHRD_MAGIC:
1587 return pRec->Shared.szName;
1588 case RTLOCKVALRECSHRDOWN_MAGIC:
1589 return pRec->ShrdOwner.pSharedRec ? pRec->ShrdOwner.pSharedRec->szName : "orphaned";
1590 case RTLOCKVALRECNEST_MAGIC:
1591 pRec = rtLockValidatorReadRecUnionPtr(&pRec->Nest.pRec);
1592 if (VALID_PTR(pRec))
1593 {
1594 switch (pRec->Core.u32Magic)
1595 {
1596 case RTLOCKVALRECEXCL_MAGIC:
1597 return pRec->Excl.szName;
1598 case RTLOCKVALRECSHRD_MAGIC:
1599 return pRec->Shared.szName;
1600 case RTLOCKVALRECSHRDOWN_MAGIC:
1601 return pRec->ShrdOwner.pSharedRec ? pRec->ShrdOwner.pSharedRec->szName : "orphaned";
1602 default:
1603 return "unknown-nested";
1604 }
1605 }
1606 return "orphaned-nested";
1607 default:
1608 return "unknown";
1609 }
1610}
1611
1612
1613/**
1614 * Gets the class for this locking record.
1615 *
1616 * @returns Pointer to the class or NIL_RTLOCKVALCLASS.
1617 * @param pRec The lock validator record.
1618 */
1619DECLINLINE(RTLOCKVALCLASSINT *) rtLockValidatorRecGetClass(PRTLOCKVALRECUNION pRec)
1620{
1621 switch (pRec->Core.u32Magic)
1622 {
1623 case RTLOCKVALRECEXCL_MAGIC:
1624 return pRec->Excl.hClass;
1625
1626 case RTLOCKVALRECSHRD_MAGIC:
1627 return pRec->Shared.hClass;
1628
1629 case RTLOCKVALRECSHRDOWN_MAGIC:
1630 {
1631 PRTLOCKVALRECSHRD pSharedRec = pRec->ShrdOwner.pSharedRec;
1632 if (RT_LIKELY( VALID_PTR(pSharedRec)
1633 && pSharedRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC))
1634 return pSharedRec->hClass;
1635 return NIL_RTLOCKVALCLASS;
1636 }
1637
1638 case RTLOCKVALRECNEST_MAGIC:
1639 {
1640 PRTLOCKVALRECUNION pRealRec = pRec->Nest.pRec;
1641 if (VALID_PTR(pRealRec))
1642 {
1643 switch (pRealRec->Core.u32Magic)
1644 {
1645 case RTLOCKVALRECEXCL_MAGIC:
1646 return pRealRec->Excl.hClass;
1647
1648 case RTLOCKVALRECSHRDOWN_MAGIC:
1649 {
1650 PRTLOCKVALRECSHRD pSharedRec = pRealRec->ShrdOwner.pSharedRec;
1651 if (RT_LIKELY( VALID_PTR(pSharedRec)
1652 && pSharedRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC))
1653 return pSharedRec->hClass;
1654 break;
1655 }
1656
1657 default:
1658 AssertMsgFailed(("%p %p %#x\n", pRec, pRealRec, pRealRec->Core.u32Magic));
1659 break;
1660 }
1661 }
1662 return NIL_RTLOCKVALCLASS;
1663 }
1664
1665 default:
1666 AssertMsgFailed(("%#x\n", pRec->Core.u32Magic));
1667 return NIL_RTLOCKVALCLASS;
1668 }
1669}
1670
1671
1672/**
1673 * Gets the class for this locking record and the pointer to the one below it in
1674 * the stack.
1675 *
1676 * @returns Pointer to the class or NIL_RTLOCKVALCLASS.
1677 * @param pRec The lock validator record.
1678 * @param puSubClass Where to return the sub-class.
1679 * @param ppDown Where to return the pointer to the record below.
1680 */
1681DECL_FORCE_INLINE(RTLOCKVALCLASSINT *)
1682rtLockValidatorRecGetClassesAndDown(PRTLOCKVALRECUNION pRec, uint32_t *puSubClass, PRTLOCKVALRECUNION *ppDown)
1683{
1684 switch (pRec->Core.u32Magic)
1685 {
1686 case RTLOCKVALRECEXCL_MAGIC:
1687 *ppDown = pRec->Excl.pDown;
1688 *puSubClass = pRec->Excl.uSubClass;
1689 return pRec->Excl.hClass;
1690
1691 case RTLOCKVALRECSHRD_MAGIC:
1692 *ppDown = NULL;
1693 *puSubClass = pRec->Shared.uSubClass;
1694 return pRec->Shared.hClass;
1695
1696 case RTLOCKVALRECSHRDOWN_MAGIC:
1697 {
1698 *ppDown = pRec->ShrdOwner.pDown;
1699
1700 PRTLOCKVALRECSHRD pSharedRec = pRec->ShrdOwner.pSharedRec;
1701 if (RT_LIKELY( VALID_PTR(pSharedRec)
1702 && pSharedRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC))
1703 {
1704 *puSubClass = pSharedRec->uSubClass;
1705 return pSharedRec->hClass;
1706 }
1707 *puSubClass = RTLOCKVAL_SUB_CLASS_NONE;
1708 return NIL_RTLOCKVALCLASS;
1709 }
1710
1711 case RTLOCKVALRECNEST_MAGIC:
1712 {
1713 *ppDown = pRec->Nest.pDown;
1714
1715 PRTLOCKVALRECUNION pRealRec = pRec->Nest.pRec;
1716 if (VALID_PTR(pRealRec))
1717 {
1718 switch (pRealRec->Core.u32Magic)
1719 {
1720 case RTLOCKVALRECEXCL_MAGIC:
1721 *puSubClass = pRealRec->Excl.uSubClass;
1722 return pRealRec->Excl.hClass;
1723
1724 case RTLOCKVALRECSHRDOWN_MAGIC:
1725 {
1726 PRTLOCKVALRECSHRD pSharedRec = pRealRec->ShrdOwner.pSharedRec;
1727 if (RT_LIKELY( VALID_PTR(pSharedRec)
1728 && pSharedRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC))
1729 {
1730 *puSubClass = pSharedRec->uSubClass;
1731 return pSharedRec->hClass;
1732 }
1733 break;
1734 }
1735
1736 default:
1737 AssertMsgFailed(("%p %p %#x\n", pRec, pRealRec, pRealRec->Core.u32Magic));
1738 break;
1739 }
1740 }
1741 *puSubClass = RTLOCKVAL_SUB_CLASS_NONE;
1742 return NIL_RTLOCKVALCLASS;
1743 }
1744
1745 default:
1746 AssertMsgFailed(("%#x\n", pRec->Core.u32Magic));
1747 *ppDown = NULL;
1748 *puSubClass = RTLOCKVAL_SUB_CLASS_NONE;
1749 return NIL_RTLOCKVALCLASS;
1750 }
1751}
1752
1753
1754/**
1755 * Gets the sub-class for a lock record.
1756 *
1757 * @returns the sub-class.
1758 * @param pRec The lock validator record.
1759 */
1760DECLINLINE(uint32_t) rtLockValidatorRecGetSubClass(PRTLOCKVALRECUNION pRec)
1761{
1762 switch (pRec->Core.u32Magic)
1763 {
1764 case RTLOCKVALRECEXCL_MAGIC:
1765 return pRec->Excl.uSubClass;
1766
1767 case RTLOCKVALRECSHRD_MAGIC:
1768 return pRec->Shared.uSubClass;
1769
1770 case RTLOCKVALRECSHRDOWN_MAGIC:
1771 {
1772 PRTLOCKVALRECSHRD pSharedRec = pRec->ShrdOwner.pSharedRec;
1773 if (RT_LIKELY( VALID_PTR(pSharedRec)
1774 && pSharedRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC))
1775 return pSharedRec->uSubClass;
1776 return RTLOCKVAL_SUB_CLASS_NONE;
1777 }
1778
1779 case RTLOCKVALRECNEST_MAGIC:
1780 {
1781 PRTLOCKVALRECUNION pRealRec = pRec->Nest.pRec;
1782 if (VALID_PTR(pRealRec))
1783 {
1784 switch (pRealRec->Core.u32Magic)
1785 {
1786 case RTLOCKVALRECEXCL_MAGIC:
1787 return pRec->Excl.uSubClass;
1788
1789 case RTLOCKVALRECSHRDOWN_MAGIC:
1790 {
1791 PRTLOCKVALRECSHRD pSharedRec = pRealRec->ShrdOwner.pSharedRec;
1792 if (RT_LIKELY( VALID_PTR(pSharedRec)
1793 && pSharedRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC))
1794 return pSharedRec->uSubClass;
1795 break;
1796 }
1797
1798 default:
1799 AssertMsgFailed(("%p %p %#x\n", pRec, pRealRec, pRealRec->Core.u32Magic));
1800 break;
1801 }
1802 }
1803 return RTLOCKVAL_SUB_CLASS_NONE;
1804 }
1805
1806 default:
1807 AssertMsgFailed(("%#x\n", pRec->Core.u32Magic));
1808 return RTLOCKVAL_SUB_CLASS_NONE;
1809 }
1810}
1811
1812
1813
1814
1815/**
1816 * Calculates the depth of a lock stack.
1817 *
1818 * @returns Number of stack frames.
1819 * @param pThread The thread.
1820 */
1821static uint32_t rtLockValidatorStackDepth(PRTTHREADINT pThread)
1822{
1823 uint32_t cEntries = 0;
1824 PRTLOCKVALRECUNION pCur = rtLockValidatorReadRecUnionPtr(&pThread->LockValidator.pStackTop);
1825 while (VALID_PTR(pCur))
1826 {
1827 switch (pCur->Core.u32Magic)
1828 {
1829 case RTLOCKVALRECEXCL_MAGIC:
1830 pCur = rtLockValidatorReadRecUnionPtr(&pCur->Excl.pDown);
1831 break;
1832
1833 case RTLOCKVALRECSHRDOWN_MAGIC:
1834 pCur = rtLockValidatorReadRecUnionPtr(&pCur->ShrdOwner.pDown);
1835 break;
1836
1837 case RTLOCKVALRECNEST_MAGIC:
1838 pCur = rtLockValidatorReadRecUnionPtr(&pCur->Nest.pDown);
1839 break;
1840
1841 default:
1842 AssertMsgFailedReturn(("%#x\n", pCur->Core.u32Magic), cEntries);
1843 }
1844 cEntries++;
1845 }
1846 return cEntries;
1847}
1848
1849
#ifdef RT_STRICT
/**
 * Checks if the stack contains @a pRec.
 *
 * @returns true / false.
 * @param   pThreadSelf         The current thread.
 * @param   pRec                The lock record.
 */
static bool rtLockValidatorStackContainsRec(PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION pRec)
{
    for (PRTLOCKVALRECUNION pCur = pThreadSelf->LockValidator.pStackTop; pCur; )
    {
        AssertPtrReturn(pCur, false);
        if (pCur == pRec)
            return true;

        /* Follow the type-specific down pointer, sanity checking the
           recursion counts as we go. */
        switch (pCur->Core.u32Magic)
        {
            case RTLOCKVALRECEXCL_MAGIC:
                Assert(pCur->Excl.cRecursion >= 1);
                pCur = pCur->Excl.pDown;
                break;

            case RTLOCKVALRECSHRDOWN_MAGIC:
                Assert(pCur->ShrdOwner.cRecursion >= 1);
                pCur = pCur->ShrdOwner.pDown;
                break;

            case RTLOCKVALRECNEST_MAGIC:
                Assert(pCur->Nest.cRecursion > 1);
                pCur = pCur->Nest.pDown;
                break;

            default:
                AssertMsgFailedReturn(("%#x\n", pCur->Core.u32Magic), false);
        }
    }
    return false;
}
#endif /* RT_STRICT */
1890
1891
1892/**
1893 * Pushes a lock record onto the stack.
1894 *
1895 * @param pThreadSelf The current thread.
1896 * @param pRec The lock record.
1897 */
1898static void rtLockValidatorStackPush(PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION pRec)
1899{
1900 Assert(pThreadSelf == RTThreadSelf());
1901 Assert(!rtLockValidatorStackContainsRec(pThreadSelf, pRec));
1902
1903 switch (pRec->Core.u32Magic)
1904 {
1905 case RTLOCKVALRECEXCL_MAGIC:
1906 Assert(pRec->Excl.cRecursion == 1);
1907 Assert(pRec->Excl.pDown == NULL);
1908 rtLockValidatorWriteRecUnionPtr(&pRec->Excl.pDown, pThreadSelf->LockValidator.pStackTop);
1909 break;
1910
1911 case RTLOCKVALRECSHRDOWN_MAGIC:
1912 Assert(pRec->ShrdOwner.cRecursion == 1);
1913 Assert(pRec->ShrdOwner.pDown == NULL);
1914 rtLockValidatorWriteRecUnionPtr(&pRec->ShrdOwner.pDown, pThreadSelf->LockValidator.pStackTop);
1915 break;
1916
1917 default:
1918 AssertMsgFailedReturnVoid(("%#x\n", pRec->Core.u32Magic));
1919 }
1920 rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pStackTop, pRec);
1921}
1922
1923
1924/**
1925 * Pops a lock record off the stack.
1926 *
1927 * @param pThreadSelf The current thread.
1928 * @param pRec The lock.
1929 */
1930static void rtLockValidatorStackPop(PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION pRec)
1931{
1932 Assert(pThreadSelf == RTThreadSelf());
1933
1934 PRTLOCKVALRECUNION pDown;
1935 switch (pRec->Core.u32Magic)
1936 {
1937 case RTLOCKVALRECEXCL_MAGIC:
1938 Assert(pRec->Excl.cRecursion == 0);
1939 pDown = pRec->Excl.pDown;
1940 rtLockValidatorWriteRecUnionPtr(&pRec->Excl.pDown, NULL); /* lazy bird */
1941 break;
1942
1943 case RTLOCKVALRECSHRDOWN_MAGIC:
1944 Assert(pRec->ShrdOwner.cRecursion == 0);
1945 pDown = pRec->ShrdOwner.pDown;
1946 rtLockValidatorWriteRecUnionPtr(&pRec->ShrdOwner.pDown, NULL);
1947 break;
1948
1949 default:
1950 AssertMsgFailedReturnVoid(("%#x\n", pRec->Core.u32Magic));
1951 }
1952 if (pThreadSelf->LockValidator.pStackTop == pRec)
1953 rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pStackTop, pDown);
1954 else
1955 {
1956 /* Find the pointer to our record and unlink ourselves. */
1957 PRTLOCKVALRECUNION pCur = pThreadSelf->LockValidator.pStackTop;
1958 while (pCur)
1959 {
1960 PRTLOCKVALRECUNION volatile *ppDown;
1961 switch (pCur->Core.u32Magic)
1962 {
1963 case RTLOCKVALRECEXCL_MAGIC:
1964 Assert(pCur->Excl.cRecursion >= 1);
1965 ppDown = &pCur->Excl.pDown;
1966 break;
1967
1968 case RTLOCKVALRECSHRDOWN_MAGIC:
1969 Assert(pCur->ShrdOwner.cRecursion >= 1);
1970 ppDown = &pCur->ShrdOwner.pDown;
1971 break;
1972
1973 case RTLOCKVALRECNEST_MAGIC:
1974 Assert(pCur->Nest.cRecursion >= 1);
1975 ppDown = &pCur->Nest.pDown;
1976 break;
1977
1978 default:
1979 AssertMsgFailedReturnVoid(("%#x\n", pCur->Core.u32Magic));
1980 }
1981 pCur = *ppDown;
1982 if (pCur == pRec)
1983 {
1984 rtLockValidatorWriteRecUnionPtr(ppDown, pDown);
1985 return;
1986 }
1987 }
1988 AssertMsgFailed(("%p %p\n", pRec, pThreadSelf));
1989 }
1990}
1991
1992
1993/**
1994 * Creates and pushes lock recursion record onto the stack.
1995 *
1996 * @param pThreadSelf The current thread.
1997 * @param pRec The lock record.
1998 * @param pSrcPos Where the recursion occurred.
1999 */
static void rtLockValidatorStackPushRecursion(PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION pRec, PCRTLOCKVALSRCPOS pSrcPos)
{
    Assert(pThreadSelf == RTThreadSelf());
    Assert(rtLockValidatorStackContainsRec(pThreadSelf, pRec));

#ifdef RTLOCKVAL_WITH_RECURSION_RECORDS
    /*
     * Allocate a new recursion record
     */
    /* Prefer the per-thread free list; fall back to the heap.  Allocation
       failure is tolerated silently - we just lose this recursion entry. */
    PRTLOCKVALRECNEST pRecursionRec = pThreadSelf->LockValidator.pFreeNestRecs;
    if (pRecursionRec)
        pThreadSelf->LockValidator.pFreeNestRecs = pRecursionRec->pNextFree;
    else
    {
        pRecursionRec = (PRTLOCKVALRECNEST)RTMemAlloc(sizeof(*pRecursionRec));
        if (!pRecursionRec)
            return;
    }

    /*
     * Initialize it.
     */
    switch (pRec->Core.u32Magic)
    {
        case RTLOCKVALRECEXCL_MAGIC:
            pRecursionRec->cRecursion = pRec->Excl.cRecursion;
            break;

        case RTLOCKVALRECSHRDOWN_MAGIC:
            pRecursionRec->cRecursion = pRec->ShrdOwner.cRecursion;
            break;

        default:
            AssertMsgFailed(("%#x\n", pRec->Core.u32Magic));
            /* NOTE(review): the enter/leave pair appears to synchronize with
               concurrent record destruction before freeing - confirm intent. */
            rtLockValidatorSerializeDestructEnter();
            rtLockValidatorSerializeDestructLeave();
            RTMemFree(pRecursionRec);
            return;
    }
    /* Caller bumped the lock's recursion count before calling us. */
    Assert(pRecursionRec->cRecursion > 1);
    pRecursionRec->pRec = pRec;
    pRecursionRec->pDown = NULL;
    pRecursionRec->pNextFree = NULL;
    rtLockValidatorSrcPosCopy(&pRecursionRec->SrcPos, pSrcPos);
    /* Magic is written last so the record never looks alive half-initialized. */
    pRecursionRec->Core.u32Magic = RTLOCKVALRECNEST_MAGIC;

    /*
     * Link it.
     */
    pRecursionRec->pDown = pThreadSelf->LockValidator.pStackTop;
    rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pStackTop, (PRTLOCKVALRECUNION)pRecursionRec);
#endif /* RTLOCKVAL_WITH_RECURSION_RECORDS */
}
2053
2054
2055/**
2056 * Pops a lock recursion record off the stack.
2057 *
2058 * @param pThreadSelf The current thread.
2059 * @param pRec The lock record.
2060 */
2061static void rtLockValidatorStackPopRecursion(PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION pRec)
2062{
2063 Assert(pThreadSelf == RTThreadSelf());
2064 Assert(rtLockValidatorStackContainsRec(pThreadSelf, pRec));
2065
2066 uint32_t cRecursion;
2067 switch (pRec->Core.u32Magic)
2068 {
2069 case RTLOCKVALRECEXCL_MAGIC: cRecursion = pRec->Excl.cRecursion; break;
2070 case RTLOCKVALRECSHRDOWN_MAGIC: cRecursion = pRec->ShrdOwner.cRecursion; break;
2071 default: AssertMsgFailedReturnVoid(("%#x\n", pRec->Core.u32Magic));
2072 }
2073 Assert(cRecursion >= 1);
2074
2075#ifdef RTLOCKVAL_WITH_RECURSION_RECORDS
2076 /*
2077 * Pop the recursion record.
2078 */
2079 PRTLOCKVALRECUNION pNest = pThreadSelf->LockValidator.pStackTop;
2080 if ( pNest != NULL
2081 && pNest->Core.u32Magic == RTLOCKVALRECNEST_MAGIC
2082 && pNest->Nest.pRec == pRec
2083 )
2084 {
2085 Assert(pNest->Nest.cRecursion == cRecursion + 1);
2086 rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pStackTop, pNest->Nest.pDown);
2087 }
2088 else
2089 {
2090 /* Find the record above ours. */
2091 PRTLOCKVALRECUNION volatile *ppDown = NULL;
2092 for (;;)
2093 {
2094 AssertMsgReturnVoid(pNest, ("%p %p\n", pRec, pThreadSelf));
2095 switch (pNest->Core.u32Magic)
2096 {
2097 case RTLOCKVALRECEXCL_MAGIC:
2098 ppDown = &pNest->Excl.pDown;
2099 pNest = *ppDown;
2100 continue;
2101 case RTLOCKVALRECSHRDOWN_MAGIC:
2102 ppDown = &pNest->ShrdOwner.pDown;
2103 pNest = *ppDown;
2104 continue;
2105 case RTLOCKVALRECNEST_MAGIC:
2106 if (pNest->Nest.pRec == pRec)
2107 break;
2108 ppDown = &pNest->Nest.pDown;
2109 pNest = *ppDown;
2110 continue;
2111 default:
2112 AssertMsgFailedReturnVoid(("%#x\n", pNest->Core.u32Magic));
2113 }
2114 break; /* ugly */
2115 }
2116 Assert(pNest->Nest.cRecursion == cRecursion + 1);
2117 rtLockValidatorWriteRecUnionPtr(ppDown, pNest->Nest.pDown);
2118 }
2119
2120 /*
2121 * Invalidate and free the record.
2122 */
2123 ASMAtomicWriteU32(&pNest->Core.u32Magic, RTLOCKVALRECNEST_MAGIC);
2124 rtLockValidatorWriteRecUnionPtr(&pNest->Nest.pDown, NULL);
2125 rtLockValidatorWriteRecUnionPtr(&pNest->Nest.pRec, NULL);
2126 pNest->Nest.cRecursion = 0;
2127 pNest->Nest.pNextFree = pThreadSelf->LockValidator.pFreeNestRecs;
2128 pThreadSelf->LockValidator.pFreeNestRecs = &pNest->Nest;
2129#endif /* RTLOCKVAL_WITH_RECURSION_RECORDS */
2130}
2131
2132
2133/**
2134 * Helper for rtLockValidatorStackCheckLockingOrder that does the bitching and
2135 * returns VERR_SEM_LV_WRONG_ORDER.
2136 */
2137static int rtLockValidatorStackWrongOrder(const char *pszWhat, PCRTLOCKVALSRCPOS pSrcPos, PRTTHREADINT pThreadSelf,
2138 PRTLOCKVALRECUNION pRec1, PRTLOCKVALRECUNION pRec2,
2139 RTLOCKVALCLASSINT *pClass1, RTLOCKVALCLASSINT *pClass2)
2140
2141
2142{
2143 rtLockValComplainFirst(pszWhat, pSrcPos, pThreadSelf, pRec1, false);
2144 rtLockValComplainAboutLock("Other lock: ", pRec2, "\n");
2145 rtLockValComplainAboutClass("My class: ", pClass1, rtLockValidatorRecGetSubClass(pRec1), true /*fVerbose*/);
2146 rtLockValComplainAboutClass("Other class: ", pClass2, rtLockValidatorRecGetSubClass(pRec2), true /*fVerbose*/);
2147 rtLockValComplainAboutLockStack(pThreadSelf, 0, 0, pRec2);
2148 rtLockValComplainPanic();
2149 return !g_fLockValSoftWrongOrder ? VERR_SEM_LV_WRONG_ORDER : VINF_SUCCESS;
2150}
2151
2152
2153/**
2154 * Checks if the sub-class order is ok or not.
2155 *
2156 * Used to deal with two locks from the same class.
2157 *
2158 * @returns true if ok, false if not.
2159 * @param uSubClass1 The sub-class of the lock that is being
2160 * considered.
2161 * @param uSubClass2 The sub-class of the lock that is already being
2162 * held.
2163 */
2164DECL_FORCE_INLINE(bool) rtLockValidatorIsSubClassOrderOk(uint32_t uSubClass1, uint32_t uSubClass2)
2165{
2166 if (uSubClass1 > uSubClass2)
2167 {
2168 /* NONE kills ANY. */
2169 if (uSubClass2 == RTLOCKVAL_SUB_CLASS_NONE)
2170 return false;
2171 return true;
2172 }
2173
2174 /* ANY counters all USER values. (uSubClass1 == NONE only if they are equal) */
2175 AssertCompile(RTLOCKVAL_SUB_CLASS_ANY > RTLOCKVAL_SUB_CLASS_NONE);
2176 if (uSubClass1 == RTLOCKVAL_SUB_CLASS_ANY)
2177 return true;
2178 return false;
2179}
2180
2181
2182/**
2183 * Checks if the class and sub-class lock order is ok.
2184 *
2185 * @returns true if ok, false if not.
2186 * @param pClass1 The class of the lock that is being considered.
2187 * @param uSubClass1 The sub-class that goes with @a pClass1.
2188 * @param pClass2 The class of the lock that is already being
2189 * held.
2190 * @param uSubClass2 The sub-class that goes with @a pClass2.
2191 */
2192DECL_FORCE_INLINE(bool) rtLockValidatorIsClassOrderOk(RTLOCKVALCLASSINT *pClass1, uint32_t uSubClass1,
2193 RTLOCKVALCLASSINT *pClass2, uint32_t uSubClass2)
2194{
2195 if (pClass1 == pClass2)
2196 return rtLockValidatorIsSubClassOrderOk(uSubClass1, uSubClass2);
2197 return rtLockValidatorClassIsPriorClass(pClass1, pClass2);
2198}
2199
2200
2201/**
2202 * Checks the locking order, part two.
2203 *
2204 * @returns VINF_SUCCESS, VERR_SEM_LV_WRONG_ORDER or VERR_SEM_LV_INTERNAL_ERROR.
2205 * @param pClass The lock class.
2206 * @param uSubClass The lock sub-class.
2207 * @param pThreadSelf The current thread.
2208 * @param pRec The lock record.
2209 * @param pSrcPos The source position of the locking operation.
2210 */
static int rtLockValidatorStackCheckLockingOrder2(RTLOCKVALCLASSINT * const pClass, uint32_t const uSubClass,
                                                  PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION const pRec,
                                                  PCRTLOCKVALSRCPOS const pSrcPos,
                                                  RTLOCKVALCLASSINT * const pFirstBadClass,
                                                  PRTLOCKVALRECUNION const pFirstBadRec,
                                                  PRTLOCKVALRECUNION const pFirstBadDown)
{
    /*
     * Something went wrong, pCur is pointing to where.
     */
    /* A hard violation: the classes are equal or already ordered the other
       way around - nothing to learn, just complain. */
    if (   pClass == pFirstBadClass
        || rtLockValidatorClassIsPriorClass(pFirstBadClass, pClass))
        return rtLockValidatorStackWrongOrder("Wrong locking order!", pSrcPos, pThreadSelf,
                                              pRec, pFirstBadRec, pClass, pFirstBadClass);
    /* Non-autodidact classes never learn new orderings, so this is an error too. */
    if (!pClass->fAutodidact)
        return rtLockValidatorStackWrongOrder("Wrong locking order! (unknown)", pSrcPos, pThreadSelf,
                                              pRec, pFirstBadRec, pClass, pFirstBadClass);

    /*
     * This class is an autodidact, so we have to check out the rest of the stack
     * for direct violations.
     */
    /* cNewRules starts at 1 for the (pClass, pFirstBadClass) pair found by the caller. */
    uint32_t cNewRules = 1;
    PRTLOCKVALRECUNION pCur = pFirstBadDown;
    while (pCur)
    {
        AssertPtrReturn(pCur, VERR_SEM_LV_INTERNAL_ERROR);

        if (pCur->Core.u32Magic == RTLOCKVALRECNEST_MAGIC)
            pCur = pCur->Nest.pDown;
        else
        {
            PRTLOCKVALRECUNION pDown;
            uint32_t uPriorSubClass;
            RTLOCKVALCLASSINT *pPriorClass = rtLockValidatorRecGetClassesAndDown(pCur, &uPriorSubClass, &pDown);
            if (pPriorClass != NIL_RTLOCKVALCLASS)
            {
                AssertPtrReturn(pPriorClass, VERR_SEM_LV_INTERNAL_ERROR);
                AssertReturn(pPriorClass->u32Magic == RTLOCKVALCLASS_MAGIC, VERR_SEM_LV_INTERNAL_ERROR);
                if (!rtLockValidatorIsClassOrderOk(pClass, uSubClass, pPriorClass, uPriorSubClass))
                {
                    /* A reverse ordering already exists - hard violation. */
                    if (   pClass == pPriorClass
                        || rtLockValidatorClassIsPriorClass(pPriorClass, pClass))
                        return rtLockValidatorStackWrongOrder("Wrong locking order! (more than one)", pSrcPos, pThreadSelf,
                                                              pRec, pCur, pClass, pPriorClass);
                    cNewRules++;
                }
            }
            pCur = pDown;
        }
    }

    if (cNewRules == 1)
    {
        /*
         * Special case the simple operation, hoping that it will be a
         * frequent case.
         */
        int rc = rtLockValidatorClassAddPriorClass(pClass, pFirstBadClass, true /*fAutodidacticism*/, pSrcPos);
        if (rc == VERR_SEM_LV_WRONG_ORDER)
            return rtLockValidatorStackWrongOrder("Wrong locking order! (race)", pSrcPos, pThreadSelf,
                                                  pRec, pFirstBadRec, pClass, pFirstBadClass);
        Assert(RT_SUCCESS(rc) || rc == VERR_NO_MEMORY);
    }
    else
    {
        /*
         * We may be adding more than one rule, so we have to take the lock
         * before starting to add the rules. This means we have to check
         * the state after taking it since we might be racing someone adding
         * a conflicting rule.
         */
        if (!RTCritSectIsInitialized(&g_LockValClassTeachCS))
            rtLockValidatorLazyInit();
        /* Lazy init may fail; rcLock tells us below whether we actually own the critsect. */
        int rcLock = RTCritSectEnter(&g_LockValClassTeachCS);

        /* Check */
        /* First pass: re-scan the whole stack under the lock, bailing out on
           any ordering that is now a hard violation. */
        pCur = pFirstBadRec;
        while (pCur)
        {
            if (pCur->Core.u32Magic == RTLOCKVALRECNEST_MAGIC)
                pCur = pCur->Nest.pDown;
            else
            {
                uint32_t uPriorSubClass;
                PRTLOCKVALRECUNION pDown;
                RTLOCKVALCLASSINT *pPriorClass = rtLockValidatorRecGetClassesAndDown(pCur, &uPriorSubClass, &pDown);
                if (pPriorClass != NIL_RTLOCKVALCLASS)
                {
                    if (!rtLockValidatorIsClassOrderOk(pClass, uSubClass, pPriorClass, uPriorSubClass))
                    {
                        if (   pClass == pPriorClass
                            || rtLockValidatorClassIsPriorClass(pPriorClass, pClass))
                        {
                            if (RT_SUCCESS(rcLock))
                                RTCritSectLeave(&g_LockValClassTeachCS);
                            return rtLockValidatorStackWrongOrder("Wrong locking order! (2nd)", pSrcPos, pThreadSelf,
                                                                  pRec, pCur, pClass, pPriorClass);
                        }
                    }
                }
                pCur = pDown;
            }
        }

        /* Iterate the stack yet again, adding new rules this time. */
        pCur = pFirstBadRec;
        while (pCur)
        {
            if (pCur->Core.u32Magic == RTLOCKVALRECNEST_MAGIC)
                pCur = pCur->Nest.pDown;
            else
            {
                uint32_t uPriorSubClass;
                PRTLOCKVALRECUNION pDown;
                RTLOCKVALCLASSINT *pPriorClass = rtLockValidatorRecGetClassesAndDown(pCur, &uPriorSubClass, &pDown);
                if (pPriorClass != NIL_RTLOCKVALCLASS)
                {
                    if (!rtLockValidatorIsClassOrderOk(pClass, uSubClass, pPriorClass, uPriorSubClass))
                    {
                        /* The first pass ruled out hard violations under the lock. */
                        Assert(   pClass != pPriorClass
                               && !rtLockValidatorClassIsPriorClass(pPriorClass, pClass));
                        int rc = rtLockValidatorClassAddPriorClass(pClass, pPriorClass, true /*fAutodidacticism*/, pSrcPos);
                        if (RT_FAILURE(rc))
                        {
                            /* Out of memory: stop teaching, but don't fail the caller. */
                            Assert(rc == VERR_NO_MEMORY);
                            break;
                        }
                        Assert(rtLockValidatorClassIsPriorClass(pClass, pPriorClass));
                    }
                }
                pCur = pDown;
            }
        }

        if (RT_SUCCESS(rcLock))
            RTCritSectLeave(&g_LockValClassTeachCS);
    }

    return VINF_SUCCESS;
}
2352
2353
2354
2355/**
2356 * Checks the locking order.
2357 *
2358 * @returns VINF_SUCCESS, VERR_SEM_LV_WRONG_ORDER or VERR_SEM_LV_INTERNAL_ERROR.
2359 * @param pClass The lock class.
2360 * @param uSubClass The lock sub-class.
2361 * @param pThreadSelf The current thread.
2362 * @param pRec The lock record.
2363 * @param pSrcPos The source position of the locking operation.
2364 */
2365static int rtLockValidatorStackCheckLockingOrder(RTLOCKVALCLASSINT * const pClass, uint32_t const uSubClass,
2366 PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION const pRec,
2367 PCRTLOCKVALSRCPOS pSrcPos)
2368{
2369 /*
2370 * Some internal paranoia first.
2371 */
2372 AssertPtr(pClass);
2373 Assert(pClass->u32Magic == RTLOCKVALCLASS_MAGIC);
2374 AssertPtr(pThreadSelf);
2375 Assert(pThreadSelf->u32Magic == RTTHREADINT_MAGIC);
2376 AssertPtr(pRec);
2377 AssertPtrNull(pSrcPos);
2378
2379 /*
2380 * Walk the stack, delegate problems to a worker routine.
2381 */
2382 PRTLOCKVALRECUNION pCur = pThreadSelf->LockValidator.pStackTop;
2383 if (!pCur)
2384 return VINF_SUCCESS;
2385
2386 for (;;)
2387 {
2388 AssertPtrReturn(pCur, VERR_SEM_LV_INTERNAL_ERROR);
2389
2390 if (pCur->Core.u32Magic == RTLOCKVALRECNEST_MAGIC)
2391 pCur = pCur->Nest.pDown;
2392 else
2393 {
2394 uint32_t uPriorSubClass;
2395 PRTLOCKVALRECUNION pDown;
2396 RTLOCKVALCLASSINT *pPriorClass = rtLockValidatorRecGetClassesAndDown(pCur, &uPriorSubClass, &pDown);
2397 if (pPriorClass != NIL_RTLOCKVALCLASS)
2398 {
2399 AssertPtrReturn(pPriorClass, VERR_SEM_LV_INTERNAL_ERROR);
2400 AssertReturn(pPriorClass->u32Magic == RTLOCKVALCLASS_MAGIC, VERR_SEM_LV_INTERNAL_ERROR);
2401 if (RT_UNLIKELY(!rtLockValidatorIsClassOrderOk(pClass, uSubClass, pPriorClass, uPriorSubClass)))
2402 return rtLockValidatorStackCheckLockingOrder2(pClass, uSubClass, pThreadSelf, pRec, pSrcPos,
2403 pPriorClass, pCur, pDown);
2404 }
2405 pCur = pDown;
2406 }
2407 if (!pCur)
2408 return VINF_SUCCESS;
2409 }
2410}
2411
2412
2413/**
2414 * Check that the lock record is the topmost one on the stack, complain and fail
2415 * if it isn't.
2416 *
2417 * @returns VINF_SUCCESS, VERR_SEM_LV_WRONG_RELEASE_ORDER or
2418 * VERR_SEM_LV_INVALID_PARAMETER.
2419 * @param pThreadSelf The current thread.
2420 * @param pRec The record.
2421 */
2422static int rtLockValidatorStackCheckReleaseOrder(PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION pRec)
2423{
2424 AssertReturn(pThreadSelf != NIL_RTTHREAD, VERR_SEM_LV_INVALID_PARAMETER);
2425 Assert(pThreadSelf == RTThreadSelf());
2426
2427 PRTLOCKVALRECUNION pTop = pThreadSelf->LockValidator.pStackTop;
2428 if (RT_LIKELY( pTop == pRec
2429 || ( pTop
2430 && pTop->Core.u32Magic == RTLOCKVALRECNEST_MAGIC
2431 && pTop->Nest.pRec == pRec) ))
2432 return VINF_SUCCESS;
2433
2434#ifdef RTLOCKVAL_WITH_RECURSION_RECORDS
2435 /* Look for a recursion record so the right frame is dumped and marked. */
2436 while (pTop)
2437 {
2438 if (pTop->Core.u32Magic == RTLOCKVALRECNEST_MAGIC)
2439 {
2440 if (pTop->Nest.pRec == pRec)
2441 {
2442 pRec = pTop;
2443 break;
2444 }
2445 pTop = pTop->Nest.pDown;
2446 }
2447 else if (pTop->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC)
2448 pTop = pTop->Excl.pDown;
2449 else if (pTop->Core.u32Magic == RTLOCKVALRECSHRDOWN_MAGIC)
2450 pTop = pTop->ShrdOwner.pDown;
2451 else
2452 break;
2453 }
2454#endif
2455
2456 rtLockValComplainFirst("Wrong release order!", NULL, pThreadSelf, pRec, true);
2457 rtLockValComplainPanic();
2458 return !g_fLockValSoftWrongOrder ? VERR_SEM_LV_WRONG_RELEASE_ORDER : VINF_SUCCESS;
2459}
2460
2461
2462/**
2463 * Checks if all owners are blocked - shared record operated in signaller mode.
2464 *
2465 * @returns true / false accordingly.
2466 * @param pRec The record.
2467 * @param pThreadSelf The current thread.
2468 */
DECL_FORCE_INLINE(bool) rtLockValidatorDdAreAllThreadsBlocked(PRTLOCKVALRECSHRD pRec, PRTTHREADINT pThreadSelf)
{
    /* Lock-free snapshot of the owner table; entries may change while we scan. */
    PRTLOCKVALRECSHRDOWN volatile *papOwners = pRec->papOwners;
    uint32_t cAllocated = pRec->cAllocated;
    uint32_t cEntries = ASMAtomicUoReadU32(&pRec->cEntries);
    if (cEntries == 0)
        return false;

    for (uint32_t i = 0; i < cAllocated; i++)
    {
        PRTLOCKVALRECSHRDOWN pEntry = rtLockValidatorUoReadSharedOwner(&papOwners[i]);
        if (   pEntry
            && pEntry->Core.u32Magic == RTLOCKVALRECSHRDOWN_MAGIC)
        {
            /* Any live owner that is neither sleeping nor ourselves means
               "not all blocked". */
            PRTTHREADINT pCurThread = rtLockValidatorReadThreadHandle(&pEntry->hThread);
            if (!pCurThread)
                return false;
            if (pCurThread->u32Magic != RTTHREADINT_MAGIC)
                return false;
            if (   !RTTHREAD_IS_SLEEPING(rtThreadGetState(pCurThread))
                && pCurThread != pThreadSelf)
                return false;
            /* Stop early once we've seen all the live entries. */
            if (--cEntries == 0)
                break;
        }
        else
            Assert(!pEntry || pEntry->Core.u32Magic == RTLOCKVALRECSHRDOWN_MAGIC_DEAD);
    }

    return true;
}
2500
2501
2502/**
2503 * Verifies the deadlock stack before calling it a deadlock.
2504 *
2505 * @retval VERR_SEM_LV_DEADLOCK if it's a deadlock.
2506 * @retval VERR_SEM_LV_ILLEGAL_UPGRADE if it's a deadlock on the same lock.
2507 * @retval VERR_TRY_AGAIN if something changed.
2508 *
2509 * @param pStack The deadlock detection stack.
2510 * @param pThreadSelf The current thread.
2511 */
static int rtLockValidatorDdVerifyDeadlock(PRTLOCKVALDDSTACK pStack, PRTTHREADINT pThreadSelf)
{
    uint32_t const c = pStack->c;
    /* Re-check the whole chain three times (yielding between passes) to
       filter out transient states picked up by the lock-free detection walk. */
    for (uint32_t iPass = 0; iPass < 3; iPass++)
    {
        /* Entry 0 was pushed before any owner thread was resolved (pThread is
           still NIL / state RUNNING there), so start verifying at entry 1. */
        for (uint32_t i = 1; i < c; i++)
        {
            PRTTHREADINT pThread = pStack->a[i].pThread;
            if (pThread->u32Magic != RTTHREADINT_MAGIC)
                return VERR_TRY_AGAIN;
            if (rtThreadGetState(pThread) != pStack->a[i].enmState)
                return VERR_TRY_AGAIN;
            if (rtLockValidatorReadRecUnionPtr(&pThread->LockValidator.pRec) != pStack->a[i].pFirstSibling)
                return VERR_TRY_AGAIN;
            /* ASSUMES the signaller records won't have siblings! */
            PRTLOCKVALRECUNION pRec = pStack->a[i].pRec;
            if (   pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
                && pRec->Shared.fSignaller
                && !rtLockValidatorDdAreAllThreadsBlocked(&pRec->Shared, pThreadSelf))
                return VERR_TRY_AGAIN;
        }
        RTThreadYield();
    }

    /* A single-entry chain means we're blocking on a lock we already own. */
    if (c == 1)
        return VERR_SEM_LV_ILLEGAL_UPGRADE;
    return VERR_SEM_LV_DEADLOCK;
}
2540
2541
2542/**
2543 * Checks for stack cycles caused by another deadlock before returning.
2544 *
2545 * @retval VINF_SUCCESS if the stack is simply too small.
2546 * @retval VERR_SEM_LV_EXISTING_DEADLOCK if a cycle was detected.
2547 *
2548 * @param pStack The deadlock detection stack.
2549 */
2550static int rtLockValidatorDdHandleStackOverflow(PRTLOCKVALDDSTACK pStack)
2551{
2552 for (size_t i = 0; i < RT_ELEMENTS(pStack->a) - 1; i++)
2553 {
2554 PRTTHREADINT pThread = pStack->a[i].pThread;
2555 for (size_t j = i + 1; j < RT_ELEMENTS(pStack->a); j++)
2556 if (pStack->a[j].pThread == pThread)
2557 return VERR_SEM_LV_EXISTING_DEADLOCK;
2558 }
2559 static bool volatile s_fComplained = false;
2560 if (!s_fComplained)
2561 {
2562 s_fComplained = true;
2563 rtLockValComplain(RT_SRC_POS, "lock validator stack is too small! (%zu entries)\n", RT_ELEMENTS(pStack->a));
2564 }
2565 return VINF_SUCCESS;
2566}
2567
2568
2569/**
2570 * Worker for rtLockValidatorDeadlockDetection that does the actual deadlock
2571 * detection.
2572 *
2573 * @retval VINF_SUCCESS
2574 * @retval VERR_SEM_LV_DEADLOCK
2575 * @retval VERR_SEM_LV_EXISTING_DEADLOCK
2576 * @retval VERR_SEM_LV_ILLEGAL_UPGRADE
2577 * @retval VERR_TRY_AGAIN
2578 *
2579 * @param pStack The stack to use.
2580 * @param pOriginalRec The original record.
2581 * @param pThreadSelf The calling thread.
2582 */
static int rtLockValidatorDdDoDetection(PRTLOCKVALDDSTACK pStack, PRTLOCKVALRECUNION const pOriginalRec,
                                        PRTTHREADINT const pThreadSelf)
{
    pStack->c = 0;

    /* We could use a single RTLOCKVALDDENTRY variable here, but the
       compiler may make a better job of it when using individual variables. */
    PRTLOCKVALRECUNION pRec = pOriginalRec;
    PRTLOCKVALRECUNION pFirstSibling = pOriginalRec;
    uint32_t iEntry = UINT32_MAX; /* owner-table index for shared recs; UINT32_MAX = not scanning one */
    PRTTHREADINT pThread = NIL_RTTHREAD;
    RTTHREADSTATE enmState = RTTHREADSTATE_RUNNING;
    for (uint32_t iLoop = 0; ; iLoop++)
    {
        /*
         * Process the current record.
         */
        RTLOCKVAL_ASSERT_PTR_ALIGN(pRec);

        /* Find the next relevant owner thread and record. */
        PRTLOCKVALRECUNION pNextRec = NULL;
        RTTHREADSTATE enmNextState = RTTHREADSTATE_RUNNING;
        PRTTHREADINT pNextThread = NIL_RTTHREAD;
        switch (pRec->Core.u32Magic)
        {
            case RTLOCKVALRECEXCL_MAGIC:
                Assert(iEntry == UINT32_MAX);
                /* Lock-free read of the owner; retry until thread handle,
                   state and blocking record form a consistent snapshot. */
                for (;;)
                {
                    pNextThread = rtLockValidatorReadThreadHandle(&pRec->Excl.hThread);
                    if (   !pNextThread
                        || pNextThread->u32Magic != RTTHREADINT_MAGIC)
                        break;
                    enmNextState = rtThreadGetState(pNextThread);
                    if (   !RTTHREAD_IS_SLEEPING(enmNextState)
                        && pNextThread != pThreadSelf)
                        break;
                    pNextRec = rtLockValidatorReadRecUnionPtr(&pNextThread->LockValidator.pRec);
                    if (RT_LIKELY(   !pNextRec
                                  || enmNextState == rtThreadGetState(pNextThread)))
                        break;
                    pNextRec = NULL;
                }
                if (!pNextRec)
                {
                    /* Owner not blocked: try the next sibling record, if any. */
                    pRec = pRec->Excl.pSibling;
                    if (   pRec
                        && pRec != pFirstSibling)
                        continue;
                    pNextThread = NIL_RTTHREAD;
                }
                break;

            case RTLOCKVALRECSHRD_MAGIC:
                if (!pRec->Shared.fSignaller)
                {
                    /* Skip to the next sibling if same side. ASSUMES reader priority. */
                    /** @todo The read side of a read-write lock is problematic if
                     * the implementation prioritizes writers over readers because
                     * that means we should could deadlock against current readers
                     * if a writer showed up. If the RW sem implementation is
                     * wrapping some native API, it's not so easy to detect when we
                     * should do this and when we shouldn't. Checking when we
                     * shouldn't is subject to wakeup scheduling and cannot easily
                     * be made reliable.
                     *
                     * At the moment we circumvent all this mess by declaring that
                     * readers has priority. This is TRUE on linux, but probably
                     * isn't on Solaris and FreeBSD. */
                    if (   pRec == pFirstSibling
                        && pRec->Shared.pSibling != NULL
                        && pRec->Shared.pSibling != pFirstSibling)
                    {
                        pRec = pRec->Shared.pSibling;
                        Assert(iEntry == UINT32_MAX);
                        continue;
                    }
                }

                /* Scan the owner table for blocked owners. */
                if (    ASMAtomicUoReadU32(&pRec->Shared.cEntries) > 0
                    &&  (   !pRec->Shared.fSignaller
                         || iEntry != UINT32_MAX
                         || rtLockValidatorDdAreAllThreadsBlocked(&pRec->Shared, pThreadSelf)
                        )
                    )
                {
                    uint32_t cAllocated = pRec->Shared.cAllocated;
                    PRTLOCKVALRECSHRDOWN volatile *papOwners = pRec->Shared.papOwners;
                    /* iEntry resumes where a previous visit of this record left off. */
                    while (++iEntry < cAllocated)
                    {
                        PRTLOCKVALRECSHRDOWN pEntry = rtLockValidatorUoReadSharedOwner(&papOwners[iEntry]);
                        if (pEntry)
                        {
                            /* Same consistent-snapshot retry dance as for the
                               exclusive case above. */
                            for (;;)
                            {
                                if (pEntry->Core.u32Magic != RTLOCKVALRECSHRDOWN_MAGIC)
                                    break;
                                pNextThread = rtLockValidatorReadThreadHandle(&pEntry->hThread);
                                if (   !pNextThread
                                    || pNextThread->u32Magic != RTTHREADINT_MAGIC)
                                    break;
                                enmNextState = rtThreadGetState(pNextThread);
                                if (   !RTTHREAD_IS_SLEEPING(enmNextState)
                                    && pNextThread != pThreadSelf)
                                    break;
                                pNextRec = rtLockValidatorReadRecUnionPtr(&pNextThread->LockValidator.pRec);
                                if (RT_LIKELY(   !pNextRec
                                              || enmNextState == rtThreadGetState(pNextThread)))
                                    break;
                                pNextRec = NULL;
                            }
                            if (pNextRec)
                                break;
                        }
                        else
                            Assert(!pEntry || pEntry->Core.u32Magic == RTLOCKVALRECSHRDOWN_MAGIC_DEAD);
                    }
                    if (pNextRec)
                        break;
                    pNextThread = NIL_RTTHREAD;
                }

                /* Advance to the next sibling, if any. */
                pRec = pRec->Shared.pSibling;
                if (   pRec != NULL
                    && pRec != pFirstSibling)
                {
                    iEntry = UINT32_MAX;
                    continue;
                }
                break;

            case RTLOCKVALRECEXCL_MAGIC_DEAD:
            case RTLOCKVALRECSHRD_MAGIC_DEAD:
                /* Record was destroyed while we looked at it - treat as no owner. */
                break;

            case RTLOCKVALRECSHRDOWN_MAGIC:
            case RTLOCKVALRECSHRDOWN_MAGIC_DEAD:
            default:
                AssertMsgFailed(("%p: %#x\n", pRec, pRec->Core));
                break;
        }

        if (pNextRec)
        {
            /*
             * Recurse and check for deadlock.
             */
            uint32_t i = pStack->c;
            if (RT_UNLIKELY(i >= RT_ELEMENTS(pStack->a)))
                return rtLockValidatorDdHandleStackOverflow(pStack);

            /* Push the current position so we can resume here while unwinding. */
            pStack->c++;
            pStack->a[i].pRec = pRec;
            pStack->a[i].iEntry = iEntry;
            pStack->a[i].enmState = enmState;
            pStack->a[i].pThread = pThread;
            pStack->a[i].pFirstSibling = pFirstSibling;

            /* The walk reached ourselves: verify it's a real deadlock. */
            if (RT_UNLIKELY(   pNextThread == pThreadSelf
                            && (   i != 0
                                || pRec->Core.u32Magic != RTLOCKVALRECSHRD_MAGIC
                                || !pRec->Shared.fSignaller) /* ASSUMES signaller records have no siblings. */
                            )
                )
                return rtLockValidatorDdVerifyDeadlock(pStack, pThreadSelf);

            pRec = pNextRec;
            pFirstSibling = pNextRec;
            iEntry = UINT32_MAX;
            enmState = enmNextState;
            pThread = pNextThread;
        }
        else
        {
            /*
             * No deadlock here, unwind the stack and deal with any unfinished
             * business there.
             */
            uint32_t i = pStack->c;
            for (;;)
            {
                /* pop */
                if (i == 0)
                    return VINF_SUCCESS;
                i--;
                pRec = pStack->a[i].pRec;
                iEntry = pStack->a[i].iEntry;

                /* Examine it. */
                uint32_t u32Magic = pRec->Core.u32Magic;
                if (u32Magic == RTLOCKVALRECEXCL_MAGIC)
                    pRec = pRec->Excl.pSibling;
                else if (u32Magic == RTLOCKVALRECSHRD_MAGIC)
                {
                    if (iEntry + 1 < pRec->Shared.cAllocated)
                        break; /* continue processing this record. */
                    pRec = pRec->Shared.pSibling;
                }
                else
                {
                    Assert(   u32Magic == RTLOCKVALRECEXCL_MAGIC_DEAD
                           || u32Magic == RTLOCKVALRECSHRD_MAGIC_DEAD);
                    continue;
                }

                /* Any next record to advance to? */
                if (   !pRec
                    || pRec == pStack->a[i].pFirstSibling)
                    continue;
                iEntry = UINT32_MAX;
                break;
            }

            /* Restore the rest of the state and update the stack. */
            pFirstSibling = pStack->a[i].pFirstSibling;
            enmState = pStack->a[i].enmState;
            pThread = pStack->a[i].pThread;
            pStack->c = i;
        }

        /* Sanity limit on the number of iterations. */
        Assert(iLoop != 1000000);
    }
}
2808
2809
2810/**
2811 * Check for the simple no-deadlock case.
2812 *
2813 * @returns true if no deadlock, false if further investigation is required.
2814 *
2815 * @param pOriginalRec The original record.
2816 */
2817DECLINLINE(int) rtLockValidatorIsSimpleNoDeadlockCase(PRTLOCKVALRECUNION pOriginalRec)
2818{
2819 if ( pOriginalRec->Excl.Core.u32Magic == RTLOCKVALRECEXCL_MAGIC
2820 && !pOriginalRec->Excl.pSibling)
2821 {
2822 PRTTHREADINT pThread = rtLockValidatorReadThreadHandle(&pOriginalRec->Excl.hThread);
2823 if ( !pThread
2824 || pThread->u32Magic != RTTHREADINT_MAGIC)
2825 return true;
2826 RTTHREADSTATE enmState = rtThreadGetState(pThread);
2827 if (!RTTHREAD_IS_SLEEPING(enmState))
2828 return true;
2829 }
2830 return false;
2831}
2832
2833
2834/**
2835 * Worker for rtLockValidatorDeadlockDetection that bitches about a deadlock.
2836 *
2837 * @param pStack The chain of locks causing the deadlock.
2838 * @param pRec The record relating to the current thread's lock
2839 * operation.
2840 * @param pThreadSelf This thread.
2841 * @param pSrcPos Where we are going to deadlock.
2842 * @param rc The return code.
2843 */
static void rcLockValidatorDoDeadlockComplaining(PRTLOCKVALDDSTACK pStack, PRTLOCKVALRECUNION pRec,
                                                 PRTTHREADINT pThreadSelf, PCRTLOCKVALSRCPOS pSrcPos, int rc)
{
    if (!ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
    {
        /* Map the status code to a headline for the complaint. */
        const char *pszWhat;
        switch (rc)
        {
            case VERR_SEM_LV_DEADLOCK: pszWhat = "Detected deadlock!"; break;
            case VERR_SEM_LV_EXISTING_DEADLOCK: pszWhat = "Found existing deadlock!"; break;
            case VERR_SEM_LV_ILLEGAL_UPGRADE: pszWhat = "Illegal lock upgrade!"; break;
            default: AssertFailed(); pszWhat = "!unexpected rc!"; break;
        }
        /* Only mention pRec separately when it isn't the first chain entry. */
        rtLockValComplainFirst(pszWhat, pSrcPos, pThreadSelf, pStack->a[0].pRec != pRec ? pRec : NULL, true);
        rtLockValComplainMore("---- start of deadlock chain - %u entries ----\n", pStack->c);
        for (uint32_t i = 0; i < pStack->c; i++)
        {
            char szPrefix[24];
            RTStrPrintf(szPrefix, sizeof(szPrefix), "#%02u: ", i);
            /* For shared records, report the specific owner entry the walk
               followed rather than the shared record itself. */
            PRTLOCKVALRECUNION pShrdOwner = NULL;
            if (pStack->a[i].pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC)
                pShrdOwner = (PRTLOCKVALRECUNION)pStack->a[i].pRec->Shared.papOwners[pStack->a[i].iEntry];
            if (VALID_PTR(pShrdOwner) && pShrdOwner->Core.u32Magic == RTLOCKVALRECSHRDOWN_MAGIC)
            {
                rtLockValComplainAboutLock(szPrefix, pShrdOwner, "\n");
                rtLockValComplainAboutLockStack(pShrdOwner->ShrdOwner.hThread, 5, 2, pShrdOwner);
            }
            else
            {
                rtLockValComplainAboutLock(szPrefix, pStack->a[i].pRec, "\n");
                if (pStack->a[i].pRec->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC)
                    rtLockValComplainAboutLockStack(pStack->a[i].pRec->Excl.hThread, 5, 2, pStack->a[i].pRec);
            }
        }
        rtLockValComplainMore("---- end of deadlock chain ----\n");
    }

    rtLockValComplainPanic();
}
2883
2884
2885/**
2886 * Perform deadlock detection.
2887 *
2888 * @retval VINF_SUCCESS
2889 * @retval VERR_SEM_LV_DEADLOCK
2890 * @retval VERR_SEM_LV_EXISTING_DEADLOCK
2891 * @retval VERR_SEM_LV_ILLEGAL_UPGRADE
2892 *
2893 * @param pRec The record relating to the current thread's lock
2894 * operation.
2895 * @param pThreadSelf The current thread.
2896 * @param pSrcPos The position of the current lock operation.
2897 */
2898static int rtLockValidatorDeadlockDetection(PRTLOCKVALRECUNION pRec, PRTTHREADINT pThreadSelf, PCRTLOCKVALSRCPOS pSrcPos)
2899{
2900 RTLOCKVALDDSTACK Stack;
2901 int rc = rtLockValidatorDdDoDetection(&Stack, pRec, pThreadSelf);
2902 if (RT_SUCCESS(rc))
2903 return VINF_SUCCESS;
2904
2905 if (rc == VERR_TRY_AGAIN)
2906 {
2907 for (uint32_t iLoop = 0; ; iLoop++)
2908 {
2909 rc = rtLockValidatorDdDoDetection(&Stack, pRec, pThreadSelf);
2910 if (RT_SUCCESS_NP(rc))
2911 return VINF_SUCCESS;
2912 if (rc != VERR_TRY_AGAIN)
2913 break;
2914 RTThreadYield();
2915 if (iLoop >= 3)
2916 return VINF_SUCCESS;
2917 }
2918 }
2919
2920 rcLockValidatorDoDeadlockComplaining(&Stack, pRec, pThreadSelf, pSrcPos, rc);
2921 return rc;
2922}
2923
2924
RTDECL(void) RTLockValidatorRecExclInitV(PRTLOCKVALRECEXCL pRec, RTLOCKVALCLASS hClass, uint32_t uSubClass,
                                         void *hLock, bool fEnabled, const char *pszNameFmt, va_list va)
{
    RTLOCKVAL_ASSERT_PTR_ALIGN(pRec);
    RTLOCKVAL_ASSERT_PTR_ALIGN(hLock);
    Assert(   uSubClass >= RTLOCKVAL_SUB_CLASS_USER
           || uSubClass == RTLOCKVAL_SUB_CLASS_NONE
           || uSubClass == RTLOCKVAL_SUB_CLASS_ANY);

    /* Initialize all record fields.  NOTE(review): the magic is written before
       the other fields; presumably the record isn't visible to other threads
       at this point - confirm against callers. */
    pRec->Core.u32Magic = RTLOCKVALRECEXCL_MAGIC;
    pRec->fEnabled = fEnabled && RTLockValidatorIsEnabled();
    pRec->afReserved[0] = 0;
    pRec->afReserved[1] = 0;
    pRec->afReserved[2] = 0;
    rtLockValidatorSrcPosInit(&pRec->SrcPos);
    pRec->hThread = NIL_RTTHREAD;
    pRec->pDown = NULL;
    /* Validates hClass and takes a reference on it (if not NIL). */
    pRec->hClass = rtLockValidatorClassValidateAndRetain(hClass);
    pRec->uSubClass = uSubClass;
    pRec->cRecursion = 0;
    pRec->hLock = hLock;
    pRec->pSibling = NULL;
    /* Format the record name, or make up a unique anonymous one. */
    if (pszNameFmt)
        RTStrPrintfV(pRec->szName, sizeof(pRec->szName), pszNameFmt, va);
    else
    {
        static uint32_t volatile s_cAnonymous = 0;
        uint32_t i = ASMAtomicIncU32(&s_cAnonymous) - 1;
        RTStrPrintf(pRec->szName, sizeof(pRec->szName), "anon-excl-%u", i);
    }

    /* Lazy initialization. */
    if (RT_UNLIKELY(g_hLockValidatorXRoads == NIL_RTSEMXROADS))
        rtLockValidatorLazyInit();
}
2960
2961
/**
 * Initializes an exclusive lock validator record - ellipsis variant that
 * simply forwards to RTLockValidatorRecExclInitV.
 *
 * @param   pRec        The record to initialize.
 * @param   hClass      The lock class handle, NIL_RTLOCKVALCLASS is fine.
 * @param   uSubClass   The sub-class (user value or NONE/ANY special).
 * @param   hLock       The lock handle/pointer this record belongs to.
 * @param   fEnabled    Whether validation is requested (also subject to the
 *                      global enable switch, see InitV).
 * @param   pszNameFmt  Name format string; NULL yields an anonymous name.
 * @param   ...         Format arguments.
 */
RTDECL(void) RTLockValidatorRecExclInit(PRTLOCKVALRECEXCL pRec, RTLOCKVALCLASS hClass, uint32_t uSubClass,
                                        void *hLock, bool fEnabled, const char *pszNameFmt, ...)
{
    va_list va;
    va_start(va, pszNameFmt);
    RTLockValidatorRecExclInitV(pRec, hClass, uSubClass, hLock, fEnabled, pszNameFmt, va);
    va_end(va);
}
2970
2971
2972RTDECL(int) RTLockValidatorRecExclCreateV(PRTLOCKVALRECEXCL *ppRec, RTLOCKVALCLASS hClass,
2973 uint32_t uSubClass, void *pvLock, bool fEnabled,
2974 const char *pszNameFmt, va_list va)
2975{
2976 PRTLOCKVALRECEXCL pRec;
2977 *ppRec = pRec = (PRTLOCKVALRECEXCL)RTMemAlloc(sizeof(*pRec));
2978 if (!pRec)
2979 return VERR_NO_MEMORY;
2980 RTLockValidatorRecExclInitV(pRec, hClass, uSubClass, pvLock, fEnabled, pszNameFmt, va);
2981 return VINF_SUCCESS;
2982}
2983
2984
/**
 * Allocates and initializes an exclusive lock validator record - ellipsis
 * variant forwarding to RTLockValidatorRecExclCreateV.
 *
 * @returns VINF_SUCCESS or VERR_NO_MEMORY.
 * @param   ppRec       Where to return the new record (NULL on failure).
 * @param   hClass      The lock class handle, NIL_RTLOCKVALCLASS is fine.
 * @param   uSubClass   The sub-class.
 * @param   pvLock      The lock handle/pointer this record belongs to.
 * @param   fEnabled    Whether validation is requested.
 * @param   pszNameFmt  Name format string; NULL yields an anonymous name.
 * @param   ...         Format arguments.
 */
RTDECL(int) RTLockValidatorRecExclCreate(PRTLOCKVALRECEXCL *ppRec, RTLOCKVALCLASS hClass,
                                         uint32_t uSubClass, void *pvLock, bool fEnabled,
                                         const char *pszNameFmt, ...)
{
    va_list va;
    va_start(va, pszNameFmt);
    int rc = RTLockValidatorRecExclCreateV(ppRec, hClass, uSubClass, pvLock, fEnabled, pszNameFmt, va);
    va_end(va);
    return rc;
}
2995
2996
/**
 * Deinitializes an exclusive lock validator record.
 *
 * The magic is invalidated first (under the destruction lock) so that racing
 * readers bail out, then the owner/class/sibling state is torn down.  The
 * class reference is released only after leaving the destruction lock.
 *
 * @param   pRec    The record to deinitialize.
 */
RTDECL(void) RTLockValidatorRecExclDelete(PRTLOCKVALRECEXCL pRec)
{
    Assert(pRec->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC);

    rtLockValidatorSerializeDestructEnter();

    /** @todo Check that it's not on our stack first.  Need to make it
     *        configurable whether deleting a owned lock is acceptable? */

    /* Kill the magic before clearing state; concurrent users key off it. */
    ASMAtomicWriteU32(&pRec->Core.u32Magic, RTLOCKVALRECEXCL_MAGIC_DEAD);
    ASMAtomicWriteHandle(&pRec->hThread, NIL_RTTHREAD);
    RTLOCKVALCLASS hClass;
    ASMAtomicXchgHandle(&pRec->hClass, NIL_RTLOCKVALCLASS, &hClass);
    if (pRec->pSibling)
        rtLockValidatorUnlinkAllSiblings(&pRec->Core);
    rtLockValidatorSerializeDestructLeave();
    /* Release the class reference taken by the init code (outside the lock). */
    if (hClass != NIL_RTLOCKVALCLASS)
        RTLockValidatorClassRelease(hClass);
}
3016
3017
3018RTDECL(void) RTLockValidatorRecExclDestroy(PRTLOCKVALRECEXCL *ppRec)
3019{
3020 PRTLOCKVALRECEXCL pRec = *ppRec;
3021 *ppRec = NULL;
3022 if (pRec)
3023 {
3024 RTLockValidatorRecExclDelete(pRec);
3025 RTMemFree(pRec);
3026 }
3027}
3028
3029
/**
 * Changes the sub-class of an exclusive lock validator record.
 *
 * @returns The previous sub-class, or RTLOCKVAL_SUB_CLASS_INVALID if the
 *          record or the new sub-class value is invalid.
 * @param   pRec        The record.
 * @param   uSubClass   The new sub-class; a user value or one of the
 *                      NONE/ANY specials.
 */
RTDECL(uint32_t) RTLockValidatorRecExclSetSubClass(PRTLOCKVALRECEXCL pRec, uint32_t uSubClass)
{
    AssertPtrReturn(pRec, RTLOCKVAL_SUB_CLASS_INVALID);
    AssertReturn(pRec->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, RTLOCKVAL_SUB_CLASS_INVALID);
    AssertReturn(   uSubClass >= RTLOCKVAL_SUB_CLASS_USER
                 || uSubClass == RTLOCKVAL_SUB_CLASS_NONE
                 || uSubClass == RTLOCKVAL_SUB_CLASS_ANY,
                 RTLOCKVAL_SUB_CLASS_INVALID);
    return ASMAtomicXchgU32(&pRec->uSubClass, uSubClass);
}
3040
3041
/**
 * Records the calling thread as the owner of the exclusive lock, or bumps
 * the recursion count if it already owns it.
 *
 * @param   pRec            The exclusive lock record.  NULL is tolerated.
 * @param   hThreadSelf     The calling thread; NIL_RTTHREAD means the
 *                          current native thread is auto-adopted.
 * @param   pSrcPos         The acquisition position.  Optional.
 * @param   fFirstRecursion Only consumed by an assertion on the recursion
 *                          path (strict builds).
 */
RTDECL(void) RTLockValidatorRecExclSetOwner(PRTLOCKVALRECEXCL pRec, RTTHREAD hThreadSelf,
                                            PCRTLOCKVALSRCPOS pSrcPos, bool fFirstRecursion)
{
    PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
    if (!pRecU)
        return;
    AssertReturnVoid(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC);
    if (!pRecU->Excl.fEnabled)
        return;
    if (hThreadSelf == NIL_RTTHREAD)
    {
        hThreadSelf = RTThreadSelfAutoAdopt();
        AssertReturnVoid(hThreadSelf != NIL_RTTHREAD);
    }
    AssertReturnVoid(hThreadSelf->u32Magic == RTTHREADINT_MAGIC);
    Assert(hThreadSelf == RTThreadSelf());

    ASMAtomicIncS32(&hThreadSelf->LockValidator.cWriteLocks);

    if (pRecU->Excl.hThread == hThreadSelf)
    {
        /* Recursive acquisition by the current owner. */
        Assert(!fFirstRecursion);
        pRecU->Excl.cRecursion++;
        rtLockValidatorStackPushRecursion(hThreadSelf, pRecU, pSrcPos);
    }
    else
    {
        /* First acquisition.  The recursion count and source position are
           written before hThread is published, since hThread is what other
           threads key off when inspecting the record. */
        Assert(pRecU->Excl.hThread == NIL_RTTHREAD);

        rtLockValidatorSrcPosCopy(&pRecU->Excl.SrcPos, pSrcPos);
        ASMAtomicUoWriteU32(&pRecU->Excl.cRecursion, 1);
        ASMAtomicWriteHandle(&pRecU->Excl.hThread, hThreadSelf);

        rtLockValidatorStackPush(hThreadSelf, pRecU);
    }
}
3078
3079
/**
 * Internal worker for RTLockValidatorRecExclReleaseOwner and
 * RTLockValidatorRecExclReleaseOwnerUnchecked.
 *
 * Drops one recursion level; on the final release the record is popped off
 * the owner's lock stack and the owner handle is cleared.
 *
 * @param   pRec            The exclusive lock record; must have an owner.
 * @param   fFinalRecursion Only consumed by an assertion on the
 *                          still-recursed path.
 */
static void rtLockValidatorRecExclReleaseOwnerUnchecked(PRTLOCKVALRECUNION pRec, bool fFinalRecursion)
{
    RTTHREADINT *pThread = pRec->Excl.hThread;
    AssertReturnVoid(pThread != NIL_RTTHREAD);
    Assert(pThread == RTThreadSelf());

    ASMAtomicDecS32(&pThread->LockValidator.cWriteLocks);
    uint32_t c = ASMAtomicDecU32(&pRec->Excl.cRecursion);
    if (c == 0)
    {
        /* Final release: unstack first, then clear the owner handle. */
        rtLockValidatorStackPop(pThread, pRec);
        ASMAtomicWriteHandle(&pRec->Excl.hThread, NIL_RTTHREAD);
    }
    else
    {
        Assert(c < UINT32_C(0xffff0000)); /* catches underflow, i.e. releasing an unowned lock */
        Assert(!fFinalRecursion);
        rtLockValidatorStackPopRecursion(pThread, pRec);
    }
}
3104
3105RTDECL(int) RTLockValidatorRecExclReleaseOwner(PRTLOCKVALRECEXCL pRec, bool fFinalRecursion)
3106{
3107 PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
3108 if (!pRecU)
3109 return VINF_SUCCESS;
3110 AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
3111 if (!pRecU->Excl.fEnabled)
3112 return VINF_SUCCESS;
3113
3114 /*
3115 * Check the release order.
3116 */
3117 if ( pRecU->Excl.hClass != NIL_RTLOCKVALCLASS
3118 && pRecU->Excl.hClass->fStrictReleaseOrder
3119 && pRecU->Excl.hClass->cMsMinOrder != RT_INDEFINITE_WAIT
3120 )
3121 {
3122 int rc = rtLockValidatorStackCheckReleaseOrder(pRecU->Excl.hThread, pRecU);
3123 if (RT_FAILURE(rc))
3124 return rc;
3125 }
3126
3127 /*
3128 * Join paths with RTLockValidatorRecExclReleaseOwnerUnchecked.
3129 */
3130 rtLockValidatorRecExclReleaseOwnerUnchecked(pRecU, fFinalRecursion);
3131 return VINF_SUCCESS;
3132}
3133
3134
/**
 * Variant of RTLockValidatorRecExclReleaseOwner that skips the strict
 * release order check.
 *
 * @param   pRec    The exclusive lock record; must have a valid magic.
 */
RTDECL(void) RTLockValidatorRecExclReleaseOwnerUnchecked(PRTLOCKVALRECEXCL pRec)
{
    PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
    AssertReturnVoid(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC);
    if (pRecU->Excl.fEnabled)
        rtLockValidatorRecExclReleaseOwnerUnchecked(pRecU, false);
}
3142
3143
/**
 * Records a recursive acquisition of an already-owned exclusive lock.
 *
 * @returns VINF_SUCCESS, VERR_SEM_LV_INVALID_PARAMETER, or
 *          VERR_SEM_LV_NESTED when the class forbids recursion.
 * @param   pRec        The exclusive lock record.  NULL is tolerated.
 * @param   pSrcPos     The acquisition position.  Optional.
 */
RTDECL(int) RTLockValidatorRecExclRecursion(PRTLOCKVALRECEXCL pRec, PCRTLOCKVALSRCPOS pSrcPos)
{
    PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
    if (!pRecU)
        return VINF_SUCCESS;
    AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    if (!pRecU->Excl.fEnabled)
        return VINF_SUCCESS;
    AssertReturn(pRecU->Excl.hThread != NIL_RTTHREAD, VERR_SEM_LV_INVALID_PARAMETER);
    AssertReturn(pRecU->Excl.cRecursion > 0, VERR_SEM_LV_INVALID_PARAMETER);

    /* Complain if the class disallows recursive locking. */
    if (   pRecU->Excl.hClass != NIL_RTLOCKVALCLASS
        && !pRecU->Excl.hClass->fRecursionOk)
    {
        rtLockValComplainFirst("Recursion not allowed by the class!",
                               pSrcPos, pRecU->Excl.hThread, (PRTLOCKVALRECUNION)pRec, true);
        rtLockValComplainPanic();
        return VERR_SEM_LV_NESTED;
    }

    Assert(pRecU->Excl.cRecursion < _1M); /* sanity: runaway recursion counter */
    pRecU->Excl.cRecursion++;
    rtLockValidatorStackPushRecursion(pRecU->Excl.hThread, pRecU, pSrcPos);
    return VINF_SUCCESS;
}
3169
3170
/**
 * Drops one recursion level from an owned exclusive lock without giving up
 * ownership (the counterpart of RTLockValidatorRecExclRecursion).
 *
 * @returns VINF_SUCCESS, VERR_SEM_LV_INVALID_PARAMETER, or a strict release
 *          order violation status.
 * @param   pRec    The exclusive lock record; cRecursion must be above 1.
 */
RTDECL(int) RTLockValidatorRecExclUnwind(PRTLOCKVALRECEXCL pRec)
{
    PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
    AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    if (!pRecU->Excl.fEnabled)
        return VINF_SUCCESS;
    AssertReturn(pRecU->Excl.hThread != NIL_RTTHREAD, VERR_SEM_LV_INVALID_PARAMETER);
    Assert(pRecU->Excl.hThread == RTThreadSelf());
    AssertReturn(pRecU->Excl.cRecursion > 1, VERR_SEM_LV_INVALID_PARAMETER);

    /*
     * Check the release order.
     */
    if (   pRecU->Excl.hClass != NIL_RTLOCKVALCLASS
        && pRecU->Excl.hClass->fStrictReleaseOrder
        && pRecU->Excl.hClass->cMsMinOrder != RT_INDEFINITE_WAIT
       )
    {
        int rc = rtLockValidatorStackCheckReleaseOrder(pRecU->Excl.hThread, pRecU);
        if (RT_FAILURE(rc))
            return rc;
    }

    /*
     * Perform the unwind.
     */
    pRecU->Excl.cRecursion--;
    rtLockValidatorStackPopRecursion(pRecU->Excl.hThread, pRecU);
    return VINF_SUCCESS;
}
3201
3202
/**
 * Records a mixed recursion: re-entering an owned exclusive lock via a
 * different (shared or exclusive) lock record.
 *
 * @returns VINF_SUCCESS, VERR_SEM_LV_INVALID_PARAMETER, or
 *          VERR_SEM_LV_NESTED when the class forbids recursion.
 * @param   pRec        The exclusive lock record being recursed into.
 * @param   pRecMixed   The other record involved; only validated here.
 * @param   pSrcPos     The acquisition position.  Optional.
 */
RTDECL(int) RTLockValidatorRecExclRecursionMixed(PRTLOCKVALRECEXCL pRec, PRTLOCKVALRECCORE pRecMixed, PCRTLOCKVALSRCPOS pSrcPos)
{
    PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
    AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    PRTLOCKVALRECUNION pRecMixedU = (PRTLOCKVALRECUNION)pRecMixed;
    AssertReturn(   pRecMixedU->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
                 || pRecMixedU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC
                 , VERR_SEM_LV_INVALID_PARAMETER);
    if (!pRecU->Excl.fEnabled)
        return VINF_SUCCESS;
    Assert(pRecU->Excl.hThread == RTThreadSelf());
    AssertReturn(pRecU->Excl.hThread != NIL_RTTHREAD, VERR_SEM_LV_INVALID_PARAMETER);
    AssertReturn(pRecU->Excl.cRecursion > 0, VERR_SEM_LV_INVALID_PARAMETER);

    /* Complain if the class disallows recursive locking. */
    if (   pRecU->Excl.hClass != NIL_RTLOCKVALCLASS
        && !pRecU->Excl.hClass->fRecursionOk)
    {
        rtLockValComplainFirst("Mixed recursion not allowed by the class!",
                               pSrcPos, pRecU->Excl.hThread, (PRTLOCKVALRECUNION)pRec, true);
        rtLockValComplainPanic();
        return VERR_SEM_LV_NESTED;
    }

    Assert(pRecU->Excl.cRecursion < _1M); /* sanity: runaway recursion counter */
    pRecU->Excl.cRecursion++;
    rtLockValidatorStackPushRecursion(pRecU->Excl.hThread, pRecU, pSrcPos);

    return VINF_SUCCESS;
}
3232
3233
/**
 * Unwinds one mixed recursion level (counterpart of
 * RTLockValidatorRecExclRecursionMixed).
 *
 * @returns VINF_SUCCESS, VERR_SEM_LV_INVALID_PARAMETER, or a strict release
 *          order violation status.
 * @param   pRec        The exclusive lock record; cRecursion must be above 1.
 * @param   pRecMixed   The other record involved; only validated here.
 */
RTDECL(int) RTLockValidatorRecExclUnwindMixed(PRTLOCKVALRECEXCL pRec, PRTLOCKVALRECCORE pRecMixed)
{
    PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
    AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    PRTLOCKVALRECUNION pRecMixedU = (PRTLOCKVALRECUNION)pRecMixed;
    AssertReturn(   pRecMixedU->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
                 || pRecMixedU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC
                 , VERR_SEM_LV_INVALID_PARAMETER);
    if (!pRecU->Excl.fEnabled)
        return VINF_SUCCESS;
    Assert(pRecU->Excl.hThread == RTThreadSelf());
    AssertReturn(pRecU->Excl.hThread != NIL_RTTHREAD, VERR_SEM_LV_INVALID_PARAMETER);
    AssertReturn(pRecU->Excl.cRecursion > 1, VERR_SEM_LV_INVALID_PARAMETER);

    /*
     * Check the release order.
     */
    if (   pRecU->Excl.hClass != NIL_RTLOCKVALCLASS
        && pRecU->Excl.hClass->fStrictReleaseOrder
        && pRecU->Excl.hClass->cMsMinOrder != RT_INDEFINITE_WAIT
       )
    {
        int rc = rtLockValidatorStackCheckReleaseOrder(pRecU->Excl.hThread, pRecU);
        if (RT_FAILURE(rc))
            return rc;
    }

    /*
     * Perform the unwind.
     */
    pRecU->Excl.cRecursion--;
    rtLockValidatorStackPopRecursion(pRecU->Excl.hThread, pRecU);
    return VINF_SUCCESS;
}
3268
3269
/**
 * Checks the locking order before acquiring an exclusive lock.
 *
 * @returns VINF_SUCCESS, or a lock order / parameter error status.
 * @param   pRec        The exclusive lock record.  NULL is tolerated.
 * @param   hThreadSelf The calling thread; NIL_RTTHREAD means the current
 *                      native thread is auto-adopted.
 * @param   pSrcPos     The acquisition position.  Optional.
 * @param   cMillies    The intended wait time; order checking is skipped
 *                      for waits shorter than the class minimum.
 */
RTDECL(int) RTLockValidatorRecExclCheckOrder(PRTLOCKVALRECEXCL pRec, RTTHREAD hThreadSelf,
                                             PCRTLOCKVALSRCPOS pSrcPos, RTMSINTERVAL cMillies)
{
    /*
     * Validate and adjust input.  Quit early if order validation is disabled.
     */
    PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
    if (!pRecU)
        return VINF_SUCCESS;
    AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    if (   !pRecU->Excl.fEnabled
        || pRecU->Excl.hClass == NIL_RTLOCKVALCLASS
        || pRecU->Excl.hClass->cMsMinOrder == RT_INDEFINITE_WAIT
        || pRecU->Excl.hClass->cMsMinOrder > cMillies)
        return VINF_SUCCESS;

    if (hThreadSelf == NIL_RTTHREAD)
    {
        hThreadSelf = RTThreadSelfAutoAdopt();
        AssertReturn(hThreadSelf != NIL_RTTHREAD, VERR_SEM_LV_INTERNAL_ERROR);
    }
    AssertReturn(hThreadSelf->u32Magic == RTTHREADINT_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    Assert(hThreadSelf == RTThreadSelf());

    /*
     * Detect recursion as it isn't subject to order restrictions.
     */
    if (pRec->hThread == hThreadSelf)
        return VINF_SUCCESS;

    return rtLockValidatorStackCheckLockingOrder(pRecU->Excl.hClass, pRecU->Excl.uSubClass, hThreadSelf, pRecU, pSrcPos);
}
3302
3303
3304RTDECL(int) RTLockValidatorRecExclCheckBlocking(PRTLOCKVALRECEXCL pRec, RTTHREAD hThreadSelf,
3305 PCRTLOCKVALSRCPOS pSrcPos, bool fRecursiveOk, RTMSINTERVAL cMillies,
3306 RTTHREADSTATE enmSleepState, bool fReallySleeping)
3307{
3308 /*
3309 * Fend off wild life.
3310 */
3311 PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
3312 if (!pRecU)
3313 return VINF_SUCCESS;
3314 AssertPtrReturn(pRecU, VERR_SEM_LV_INVALID_PARAMETER);
3315 AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
3316 if (!pRec->fEnabled)
3317 return VINF_SUCCESS;
3318
3319 PRTTHREADINT pThreadSelf = hThreadSelf;
3320 AssertPtrReturn(pThreadSelf, VERR_SEM_LV_INVALID_PARAMETER);
3321 AssertReturn(pThreadSelf->u32Magic == RTTHREADINT_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
3322 Assert(pThreadSelf == RTThreadSelf());
3323
3324 AssertReturn(RTTHREAD_IS_SLEEPING(enmSleepState), VERR_SEM_LV_INVALID_PARAMETER);
3325
3326 RTTHREADSTATE enmThreadState = rtThreadGetState(pThreadSelf);
3327 if (RT_UNLIKELY(enmThreadState != RTTHREADSTATE_RUNNING))
3328 {
3329 AssertReturn( enmThreadState == RTTHREADSTATE_TERMINATED /* rtThreadRemove uses locks too */
3330 || enmThreadState == RTTHREADSTATE_INITIALIZING /* rtThreadInsert uses locks too */
3331 , VERR_SEM_LV_INVALID_PARAMETER);
3332 enmSleepState = enmThreadState;
3333 }
3334
3335 /*
3336 * Record the location.
3337 */
3338 rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pRec, pRecU);
3339 rtLockValidatorSrcPosCopy(&pThreadSelf->LockValidator.SrcPos, pSrcPos);
3340 ASMAtomicWriteBool(&pThreadSelf->LockValidator.fInValidator, true);
3341 pThreadSelf->LockValidator.enmRecState = enmSleepState;
3342 rtThreadSetState(pThreadSelf, enmSleepState);
3343
3344 /*
3345 * Don't do deadlock detection if we're recursing.
3346 *
3347 * On some hosts we don't do recursion accounting our selves and there
3348 * isn't any other place to check for this.
3349 */
3350 int rc = VINF_SUCCESS;
3351 if (rtLockValidatorReadThreadHandle(&pRecU->Excl.hThread) == pThreadSelf)
3352 {
3353 if ( !fRecursiveOk
3354 || ( pRecU->Excl.hClass != NIL_RTLOCKVALCLASS
3355 && !pRecU->Excl.hClass->fRecursionOk))
3356 {
3357 rtLockValComplainFirst("Recursion not allowed!", pSrcPos, pThreadSelf, pRecU, true);
3358 rtLockValComplainPanic();
3359 rc = VERR_SEM_LV_NESTED;
3360 }
3361 }
3362 /*
3363 * Perform deadlock detection.
3364 */
3365 else if ( pRecU->Excl.hClass != NIL_RTLOCKVALCLASS
3366 && ( pRecU->Excl.hClass->cMsMinDeadlock > cMillies
3367 || pRecU->Excl.hClass->cMsMinDeadlock > RT_INDEFINITE_WAIT))
3368 rc = VINF_SUCCESS;
3369 else if (!rtLockValidatorIsSimpleNoDeadlockCase(pRecU))
3370 rc = rtLockValidatorDeadlockDetection(pRecU, pThreadSelf, pSrcPos);
3371
3372 if (RT_SUCCESS(rc))
3373 ASMAtomicWriteBool(&pThreadSelf->fReallySleeping, fReallySleeping);
3374 else
3375 {
3376 rtThreadSetState(pThreadSelf, enmThreadState);
3377 rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pRec, NULL);
3378 }
3379 ASMAtomicWriteBool(&pThreadSelf->LockValidator.fInValidator, false);
3380 return rc;
3381}
3382RT_EXPORT_SYMBOL(RTLockValidatorRecExclCheckBlocking);
3383
3384
3385RTDECL(int) RTLockValidatorRecExclCheckOrderAndBlocking(PRTLOCKVALRECEXCL pRec, RTTHREAD hThreadSelf,
3386 PCRTLOCKVALSRCPOS pSrcPos, bool fRecursiveOk, RTMSINTERVAL cMillies,
3387 RTTHREADSTATE enmSleepState, bool fReallySleeping)
3388{
3389 int rc = RTLockValidatorRecExclCheckOrder(pRec, hThreadSelf, pSrcPos, cMillies);
3390 if (RT_SUCCESS(rc))
3391 rc = RTLockValidatorRecExclCheckBlocking(pRec, hThreadSelf, pSrcPos, fRecursiveOk, cMillies,
3392 enmSleepState, fReallySleeping);
3393 return rc;
3394}
3395RT_EXPORT_SYMBOL(RTLockValidatorRecExclCheckOrderAndBlocking);
3396
3397
3398RTDECL(void) RTLockValidatorRecSharedInitV(PRTLOCKVALRECSHRD pRec, RTLOCKVALCLASS hClass, uint32_t uSubClass,
3399 void *hLock, bool fSignaller, bool fEnabled, const char *pszNameFmt, va_list va)
3400{
3401 RTLOCKVAL_ASSERT_PTR_ALIGN(pRec);
3402 RTLOCKVAL_ASSERT_PTR_ALIGN(hLock);
3403 Assert( uSubClass >= RTLOCKVAL_SUB_CLASS_USER
3404 || uSubClass == RTLOCKVAL_SUB_CLASS_NONE
3405 || uSubClass == RTLOCKVAL_SUB_CLASS_ANY);
3406
3407 pRec->Core.u32Magic = RTLOCKVALRECSHRD_MAGIC;
3408 pRec->uSubClass = uSubClass;
3409 pRec->hClass = rtLockValidatorClassValidateAndRetain(hClass);
3410 pRec->hLock = hLock;
3411 pRec->fEnabled = fEnabled && RTLockValidatorIsEnabled();
3412 pRec->fSignaller = fSignaller;
3413 pRec->pSibling = NULL;
3414
3415 /* the table */
3416 pRec->cEntries = 0;
3417 pRec->iLastEntry = 0;
3418 pRec->cAllocated = 0;
3419 pRec->fReallocating = false;
3420 pRec->fPadding = false;
3421 pRec->papOwners = NULL;
3422
3423 /* the name */
3424 if (pszNameFmt)
3425 RTStrPrintfV(pRec->szName, sizeof(pRec->szName), pszNameFmt, va);
3426 else
3427 {
3428 static uint32_t volatile s_cAnonymous = 0;
3429 uint32_t i = ASMAtomicIncU32(&s_cAnonymous) - 1;
3430 RTStrPrintf(pRec->szName, sizeof(pRec->szName), "anon-shrd-%u", i);
3431 }
3432}
3433
3434
/**
 * Initializes a shared lock validator record - ellipsis variant that simply
 * forwards to RTLockValidatorRecSharedInitV.
 *
 * @param   pRec        The record to initialize.
 * @param   hClass      The lock class handle, NIL_RTLOCKVALCLASS is fine.
 * @param   uSubClass   The sub-class (user value or NONE/ANY special).
 * @param   hLock       The lock handle/pointer this record belongs to.
 * @param   fSignaller  Whether this is a signaller-style (event) record.
 * @param   fEnabled    Whether validation is requested.
 * @param   pszNameFmt  Name format string; NULL yields an anonymous name.
 * @param   ...         Format arguments.
 */
RTDECL(void) RTLockValidatorRecSharedInit(PRTLOCKVALRECSHRD pRec, RTLOCKVALCLASS hClass, uint32_t uSubClass,
                                          void *hLock, bool fSignaller, bool fEnabled, const char *pszNameFmt, ...)
{
    va_list va;
    va_start(va, pszNameFmt);
    RTLockValidatorRecSharedInitV(pRec, hClass, uSubClass, hLock, fSignaller, fEnabled, pszNameFmt, va);
    va_end(va);
}
3443
3444
3445RTDECL(int) RTLockValidatorRecSharedCreateV(PRTLOCKVALRECSHRD *ppRec, RTLOCKVALCLASS hClass,
3446 uint32_t uSubClass, void *pvLock, bool fSignaller, bool fEnabled,
3447 const char *pszNameFmt, va_list va)
3448{
3449 PRTLOCKVALRECSHRD pRec;
3450 *ppRec = pRec = (PRTLOCKVALRECSHRD)RTMemAlloc(sizeof(*pRec));
3451 if (!pRec)
3452 return VERR_NO_MEMORY;
3453 RTLockValidatorRecSharedInitV(pRec, hClass, uSubClass, pvLock, fSignaller, fEnabled, pszNameFmt, va);
3454 return VINF_SUCCESS;
3455}
3456
3457
/**
 * Allocates and initializes a shared lock validator record - ellipsis
 * variant forwarding to RTLockValidatorRecSharedCreateV.
 *
 * @returns VINF_SUCCESS or VERR_NO_MEMORY.
 * @param   ppRec       Where to return the new record (NULL on failure).
 * @param   hClass      The lock class handle, NIL_RTLOCKVALCLASS is fine.
 * @param   uSubClass   The sub-class.
 * @param   pvLock      The lock handle/pointer this record belongs to.
 * @param   fSignaller  Whether this is a signaller-style (event) record.
 * @param   fEnabled    Whether validation is requested.
 * @param   pszNameFmt  Name format string; NULL yields an anonymous name.
 * @param   ...         Format arguments.
 */
RTDECL(int) RTLockValidatorRecSharedCreate(PRTLOCKVALRECSHRD *ppRec, RTLOCKVALCLASS hClass,
                                           uint32_t uSubClass, void *pvLock, bool fSignaller, bool fEnabled,
                                           const char *pszNameFmt, ...)
{
    va_list va;
    va_start(va, pszNameFmt);
    int rc = RTLockValidatorRecSharedCreateV(ppRec, hClass, uSubClass, pvLock, fSignaller, fEnabled, pszNameFmt, va);
    va_end(va);
    return rc;
}
3468
3469
/**
 * Deinitializes a shared lock validator record.
 *
 * Waits out any concurrent owner-table reallocation by cycling the
 * destruction and detection locks, then kills the magic, frees the owner
 * table and releases the class reference.
 *
 * @param   pRec    The record to deinitialize.
 */
RTDECL(void) RTLockValidatorRecSharedDelete(PRTLOCKVALRECSHRD pRec)
{
    Assert(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC);

    /** @todo Check that it's not on our stack first.  Need to make it
     *        configurable whether deleting a owned lock is acceptable? */

    /*
     * Flip it into table realloc mode and take the destruction lock.
     */
    rtLockValidatorSerializeDestructEnter();
    while (!ASMAtomicCmpXchgBool(&pRec->fReallocating, true, false))
    {
        /* Someone else holds the realloc flag; drop our lock, let the
           detection side drain, and try again. */
        rtLockValidatorSerializeDestructLeave();

        rtLockValidatorSerializeDetectionEnter();
        rtLockValidatorSerializeDetectionLeave();

        rtLockValidatorSerializeDestructEnter();
    }

    /* Kill the magic before tearing down state; concurrent users key off it. */
    ASMAtomicWriteU32(&pRec->Core.u32Magic, RTLOCKVALRECSHRD_MAGIC_DEAD);
    RTLOCKVALCLASS hClass;
    ASMAtomicXchgHandle(&pRec->hClass, NIL_RTLOCKVALCLASS, &hClass);
    if (pRec->papOwners)
    {
        /* Detach the owner table before freeing it. */
        PRTLOCKVALRECSHRDOWN volatile *papOwners = pRec->papOwners;
        ASMAtomicUoWriteNullPtr(&pRec->papOwners);
        ASMAtomicUoWriteU32(&pRec->cAllocated, 0);

        RTMemFree((void *)papOwners);
    }
    if (pRec->pSibling)
        rtLockValidatorUnlinkAllSiblings(&pRec->Core);
    ASMAtomicWriteBool(&pRec->fReallocating, false);

    rtLockValidatorSerializeDestructLeave();

    /* Release the class reference taken by the init code (outside the lock). */
    if (hClass != NIL_RTLOCKVALCLASS)
        RTLockValidatorClassRelease(hClass);
}
3511
3512
3513RTDECL(void) RTLockValidatorRecSharedDestroy(PRTLOCKVALRECSHRD *ppRec)
3514{
3515 PRTLOCKVALRECSHRD pRec = *ppRec;
3516 *ppRec = NULL;
3517 if (pRec)
3518 {
3519 RTLockValidatorRecSharedDelete(pRec);
3520 RTMemFree(pRec);
3521 }
3522}
3523
3524
/**
 * Changes the sub-class of a shared lock validator record.
 *
 * @returns The previous sub-class, or RTLOCKVAL_SUB_CLASS_INVALID if the
 *          record or the new sub-class value is invalid.
 * @param   pRec        The record.
 * @param   uSubClass   The new sub-class; a user value or one of the
 *                      NONE/ANY specials.
 */
RTDECL(uint32_t) RTLockValidatorRecSharedSetSubClass(PRTLOCKVALRECSHRD pRec, uint32_t uSubClass)
{
    AssertPtrReturn(pRec, RTLOCKVAL_SUB_CLASS_INVALID);
    AssertReturn(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, RTLOCKVAL_SUB_CLASS_INVALID);
    AssertReturn(   uSubClass >= RTLOCKVAL_SUB_CLASS_USER
                 || uSubClass == RTLOCKVAL_SUB_CLASS_NONE
                 || uSubClass == RTLOCKVAL_SUB_CLASS_ANY,
                 RTLOCKVAL_SUB_CLASS_INVALID);
    return ASMAtomicXchgU32(&pRec->uSubClass, uSubClass);
}
3535
3536
/**
 * Locates an owner (thread) in a shared lock record.
 *
 * Performs a linear scan of the owner table while holding the deadlock
 * detection lock (shared side).
 *
 * @returns Pointer to the owner entry on success, NULL on failure.
 * @param   pShared     The shared lock record.
 * @param   hThread     The thread (owner) to find.
 * @param   piEntry     Where to optionally return the table index.
 *                      Optional.
 */
DECLINLINE(PRTLOCKVALRECUNION)
rtLockValidatorRecSharedFindOwner(PRTLOCKVALRECSHRD pShared, RTTHREAD hThread, uint32_t *piEntry)
{
    rtLockValidatorSerializeDetectionEnter();

    PRTLOCKVALRECSHRDOWN volatile *papOwners = pShared->papOwners;
    if (papOwners)
    {
        uint32_t const cMax = pShared->cAllocated;
        for (uint32_t iEntry = 0; iEntry < cMax; iEntry++)
        {
            /* Entries may be NULL (free slots); skip those. */
            PRTLOCKVALRECUNION pEntry = (PRTLOCKVALRECUNION)rtLockValidatorUoReadSharedOwner(&papOwners[iEntry]);
            if (pEntry && pEntry->ShrdOwner.hThread == hThread)
            {
                rtLockValidatorSerializeDetectionLeave();
                if (piEntry)
                    *piEntry = iEntry;
                return pEntry;
            }
        }
    }

    rtLockValidatorSerializeDetectionLeave();
    return NULL;
}
3571
3572
/**
 * Checks the locking order before acquiring a shared lock.
 *
 * @returns VINF_SUCCESS, or a lock order / parameter error status.
 * @param   pRec        The shared lock record.
 * @param   hThreadSelf The calling thread; NIL_RTTHREAD means the current
 *                      native thread is auto-adopted.
 * @param   pSrcPos     The acquisition position.  Optional.
 * @param   cMillies    The intended wait time; order checking is skipped
 *                      for waits shorter than the class minimum.
 */
RTDECL(int) RTLockValidatorRecSharedCheckOrder(PRTLOCKVALRECSHRD pRec, RTTHREAD hThreadSelf,
                                               PCRTLOCKVALSRCPOS pSrcPos, RTMSINTERVAL cMillies)
{
    /*
     * Validate and adjust input.  Quit early if order validation is disabled.
     */
    PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
    AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    if (   !pRecU->Shared.fEnabled
        || pRecU->Shared.hClass == NIL_RTLOCKVALCLASS
        || pRecU->Shared.hClass->cMsMinOrder == RT_INDEFINITE_WAIT
        || pRecU->Shared.hClass->cMsMinOrder > cMillies
        )
        return VINF_SUCCESS;

    if (hThreadSelf == NIL_RTTHREAD)
    {
        hThreadSelf = RTThreadSelfAutoAdopt();
        AssertReturn(hThreadSelf != NIL_RTTHREAD, VERR_SEM_LV_INTERNAL_ERROR);
    }
    AssertReturn(hThreadSelf->u32Magic == RTTHREADINT_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    Assert(hThreadSelf == RTThreadSelf());

    /*
     * Detect recursion as it isn't subject to order restrictions.
     */
    PRTLOCKVALRECUNION pEntry = rtLockValidatorRecSharedFindOwner(&pRecU->Shared, hThreadSelf, NULL);
    if (pEntry)
        return VINF_SUCCESS;

    return rtLockValidatorStackCheckLockingOrder(pRecU->Shared.hClass, pRecU->Shared.uSubClass, hThreadSelf, pRecU, pSrcPos);
}
3605
3606
/**
 * Performs pre-block checks (recursion policy and deadlock detection) before
 * the calling thread goes to sleep on a shared lock.
 *
 * @returns VINF_SUCCESS to proceed with the wait; VERR_SEM_LV_NESTED,
 *          deadlock statuses, or VERR_SEM_LV_INVALID_PARAMETER otherwise.
 * @param   pRec            The shared lock record.
 * @param   hThreadSelf     The calling thread (must already be known).
 * @param   pSrcPos         The blocking position.  Optional.
 * @param   fRecursiveOk    Whether recursive blocking is acceptable here.
 * @param   cMillies        The intended wait time; deadlock detection is
 *                          skipped for waits below the class minimum.
 * @param   enmSleepState   The sleep state to enter.
 * @param   fReallySleeping Whether the caller will really go to sleep.
 */
RTDECL(int) RTLockValidatorRecSharedCheckBlocking(PRTLOCKVALRECSHRD pRec, RTTHREAD hThreadSelf,
                                                  PCRTLOCKVALSRCPOS pSrcPos, bool fRecursiveOk, RTMSINTERVAL cMillies,
                                                  RTTHREADSTATE enmSleepState, bool fReallySleeping)
{
    /*
     * Fend off wild life.
     */
    PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
    AssertPtrReturn(pRecU, VERR_SEM_LV_INVALID_PARAMETER);
    AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    if (!pRecU->Shared.fEnabled)
        return VINF_SUCCESS;

    PRTTHREADINT pThreadSelf = hThreadSelf;
    AssertPtrReturn(pThreadSelf, VERR_SEM_LV_INVALID_PARAMETER);
    AssertReturn(pThreadSelf->u32Magic == RTTHREADINT_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    Assert(pThreadSelf == RTThreadSelf());

    AssertReturn(RTTHREAD_IS_SLEEPING(enmSleepState), VERR_SEM_LV_INVALID_PARAMETER);

    RTTHREADSTATE enmThreadState = rtThreadGetState(pThreadSelf);
    if (RT_UNLIKELY(enmThreadState != RTTHREADSTATE_RUNNING))
    {
        AssertReturn(   enmThreadState == RTTHREADSTATE_TERMINATED   /* rtThreadRemove uses locks too */
                     || enmThreadState == RTTHREADSTATE_INITIALIZING /* rtThreadInsert uses locks too */
                     , VERR_SEM_LV_INVALID_PARAMETER);
        enmSleepState = enmThreadState;
    }

    /*
     * Record the location.
     */
    rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pRec, pRecU);
    rtLockValidatorSrcPosCopy(&pThreadSelf->LockValidator.SrcPos, pSrcPos);
    ASMAtomicWriteBool(&pThreadSelf->LockValidator.fInValidator, true);
    pThreadSelf->LockValidator.enmRecState = enmSleepState;
    rtThreadSetState(pThreadSelf, enmSleepState);

    /*
     * Don't do deadlock detection if we're recursing.
     * (Signaller records never count as recursion, hence the fSignaller check.)
     */
    int rc = VINF_SUCCESS;
    PRTLOCKVALRECUNION pEntry = !pRecU->Shared.fSignaller
                              ? rtLockValidatorRecSharedFindOwner(&pRecU->Shared, pThreadSelf, NULL)
                              : NULL;
    if (pEntry)
    {
        if (   !fRecursiveOk
            || (   pRec->hClass
                && !pRec->hClass->fRecursionOk)
            )
        {
            rtLockValComplainFirst("Recursion not allowed!", pSrcPos, pThreadSelf, pRecU, true);
            rtLockValComplainPanic();
            rc = VERR_SEM_LV_NESTED;
        }
    }
    /*
     * Perform deadlock detection.
     */
    else if (   pRec->hClass
             && (   pRec->hClass->cMsMinDeadlock == RT_INDEFINITE_WAIT
                 || pRec->hClass->cMsMinDeadlock > cMillies))
        rc = VINF_SUCCESS;
    else if (!rtLockValidatorIsSimpleNoDeadlockCase(pRecU))
        rc = rtLockValidatorDeadlockDetection(pRecU, pThreadSelf, pSrcPos);

    if (RT_SUCCESS(rc))
        ASMAtomicWriteBool(&pThreadSelf->fReallySleeping, fReallySleeping);
    else
    {
        /* Validation failed: restore the running state and detach the record. */
        rtThreadSetState(pThreadSelf, enmThreadState);
        rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pRec, NULL);
    }
    ASMAtomicWriteBool(&pThreadSelf->LockValidator.fInValidator, false);
    return rc;
}
RT_EXPORT_SYMBOL(RTLockValidatorRecSharedCheckBlocking);
3685
3686
3687RTDECL(int) RTLockValidatorRecSharedCheckOrderAndBlocking(PRTLOCKVALRECSHRD pRec, RTTHREAD hThreadSelf,
3688 PCRTLOCKVALSRCPOS pSrcPos, bool fRecursiveOk, RTMSINTERVAL cMillies,
3689 RTTHREADSTATE enmSleepState, bool fReallySleeping)
3690{
3691 int rc = RTLockValidatorRecSharedCheckOrder(pRec, hThreadSelf, pSrcPos, cMillies);
3692 if (RT_SUCCESS(rc))
3693 rc = RTLockValidatorRecSharedCheckBlocking(pRec, hThreadSelf, pSrcPos, fRecursiveOk, cMillies,
3694 enmSleepState, fReallySleeping);
3695 return rc;
3696}
3697RT_EXPORT_SYMBOL(RTLockValidatorRecSharedCheckOrderAndBlocking);
3698
3699
/**
 * Allocates and initializes an owner entry for the shared lock record.
 *
 * Prefers one of the thread's statically allocated owner slots (tracked by
 * the bmFreeShrdOwners bitmap); falls back on heap allocation.
 *
 * @returns The new owner entry, or NULL if heap allocation failed.
 * @param   pRec            The shared lock record.
 * @param   pThreadSelf     The calling thread and owner.  Used for record
 *                          initialization and allocation.
 * @param   pSrcPos         The source position.  Optional.
 */
DECLINLINE(PRTLOCKVALRECUNION)
rtLockValidatorRecSharedAllocOwner(PRTLOCKVALRECSHRD pRec, PRTTHREADINT pThreadSelf, PCRTLOCKVALSRCPOS pSrcPos)
{
    PRTLOCKVALRECUNION pEntry;

    /*
     * Check if the thread has any statically allocated records we can easily
     * make use of.  (ASMBitFirstSetU32 returns 1-based bit numbers, 0 when
     * no bit is set.)
     */
    unsigned iEntry = ASMBitFirstSetU32(ASMAtomicUoReadU32(&pThreadSelf->LockValidator.bmFreeShrdOwners));
    if (   iEntry > 0
        && ASMAtomicBitTestAndClear(&pThreadSelf->LockValidator.bmFreeShrdOwners, iEntry - 1))
    {
        pEntry = (PRTLOCKVALRECUNION)&pThreadSelf->LockValidator.aShrdOwners[iEntry - 1];
        Assert(!pEntry->ShrdOwner.fReserved);
        pEntry->ShrdOwner.fStaticAlloc = true;
        /* Keep the thread structure alive for as long as the static slot is in use. */
        rtThreadGet(pThreadSelf);
    }
    else
    {
        pEntry = (PRTLOCKVALRECUNION)RTMemAlloc(sizeof(RTLOCKVALRECSHRDOWN));
        if (RT_UNLIKELY(!pEntry))
            return NULL;
        pEntry->ShrdOwner.fStaticAlloc = false;
    }

    pEntry->Core.u32Magic        = RTLOCKVALRECSHRDOWN_MAGIC;
    pEntry->ShrdOwner.cRecursion = 1;
    pEntry->ShrdOwner.fReserved  = true;
    pEntry->ShrdOwner.hThread    = pThreadSelf;
    pEntry->ShrdOwner.pDown      = NULL;
    pEntry->ShrdOwner.pSharedRec = pRec;
#if HC_ARCH_BITS == 32
    pEntry->ShrdOwner.pvReserved = NULL;
#endif
    if (pSrcPos)
        pEntry->ShrdOwner.SrcPos = *pSrcPos;
    else
        rtLockValidatorSrcPosInit(&pEntry->ShrdOwner.SrcPos);
    return pEntry;
}
3750
3751
/**
 * Frees an owner entry allocated by rtLockValidatorRecSharedAllocOwner.
 *
 * Static entries are returned to the owning thread's free bitmap (releasing
 * the thread reference taken at allocation); heap entries are freed after
 * cycling the destruction lock to let racing readers drain.
 *
 * @param   pEntry      The owner entry.  NULL is ignored.
 */
DECLINLINE(void) rtLockValidatorRecSharedFreeOwner(PRTLOCKVALRECSHRDOWN pEntry)
{
    if (pEntry)
    {
        Assert(pEntry->Core.u32Magic == RTLOCKVALRECSHRDOWN_MAGIC);
        ASMAtomicWriteU32(&pEntry->Core.u32Magic, RTLOCKVALRECSHRDOWN_MAGIC_DEAD);

        PRTTHREADINT pThread;
        ASMAtomicXchgHandle(&pEntry->hThread, NIL_RTTHREAD, &pThread);

        Assert(pEntry->fReserved);
        pEntry->fReserved = false;

        if (pEntry->fStaticAlloc)
        {
            AssertPtrReturnVoid(pThread);
            AssertReturnVoid(pThread->u32Magic == RTTHREADINT_MAGIC);

            /* Work out which static slot this is and mark it free again. */
            uintptr_t iEntry = pEntry - &pThread->LockValidator.aShrdOwners[0];
            AssertReleaseReturnVoid(iEntry < RT_ELEMENTS(pThread->LockValidator.aShrdOwners));

            Assert(!ASMBitTest(&pThread->LockValidator.bmFreeShrdOwners, (int32_t)iEntry));
            ASMAtomicBitSet(&pThread->LockValidator.bmFreeShrdOwners, (int32_t)iEntry);

            rtThreadRelease(pThread);
        }
        else
        {
            /* Cycle the destruction lock so nobody is still looking at the
               entry before we hand it back to the heap. */
            rtLockValidatorSerializeDestructEnter();
            rtLockValidatorSerializeDestructLeave();

            RTMemFree(pEntry);
        }
    }
}
3792
3793
/**
 * Make more room in the table.
 *
 * Called with the detection lock held; temporarily trades it for the
 * destruction lock in order to reallocate the owner table.
 *
 * @retval true on success (detection lock held again).
 * @retval false if we're out of memory or running into a bad race condition
 *         (probably a bug somewhere).  No longer holding the lock.
 *
 * @param   pShared     The shared lock record.
 */
static bool rtLockValidatorRecSharedMakeRoom(PRTLOCKVALRECSHRD pShared)
{
    for (unsigned i = 0; i < 1000; i++)
    {
        /*
         * Switch to the other data access direction.
         */
        rtLockValidatorSerializeDetectionLeave();
        if (i >= 10)
        {
            /* NOTE(review): these asserts are tripwires that fire if we spin
               this many times - presumably to flag a livelock; confirm. */
            Assert(i != 10 && i != 100);
            /* NOTE(review): the bool result (0 or 1) is passed as the
               millisecond count, i.e. yield-like sleep below 100 iterations
               and a 1ms sleep from then on.  Looks intentional - confirm. */
            RTThreadSleep(i >= 100);
        }
        rtLockValidatorSerializeDestructEnter();

        /*
         * Try grab the privilege to reallocating the table.
         */
        if (    pShared->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
            &&  ASMAtomicCmpXchgBool(&pShared->fReallocating, true, false))
        {
            uint32_t cAllocated = pShared->cAllocated;
            if (cAllocated < pShared->cEntries)
            {
                /*
                 * Ok, still not enough space.  Reallocate the table.
                 */
#if 0  /** @todo enable this after making sure growing works flawlessly. */
                uint32_t cInc = RT_ALIGN_32(pShared->cEntries - cAllocated, 16);
#else
                uint32_t cInc = RT_ALIGN_32(pShared->cEntries - cAllocated, 1);
#endif
                PRTLOCKVALRECSHRDOWN *papOwners;
                papOwners = (PRTLOCKVALRECSHRDOWN *)RTMemRealloc((void *)pShared->papOwners,
                                                                 (cAllocated + cInc) * sizeof(void *));
                if (!papOwners)
                {
                    ASMAtomicWriteBool(&pShared->fReallocating, false);
                    rtLockValidatorSerializeDestructLeave();
                    /* RTMemRealloc will assert */
                    return false;
                }

                /* NULL the new slots before publishing the bigger table. */
                while (cInc-- > 0)
                {
                    papOwners[cAllocated] = NULL;
                    cAllocated++;
                }

                ASMAtomicWritePtr(&pShared->papOwners, papOwners);
                ASMAtomicWriteU32(&pShared->cAllocated, cAllocated);
            }
            ASMAtomicWriteBool(&pShared->fReallocating, false);
        }
        rtLockValidatorSerializeDestructLeave();

        rtLockValidatorSerializeDetectionEnter();
        if (RT_UNLIKELY(pShared->Core.u32Magic != RTLOCKVALRECSHRD_MAGIC))
            break;

        if (pShared->cAllocated >= pShared->cEntries)
            return true;
    }

    rtLockValidatorSerializeDetectionLeave();
    AssertFailed(); /* too many iterations or destroyed while racing. */
    return false;
}
3871
3872
/**
 * Adds an owner entry to a shared lock record.
 *
 * @returns true on success, false on serious race or if we're out of memory.
 * @param   pShared     The shared lock record.
 * @param   pEntry      The owner entry.
 */
DECLINLINE(bool) rtLockValidatorRecSharedAddOwner(PRTLOCKVALRECSHRD pShared, PRTLOCKVALRECSHRDOWN pEntry)
{
    rtLockValidatorSerializeDetectionEnter();
    if (RT_LIKELY(pShared->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC)) /* paranoia */
    {
        /* Reserve a slot; grow the table if the new count exceeds the allocation. */
        if (   ASMAtomicIncU32(&pShared->cEntries) > pShared->cAllocated /** @todo add fudge */
            && !rtLockValidatorRecSharedMakeRoom(pShared))
            return false; /* the worker leaves the lock */
            /* NOTE(review): cEntries stays incremented on this failure path;
               verify the caller/record can tolerate the over-count. */

        /* Claim the first free (NULL) slot atomically.  The outer loop retries
           because concurrent adds may grab slots from under us. */
        PRTLOCKVALRECSHRDOWN volatile *papOwners = pShared->papOwners;
        uint32_t const cMax = pShared->cAllocated;
        for (unsigned i = 0; i < 100; i++)
        {
            for (uint32_t iEntry = 0; iEntry < cMax; iEntry++)
            {
                if (ASMAtomicCmpXchgPtr(&papOwners[iEntry], pEntry, NULL))
                {
                    rtLockValidatorSerializeDetectionLeave();
                    return true;
                }
            }
            Assert(i != 25); /* fires once to flag excessive retrying */
        }
        AssertFailed();
    }
    rtLockValidatorSerializeDetectionLeave();
    return false;
}
3908
3909
/**
 * Remove an owner entry from a shared lock record and free it.
 *
 * @param   pShared     The shared lock record.
 * @param   pEntry      The owner entry to remove.
 * @param   iEntry      The last known index (used as a fast-path hint; the
 *                      entry may have moved if the table was reallocated).
 */
DECLINLINE(void) rtLockValidatorRecSharedRemoveAndFreeOwner(PRTLOCKVALRECSHRD pShared, PRTLOCKVALRECSHRDOWN pEntry,
                                                            uint32_t iEntry)
{
    /*
     * Remove it from the table.
     */
    rtLockValidatorSerializeDetectionEnter();
    AssertReturnVoidStmt(pShared->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, rtLockValidatorSerializeDetectionLeave());
    if (RT_UNLIKELY(   iEntry >= pShared->cAllocated
                    || !ASMAtomicCmpXchgPtr(&pShared->papOwners[iEntry], NULL, pEntry)))
    {
        /* this shouldn't happen yet... */
        AssertFailed();
        /* Fall back to a linear scan of the whole table for the entry. */
        PRTLOCKVALRECSHRDOWN volatile *papOwners = pShared->papOwners;
        uint32_t const cMax = pShared->cAllocated;
        for (iEntry = 0; iEntry < cMax; iEntry++)
            if (ASMAtomicCmpXchgPtr(&papOwners[iEntry], NULL, pEntry))
                break;
        AssertReturnVoidStmt(iEntry < cMax, rtLockValidatorSerializeDetectionLeave());
    }
    uint32_t cNow = ASMAtomicDecU32(&pShared->cEntries);
    /* Bit 31 set would mean the counter underflowed. */
    Assert(!(cNow & RT_BIT_32(31))); NOREF(cNow);
    rtLockValidatorSerializeDetectionLeave();

    /*
     * Successfully removed, now free it.
     */
    rtLockValidatorRecSharedFreeOwner(pEntry);
}
3946
3947
RTDECL(void) RTLockValidatorRecSharedResetOwner(PRTLOCKVALRECSHRD pRec, RTTHREAD hThread, PCRTLOCKVALSRCPOS pSrcPos)
{
    AssertReturnVoid(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC);
    if (!pRec->fEnabled)
        return;
    AssertReturnVoid(hThread == NIL_RTTHREAD || hThread->u32Magic == RTTHREADINT_MAGIC);
    AssertReturnVoid(pRec->fSignaller); /* only valid on signaller records */

    /*
     * Free all current owners.
     */
    rtLockValidatorSerializeDetectionEnter();
    while (ASMAtomicUoReadU32(&pRec->cEntries) > 0)
    {
        AssertReturnVoidStmt(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, rtLockValidatorSerializeDetectionLeave());
        uint32_t iEntry = 0;
        uint32_t cEntries = pRec->cAllocated;
        PRTLOCKVALRECSHRDOWN volatile *papEntries = pRec->papOwners;
        while (iEntry < cEntries)
        {
            /* Detach the entry atomically so no one else can see it. */
            PRTLOCKVALRECSHRDOWN pEntry = ASMAtomicXchgPtrT(&papEntries[iEntry], NULL, PRTLOCKVALRECSHRDOWN);
            if (pEntry)
            {
                ASMAtomicDecU32(&pRec->cEntries);
                /* Drop the detection lock while freeing the entry... */
                rtLockValidatorSerializeDetectionLeave();

                rtLockValidatorRecSharedFreeOwner(pEntry);

                /* ...then reacquire it; the table may have been reallocated
                   meanwhile, so re-read its pointer and size. */
                rtLockValidatorSerializeDetectionEnter();
                if (ASMAtomicUoReadU32(&pRec->cEntries) == 0)
                    break;
                cEntries = pRec->cAllocated;
                papEntries = pRec->papOwners;
            }
            iEntry++;
        }
    }
    rtLockValidatorSerializeDetectionLeave();

    if (hThread != NIL_RTTHREAD)
    {
        /*
         * Allocate a new owner entry and insert it into the table.
         */
        PRTLOCKVALRECUNION pEntry = rtLockValidatorRecSharedAllocOwner(pRec, hThread, pSrcPos);
        if (   pEntry
            && !rtLockValidatorRecSharedAddOwner(pRec, &pEntry->ShrdOwner))
            rtLockValidatorRecSharedFreeOwner(&pEntry->ShrdOwner);
    }
}
RT_EXPORT_SYMBOL(RTLockValidatorRecSharedResetOwner);
3999
4000
4001RTDECL(void) RTLockValidatorRecSharedAddOwner(PRTLOCKVALRECSHRD pRec, RTTHREAD hThread, PCRTLOCKVALSRCPOS pSrcPos)
4002{
4003 AssertReturnVoid(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC);
4004 if (!pRec->fEnabled)
4005 return;
4006 if (hThread == NIL_RTTHREAD)
4007 {
4008 hThread = RTThreadSelfAutoAdopt();
4009 AssertReturnVoid(hThread != NIL_RTTHREAD);
4010 }
4011 AssertReturnVoid(hThread->u32Magic == RTTHREADINT_MAGIC);
4012
4013 /*
4014 * Recursive?
4015 *
4016 * Note! This code can be optimized to try avoid scanning the table on
4017 * insert. However, that's annoying work that makes the code big,
4018 * so it can wait til later sometime.
4019 */
4020 PRTLOCKVALRECUNION pEntry = rtLockValidatorRecSharedFindOwner(pRec, hThread, NULL);
4021 if (pEntry)
4022 {
4023 Assert(!pRec->fSignaller);
4024 pEntry->ShrdOwner.cRecursion++;
4025 rtLockValidatorStackPushRecursion(hThread, pEntry, pSrcPos);
4026 return;
4027 }
4028
4029 /*
4030 * Allocate a new owner entry and insert it into the table.
4031 */
4032 pEntry = rtLockValidatorRecSharedAllocOwner(pRec, hThread, pSrcPos);
4033 if (pEntry)
4034 {
4035 if (rtLockValidatorRecSharedAddOwner(pRec, &pEntry->ShrdOwner))
4036 {
4037 if (!pRec->fSignaller)
4038 rtLockValidatorStackPush(hThread, pEntry);
4039 }
4040 else
4041 rtLockValidatorRecSharedFreeOwner(&pEntry->ShrdOwner);
4042 }
4043}
4044RT_EXPORT_SYMBOL(RTLockValidatorRecSharedAddOwner);
4045
4046
4047RTDECL(void) RTLockValidatorRecSharedRemoveOwner(PRTLOCKVALRECSHRD pRec, RTTHREAD hThread)
4048{
4049 AssertReturnVoid(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC);
4050 if (!pRec->fEnabled)
4051 return;
4052 if (hThread == NIL_RTTHREAD)
4053 {
4054 hThread = RTThreadSelfAutoAdopt();
4055 AssertReturnVoid(hThread != NIL_RTTHREAD);
4056 }
4057 AssertReturnVoid(hThread->u32Magic == RTTHREADINT_MAGIC);
4058
4059 /*
4060 * Find the entry hope it's a recursive one.
4061 */
4062 uint32_t iEntry = UINT32_MAX; /* shuts up gcc */
4063 PRTLOCKVALRECUNION pEntry = rtLockValidatorRecSharedFindOwner(pRec, hThread, &iEntry);
4064 AssertReturnVoid(pEntry);
4065 AssertReturnVoid(pEntry->ShrdOwner.cRecursion > 0);
4066
4067 uint32_t c = --pEntry->ShrdOwner.cRecursion;
4068 if (c == 0)
4069 {
4070 if (!pRec->fSignaller)
4071 rtLockValidatorStackPop(hThread, (PRTLOCKVALRECUNION)pEntry);
4072 rtLockValidatorRecSharedRemoveAndFreeOwner(pRec, &pEntry->ShrdOwner, iEntry);
4073 }
4074 else
4075 {
4076 Assert(!pRec->fSignaller);
4077 rtLockValidatorStackPopRecursion(hThread, pEntry);
4078 }
4079}
4080RT_EXPORT_SYMBOL(RTLockValidatorRecSharedRemoveOwner);
4081
4082
4083RTDECL(bool) RTLockValidatorRecSharedIsOwner(PRTLOCKVALRECSHRD pRec, RTTHREAD hThread)
4084{
4085 /* Validate and resolve input. */
4086 AssertReturn(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, false);
4087 if (!pRec->fEnabled)
4088 return false;
4089 if (hThread == NIL_RTTHREAD)
4090 {
4091 hThread = RTThreadSelfAutoAdopt();
4092 AssertReturn(hThread != NIL_RTTHREAD, false);
4093 }
4094 AssertReturn(hThread->u32Magic == RTTHREADINT_MAGIC, false);
4095
4096 /* Do the job. */
4097 PRTLOCKVALRECUNION pEntry = rtLockValidatorRecSharedFindOwner(pRec, hThread, NULL);
4098 return pEntry != NULL;
4099}
4100RT_EXPORT_SYMBOL(RTLockValidatorRecSharedIsOwner);
4101
4102
4103RTDECL(int) RTLockValidatorRecSharedCheckAndRelease(PRTLOCKVALRECSHRD pRec, RTTHREAD hThreadSelf)
4104{
4105 AssertReturn(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
4106 if (!pRec->fEnabled)
4107 return VINF_SUCCESS;
4108 if (hThreadSelf == NIL_RTTHREAD)
4109 {
4110 hThreadSelf = RTThreadSelfAutoAdopt();
4111 AssertReturn(hThreadSelf != NIL_RTTHREAD, VERR_SEM_LV_INTERNAL_ERROR);
4112 }
4113 Assert(hThreadSelf == RTThreadSelf());
4114 AssertReturn(hThreadSelf->u32Magic == RTTHREADINT_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
4115
4116 /*
4117 * Locate the entry for this thread in the table.
4118 */
4119 uint32_t iEntry = 0;
4120 PRTLOCKVALRECUNION pEntry = rtLockValidatorRecSharedFindOwner(pRec, hThreadSelf, &iEntry);
4121 if (RT_UNLIKELY(!pEntry))
4122 {
4123 rtLockValComplainFirst("Not owner (shared)!", NULL, hThreadSelf, (PRTLOCKVALRECUNION)pRec, true);
4124 rtLockValComplainPanic();
4125 return VERR_SEM_LV_NOT_OWNER;
4126 }
4127
4128 /*
4129 * Check the release order.
4130 */
4131 if ( pRec->hClass != NIL_RTLOCKVALCLASS
4132 && pRec->hClass->fStrictReleaseOrder
4133 && pRec->hClass->cMsMinOrder != RT_INDEFINITE_WAIT
4134 )
4135 {
4136 int rc = rtLockValidatorStackCheckReleaseOrder(hThreadSelf, (PRTLOCKVALRECUNION)pEntry);
4137 if (RT_FAILURE(rc))
4138 return rc;
4139 }
4140
4141 /*
4142 * Release the ownership or unwind a level of recursion.
4143 */
4144 Assert(pEntry->ShrdOwner.cRecursion > 0);
4145 uint32_t c = --pEntry->ShrdOwner.cRecursion;
4146 if (c == 0)
4147 {
4148 rtLockValidatorStackPop(hThreadSelf, pEntry);
4149 rtLockValidatorRecSharedRemoveAndFreeOwner(pRec, &pEntry->ShrdOwner, iEntry);
4150 }
4151 else
4152 rtLockValidatorStackPopRecursion(hThreadSelf, pEntry);
4153
4154 return VINF_SUCCESS;
4155}
4156
4157
4158RTDECL(int) RTLockValidatorRecSharedCheckSignaller(PRTLOCKVALRECSHRD pRec, RTTHREAD hThreadSelf)
4159{
4160 AssertReturn(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
4161 if (!pRec->fEnabled)
4162 return VINF_SUCCESS;
4163 if (hThreadSelf == NIL_RTTHREAD)
4164 {
4165 hThreadSelf = RTThreadSelfAutoAdopt();
4166 AssertReturn(hThreadSelf != NIL_RTTHREAD, VERR_SEM_LV_INTERNAL_ERROR);
4167 }
4168 Assert(hThreadSelf == RTThreadSelf());
4169 AssertReturn(hThreadSelf->u32Magic == RTTHREADINT_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
4170
4171 /*
4172 * Locate the entry for this thread in the table.
4173 */
4174 uint32_t iEntry = 0;
4175 PRTLOCKVALRECUNION pEntry = rtLockValidatorRecSharedFindOwner(pRec, hThreadSelf, &iEntry);
4176 if (RT_UNLIKELY(!pEntry))
4177 {
4178 rtLockValComplainFirst("Invalid signaller!", NULL, hThreadSelf, (PRTLOCKVALRECUNION)pRec, true);
4179 rtLockValComplainPanic();
4180 return VERR_SEM_LV_NOT_SIGNALLER;
4181 }
4182 return VINF_SUCCESS;
4183}
4184
4185
4186RTDECL(int32_t) RTLockValidatorWriteLockGetCount(RTTHREAD Thread)
4187{
4188 if (Thread == NIL_RTTHREAD)
4189 return 0;
4190
4191 PRTTHREADINT pThread = rtThreadGet(Thread);
4192 if (!pThread)
4193 return VERR_INVALID_HANDLE;
4194 int32_t cWriteLocks = ASMAtomicReadS32(&pThread->LockValidator.cWriteLocks);
4195 rtThreadRelease(pThread);
4196 return cWriteLocks;
4197}
4198RT_EXPORT_SYMBOL(RTLockValidatorWriteLockGetCount);
4199
4200
4201RTDECL(void) RTLockValidatorWriteLockInc(RTTHREAD Thread)
4202{
4203 PRTTHREADINT pThread = rtThreadGet(Thread);
4204 AssertReturnVoid(pThread);
4205 ASMAtomicIncS32(&pThread->LockValidator.cWriteLocks);
4206 rtThreadRelease(pThread);
4207}
4208RT_EXPORT_SYMBOL(RTLockValidatorWriteLockInc);
4209
4210
4211RTDECL(void) RTLockValidatorWriteLockDec(RTTHREAD Thread)
4212{
4213 PRTTHREADINT pThread = rtThreadGet(Thread);
4214 AssertReturnVoid(pThread);
4215 ASMAtomicDecS32(&pThread->LockValidator.cWriteLocks);
4216 rtThreadRelease(pThread);
4217}
4218RT_EXPORT_SYMBOL(RTLockValidatorWriteLockDec);
4219
4220
4221RTDECL(int32_t) RTLockValidatorReadLockGetCount(RTTHREAD Thread)
4222{
4223 if (Thread == NIL_RTTHREAD)
4224 return 0;
4225
4226 PRTTHREADINT pThread = rtThreadGet(Thread);
4227 if (!pThread)
4228 return VERR_INVALID_HANDLE;
4229 int32_t cReadLocks = ASMAtomicReadS32(&pThread->LockValidator.cReadLocks);
4230 rtThreadRelease(pThread);
4231 return cReadLocks;
4232}
4233RT_EXPORT_SYMBOL(RTLockValidatorReadLockGetCount);
4234
4235
4236RTDECL(void) RTLockValidatorReadLockInc(RTTHREAD Thread)
4237{
4238 PRTTHREADINT pThread = rtThreadGet(Thread);
4239 Assert(pThread);
4240 ASMAtomicIncS32(&pThread->LockValidator.cReadLocks);
4241 rtThreadRelease(pThread);
4242}
4243RT_EXPORT_SYMBOL(RTLockValidatorReadLockInc);
4244
4245
4246RTDECL(void) RTLockValidatorReadLockDec(RTTHREAD Thread)
4247{
4248 PRTTHREADINT pThread = rtThreadGet(Thread);
4249 Assert(pThread);
4250 ASMAtomicDecS32(&pThread->LockValidator.cReadLocks);
4251 rtThreadRelease(pThread);
4252}
4253RT_EXPORT_SYMBOL(RTLockValidatorReadLockDec);
4254
4255
RTDECL(void *) RTLockValidatorQueryBlocking(RTTHREAD hThread)
{
    void *pvLock = NULL;
    PRTTHREADINT pThread = rtThreadGet(hThread);
    if (pThread)
    {
        RTTHREADSTATE enmState = rtThreadGetState(pThread);
        if (RTTHREAD_IS_SLEEPING(enmState))
        {
            rtLockValidatorSerializeDetectionEnter();

            /* Re-check the state now that we hold the detection lock. */
            enmState = rtThreadGetState(pThread);
            if (RTTHREAD_IS_SLEEPING(enmState))
            {
                PRTLOCKVALRECUNION pRec = rtLockValidatorReadRecUnionPtr(&pThread->LockValidator.pRec);
                if (pRec)
                {
                    switch (pRec->Core.u32Magic)
                    {
                        case RTLOCKVALRECEXCL_MAGIC:
                            pvLock = pRec->Excl.hLock;
                            break;

                        case RTLOCKVALRECSHRDOWN_MAGIC:
                            /* Owner entry: resolve the shared record it belongs
                               to, bailing out if it is gone or invalid. */
                            pRec = (PRTLOCKVALRECUNION)pRec->ShrdOwner.pSharedRec;
                            if (!pRec || pRec->Core.u32Magic != RTLOCKVALRECSHRD_MAGIC)
                                break;
                            /* fall thru */
                        case RTLOCKVALRECSHRD_MAGIC:
                            pvLock = pRec->Shared.hLock;
                            break;
                    }
                    /* If the thread state changed while we were looking, the
                       record may no longer be what it is blocking on. */
                    if (RTThreadGetState(pThread) != enmState)
                        pvLock = NULL;
                }
            }

            rtLockValidatorSerializeDetectionLeave();
        }
        rtThreadRelease(pThread);
    }
    return pvLock;
}
RT_EXPORT_SYMBOL(RTLockValidatorQueryBlocking);
4299
4300
4301RTDECL(bool) RTLockValidatorIsBlockedThreadInValidator(RTTHREAD hThread)
4302{
4303 bool fRet = false;
4304 PRTTHREADINT pThread = rtThreadGet(hThread);
4305 if (pThread)
4306 {
4307 fRet = ASMAtomicReadBool(&pThread->LockValidator.fInValidator);
4308 rtThreadRelease(pThread);
4309 }
4310 return fRet;
4311}
4312RT_EXPORT_SYMBOL(RTLockValidatorIsBlockedThreadInValidator);
4313
4314
4315RTDECL(bool) RTLockValidatorHoldsLocksInClass(RTTHREAD hCurrentThread, RTLOCKVALCLASS hClass)
4316{
4317 bool fRet = false;
4318 if (hCurrentThread == NIL_RTTHREAD)
4319 hCurrentThread = RTThreadSelf();
4320 else
4321 Assert(hCurrentThread == RTThreadSelf());
4322 PRTTHREADINT pThread = rtThreadGet(hCurrentThread);
4323 if (pThread)
4324 {
4325 if (hClass != NIL_RTLOCKVALCLASS)
4326 {
4327 PRTLOCKVALRECUNION pCur = rtLockValidatorReadRecUnionPtr(&pThread->LockValidator.pStackTop);
4328 while (VALID_PTR(pCur) && !fRet)
4329 {
4330 switch (pCur->Core.u32Magic)
4331 {
4332 case RTLOCKVALRECEXCL_MAGIC:
4333 fRet = pCur->Excl.hClass == hClass;
4334 pCur = rtLockValidatorReadRecUnionPtr(&pCur->Excl.pDown);
4335 break;
4336 case RTLOCKVALRECSHRDOWN_MAGIC:
4337 fRet = VALID_PTR(pCur->ShrdOwner.pSharedRec)
4338 && pCur->ShrdOwner.pSharedRec->hClass == hClass;
4339 pCur = rtLockValidatorReadRecUnionPtr(&pCur->ShrdOwner.pDown);
4340 break;
4341 case RTLOCKVALRECNEST_MAGIC:
4342 switch (pCur->Nest.pRec->Core.u32Magic)
4343 {
4344 case RTLOCKVALRECEXCL_MAGIC:
4345 fRet = pCur->Nest.pRec->Excl.hClass == hClass;
4346 break;
4347 case RTLOCKVALRECSHRDOWN_MAGIC:
4348 fRet = VALID_PTR(pCur->ShrdOwner.pSharedRec)
4349 && pCur->Nest.pRec->ShrdOwner.pSharedRec->hClass == hClass;
4350 break;
4351 }
4352 pCur = rtLockValidatorReadRecUnionPtr(&pCur->Nest.pDown);
4353 break;
4354 default:
4355 pCur = NULL;
4356 break;
4357 }
4358 }
4359 }
4360
4361 rtThreadRelease(pThread);
4362 }
4363 return fRet;
4364}
4365RT_EXPORT_SYMBOL(RTLockValidatorHoldsLocksInClass);
4366
4367
4368RTDECL(bool) RTLockValidatorHoldsLocksInSubClass(RTTHREAD hCurrentThread, RTLOCKVALCLASS hClass, uint32_t uSubClass)
4369{
4370 bool fRet = false;
4371 if (hCurrentThread == NIL_RTTHREAD)
4372 hCurrentThread = RTThreadSelf();
4373 else
4374 Assert(hCurrentThread == RTThreadSelf());
4375 PRTTHREADINT pThread = rtThreadGet(hCurrentThread);
4376 if (pThread)
4377 {
4378 if (hClass != NIL_RTLOCKVALCLASS)
4379 {
4380 PRTLOCKVALRECUNION pCur = rtLockValidatorReadRecUnionPtr(&pThread->LockValidator.pStackTop);
4381 while (VALID_PTR(pCur) && !fRet)
4382 {
4383 switch (pCur->Core.u32Magic)
4384 {
4385 case RTLOCKVALRECEXCL_MAGIC:
4386 fRet = pCur->Excl.hClass == hClass
4387 && pCur->Excl.uSubClass == uSubClass;
4388 pCur = rtLockValidatorReadRecUnionPtr(&pCur->Excl.pDown);
4389 break;
4390 case RTLOCKVALRECSHRDOWN_MAGIC:
4391 fRet = VALID_PTR(pCur->ShrdOwner.pSharedRec)
4392 && pCur->ShrdOwner.pSharedRec->hClass == hClass
4393 && pCur->ShrdOwner.pSharedRec->uSubClass == uSubClass;
4394 pCur = rtLockValidatorReadRecUnionPtr(&pCur->ShrdOwner.pDown);
4395 break;
4396 case RTLOCKVALRECNEST_MAGIC:
4397 switch (pCur->Nest.pRec->Core.u32Magic)
4398 {
4399 case RTLOCKVALRECEXCL_MAGIC:
4400 fRet = pCur->Nest.pRec->Excl.hClass == hClass
4401 && pCur->Nest.pRec->Excl.uSubClass == uSubClass;
4402 break;
4403 case RTLOCKVALRECSHRDOWN_MAGIC:
4404 fRet = VALID_PTR(pCur->ShrdOwner.pSharedRec)
4405 && pCur->Nest.pRec->ShrdOwner.pSharedRec->hClass == hClass
4406 && pCur->Nest.pRec->ShrdOwner.pSharedRec->uSubClass == uSubClass;
4407 break;
4408 }
4409 pCur = rtLockValidatorReadRecUnionPtr(&pCur->Nest.pDown);
4410 break;
4411 default:
4412 pCur = NULL;
4413 break;
4414 }
4415 }
4416 }
4417
4418 rtThreadRelease(pThread);
4419 }
4420 return fRet;
4421}
4422RT_EXPORT_SYMBOL(RTLockValidatorHoldsLocksInClass);
4423
4424
4425RTDECL(bool) RTLockValidatorSetEnabled(bool fEnabled)
4426{
4427 return ASMAtomicXchgBool(&g_fLockValidatorEnabled, fEnabled);
4428}
4429RT_EXPORT_SYMBOL(RTLockValidatorSetEnabled);
4430
4431
4432RTDECL(bool) RTLockValidatorIsEnabled(void)
4433{
4434 return ASMAtomicUoReadBool(&g_fLockValidatorEnabled);
4435}
4436RT_EXPORT_SYMBOL(RTLockValidatorIsEnabled);
4437
4438
4439RTDECL(bool) RTLockValidatorSetQuiet(bool fQuiet)
4440{
4441 return ASMAtomicXchgBool(&g_fLockValidatorQuiet, fQuiet);
4442}
4443RT_EXPORT_SYMBOL(RTLockValidatorSetQuiet);
4444
4445
4446RTDECL(bool) RTLockValidatorIsQuiet(void)
4447{
4448 return ASMAtomicUoReadBool(&g_fLockValidatorQuiet);
4449}
4450RT_EXPORT_SYMBOL(RTLockValidatorIsQuiet);
4451
4452
4453RTDECL(bool) RTLockValidatorSetMayPanic(bool fMayPanic)
4454{
4455 return ASMAtomicXchgBool(&g_fLockValidatorMayPanic, fMayPanic);
4456}
4457RT_EXPORT_SYMBOL(RTLockValidatorSetMayPanic);
4458
4459
4460RTDECL(bool) RTLockValidatorMayPanic(void)
4461{
4462 return ASMAtomicUoReadBool(&g_fLockValidatorMayPanic);
4463}
4464RT_EXPORT_SYMBOL(RTLockValidatorMayPanic);
4465
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette