VirtualBox

source: vbox/trunk/src/VBox/Runtime/common/misc/lockvalidator.cpp@33679

Last change on this file since 33679 was 33540, checked in by vboxsync, 14 years ago

*: spelling fixes, thanks Timeless!

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 158.3 KB
/* $Id: lockvalidator.cpp 33540 2010-10-28 09:27:05Z vboxsync $ */
/** @file
 * IPRT - Lock Validator.
 */

/*
 * Copyright (C) 2009-2010 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * The contents of this file may alternatively be used under the terms
 * of the Common Development and Distribution License Version 1.0
 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
 * VirtualBox OSE distribution, in which case the provisions of the
 * CDDL are applicable instead of those of the GPL.
 *
 * You may elect to license modified versions of this file under the
 * terms and conditions of either the GPL or the CDDL or both.
 */

/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#include <iprt/lockvalidator.h>
#include "internal/iprt.h"

#include <iprt/asm.h>
#include <iprt/assert.h>
#include <iprt/env.h>
#include <iprt/err.h>
#include <iprt/mem.h>
#include <iprt/once.h>
#include <iprt/semaphore.h>
#include <iprt/string.h>
#include <iprt/thread.h>

#include "internal/lockvalidator.h"
#include "internal/magics.h"
#include "internal/thread.h"


/*******************************************************************************
*   Defined Constants And Macros                                               *
*******************************************************************************/
/** Macro that asserts that a pointer is aligned correctly.
 * Only used when fighting bugs. */
#if 1
# define RTLOCKVAL_ASSERT_PTR_ALIGN(p) \
    AssertMsg(!((uintptr_t)(p) & (sizeof(uintptr_t) - 1)), ("%p\n", (p)));
#else
# define RTLOCKVAL_ASSERT_PTR_ALIGN(p) do { } while (0)
#endif

/** Hashes the class handle (pointer) into an apPriorLocksHash index. */
#define RTLOCKVALCLASS_HASH(hClass) \
    (   ((uintptr_t)(hClass) >> 6 ) \
      % (   RT_SIZEOFMEMB(RTLOCKVALCLASSINT, apPriorLocksHash) \
          / sizeof(PRTLOCKVALCLASSREF)) )

/** The max value for RTLOCKVALCLASSINT::cRefs. */
#define RTLOCKVALCLASS_MAX_REFS             UINT32_C(0xffff0000)
/** The max value for RTLOCKVALCLASSREF::cLookups. */
#define RTLOCKVALCLASSREF_MAX_LOOKUPS       UINT32_C(0xfffe0000)
/** The absolute max value for RTLOCKVALCLASSREF::cLookups at which it will
 * be set back to RTLOCKVALCLASSREF_MAX_LOOKUPS. */
#define RTLOCKVALCLASSREF_MAX_LOOKUPS_FIX   UINT32_C(0xffff0000)


/** @def RTLOCKVAL_WITH_RECURSION_RECORDS
 * Enable recursion records. */
#if defined(IN_RING3) || defined(DOXYGEN_RUNNING)
# define RTLOCKVAL_WITH_RECURSION_RECORDS 1
#endif

/** @def RTLOCKVAL_WITH_VERBOSE_DUMPS
 * Enables some extra verbosity in the lock dumping. */
#if defined(DOXYGEN_RUNNING)
# define RTLOCKVAL_WITH_VERBOSE_DUMPS
#endif

/** @def RTLOCKVAL_WITH_CLASS_HASH_STATS
 * Enables collection of prior class hash lookup statistics, dumping them when
 * complaining about the class. */
#if defined(DEBUG) || defined(DOXYGEN_RUNNING)
# define RTLOCKVAL_WITH_CLASS_HASH_STATS
#endif


/*******************************************************************************
*   Structures and Typedefs                                                    *
*******************************************************************************/
/**
 * Deadlock detection stack entry.
 */
typedef struct RTLOCKVALDDENTRY
{
    /** The current record. */
    PRTLOCKVALRECUNION      pRec;
    /** The current entry number if pRec is a shared one. */
    uint32_t                iEntry;
    /** The thread state of the thread we followed to get to pFirstSibling.
     * This is only used for validating a deadlock stack. */
    RTTHREADSTATE           enmState;
    /** The thread we followed to get to pFirstSibling.
     * This is only used for validating a deadlock stack. */
    PRTTHREADINT            pThread;
    /** What pThread is waiting on, i.e. where we entered the circular list of
     * siblings.  This is used for validating a deadlock stack as well as
     * terminating the sibling walk. */
    PRTLOCKVALRECUNION      pFirstSibling;
} RTLOCKVALDDENTRY;


/**
 * Deadlock detection stack.
 */
typedef struct RTLOCKVALDDSTACK
{
    /** The number of stack entries. */
    uint32_t                c;
    /** The stack entries. */
    RTLOCKVALDDENTRY        a[32];
} RTLOCKVALDDSTACK;
/** Pointer to a deadlock detection stack. */
typedef RTLOCKVALDDSTACK *PRTLOCKVALDDSTACK;

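/* Conceptually, the deadlock detector walks the wait graph using the stack
 * above: it pushes one RTLOCKVALDDENTRY per "thread waits on a lock owned by
 * another thread" hop and reports a deadlock when the walk comes back around
 * to a record it has already visited.  A rough sketch of the idea only; the
 * actual detection code (not shown in this excerpt) performs considerably
 * more validation:
 *
 *     RTLOCKVALDDSTACK Stack;
 *     Stack.c = 0;
 *     // Push the record we are about to wait on, then repeatedly follow the
 *     // owner thread of Stack.a[Stack.c - 1].pRec to the record that thread
 *     // is waiting on and push that too.  pFirstSibling terminates a sibling
 *     // walk, and running out of the 32 entries aborts the search.
 */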

/**
 * Reference to another class.
 */
typedef struct RTLOCKVALCLASSREF
{
    /** The class. */
    RTLOCKVALCLASS          hClass;
    /** The number of lookups of this class. */
    uint32_t volatile       cLookups;
    /** Indicates whether the entry was added automatically during order checking
     *  (true) or manually via the API (false). */
    bool                    fAutodidacticism;
    /** Reserved / explicit alignment padding. */
    bool                    afReserved[3];
} RTLOCKVALCLASSREF;
/** Pointer to a class reference. */
typedef RTLOCKVALCLASSREF *PRTLOCKVALCLASSREF;


/** Pointer to a chunk of class references. */
typedef struct RTLOCKVALCLASSREFCHUNK *PRTLOCKVALCLASSREFCHUNK;
/**
 * Chunk of class references.
 */
typedef struct RTLOCKVALCLASSREFCHUNK
{
    /** Array of refs. */
#if 0 /** @todo for testing allocation of new chunks. */
    RTLOCKVALCLASSREF   aRefs[ARCH_BITS == 32 ? 10 : 8];
#else
    RTLOCKVALCLASSREF   aRefs[2];
#endif
    /** Pointer to the next chunk. */
    PRTLOCKVALCLASSREFCHUNK volatile pNext;
} RTLOCKVALCLASSREFCHUNK;


/**
 * Lock class.
 */
typedef struct RTLOCKVALCLASSINT
{
    /** AVL node core. */
    AVLLU32NODECORE         Core;
    /** Magic value (RTLOCKVALCLASS_MAGIC). */
    uint32_t volatile       u32Magic;
    /** Reference counter.  See RTLOCKVALCLASS_MAX_REFS. */
    uint32_t volatile       cRefs;
    /** Whether the class is allowed to teach itself new locking order rules. */
    bool                    fAutodidact;
    /** Whether to allow recursion. */
    bool                    fRecursionOk;
    /** Strict release order. */
    bool                    fStrictReleaseOrder;
    /** Whether this class is in the tree. */
    bool                    fInTree;
    /** Donate a reference to the next retainer.  This is a hack to make
     *  RTLockValidatorClassCreateUnique work. */
    bool volatile           fDonateRefToNextRetainer;
    /** Reserved future use / explicit alignment. */
    bool                    afReserved[3];
    /** The minimum wait interval for which we do deadlock detection
     *  (milliseconds). */
    RTMSINTERVAL            cMsMinDeadlock;
    /** The minimum wait interval for which we do order checks (milliseconds). */
    RTMSINTERVAL            cMsMinOrder;
    /** More padding. */
    uint32_t                au32Reserved[ARCH_BITS == 32 ? 5 : 2];
    /** Classes that may be taken prior to this one.
     * This is a linked list where each node contains a chunk of locks so that we
     * reduce the number of allocations as well as localize the data. */
    RTLOCKVALCLASSREFCHUNK  PriorLocks;
    /** Hash table containing frequently encountered prior locks. */
    PRTLOCKVALCLASSREF      apPriorLocksHash[17];
    /** Class name.  (Allocated after the end of the block as usual.) */
    char const             *pszName;
    /** Where this class was created.
     *  This is mainly used for finding automatically created lock classes.
     * @remarks The strings are stored after this structure so we won't crash
     *          if the class lives longer than the module (dll/so/dylib) that
     *          spawned it. */
    RTLOCKVALSRCPOS         CreatePos;
#ifdef RTLOCKVAL_WITH_CLASS_HASH_STATS
    /** Hash hits. */
    uint32_t volatile       cHashHits;
    /** Hash misses. */
    uint32_t volatile       cHashMisses;
#endif
} RTLOCKVALCLASSINT;
AssertCompileSize(AVLLU32NODECORE, ARCH_BITS == 32 ? 20 : 32);
AssertCompileMemberOffset(RTLOCKVALCLASSINT, PriorLocks, 64);


/*******************************************************************************
*   Global Variables                                                           *
*******************************************************************************/
/** Serializing object destruction and deadlock detection.
 *
 * This makes sure that none of the memory examined by the deadlock detection
 * code will become invalid (reused for other purposes or made not present)
 * while the detection is in progress.
 *
 * NS: RTLOCKVALREC*, RTTHREADINT and RTLOCKVALDRECSHRD::papOwners destruction.
 * EW: Deadlock detection and some related activities.
 */
static RTSEMXROADS      g_hLockValidatorXRoads = NIL_RTSEMXROADS;
/** Serializing class tree insert and lookups. */
static RTSEMRW          g_hLockValClassTreeRWLock = NIL_RTSEMRW;
/** Class tree. */
static PAVLLU32NODECORE g_LockValClassTree = NULL;
/** Critical section serializing the teaching of new rules to the classes. */
static RTCRITSECT       g_LockValClassTeachCS;

/** Whether the lock validator is enabled or disabled.
 * Only applies to new locks. */
static bool volatile    g_fLockValidatorEnabled = true;
/** Set if the lock validator is quiet. */
#ifdef RT_STRICT
static bool volatile    g_fLockValidatorQuiet = false;
#else
static bool volatile    g_fLockValidatorQuiet = true;
#endif
/** Set if the lock validator may panic. */
#ifdef RT_STRICT
static bool volatile    g_fLockValidatorMayPanic = true;
#else
static bool volatile    g_fLockValidatorMayPanic = false;
#endif
/** Whether to return an error status on wrong locking order. */
static bool volatile    g_fLockValSoftWrongOrder = false;


/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
static void     rtLockValidatorClassDestroy(RTLOCKVALCLASSINT *pClass);
static uint32_t rtLockValidatorStackDepth(PRTTHREADINT pThread);


/**
 * Lazy initialization of the lock validator globals.
 */
static void rtLockValidatorLazyInit(void)
{
    static uint32_t volatile s_fInitializing = false;
    if (ASMAtomicCmpXchgU32(&s_fInitializing, true, false))
    {
        /*
         * The locks.
         */
        if (!RTCritSectIsInitialized(&g_LockValClassTeachCS))
            RTCritSectInitEx(&g_LockValClassTeachCS, RTCRITSECT_FLAGS_NO_LOCK_VAL, NIL_RTLOCKVALCLASS,
                             RTLOCKVAL_SUB_CLASS_ANY, "RTLockVal-Teach");

        if (g_hLockValClassTreeRWLock == NIL_RTSEMRW)
        {
            RTSEMRW hSemRW;
            int rc = RTSemRWCreateEx(&hSemRW, RTSEMRW_FLAGS_NO_LOCK_VAL, NIL_RTLOCKVALCLASS, RTLOCKVAL_SUB_CLASS_ANY, "RTLockVal-Tree");
            if (RT_SUCCESS(rc))
                ASMAtomicWriteHandle(&g_hLockValClassTreeRWLock, hSemRW);
        }

        if (g_hLockValidatorXRoads == NIL_RTSEMXROADS)
        {
            RTSEMXROADS hXRoads;
            int rc = RTSemXRoadsCreate(&hXRoads);
            if (RT_SUCCESS(rc))
                ASMAtomicWriteHandle(&g_hLockValidatorXRoads, hXRoads);
        }

#ifdef IN_RING3
        /*
         * Check the environment for our config variables.
         */
        if (RTEnvExist("IPRT_LOCK_VALIDATOR_ENABLED"))
            ASMAtomicWriteBool(&g_fLockValidatorEnabled, true);
        if (RTEnvExist("IPRT_LOCK_VALIDATOR_DISABLED"))
            ASMAtomicWriteBool(&g_fLockValidatorEnabled, false);

        if (RTEnvExist("IPRT_LOCK_VALIDATOR_MAY_PANIC"))
            ASMAtomicWriteBool(&g_fLockValidatorMayPanic, true);
        if (RTEnvExist("IPRT_LOCK_VALIDATOR_MAY_NOT_PANIC"))
            ASMAtomicWriteBool(&g_fLockValidatorMayPanic, false);

        if (RTEnvExist("IPRT_LOCK_VALIDATOR_NOT_QUIET"))
            ASMAtomicWriteBool(&g_fLockValidatorQuiet, false);
        if (RTEnvExist("IPRT_LOCK_VALIDATOR_QUIET"))
            ASMAtomicWriteBool(&g_fLockValidatorQuiet, true);

        if (RTEnvExist("IPRT_LOCK_VALIDATOR_STRICT_ORDER"))
            ASMAtomicWriteBool(&g_fLockValSoftWrongOrder, false);
        if (RTEnvExist("IPRT_LOCK_VALIDATOR_SOFT_ORDER"))
            ASMAtomicWriteBool(&g_fLockValSoftWrongOrder, true);
#endif

        /*
         * Register cleanup
         */
        /** @todo register some cleanup callback if we care. */

        ASMAtomicWriteU32(&s_fInitializing, false);
    }
}

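/* Illustrative example: the checks above only test for the *existence* of the
 * environment variables, not their values.  Enabling a chatty, panicking
 * validator for a debug session could therefore look like this (the binary
 * name is hypothetical):
 *
 *     IPRT_LOCK_VALIDATOR_ENABLED=1 IPRT_LOCK_VALIDATOR_NOT_QUIET=1 \
 *     IPRT_LOCK_VALIDATOR_MAY_PANIC=1 ./my-iprt-app
 */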


/** Wrapper around ASMAtomicReadPtr. */
DECL_FORCE_INLINE(PRTLOCKVALRECUNION) rtLockValidatorReadRecUnionPtr(PRTLOCKVALRECUNION volatile *ppRec)
{
    PRTLOCKVALRECUNION p = ASMAtomicReadPtrT(ppRec, PRTLOCKVALRECUNION);
    RTLOCKVAL_ASSERT_PTR_ALIGN(p);
    return p;
}


/** Wrapper around ASMAtomicWritePtr. */
DECL_FORCE_INLINE(void) rtLockValidatorWriteRecUnionPtr(PRTLOCKVALRECUNION volatile *ppRec, PRTLOCKVALRECUNION pRecNew)
{
    RTLOCKVAL_ASSERT_PTR_ALIGN(pRecNew);
    ASMAtomicWritePtr(ppRec, pRecNew);
}


/** Wrapper around ASMAtomicReadPtr. */
DECL_FORCE_INLINE(PRTTHREADINT) rtLockValidatorReadThreadHandle(RTTHREAD volatile *phThread)
{
    PRTTHREADINT p = ASMAtomicReadPtrT(phThread, PRTTHREADINT);
    RTLOCKVAL_ASSERT_PTR_ALIGN(p);
    return p;
}


/** Wrapper around ASMAtomicUoReadPtr. */
DECL_FORCE_INLINE(PRTLOCKVALRECSHRDOWN) rtLockValidatorUoReadSharedOwner(PRTLOCKVALRECSHRDOWN volatile *ppOwner)
{
    PRTLOCKVALRECSHRDOWN p = ASMAtomicUoReadPtrT(ppOwner, PRTLOCKVALRECSHRDOWN);
    RTLOCKVAL_ASSERT_PTR_ALIGN(p);
    return p;
}


/**
 * Reads a volatile thread handle field and returns the thread name.
 *
 * @returns Thread name (read only).
 * @param   phThread            The thread handle field.
 */
static const char *rtLockValidatorNameThreadHandle(RTTHREAD volatile *phThread)
{
    PRTTHREADINT pThread = rtLockValidatorReadThreadHandle(phThread);
    if (!pThread)
        return "<NIL>";
    if (!VALID_PTR(pThread))
        return "<INVALID>";
    if (pThread->u32Magic != RTTHREADINT_MAGIC)
        return "<BAD-THREAD-MAGIC>";
    return pThread->szName;
}


/**
 * Launch a simple assertion-like complaint w/ panic.
 *
 * @param   pszFile             Where from - file.
 * @param   iLine               Where from - line.
 * @param   pszFunction         Where from - function.
 * @param   pszWhat             What we're complaining about.
 * @param   ...                 Format arguments.
 */
static void rtLockValComplain(RT_SRC_POS_DECL, const char *pszWhat, ...)
{
    if (!ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
    {
        RTAssertMsg1Weak("RTLockValidator", iLine, pszFile, pszFunction);
        va_list va;
        va_start(va, pszWhat);
        RTAssertMsg2WeakV(pszWhat, va);
        va_end(va);
    }
    if (!ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
        RTAssertPanic();
}


/**
 * Describes the class.
 *
 * @param   pszPrefix           Message prefix.
 * @param   pClass              The class to complain about.
 * @param   uSubClass           My sub-class.
 * @param   fVerbose            Verbose description including relations to other
 *                              classes.
 */
static void rtLockValComplainAboutClass(const char *pszPrefix, RTLOCKVALCLASSINT *pClass, uint32_t uSubClass, bool fVerbose)
{
    if (ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
        return;

    /* Stringify the sub-class. */
    const char *pszSubClass;
    char        szSubClass[32];
    if (uSubClass < RTLOCKVAL_SUB_CLASS_USER)
        switch (uSubClass)
        {
            case RTLOCKVAL_SUB_CLASS_NONE: pszSubClass = "none"; break;
            case RTLOCKVAL_SUB_CLASS_ANY:  pszSubClass = "any"; break;
            default:
                RTStrPrintf(szSubClass, sizeof(szSubClass), "invl-%u", uSubClass);
                pszSubClass = szSubClass;
                break;
        }
    else
    {
        RTStrPrintf(szSubClass, sizeof(szSubClass), "%u", uSubClass);
        pszSubClass = szSubClass;
    }

    /* Validate the class pointer. */
    if (!VALID_PTR(pClass))
    {
        RTAssertMsg2AddWeak("%sbad class=%p sub-class=%s\n", pszPrefix, pClass, pszSubClass);
        return;
    }
    if (pClass->u32Magic != RTLOCKVALCLASS_MAGIC)
    {
        RTAssertMsg2AddWeak("%sbad class=%p magic=%#x sub-class=%s\n", pszPrefix, pClass, pClass->u32Magic, pszSubClass);
        return;
    }

    /* OK, dump the class info. */
    RTAssertMsg2AddWeak("%sclass=%p %s created={%Rbn(%u) %Rfn %p} sub-class=%s\n", pszPrefix,
                        pClass,
                        pClass->pszName,
                        pClass->CreatePos.pszFile,
                        pClass->CreatePos.uLine,
                        pClass->CreatePos.pszFunction,
                        pClass->CreatePos.uId,
                        pszSubClass);
    if (fVerbose)
    {
        uint32_t i        = 0;
        uint32_t cPrinted = 0;
        for (PRTLOCKVALCLASSREFCHUNK pChunk = &pClass->PriorLocks; pChunk; pChunk = pChunk->pNext)
            for (unsigned j = 0; j < RT_ELEMENTS(pChunk->aRefs); j++, i++)
            {
                RTLOCKVALCLASSINT *pCurClass = pChunk->aRefs[j].hClass;
                if (pCurClass != NIL_RTLOCKVALCLASS)
                {
                    RTAssertMsg2AddWeak("%s%s #%02u: %s, %s, %u lookup%s\n", pszPrefix,
                                        cPrinted == 0
                                        ? "Prior:"
                                        : "      ",
                                        i,
                                        pCurClass->pszName,
                                        pChunk->aRefs[j].fAutodidacticism
                                        ? "autodidactic"
                                        : "manually    ",
                                        pChunk->aRefs[j].cLookups,
                                        pChunk->aRefs[j].cLookups != 1 ? "s" : "");
                    cPrinted++;
                }
            }
        if (!cPrinted)
            RTAssertMsg2AddWeak("%sPrior: none\n", pszPrefix);
#ifdef RTLOCKVAL_WITH_CLASS_HASH_STATS
        RTAssertMsg2AddWeak("%sHash Stats: %u hits, %u misses\n", pszPrefix, pClass->cHashHits, pClass->cHashMisses);
#endif
    }
    else
    {
        uint32_t cPrinted = 0;
        for (PRTLOCKVALCLASSREFCHUNK pChunk = &pClass->PriorLocks; pChunk; pChunk = pChunk->pNext)
            for (unsigned j = 0; j < RT_ELEMENTS(pChunk->aRefs); j++)
            {
                RTLOCKVALCLASSINT *pCurClass = pChunk->aRefs[j].hClass;
                if (pCurClass != NIL_RTLOCKVALCLASS)
                {
                    if ((cPrinted % 10) == 0)
                        RTAssertMsg2AddWeak("%sPrior classes: %s%s", pszPrefix, pCurClass->pszName,
                                            pChunk->aRefs[j].fAutodidacticism ? "*" : "");
                    else if ((cPrinted % 10) != 9)
                        RTAssertMsg2AddWeak(", %s%s", pCurClass->pszName,
                                            pChunk->aRefs[j].fAutodidacticism ? "*" : "");
                    else
                        RTAssertMsg2AddWeak(", %s%s\n", pCurClass->pszName,
                                            pChunk->aRefs[j].fAutodidacticism ? "*" : "");
                    cPrinted++;
                }
            }
        if (!cPrinted)
            RTAssertMsg2AddWeak("%sPrior classes: none\n", pszPrefix);
        else if ((cPrinted % 10) != 0)
            RTAssertMsg2AddWeak("\n");
    }
}


/**
 * Helper for getting the class name.
 * @returns Class name string.
 * @param   pClass              The class.
 */
static const char *rtLockValComplainGetClassName(RTLOCKVALCLASSINT *pClass)
{
    if (!pClass)
        return "<nil-class>";
    if (!VALID_PTR(pClass))
        return "<bad-class-ptr>";
    if (pClass->u32Magic != RTLOCKVALCLASS_MAGIC)
        return "<bad-class-magic>";
    if (!pClass->pszName)
        return "<no-class-name>";
    return pClass->pszName;
}

/**
 * Formats the sub-class.
 *
 * @returns Stringified sub-class.
 * @param   uSubClass           The sub-class number.
 * @param   pszBuf              Buffer that is big enough (at least 32 bytes).
 */
static const char *rtLockValComplainGetSubClassName(uint32_t uSubClass, char *pszBuf)
{
    if (uSubClass < RTLOCKVAL_SUB_CLASS_USER)
        switch (uSubClass)
        {
            case RTLOCKVAL_SUB_CLASS_NONE:  return "none";
            case RTLOCKVAL_SUB_CLASS_ANY:   return "any";
            default:
                RTStrPrintf(pszBuf, 32, "invl-%u", uSubClass);
                break;
        }
    else
        RTStrPrintf(pszBuf, 32, "%x", uSubClass);
    return pszBuf;
}


/**
 * Helper for rtLockValComplainAboutLock.
 */
DECL_FORCE_INLINE(void) rtLockValComplainAboutLockHlp(const char *pszPrefix, PRTLOCKVALRECUNION pRec, const char *pszSuffix,
                                                      uint32_t u32Magic, PCRTLOCKVALSRCPOS pSrcPos, uint32_t cRecursion,
                                                      const char *pszFrameType)
{
    char szBuf[32];
    switch (u32Magic)
    {
        case RTLOCKVALRECEXCL_MAGIC:
#ifdef RTLOCKVAL_WITH_VERBOSE_DUMPS
            RTAssertMsg2AddWeak("%s%p %s xrec=%p own=%s r=%u cls=%s/%s pos={%Rbn(%u) %Rfn %p} [x%s]%s", pszPrefix,
                                pRec->Excl.hLock, pRec->Excl.szName, pRec,
                                rtLockValidatorNameThreadHandle(&pRec->Excl.hThread), cRecursion,
                                rtLockValComplainGetClassName(pRec->Excl.hClass),
                                rtLockValComplainGetSubClassName(pRec->Excl.uSubClass, szBuf),
                                pSrcPos->pszFile, pSrcPos->uLine, pSrcPos->pszFunction, pSrcPos->uId,
                                pszFrameType, pszSuffix);
#else
            RTAssertMsg2AddWeak("%s%p %s own=%s r=%u cls=%s/%s pos={%Rbn(%u) %Rfn %p} [x%s]%s", pszPrefix,
                                pRec->Excl.hLock, pRec->Excl.szName,
                                rtLockValidatorNameThreadHandle(&pRec->Excl.hThread), cRecursion,
                                rtLockValComplainGetClassName(pRec->Excl.hClass),
                                rtLockValComplainGetSubClassName(pRec->Excl.uSubClass, szBuf),
                                pSrcPos->pszFile, pSrcPos->uLine, pSrcPos->pszFunction, pSrcPos->uId,
                                pszFrameType, pszSuffix);
#endif
            break;

        case RTLOCKVALRECSHRD_MAGIC:
            RTAssertMsg2AddWeak("%ss %p %s srec=%p cls=%s/%s [s%s]%s", pszPrefix,
                                pRec->Shared.hLock, pRec->Shared.szName, pRec,
                                rtLockValComplainGetClassName(pRec->Shared.hClass),
                                rtLockValComplainGetSubClassName(pRec->Shared.uSubClass, szBuf),
                                pszFrameType, pszSuffix);
            break;

        case RTLOCKVALRECSHRDOWN_MAGIC:
        {
            PRTLOCKVALRECSHRD pShared = pRec->ShrdOwner.pSharedRec;
            if (   VALID_PTR(pShared)
                && pShared->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC)
#ifdef RTLOCKVAL_WITH_VERBOSE_DUMPS
                RTAssertMsg2AddWeak("%s%p %s srec=%p trec=%p own=%s r=%u cls=%s/%s pos={%Rbn(%u) %Rfn %p} [o%s]%s", pszPrefix,
                                    pShared->hLock, pShared->szName, pShared,
                                    pRec, rtLockValidatorNameThreadHandle(&pRec->ShrdOwner.hThread), cRecursion,
                                    rtLockValComplainGetClassName(pShared->hClass),
                                    rtLockValComplainGetSubClassName(pShared->uSubClass, szBuf),
                                    pSrcPos->pszFile, pSrcPos->uLine, pSrcPos->pszFunction, pSrcPos->uId,
                                    pszFrameType, pszSuffix);
#else
                RTAssertMsg2AddWeak("%s%p %s own=%s r=%u cls=%s/%s pos={%Rbn(%u) %Rfn %p} [o%s]%s", pszPrefix,
                                    pShared->hLock, pShared->szName,
                                    rtLockValidatorNameThreadHandle(&pRec->ShrdOwner.hThread), cRecursion,
                                    rtLockValComplainGetClassName(pShared->hClass),
                                    rtLockValComplainGetSubClassName(pShared->uSubClass, szBuf),
                                    pSrcPos->pszFile, pSrcPos->uLine, pSrcPos->pszFunction, pSrcPos->uId,
                                    pszFrameType, pszSuffix);
#endif
            else
                RTAssertMsg2AddWeak("%sbad srec=%p trec=%p own=%s r=%u pos={%Rbn(%u) %Rfn %p} [x%s]%s", pszPrefix,
                                    pShared,
                                    pRec, rtLockValidatorNameThreadHandle(&pRec->ShrdOwner.hThread), cRecursion,
                                    pSrcPos->pszFile, pSrcPos->uLine, pSrcPos->pszFunction, pSrcPos->uId,
                                    pszFrameType, pszSuffix);
            break;
        }

        default:
            AssertMsgFailed(("%#x\n", u32Magic));
    }
}


/**
 * Describes the lock.
 *
 * @param   pszPrefix           Message prefix.
 * @param   pRec                The lock record we're working on.
 * @param   pszSuffix           Message suffix.
 */
static void rtLockValComplainAboutLock(const char *pszPrefix, PRTLOCKVALRECUNION pRec, const char *pszSuffix)
{
#ifdef RTLOCKVAL_WITH_RECURSION_RECORDS
# define FIX_REC(r)     1
#else
# define FIX_REC(r)     (r)
#endif
    if (   VALID_PTR(pRec)
        && !ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
    {
        switch (pRec->Core.u32Magic)
        {
            case RTLOCKVALRECEXCL_MAGIC:
                rtLockValComplainAboutLockHlp(pszPrefix, pRec, pszSuffix, RTLOCKVALRECEXCL_MAGIC,
                                              &pRec->Excl.SrcPos, FIX_REC(pRec->Excl.cRecursion), "");
                break;

            case RTLOCKVALRECSHRD_MAGIC:
                rtLockValComplainAboutLockHlp(pszPrefix, pRec, pszSuffix, RTLOCKVALRECSHRD_MAGIC, NULL, 0, "");
                break;

            case RTLOCKVALRECSHRDOWN_MAGIC:
                rtLockValComplainAboutLockHlp(pszPrefix, pRec, pszSuffix, RTLOCKVALRECSHRDOWN_MAGIC,
                                              &pRec->ShrdOwner.SrcPos, FIX_REC(pRec->ShrdOwner.cRecursion), "");
                break;

            case RTLOCKVALRECNEST_MAGIC:
            {
                PRTLOCKVALRECUNION pRealRec = pRec->Nest.pRec;
                uint32_t u32Magic;
                if (   VALID_PTR(pRealRec)
                    && (   (u32Magic = pRealRec->Core.u32Magic) == RTLOCKVALRECEXCL_MAGIC
                        || u32Magic == RTLOCKVALRECSHRD_MAGIC
                        || u32Magic == RTLOCKVALRECSHRDOWN_MAGIC)
                   )
                    rtLockValComplainAboutLockHlp(pszPrefix, pRealRec, pszSuffix, u32Magic,
                                                  &pRec->Nest.SrcPos, pRec->Nest.cRecursion, "/r");
                else
                    RTAssertMsg2AddWeak("%sbad rrec=%p nrec=%p r=%u pos={%Rbn(%u) %Rfn %p}%s", pszPrefix,
                                        pRealRec, pRec, pRec->Nest.cRecursion,
                                        pRec->Nest.SrcPos.pszFile, pRec->Nest.SrcPos.uLine, pRec->Nest.SrcPos.pszFunction, pRec->Nest.SrcPos.uId,
                                        pszSuffix);
                break;
            }

            default:
                RTAssertMsg2AddWeak("%spRec=%p u32Magic=%#x (bad)%s", pszPrefix, pRec, pRec->Core.u32Magic, pszSuffix);
                break;
        }
    }
#undef FIX_REC
}


/**
 * Dump the lock stack.
 *
 * @param   pThread             The thread whose lock stack we're going to dump.
 * @param   cchIndent           The indentation in chars.
 * @param   cMinFrames          The minimum number of frames to consider
 *                              dumping.
 * @param   pHighlightRec       Record that should be marked specially in the
 *                              dump.
 */
static void rtLockValComplainAboutLockStack(PRTTHREADINT pThread, unsigned cchIndent, uint32_t cMinFrames,
                                            PRTLOCKVALRECUNION pHighlightRec)
{
    if (   VALID_PTR(pThread)
        && !ASMAtomicUoReadBool(&g_fLockValidatorQuiet)
        && pThread->u32Magic == RTTHREADINT_MAGIC
       )
    {
        uint32_t cEntries = rtLockValidatorStackDepth(pThread);
        if (cEntries >= cMinFrames)
        {
            RTAssertMsg2AddWeak("%*s---- start of lock stack for %p %s - %u entr%s ----\n", cchIndent, "",
                                pThread, pThread->szName, cEntries, cEntries == 1 ? "y" : "ies");
            PRTLOCKVALRECUNION pCur = rtLockValidatorReadRecUnionPtr(&pThread->LockValidator.pStackTop);
            for (uint32_t i = 0; VALID_PTR(pCur); i++)
            {
                char szPrefix[80];
                RTStrPrintf(szPrefix, sizeof(szPrefix), "%*s#%02u: ", cchIndent, "", i);
                rtLockValComplainAboutLock(szPrefix, pCur, pHighlightRec != pCur ? "\n" : " (*)\n");
                switch (pCur->Core.u32Magic)
                {
                    case RTLOCKVALRECEXCL_MAGIC:    pCur = rtLockValidatorReadRecUnionPtr(&pCur->Excl.pDown); break;
                    case RTLOCKVALRECSHRDOWN_MAGIC: pCur = rtLockValidatorReadRecUnionPtr(&pCur->ShrdOwner.pDown); break;
                    case RTLOCKVALRECNEST_MAGIC:    pCur = rtLockValidatorReadRecUnionPtr(&pCur->Nest.pDown); break;
                    default:
                        RTAssertMsg2AddWeak("%*s<bad stack frame>\n", cchIndent, "");
                        pCur = NULL;
                        break;
                }
            }
            RTAssertMsg2AddWeak("%*s---- end of lock stack ----\n", cchIndent, "");
        }
    }
}


/**
 * Launch the initial complaint.
 *
 * @param   pszWhat             What we're complaining about.
 * @param   pSrcPos             Where we are complaining from, as it were.
 * @param   pThreadSelf         The calling thread.
 * @param   pRec                The main lock involved.  Can be NULL.
 * @param   fDumpStack          Whether to dump the lock stack (true) or not
 *                              (false).
 */
static void rtLockValComplainFirst(const char *pszWhat, PCRTLOCKVALSRCPOS pSrcPos, PRTTHREADINT pThreadSelf,
                                   PRTLOCKVALRECUNION pRec, bool fDumpStack)
{
    if (!ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
    {
        ASMCompilerBarrier(); /* paranoia */
        RTAssertMsg1Weak("RTLockValidator", pSrcPos ? pSrcPos->uLine : 0, pSrcPos ? pSrcPos->pszFile : NULL, pSrcPos ? pSrcPos->pszFunction : NULL);
        if (pSrcPos && pSrcPos->uId)
            RTAssertMsg2Weak("%s [uId=%p thrd=%s]\n", pszWhat, pSrcPos->uId, VALID_PTR(pThreadSelf) ? pThreadSelf->szName : "<NIL>");
        else
            RTAssertMsg2Weak("%s [thrd=%s]\n", pszWhat, VALID_PTR(pThreadSelf) ? pThreadSelf->szName : "<NIL>");
        rtLockValComplainAboutLock("Lock: ", pRec, "\n");
        if (fDumpStack)
            rtLockValComplainAboutLockStack(pThreadSelf, 0, 1, pRec);
    }
}


/**
 * Continue bitching.
 *
 * @param   pszFormat           Format string.
 * @param   ...                 Format arguments.
 */
static void rtLockValComplainMore(const char *pszFormat, ...)
{
    if (!ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
    {
        va_list va;
        va_start(va, pszFormat);
        RTAssertMsg2AddWeakV(pszFormat, va);
        va_end(va);
    }
}

/**
 * Raise a panic if enabled.
 */
static void rtLockValComplainPanic(void)
{
    if (ASMAtomicUoReadBool(&g_fLockValidatorMayPanic))
        RTAssertPanic();
}


/**
 * Copy a source position record.
 *
 * @param   pDst                The destination.
 * @param   pSrc                The source.  Can be NULL.
 */
DECL_FORCE_INLINE(void) rtLockValidatorSrcPosCopy(PRTLOCKVALSRCPOS pDst, PCRTLOCKVALSRCPOS pSrc)
{
    if (pSrc)
    {
        ASMAtomicUoWriteU32(&pDst->uLine, pSrc->uLine);
        ASMAtomicUoWritePtr(&pDst->pszFile, pSrc->pszFile);
        ASMAtomicUoWritePtr(&pDst->pszFunction, pSrc->pszFunction);
        ASMAtomicUoWritePtr((void * volatile *)&pDst->uId, (void *)pSrc->uId);
    }
    else
    {
        ASMAtomicUoWriteU32(&pDst->uLine, 0);
        ASMAtomicUoWriteNullPtr(&pDst->pszFile);
        ASMAtomicUoWriteNullPtr(&pDst->pszFunction);
        ASMAtomicUoWritePtr(&pDst->uId, (RTHCUINTPTR)0);
    }
}


/**
 * Init a source position record.
 *
 * @param   pSrcPos             The source position record.
 */
DECL_FORCE_INLINE(void) rtLockValidatorSrcPosInit(PRTLOCKVALSRCPOS pSrcPos)
{
    pSrcPos->pszFile        = NULL;
    pSrcPos->pszFunction    = NULL;
    pSrcPos->uId            = 0;
    pSrcPos->uLine          = 0;
#if HC_ARCH_BITS == 64
    pSrcPos->u32Padding     = 0;
#endif
}


/* sdbm:
   This algorithm was created for sdbm (a public-domain reimplementation of
   ndbm) database library. it was found to do well in scrambling bits,
   causing better distribution of the keys and fewer splits. it also happens
   to be a good general hashing function with good distribution. the actual
   function is hash(i) = hash(i - 1) * 65599 + str[i]; what is included below
   is the faster version used in gawk. [there is even a faster, duff-device
   version] the magic constant 65599 was picked out of thin air while
   experimenting with different constants, and turns out to be a prime.
   this is one of the algorithms used in berkeley db (see sleepycat) and
   elsewhere. */
DECL_FORCE_INLINE(uint32_t) sdbm(const char *str, uint32_t hash)
{
    uint8_t *pu8 = (uint8_t *)str;
    int      c;

    while ((c = *pu8++))
        hash = c + (hash << 6) + (hash << 16) - hash;

    return hash;
}

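/* The shift expression above is simply a strength-reduced form of the stated
 * recurrence hash(i) = hash(i - 1) * 65599 + str[i]:
 * 65599 = 65536 + 64 - 1, hence hash * 65599 = (hash << 16) + (hash << 6) - hash. */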

/**
 * Hashes the specified source position.
 *
 * @returns Hash.
 * @param   pSrcPos             The source position record.
 */
static uint32_t rtLockValidatorSrcPosHash(PCRTLOCKVALSRCPOS pSrcPos)
{
    uint32_t uHash;
    if (   (   pSrcPos->pszFile
            || pSrcPos->pszFunction)
        && pSrcPos->uLine != 0)
    {
        uHash = 0;
        if (pSrcPos->pszFile)
            uHash = sdbm(pSrcPos->pszFile, uHash);
        if (pSrcPos->pszFunction)
            uHash = sdbm(pSrcPos->pszFunction, uHash);
        uHash += pSrcPos->uLine;
    }
    else
    {
        Assert(pSrcPos->uId);
        uHash = (uint32_t)pSrcPos->uId;
    }

    return uHash;
}


/**
 * Compares two source positions.
 *
 * @returns 0 if equal, < 0 if pSrcPos1 is smaller than pSrcPos2, > 0
 *          otherwise.
 * @param   pSrcPos1            The first source position.
 * @param   pSrcPos2            The second source position.
 */
static int rtLockValidatorSrcPosCompare(PCRTLOCKVALSRCPOS pSrcPos1, PCRTLOCKVALSRCPOS pSrcPos2)
{
    if (pSrcPos1->uLine != pSrcPos2->uLine)
        return pSrcPos1->uLine < pSrcPos2->uLine ? -1 : 1;

    int iDiff = RTStrCmp(pSrcPos1->pszFile, pSrcPos2->pszFile);
    if (iDiff != 0)
        return iDiff;

    iDiff = RTStrCmp(pSrcPos1->pszFunction, pSrcPos2->pszFunction);
    if (iDiff != 0)
        return iDiff;

    if (pSrcPos1->uId != pSrcPos2->uId)
        return pSrcPos1->uId < pSrcPos2->uId ? -1 : 1;
    return 0;
}



/**
 * Serializes destruction of RTLOCKVALREC* and RTTHREADINT structures.
 */
DECLHIDDEN(void) rtLockValidatorSerializeDestructEnter(void)
{
    RTSEMXROADS hXRoads = g_hLockValidatorXRoads;
    if (hXRoads != NIL_RTSEMXROADS)
        RTSemXRoadsNSEnter(hXRoads);
}


/**
 * Call after rtLockValidatorSerializeDestructEnter.
 */
DECLHIDDEN(void) rtLockValidatorSerializeDestructLeave(void)
{
    RTSEMXROADS hXRoads = g_hLockValidatorXRoads;
    if (hXRoads != NIL_RTSEMXROADS)
        RTSemXRoadsNSLeave(hXRoads);
}


/**
 * Serializes deadlock detection against destruction of the objects being
 * inspected.
 */
DECLINLINE(void) rtLockValidatorSerializeDetectionEnter(void)
{
    RTSEMXROADS hXRoads = g_hLockValidatorXRoads;
    if (hXRoads != NIL_RTSEMXROADS)
        RTSemXRoadsEWEnter(hXRoads);
}


/**
 * Call after rtLockValidatorSerializeDetectionEnter.
 */
DECLHIDDEN(void) rtLockValidatorSerializeDetectionLeave(void)
{
    RTSEMXROADS hXRoads = g_hLockValidatorXRoads;
    if (hXRoads != NIL_RTSEMXROADS)
        RTSemXRoadsEWLeave(hXRoads);
}


/**
 * Initializes the per thread lock validator data.
 *
 * @param   pPerThread          The data.
 */
DECLHIDDEN(void) rtLockValidatorInitPerThread(RTLOCKVALPERTHREAD *pPerThread)
{
    pPerThread->bmFreeShrdOwners = UINT32_MAX;

    /* ASSUMES the rest has already been zeroed. */
    Assert(pPerThread->pRec == NULL);
    Assert(pPerThread->cWriteLocks == 0);
    Assert(pPerThread->cReadLocks == 0);
    Assert(pPerThread->fInValidator == false);
    Assert(pPerThread->pStackTop == NULL);
}


/**
 * Delete the per thread lock validator data.
 *
 * @param   pPerThread          The data.
 */
DECLHIDDEN(void) rtLockValidatorDeletePerThread(RTLOCKVALPERTHREAD *pPerThread)
{
    /*
     * Check that the thread doesn't own any locks at this time.
     */
    if (pPerThread->pStackTop)
    {
        rtLockValComplainFirst("Thread terminating owning locks!", NULL,
                               RT_FROM_MEMBER(pPerThread, RTTHREADINT, LockValidator),
                               pPerThread->pStackTop, true);
        rtLockValComplainPanic();
    }

    /*
     * Free the recursion records.
     */
    PRTLOCKVALRECNEST pCur = pPerThread->pFreeNestRecs;
    pPerThread->pFreeNestRecs = NULL;
    while (pCur)
    {
        PRTLOCKVALRECNEST pNext = pCur->pNextFree;
        RTMemFree(pCur);
        pCur = pNext;
    }
}

RTDECL(int) RTLockValidatorClassCreateEx(PRTLOCKVALCLASS phClass, PCRTLOCKVALSRCPOS pSrcPos,
                                         bool fAutodidact, bool fRecursionOk, bool fStrictReleaseOrder,
                                         RTMSINTERVAL cMsMinDeadlock, RTMSINTERVAL cMsMinOrder,
                                         const char *pszNameFmt, ...)
{
    va_list va;
    va_start(va, pszNameFmt);
    int rc = RTLockValidatorClassCreateExV(phClass, pSrcPos, fAutodidact, fRecursionOk, fStrictReleaseOrder,
                                           cMsMinDeadlock, cMsMinOrder, pszNameFmt, va);
    va_end(va);
    return rc;
}


RTDECL(int) RTLockValidatorClassCreateExV(PRTLOCKVALCLASS phClass, PCRTLOCKVALSRCPOS pSrcPos,
                                          bool fAutodidact, bool fRecursionOk, bool fStrictReleaseOrder,
                                          RTMSINTERVAL cMsMinDeadlock, RTMSINTERVAL cMsMinOrder,
                                          const char *pszNameFmt, va_list va)
{
    Assert(cMsMinDeadlock >= 1);
    Assert(cMsMinOrder    >= 1);
    AssertPtr(pSrcPos);

    /*
     * Format the name and calc its length.
     */
    size_t cbName;
    char   szName[32];
    if (pszNameFmt && *pszNameFmt)
        cbName = RTStrPrintfV(szName, sizeof(szName), pszNameFmt, va) + 1;
    else
    {
        static uint32_t volatile s_cAnonymous = 0;
        uint32_t i = ASMAtomicIncU32(&s_cAnonymous);
        cbName = RTStrPrintf(szName, sizeof(szName), "anon-%u", i - 1) + 1;
    }

    /*
     * Figure out the file and function name lengths and allocate memory for
     * it all.
     */
    size_t const cbFile     = pSrcPos->pszFile     ? strlen(pSrcPos->pszFile) + 1 : 0;
    size_t const cbFunction = pSrcPos->pszFunction ? strlen(pSrcPos->pszFunction) + 1 : 0;
    RTLOCKVALCLASSINT *pThis = (RTLOCKVALCLASSINT *)RTMemAllocVar(sizeof(*pThis) + cbFile + cbFunction + cbName);
    if (!pThis)
        return VERR_NO_MEMORY;

    /*
     * Initialize the class data.
     */
    pThis->Core.Key             = rtLockValidatorSrcPosHash(pSrcPos);
    pThis->Core.uchHeight       = 0;
    pThis->Core.pLeft           = NULL;
    pThis->Core.pRight          = NULL;
    pThis->Core.pList           = NULL;
    pThis->u32Magic             = RTLOCKVALCLASS_MAGIC;
    pThis->cRefs                = 1;
    pThis->fAutodidact          = fAutodidact;
    pThis->fRecursionOk         = fRecursionOk;
    pThis->fStrictReleaseOrder  = fStrictReleaseOrder;
    pThis->fInTree              = false;
    pThis->fDonateRefToNextRetainer = false;
    pThis->afReserved[0]        = false;
    pThis->afReserved[1]        = false;
    pThis->afReserved[2]        = false;
    pThis->cMsMinDeadlock       = cMsMinDeadlock;
    pThis->cMsMinOrder          = cMsMinOrder;
    for (unsigned i = 0; i < RT_ELEMENTS(pThis->au32Reserved); i++)
        pThis->au32Reserved[i] = 0;
    for (unsigned i = 0; i < RT_ELEMENTS(pThis->PriorLocks.aRefs); i++)
    {
        pThis->PriorLocks.aRefs[i].hClass           = NIL_RTLOCKVALCLASS;
        pThis->PriorLocks.aRefs[i].cLookups         = 0;
        pThis->PriorLocks.aRefs[i].fAutodidacticism = false;
        pThis->PriorLocks.aRefs[i].afReserved[0]    = false;
        pThis->PriorLocks.aRefs[i].afReserved[1]    = false;
        pThis->PriorLocks.aRefs[i].afReserved[2]    = false;
    }
    pThis->PriorLocks.pNext     = NULL;
    for (unsigned i = 0; i < RT_ELEMENTS(pThis->apPriorLocksHash); i++)
        pThis->apPriorLocksHash[i] = NULL;
    char *pszDst = (char *)(pThis + 1);
    pThis->pszName = (char *)memcpy(pszDst, szName, cbName);
    pszDst += cbName;
    rtLockValidatorSrcPosCopy(&pThis->CreatePos, pSrcPos);
    pThis->CreatePos.pszFile     = pSrcPos->pszFile     ? (char *)memcpy(pszDst, pSrcPos->pszFile, cbFile) : NULL;
    pszDst += cbFile;
    pThis->CreatePos.pszFunction = pSrcPos->pszFunction ? (char *)memcpy(pszDst, pSrcPos->pszFunction, cbFunction) : NULL;
    Assert(rtLockValidatorSrcPosHash(&pThis->CreatePos) == pThis->Core.Key);
#ifdef RTLOCKVAL_WITH_CLASS_HASH_STATS
    pThis->cHashHits   = 0;
    pThis->cHashMisses = 0;
#endif

    *phClass = pThis;
    return VINF_SUCCESS;
}


RTDECL(int) RTLockValidatorClassCreate(PRTLOCKVALCLASS phClass, bool fAutodidact, RT_SRC_POS_DECL, const char *pszNameFmt, ...)
{
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_POS_NO_ID();
    va_list va;
    va_start(va, pszNameFmt);
    int rc = RTLockValidatorClassCreateExV(phClass, &SrcPos,
                                           fAutodidact, true /*fRecursionOk*/, false /*fStrictReleaseOrder*/,
                                           1 /*cMsMinDeadlock*/, 1 /*cMsMinOrder*/,
                                           pszNameFmt, va);
    va_end(va);
    return rc;
}


/**
 * Creates a new lock validator class with a reference that is consumed by the
 * first call to RTLockValidatorClassRetain.
 *
 * This is tailored for use in the parameter list of a semaphore constructor.
 *
 * @returns Class handle with a reference that is automatically consumed by the
 *          first retainer.  NIL_RTLOCKVALCLASS if we run into trouble.
 *
 * @param   pszFile             The source position of the call, file.
 * @param   iLine               The source position of the call, line.
 * @param   pszFunction         The source position of the call, function.
 * @param   pszNameFmt          Class name format string, optional (NULL).  Max
 *                              length is 32 bytes.
 * @param   ...                 Format string arguments.
 */
RTDECL(RTLOCKVALCLASS) RTLockValidatorClassCreateUnique(RT_SRC_POS_DECL, const char *pszNameFmt, ...)
{
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_POS_NO_ID();
    RTLOCKVALCLASSINT *pClass;
    va_list va;
    va_start(va, pszNameFmt);
    int rc = RTLockValidatorClassCreateExV(&pClass, &SrcPos,
                                           true /*fAutodidact*/, true /*fRecursionOk*/, false /*fStrictReleaseOrder*/,
                                           1 /*cMsMinDeadlock*/, 1 /*cMsMinOrder*/,
                                           pszNameFmt, va);
    va_end(va);
    if (RT_FAILURE(rc))
        return NIL_RTLOCKVALCLASS;
    ASMAtomicWriteBool(&pClass->fDonateRefToNextRetainer, true); /* see rtLockValidatorClassRetain */
    return pClass;
}

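/* Usage sketch (the semaphore and names are illustrative, not from this file):
 * thanks to the donated reference, the call can sit directly in a constructor
 * parameter list without leaking a class reference:
 *
 *     RTSEMMUTEX hMtx;
 *     int rc = RTSemMutexCreateEx(&hMtx, 0,
 *                                 RTLockValidatorClassCreateUnique(RT_SRC_POS, "MyMutex"),
 *                                 RTLOCKVAL_SUB_CLASS_NONE, "MyMutex");
 */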

/**
 * Internal class retainer.
 * @returns The new reference count.
 * @param   pClass              The class.
 */
DECL_FORCE_INLINE(uint32_t) rtLockValidatorClassRetain(RTLOCKVALCLASSINT *pClass)
{
    uint32_t cRefs = ASMAtomicIncU32(&pClass->cRefs);
    if (cRefs > RTLOCKVALCLASS_MAX_REFS)
        ASMAtomicWriteU32(&pClass->cRefs, RTLOCKVALCLASS_MAX_REFS);
    else if (   cRefs == 2
             && ASMAtomicXchgBool(&pClass->fDonateRefToNextRetainer, false))
        cRefs = ASMAtomicDecU32(&pClass->cRefs);
    return cRefs;
}

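/* Reference count trace for the donation hack above: CreateUnique returns with
 * cRefs=1 and fDonateRefToNextRetainer=true; the first retain bumps cRefs to 2,
 * claims the flag, and decrements back to 1, thereby consuming the creation
 * reference so the first retainer ends up as the sole owner. */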

/**
 * Validates and retains a lock validator class.
 *
 * @returns @a hClass on success, NIL_RTLOCKVALCLASS on failure.
 * @param   hClass              The class handle.  NIL_RTLOCKVALCLASS is ok.
 */
DECL_FORCE_INLINE(RTLOCKVALCLASS) rtLockValidatorClassValidateAndRetain(RTLOCKVALCLASS hClass)
{
    if (hClass == NIL_RTLOCKVALCLASS)
        return hClass;
    AssertPtrReturn(hClass, NIL_RTLOCKVALCLASS);
    AssertReturn(hClass->u32Magic == RTLOCKVALCLASS_MAGIC, NIL_RTLOCKVALCLASS);
    rtLockValidatorClassRetain(hClass);
    return hClass;
}


/**
 * Internal class releaser.
 * @returns The new reference count.
 * @param   pClass              The class.
 */
DECLINLINE(uint32_t) rtLockValidatorClassRelease(RTLOCKVALCLASSINT *pClass)
{
    uint32_t cRefs = ASMAtomicDecU32(&pClass->cRefs);
    if (cRefs + 1 == RTLOCKVALCLASS_MAX_REFS)
        ASMAtomicWriteU32(&pClass->cRefs, RTLOCKVALCLASS_MAX_REFS);
    else if (!cRefs)
        rtLockValidatorClassDestroy(pClass);
    return cRefs;
}


/**
 * Destroys a class once there are no more references to it.
 *
 * @param   pClass              The class.
 */
static void rtLockValidatorClassDestroy(RTLOCKVALCLASSINT *pClass)
{
    AssertReturnVoid(!pClass->fInTree);
    ASMAtomicWriteU32(&pClass->u32Magic, RTLOCKVALCLASS_MAGIC_DEAD);

    PRTLOCKVALCLASSREFCHUNK pChunk = &pClass->PriorLocks;
    while (pChunk)
    {
        for (uint32_t i = 0; i < RT_ELEMENTS(pChunk->aRefs); i++)
        {
            RTLOCKVALCLASSINT *pClass2 = pChunk->aRefs[i].hClass;
            if (pClass2 != NIL_RTLOCKVALCLASS)
            {
                pChunk->aRefs[i].hClass = NIL_RTLOCKVALCLASS;
                rtLockValidatorClassRelease(pClass2);
            }
        }

        PRTLOCKVALCLASSREFCHUNK pNext = pChunk->pNext;
        pChunk->pNext = NULL;
        if (pChunk != &pClass->PriorLocks)
            RTMemFree(pChunk);
        pChunk = pNext;
    }

    RTMemFree(pClass);
}


RTDECL(RTLOCKVALCLASS) RTLockValidatorClassFindForSrcPos(PRTLOCKVALSRCPOS pSrcPos)
{
    if (g_hLockValClassTreeRWLock == NIL_RTSEMRW)
        rtLockValidatorLazyInit();
    int rcLock = RTSemRWRequestRead(g_hLockValClassTreeRWLock, RT_INDEFINITE_WAIT);

    uint32_t uSrcPosHash = rtLockValidatorSrcPosHash(pSrcPos);
    RTLOCKVALCLASSINT *pClass = (RTLOCKVALCLASSINT *)RTAvllU32Get(&g_LockValClassTree, uSrcPosHash);
    while (pClass)
    {
        if (rtLockValidatorSrcPosCompare(&pClass->CreatePos, pSrcPos) == 0)
            break;
        pClass = (RTLOCKVALCLASSINT *)pClass->Core.pList;
    }

    if (RT_SUCCESS(rcLock))
        RTSemRWReleaseRead(g_hLockValClassTreeRWLock);
    return pClass;
}


RTDECL(RTLOCKVALCLASS) RTLockValidatorClassForSrcPos(RT_SRC_POS_DECL, const char *pszNameFmt, ...)
{
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_POS_NO_ID();
    RTLOCKVALCLASS  hClass = RTLockValidatorClassFindForSrcPos(&SrcPos);
    if (hClass == NIL_RTLOCKVALCLASS)
    {
        /*
         * Create a new class and insert it into the tree.
         */
        va_list va;
        va_start(va, pszNameFmt);
        int rc = RTLockValidatorClassCreateExV(&hClass, &SrcPos,
                                               true /*fAutodidact*/, true /*fRecursionOk*/, false /*fStrictReleaseOrder*/,
                                               1 /*cMsMinDeadlock*/, 1 /*cMsMinOrder*/,
                                               pszNameFmt, va);
        va_end(va);
        if (RT_SUCCESS(rc))
        {
            if (g_hLockValClassTreeRWLock == NIL_RTSEMRW)
                rtLockValidatorLazyInit();
            int rcLock = RTSemRWRequestWrite(g_hLockValClassTreeRWLock, RT_INDEFINITE_WAIT);

            Assert(!hClass->fInTree);
            hClass->fInTree = RTAvllU32Insert(&g_LockValClassTree, &hClass->Core);
            Assert(hClass->fInTree);

            if (RT_SUCCESS(rcLock))
                RTSemRWReleaseWrite(g_hLockValClassTreeRWLock);
            return hClass;
        }
    }
    return hClass;
}


RTDECL(uint32_t) RTLockValidatorClassRetain(RTLOCKVALCLASS hClass)
{
    RTLOCKVALCLASSINT *pClass = hClass;
    AssertPtrReturn(pClass, UINT32_MAX);
    AssertReturn(pClass->u32Magic == RTLOCKVALCLASS_MAGIC, UINT32_MAX);
    return rtLockValidatorClassRetain(pClass);
}


RTDECL(uint32_t) RTLockValidatorClassRelease(RTLOCKVALCLASS hClass)
{
    RTLOCKVALCLASSINT *pClass = hClass;
    if (pClass == NIL_RTLOCKVALCLASS)
        return 0;
    AssertPtrReturn(pClass, UINT32_MAX);
    AssertReturn(pClass->u32Magic == RTLOCKVALCLASS_MAGIC, UINT32_MAX);
    return rtLockValidatorClassRelease(pClass);
}


/**
 * Worker for rtLockValidatorClassIsPriorClass that does a linear search thru
 * all the chunks for @a pPriorClass.
 *
 * @returns true / false.
 * @param   pClass              The class to search.
 * @param   pPriorClass         The class to search for.
 */
static bool rtLockValidatorClassIsPriorClassByLinearSearch(RTLOCKVALCLASSINT *pClass, RTLOCKVALCLASSINT *pPriorClass)
{
    for (PRTLOCKVALCLASSREFCHUNK pChunk = &pClass->PriorLocks; pChunk; pChunk = pChunk->pNext)
        for (uint32_t i = 0; i < RT_ELEMENTS(pChunk->aRefs); i++)
        {
            if (pChunk->aRefs[i].hClass == pPriorClass)
            {
                uint32_t cLookups = ASMAtomicIncU32(&pChunk->aRefs[i].cLookups);
                if (RT_UNLIKELY(cLookups >= RTLOCKVALCLASSREF_MAX_LOOKUPS_FIX))
                {
                    ASMAtomicWriteU32(&pChunk->aRefs[i].cLookups, RTLOCKVALCLASSREF_MAX_LOOKUPS);
                    cLookups = RTLOCKVALCLASSREF_MAX_LOOKUPS;
                }

                /* update the hash table entry. */
                PRTLOCKVALCLASSREF *ppHashEntry = &pClass->apPriorLocksHash[RTLOCKVALCLASS_HASH(pPriorClass)];
                if (   !(*ppHashEntry)
                    || (*ppHashEntry)->cLookups + 128 < cLookups)
                    ASMAtomicWritePtr(ppHashEntry, &pChunk->aRefs[i]);

#ifdef RTLOCKVAL_WITH_CLASS_HASH_STATS
                ASMAtomicIncU32(&pClass->cHashMisses);
#endif
                return true;
            }
        }

    return false;
}


/**
 * Checks if @a pPriorClass is a known prior class.
 *
 * @returns true / false.
 * @param   pClass              The class to search.
 * @param   pPriorClass         The class to search for.
 */
DECL_FORCE_INLINE(bool) rtLockValidatorClassIsPriorClass(RTLOCKVALCLASSINT *pClass, RTLOCKVALCLASSINT *pPriorClass)
{
    /*
     * Hash lookup here.
     */
    PRTLOCKVALCLASSREF pRef = pClass->apPriorLocksHash[RTLOCKVALCLASS_HASH(pPriorClass)];
    if (   pRef
        && pRef->hClass == pPriorClass)
    {
        uint32_t cLookups = ASMAtomicIncU32(&pRef->cLookups);
        if (RT_UNLIKELY(cLookups >= RTLOCKVALCLASSREF_MAX_LOOKUPS_FIX))
            ASMAtomicWriteU32(&pRef->cLookups, RTLOCKVALCLASSREF_MAX_LOOKUPS);
#ifdef RTLOCKVAL_WITH_CLASS_HASH_STATS
        ASMAtomicIncU32(&pClass->cHashHits);
#endif
        return true;
    }

    return rtLockValidatorClassIsPriorClassByLinearSearch(pClass, pPriorClass);
}


/**
 * Adds a class to the prior list.
 *
 * @returns VINF_SUCCESS, VERR_NO_MEMORY or VERR_SEM_LV_WRONG_ORDER.
 * @param   pClass              The class to work on.
 * @param   pPriorClass         The class to add.
 * @param   fAutodidacticism    Whether we're teaching ourselves (true) or
 *                              somebody is teaching us via the API (false).
 * @param   pSrcPos             Where this rule was added (optional).
 */
static int rtLockValidatorClassAddPriorClass(RTLOCKVALCLASSINT *pClass, RTLOCKVALCLASSINT *pPriorClass,
                                             bool fAutodidacticism, PCRTLOCKVALSRCPOS pSrcPos)
{
    if (!RTCritSectIsInitialized(&g_LockValClassTeachCS))
        rtLockValidatorLazyInit();
    int rcLock = RTCritSectEnter(&g_LockValClassTeachCS);

    /*
     * Check that there are no conflicts (no assert since we might race each other).
     */
    int rc = VERR_SEM_LV_INTERNAL_ERROR;
    if (!rtLockValidatorClassIsPriorClass(pPriorClass, pClass))
    {
        if (!rtLockValidatorClassIsPriorClass(pClass, pPriorClass))
        {
            /*
             * Scan the table for a free entry, allocating a new chunk if necessary.
             */
            for (PRTLOCKVALCLASSREFCHUNK pChunk = &pClass->PriorLocks; ; pChunk = pChunk->pNext)
            {
                bool fDone = false;
                for (uint32_t i = 0; i < RT_ELEMENTS(pChunk->aRefs); i++)
                {
                    ASMAtomicCmpXchgHandle(&pChunk->aRefs[i].hClass, pPriorClass, NIL_RTLOCKVALCLASS, fDone);
                    if (fDone)
                    {
                        pChunk->aRefs[i].fAutodidacticism = fAutodidacticism;
                        rtLockValidatorClassRetain(pPriorClass);
                        rc = VINF_SUCCESS;
                        break;
                    }
                }
                if (fDone)
                    break;

                /* If no more chunks, allocate a new one and insert the class before linking it. */
                if (!pChunk->pNext)
                {
                    PRTLOCKVALCLASSREFCHUNK pNew = (PRTLOCKVALCLASSREFCHUNK)RTMemAlloc(sizeof(*pNew));
                    if (!pNew)
                    {
                        rc = VERR_NO_MEMORY;
                        break;
                    }
                    pNew->pNext = NULL;
                    for (uint32_t i = 0; i < RT_ELEMENTS(pNew->aRefs); i++)
                    {
                        pNew->aRefs[i].hClass           = NIL_RTLOCKVALCLASS;
                        pNew->aRefs[i].cLookups         = 0;
                        pNew->aRefs[i].fAutodidacticism = false;
                        pNew->aRefs[i].afReserved[0]    = false;
                        pNew->aRefs[i].afReserved[1]    = false;
                        pNew->aRefs[i].afReserved[2]    = false;
                    }

                    pNew->aRefs[0].hClass           = pPriorClass;
                    pNew->aRefs[0].fAutodidacticism = fAutodidacticism;

                    ASMAtomicWritePtr(&pChunk->pNext, pNew);
                    rtLockValidatorClassRetain(pPriorClass);
                    rc = VINF_SUCCESS;
                    break;
                }
            } /* chunk loop */
        }
        else
            rc = VINF_SUCCESS;
    }
    else
        rc = !g_fLockValSoftWrongOrder ? VERR_SEM_LV_WRONG_ORDER : VINF_SUCCESS;

    if (RT_SUCCESS(rcLock))
        RTCritSectLeave(&g_LockValClassTeachCS);
    return rc;
}


RTDECL(int) RTLockValidatorClassAddPriorClass(RTLOCKVALCLASS hClass, RTLOCKVALCLASS hPriorClass)
{
    RTLOCKVALCLASSINT *pClass = hClass;
    AssertPtrReturn(pClass, VERR_INVALID_HANDLE);
    AssertReturn(pClass->u32Magic == RTLOCKVALCLASS_MAGIC, VERR_INVALID_HANDLE);

    RTLOCKVALCLASSINT *pPriorClass = hPriorClass;
    AssertPtrReturn(pPriorClass, VERR_INVALID_HANDLE);
    AssertReturn(pPriorClass->u32Magic == RTLOCKVALCLASS_MAGIC, VERR_INVALID_HANDLE);

    return rtLockValidatorClassAddPriorClass(pClass, pPriorClass, false /*fAutodidacticism*/, NULL);
}

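/* Usage sketch (class names invented for illustration): to teach the validator
 * that a "list" lock may be held while acquiring a "node" lock, i.e. that the
 * list class is a prior class of the node class (fAutodidact = false):
 *
 *     RTLOCKVALCLASS hList, hNode;
 *     RTLockValidatorClassCreate(&hList, false, RT_SRC_POS, "list");
 *     RTLockValidatorClassCreate(&hNode, false, RT_SRC_POS, "node");
 *     RTLockValidatorClassAddPriorClass(hNode, hList);   // list before node is OK
 */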

RTDECL(int) RTLockValidatorClassEnforceStrictReleaseOrder(RTLOCKVALCLASS hClass, bool fEnabled)
{
    RTLOCKVALCLASSINT *pClass = hClass;
    AssertPtrReturn(pClass, VERR_INVALID_HANDLE);
    AssertReturn(pClass->u32Magic == RTLOCKVALCLASS_MAGIC, VERR_INVALID_HANDLE);

    ASMAtomicWriteBool(&pClass->fStrictReleaseOrder, fEnabled);
    return VINF_SUCCESS;
}


/**
 * Unlinks all siblings.
 *
 * This is used during record deletion and assumes no races.
 *
 * @param   pCore               One of the siblings.
 */
static void rtLockValidatorUnlinkAllSiblings(PRTLOCKVALRECCORE pCore)
{
    /* ASSUMES sibling destruction doesn't involve any races and that all
       related records are to be disposed of now. */
    PRTLOCKVALRECUNION pSibling = (PRTLOCKVALRECUNION)pCore;
    while (pSibling)
    {
        PRTLOCKVALRECUNION volatile *ppCoreNext;
        switch (pSibling->Core.u32Magic)
        {
            case RTLOCKVALRECEXCL_MAGIC:
            case RTLOCKVALRECEXCL_MAGIC_DEAD:
                ppCoreNext = &pSibling->Excl.pSibling;
                break;

            case RTLOCKVALRECSHRD_MAGIC:
            case RTLOCKVALRECSHRD_MAGIC_DEAD:
                ppCoreNext = &pSibling->Shared.pSibling;
                break;

            default:
                AssertFailed();
                ppCoreNext = NULL;
                break;
        }
        if (RT_UNLIKELY(!ppCoreNext))
            break;
        pSibling = ASMAtomicXchgPtrT(ppCoreNext, NULL, PRTLOCKVALRECUNION);
    }
}


RTDECL(int) RTLockValidatorRecMakeSiblings(PRTLOCKVALRECCORE pRec1, PRTLOCKVALRECCORE pRec2)
{
    /*
     * Validate input.
     */
    PRTLOCKVALRECUNION p1 = (PRTLOCKVALRECUNION)pRec1;
    PRTLOCKVALRECUNION p2 = (PRTLOCKVALRECUNION)pRec2;

    AssertPtrReturn(p1, VERR_SEM_LV_INVALID_PARAMETER);
    AssertReturn(   p1->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC
                 || p1->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
                 , VERR_SEM_LV_INVALID_PARAMETER);

    AssertPtrReturn(p2, VERR_SEM_LV_INVALID_PARAMETER);
    AssertReturn(   p2->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC
                 || p2->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
                 , VERR_SEM_LV_INVALID_PARAMETER);

    /*
     * Link them (circular list).
     */
    if (   p1->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC
        && p2->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC)
    {
        p1->Excl.pSibling   = p2;
        p2->Shared.pSibling = p1;
    }
    else if (   p1->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
             && p2->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC)
    {
        p1->Shared.pSibling = p2;
        p2->Excl.pSibling   = p1;
    }
    else
        AssertFailedReturn(VERR_SEM_LV_INVALID_PARAMETER); /* unsupported mix */

    return VINF_SUCCESS;
}


/**
 * Gets the lock name for the given record.
 *
 * @returns Read-only lock name.
 * @param   pRec                The lock record.
 */
DECL_FORCE_INLINE(const char *) rtLockValidatorRecName(PRTLOCKVALRECUNION pRec)
{
    switch (pRec->Core.u32Magic)
    {
        case RTLOCKVALRECEXCL_MAGIC:
            return pRec->Excl.szName;
        case RTLOCKVALRECSHRD_MAGIC:
            return pRec->Shared.szName;
        case RTLOCKVALRECSHRDOWN_MAGIC:
            return pRec->ShrdOwner.pSharedRec ? pRec->ShrdOwner.pSharedRec->szName : "orphaned";
        case RTLOCKVALRECNEST_MAGIC:
            pRec = rtLockValidatorReadRecUnionPtr(&pRec->Nest.pRec);
            if (VALID_PTR(pRec))
            {
                switch (pRec->Core.u32Magic)
                {
                    case RTLOCKVALRECEXCL_MAGIC:
                        return pRec->Excl.szName;
                    case RTLOCKVALRECSHRD_MAGIC:
                        return pRec->Shared.szName;
                    case RTLOCKVALRECSHRDOWN_MAGIC:
                        return pRec->ShrdOwner.pSharedRec ? pRec->ShrdOwner.pSharedRec->szName : "orphaned";
                    default:
                        return "unknown-nested";
                }
            }
            return "orphaned-nested";
        default:
            return "unknown";
    }
}


/**
 * Gets the class for this locking record.
 *
 * @returns Pointer to the class or NIL_RTLOCKVALCLASS.
 * @param   pRec                The lock validator record.
 */
DECLINLINE(RTLOCKVALCLASSINT *) rtLockValidatorRecGetClass(PRTLOCKVALRECUNION pRec)
{
    switch (pRec->Core.u32Magic)
    {
        case RTLOCKVALRECEXCL_MAGIC:
            return pRec->Excl.hClass;

        case RTLOCKVALRECSHRD_MAGIC:
            return pRec->Shared.hClass;

        case RTLOCKVALRECSHRDOWN_MAGIC:
        {
            PRTLOCKVALRECSHRD pSharedRec = pRec->ShrdOwner.pSharedRec;
            if (RT_LIKELY(   VALID_PTR(pSharedRec)
                          && pSharedRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC))
                return pSharedRec->hClass;
            return NIL_RTLOCKVALCLASS;
        }

        case RTLOCKVALRECNEST_MAGIC:
        {
            PRTLOCKVALRECUNION pRealRec = pRec->Nest.pRec;
            if (VALID_PTR(pRealRec))
            {
                switch (pRealRec->Core.u32Magic)
                {
                    case RTLOCKVALRECEXCL_MAGIC:
                        return pRealRec->Excl.hClass;

                    case RTLOCKVALRECSHRDOWN_MAGIC:
                    {
                        PRTLOCKVALRECSHRD pSharedRec = pRealRec->ShrdOwner.pSharedRec;
                        if (RT_LIKELY(   VALID_PTR(pSharedRec)
                                      && pSharedRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC))
                            return pSharedRec->hClass;
                        break;
                    }

                    default:
                        AssertMsgFailed(("%p %p %#x\n", pRec, pRealRec, pRealRec->Core.u32Magic));
                        break;
                }
            }
            return NIL_RTLOCKVALCLASS;
        }

        default:
            AssertMsgFailed(("%#x\n", pRec->Core.u32Magic));
            return NIL_RTLOCKVALCLASS;
    }
}


/**
 * Gets the class for this locking record and the pointer to the one below it in
 * the stack.
 *
 * @returns Pointer to the class or NIL_RTLOCKVALCLASS.
 * @param   pRec                The lock validator record.
 * @param   puSubClass          Where to return the sub-class.
 * @param   ppDown              Where to return the pointer to the record below.
 */
DECL_FORCE_INLINE(RTLOCKVALCLASSINT *)
rtLockValidatorRecGetClassesAndDown(PRTLOCKVALRECUNION pRec, uint32_t *puSubClass, PRTLOCKVALRECUNION *ppDown)
{
    switch (pRec->Core.u32Magic)
    {
        case RTLOCKVALRECEXCL_MAGIC:
            *ppDown = pRec->Excl.pDown;
            *puSubClass = pRec->Excl.uSubClass;
            return pRec->Excl.hClass;

        case RTLOCKVALRECSHRD_MAGIC:
            *ppDown = NULL;
            *puSubClass = pRec->Shared.uSubClass;
            return pRec->Shared.hClass;

        case RTLOCKVALRECSHRDOWN_MAGIC:
        {
            *ppDown = pRec->ShrdOwner.pDown;

            PRTLOCKVALRECSHRD pSharedRec = pRec->ShrdOwner.pSharedRec;
            if (RT_LIKELY(   VALID_PTR(pSharedRec)
                          && pSharedRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC))
            {
                *puSubClass = pSharedRec->uSubClass;
                return pSharedRec->hClass;
            }
            *puSubClass = RTLOCKVAL_SUB_CLASS_NONE;
            return NIL_RTLOCKVALCLASS;
        }

        case RTLOCKVALRECNEST_MAGIC:
        {
            *ppDown = pRec->Nest.pDown;

            PRTLOCKVALRECUNION pRealRec = pRec->Nest.pRec;
            if (VALID_PTR(pRealRec))
            {
                switch (pRealRec->Core.u32Magic)
                {
                    case RTLOCKVALRECEXCL_MAGIC:
                        *puSubClass = pRealRec->Excl.uSubClass;
                        return pRealRec->Excl.hClass;

                    case RTLOCKVALRECSHRDOWN_MAGIC:
                    {
                        PRTLOCKVALRECSHRD pSharedRec = pRealRec->ShrdOwner.pSharedRec;
                        if (RT_LIKELY(   VALID_PTR(pSharedRec)
                                      && pSharedRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC))
                        {
                            *puSubClass = pSharedRec->uSubClass;
                            return pSharedRec->hClass;
                        }
                        break;
                    }

                    default:
                        AssertMsgFailed(("%p %p %#x\n", pRec, pRealRec, pRealRec->Core.u32Magic));
                        break;
                }
            }
            *puSubClass = RTLOCKVAL_SUB_CLASS_NONE;
            return NIL_RTLOCKVALCLASS;
        }

        default:
            AssertMsgFailed(("%#x\n", pRec->Core.u32Magic));
            *ppDown = NULL;
            *puSubClass = RTLOCKVAL_SUB_CLASS_NONE;
            return NIL_RTLOCKVALCLASS;
    }
}
1772
1773
1774/**
1775 * Gets the sub-class for a lock record.
1776 *
1777 * @returns the sub-class.
1778 * @param pRec The lock validator record.
1779 */
1780DECLINLINE(uint32_t) rtLockValidatorRecGetSubClass(PRTLOCKVALRECUNION pRec)
1781{
1782 switch (pRec->Core.u32Magic)
1783 {
1784 case RTLOCKVALRECEXCL_MAGIC:
1785 return pRec->Excl.uSubClass;
1786
1787 case RTLOCKVALRECSHRD_MAGIC:
1788 return pRec->Shared.uSubClass;
1789
1790 case RTLOCKVALRECSHRDOWN_MAGIC:
1791 {
1792 PRTLOCKVALRECSHRD pSharedRec = pRec->ShrdOwner.pSharedRec;
1793 if (RT_LIKELY( VALID_PTR(pSharedRec)
1794 && pSharedRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC))
1795 return pSharedRec->uSubClass;
1796 return RTLOCKVAL_SUB_CLASS_NONE;
1797 }
1798
1799 case RTLOCKVALRECNEST_MAGIC:
1800 {
1801 PRTLOCKVALRECUNION pRealRec = pRec->Nest.pRec;
1802 if (VALID_PTR(pRealRec))
1803 {
1804 switch (pRealRec->Core.u32Magic)
1805 {
1806 case RTLOCKVALRECEXCL_MAGIC:
1807                        return pRealRec->Excl.uSubClass;
1808
1809 case RTLOCKVALRECSHRDOWN_MAGIC:
1810 {
1811 PRTLOCKVALRECSHRD pSharedRec = pRealRec->ShrdOwner.pSharedRec;
1812 if (RT_LIKELY( VALID_PTR(pSharedRec)
1813 && pSharedRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC))
1814 return pSharedRec->uSubClass;
1815 break;
1816 }
1817
1818 default:
1819 AssertMsgFailed(("%p %p %#x\n", pRec, pRealRec, pRealRec->Core.u32Magic));
1820 break;
1821 }
1822 }
1823 return RTLOCKVAL_SUB_CLASS_NONE;
1824 }
1825
1826 default:
1827 AssertMsgFailed(("%#x\n", pRec->Core.u32Magic));
1828 return RTLOCKVAL_SUB_CLASS_NONE;
1829 }
1830}
1831
1832
1833
1834
1835/**
1836 * Calculates the depth of a lock stack.
1837 *
1838 * @returns Number of stack frames.
1839 * @param pThread The thread.
1840 */
1841static uint32_t rtLockValidatorStackDepth(PRTTHREADINT pThread)
1842{
1843 uint32_t cEntries = 0;
1844 PRTLOCKVALRECUNION pCur = rtLockValidatorReadRecUnionPtr(&pThread->LockValidator.pStackTop);
1845 while (VALID_PTR(pCur))
1846 {
1847 switch (pCur->Core.u32Magic)
1848 {
1849 case RTLOCKVALRECEXCL_MAGIC:
1850 pCur = rtLockValidatorReadRecUnionPtr(&pCur->Excl.pDown);
1851 break;
1852
1853 case RTLOCKVALRECSHRDOWN_MAGIC:
1854 pCur = rtLockValidatorReadRecUnionPtr(&pCur->ShrdOwner.pDown);
1855 break;
1856
1857 case RTLOCKVALRECNEST_MAGIC:
1858 pCur = rtLockValidatorReadRecUnionPtr(&pCur->Nest.pDown);
1859 break;
1860
1861 default:
1862 AssertMsgFailedReturn(("%#x\n", pCur->Core.u32Magic), cEntries);
1863 }
1864 cEntries++;
1865 }
1866 return cEntries;
1867}
1868
1869
1870/**
1871 * Checks if the stack contains @a pRec.
1872 *
1873 * @returns true / false.
1874 * @param pThreadSelf The current thread.
1875 * @param pRec The lock record.
1876 */
1877static bool rtLockValidatorStackContainsRec(PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION pRec)
1878{
1879 PRTLOCKVALRECUNION pCur = pThreadSelf->LockValidator.pStackTop;
1880 while (pCur)
1881 {
1882 AssertPtrReturn(pCur, false);
1883 if (pCur == pRec)
1884 return true;
1885 switch (pCur->Core.u32Magic)
1886 {
1887 case RTLOCKVALRECEXCL_MAGIC:
1888 Assert(pCur->Excl.cRecursion >= 1);
1889 pCur = pCur->Excl.pDown;
1890 break;
1891
1892 case RTLOCKVALRECSHRDOWN_MAGIC:
1893 Assert(pCur->ShrdOwner.cRecursion >= 1);
1894 pCur = pCur->ShrdOwner.pDown;
1895 break;
1896
1897 case RTLOCKVALRECNEST_MAGIC:
1898 Assert(pCur->Nest.cRecursion > 1);
1899 pCur = pCur->Nest.pDown;
1900 break;
1901
1902 default:
1903 AssertMsgFailedReturn(("%#x\n", pCur->Core.u32Magic), false);
1904 }
1905 }
1906 return false;
1907}
1908
1909
1910/**
1911 * Pushes a lock record onto the stack.
1912 *
1913 * @param pThreadSelf The current thread.
1914 * @param pRec The lock record.
1915 */
1916static void rtLockValidatorStackPush(PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION pRec)
1917{
1918 Assert(pThreadSelf == RTThreadSelf());
1919 Assert(!rtLockValidatorStackContainsRec(pThreadSelf, pRec));
1920
1921 switch (pRec->Core.u32Magic)
1922 {
1923 case RTLOCKVALRECEXCL_MAGIC:
1924 Assert(pRec->Excl.cRecursion == 1);
1925 Assert(pRec->Excl.pDown == NULL);
1926 rtLockValidatorWriteRecUnionPtr(&pRec->Excl.pDown, pThreadSelf->LockValidator.pStackTop);
1927 break;
1928
1929 case RTLOCKVALRECSHRDOWN_MAGIC:
1930 Assert(pRec->ShrdOwner.cRecursion == 1);
1931 Assert(pRec->ShrdOwner.pDown == NULL);
1932 rtLockValidatorWriteRecUnionPtr(&pRec->ShrdOwner.pDown, pThreadSelf->LockValidator.pStackTop);
1933 break;
1934
1935 default:
1936 AssertMsgFailedReturnVoid(("%#x\n", pRec->Core.u32Magic));
1937 }
1938 rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pStackTop, pRec);
1939}
1940
1941
1942/**
1943 * Pops a lock record off the stack.
1944 *
1945 * @param pThreadSelf The current thread.
1946 * @param pRec The lock.
1947 */
1948static void rtLockValidatorStackPop(PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION pRec)
1949{
1950 Assert(pThreadSelf == RTThreadSelf());
1951
1952 PRTLOCKVALRECUNION pDown;
1953 switch (pRec->Core.u32Magic)
1954 {
1955 case RTLOCKVALRECEXCL_MAGIC:
1956 Assert(pRec->Excl.cRecursion == 0);
1957 pDown = pRec->Excl.pDown;
1958 rtLockValidatorWriteRecUnionPtr(&pRec->Excl.pDown, NULL); /* lazy bird */
1959 break;
1960
1961 case RTLOCKVALRECSHRDOWN_MAGIC:
1962 Assert(pRec->ShrdOwner.cRecursion == 0);
1963 pDown = pRec->ShrdOwner.pDown;
1964 rtLockValidatorWriteRecUnionPtr(&pRec->ShrdOwner.pDown, NULL);
1965 break;
1966
1967 default:
1968 AssertMsgFailedReturnVoid(("%#x\n", pRec->Core.u32Magic));
1969 }
1970 if (pThreadSelf->LockValidator.pStackTop == pRec)
1971 rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pStackTop, pDown);
1972 else
1973 {
1974 /* Find the pointer to our record and unlink ourselves. */
1975 PRTLOCKVALRECUNION pCur = pThreadSelf->LockValidator.pStackTop;
1976 while (pCur)
1977 {
1978 PRTLOCKVALRECUNION volatile *ppDown;
1979 switch (pCur->Core.u32Magic)
1980 {
1981 case RTLOCKVALRECEXCL_MAGIC:
1982 Assert(pCur->Excl.cRecursion >= 1);
1983 ppDown = &pCur->Excl.pDown;
1984 break;
1985
1986 case RTLOCKVALRECSHRDOWN_MAGIC:
1987 Assert(pCur->ShrdOwner.cRecursion >= 1);
1988 ppDown = &pCur->ShrdOwner.pDown;
1989 break;
1990
1991 case RTLOCKVALRECNEST_MAGIC:
1992 Assert(pCur->Nest.cRecursion >= 1);
1993 ppDown = &pCur->Nest.pDown;
1994 break;
1995
1996 default:
1997 AssertMsgFailedReturnVoid(("%#x\n", pCur->Core.u32Magic));
1998 }
1999 pCur = *ppDown;
2000 if (pCur == pRec)
2001 {
2002 rtLockValidatorWriteRecUnionPtr(ppDown, pDown);
2003 return;
2004 }
2005 }
2006 AssertMsgFailed(("%p %p\n", pRec, pThreadSelf));
2007 }
2008}
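
/* Out-of-order release example (illustrative): with a per-thread stack of
        pStackTop -> C -> B -> A
   popping B rewrites C's pDown pointer so the stack becomes
        pStackTop -> C -> A
   which is why the loop above must locate the type-specific pDown member
   before it can unlink the record. */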
2009
2010
2011/**
2012 * Creates and pushes a lock recursion record onto the stack.
2013 *
2014 * @param pThreadSelf The current thread.
2015 * @param pRec The lock record.
2016 * @param pSrcPos Where the recursion occurred.
2017 */
2018static void rtLockValidatorStackPushRecursion(PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION pRec, PCRTLOCKVALSRCPOS pSrcPos)
2019{
2020 Assert(pThreadSelf == RTThreadSelf());
2021 Assert(rtLockValidatorStackContainsRec(pThreadSelf, pRec));
2022
2023#ifdef RTLOCKVAL_WITH_RECURSION_RECORDS
2024 /*
2025 * Allocate a new recursion record
2026 */
2027 PRTLOCKVALRECNEST pRecursionRec = pThreadSelf->LockValidator.pFreeNestRecs;
2028 if (pRecursionRec)
2029 pThreadSelf->LockValidator.pFreeNestRecs = pRecursionRec->pNextFree;
2030 else
2031 {
2032 pRecursionRec = (PRTLOCKVALRECNEST)RTMemAlloc(sizeof(*pRecursionRec));
2033 if (!pRecursionRec)
2034 return;
2035 }
2036
2037 /*
2038 * Initialize it.
2039 */
2040 switch (pRec->Core.u32Magic)
2041 {
2042 case RTLOCKVALRECEXCL_MAGIC:
2043 pRecursionRec->cRecursion = pRec->Excl.cRecursion;
2044 break;
2045
2046 case RTLOCKVALRECSHRDOWN_MAGIC:
2047 pRecursionRec->cRecursion = pRec->ShrdOwner.cRecursion;
2048 break;
2049
2050 default:
2051 AssertMsgFailed(("%#x\n", pRec->Core.u32Magic));
2052 rtLockValidatorSerializeDestructEnter();
2053 rtLockValidatorSerializeDestructLeave();
2054 RTMemFree(pRecursionRec);
2055 return;
2056 }
2057 Assert(pRecursionRec->cRecursion > 1);
2058 pRecursionRec->pRec = pRec;
2059 pRecursionRec->pDown = NULL;
2060 pRecursionRec->pNextFree = NULL;
2061 rtLockValidatorSrcPosCopy(&pRecursionRec->SrcPos, pSrcPos);
2062 pRecursionRec->Core.u32Magic = RTLOCKVALRECNEST_MAGIC;
2063
2064 /*
2065 * Link it.
2066 */
2067 pRecursionRec->pDown = pThreadSelf->LockValidator.pStackTop;
2068 rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pStackTop, (PRTLOCKVALRECUNION)pRecursionRec);
2069#endif /* RTLOCKVAL_WITH_RECURSION_RECORDS */
2070}
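
/* Resulting stack layout after a recursive acquisition (illustrative): for a
   lock X taken twice by the same thread, the stack reads
        pStackTop -> RTLOCKVALRECNEST (cRecursion=2, pRec=X, recursion SrcPos)
                  -> X (the original exclusive or shared-owner record)
                  -> ... older locks ...
   so complaints can show both where X was first acquired and where it
   recursed. */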
2071
2072
2073/**
2074 * Pops a lock recursion record off the stack.
2075 *
2076 * @param pThreadSelf The current thread.
2077 * @param pRec The lock record.
2078 */
2079static void rtLockValidatorStackPopRecursion(PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION pRec)
2080{
2081 Assert(pThreadSelf == RTThreadSelf());
2082 Assert(rtLockValidatorStackContainsRec(pThreadSelf, pRec));
2083
2084 uint32_t cRecursion;
2085 switch (pRec->Core.u32Magic)
2086 {
2087 case RTLOCKVALRECEXCL_MAGIC: cRecursion = pRec->Excl.cRecursion; break;
2088 case RTLOCKVALRECSHRDOWN_MAGIC: cRecursion = pRec->ShrdOwner.cRecursion; break;
2089 default: AssertMsgFailedReturnVoid(("%#x\n", pRec->Core.u32Magic));
2090 }
2091 Assert(cRecursion >= 1);
2092
2093#ifdef RTLOCKVAL_WITH_RECURSION_RECORDS
2094 /*
2095 * Pop the recursion record.
2096 */
2097 PRTLOCKVALRECUNION pNest = pThreadSelf->LockValidator.pStackTop;
2098 if ( pNest != NULL
2099 && pNest->Core.u32Magic == RTLOCKVALRECNEST_MAGIC
2100 && pNest->Nest.pRec == pRec
2101 )
2102 {
2103 Assert(pNest->Nest.cRecursion == cRecursion + 1);
2104 rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pStackTop, pNest->Nest.pDown);
2105 }
2106 else
2107 {
2108 /* Find the record above ours. */
2109 PRTLOCKVALRECUNION volatile *ppDown = NULL;
2110 for (;;)
2111 {
2112 AssertMsgReturnVoid(pNest, ("%p %p\n", pRec, pThreadSelf));
2113 switch (pNest->Core.u32Magic)
2114 {
2115 case RTLOCKVALRECEXCL_MAGIC:
2116 ppDown = &pNest->Excl.pDown;
2117 pNest = *ppDown;
2118 continue;
2119 case RTLOCKVALRECSHRDOWN_MAGIC:
2120 ppDown = &pNest->ShrdOwner.pDown;
2121 pNest = *ppDown;
2122 continue;
2123 case RTLOCKVALRECNEST_MAGIC:
2124 if (pNest->Nest.pRec == pRec)
2125 break;
2126 ppDown = &pNest->Nest.pDown;
2127 pNest = *ppDown;
2128 continue;
2129 default:
2130 AssertMsgFailedReturnVoid(("%#x\n", pNest->Core.u32Magic));
2131 }
2132 break; /* ugly */
2133 }
2134 Assert(pNest->Nest.cRecursion == cRecursion + 1);
2135 rtLockValidatorWriteRecUnionPtr(ppDown, pNest->Nest.pDown);
2136 }
2137
2138 /*
2139 * Invalidate and free the record.
2140 */
2141    ASMAtomicWriteU32(&pNest->Core.u32Magic, RTLOCKVALRECNEST_MAGIC_DEAD);
2142 rtLockValidatorWriteRecUnionPtr(&pNest->Nest.pDown, NULL);
2143 rtLockValidatorWriteRecUnionPtr(&pNest->Nest.pRec, NULL);
2144 pNest->Nest.cRecursion = 0;
2145 pNest->Nest.pNextFree = pThreadSelf->LockValidator.pFreeNestRecs;
2146 pThreadSelf->LockValidator.pFreeNestRecs = &pNest->Nest;
2147#endif /* RTLOCKVAL_WITH_RECURSION_RECORDS */
2148}
2149
2150
2151/**
2152 * Helper for rtLockValidatorStackCheckLockingOrder that does the bitching and
2153 * returns VERR_SEM_LV_WRONG_ORDER.
2154 */
2155static int rtLockValidatorStackWrongOrder(const char *pszWhat, PCRTLOCKVALSRCPOS pSrcPos, PRTTHREADINT pThreadSelf,
2156 PRTLOCKVALRECUNION pRec1, PRTLOCKVALRECUNION pRec2,
2157 RTLOCKVALCLASSINT *pClass1, RTLOCKVALCLASSINT *pClass2)
2160{
2161 rtLockValComplainFirst(pszWhat, pSrcPos, pThreadSelf, pRec1, false);
2162 rtLockValComplainAboutLock("Other lock: ", pRec2, "\n");
2163 rtLockValComplainAboutClass("My class: ", pClass1, rtLockValidatorRecGetSubClass(pRec1), true /*fVerbose*/);
2164 rtLockValComplainAboutClass("Other class: ", pClass2, rtLockValidatorRecGetSubClass(pRec2), true /*fVerbose*/);
2165 rtLockValComplainAboutLockStack(pThreadSelf, 0, 0, pRec2);
2166 rtLockValComplainPanic();
2167 return !g_fLockValSoftWrongOrder ? VERR_SEM_LV_WRONG_ORDER : VINF_SUCCESS;
2168}
2169
2170
2171/**
2172 * Checks if the sub-class order is ok or not.
2173 *
2174 * Used to deal with two locks from the same class.
2175 *
2176 * @returns true if ok, false if not.
2177 * @param uSubClass1 The sub-class of the lock that is being
2178 * considered.
2179 * @param uSubClass2 The sub-class of the lock that is already being
2180 * held.
2181 */
2182DECL_FORCE_INLINE(bool) rtLockValidatorIsSubClassOrderOk(uint32_t uSubClass1, uint32_t uSubClass2)
2183{
2184 if (uSubClass1 > uSubClass2)
2185 {
2186 /* NONE kills ANY. */
2187 if (uSubClass2 == RTLOCKVAL_SUB_CLASS_NONE)
2188 return false;
2189 return true;
2190 }
2191
2192 /* ANY counters all USER values. (uSubClass1 == NONE only if they are equal) */
2193 AssertCompile(RTLOCKVAL_SUB_CLASS_ANY > RTLOCKVAL_SUB_CLASS_NONE);
2194 if (uSubClass1 == RTLOCKVAL_SUB_CLASS_ANY)
2195 return true;
2196 return false;
2197}
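
/* Worked examples of the rules above, using the standard sub-class values
   from iprt/lockvalidator.h (NONE=0, ANY=1, USER=16; a sketch, not a spec):
        (USER + 1, USER)      -> true    ascending within the same class is ok
        (USER,     USER + 1)  -> false   descending is not
        (USER,     NONE)      -> false   NONE kills ANY
        (ANY,      USER)      -> true    ANY counters all USER values
        (NONE,     NONE)      -> false   equal NONE sub-classes also fail */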
2198
2199
2200/**
2201 * Checks if the class and sub-class lock order is ok.
2202 *
2203 * @returns true if ok, false if not.
2204 * @param pClass1 The class of the lock that is being considered.
2205 * @param uSubClass1 The sub-class that goes with @a pClass1.
2206 * @param pClass2 The class of the lock that is already being
2207 * held.
2208 * @param uSubClass2 The sub-class that goes with @a pClass2.
2209 */
2210DECL_FORCE_INLINE(bool) rtLockValidatorIsClassOrderOk(RTLOCKVALCLASSINT *pClass1, uint32_t uSubClass1,
2211 RTLOCKVALCLASSINT *pClass2, uint32_t uSubClass2)
2212{
2213 if (pClass1 == pClass2)
2214 return rtLockValidatorIsSubClassOrderOk(uSubClass1, uSubClass2);
2215 return rtLockValidatorClassIsPriorClass(pClass1, pClass2);
2216}
2217
2218
2219/**
2220 * Checks the locking order, part two.
2221 *
2222 * @returns VINF_SUCCESS, VERR_SEM_LV_WRONG_ORDER or VERR_SEM_LV_INTERNAL_ERROR.
2223 * @param pClass The lock class.
2224 * @param uSubClass The lock sub-class.
2225 * @param pThreadSelf The current thread.
2226 * @param pRec The lock record.
2227 * @param pSrcPos The source position of the locking operation.
2228 */
2229static int rtLockValidatorStackCheckLockingOrder2(RTLOCKVALCLASSINT * const pClass, uint32_t const uSubClass,
2230 PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION const pRec,
2231 PCRTLOCKVALSRCPOS const pSrcPos,
2232 RTLOCKVALCLASSINT * const pFirstBadClass,
2233 PRTLOCKVALRECUNION const pFirstBadRec,
2234 PRTLOCKVALRECUNION const pFirstBadDown)
2235{
2236    /*
2237     * Something went wrong; pFirstBadRec points at the offending stack entry.
2238     */
2239 if ( pClass == pFirstBadClass
2240 || rtLockValidatorClassIsPriorClass(pFirstBadClass, pClass))
2241 return rtLockValidatorStackWrongOrder("Wrong locking order!", pSrcPos, pThreadSelf,
2242 pRec, pFirstBadRec, pClass, pFirstBadClass);
2243 if (!pClass->fAutodidact)
2244 return rtLockValidatorStackWrongOrder("Wrong locking order! (unknown)", pSrcPos, pThreadSelf,
2245 pRec, pFirstBadRec, pClass, pFirstBadClass);
2246
2247 /*
2248 * This class is an autodidact, so we have to check out the rest of the stack
2249 * for direct violations.
2250 */
2251 uint32_t cNewRules = 1;
2252 PRTLOCKVALRECUNION pCur = pFirstBadDown;
2253 while (pCur)
2254 {
2255 AssertPtrReturn(pCur, VERR_SEM_LV_INTERNAL_ERROR);
2256
2257 if (pCur->Core.u32Magic == RTLOCKVALRECNEST_MAGIC)
2258 pCur = pCur->Nest.pDown;
2259 else
2260 {
2261 PRTLOCKVALRECUNION pDown;
2262 uint32_t uPriorSubClass;
2263 RTLOCKVALCLASSINT *pPriorClass = rtLockValidatorRecGetClassesAndDown(pCur, &uPriorSubClass, &pDown);
2264 if (pPriorClass != NIL_RTLOCKVALCLASS)
2265 {
2266 AssertPtrReturn(pPriorClass, VERR_SEM_LV_INTERNAL_ERROR);
2267 AssertReturn(pPriorClass->u32Magic == RTLOCKVALCLASS_MAGIC, VERR_SEM_LV_INTERNAL_ERROR);
2268 if (!rtLockValidatorIsClassOrderOk(pClass, uSubClass, pPriorClass, uPriorSubClass))
2269 {
2270 if ( pClass == pPriorClass
2271 || rtLockValidatorClassIsPriorClass(pPriorClass, pClass))
2272 return rtLockValidatorStackWrongOrder("Wrong locking order! (more than one)", pSrcPos, pThreadSelf,
2273 pRec, pCur, pClass, pPriorClass);
2274 cNewRules++;
2275 }
2276 }
2277 pCur = pDown;
2278 }
2279 }
2280
2281 if (cNewRules == 1)
2282 {
2283 /*
2284 * Special case the simple operation, hoping that it will be a
2285 * frequent case.
2286 */
2287 int rc = rtLockValidatorClassAddPriorClass(pClass, pFirstBadClass, true /*fAutodidacticism*/, pSrcPos);
2288 if (rc == VERR_SEM_LV_WRONG_ORDER)
2289 return rtLockValidatorStackWrongOrder("Wrong locking order! (race)", pSrcPos, pThreadSelf,
2290 pRec, pFirstBadRec, pClass, pFirstBadClass);
2291 Assert(RT_SUCCESS(rc) || rc == VERR_NO_MEMORY);
2292 }
2293 else
2294 {
2295 /*
2296 * We may be adding more than one rule, so we have to take the lock
2297 * before starting to add the rules. This means we have to check
2298 * the state after taking it since we might be racing someone adding
2299 * a conflicting rule.
2300 */
2301 if (!RTCritSectIsInitialized(&g_LockValClassTeachCS))
2302 rtLockValidatorLazyInit();
2303 int rcLock = RTCritSectEnter(&g_LockValClassTeachCS);
2304
2305 /* Check */
2306 pCur = pFirstBadRec;
2307 while (pCur)
2308 {
2309 if (pCur->Core.u32Magic == RTLOCKVALRECNEST_MAGIC)
2310 pCur = pCur->Nest.pDown;
2311 else
2312 {
2313 uint32_t uPriorSubClass;
2314 PRTLOCKVALRECUNION pDown;
2315 RTLOCKVALCLASSINT *pPriorClass = rtLockValidatorRecGetClassesAndDown(pCur, &uPriorSubClass, &pDown);
2316 if (pPriorClass != NIL_RTLOCKVALCLASS)
2317 {
2318 if (!rtLockValidatorIsClassOrderOk(pClass, uSubClass, pPriorClass, uPriorSubClass))
2319 {
2320 if ( pClass == pPriorClass
2321 || rtLockValidatorClassIsPriorClass(pPriorClass, pClass))
2322 {
2323 if (RT_SUCCESS(rcLock))
2324 RTCritSectLeave(&g_LockValClassTeachCS);
2325 return rtLockValidatorStackWrongOrder("Wrong locking order! (2nd)", pSrcPos, pThreadSelf,
2326 pRec, pCur, pClass, pPriorClass);
2327 }
2328 }
2329 }
2330 pCur = pDown;
2331 }
2332 }
2333
2334 /* Iterate the stack yet again, adding new rules this time. */
2335 pCur = pFirstBadRec;
2336 while (pCur)
2337 {
2338 if (pCur->Core.u32Magic == RTLOCKVALRECNEST_MAGIC)
2339 pCur = pCur->Nest.pDown;
2340 else
2341 {
2342 uint32_t uPriorSubClass;
2343 PRTLOCKVALRECUNION pDown;
2344 RTLOCKVALCLASSINT *pPriorClass = rtLockValidatorRecGetClassesAndDown(pCur, &uPriorSubClass, &pDown);
2345 if (pPriorClass != NIL_RTLOCKVALCLASS)
2346 {
2347 if (!rtLockValidatorIsClassOrderOk(pClass, uSubClass, pPriorClass, uPriorSubClass))
2348 {
2349 Assert( pClass != pPriorClass
2350 && !rtLockValidatorClassIsPriorClass(pPriorClass, pClass));
2351 int rc = rtLockValidatorClassAddPriorClass(pClass, pPriorClass, true /*fAutodidacticism*/, pSrcPos);
2352 if (RT_FAILURE(rc))
2353 {
2354 Assert(rc == VERR_NO_MEMORY);
2355 break;
2356 }
2357 Assert(rtLockValidatorClassIsPriorClass(pClass, pPriorClass));
2358 }
2359 }
2360 pCur = pDown;
2361 }
2362 }
2363
2364 if (RT_SUCCESS(rcLock))
2365 RTCritSectLeave(&g_LockValClassTeachCS);
2366 }
2367
2368 return VINF_SUCCESS;
2369}
2370
2371
2372
2373/**
2374 * Checks the locking order.
2375 *
2376 * @returns VINF_SUCCESS, VERR_SEM_LV_WRONG_ORDER or VERR_SEM_LV_INTERNAL_ERROR.
2377 * @param pClass The lock class.
2378 * @param uSubClass The lock sub-class.
2379 * @param pThreadSelf The current thread.
2380 * @param pRec The lock record.
2381 * @param pSrcPos The source position of the locking operation.
2382 */
2383static int rtLockValidatorStackCheckLockingOrder(RTLOCKVALCLASSINT * const pClass, uint32_t const uSubClass,
2384 PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION const pRec,
2385 PCRTLOCKVALSRCPOS pSrcPos)
2386{
2387 /*
2388 * Some internal paranoia first.
2389 */
2390 AssertPtr(pClass);
2391 Assert(pClass->u32Magic == RTLOCKVALCLASS_MAGIC);
2392 AssertPtr(pThreadSelf);
2393 Assert(pThreadSelf->u32Magic == RTTHREADINT_MAGIC);
2394 AssertPtr(pRec);
2395 AssertPtrNull(pSrcPos);
2396
2397 /*
2398 * Walk the stack, delegate problems to a worker routine.
2399 */
2400 PRTLOCKVALRECUNION pCur = pThreadSelf->LockValidator.pStackTop;
2401 if (!pCur)
2402 return VINF_SUCCESS;
2403
2404 for (;;)
2405 {
2406 AssertPtrReturn(pCur, VERR_SEM_LV_INTERNAL_ERROR);
2407
2408 if (pCur->Core.u32Magic == RTLOCKVALRECNEST_MAGIC)
2409 pCur = pCur->Nest.pDown;
2410 else
2411 {
2412 uint32_t uPriorSubClass;
2413 PRTLOCKVALRECUNION pDown;
2414 RTLOCKVALCLASSINT *pPriorClass = rtLockValidatorRecGetClassesAndDown(pCur, &uPriorSubClass, &pDown);
2415 if (pPriorClass != NIL_RTLOCKVALCLASS)
2416 {
2417 AssertPtrReturn(pPriorClass, VERR_SEM_LV_INTERNAL_ERROR);
2418 AssertReturn(pPriorClass->u32Magic == RTLOCKVALCLASS_MAGIC, VERR_SEM_LV_INTERNAL_ERROR);
2419 if (RT_UNLIKELY(!rtLockValidatorIsClassOrderOk(pClass, uSubClass, pPriorClass, uPriorSubClass)))
2420 return rtLockValidatorStackCheckLockingOrder2(pClass, uSubClass, pThreadSelf, pRec, pSrcPos,
2421 pPriorClass, pCur, pDown);
2422 }
2423 pCur = pDown;
2424 }
2425 if (!pCur)
2426 return VINF_SUCCESS;
2427 }
2428}
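
/* A sketch of how order rules are normally established with the public API
   (illustrative; check iprt/lockvalidator.h for the exact prototypes):
        RTLOCKVALCLASS hClassA, hClassB;
        RTLockValidatorClassCreate(&hClassA, true, RT_SRC_POS, "A");  // fAutodidact = true
        RTLockValidatorClassCreate(&hClassB, true, RT_SRC_POS, "B");
        RTLockValidatorClassAddPriorClass(hClassB, hClassA);          // A must come before B
   A thread holding a B-classified lock that then tries to acquire an
   A-classified one fails the walk above with VERR_SEM_LV_WRONG_ORDER. */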
2429
2430
2431/**
2432 * Check that the lock record is the topmost one on the stack, complain and fail
2433 * if it isn't.
2434 *
2435 * @returns VINF_SUCCESS, VERR_SEM_LV_WRONG_RELEASE_ORDER or
2436 * VERR_SEM_LV_INVALID_PARAMETER.
2437 * @param pThreadSelf The current thread.
2438 * @param pRec The record.
2439 */
2440static int rtLockValidatorStackCheckReleaseOrder(PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION pRec)
2441{
2442 AssertReturn(pThreadSelf != NIL_RTTHREAD, VERR_SEM_LV_INVALID_PARAMETER);
2443 Assert(pThreadSelf == RTThreadSelf());
2444
2445 PRTLOCKVALRECUNION pTop = pThreadSelf->LockValidator.pStackTop;
2446 if (RT_LIKELY( pTop == pRec
2447 || ( pTop
2448 && pTop->Core.u32Magic == RTLOCKVALRECNEST_MAGIC
2449 && pTop->Nest.pRec == pRec) ))
2450 return VINF_SUCCESS;
2451
2452#ifdef RTLOCKVAL_WITH_RECURSION_RECORDS
2453 /* Look for a recursion record so the right frame is dumped and marked. */
2454 while (pTop)
2455 {
2456 if (pTop->Core.u32Magic == RTLOCKVALRECNEST_MAGIC)
2457 {
2458 if (pTop->Nest.pRec == pRec)
2459 {
2460 pRec = pTop;
2461 break;
2462 }
2463 pTop = pTop->Nest.pDown;
2464 }
2465 else if (pTop->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC)
2466 pTop = pTop->Excl.pDown;
2467 else if (pTop->Core.u32Magic == RTLOCKVALRECSHRDOWN_MAGIC)
2468 pTop = pTop->ShrdOwner.pDown;
2469 else
2470 break;
2471 }
2472#endif
2473
2474 rtLockValComplainFirst("Wrong release order!", NULL, pThreadSelf, pRec, true);
2475 rtLockValComplainPanic();
2476 return !g_fLockValSoftWrongOrder ? VERR_SEM_LV_WRONG_RELEASE_ORDER : VINF_SUCCESS;
2477}
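
/* Scenario rejected above for classes with fStrictReleaseOrder set
   (illustrative): with a stack of
        pStackTop -> B -> A
   releasing A before B yields VERR_SEM_LV_WRONG_RELEASE_ORDER (softened to
   VINF_SUCCESS when g_fLockValSoftWrongOrder is set); such classes must
   release in strict LIFO order, B before A. */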
2478
2479
2480/**
2481 * Checks if all owners are blocked - for shared records operating in signaller mode.
2482 *
2483 * @returns true / false accordingly.
2484 * @param pRec The record.
2485 * @param pThreadSelf The current thread.
2486 */
2487DECL_FORCE_INLINE(bool) rtLockValidatorDdAreAllThreadsBlocked(PRTLOCKVALRECSHRD pRec, PRTTHREADINT pThreadSelf)
2488{
2489 PRTLOCKVALRECSHRDOWN volatile *papOwners = pRec->papOwners;
2490 uint32_t cAllocated = pRec->cAllocated;
2491 uint32_t cEntries = ASMAtomicUoReadU32(&pRec->cEntries);
2492 if (cEntries == 0)
2493 return false;
2494
2495 for (uint32_t i = 0; i < cAllocated; i++)
2496 {
2497 PRTLOCKVALRECSHRDOWN pEntry = rtLockValidatorUoReadSharedOwner(&papOwners[i]);
2498 if ( pEntry
2499 && pEntry->Core.u32Magic == RTLOCKVALRECSHRDOWN_MAGIC)
2500 {
2501 PRTTHREADINT pCurThread = rtLockValidatorReadThreadHandle(&pEntry->hThread);
2502 if (!pCurThread)
2503 return false;
2504 if (pCurThread->u32Magic != RTTHREADINT_MAGIC)
2505 return false;
2506 if ( !RTTHREAD_IS_SLEEPING(rtThreadGetState(pCurThread))
2507 && pCurThread != pThreadSelf)
2508 return false;
2509 if (--cEntries == 0)
2510 break;
2511 }
2512 else
2513 Assert(!pEntry || pEntry->Core.u32Magic == RTLOCKVALRECSHRDOWN_MAGIC_DEAD);
2514 }
2515
2516 return true;
2517}
2518
2519
2520/**
2521 * Verifies the deadlock stack before calling it a deadlock.
2522 *
2523 * @retval VERR_SEM_LV_DEADLOCK if it's a deadlock.
2524 * @retval VERR_SEM_LV_ILLEGAL_UPGRADE if it's a deadlock on the same lock.
2525 * @retval VERR_TRY_AGAIN if something changed.
2526 *
2527 * @param pStack The deadlock detection stack.
2528 * @param pThreadSelf The current thread.
2529 */
2530static int rtLockValidatorDdVerifyDeadlock(PRTLOCKVALDDSTACK pStack, PRTTHREADINT pThreadSelf)
2531{
2532 uint32_t const c = pStack->c;
2533 for (uint32_t iPass = 0; iPass < 3; iPass++)
2534 {
2535 for (uint32_t i = 1; i < c; i++)
2536 {
2537 PRTTHREADINT pThread = pStack->a[i].pThread;
2538 if (pThread->u32Magic != RTTHREADINT_MAGIC)
2539 return VERR_TRY_AGAIN;
2540 if (rtThreadGetState(pThread) != pStack->a[i].enmState)
2541 return VERR_TRY_AGAIN;
2542 if (rtLockValidatorReadRecUnionPtr(&pThread->LockValidator.pRec) != pStack->a[i].pFirstSibling)
2543 return VERR_TRY_AGAIN;
2544 /* ASSUMES the signaller records won't have siblings! */
2545 PRTLOCKVALRECUNION pRec = pStack->a[i].pRec;
2546 if ( pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
2547 && pRec->Shared.fSignaller
2548 && !rtLockValidatorDdAreAllThreadsBlocked(&pRec->Shared, pThreadSelf))
2549 return VERR_TRY_AGAIN;
2550 }
2551 RTThreadYield();
2552 }
2553
2554 if (c == 1)
2555 return VERR_SEM_LV_ILLEGAL_UPGRADE;
2556 return VERR_SEM_LV_DEADLOCK;
2557}
2558
2559
2560/**
2561 * Checks for stack cycles caused by another deadlock before returning.
2562 *
2563 * @retval VINF_SUCCESS if the stack is simply too small.
2564 * @retval VERR_SEM_LV_EXISTING_DEADLOCK if a cycle was detected.
2565 *
2566 * @param pStack The deadlock detection stack.
2567 */
2568static int rtLockValidatorDdHandleStackOverflow(PRTLOCKVALDDSTACK pStack)
2569{
2570 for (size_t i = 0; i < RT_ELEMENTS(pStack->a) - 1; i++)
2571 {
2572 PRTTHREADINT pThread = pStack->a[i].pThread;
2573 for (size_t j = i + 1; j < RT_ELEMENTS(pStack->a); j++)
2574 if (pStack->a[j].pThread == pThread)
2575 return VERR_SEM_LV_EXISTING_DEADLOCK;
2576 }
2577 static bool volatile s_fComplained = false;
2578 if (!s_fComplained)
2579 {
2580 s_fComplained = true;
2581 rtLockValComplain(RT_SRC_POS, "lock validator stack is too small! (%zu entries)\n", RT_ELEMENTS(pStack->a));
2582 }
2583 return VINF_SUCCESS;
2584}
2585
2586
2587/**
2588 * Worker for rtLockValidatorDeadlockDetection that does the actual deadlock
2589 * detection.
2590 *
2591 * @retval VINF_SUCCESS
2592 * @retval VERR_SEM_LV_DEADLOCK
2593 * @retval VERR_SEM_LV_EXISTING_DEADLOCK
2594 * @retval VERR_SEM_LV_ILLEGAL_UPGRADE
2595 * @retval VERR_TRY_AGAIN
2596 *
2597 * @param pStack The stack to use.
2598 * @param pOriginalRec The original record.
2599 * @param pThreadSelf The calling thread.
2600 */
2601static int rtLockValidatorDdDoDetection(PRTLOCKVALDDSTACK pStack, PRTLOCKVALRECUNION const pOriginalRec,
2602 PRTTHREADINT const pThreadSelf)
2603{
2604 pStack->c = 0;
2605
2606    /* We could use a single RTLOCKVALDDENTRY variable here, but the
2607       compiler may do a better job of it when using individual variables. */
2608 PRTLOCKVALRECUNION pRec = pOriginalRec;
2609 PRTLOCKVALRECUNION pFirstSibling = pOriginalRec;
2610 uint32_t iEntry = UINT32_MAX;
2611 PRTTHREADINT pThread = NIL_RTTHREAD;
2612 RTTHREADSTATE enmState = RTTHREADSTATE_RUNNING;
2613 for (uint32_t iLoop = 0; ; iLoop++)
2614 {
2615 /*
2616 * Process the current record.
2617 */
2618 RTLOCKVAL_ASSERT_PTR_ALIGN(pRec);
2619
2620 /* Find the next relevant owner thread and record. */
2621 PRTLOCKVALRECUNION pNextRec = NULL;
2622 RTTHREADSTATE enmNextState = RTTHREADSTATE_RUNNING;
2623 PRTTHREADINT pNextThread = NIL_RTTHREAD;
2624 switch (pRec->Core.u32Magic)
2625 {
2626 case RTLOCKVALRECEXCL_MAGIC:
2627 Assert(iEntry == UINT32_MAX);
2628 for (;;)
2629 {
2630 pNextThread = rtLockValidatorReadThreadHandle(&pRec->Excl.hThread);
2631 if ( !pNextThread
2632 || pNextThread->u32Magic != RTTHREADINT_MAGIC)
2633 break;
2634 enmNextState = rtThreadGetState(pNextThread);
2635 if ( !RTTHREAD_IS_SLEEPING(enmNextState)
2636 && pNextThread != pThreadSelf)
2637 break;
2638 pNextRec = rtLockValidatorReadRecUnionPtr(&pNextThread->LockValidator.pRec);
2639 if (RT_LIKELY( !pNextRec
2640 || enmNextState == rtThreadGetState(pNextThread)))
2641 break;
2642 pNextRec = NULL;
2643 }
2644 if (!pNextRec)
2645 {
2646 pRec = pRec->Excl.pSibling;
2647 if ( pRec
2648 && pRec != pFirstSibling)
2649 continue;
2650 pNextThread = NIL_RTTHREAD;
2651 }
2652 break;
2653
2654 case RTLOCKVALRECSHRD_MAGIC:
2655 if (!pRec->Shared.fSignaller)
2656 {
2657 /* Skip to the next sibling if same side. ASSUMES reader priority. */
2658 /** @todo The read side of a read-write lock is problematic if
2659 * the implementation prioritizes writers over readers because
2660                 * that means we could deadlock against current readers
2661 * if a writer showed up. If the RW sem implementation is
2662 * wrapping some native API, it's not so easy to detect when we
2663 * should do this and when we shouldn't. Checking when we
2664 * shouldn't is subject to wakeup scheduling and cannot easily
2665 * be made reliable.
2666 *
2667 * At the moment we circumvent all this mess by declaring that
2668                 * readers have priority. This is TRUE on Linux, but probably
2669 * isn't on Solaris and FreeBSD. */
2670 if ( pRec == pFirstSibling
2671 && pRec->Shared.pSibling != NULL
2672 && pRec->Shared.pSibling != pFirstSibling)
2673 {
2674 pRec = pRec->Shared.pSibling;
2675 Assert(iEntry == UINT32_MAX);
2676 continue;
2677 }
2678 }
2679
2680 /* Scan the owner table for blocked owners. */
2681 if ( ASMAtomicUoReadU32(&pRec->Shared.cEntries) > 0
2682 && ( !pRec->Shared.fSignaller
2683 || iEntry != UINT32_MAX
2684 || rtLockValidatorDdAreAllThreadsBlocked(&pRec->Shared, pThreadSelf)
2685 )
2686 )
2687 {
2688 uint32_t cAllocated = pRec->Shared.cAllocated;
2689 PRTLOCKVALRECSHRDOWN volatile *papOwners = pRec->Shared.papOwners;
2690 while (++iEntry < cAllocated)
2691 {
2692 PRTLOCKVALRECSHRDOWN pEntry = rtLockValidatorUoReadSharedOwner(&papOwners[iEntry]);
2693 if (pEntry)
2694 {
2695 for (;;)
2696 {
2697 if (pEntry->Core.u32Magic != RTLOCKVALRECSHRDOWN_MAGIC)
2698 break;
2699 pNextThread = rtLockValidatorReadThreadHandle(&pEntry->hThread);
2700 if ( !pNextThread
2701 || pNextThread->u32Magic != RTTHREADINT_MAGIC)
2702 break;
2703 enmNextState = rtThreadGetState(pNextThread);
2704 if ( !RTTHREAD_IS_SLEEPING(enmNextState)
2705 && pNextThread != pThreadSelf)
2706 break;
2707 pNextRec = rtLockValidatorReadRecUnionPtr(&pNextThread->LockValidator.pRec);
2708 if (RT_LIKELY( !pNextRec
2709 || enmNextState == rtThreadGetState(pNextThread)))
2710 break;
2711 pNextRec = NULL;
2712 }
2713 if (pNextRec)
2714 break;
2715 }
2716 else
2717 Assert(!pEntry || pEntry->Core.u32Magic == RTLOCKVALRECSHRDOWN_MAGIC_DEAD);
2718 }
2719 if (pNextRec)
2720 break;
2721 pNextThread = NIL_RTTHREAD;
2722 }
2723
2724 /* Advance to the next sibling, if any. */
2725 pRec = pRec->Shared.pSibling;
2726 if ( pRec != NULL
2727 && pRec != pFirstSibling)
2728 {
2729 iEntry = UINT32_MAX;
2730 continue;
2731 }
2732 break;
2733
2734 case RTLOCKVALRECEXCL_MAGIC_DEAD:
2735 case RTLOCKVALRECSHRD_MAGIC_DEAD:
2736 break;
2737
2738 case RTLOCKVALRECSHRDOWN_MAGIC:
2739 case RTLOCKVALRECSHRDOWN_MAGIC_DEAD:
2740 default:
2741                AssertMsgFailed(("%p: %#x\n", pRec, pRec->Core.u32Magic));
2742 break;
2743 }
2744
2745 if (pNextRec)
2746 {
2747 /*
2748 * Recurse and check for deadlock.
2749 */
2750 uint32_t i = pStack->c;
2751 if (RT_UNLIKELY(i >= RT_ELEMENTS(pStack->a)))
2752 return rtLockValidatorDdHandleStackOverflow(pStack);
2753
2754 pStack->c++;
2755 pStack->a[i].pRec = pRec;
2756 pStack->a[i].iEntry = iEntry;
2757 pStack->a[i].enmState = enmState;
2758 pStack->a[i].pThread = pThread;
2759 pStack->a[i].pFirstSibling = pFirstSibling;
2760
2761 if (RT_UNLIKELY( pNextThread == pThreadSelf
2762 && ( i != 0
2763 || pRec->Core.u32Magic != RTLOCKVALRECSHRD_MAGIC
2764 || !pRec->Shared.fSignaller) /* ASSUMES signaller records have no siblings. */
2765 )
2766 )
2767 return rtLockValidatorDdVerifyDeadlock(pStack, pThreadSelf);
2768
2769 pRec = pNextRec;
2770 pFirstSibling = pNextRec;
2771 iEntry = UINT32_MAX;
2772 enmState = enmNextState;
2773 pThread = pNextThread;
2774 }
2775 else
2776 {
2777 /*
2778 * No deadlock here, unwind the stack and deal with any unfinished
2779 * business there.
2780 */
2781 uint32_t i = pStack->c;
2782 for (;;)
2783 {
2784 /* pop */
2785 if (i == 0)
2786 return VINF_SUCCESS;
2787 i--;
2788 pRec = pStack->a[i].pRec;
2789 iEntry = pStack->a[i].iEntry;
2790
2791 /* Examine it. */
2792 uint32_t u32Magic = pRec->Core.u32Magic;
2793 if (u32Magic == RTLOCKVALRECEXCL_MAGIC)
2794 pRec = pRec->Excl.pSibling;
2795 else if (u32Magic == RTLOCKVALRECSHRD_MAGIC)
2796 {
2797 if (iEntry + 1 < pRec->Shared.cAllocated)
2798 break; /* continue processing this record. */
2799 pRec = pRec->Shared.pSibling;
2800 }
2801 else
2802 {
2803 Assert( u32Magic == RTLOCKVALRECEXCL_MAGIC_DEAD
2804 || u32Magic == RTLOCKVALRECSHRD_MAGIC_DEAD);
2805 continue;
2806 }
2807
2808 /* Any next record to advance to? */
2809 if ( !pRec
2810 || pRec == pStack->a[i].pFirstSibling)
2811 continue;
2812 iEntry = UINT32_MAX;
2813 break;
2814 }
2815
2816 /* Restore the rest of the state and update the stack. */
2817 pFirstSibling = pStack->a[i].pFirstSibling;
2818 enmState = pStack->a[i].enmState;
2819 pThread = pStack->a[i].pThread;
2820 pStack->c = i;
2821 }
2822
2823 Assert(iLoop != 1000000);
2824 }
2825}
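
/* Shape of the wait-for graph the worker above walks (illustrative): each
   sleeping thread points at the record it is waiting on, and each record at
   its owner(s):
        thread A --waits on--> lock X --owned by--> thread B
        thread B --waits on--> lock Y --owned by--> thread A    <= cycle!
   The stack entries record the DFS path through this graph; running into
   pThreadSelf again closes the cycle and triggers deadlock verification. */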
2826
2827
2828/**
2829 * Check for the simple no-deadlock case.
2830 *
2831 * @returns true if no deadlock, false if further investigation is required.
2832 *
2833 * @param pOriginalRec The original record.
2834 */
2835DECLINLINE(bool) rtLockValidatorIsSimpleNoDeadlockCase(PRTLOCKVALRECUNION pOriginalRec)
2836{
2837 if ( pOriginalRec->Excl.Core.u32Magic == RTLOCKVALRECEXCL_MAGIC
2838 && !pOriginalRec->Excl.pSibling)
2839 {
2840 PRTTHREADINT pThread = rtLockValidatorReadThreadHandle(&pOriginalRec->Excl.hThread);
2841 if ( !pThread
2842 || pThread->u32Magic != RTTHREADINT_MAGIC)
2843 return true;
2844 RTTHREADSTATE enmState = rtThreadGetState(pThread);
2845 if (!RTTHREAD_IS_SLEEPING(enmState))
2846 return true;
2847 }
2848 return false;
2849}
2850
2851
2852/**
2853 * Worker for rtLockValidatorDeadlockDetection that bitches about a deadlock.
2854 *
2855 * @param pStack The chain of locks causing the deadlock.
2856 * @param pRec The record relating to the current thread's lock
2857 * operation.
2858 * @param pThreadSelf This thread.
2859 * @param pSrcPos Where we are going to deadlock.
2860 * @param rc The return code.
2861 */
2862static void rcLockValidatorDoDeadlockComplaining(PRTLOCKVALDDSTACK pStack, PRTLOCKVALRECUNION pRec,
2863 PRTTHREADINT pThreadSelf, PCRTLOCKVALSRCPOS pSrcPos, int rc)
2864{
2865 if (!ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
2866 {
2867 const char *pszWhat;
2868 switch (rc)
2869 {
2870 case VERR_SEM_LV_DEADLOCK: pszWhat = "Detected deadlock!"; break;
2871 case VERR_SEM_LV_EXISTING_DEADLOCK: pszWhat = "Found existing deadlock!"; break;
2872 case VERR_SEM_LV_ILLEGAL_UPGRADE: pszWhat = "Illegal lock upgrade!"; break;
2873 default: AssertFailed(); pszWhat = "!unexpected rc!"; break;
2874 }
2875 rtLockValComplainFirst(pszWhat, pSrcPos, pThreadSelf, pStack->a[0].pRec != pRec ? pRec : NULL, true);
2876 rtLockValComplainMore("---- start of deadlock chain - %u entries ----\n", pStack->c);
2877 for (uint32_t i = 0; i < pStack->c; i++)
2878 {
2879 char szPrefix[24];
2880 RTStrPrintf(szPrefix, sizeof(szPrefix), "#%02u: ", i);
2881 PRTLOCKVALRECUNION pShrdOwner = NULL;
2882 if (pStack->a[i].pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC)
2883 pShrdOwner = (PRTLOCKVALRECUNION)pStack->a[i].pRec->Shared.papOwners[pStack->a[i].iEntry];
2884 if (VALID_PTR(pShrdOwner) && pShrdOwner->Core.u32Magic == RTLOCKVALRECSHRDOWN_MAGIC)
2885 {
2886 rtLockValComplainAboutLock(szPrefix, pShrdOwner, "\n");
2887 rtLockValComplainAboutLockStack(pShrdOwner->ShrdOwner.hThread, 5, 2, pShrdOwner);
2888 }
2889 else
2890 {
2891 rtLockValComplainAboutLock(szPrefix, pStack->a[i].pRec, "\n");
2892 if (pStack->a[i].pRec->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC)
2893 rtLockValComplainAboutLockStack(pStack->a[i].pRec->Excl.hThread, 5, 2, pStack->a[i].pRec);
2894 }
2895 }
2896 rtLockValComplainMore("---- end of deadlock chain ----\n");
2897 }
2898
2899 rtLockValComplainPanic();
2900}
2901
2902
2903/**
2904 * Perform deadlock detection.
2905 *
2906 * @retval VINF_SUCCESS
2907 * @retval VERR_SEM_LV_DEADLOCK
2908 * @retval VERR_SEM_LV_EXISTING_DEADLOCK
2909 * @retval VERR_SEM_LV_ILLEGAL_UPGRADE
2910 *
2911 * @param pRec The record relating to the current thread's lock
2912 * operation.
2913 * @param pThreadSelf The current thread.
2914 * @param pSrcPos The position of the current lock operation.
2915 */
2916static int rtLockValidatorDeadlockDetection(PRTLOCKVALRECUNION pRec, PRTTHREADINT pThreadSelf, PCRTLOCKVALSRCPOS pSrcPos)
2917{
2918 RTLOCKVALDDSTACK Stack;
2919 int rc = rtLockValidatorDdDoDetection(&Stack, pRec, pThreadSelf);
2920 if (RT_SUCCESS(rc))
2921 return VINF_SUCCESS;
2922
2923 if (rc == VERR_TRY_AGAIN)
2924 {
2925 for (uint32_t iLoop = 0; ; iLoop++)
2926 {
2927 rc = rtLockValidatorDdDoDetection(&Stack, pRec, pThreadSelf);
2928 if (RT_SUCCESS_NP(rc))
2929 return VINF_SUCCESS;
2930 if (rc != VERR_TRY_AGAIN)
2931 break;
2932 RTThreadYield();
2933 if (iLoop >= 3)
2934 return VINF_SUCCESS;
2935 }
2936 }
2937
2938 rcLockValidatorDoDeadlockComplaining(&Stack, pRec, pThreadSelf, pSrcPos, rc);
2939 return rc;
2940}
2941
2942
2943RTDECL(void) RTLockValidatorRecExclInitV(PRTLOCKVALRECEXCL pRec, RTLOCKVALCLASS hClass, uint32_t uSubClass,
2944 void *hLock, bool fEnabled, const char *pszNameFmt, va_list va)
2945{
2946 RTLOCKVAL_ASSERT_PTR_ALIGN(pRec);
2947 RTLOCKVAL_ASSERT_PTR_ALIGN(hLock);
2948 Assert( uSubClass >= RTLOCKVAL_SUB_CLASS_USER
2949 || uSubClass == RTLOCKVAL_SUB_CLASS_NONE
2950 || uSubClass == RTLOCKVAL_SUB_CLASS_ANY);
2951
2952 pRec->Core.u32Magic = RTLOCKVALRECEXCL_MAGIC;
2953 pRec->fEnabled = fEnabled && RTLockValidatorIsEnabled();
2954 pRec->afReserved[0] = 0;
2955 pRec->afReserved[1] = 0;
2956 pRec->afReserved[2] = 0;
2957 rtLockValidatorSrcPosInit(&pRec->SrcPos);
2958 pRec->hThread = NIL_RTTHREAD;
2959 pRec->pDown = NULL;
2960 pRec->hClass = rtLockValidatorClassValidateAndRetain(hClass);
2961 pRec->uSubClass = uSubClass;
2962 pRec->cRecursion = 0;
2963 pRec->hLock = hLock;
2964 pRec->pSibling = NULL;
2965 if (pszNameFmt)
2966 RTStrPrintfV(pRec->szName, sizeof(pRec->szName), pszNameFmt, va);
2967 else
2968 {
2969 static uint32_t volatile s_cAnonymous = 0;
2970 uint32_t i = ASMAtomicIncU32(&s_cAnonymous) - 1;
2971 RTStrPrintf(pRec->szName, sizeof(pRec->szName), "anon-excl-%u", i);
2972 }
2973
2974 /* Lazy initialization. */
2975 if (RT_UNLIKELY(g_hLockValidatorXRoads == NIL_RTSEMXROADS))
2976 rtLockValidatorLazyInit();
2977}
2978
2979
2980RTDECL(void) RTLockValidatorRecExclInit(PRTLOCKVALRECEXCL pRec, RTLOCKVALCLASS hClass, uint32_t uSubClass,
2981 void *hLock, bool fEnabled, const char *pszNameFmt, ...)
2982{
2983 va_list va;
2984 va_start(va, pszNameFmt);
2985 RTLockValidatorRecExclInitV(pRec, hClass, uSubClass, hLock, fEnabled, pszNameFmt, va);
2986 va_end(va);
2987}
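
/* Typical embedding of an exclusive record in a lock wrapper (a sketch; the
   MYMUTEX structure and its members are made up for illustration):
        typedef struct MYMUTEX
        {
            RTSEMMUTEX       hMtx;
            RTLOCKVALRECEXCL ValidatorRec;
        } MYMUTEX;

        RTLockValidatorRecExclInit(&pThis->ValidatorRec, NIL_RTLOCKVALCLASS,
                                   RTLOCKVAL_SUB_CLASS_NONE, pThis,
                                   true,                // fEnabled
                                   "mymutex-%p", pThis); */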
2988
2989
2990RTDECL(int) RTLockValidatorRecExclCreateV(PRTLOCKVALRECEXCL *ppRec, RTLOCKVALCLASS hClass,
2991 uint32_t uSubClass, void *pvLock, bool fEnabled,
2992 const char *pszNameFmt, va_list va)
2993{
2994 PRTLOCKVALRECEXCL pRec;
2995 *ppRec = pRec = (PRTLOCKVALRECEXCL)RTMemAlloc(sizeof(*pRec));
2996 if (!pRec)
2997 return VERR_NO_MEMORY;
2998 RTLockValidatorRecExclInitV(pRec, hClass, uSubClass, pvLock, fEnabled, pszNameFmt, va);
2999 return VINF_SUCCESS;
3000}
3001
3002
3003RTDECL(int) RTLockValidatorRecExclCreate(PRTLOCKVALRECEXCL *ppRec, RTLOCKVALCLASS hClass,
3004 uint32_t uSubClass, void *pvLock, bool fEnabled,
3005 const char *pszNameFmt, ...)
3006{
3007 va_list va;
3008 va_start(va, pszNameFmt);
3009 int rc = RTLockValidatorRecExclCreateV(ppRec, hClass, uSubClass, pvLock, fEnabled, pszNameFmt, va);
3010 va_end(va);
3011 return rc;
3012}
3013
3014
3015RTDECL(void) RTLockValidatorRecExclDelete(PRTLOCKVALRECEXCL pRec)
3016{
3017 Assert(pRec->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC);
3018
3019 rtLockValidatorSerializeDestructEnter();
3020
3021 ASMAtomicWriteU32(&pRec->Core.u32Magic, RTLOCKVALRECEXCL_MAGIC_DEAD);
3022 ASMAtomicWriteHandle(&pRec->hThread, NIL_RTTHREAD);
3023 RTLOCKVALCLASS hClass;
3024 ASMAtomicXchgHandle(&pRec->hClass, NIL_RTLOCKVALCLASS, &hClass);
3025 if (pRec->pSibling)
3026 rtLockValidatorUnlinkAllSiblings(&pRec->Core);
3027 rtLockValidatorSerializeDestructLeave();
3028 if (hClass != NIL_RTLOCKVALCLASS)
3029 RTLockValidatorClassRelease(hClass);
3030}
3031
3032
3033RTDECL(void) RTLockValidatorRecExclDestroy(PRTLOCKVALRECEXCL *ppRec)
3034{
3035 PRTLOCKVALRECEXCL pRec = *ppRec;
3036 *ppRec = NULL;
3037 if (pRec)
3038 {
3039 RTLockValidatorRecExclDelete(pRec);
3040 RTMemFree(pRec);
3041 }
3042}
3043
3044
3045RTDECL(uint32_t) RTLockValidatorRecExclSetSubClass(PRTLOCKVALRECEXCL pRec, uint32_t uSubClass)
3046{
3047 AssertPtrReturn(pRec, RTLOCKVAL_SUB_CLASS_INVALID);
3048 AssertReturn(pRec->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, RTLOCKVAL_SUB_CLASS_INVALID);
3049 AssertReturn( uSubClass >= RTLOCKVAL_SUB_CLASS_USER
3050 || uSubClass == RTLOCKVAL_SUB_CLASS_NONE
3051 || uSubClass == RTLOCKVAL_SUB_CLASS_ANY,
3052 RTLOCKVAL_SUB_CLASS_INVALID);
3053 return ASMAtomicXchgU32(&pRec->uSubClass, uSubClass);
3054}
3055
3056
3057RTDECL(void) RTLockValidatorRecExclSetOwner(PRTLOCKVALRECEXCL pRec, RTTHREAD hThreadSelf,
3058 PCRTLOCKVALSRCPOS pSrcPos, bool fFirstRecursion)
3059{
3060 PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
3061 if (!pRecU)
3062 return;
3063 AssertReturnVoid(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC);
3064 if (!pRecU->Excl.fEnabled)
3065 return;
3066 if (hThreadSelf == NIL_RTTHREAD)
3067 {
3068 hThreadSelf = RTThreadSelfAutoAdopt();
3069 AssertReturnVoid(hThreadSelf != NIL_RTTHREAD);
3070 }
3071 AssertReturnVoid(hThreadSelf->u32Magic == RTTHREADINT_MAGIC);
3072 Assert(hThreadSelf == RTThreadSelf());
3073
3074 ASMAtomicIncS32(&hThreadSelf->LockValidator.cWriteLocks);
3075
3076 if (pRecU->Excl.hThread == hThreadSelf)
3077 {
3078 Assert(!fFirstRecursion);
3079 pRecU->Excl.cRecursion++;
3080 rtLockValidatorStackPushRecursion(hThreadSelf, pRecU, pSrcPos);
3081 }
3082 else
3083 {
3084 Assert(pRecU->Excl.hThread == NIL_RTTHREAD);
3085
3086 rtLockValidatorSrcPosCopy(&pRecU->Excl.SrcPos, pSrcPos);
3087 ASMAtomicUoWriteU32(&pRecU->Excl.cRecursion, 1);
3088 ASMAtomicWriteHandle(&pRecU->Excl.hThread, hThreadSelf);
3089
3090 rtLockValidatorStackPush(hThreadSelf, pRecU);
3091 }
3092}
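
/* Acquire/release pairing as a lock wrapper would use it (a sketch continuing
   the hypothetical MYMUTEX example above; error handling elided):
        int rc = RTSemMutexRequest(pThis->hMtx, cMillies);
        if (RT_SUCCESS(rc))
            RTLockValidatorRecExclSetOwner(&pThis->ValidatorRec, NIL_RTTHREAD,
                                           pSrcPos, false);   // fFirstRecursion
        ...
        RTLockValidatorRecExclReleaseOwner(&pThis->ValidatorRec, false);  // fFinalRecursion
        RTSemMutexRelease(pThis->hMtx); */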
3093
3094
3095/**
3096 * Internal worker for RTLockValidatorRecExclReleaseOwner and
3097 * RTLockValidatorRecExclReleaseOwnerUnchecked.
3098 */
3099static void rtLockValidatorRecExclReleaseOwnerUnchecked(PRTLOCKVALRECUNION pRec, bool fFinalRecursion)
3100{
3101 RTTHREADINT *pThread = pRec->Excl.hThread;
3102 AssertReturnVoid(pThread != NIL_RTTHREAD);
3103 Assert(pThread == RTThreadSelf());
3104
3105 ASMAtomicDecS32(&pThread->LockValidator.cWriteLocks);
3106 uint32_t c = ASMAtomicDecU32(&pRec->Excl.cRecursion);
3107 if (c == 0)
3108 {
3109 rtLockValidatorStackPop(pThread, pRec);
3110 ASMAtomicWriteHandle(&pRec->Excl.hThread, NIL_RTTHREAD);
3111 }
3112 else
3113 {
3114 Assert(c < UINT32_C(0xffff0000));
3115 Assert(!fFinalRecursion);
3116 rtLockValidatorStackPopRecursion(pThread, pRec);
3117 }
3118}
3119
3120RTDECL(int) RTLockValidatorRecExclReleaseOwner(PRTLOCKVALRECEXCL pRec, bool fFinalRecursion)
3121{
3122 PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
3123 if (!pRecU)
3124 return VINF_SUCCESS;
3125 AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
3126 if (!pRecU->Excl.fEnabled)
3127 return VINF_SUCCESS;
3128
3129 /*
3130 * Check the release order.
3131 */
3132 if ( pRecU->Excl.hClass != NIL_RTLOCKVALCLASS
3133 && pRecU->Excl.hClass->fStrictReleaseOrder
3134 && pRecU->Excl.hClass->cMsMinOrder != RT_INDEFINITE_WAIT
3135 )
3136 {
3137 int rc = rtLockValidatorStackCheckReleaseOrder(pRecU->Excl.hThread, pRecU);
3138 if (RT_FAILURE(rc))
3139 return rc;
3140 }
3141
3142 /*
3143 * Join paths with RTLockValidatorRecExclReleaseOwnerUnchecked.
3144 */
3145 rtLockValidatorRecExclReleaseOwnerUnchecked(pRecU, fFinalRecursion);
3146 return VINF_SUCCESS;
3147}
3148
3149
3150RTDECL(void) RTLockValidatorRecExclReleaseOwnerUnchecked(PRTLOCKVALRECEXCL pRec)
3151{
3152 PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
3153 AssertReturnVoid(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC);
3154 if (pRecU->Excl.fEnabled)
3155 rtLockValidatorRecExclReleaseOwnerUnchecked(pRecU, false);
3156}
3157
3158
3159RTDECL(int) RTLockValidatorRecExclRecursion(PRTLOCKVALRECEXCL pRec, PCRTLOCKVALSRCPOS pSrcPos)
3160{
3161 PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
3162 if (!pRecU)
3163 return VINF_SUCCESS;
3164 AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
3165 if (!pRecU->Excl.fEnabled)
3166 return VINF_SUCCESS;
3167 AssertReturn(pRecU->Excl.hThread != NIL_RTTHREAD, VERR_SEM_LV_INVALID_PARAMETER);
3168 AssertReturn(pRecU->Excl.cRecursion > 0, VERR_SEM_LV_INVALID_PARAMETER);
3169
3170 if ( pRecU->Excl.hClass != NIL_RTLOCKVALCLASS
3171 && !pRecU->Excl.hClass->fRecursionOk)
3172 {
3173 rtLockValComplainFirst("Recursion not allowed by the class!",
3174 pSrcPos, pRecU->Excl.hThread, (PRTLOCKVALRECUNION)pRec, true);
3175 rtLockValComplainPanic();
3176 return VERR_SEM_LV_NESTED;
3177 }
3178
3179 Assert(pRecU->Excl.cRecursion < _1M);
3180 pRecU->Excl.cRecursion++;
3181 rtLockValidatorStackPushRecursion(pRecU->Excl.hThread, pRecU, pSrcPos);
3182 return VINF_SUCCESS;
3183}
3184
3185
3186RTDECL(int) RTLockValidatorRecExclUnwind(PRTLOCKVALRECEXCL pRec)
3187{
3188 PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
3189 AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
3190 if (!pRecU->Excl.fEnabled)
3191 return VINF_SUCCESS;
3192 AssertReturn(pRecU->Excl.hThread != NIL_RTTHREAD, VERR_SEM_LV_INVALID_PARAMETER);
3193 Assert(pRecU->Excl.hThread == RTThreadSelf());
3194 AssertReturn(pRecU->Excl.cRecursion > 1, VERR_SEM_LV_INVALID_PARAMETER);
3195
3196 /*
3197 * Check the release order.
3198 */
3199 if ( pRecU->Excl.hClass != NIL_RTLOCKVALCLASS
3200 && pRecU->Excl.hClass->fStrictReleaseOrder
3201 && pRecU->Excl.hClass->cMsMinOrder != RT_INDEFINITE_WAIT
3202 )
3203 {
3204 int rc = rtLockValidatorStackCheckReleaseOrder(pRecU->Excl.hThread, pRecU);
3205 if (RT_FAILURE(rc))
3206 return rc;
3207 }
3208
3209 /*
3210 * Perform the unwind.
3211 */
3212 pRecU->Excl.cRecursion--;
3213 rtLockValidatorStackPopRecursion(pRecU->Excl.hThread, pRecU);
3214 return VINF_SUCCESS;
3215}
3216
3217
3218RTDECL(int) RTLockValidatorRecExclRecursionMixed(PRTLOCKVALRECEXCL pRec, PRTLOCKVALRECCORE pRecMixed, PCRTLOCKVALSRCPOS pSrcPos)
3219{
3220 PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
3221 AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
3222 PRTLOCKVALRECUNION pRecMixedU = (PRTLOCKVALRECUNION)pRecMixed;
3223 AssertReturn( pRecMixedU->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
3224 || pRecMixedU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC
3225 , VERR_SEM_LV_INVALID_PARAMETER);
3226 if (!pRecU->Excl.fEnabled)
3227 return VINF_SUCCESS;
3228 Assert(pRecU->Excl.hThread == RTThreadSelf());
3229 AssertReturn(pRecU->Excl.hThread != NIL_RTTHREAD, VERR_SEM_LV_INVALID_PARAMETER);
3230 AssertReturn(pRecU->Excl.cRecursion > 0, VERR_SEM_LV_INVALID_PARAMETER);
3231
3232 if ( pRecU->Excl.hClass != NIL_RTLOCKVALCLASS
3233 && !pRecU->Excl.hClass->fRecursionOk)
3234 {
3235 rtLockValComplainFirst("Mixed recursion not allowed by the class!",
3236 pSrcPos, pRecU->Excl.hThread, (PRTLOCKVALRECUNION)pRec, true);
3237 rtLockValComplainPanic();
3238 return VERR_SEM_LV_NESTED;
3239 }
3240
3241 Assert(pRecU->Excl.cRecursion < _1M);
3242 pRecU->Excl.cRecursion++;
3243 rtLockValidatorStackPushRecursion(pRecU->Excl.hThread, pRecU, pSrcPos);
3244
3245 return VINF_SUCCESS;
3246}
3247
3248
3249RTDECL(int) RTLockValidatorRecExclUnwindMixed(PRTLOCKVALRECEXCL pRec, PRTLOCKVALRECCORE pRecMixed)
3250{
3251 PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
3252 AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
3253 PRTLOCKVALRECUNION pRecMixedU = (PRTLOCKVALRECUNION)pRecMixed;
3254 AssertReturn( pRecMixedU->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
3255 || pRecMixedU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC
3256 , VERR_SEM_LV_INVALID_PARAMETER);
3257 if (!pRecU->Excl.fEnabled)
3258 return VINF_SUCCESS;
3259 Assert(pRecU->Excl.hThread == RTThreadSelf());
3260 AssertReturn(pRecU->Excl.hThread != NIL_RTTHREAD, VERR_SEM_LV_INVALID_PARAMETER);
3261 AssertReturn(pRecU->Excl.cRecursion > 1, VERR_SEM_LV_INVALID_PARAMETER);
3262
3263 /*
3264 * Check the release order.
3265 */
3266 if ( pRecU->Excl.hClass != NIL_RTLOCKVALCLASS
3267 && pRecU->Excl.hClass->fStrictReleaseOrder
3268 && pRecU->Excl.hClass->cMsMinOrder != RT_INDEFINITE_WAIT
3269 )
3270 {
3271 int rc = rtLockValidatorStackCheckReleaseOrder(pRecU->Excl.hThread, pRecU);
3272 if (RT_FAILURE(rc))
3273 return rc;
3274 }
3275
3276 /*
3277 * Perform the unwind.
3278 */
3279 pRecU->Excl.cRecursion--;
3280 rtLockValidatorStackPopRecursion(pRecU->Excl.hThread, pRecU);
3281 return VINF_SUCCESS;
3282}
3283
3284
3285RTDECL(int) RTLockValidatorRecExclCheckOrder(PRTLOCKVALRECEXCL pRec, RTTHREAD hThreadSelf,
3286 PCRTLOCKVALSRCPOS pSrcPos, RTMSINTERVAL cMillies)
3287{
3288 /*
3289 * Validate and adjust input. Quit early if order validation is disabled.
3290 */
3291 PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
3292 if (!pRecU)
3293 return VINF_SUCCESS;
3294 AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
3295 if ( !pRecU->Excl.fEnabled
3296 || pRecU->Excl.hClass == NIL_RTLOCKVALCLASS
3297 || pRecU->Excl.hClass->cMsMinOrder == RT_INDEFINITE_WAIT
3298 || pRecU->Excl.hClass->cMsMinOrder > cMillies)
3299 return VINF_SUCCESS;
3300
3301 if (hThreadSelf == NIL_RTTHREAD)
3302 {
3303 hThreadSelf = RTThreadSelfAutoAdopt();
3304 AssertReturn(hThreadSelf != NIL_RTTHREAD, VERR_SEM_LV_INTERNAL_ERROR);
3305 }
3306 AssertReturn(hThreadSelf->u32Magic == RTTHREADINT_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
3307 Assert(hThreadSelf == RTThreadSelf());
3308
3309 /*
3310 * Detect recursion as it isn't subject to order restrictions.
3311 */
3312 if (pRec->hThread == hThreadSelf)
3313 return VINF_SUCCESS;
3314
3315 return rtLockValidatorStackCheckLockingOrder(pRecU->Excl.hClass, pRecU->Excl.uSubClass, hThreadSelf, pRecU, pSrcPos);
3316}
3317
3318
3319RTDECL(int) RTLockValidatorRecExclCheckBlocking(PRTLOCKVALRECEXCL pRec, RTTHREAD hThreadSelf,
3320 PCRTLOCKVALSRCPOS pSrcPos, bool fRecursiveOk, RTMSINTERVAL cMillies,
3321 RTTHREADSTATE enmSleepState, bool fReallySleeping)
3322{
3323 /*
3324 * Fend off wild life.
3325 */
3326 PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
3327 if (!pRecU)
3328 return VINF_SUCCESS;
3329 AssertPtrReturn(pRecU, VERR_SEM_LV_INVALID_PARAMETER);
3330 AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
3331 if (!pRec->fEnabled)
3332 return VINF_SUCCESS;
3333
3334 PRTTHREADINT pThreadSelf = hThreadSelf;
3335 AssertPtrReturn(pThreadSelf, VERR_SEM_LV_INVALID_PARAMETER);
3336 AssertReturn(pThreadSelf->u32Magic == RTTHREADINT_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
3337 Assert(pThreadSelf == RTThreadSelf());
3338
3339 AssertReturn(RTTHREAD_IS_SLEEPING(enmSleepState), VERR_SEM_LV_INVALID_PARAMETER);
3340
3341 RTTHREADSTATE enmThreadState = rtThreadGetState(pThreadSelf);
3342 if (RT_UNLIKELY(enmThreadState != RTTHREADSTATE_RUNNING))
3343 {
3344 AssertReturn( enmThreadState == RTTHREADSTATE_TERMINATED /* rtThreadRemove uses locks too */
3345 || enmThreadState == RTTHREADSTATE_INITIALIZING /* rtThreadInsert uses locks too */
3346 , VERR_SEM_LV_INVALID_PARAMETER);
3347 enmSleepState = enmThreadState;
3348 }
3349
3350 /*
3351 * Record the location.
3352 */
3353 rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pRec, pRecU);
3354 rtLockValidatorSrcPosCopy(&pThreadSelf->LockValidator.SrcPos, pSrcPos);
3355 ASMAtomicWriteBool(&pThreadSelf->LockValidator.fInValidator, true);
3356 pThreadSelf->LockValidator.enmRecState = enmSleepState;
3357 rtThreadSetState(pThreadSelf, enmSleepState);
3358
3359 /*
3360 * Don't do deadlock detection if we're recursing.
3361 *
3362     * On some hosts we don't do recursion accounting ourselves and there
3363 * isn't any other place to check for this.
3364 */
3365 int rc = VINF_SUCCESS;
3366 if (rtLockValidatorReadThreadHandle(&pRecU->Excl.hThread) == pThreadSelf)
3367 {
3368 if ( !fRecursiveOk
3369 || ( pRecU->Excl.hClass != NIL_RTLOCKVALCLASS
3370 && !pRecU->Excl.hClass->fRecursionOk))
3371 {
3372 rtLockValComplainFirst("Recursion not allowed!", pSrcPos, pThreadSelf, pRecU, true);
3373 rtLockValComplainPanic();
3374 rc = VERR_SEM_LV_NESTED;
3375 }
3376 }
3377 /*
3378 * Perform deadlock detection.
3379 */
3380 else if ( pRecU->Excl.hClass != NIL_RTLOCKVALCLASS
3381 && ( pRecU->Excl.hClass->cMsMinDeadlock > cMillies
3382                 || pRecU->Excl.hClass->cMsMinDeadlock == RT_INDEFINITE_WAIT))
3383 rc = VINF_SUCCESS;
3384 else if (!rtLockValidatorIsSimpleNoDeadlockCase(pRecU))
3385 rc = rtLockValidatorDeadlockDetection(pRecU, pThreadSelf, pSrcPos);
3386
3387 if (RT_SUCCESS(rc))
3388 ASMAtomicWriteBool(&pThreadSelf->fReallySleeping, fReallySleeping);
3389 else
3390 {
3391 rtThreadSetState(pThreadSelf, enmThreadState);
3392 rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pRec, NULL);
3393 }
3394 ASMAtomicWriteBool(&pThreadSelf->LockValidator.fInValidator, false);
3395 return rc;
3396}
3397RT_EXPORT_SYMBOL(RTLockValidatorRecExclCheckBlocking);
3398
3399
3400RTDECL(int) RTLockValidatorRecExclCheckOrderAndBlocking(PRTLOCKVALRECEXCL pRec, RTTHREAD hThreadSelf,
3401 PCRTLOCKVALSRCPOS pSrcPos, bool fRecursiveOk, RTMSINTERVAL cMillies,
3402 RTTHREADSTATE enmSleepState, bool fReallySleeping)
3403{
3404 int rc = RTLockValidatorRecExclCheckOrder(pRec, hThreadSelf, pSrcPos, cMillies);
3405 if (RT_SUCCESS(rc))
3406 rc = RTLockValidatorRecExclCheckBlocking(pRec, hThreadSelf, pSrcPos, fRecursiveOk, cMillies,
3407 enmSleepState, fReallySleeping);
3408 return rc;
3409}
3410RT_EXPORT_SYMBOL(RTLockValidatorRecExclCheckOrderAndBlocking);
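
/* How a blocking acquisition path typically drives the combined check (a
   sketch loosely modeled on the IPRT lock wrappers; details vary):
        RTTHREAD hThreadSelf = RTThreadSelfAutoAdopt();
        int rc = RTLockValidatorRecExclCheckOrderAndBlocking(&pThis->ValidatorRec,
                                                             hThreadSelf, pSrcPos,
                                                             true,        // fRecursiveOk
                                                             cMillies,
                                                             RTTHREADSTATE_MUTEX,
                                                             true);       // fReallySleeping
        if (RT_FAILURE(rc))
            return rc;   // wrong order or deadlock detected - don't block
        rc = RTSemEventWait(pThis->hEvt, cMillies);
        RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_MUTEX); */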


RTDECL(void) RTLockValidatorRecSharedInitV(PRTLOCKVALRECSHRD pRec, RTLOCKVALCLASS hClass, uint32_t uSubClass,
                                           void *hLock, bool fSignaller, bool fEnabled, const char *pszNameFmt, va_list va)
{
    RTLOCKVAL_ASSERT_PTR_ALIGN(pRec);
    RTLOCKVAL_ASSERT_PTR_ALIGN(hLock);
    Assert(   uSubClass >= RTLOCKVAL_SUB_CLASS_USER
           || uSubClass == RTLOCKVAL_SUB_CLASS_NONE
           || uSubClass == RTLOCKVAL_SUB_CLASS_ANY);

    pRec->Core.u32Magic = RTLOCKVALRECSHRD_MAGIC;
    pRec->uSubClass     = uSubClass;
    pRec->hClass        = rtLockValidatorClassValidateAndRetain(hClass);
    pRec->hLock         = hLock;
    pRec->fEnabled      = fEnabled && RTLockValidatorIsEnabled();
    pRec->fSignaller    = fSignaller;
    pRec->pSibling      = NULL;

    /* the table */
    pRec->cEntries      = 0;
    pRec->iLastEntry    = 0;
    pRec->cAllocated    = 0;
    pRec->fReallocating = false;
    pRec->fPadding      = false;
    pRec->papOwners     = NULL;

    /* the name */
    if (pszNameFmt)
        RTStrPrintfV(pRec->szName, sizeof(pRec->szName), pszNameFmt, va);
    else
    {
        static uint32_t volatile s_cAnonymous = 0;
        uint32_t i = ASMAtomicIncU32(&s_cAnonymous) - 1;
        RTStrPrintf(pRec->szName, sizeof(pRec->szName), "anon-shrd-%u", i);
    }
}


RTDECL(void) RTLockValidatorRecSharedInit(PRTLOCKVALRECSHRD pRec, RTLOCKVALCLASS hClass, uint32_t uSubClass,
                                          void *hLock, bool fSignaller, bool fEnabled, const char *pszNameFmt, ...)
{
    va_list va;
    va_start(va, pszNameFmt);
    RTLockValidatorRecSharedInitV(pRec, hClass, uSubClass, hLock, fSignaller, fEnabled, pszNameFmt, va);
    va_end(va);
}
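
/** @remarks Example (editor's sketch): initializing a shared-ownership record
 *  for a read/write-style lock.  MYRWLOCK and its members are hypothetical;
 *  the code above shows that a NULL pszNameFmt would yield a generated
 *  "anon-shrd-N" name, so passing a format string is optional.
 * @code
 *  typedef struct MYRWLOCK
 *  {
 *      RTSEMRW          hSemRW;    // the real lock being validated
 *      RTLOCKVALRECSHRD ValRec;    // read-side (shared) validator record
 *  } MYRWLOCK;
 *
 *  void MyRwLockInitValidation(MYRWLOCK *pThis)
 *  {
 *      RTLockValidatorRecSharedInit(&pThis->ValRec, NIL_RTLOCKVALCLASS, RTLOCKVAL_SUB_CLASS_NONE,
 *                                   pThis,   // hLock
 *                                   false,   // fSignaller: ordinary lock, not an event
 *                                   true,    // fEnabled (still subject to RTLockValidatorIsEnabled)
 *                                   "myrwlock-%p", pThis);
 *  }
 * @endcode
 */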


RTDECL(void) RTLockValidatorRecSharedDelete(PRTLOCKVALRECSHRD pRec)
{
    Assert(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC);

    /*
     * Flip it into table realloc mode and take the destruction lock.
     */
    rtLockValidatorSerializeDestructEnter();
    while (!ASMAtomicCmpXchgBool(&pRec->fReallocating, true, false))
    {
        rtLockValidatorSerializeDestructLeave();

        rtLockValidatorSerializeDetectionEnter();
        rtLockValidatorSerializeDetectionLeave();

        rtLockValidatorSerializeDestructEnter();
    }

    ASMAtomicWriteU32(&pRec->Core.u32Magic, RTLOCKVALRECSHRD_MAGIC_DEAD);
    RTLOCKVALCLASS hClass;
    ASMAtomicXchgHandle(&pRec->hClass, NIL_RTLOCKVALCLASS, &hClass);
    if (pRec->papOwners)
    {
        PRTLOCKVALRECSHRDOWN volatile *papOwners = pRec->papOwners;
        ASMAtomicUoWriteNullPtr(&pRec->papOwners);
        ASMAtomicUoWriteU32(&pRec->cAllocated, 0);

        RTMemFree((void *)papOwners); /* free the saved pointer, not the member we just set to NULL */
    }
    if (pRec->pSibling)
        rtLockValidatorUnlinkAllSiblings(&pRec->Core);
    ASMAtomicWriteBool(&pRec->fReallocating, false);

    rtLockValidatorSerializeDestructLeave();

    if (hClass != NIL_RTLOCKVALCLASS)
        RTLockValidatorClassRelease(hClass);
}


RTDECL(uint32_t) RTLockValidatorRecSharedSetSubClass(PRTLOCKVALRECSHRD pRec, uint32_t uSubClass)
{
    AssertPtrReturn(pRec, RTLOCKVAL_SUB_CLASS_INVALID);
    AssertReturn(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, RTLOCKVAL_SUB_CLASS_INVALID);
    AssertReturn(   uSubClass >= RTLOCKVAL_SUB_CLASS_USER
                 || uSubClass == RTLOCKVAL_SUB_CLASS_NONE
                 || uSubClass == RTLOCKVAL_SUB_CLASS_ANY,
                 RTLOCKVAL_SUB_CLASS_INVALID);
    return ASMAtomicXchgU32(&pRec->uSubClass, uSubClass);
}
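
/** @remarks Example (editor's sketch): relaxing order validation for one lock
 *  instance by moving it to the "any" sub-class, e.g. when it is legitimately
 *  taken in varying order relative to other locks of its class.  ValRec is the
 *  hypothetical record from the sketch above; the previous sub-class is
 *  returned so it can be restored later.
 * @code
 *  uint32_t uOldSubClass = RTLockValidatorRecSharedSetSubClass(&pThis->ValRec, RTLOCKVAL_SUB_CLASS_ANY);
 *  Assert(uOldSubClass != RTLOCKVAL_SUB_CLASS_INVALID);
 * @endcode
 */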


/**
 * Locates an owner (thread) in a shared lock record.
 *
 * @returns Pointer to the owner entry on success, NULL on failure.
 * @param   pShared             The shared lock record.
 * @param   hThread             The thread (owner) to find.
 * @param   piEntry             Where to return the table index on success.
 *                              Optional.
 */
DECLINLINE(PRTLOCKVALRECUNION)
rtLockValidatorRecSharedFindOwner(PRTLOCKVALRECSHRD pShared, RTTHREAD hThread, uint32_t *piEntry)
{
    rtLockValidatorSerializeDetectionEnter();

    PRTLOCKVALRECSHRDOWN volatile *papOwners = pShared->papOwners;
    if (papOwners)
    {
        uint32_t const cMax = pShared->cAllocated;
        for (uint32_t iEntry = 0; iEntry < cMax; iEntry++)
        {
            PRTLOCKVALRECUNION pEntry = (PRTLOCKVALRECUNION)rtLockValidatorUoReadSharedOwner(&papOwners[iEntry]);
            if (pEntry && pEntry->ShrdOwner.hThread == hThread)
            {
                rtLockValidatorSerializeDetectionLeave();
                if (piEntry)
                    *piEntry = iEntry;
                return pEntry;
            }
        }
    }

    rtLockValidatorSerializeDetectionLeave();
    return NULL;
}


RTDECL(int) RTLockValidatorRecSharedCheckOrder(PRTLOCKVALRECSHRD pRec, RTTHREAD hThreadSelf,
                                               PCRTLOCKVALSRCPOS pSrcPos, RTMSINTERVAL cMillies)
{
    /*
     * Validate and adjust input.  Quit early if order validation is disabled.
     */
    PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
    AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    if (   !pRecU->Shared.fEnabled
        || pRecU->Shared.hClass == NIL_RTLOCKVALCLASS
        || pRecU->Shared.hClass->cMsMinOrder == RT_INDEFINITE_WAIT
        || pRecU->Shared.hClass->cMsMinOrder > cMillies)
        return VINF_SUCCESS;

    if (hThreadSelf == NIL_RTTHREAD)
    {
        hThreadSelf = RTThreadSelfAutoAdopt();
        AssertReturn(hThreadSelf != NIL_RTTHREAD, VERR_SEM_LV_INTERNAL_ERROR);
    }
    AssertReturn(hThreadSelf->u32Magic == RTTHREADINT_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    Assert(hThreadSelf == RTThreadSelf());

    /*
     * Detect recursion as it isn't subject to order restrictions.
     */
    PRTLOCKVALRECUNION pEntry = rtLockValidatorRecSharedFindOwner(&pRecU->Shared, hThreadSelf, NULL);
    if (pEntry)
        return VINF_SUCCESS;

    return rtLockValidatorStackCheckLockingOrder(pRecU->Shared.hClass, pRecU->Shared.uSubClass, hThreadSelf, pRecU, pSrcPos);
}


RTDECL(int) RTLockValidatorRecSharedCheckBlocking(PRTLOCKVALRECSHRD pRec, RTTHREAD hThreadSelf,
                                                  PCRTLOCKVALSRCPOS pSrcPos, bool fRecursiveOk, RTMSINTERVAL cMillies,
                                                  RTTHREADSTATE enmSleepState, bool fReallySleeping)
{
    /*
     * Fend off wild life.
     */
    PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
    AssertPtrReturn(pRecU, VERR_SEM_LV_INVALID_PARAMETER);
    AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    if (!pRecU->Shared.fEnabled)
        return VINF_SUCCESS;

    PRTTHREADINT pThreadSelf = hThreadSelf;
    AssertPtrReturn(pThreadSelf, VERR_SEM_LV_INVALID_PARAMETER);
    AssertReturn(pThreadSelf->u32Magic == RTTHREADINT_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    Assert(pThreadSelf == RTThreadSelf());

    AssertReturn(RTTHREAD_IS_SLEEPING(enmSleepState), VERR_SEM_LV_INVALID_PARAMETER);

    RTTHREADSTATE enmThreadState = rtThreadGetState(pThreadSelf);
    if (RT_UNLIKELY(enmThreadState != RTTHREADSTATE_RUNNING))
    {
        AssertReturn(   enmThreadState == RTTHREADSTATE_TERMINATED   /* rtThreadRemove uses locks too */
                     || enmThreadState == RTTHREADSTATE_INITIALIZING /* rtThreadInsert uses locks too */
                     , VERR_SEM_LV_INVALID_PARAMETER);
        enmSleepState = enmThreadState;
    }

    /*
     * Record the location.
     */
    rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pRec, pRecU);
    rtLockValidatorSrcPosCopy(&pThreadSelf->LockValidator.SrcPos, pSrcPos);
    ASMAtomicWriteBool(&pThreadSelf->LockValidator.fInValidator, true);
    pThreadSelf->LockValidator.enmRecState = enmSleepState;
    rtThreadSetState(pThreadSelf, enmSleepState);

    /*
     * Don't do deadlock detection if we're recursing.
     */
    int rc = VINF_SUCCESS;
    PRTLOCKVALRECUNION pEntry = !pRecU->Shared.fSignaller
                              ? rtLockValidatorRecSharedFindOwner(&pRecU->Shared, pThreadSelf, NULL)
                              : NULL;
    if (pEntry)
    {
        if (   !fRecursiveOk
            || (   pRec->hClass
                && !pRec->hClass->fRecursionOk))
        {
            rtLockValComplainFirst("Recursion not allowed!", pSrcPos, pThreadSelf, pRecU, true);
            rtLockValComplainPanic();
            rc = VERR_SEM_LV_NESTED;
        }
    }
    /*
     * Perform deadlock detection.
     */
    else if (   pRec->hClass
             && (   pRec->hClass->cMsMinDeadlock == RT_INDEFINITE_WAIT
                 || pRec->hClass->cMsMinDeadlock > cMillies))
        rc = VINF_SUCCESS;
    else if (!rtLockValidatorIsSimpleNoDeadlockCase(pRecU))
        rc = rtLockValidatorDeadlockDetection(pRecU, pThreadSelf, pSrcPos);

    if (RT_SUCCESS(rc))
        ASMAtomicWriteBool(&pThreadSelf->fReallySleeping, fReallySleeping);
    else
    {
        rtThreadSetState(pThreadSelf, enmThreadState);
        rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pRec, NULL);
    }
    ASMAtomicWriteBool(&pThreadSelf->LockValidator.fInValidator, false);
    return rc;
}
RT_EXPORT_SYMBOL(RTLockValidatorRecSharedCheckBlocking);


RTDECL(int) RTLockValidatorRecSharedCheckOrderAndBlocking(PRTLOCKVALRECSHRD pRec, RTTHREAD hThreadSelf,
                                                          PCRTLOCKVALSRCPOS pSrcPos, bool fRecursiveOk, RTMSINTERVAL cMillies,
                                                          RTTHREADSTATE enmSleepState, bool fReallySleeping)
{
    int rc = RTLockValidatorRecSharedCheckOrder(pRec, hThreadSelf, pSrcPos, cMillies);
    if (RT_SUCCESS(rc))
        rc = RTLockValidatorRecSharedCheckBlocking(pRec, hThreadSelf, pSrcPos, fRecursiveOk, cMillies,
                                                   enmSleepState, fReallySleeping);
    return rc;
}
RT_EXPORT_SYMBOL(RTLockValidatorRecSharedCheckOrderAndBlocking);
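
/** @remarks Example (editor's sketch): the read-acquisition path of the
 *  hypothetical MYRWLOCK wrapper from the earlier sketch.  The validator is
 *  consulted before blocking and ownership is recorded once the lock is won;
 *  the RTSemRWRequestRead() pairing is an illustrative assumption.
 * @code
 *  int MyRwLockRequestRead(MYRWLOCK *pThis, RTMSINTERVAL cMillies)
 *  {
 *      RTTHREAD hThreadSelf = RTThreadSelf();
 *      int rc = RTLockValidatorRecSharedCheckOrderAndBlocking(&pThis->ValRec, hThreadSelf,
 *                                                             NULL,   // pSrcPos
 *                                                             true,   // fRecursiveOk
 *                                                             cMillies, RTTHREADSTATE_RW_READ,
 *                                                             true);  // fReallySleeping
 *      if (RT_FAILURE(rc))
 *          return rc;
 *      rc = RTSemRWRequestRead(pThis->hSemRW, cMillies);
 *      if (RT_SUCCESS(rc))
 *          RTLockValidatorRecSharedAddOwner(&pThis->ValRec, hThreadSelf, NULL); // no pSrcPos
 *      return rc;
 *  }
 * @endcode
 */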


/**
 * Allocates and initializes an owner entry for the shared lock record.
 *
 * @returns The new owner entry.
 * @param   pRec                The shared lock record.
 * @param   pThreadSelf         The calling thread and owner.  Used for record
 *                              initialization and allocation.
 * @param   pSrcPos             The source position.
 */
DECLINLINE(PRTLOCKVALRECUNION)
rtLockValidatorRecSharedAllocOwner(PRTLOCKVALRECSHRD pRec, PRTTHREADINT pThreadSelf, PCRTLOCKVALSRCPOS pSrcPos)
{
    PRTLOCKVALRECUNION pEntry;

    /*
     * Check if the thread has any statically allocated records we can easily
     * make use of.
     */
    unsigned iEntry = ASMBitFirstSetU32(ASMAtomicUoReadU32(&pThreadSelf->LockValidator.bmFreeShrdOwners));
    if (   iEntry > 0
        && ASMAtomicBitTestAndClear(&pThreadSelf->LockValidator.bmFreeShrdOwners, iEntry - 1))
    {
        pEntry = (PRTLOCKVALRECUNION)&pThreadSelf->LockValidator.aShrdOwners[iEntry - 1];
        Assert(!pEntry->ShrdOwner.fReserved);
        pEntry->ShrdOwner.fStaticAlloc = true;
        rtThreadGet(pThreadSelf);
    }
    else
    {
        pEntry = (PRTLOCKVALRECUNION)RTMemAlloc(sizeof(RTLOCKVALRECSHRDOWN));
        if (RT_UNLIKELY(!pEntry))
            return NULL;
        pEntry->ShrdOwner.fStaticAlloc = false;
    }

    pEntry->Core.u32Magic        = RTLOCKVALRECSHRDOWN_MAGIC;
    pEntry->ShrdOwner.cRecursion = 1;
    pEntry->ShrdOwner.fReserved  = true;
    pEntry->ShrdOwner.hThread    = pThreadSelf;
    pEntry->ShrdOwner.pDown      = NULL;
    pEntry->ShrdOwner.pSharedRec = pRec;
#if HC_ARCH_BITS == 32
    pEntry->ShrdOwner.pvReserved = NULL;
#endif
    if (pSrcPos)
        pEntry->ShrdOwner.SrcPos = *pSrcPos;
    else
        rtLockValidatorSrcPosInit(&pEntry->ShrdOwner.SrcPos);
    return pEntry;
}


/**
 * Frees an owner entry allocated by rtLockValidatorRecSharedAllocOwner.
 *
 * @param   pEntry              The owner entry.
 */
DECLINLINE(void) rtLockValidatorRecSharedFreeOwner(PRTLOCKVALRECSHRDOWN pEntry)
{
    if (pEntry)
    {
        Assert(pEntry->Core.u32Magic == RTLOCKVALRECSHRDOWN_MAGIC);
        ASMAtomicWriteU32(&pEntry->Core.u32Magic, RTLOCKVALRECSHRDOWN_MAGIC_DEAD);

        PRTTHREADINT pThread;
        ASMAtomicXchgHandle(&pEntry->hThread, NIL_RTTHREAD, &pThread);

        Assert(pEntry->fReserved);
        pEntry->fReserved = false;

        if (pEntry->fStaticAlloc)
        {
            AssertPtrReturnVoid(pThread);
            AssertReturnVoid(pThread->u32Magic == RTTHREADINT_MAGIC);

            uintptr_t iEntry = pEntry - &pThread->LockValidator.aShrdOwners[0];
            AssertReleaseReturnVoid(iEntry < RT_ELEMENTS(pThread->LockValidator.aShrdOwners));

            Assert(!ASMBitTest(&pThread->LockValidator.bmFreeShrdOwners, iEntry));
            ASMAtomicBitSet(&pThread->LockValidator.bmFreeShrdOwners, iEntry);

            rtThreadRelease(pThread);
        }
        else
        {
            rtLockValidatorSerializeDestructEnter();
            rtLockValidatorSerializeDestructLeave();

            RTMemFree(pEntry);
        }
    }
}


/**
 * Make more room in the table.
 *
 * @retval  true on success
 * @retval  false if we're out of memory or running into a bad race condition
 *          (probably a bug somewhere).  No longer holding the lock.
 *
 * @param   pShared             The shared lock record.
 */
static bool rtLockValidatorRecSharedMakeRoom(PRTLOCKVALRECSHRD pShared)
{
    for (unsigned i = 0; i < 1000; i++)
    {
        /*
         * Switch to the other data access direction.
         */
        rtLockValidatorSerializeDetectionLeave();
        if (i >= 10)
        {
            Assert(i != 10 && i != 100); /* trips in strict builds to flag excessive spinning */
            RTThreadSleep(i >= 100);     /* 0 = just yield, 1 = sleep a millisecond */
        }
        rtLockValidatorSerializeDestructEnter();

        /*
         * Try to grab the privilege of reallocating the table.
         */
        if (   pShared->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
            && ASMAtomicCmpXchgBool(&pShared->fReallocating, true, false))
        {
            uint32_t cAllocated = pShared->cAllocated;
            if (cAllocated < pShared->cEntries)
            {
                /*
                 * Ok, still not enough space.  Reallocate the table.
                 */
#if 0  /** @todo enable this after making sure growing works flawlessly. */
                uint32_t cInc = RT_ALIGN_32(pShared->cEntries - cAllocated, 16);
#else
                uint32_t cInc = RT_ALIGN_32(pShared->cEntries - cAllocated, 1);
#endif
                PRTLOCKVALRECSHRDOWN *papOwners;
                papOwners = (PRTLOCKVALRECSHRDOWN *)RTMemRealloc((void *)pShared->papOwners,
                                                                 (cAllocated + cInc) * sizeof(void *));
                if (!papOwners)
                {
                    ASMAtomicWriteBool(&pShared->fReallocating, false);
                    rtLockValidatorSerializeDestructLeave();
                    /* RTMemRealloc will assert */
                    return false;
                }

                while (cInc-- > 0)
                {
                    papOwners[cAllocated] = NULL;
                    cAllocated++;
                }

                ASMAtomicWritePtr(&pShared->papOwners, papOwners);
                ASMAtomicWriteU32(&pShared->cAllocated, cAllocated);
            }
            ASMAtomicWriteBool(&pShared->fReallocating, false);
        }
        rtLockValidatorSerializeDestructLeave();

        rtLockValidatorSerializeDetectionEnter();
        if (RT_UNLIKELY(pShared->Core.u32Magic != RTLOCKVALRECSHRD_MAGIC))
            break;

        if (pShared->cAllocated >= pShared->cEntries)
            return true;
    }

    rtLockValidatorSerializeDetectionLeave();
    AssertFailed(); /* too many iterations or destroyed while racing. */
    return false;
}


/**
 * Adds an owner entry to a shared lock record.
 *
 * @returns true on success, false on a serious race or if we're out of memory.
 * @param   pShared             The shared lock record.
 * @param   pEntry              The owner entry.
 */
DECLINLINE(bool) rtLockValidatorRecSharedAddOwner(PRTLOCKVALRECSHRD pShared, PRTLOCKVALRECSHRDOWN pEntry)
{
    rtLockValidatorSerializeDetectionEnter();
    if (RT_LIKELY(pShared->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC)) /* paranoia */
    {
        if (   ASMAtomicIncU32(&pShared->cEntries) > pShared->cAllocated /** @todo add fudge */
            && !rtLockValidatorRecSharedMakeRoom(pShared))
            return false; /* the worker leaves the lock on failure */

        PRTLOCKVALRECSHRDOWN volatile *papOwners = pShared->papOwners;
        uint32_t const cMax = pShared->cAllocated;
        for (unsigned i = 0; i < 100; i++)
        {
            for (uint32_t iEntry = 0; iEntry < cMax; iEntry++)
            {
                if (ASMAtomicCmpXchgPtr(&papOwners[iEntry], pEntry, NULL))
                {
                    rtLockValidatorSerializeDetectionLeave();
                    return true;
                }
            }
            Assert(i != 25);
        }
        AssertFailed();
    }
    rtLockValidatorSerializeDetectionLeave();
    return false;
}


/**
 * Remove an owner entry from a shared lock record and free it.
 *
 * @param   pShared             The shared lock record.
 * @param   pEntry              The owner entry to remove.
 * @param   iEntry              The last known index.
 */
DECLINLINE(void) rtLockValidatorRecSharedRemoveAndFreeOwner(PRTLOCKVALRECSHRD pShared, PRTLOCKVALRECSHRDOWN pEntry,
                                                            uint32_t iEntry)
{
    /*
     * Remove it from the table.
     */
    rtLockValidatorSerializeDetectionEnter();
    AssertReturnVoidStmt(pShared->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, rtLockValidatorSerializeDetectionLeave());
    if (RT_UNLIKELY(   iEntry >= pShared->cAllocated
                    || !ASMAtomicCmpXchgPtr(&pShared->papOwners[iEntry], NULL, pEntry)))
    {
        /* this shouldn't happen yet... */
        AssertFailed();
        PRTLOCKVALRECSHRDOWN volatile *papOwners = pShared->papOwners;
        uint32_t const cMax = pShared->cAllocated;
        for (iEntry = 0; iEntry < cMax; iEntry++)
            if (ASMAtomicCmpXchgPtr(&papOwners[iEntry], NULL, pEntry))
                break;
        AssertReturnVoidStmt(iEntry < cMax, rtLockValidatorSerializeDetectionLeave());
    }
    uint32_t cNow = ASMAtomicDecU32(&pShared->cEntries);
    Assert(!(cNow & RT_BIT_32(31))); NOREF(cNow);
    rtLockValidatorSerializeDetectionLeave();

    /*
     * Successfully removed, now free it.
     */
    rtLockValidatorRecSharedFreeOwner(pEntry);
}


RTDECL(void) RTLockValidatorRecSharedResetOwner(PRTLOCKVALRECSHRD pRec, RTTHREAD hThread, PCRTLOCKVALSRCPOS pSrcPos)
{
    AssertReturnVoid(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC);
    if (!pRec->fEnabled)
        return;
    AssertReturnVoid(hThread == NIL_RTTHREAD || hThread->u32Magic == RTTHREADINT_MAGIC);
    AssertReturnVoid(pRec->fSignaller);

    /*
     * Free all current owners.
     */
    rtLockValidatorSerializeDetectionEnter();
    while (ASMAtomicUoReadU32(&pRec->cEntries) > 0)
    {
        AssertReturnVoidStmt(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, rtLockValidatorSerializeDetectionLeave());
        uint32_t iEntry   = 0;
        uint32_t cEntries = pRec->cAllocated;
        PRTLOCKVALRECSHRDOWN volatile *papEntries = pRec->papOwners;
        while (iEntry < cEntries)
        {
            PRTLOCKVALRECSHRDOWN pEntry = ASMAtomicXchgPtrT(&papEntries[iEntry], NULL, PRTLOCKVALRECSHRDOWN);
            if (pEntry)
            {
                ASMAtomicDecU32(&pRec->cEntries);
                rtLockValidatorSerializeDetectionLeave();

                rtLockValidatorRecSharedFreeOwner(pEntry);

                rtLockValidatorSerializeDetectionEnter();
                if (ASMAtomicUoReadU32(&pRec->cEntries) == 0)
                    break;
                cEntries   = pRec->cAllocated;
                papEntries = pRec->papOwners;
            }
            iEntry++;
        }
    }
    rtLockValidatorSerializeDetectionLeave();

    if (hThread != NIL_RTTHREAD)
    {
        /*
         * Allocate a new owner entry and insert it into the table.
         */
        PRTLOCKVALRECUNION pEntry = rtLockValidatorRecSharedAllocOwner(pRec, hThread, pSrcPos);
        if (   pEntry
            && !rtLockValidatorRecSharedAddOwner(pRec, &pEntry->ShrdOwner))
            rtLockValidatorRecSharedFreeOwner(&pEntry->ShrdOwner);
    }
}
RT_EXPORT_SYMBOL(RTLockValidatorRecSharedResetOwner);
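
/** @remarks Example (editor's sketch): a signaller-style record, as used for
 *  event semaphores where the record tracks which thread is expected to do the
 *  signalling.  MYEVENT is hypothetical, and whether a wrapper resets on every
 *  signal or only in a SetSignaller-style API is a design choice; this merely
 *  illustrates the reset-then-add behaviour of the function above.
 * @code
 *  typedef struct MYEVENT
 *  {
 *      RTSEMEVENT       hEvt;
 *      RTLOCKVALRECSHRD SignallerRec;  // initialized with fSignaller = true
 *  } MYEVENT;
 *
 *  int MyEventSetSignaller(MYEVENT *pThis, RTTHREAD hThread)
 *  {
 *      // Drop all recorded signallers and record hThread as the sole one.
 *      RTLockValidatorRecSharedResetOwner(&pThis->SignallerRec, hThread, NULL); // no pSrcPos
 *      return VINF_SUCCESS;
 *  }
 * @endcode
 */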


RTDECL(void) RTLockValidatorRecSharedAddOwner(PRTLOCKVALRECSHRD pRec, RTTHREAD hThread, PCRTLOCKVALSRCPOS pSrcPos)
{
    AssertReturnVoid(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC);
    if (!pRec->fEnabled)
        return;
    if (hThread == NIL_RTTHREAD)
    {
        hThread = RTThreadSelfAutoAdopt();
        AssertReturnVoid(hThread != NIL_RTTHREAD);
    }
    AssertReturnVoid(hThread->u32Magic == RTTHREADINT_MAGIC);

    /*
     * Recursive?
     *
     * Note! This code can be optimized to try to avoid scanning the table on
     *       insert.  However, that's annoying work that makes the code big,
     *       so it can wait till later sometime.
     */
    PRTLOCKVALRECUNION pEntry = rtLockValidatorRecSharedFindOwner(pRec, hThread, NULL);
    if (pEntry)
    {
        Assert(!pRec->fSignaller);
        pEntry->ShrdOwner.cRecursion++;
        rtLockValidatorStackPushRecursion(hThread, pEntry, pSrcPos);
        return;
    }

    /*
     * Allocate a new owner entry and insert it into the table.
     */
    pEntry = rtLockValidatorRecSharedAllocOwner(pRec, hThread, pSrcPos);
    if (pEntry)
    {
        if (rtLockValidatorRecSharedAddOwner(pRec, &pEntry->ShrdOwner))
        {
            if (!pRec->fSignaller)
                rtLockValidatorStackPush(hThread, pEntry);
        }
        else
            rtLockValidatorRecSharedFreeOwner(&pEntry->ShrdOwner);
    }
}
RT_EXPORT_SYMBOL(RTLockValidatorRecSharedAddOwner);


RTDECL(void) RTLockValidatorRecSharedRemoveOwner(PRTLOCKVALRECSHRD pRec, RTTHREAD hThread)
{
    AssertReturnVoid(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC);
    if (!pRec->fEnabled)
        return;
    if (hThread == NIL_RTTHREAD)
    {
        hThread = RTThreadSelfAutoAdopt();
        AssertReturnVoid(hThread != NIL_RTTHREAD);
    }
    AssertReturnVoid(hThread->u32Magic == RTTHREADINT_MAGIC);

    /*
     * Find the entry and hope it's a recursive one.
     */
    uint32_t iEntry = UINT32_MAX; /* shuts up gcc */
    PRTLOCKVALRECUNION pEntry = rtLockValidatorRecSharedFindOwner(pRec, hThread, &iEntry);
    AssertReturnVoid(pEntry);
    AssertReturnVoid(pEntry->ShrdOwner.cRecursion > 0);

    uint32_t c = --pEntry->ShrdOwner.cRecursion;
    if (c == 0)
    {
        if (!pRec->fSignaller)
            rtLockValidatorStackPop(hThread, (PRTLOCKVALRECUNION)pEntry);
        rtLockValidatorRecSharedRemoveAndFreeOwner(pRec, &pEntry->ShrdOwner, iEntry);
    }
    else
    {
        Assert(!pRec->fSignaller);
        rtLockValidatorStackPopRecursion(hThread, pEntry);
    }
}
RT_EXPORT_SYMBOL(RTLockValidatorRecSharedRemoveOwner);


RTDECL(bool) RTLockValidatorRecSharedIsOwner(PRTLOCKVALRECSHRD pRec, RTTHREAD hThread)
{
    /* Validate and resolve input. */
    AssertReturn(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, false);
    if (!pRec->fEnabled)
        return false;
    if (hThread == NIL_RTTHREAD)
    {
        hThread = RTThreadSelfAutoAdopt();
        AssertReturn(hThread != NIL_RTTHREAD, false);
    }
    AssertReturn(hThread->u32Magic == RTTHREADINT_MAGIC, false);

    /* Do the job. */
    PRTLOCKVALRECUNION pEntry = rtLockValidatorRecSharedFindOwner(pRec, hThread, NULL);
    return pEntry != NULL;
}
RT_EXPORT_SYMBOL(RTLockValidatorRecSharedIsOwner);
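
/** @remarks Example (editor's sketch): guarding an API that requires the
 *  caller to already hold the read lock.  MYRWLOCK is the hypothetical wrapper
 *  from earlier sketches; the check is skipped for disabled records since
 *  RTLockValidatorRecSharedIsOwner() then always returns false.
 * @code
 *  void MyRwLockAssertReadOwner(MYRWLOCK *pThis)
 *  {
 *      if (pThis->ValRec.fEnabled)
 *          Assert(RTLockValidatorRecSharedIsOwner(&pThis->ValRec, NIL_RTTHREAD)); // NIL = calling thread
 *  }
 * @endcode
 */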


RTDECL(int) RTLockValidatorRecSharedCheckAndRelease(PRTLOCKVALRECSHRD pRec, RTTHREAD hThreadSelf)
{
    AssertReturn(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    if (!pRec->fEnabled)
        return VINF_SUCCESS;
    if (hThreadSelf == NIL_RTTHREAD)
    {
        hThreadSelf = RTThreadSelfAutoAdopt();
        AssertReturn(hThreadSelf != NIL_RTTHREAD, VERR_SEM_LV_INTERNAL_ERROR);
    }
    Assert(hThreadSelf == RTThreadSelf());
    AssertReturn(hThreadSelf->u32Magic == RTTHREADINT_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);

    /*
     * Locate the entry for this thread in the table.
     */
    uint32_t iEntry = 0;
    PRTLOCKVALRECUNION pEntry = rtLockValidatorRecSharedFindOwner(pRec, hThreadSelf, &iEntry);
    if (RT_UNLIKELY(!pEntry))
    {
        rtLockValComplainFirst("Not owner (shared)!", NULL, hThreadSelf, (PRTLOCKVALRECUNION)pRec, true);
        rtLockValComplainPanic();
        return VERR_SEM_LV_NOT_OWNER;
    }

    /*
     * Check the release order.
     */
    if (   pRec->hClass != NIL_RTLOCKVALCLASS
        && pRec->hClass->fStrictReleaseOrder
        && pRec->hClass->cMsMinOrder != RT_INDEFINITE_WAIT)
    {
        int rc = rtLockValidatorStackCheckReleaseOrder(hThreadSelf, (PRTLOCKVALRECUNION)pEntry);
        if (RT_FAILURE(rc))
            return rc;
    }

    /*
     * Release the ownership or unwind a level of recursion.
     */
    Assert(pEntry->ShrdOwner.cRecursion > 0);
    uint32_t c = --pEntry->ShrdOwner.cRecursion;
    if (c == 0)
    {
        rtLockValidatorStackPop(hThreadSelf, pEntry);
        rtLockValidatorRecSharedRemoveAndFreeOwner(pRec, &pEntry->ShrdOwner, iEntry);
    }
    else
        rtLockValidatorStackPopRecursion(hThreadSelf, pEntry);

    return VINF_SUCCESS;
}
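
/** @remarks Example (editor's sketch): the matching read-release path for the
 *  hypothetical MYRWLOCK.  The validator check runs before the underlying
 *  release so that a "not owner" bug is reported while the lock state is still
 *  intact; the RTSemRWReleaseRead() pairing is an illustrative assumption.
 * @code
 *  int MyRwLockReleaseRead(MYRWLOCK *pThis)
 *  {
 *      // Verifies ownership and release order, then drops the owner entry.
 *      int rc = RTLockValidatorRecSharedCheckAndRelease(&pThis->ValRec, NIL_RTTHREAD); // NIL = self
 *      if (RT_FAILURE(rc))
 *          return rc;
 *      return RTSemRWReleaseRead(pThis->hSemRW);
 *  }
 * @endcode
 */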


RTDECL(int) RTLockValidatorRecSharedCheckSignaller(PRTLOCKVALRECSHRD pRec, RTTHREAD hThreadSelf)
{
    AssertReturn(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    if (!pRec->fEnabled)
        return VINF_SUCCESS;
    if (hThreadSelf == NIL_RTTHREAD)
    {
        hThreadSelf = RTThreadSelfAutoAdopt();
        AssertReturn(hThreadSelf != NIL_RTTHREAD, VERR_SEM_LV_INTERNAL_ERROR);
    }
    Assert(hThreadSelf == RTThreadSelf());
    AssertReturn(hThreadSelf->u32Magic == RTTHREADINT_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);

    /*
     * Locate the entry for this thread in the table.
     */
    uint32_t iEntry = 0;
    PRTLOCKVALRECUNION pEntry = rtLockValidatorRecSharedFindOwner(pRec, hThreadSelf, &iEntry);
    if (RT_UNLIKELY(!pEntry))
    {
        rtLockValComplainFirst("Invalid signaller!", NULL, hThreadSelf, (PRTLOCKVALRECUNION)pRec, true);
        rtLockValComplainPanic();
        return VERR_SEM_LV_NOT_SIGNALLER;
    }
    return VINF_SUCCESS;
}


RTDECL(int32_t) RTLockValidatorWriteLockGetCount(RTTHREAD Thread)
{
    if (Thread == NIL_RTTHREAD)
        return 0;

    PRTTHREADINT pThread = rtThreadGet(Thread);
    if (!pThread)
        return VERR_INVALID_HANDLE;
    int32_t cWriteLocks = ASMAtomicReadS32(&pThread->LockValidator.cWriteLocks);
    rtThreadRelease(pThread);
    return cWriteLocks;
}
RT_EXPORT_SYMBOL(RTLockValidatorWriteLockGetCount);


RTDECL(void) RTLockValidatorWriteLockInc(RTTHREAD Thread)
{
    PRTTHREADINT pThread = rtThreadGet(Thread);
    AssertReturnVoid(pThread);
    ASMAtomicIncS32(&pThread->LockValidator.cWriteLocks);
    rtThreadRelease(pThread);
}
RT_EXPORT_SYMBOL(RTLockValidatorWriteLockInc);


RTDECL(void) RTLockValidatorWriteLockDec(RTTHREAD Thread)
{
    PRTTHREADINT pThread = rtThreadGet(Thread);
    AssertReturnVoid(pThread);
    ASMAtomicDecS32(&pThread->LockValidator.cWriteLocks);
    rtThreadRelease(pThread);
}
RT_EXPORT_SYMBOL(RTLockValidatorWriteLockDec);


RTDECL(int32_t) RTLockValidatorReadLockGetCount(RTTHREAD Thread)
{
    if (Thread == NIL_RTTHREAD)
        return 0;

    PRTTHREADINT pThread = rtThreadGet(Thread);
    if (!pThread)
        return VERR_INVALID_HANDLE;
    int32_t cReadLocks = ASMAtomicReadS32(&pThread->LockValidator.cReadLocks);
    rtThreadRelease(pThread);
    return cReadLocks;
}
RT_EXPORT_SYMBOL(RTLockValidatorReadLockGetCount);


RTDECL(void) RTLockValidatorReadLockInc(RTTHREAD Thread)
{
    PRTTHREADINT pThread = rtThreadGet(Thread);
    AssertReturnVoid(pThread); /* match the write-lock variants; a bare Assert would let a NULL deref through in release builds */
    ASMAtomicIncS32(&pThread->LockValidator.cReadLocks);
    rtThreadRelease(pThread);
}
RT_EXPORT_SYMBOL(RTLockValidatorReadLockInc);


RTDECL(void) RTLockValidatorReadLockDec(RTTHREAD Thread)
{
    PRTTHREADINT pThread = rtThreadGet(Thread);
    AssertReturnVoid(pThread);
    ASMAtomicDecS32(&pThread->LockValidator.cReadLocks);
    rtThreadRelease(pThread);
}
RT_EXPORT_SYMBOL(RTLockValidatorReadLockDec);
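
/** @remarks Example (editor's sketch): a lightweight lock that only keeps the
 *  per-thread read counter honest instead of maintaining full shared records.
 *  MYSPINLOCK and the mySpinlock* primitives are hypothetical; the Inc/Dec
 *  calls must pair up exactly or RTLockValidatorReadLockGetCount() will drift.
 * @code
 *  void MySpinlockEnterShared(MYSPINLOCK *pThis)
 *  {
 *      mySpinlockAcquireShared(pThis);                 // hypothetical primitive
 *      RTLockValidatorReadLockInc(RTThreadSelf());     // one more read lock held
 *  }
 *
 *  void MySpinlockLeaveShared(MYSPINLOCK *pThis)
 *  {
 *      RTLockValidatorReadLockDec(RTThreadSelf());     // one less read lock held
 *      mySpinlockReleaseShared(pThis);
 *  }
 * @endcode
 */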


RTDECL(void *) RTLockValidatorQueryBlocking(RTTHREAD hThread)
{
    void *pvLock = NULL;
    PRTTHREADINT pThread = rtThreadGet(hThread);
    if (pThread)
    {
        RTTHREADSTATE enmState = rtThreadGetState(pThread);
        if (RTTHREAD_IS_SLEEPING(enmState))
        {
            rtLockValidatorSerializeDetectionEnter();

            enmState = rtThreadGetState(pThread);
            if (RTTHREAD_IS_SLEEPING(enmState))
            {
                PRTLOCKVALRECUNION pRec = rtLockValidatorReadRecUnionPtr(&pThread->LockValidator.pRec);
                if (pRec)
                {
                    switch (pRec->Core.u32Magic)
                    {
                        case RTLOCKVALRECEXCL_MAGIC:
                            pvLock = pRec->Excl.hLock;
                            break;

                        case RTLOCKVALRECSHRDOWN_MAGIC:
                            pRec = (PRTLOCKVALRECUNION)pRec->ShrdOwner.pSharedRec;
                            if (!pRec || pRec->Core.u32Magic != RTLOCKVALRECSHRD_MAGIC)
                                break;
                            /* fall thru */
                        case RTLOCKVALRECSHRD_MAGIC:
                            pvLock = pRec->Shared.hLock;
                            break;
                    }
                    if (RTThreadGetState(pThread) != enmState)
                        pvLock = NULL;
                }
            }

            rtLockValidatorSerializeDetectionLeave();
        }
        rtThreadRelease(pThread);
    }
    return pvLock;
}
RT_EXPORT_SYMBOL(RTLockValidatorQueryBlocking);


RTDECL(bool) RTLockValidatorIsBlockedThreadInValidator(RTTHREAD hThread)
{
    bool fRet = false;
    PRTTHREADINT pThread = rtThreadGet(hThread);
    if (pThread)
    {
        fRet = ASMAtomicReadBool(&pThread->LockValidator.fInValidator);
        rtThreadRelease(pThread);
    }
    return fRet;
}
RT_EXPORT_SYMBOL(RTLockValidatorIsBlockedThreadInValidator);
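
/** @remarks Example (editor's sketch): a debug helper reporting what a given
 *  thread is blocked on, if anything.  RTPrintf, RTThreadGetName and the
 *  %RTbool format are real IPRT facilities; the helper itself is illustrative.
 * @code
 *  void myDumpBlockedThread(RTTHREAD hThread)
 *  {
 *      void *pvLock = RTLockValidatorQueryBlocking(hThread);
 *      if (pvLock)
 *          RTPrintf("thread '%s' is blocked on lock %p (in validator: %RTbool)\n",
 *                   RTThreadGetName(hThread), pvLock,
 *                   RTLockValidatorIsBlockedThreadInValidator(hThread));
 *  }
 * @endcode
 */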


RTDECL(bool) RTLockValidatorHoldsLocksInClass(RTTHREAD hCurrentThread, RTLOCKVALCLASS hClass)
{
    bool fRet = false;
    if (hCurrentThread == NIL_RTTHREAD)
        hCurrentThread = RTThreadSelf();
    else
        Assert(hCurrentThread == RTThreadSelf());
    PRTTHREADINT pThread = rtThreadGet(hCurrentThread);
    if (pThread)
    {
        if (hClass != NIL_RTLOCKVALCLASS)
        {
            PRTLOCKVALRECUNION pCur = rtLockValidatorReadRecUnionPtr(&pThread->LockValidator.pStackTop);
            while (VALID_PTR(pCur) && !fRet)
            {
                switch (pCur->Core.u32Magic)
                {
                    case RTLOCKVALRECEXCL_MAGIC:
                        fRet = pCur->Excl.hClass == hClass;
                        pCur = rtLockValidatorReadRecUnionPtr(&pCur->Excl.pDown);
                        break;
                    case RTLOCKVALRECSHRDOWN_MAGIC:
                        fRet = VALID_PTR(pCur->ShrdOwner.pSharedRec)
                            && pCur->ShrdOwner.pSharedRec->hClass == hClass;
                        pCur = rtLockValidatorReadRecUnionPtr(&pCur->ShrdOwner.pDown);
                        break;
                    case RTLOCKVALRECNEST_MAGIC:
                        switch (pCur->Nest.pRec->Core.u32Magic)
                        {
                            case RTLOCKVALRECEXCL_MAGIC:
                                fRet = pCur->Nest.pRec->Excl.hClass == hClass;
                                break;
                            case RTLOCKVALRECSHRDOWN_MAGIC:
                                fRet = VALID_PTR(pCur->Nest.pRec->ShrdOwner.pSharedRec) /* validate the pointer actually dereferenced below */
                                    && pCur->Nest.pRec->ShrdOwner.pSharedRec->hClass == hClass;
                                break;
                        }
                        pCur = rtLockValidatorReadRecUnionPtr(&pCur->Nest.pDown);
                        break;
                    default:
                        pCur = NULL;
                        break;
                }
            }
        }

        rtThreadRelease(pThread);
    }
    return fRet;
}
RT_EXPORT_SYMBOL(RTLockValidatorHoldsLocksInClass);
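
/** @remarks Example (editor's sketch): asserting a locking invariant before a
 *  call that must not be made while holding any lock of a given class, e.g. to
 *  avoid order inversions against a user callback.  g_hMyLockClass and
 *  myInvokeUserCallback() are hypothetical; the class handle would have been
 *  created elsewhere with RTLockValidatorClassCreate().
 * @code
 *  Assert(!RTLockValidatorHoldsLocksInClass(NIL_RTTHREAD, g_hMyLockClass)); // NIL = calling thread
 *  myInvokeUserCallback();
 * @endcode
 */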


RTDECL(bool) RTLockValidatorHoldsLocksInSubClass(RTTHREAD hCurrentThread, RTLOCKVALCLASS hClass, uint32_t uSubClass)
{
    bool fRet = false;
    if (hCurrentThread == NIL_RTTHREAD)
        hCurrentThread = RTThreadSelf();
    else
        Assert(hCurrentThread == RTThreadSelf());
    PRTTHREADINT pThread = rtThreadGet(hCurrentThread);
    if (pThread)
    {
        if (hClass != NIL_RTLOCKVALCLASS)
        {
            PRTLOCKVALRECUNION pCur = rtLockValidatorReadRecUnionPtr(&pThread->LockValidator.pStackTop);
            while (VALID_PTR(pCur) && !fRet)
            {
                switch (pCur->Core.u32Magic)
                {
                    case RTLOCKVALRECEXCL_MAGIC:
                        fRet = pCur->Excl.hClass == hClass
                            && pCur->Excl.uSubClass == uSubClass;
                        pCur = rtLockValidatorReadRecUnionPtr(&pCur->Excl.pDown);
                        break;
                    case RTLOCKVALRECSHRDOWN_MAGIC:
                        fRet = VALID_PTR(pCur->ShrdOwner.pSharedRec)
                            && pCur->ShrdOwner.pSharedRec->hClass == hClass
                            && pCur->ShrdOwner.pSharedRec->uSubClass == uSubClass;
                        pCur = rtLockValidatorReadRecUnionPtr(&pCur->ShrdOwner.pDown);
                        break;
                    case RTLOCKVALRECNEST_MAGIC:
                        switch (pCur->Nest.pRec->Core.u32Magic)
                        {
                            case RTLOCKVALRECEXCL_MAGIC:
                                fRet = pCur->Nest.pRec->Excl.hClass == hClass
                                    && pCur->Nest.pRec->Excl.uSubClass == uSubClass;
                                break;
                            case RTLOCKVALRECSHRDOWN_MAGIC:
                                fRet = VALID_PTR(pCur->Nest.pRec->ShrdOwner.pSharedRec) /* validate the pointer actually dereferenced below */
                                    && pCur->Nest.pRec->ShrdOwner.pSharedRec->hClass == hClass
                                    && pCur->Nest.pRec->ShrdOwner.pSharedRec->uSubClass == uSubClass;
                                break;
                        }
                        pCur = rtLockValidatorReadRecUnionPtr(&pCur->Nest.pDown);
                        break;
                    default:
                        pCur = NULL;
                        break;
                }
            }
        }

        rtThreadRelease(pThread);
    }
    return fRet;
}
RT_EXPORT_SYMBOL(RTLockValidatorHoldsLocksInSubClass);


RTDECL(bool) RTLockValidatorSetEnabled(bool fEnabled)
{
    return ASMAtomicXchgBool(&g_fLockValidatorEnabled, fEnabled);
}
RT_EXPORT_SYMBOL(RTLockValidatorSetEnabled);


RTDECL(bool) RTLockValidatorIsEnabled(void)
{
    return ASMAtomicUoReadBool(&g_fLockValidatorEnabled);
}
RT_EXPORT_SYMBOL(RTLockValidatorIsEnabled);


RTDECL(bool) RTLockValidatorSetQuiet(bool fQuiet)
{
    return ASMAtomicXchgBool(&g_fLockValidatorQuiet, fQuiet);
}
RT_EXPORT_SYMBOL(RTLockValidatorSetQuiet);


RTDECL(bool) RTLockValidatorIsQuiet(void)
{
    return ASMAtomicUoReadBool(&g_fLockValidatorQuiet);
}
RT_EXPORT_SYMBOL(RTLockValidatorIsQuiet);


RTDECL(bool) RTLockValidatorSetMayPanic(bool fMayPanic)
{
    return ASMAtomicXchgBool(&g_fLockValidatorMayPanic, fMayPanic);
}
RT_EXPORT_SYMBOL(RTLockValidatorSetMayPanic);


RTDECL(bool) RTLockValidatorMayPanic(void)
{
    return ASMAtomicUoReadBool(&g_fLockValidatorMayPanic);
}
RT_EXPORT_SYMBOL(RTLockValidatorMayPanic);
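
/** @remarks Example (editor's sketch): configuring the validator globals at
 *  process start for a strict debug run.  Each setter returns the previous
 *  value, which a test harness could stash and restore afterwards.
 * @code
 *  void myEnableStrictLockValidation(void)
 *  {
 *      RTLockValidatorSetEnabled(true);    // validate lock usage
 *      RTLockValidatorSetQuiet(false);     // complain loudly
 *      RTLockValidatorSetMayPanic(true);   // turn violations into assertions
 *  }
 * @endcode
 */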