VirtualBox

source: vbox/trunk/src/VBox/Runtime/common/misc/lockvalidator.cpp@ 87182

Last change on this file since 87182 was 87151, checked in by vboxsync, 4 years ago

iprt/lockvalidator: size calc fix.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 160.3 KB
Line 
1/* $Id: lockvalidator.cpp 87151 2020-12-31 10:12:45Z vboxsync $ */
2/** @file
3 * IPRT - Lock Validator.
4 */
5
6/*
7 * Copyright (C) 2009-2020 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 */
26
27
28/*********************************************************************************************************************************
29* Header Files *
30*********************************************************************************************************************************/
31#include <iprt/lockvalidator.h>
32#include "internal/iprt.h"
33
34#include <iprt/asm.h>
35#include <iprt/assert.h>
36#include <iprt/env.h>
37#include <iprt/err.h>
38#include <iprt/mem.h>
39#include <iprt/once.h>
40#include <iprt/semaphore.h>
41#include <iprt/string.h>
42#include <iprt/thread.h>
43
44#include "internal/lockvalidator.h"
45#include "internal/magics.h"
46#include "internal/strhash.h"
47#include "internal/thread.h"
48
49
50/*********************************************************************************************************************************
51* Defined Constants And Macros *
52*********************************************************************************************************************************/
/** Macro that asserts that a pointer is aligned correctly.
 * Only used when fighting bugs.
 * @note Both arms are wrapped in do/while(0) so the macro behaves as a single
 *       statement (safe in unbraced if/else); the enabled arm previously
 *       carried a stray trailing semicolon and no wrapper. */
#if 1
# define RTLOCKVAL_ASSERT_PTR_ALIGN(p) \
    do { AssertMsg(!((uintptr_t)(p) & (sizeof(uintptr_t) - 1)), ("%p\n", (p))); } while (0)
#else
# define RTLOCKVAL_ASSERT_PTR_ALIGN(p) do { } while (0)
#endif
61
/** Hashes the class handle (pointer) into an apPriorLocksHash index.
 * Discards the low 6 bits (typically zero due to allocation alignment) and
 * takes the remainder by the hash table size. */
#define RTLOCKVALCLASS_HASH(hClass) \
    ( ((uintptr_t)(hClass) >> 6 ) \
      % ( RT_SIZEOFMEMB(RTLOCKVALCLASSINT, apPriorLocksHash) \
          / sizeof(PRTLOCKVALCLASSREF)) )

/** The max value for RTLOCKVALCLASSINT::cRefs. */
#define RTLOCKVALCLASS_MAX_REFS                 UINT32_C(0xffff0000)
/** The max value for RTLOCKVALCLASSREF::cLookups. */
#define RTLOCKVALCLASSREF_MAX_LOOKUPS           UINT32_C(0xfffe0000)
/** The absolute max value for RTLOCKVALCLASSREF::cLookups at which it will
 * be set back to RTLOCKVALCLASSREF_MAX_LOOKUPS. */
#define RTLOCKVALCLASSREF_MAX_LOOKUPS_FIX       UINT32_C(0xffff0000)


/** @def RTLOCKVAL_WITH_RECURSION_RECORDS
 * Enable recursion records. */
#if defined(IN_RING3) || defined(DOXYGEN_RUNNING)
# define RTLOCKVAL_WITH_RECURSION_RECORDS 1
#endif

/** @def RTLOCKVAL_WITH_VERBOSE_DUMPS
 * Enables some extra verbosity in the lock dumping. */
#if defined(DOXYGEN_RUNNING)
# define RTLOCKVAL_WITH_VERBOSE_DUMPS
#endif

/** @def RTLOCKVAL_WITH_CLASS_HASH_STATS
 * Enables collection prior class hash lookup statistics, dumping them when
 * complaining about the class. */
#if defined(DEBUG) || defined(DOXYGEN_RUNNING)
# define RTLOCKVAL_WITH_CLASS_HASH_STATS
#endif
95
96
97/*********************************************************************************************************************************
98* Structures and Typedefs *
99*********************************************************************************************************************************/
/**
 * Deadlock detection stack entry.
 */
typedef struct RTLOCKVALDDENTRY
{
    /** The current record. */
    PRTLOCKVALRECUNION pRec;
    /** The current entry number if pRec is a shared one. */
    uint32_t iEntry;
    /** The thread state of the thread we followed to get to pFirstSibling.
     * This is only used for validating a deadlock stack. */
    RTTHREADSTATE enmState;
    /** The thread we followed to get to pFirstSibling.
     * This is only used for validating a deadlock stack. */
    PRTTHREADINT pThread;
    /** What pThread is waiting on, i.e. where we entered the circular list of
     * siblings.  This is used for validating a deadlock stack as well as
     * terminating the sibling walk. */
    PRTLOCKVALRECUNION pFirstSibling;
} RTLOCKVALDDENTRY;
120
121
/**
 * Deadlock detection stack.
 *
 * Fixed-size: at most 32 levels of wait-chain are followed.
 */
typedef struct RTLOCKVALDDSTACK
{
    /** The number stack entries. */
    uint32_t c;
    /** The stack entries. */
    RTLOCKVALDDENTRY a[32];
} RTLOCKVALDDSTACK;
/** Pointer to a deadlock detection stack. */
typedef RTLOCKVALDDSTACK *PRTLOCKVALDDSTACK;
134
135
/**
 * Reference to another class.
 */
typedef struct RTLOCKVALCLASSREF
{
    /** The class. */
    RTLOCKVALCLASS hClass;
    /** The number of lookups of this class. */
    uint32_t volatile cLookups;
    /** Indicates whether the entry was added automatically during order checking
     * (true) or manually via the API (false). */
    bool fAutodidacticism;
    /** Reserved / explicit alignment padding. */
    bool afReserved[3];
} RTLOCKVALCLASSREF;
/** Pointer to a class reference. */
typedef RTLOCKVALCLASSREF *PRTLOCKVALCLASSREF;
153
154
/** Pointer to a chunk of class references. */
typedef struct RTLOCKVALCLASSREFCHUNK *PRTLOCKVALCLASSREFCHUNK;
/**
 * Chunk of class references.
 *
 * Chunks form a singly linked list via pNext; each chunk holds a small,
 * fixed number of references to reduce allocations and improve locality.
 */
typedef struct RTLOCKVALCLASSREFCHUNK
{
    /** Array of refs. */
#if 0 /** @todo for testing allocation of new chunks. */
    RTLOCKVALCLASSREF aRefs[ARCH_BITS == 32 ? 10 : 8];
#else
    RTLOCKVALCLASSREF aRefs[2];
#endif
    /** Pointer to the next chunk. */
    PRTLOCKVALCLASSREFCHUNK volatile pNext;
} RTLOCKVALCLASSREFCHUNK;
171
172
/**
 * Lock class.
 */
typedef struct RTLOCKVALCLASSINT
{
    /** AVL node core. */
    AVLLU32NODECORE Core;
    /** Magic value (RTLOCKVALCLASS_MAGIC). */
    uint32_t volatile u32Magic;
    /** Reference counter.  See RTLOCKVALCLASS_MAX_REFS. */
    uint32_t volatile cRefs;
    /** Whether the class is allowed to teach it self new locking order rules. */
    bool fAutodidact;
    /** Whether to allow recursion. */
    bool fRecursionOk;
    /** Strict release order. */
    bool fStrictReleaseOrder;
    /** Whether this class is in the tree. */
    bool fInTree;
    /** Donate a reference to the next retainer.  This is a hack to make
     * RTLockValidatorClassCreateUnique work. */
    bool volatile fDonateRefToNextRetainer;
    /** Reserved future use / explicit alignment. */
    bool afReserved[3];
    /** The minimum wait interval for which we do deadlock detection
     * (milliseconds). */
    RTMSINTERVAL cMsMinDeadlock;
    /** The minimum wait interval for which we do order checks (milliseconds). */
    RTMSINTERVAL cMsMinOrder;
    /** More padding. */
    uint32_t au32Reserved[ARCH_BITS == 32 ? 5 : 2];
    /** Classes that may be taken prior to this one.
     * This is a linked list where each node contains a chunk of locks so that we
     * reduce the number of allocations as well as localize the data. */
    RTLOCKVALCLASSREFCHUNK PriorLocks;
    /** Hash table containing frequently encountered prior locks. */
    PRTLOCKVALCLASSREF apPriorLocksHash[17];
    /** Class name. (Allocated after the end of the block as usual.) */
    char const *pszName;
    /** Where this class was created.
     * This is mainly used for finding automatically created lock classes.
     * @remarks The strings are stored after this structure so we won't crash
     *          if the class lives longer than the module (dll/so/dylib) that
     *          spawned it. */
    RTLOCKVALSRCPOS CreatePos;
#ifdef RTLOCKVAL_WITH_CLASS_HASH_STATS
    /** Hash hits. */
    uint32_t volatile cHashHits;
    /** Hash misses. */
    uint32_t volatile cHashMisses;
#endif
} RTLOCKVALCLASSINT;
/* Layout guards: the PriorLocks offset is relied upon (see r87151 size calc fix). */
AssertCompileSize(AVLLU32NODECORE, ARCH_BITS == 32 ? 20 : 32);
AssertCompileMemberOffset(RTLOCKVALCLASSINT, PriorLocks, 64);
227
228
229/*********************************************************************************************************************************
230* Global Variables *
231*********************************************************************************************************************************/
/** Serializing object destruction and deadlock detection.
 *
 * This makes sure that none of the memory examined by the deadlock detection
 * code will become invalid (reused for other purposes or made not present)
 * while the detection is in progress.
 *
 * NS: RTLOCKVALREC*, RTTHREADINT and RTLOCKVALDRECSHRD::papOwners destruction.
 * EW: Deadlock detection and some related activities.
 */
static RTSEMXROADS      g_hLockValidatorXRoads    = NIL_RTSEMXROADS;
/** Serializing class tree insert and lookups. */
static RTSEMRW          g_hLockValClassTreeRWLock = NIL_RTSEMRW;
/** Class tree. */
static PAVLLU32NODECORE g_LockValClassTree        = NULL;
/** Critical section serializing the teaching new rules to the classes. */
static RTCRITSECT       g_LockValClassTeachCS;

/** Whether the lock validator is enabled or disabled.
 * Only applies to new locks. */
static bool volatile    g_fLockValidatorEnabled   = true;
/** Set if the lock validator is quiet. (Defaults to noisy in strict builds.) */
#ifdef RT_STRICT
static bool volatile    g_fLockValidatorQuiet     = false;
#else
static bool volatile    g_fLockValidatorQuiet     = true;
#endif
/** Set if the lock validator may panic. (Defaults to panicking in strict builds.) */
#ifdef RT_STRICT
static bool volatile    g_fLockValidatorMayPanic  = true;
#else
static bool volatile    g_fLockValidatorMayPanic  = false;
#endif
/** Whether to return an error status on wrong locking order. */
static bool volatile    g_fLockValSoftWrongOrder  = false;
266
267
268/*********************************************************************************************************************************
269* Internal Functions *
270*********************************************************************************************************************************/
271static void rtLockValidatorClassDestroy(RTLOCKVALCLASSINT *pClass);
272static uint32_t rtLockValidatorStackDepth(PRTTHREADINT pThread);
273
274
/**
 * Lazy initialization of the lock validator globals.
 *
 * Only one thread at a time performs the initialization, guarded by a CAS on
 * s_fInitializing.  NOTE(review): a caller losing the CAS race returns
 * immediately, i.e. possibly before the winner has finished initializing;
 * the per-handle NIL checks elsewhere appear to tolerate this.
 */
static void rtLockValidatorLazyInit(void)
{
    static uint32_t volatile s_fInitializing = false;
    if (ASMAtomicCmpXchgU32(&s_fInitializing, true, false))
    {
        /*
         * The locks.  Each is created only if not already present, so a
         * repeated call completes anything a failed earlier attempt left out.
         */
        if (!RTCritSectIsInitialized(&g_LockValClassTeachCS))
            RTCritSectInitEx(&g_LockValClassTeachCS, RTCRITSECT_FLAGS_NO_LOCK_VAL, NIL_RTLOCKVALCLASS,
                             RTLOCKVAL_SUB_CLASS_ANY, "RTLockVal-Teach");

        if (g_hLockValClassTreeRWLock == NIL_RTSEMRW)
        {
            RTSEMRW hSemRW;
            int rc = RTSemRWCreateEx(&hSemRW, RTSEMRW_FLAGS_NO_LOCK_VAL, NIL_RTLOCKVALCLASS, RTLOCKVAL_SUB_CLASS_ANY, "RTLockVal-Tree");
            if (RT_SUCCESS(rc))
                ASMAtomicWriteHandle(&g_hLockValClassTreeRWLock, hSemRW);
        }

        if (g_hLockValidatorXRoads == NIL_RTSEMXROADS)
        {
            RTSEMXROADS hXRoads;
            int rc = RTSemXRoadsCreate(&hXRoads);
            if (RT_SUCCESS(rc))
                ASMAtomicWriteHandle(&g_hLockValidatorXRoads, hXRoads);
        }

#ifdef IN_RING3
        /*
         * Check the environment for our config variables.
         * For each setting the "positive" variable is checked before the
         * "negative" one, so the negative wins when both are present.
         */
        if (RTEnvExist("IPRT_LOCK_VALIDATOR_ENABLED"))
            ASMAtomicWriteBool(&g_fLockValidatorEnabled, true);
        if (RTEnvExist("IPRT_LOCK_VALIDATOR_DISABLED"))
            ASMAtomicWriteBool(&g_fLockValidatorEnabled, false);

        if (RTEnvExist("IPRT_LOCK_VALIDATOR_MAY_PANIC"))
            ASMAtomicWriteBool(&g_fLockValidatorMayPanic, true);
        if (RTEnvExist("IPRT_LOCK_VALIDATOR_MAY_NOT_PANIC"))
            ASMAtomicWriteBool(&g_fLockValidatorMayPanic, false);

        if (RTEnvExist("IPRT_LOCK_VALIDATOR_NOT_QUIET"))
            ASMAtomicWriteBool(&g_fLockValidatorQuiet, false);
        if (RTEnvExist("IPRT_LOCK_VALIDATOR_QUIET"))
            ASMAtomicWriteBool(&g_fLockValidatorQuiet, true);

        if (RTEnvExist("IPRT_LOCK_VALIDATOR_STRICT_ORDER"))
            ASMAtomicWriteBool(&g_fLockValSoftWrongOrder, false);
        if (RTEnvExist("IPRT_LOCK_VALIDATOR_SOFT_ORDER"))
            ASMAtomicWriteBool(&g_fLockValSoftWrongOrder, true);
#endif

        /*
         * Register cleanup
         */
        /** @todo register some cleanup callback if we care. */

        ASMAtomicWriteU32(&s_fInitializing, false);
    }
}
339
340
341
342/** Wrapper around ASMAtomicReadPtr. */
343DECL_FORCE_INLINE(PRTLOCKVALRECUNION) rtLockValidatorReadRecUnionPtr(PRTLOCKVALRECUNION volatile *ppRec)
344{
345 PRTLOCKVALRECUNION p = ASMAtomicReadPtrT(ppRec, PRTLOCKVALRECUNION);
346 RTLOCKVAL_ASSERT_PTR_ALIGN(p);
347 return p;
348}
349
350
351/** Wrapper around ASMAtomicWritePtr. */
352DECL_FORCE_INLINE(void) rtLockValidatorWriteRecUnionPtr(PRTLOCKVALRECUNION volatile *ppRec, PRTLOCKVALRECUNION pRecNew)
353{
354 RTLOCKVAL_ASSERT_PTR_ALIGN(pRecNew);
355 ASMAtomicWritePtr(ppRec, pRecNew);
356}
357
358
359/** Wrapper around ASMAtomicReadPtr. */
360DECL_FORCE_INLINE(PRTTHREADINT) rtLockValidatorReadThreadHandle(RTTHREAD volatile *phThread)
361{
362 PRTTHREADINT p = ASMAtomicReadPtrT(phThread, PRTTHREADINT);
363 RTLOCKVAL_ASSERT_PTR_ALIGN(p);
364 return p;
365}
366
367
368/** Wrapper around ASMAtomicUoReadPtr. */
369DECL_FORCE_INLINE(PRTLOCKVALRECSHRDOWN) rtLockValidatorUoReadSharedOwner(PRTLOCKVALRECSHRDOWN volatile *ppOwner)
370{
371 PRTLOCKVALRECSHRDOWN p = ASMAtomicUoReadPtrT(ppOwner, PRTLOCKVALRECSHRDOWN);
372 RTLOCKVAL_ASSERT_PTR_ALIGN(p);
373 return p;
374}
375
376
377/**
378 * Reads a volatile thread handle field and returns the thread name.
379 *
380 * @returns Thread name (read only).
381 * @param phThread The thread handle field.
382 */
383static const char *rtLockValidatorNameThreadHandle(RTTHREAD volatile *phThread)
384{
385 PRTTHREADINT pThread = rtLockValidatorReadThreadHandle(phThread);
386 if (!pThread)
387 return "<NIL>";
388 if (!VALID_PTR(pThread))
389 return "<INVALID>";
390 if (pThread->u32Magic != RTTHREADINT_MAGIC)
391 return "<BAD-THREAD-MAGIC>";
392 return pThread->szName;
393}
394
395
/**
 * Launch a simple assertion like complaint w/ panic.
 *
 * Does nothing at all when the validator is in quiet mode.
 *
 * @param   SRC_POS     The source position where call is being made from.
 * @param   pszWhat     What we're complaining about.
 * @param   ...         Format arguments.
 */
static void rtLockValComplain(RT_SRC_POS_DECL, const char *pszWhat, ...)
{
    if (!ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
    {
        RTAssertMsg1Weak("RTLockValidator", iLine, pszFile, pszFunction);
        va_list va;
        va_start(va, pszWhat);
        RTAssertMsg2WeakV(pszWhat, va);
        va_end(va);
    }
    /* The quiet flag (not g_fLockValidatorMayPanic) is deliberately re-read
       here; it may have been flipped while the message was being formatted. */
    if (!ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
        RTAssertPanic();
}
416
417
/**
 * Describes the class.
 *
 * @param   pszPrefix   Message prefix.
 * @param   pClass      The class to complain about.
 * @param   uSubClass   My sub-class.
 * @param   fVerbose    Verbose description including relations to other
 *                      classes.
 */
static void rtLockValComplainAboutClass(const char *pszPrefix, RTLOCKVALCLASSINT *pClass, uint32_t uSubClass, bool fVerbose)
{
    if (ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
        return;

    /* Stringify the sub-class. */
    const char *pszSubClass;
    char szSubClass[32];
    if (uSubClass < RTLOCKVAL_SUB_CLASS_USER)
        switch (uSubClass)
        {
            case RTLOCKVAL_SUB_CLASS_NONE: pszSubClass = "none"; break;
            case RTLOCKVAL_SUB_CLASS_ANY: pszSubClass = "any"; break;
            default:
                RTStrPrintf(szSubClass, sizeof(szSubClass), "invl-%u", uSubClass);
                pszSubClass = szSubClass;
                break;
        }
    else
    {
        RTStrPrintf(szSubClass, sizeof(szSubClass), "%u", uSubClass);
        pszSubClass = szSubClass;
    }

    /* Validate the class pointer before dereferencing anything. */
    if (!VALID_PTR(pClass))
    {
        RTAssertMsg2AddWeak("%sbad class=%p sub-class=%s\n", pszPrefix, pClass, pszSubClass);
        return;
    }
    if (pClass->u32Magic != RTLOCKVALCLASS_MAGIC)
    {
        RTAssertMsg2AddWeak("%sbad class=%p magic=%#x sub-class=%s\n", pszPrefix, pClass, pClass->u32Magic, pszSubClass);
        return;
    }

    /* OK, dump the class info. */
    RTAssertMsg2AddWeak("%sclass=%p %s created={%Rbn(%u) %Rfn %p} sub-class=%s\n", pszPrefix,
                        pClass,
                        pClass->pszName,
                        pClass->CreatePos.pszFile,
                        pClass->CreatePos.uLine,
                        pClass->CreatePos.pszFunction,
                        pClass->CreatePos.uId,
                        pszSubClass);
    if (fVerbose)
    {
        /* One line per prior-lock reference, walking all chunks. */
        uint32_t i = 0;
        uint32_t cPrinted = 0;
        for (PRTLOCKVALCLASSREFCHUNK pChunk = &pClass->PriorLocks; pChunk; pChunk = pChunk->pNext)
            for (unsigned j = 0; j < RT_ELEMENTS(pChunk->aRefs); j++, i++)
            {
                RTLOCKVALCLASSINT *pCurClass = pChunk->aRefs[j].hClass;
                if (pCurClass != NIL_RTLOCKVALCLASS)
                {
                    RTAssertMsg2AddWeak("%s%s #%02u: %s, %s, %u lookup%s\n", pszPrefix,
                                        cPrinted == 0
                                        ? "Prior:"
                                        : " ",
                                        i,
                                        pCurClass->pszName,
                                        pChunk->aRefs[j].fAutodidacticism
                                        ? "autodidactic"
                                        : "manually ",
                                        pChunk->aRefs[j].cLookups,
                                        pChunk->aRefs[j].cLookups != 1 ? "s" : "");
                    cPrinted++;
                }
            }
        if (!cPrinted)
            RTAssertMsg2AddWeak("%sPrior: none\n", pszPrefix);
#ifdef RTLOCKVAL_WITH_CLASS_HASH_STATS
        RTAssertMsg2AddWeak("%sHash Stats: %u hits, %u misses\n", pszPrefix, pClass->cHashHits, pClass->cHashMisses);
#endif
    }
    else
    {
        /* Compact form: up to 10 prior classes per output line; '*' marks
           autodidactic (automatically learned) entries. */
        uint32_t cPrinted = 0;
        for (PRTLOCKVALCLASSREFCHUNK pChunk = &pClass->PriorLocks; pChunk; pChunk = pChunk->pNext)
            for (unsigned j = 0; j < RT_ELEMENTS(pChunk->aRefs); j++)
            {
                RTLOCKVALCLASSINT *pCurClass = pChunk->aRefs[j].hClass;
                if (pCurClass != NIL_RTLOCKVALCLASS)
                {
                    if ((cPrinted % 10) == 0)
                        RTAssertMsg2AddWeak("%sPrior classes: %s%s", pszPrefix, pCurClass->pszName,
                                            pChunk->aRefs[j].fAutodidacticism ? "*" : "");
                    else if ((cPrinted % 10) != 9)
                        RTAssertMsg2AddWeak(", %s%s", pCurClass->pszName,
                                            pChunk->aRefs[j].fAutodidacticism ? "*" : "");
                    else
                        RTAssertMsg2AddWeak(", %s%s\n", pCurClass->pszName,
                                            pChunk->aRefs[j].fAutodidacticism ? "*" : "");
                    cPrinted++;
                }
            }
        if (!cPrinted)
            RTAssertMsg2AddWeak("%sPrior classes: none\n", pszPrefix);
        else if ((cPrinted % 10) != 0)
            RTAssertMsg2AddWeak("\n");
    }
}
529
530
531/**
532 * Helper for getting the class name.
533 * @returns Class name string.
534 * @param pClass The class.
535 */
536static const char *rtLockValComplainGetClassName(RTLOCKVALCLASSINT *pClass)
537{
538 if (!pClass)
539 return "<nil-class>";
540 if (!VALID_PTR(pClass))
541 return "<bad-class-ptr>";
542 if (pClass->u32Magic != RTLOCKVALCLASS_MAGIC)
543 return "<bad-class-magic>";
544 if (!pClass->pszName)
545 return "<no-class-name>";
546 return pClass->pszName;
547}
548
549/**
550 * Formats the sub-class.
551 *
552 * @returns Stringified sub-class.
553 * @param uSubClass The name.
554 * @param pszBuf Buffer that is big enough.
555 */
556static const char *rtLockValComplainGetSubClassName(uint32_t uSubClass, char *pszBuf)
557{
558 if (uSubClass < RTLOCKVAL_SUB_CLASS_USER)
559 switch (uSubClass)
560 {
561 case RTLOCKVAL_SUB_CLASS_NONE: return "none";
562 case RTLOCKVAL_SUB_CLASS_ANY: return "any";
563 default:
564 RTStrPrintf(pszBuf, 32, "invl-%u", uSubClass);
565 break;
566 }
567 else
568 RTStrPrintf(pszBuf, 32, "%x", uSubClass);
569 return pszBuf;
570}
571
572
573/**
574 * Helper for rtLockValComplainAboutLock.
575 */
576DECL_FORCE_INLINE(void) rtLockValComplainAboutLockHlp(const char *pszPrefix, PRTLOCKVALRECUNION pRec, const char *pszSuffix,
577 uint32_t u32Magic, PCRTLOCKVALSRCPOS pSrcPos, uint32_t cRecursion,
578 const char *pszFrameType)
579{
580 char szBuf[32];
581 switch (u32Magic)
582 {
583 case RTLOCKVALRECEXCL_MAGIC:
584#ifdef RTLOCKVAL_WITH_VERBOSE_DUMPS
585 RTAssertMsg2AddWeak("%s%p %s xrec=%p own=%s r=%u cls=%s/%s pos={%Rbn(%u) %Rfn %p} [x%s]%s", pszPrefix,
586 pRec->Excl.hLock, pRec->Excl.szName, pRec,
587 rtLockValidatorNameThreadHandle(&pRec->Excl.hThread), cRecursion,
588 rtLockValComplainGetClassName(pRec->Excl.hClass),
589 rtLockValComplainGetSubClassName(pRec->Excl.uSubClass, szBuf),
590 pSrcPos->pszFile, pSrcPos->uLine, pSrcPos->pszFunction, pSrcPos->uId,
591 pszFrameType, pszSuffix);
592#else
593 RTAssertMsg2AddWeak("%s%p %s own=%s r=%u cls=%s/%s pos={%Rbn(%u) %Rfn %p} [x%s]%s", pszPrefix,
594 pRec->Excl.hLock, pRec->Excl.szName,
595 rtLockValidatorNameThreadHandle(&pRec->Excl.hThread), cRecursion,
596 rtLockValComplainGetClassName(pRec->Excl.hClass),
597 rtLockValComplainGetSubClassName(pRec->Excl.uSubClass, szBuf),
598 pSrcPos->pszFile, pSrcPos->uLine, pSrcPos->pszFunction, pSrcPos->uId,
599 pszFrameType, pszSuffix);
600#endif
601 break;
602
603 case RTLOCKVALRECSHRD_MAGIC:
604 RTAssertMsg2AddWeak("%ss %p %s srec=%p cls=%s/%s [s%s]%s", pszPrefix,
605 pRec->Shared.hLock, pRec->Shared.szName, pRec,
606 rtLockValComplainGetClassName(pRec->Shared.hClass),
607 rtLockValComplainGetSubClassName(pRec->Shared.uSubClass, szBuf),
608 pszFrameType, pszSuffix);
609 break;
610
611 case RTLOCKVALRECSHRDOWN_MAGIC:
612 {
613 PRTLOCKVALRECSHRD pShared = pRec->ShrdOwner.pSharedRec;
614 if ( VALID_PTR(pShared)
615 && pShared->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC)
616#ifdef RTLOCKVAL_WITH_VERBOSE_DUMPS
617 RTAssertMsg2AddWeak("%s%p %s srec=%p trec=%p own=%s r=%u cls=%s/%s pos={%Rbn(%u) %Rfn %p} [o%s]%s", pszPrefix,
618 pShared->hLock, pShared->szName, pShared,
619 pRec, rtLockValidatorNameThreadHandle(&pRec->ShrdOwner.hThread), cRecursion,
620 rtLockValComplainGetClassName(pShared->hClass),
621 rtLockValComplainGetSubClassName(pShared->uSubClass, szBuf),
622 pSrcPos->pszFile, pSrcPos->uLine, pSrcPos->pszFunction, pSrcPos->uId,
623 pszSuffix, pszSuffix);
624#else
625 RTAssertMsg2AddWeak("%s%p %s own=%s r=%u cls=%s/%s pos={%Rbn(%u) %Rfn %p} [o%s]%s", pszPrefix,
626 pShared->hLock, pShared->szName,
627 rtLockValidatorNameThreadHandle(&pRec->ShrdOwner.hThread), cRecursion,
628 rtLockValComplainGetClassName(pShared->hClass),
629 rtLockValComplainGetSubClassName(pShared->uSubClass, szBuf),
630 pSrcPos->pszFile, pSrcPos->uLine, pSrcPos->pszFunction, pSrcPos->uId,
631 pszFrameType, pszSuffix);
632#endif
633 else
634 RTAssertMsg2AddWeak("%sbad srec=%p trec=%p own=%s r=%u pos={%Rbn(%u) %Rfn %p} [x%s]%s", pszPrefix,
635 pShared,
636 pRec, rtLockValidatorNameThreadHandle(&pRec->ShrdOwner.hThread), cRecursion,
637 pSrcPos->pszFile, pSrcPos->uLine, pSrcPos->pszFunction, pSrcPos->uId,
638 pszFrameType, pszSuffix);
639 break;
640 }
641
642 default:
643 AssertMsgFailed(("%#x\n", u32Magic));
644 }
645}
646
647
/**
 * Describes the lock.
 *
 * Dispatches on the record magic; invalid pointers and quiet mode result in
 * no output at all.
 *
 * @param   pszPrefix   Message prefix.
 * @param   pRec        The lock record we're working on.
 * @param   pszSuffix   Message suffix.
 */
static void rtLockValComplainAboutLock(const char *pszPrefix, PRTLOCKVALRECUNION pRec, const char *pszSuffix)
{
/* With recursion records compiled in, the per-record recursion count is
   reported as 1; the nest records carry the real counts. */
#ifdef RTLOCKVAL_WITH_RECURSION_RECORDS
# define FIX_REC(r) 1
#else
# define FIX_REC(r) (r)
#endif
    if (   VALID_PTR(pRec)
        && !ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
    {
        switch (pRec->Core.u32Magic)
        {
            case RTLOCKVALRECEXCL_MAGIC:
                rtLockValComplainAboutLockHlp(pszPrefix, pRec, pszSuffix, RTLOCKVALRECEXCL_MAGIC,
                                              &pRec->Excl.SrcPos, FIX_REC(pRec->Excl.cRecursion), "");
                break;

            case RTLOCKVALRECSHRD_MAGIC:
                rtLockValComplainAboutLockHlp(pszPrefix, pRec, pszSuffix, RTLOCKVALRECSHRD_MAGIC, NULL, 0, "");
                break;

            case RTLOCKVALRECSHRDOWN_MAGIC:
                rtLockValComplainAboutLockHlp(pszPrefix, pRec, pszSuffix, RTLOCKVALRECSHRDOWN_MAGIC,
                                              &pRec->ShrdOwner.SrcPos, FIX_REC(pRec->ShrdOwner.cRecursion), "");
                break;

            case RTLOCKVALRECNEST_MAGIC:
            {
                /* Nest (recursion) records wrap a real record; validate the
                   wrapped record's magic before describing it. */
                PRTLOCKVALRECUNION pRealRec = pRec->Nest.pRec;
                uint32_t u32Magic;
                if (   VALID_PTR(pRealRec)
                    && (   (u32Magic = pRealRec->Core.u32Magic) == RTLOCKVALRECEXCL_MAGIC
                        || u32Magic == RTLOCKVALRECSHRD_MAGIC
                        || u32Magic == RTLOCKVALRECSHRDOWN_MAGIC)
                   )
                    rtLockValComplainAboutLockHlp(pszPrefix, pRealRec, pszSuffix, u32Magic,
                                                  &pRec->Nest.SrcPos, pRec->Nest.cRecursion, "/r");
                else
                    RTAssertMsg2AddWeak("%sbad rrec=%p nrec=%p r=%u pos={%Rbn(%u) %Rfn %p}%s", pszPrefix,
                                        pRealRec, pRec, pRec->Nest.cRecursion,
                                        pRec->Nest.SrcPos.pszFile, pRec->Nest.SrcPos.uLine, pRec->Nest.SrcPos.pszFunction, pRec->Nest.SrcPos.uId,
                                        pszSuffix);
                break;
            }

            default:
                RTAssertMsg2AddWeak("%spRec=%p u32Magic=%#x (bad)%s", pszPrefix, pRec, pRec->Core.u32Magic, pszSuffix);
                break;
        }
    }
#undef FIX_REC
}
707
708
/**
 * Dump the lock stack.
 *
 * @param   pThread         The thread which lock stack we're gonna dump.
 * @param   cchIndent       The indentation in chars.
 * @param   cMinFrames      The minimum number of frames to consider
 *                          dumping.
 * @param   pHighightRec    Record that should be marked specially in the
 *                          dump.
 */
static void rtLockValComplainAboutLockStack(PRTTHREADINT pThread, unsigned cchIndent, uint32_t cMinFrames,
                                            PRTLOCKVALRECUNION pHighightRec)
{
    if (   VALID_PTR(pThread)
        && !ASMAtomicUoReadBool(&g_fLockValidatorQuiet)
        && pThread->u32Magic == RTTHREADINT_MAGIC
       )
    {
        uint32_t cEntries = rtLockValidatorStackDepth(pThread);
        if (cEntries >= cMinFrames)
        {
            RTAssertMsg2AddWeak("%*s---- start of lock stack for %p %s - %u entr%s ----\n", cchIndent, "",
                                pThread, pThread->szName, cEntries, cEntries == 1 ? "y" : "ies");
            /* Walk the pDown chain from the top of the stack; an unknown
               record magic terminates the walk. */
            PRTLOCKVALRECUNION pCur = rtLockValidatorReadRecUnionPtr(&pThread->LockValidator.pStackTop);
            for (uint32_t i = 0; VALID_PTR(pCur); i++)
            {
                char szPrefix[80];
                RTStrPrintf(szPrefix, sizeof(szPrefix), "%*s#%02u: ", cchIndent, "", i);
                /* " (*)" marks the highlighted record. */
                rtLockValComplainAboutLock(szPrefix, pCur, pHighightRec != pCur ? "\n" : " (*)\n");
                switch (pCur->Core.u32Magic)
                {
                    case RTLOCKVALRECEXCL_MAGIC: pCur = rtLockValidatorReadRecUnionPtr(&pCur->Excl.pDown); break;
                    case RTLOCKVALRECSHRDOWN_MAGIC: pCur = rtLockValidatorReadRecUnionPtr(&pCur->ShrdOwner.pDown); break;
                    case RTLOCKVALRECNEST_MAGIC: pCur = rtLockValidatorReadRecUnionPtr(&pCur->Nest.pDown); break;
                    default:
                        RTAssertMsg2AddWeak("%*s<bad stack frame>\n", cchIndent, "");
                        pCur = NULL;
                        break;
                }
            }
            RTAssertMsg2AddWeak("%*s---- end of lock stack ----\n", cchIndent, "");
        }
    }
}
753
754
/**
 * Launch the initial complaint.
 *
 * Prints the assertion header, the message, the main lock record and
 * (optionally) the calling thread's lock stack.  No-op in quiet mode.
 *
 * @param   pszWhat         What we're complaining about.
 * @param   pSrcPos         Where we are complaining from, as it were.
 * @param   pThreadSelf     The calling thread.
 * @param   pRec            The main lock involved. Can be NULL.
 * @param   fDumpStack      Whether to dump the lock stack (true) or not
 *                          (false).
 */
static void rtLockValComplainFirst(const char *pszWhat, PCRTLOCKVALSRCPOS pSrcPos, PRTTHREADINT pThreadSelf,
                                   PRTLOCKVALRECUNION pRec, bool fDumpStack)
{
    if (!ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
    {
        ASMCompilerBarrier(); /* paranoia */
        RTAssertMsg1Weak("RTLockValidator", pSrcPos ? pSrcPos->uLine : 0, pSrcPos ? pSrcPos->pszFile : NULL, pSrcPos ? pSrcPos->pszFunction : NULL);
        if (pSrcPos && pSrcPos->uId)
            RTAssertMsg2Weak("%s [uId=%p thrd=%s]\n", pszWhat, pSrcPos->uId, VALID_PTR(pThreadSelf) ? pThreadSelf->szName : "<NIL>");
        else
            RTAssertMsg2Weak("%s [thrd=%s]\n", pszWhat, VALID_PTR(pThreadSelf) ? pThreadSelf->szName : "<NIL>");
        rtLockValComplainAboutLock("Lock: ", pRec, "\n");
        if (fDumpStack)
            rtLockValComplainAboutLockStack(pThreadSelf, 0, 1, pRec);
    }
}
781
782
783/**
784 * Continue bitching.
785 *
786 * @param pszFormat Format string.
787 * @param ... Format arguments.
788 */
789static void rtLockValComplainMore(const char *pszFormat, ...)
790{
791 if (!ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
792 {
793 va_list va;
794 va_start(va, pszFormat);
795 RTAssertMsg2AddWeakV(pszFormat, va);
796 va_end(va);
797 }
798}
799
800
801/**
802 * Raise a panic if enabled.
803 */
804static void rtLockValComplainPanic(void)
805{
806 if (ASMAtomicUoReadBool(&g_fLockValidatorMayPanic))
807 RTAssertPanic();
808}
809
810
/**
 * Copy a source position record.
 *
 * Each field is written with an unordered atomic store, so a concurrent
 * reader never sees a torn individual field, though it may observe a mix of
 * old and new fields.
 *
 * @param   pDst    The destination.
 * @param   pSrc    The source.  Can be NULL, in which case pDst is zeroed.
 */
DECL_FORCE_INLINE(void) rtLockValidatorSrcPosCopy(PRTLOCKVALSRCPOS pDst, PCRTLOCKVALSRCPOS pSrc)
{
    if (pSrc)
    {
        ASMAtomicUoWriteU32(&pDst->uLine, pSrc->uLine);
        ASMAtomicUoWritePtr(&pDst->pszFile, pSrc->pszFile);
        ASMAtomicUoWritePtr(&pDst->pszFunction, pSrc->pszFunction);
        ASMAtomicUoWritePtr((void * volatile *)&pDst->uId, (void *)pSrc->uId);
    }
    else
    {
        ASMAtomicUoWriteU32(&pDst->uLine, 0);
        ASMAtomicUoWriteNullPtr(&pDst->pszFile);
        ASMAtomicUoWriteNullPtr(&pDst->pszFunction);
        ASMAtomicUoWritePtr(&pDst->uId, (RTHCUINTPTR)0);
    }
}
834
835
836/**
837 * Init a source position record.
838 *
839 * @param pSrcPos The source position record.
840 */
841DECL_FORCE_INLINE(void) rtLockValidatorSrcPosInit(PRTLOCKVALSRCPOS pSrcPos)
842{
843 pSrcPos->pszFile = NULL;
844 pSrcPos->pszFunction = NULL;
845 pSrcPos->uId = 0;
846 pSrcPos->uLine = 0;
847#if HC_ARCH_BITS == 64
848 pSrcPos->u32Padding = 0;
849#endif
850}
851
852
853/**
854 * Hashes the specified source position.
855 *
856 * @returns Hash.
857 * @param pSrcPos The source position record.
858 */
859static uint32_t rtLockValidatorSrcPosHash(PCRTLOCKVALSRCPOS pSrcPos)
860{
861 uint32_t uHash;
862 if ( ( pSrcPos->pszFile
863 || pSrcPos->pszFunction)
864 && pSrcPos->uLine != 0)
865 {
866 uHash = 0;
867 if (pSrcPos->pszFile)
868 uHash = sdbmInc(pSrcPos->pszFile, uHash);
869 if (pSrcPos->pszFunction)
870 uHash = sdbmInc(pSrcPos->pszFunction, uHash);
871 uHash += pSrcPos->uLine;
872 }
873 else
874 {
875 Assert(pSrcPos->uId);
876 uHash = (uint32_t)pSrcPos->uId;
877 }
878
879 return uHash;
880}
881
882
883/**
884 * Compares two source positions.
885 *
886 * @returns 0 if equal, < 0 if pSrcPos1 is smaller than pSrcPos2, > 0 if
887 * otherwise.
888 * @param pSrcPos1 The first source position.
889 * @param pSrcPos2 The second source position.
890 */
891static int rtLockValidatorSrcPosCompare(PCRTLOCKVALSRCPOS pSrcPos1, PCRTLOCKVALSRCPOS pSrcPos2)
892{
893 if (pSrcPos1->uLine != pSrcPos2->uLine)
894 return pSrcPos1->uLine < pSrcPos2->uLine ? -1 : 1;
895
896 int iDiff = RTStrCmp(pSrcPos1->pszFile, pSrcPos2->pszFile);
897 if (iDiff != 0)
898 return iDiff;
899
900 iDiff = RTStrCmp(pSrcPos1->pszFunction, pSrcPos2->pszFunction);
901 if (iDiff != 0)
902 return iDiff;
903
904 if (pSrcPos1->uId != pSrcPos2->uId)
905 return pSrcPos1->uId < pSrcPos2->uId ? -1 : 1;
906 return 0;
907}
908
909
910
911/**
912 * Serializes destruction of RTLOCKVALREC* and RTTHREADINT structures.
913 */
914DECLHIDDEN(void) rtLockValidatorSerializeDestructEnter(void)
915{
916 RTSEMXROADS hXRoads = g_hLockValidatorXRoads;
917 if (hXRoads != NIL_RTSEMXROADS)
918 RTSemXRoadsNSEnter(hXRoads);
919}
920
921
922/**
923 * Call after rtLockValidatorSerializeDestructEnter.
924 */
925DECLHIDDEN(void) rtLockValidatorSerializeDestructLeave(void)
926{
927 RTSEMXROADS hXRoads = g_hLockValidatorXRoads;
928 if (hXRoads != NIL_RTSEMXROADS)
929 RTSemXRoadsNSLeave(hXRoads);
930}
931
932
933/**
934 * Serializes deadlock detection against destruction of the objects being
935 * inspected.
936 */
937DECLINLINE(void) rtLockValidatorSerializeDetectionEnter(void)
938{
939 RTSEMXROADS hXRoads = g_hLockValidatorXRoads;
940 if (hXRoads != NIL_RTSEMXROADS)
941 RTSemXRoadsEWEnter(hXRoads);
942}
943
944
945/**
946 * Call after rtLockValidatorSerializeDetectionEnter.
947 */
948DECLHIDDEN(void) rtLockValidatorSerializeDetectionLeave(void)
949{
950 RTSEMXROADS hXRoads = g_hLockValidatorXRoads;
951 if (hXRoads != NIL_RTSEMXROADS)
952 RTSemXRoadsEWLeave(hXRoads);
953}
954
955
956/**
957 * Initializes the per thread lock validator data.
958 *
959 * @param pPerThread The data.
960 */
961DECLHIDDEN(void) rtLockValidatorInitPerThread(RTLOCKVALPERTHREAD *pPerThread)
962{
963 pPerThread->bmFreeShrdOwners = UINT32_MAX;
964
965 /* ASSUMES the rest has already been zeroed. */
966 Assert(pPerThread->pRec == NULL);
967 Assert(pPerThread->cWriteLocks == 0);
968 Assert(pPerThread->cReadLocks == 0);
969 Assert(pPerThread->fInValidator == false);
970 Assert(pPerThread->pStackTop == NULL);
971}
972
973
974/**
975 * Delete the per thread lock validator data.
976 *
977 * @param pPerThread The data.
978 */
979DECLHIDDEN(void) rtLockValidatorDeletePerThread(RTLOCKVALPERTHREAD *pPerThread)
980{
981 /*
982 * Check that the thread doesn't own any locks at this time.
983 */
984 if (pPerThread->pStackTop)
985 {
986 rtLockValComplainFirst("Thread terminating owning locks!", NULL,
987 RT_FROM_MEMBER(pPerThread, RTTHREADINT, LockValidator),
988 pPerThread->pStackTop, true);
989 rtLockValComplainPanic();
990 }
991
992 /*
993 * Free the recursion records.
994 */
995 PRTLOCKVALRECNEST pCur = pPerThread->pFreeNestRecs;
996 pPerThread->pFreeNestRecs = NULL;
997 while (pCur)
998 {
999 PRTLOCKVALRECNEST pNext = pCur->pNextFree;
1000 RTMemFree(pCur);
1001 pCur = pNext;
1002 }
1003}
1004
1005RTDECL(int) RTLockValidatorClassCreateEx(PRTLOCKVALCLASS phClass, PCRTLOCKVALSRCPOS pSrcPos,
1006 bool fAutodidact, bool fRecursionOk, bool fStrictReleaseOrder,
1007 RTMSINTERVAL cMsMinDeadlock, RTMSINTERVAL cMsMinOrder,
1008 const char *pszNameFmt, ...)
1009{
1010 va_list va;
1011 va_start(va, pszNameFmt);
1012 int rc = RTLockValidatorClassCreateExV(phClass, pSrcPos, fAutodidact, fRecursionOk, fStrictReleaseOrder,
1013 cMsMinDeadlock, cMsMinOrder, pszNameFmt, va);
1014 va_end(va);
1015 return rc;
1016}
1017
1018
RTDECL(int) RTLockValidatorClassCreateExV(PRTLOCKVALCLASS phClass, PCRTLOCKVALSRCPOS pSrcPos,
                                          bool fAutodidact, bool fRecursionOk, bool fStrictReleaseOrder,
                                          RTMSINTERVAL cMsMinDeadlock, RTMSINTERVAL cMsMinOrder,
                                          const char *pszNameFmt, va_list va)
{
    Assert(cMsMinDeadlock >= 1);
    Assert(cMsMinOrder >= 1);
    AssertPtr(pSrcPos);

    /*
     * Format the name and calc its length (incl. terminator).  Anonymous
     * classes get a sequentially numbered "anon-N" name.
     */
    size_t cbName;
    char szName[32];
    if (pszNameFmt && *pszNameFmt)
        cbName = RTStrPrintfV(szName, sizeof(szName), pszNameFmt, va) + 1;
    else
    {
        static uint32_t volatile s_cAnonymous = 0;
        uint32_t i = ASMAtomicIncU32(&s_cAnonymous);
        cbName = RTStrPrintf(szName, sizeof(szName), "anon-%u", i - 1) + 1;
    }

    /*
     * Figure out the file and function name lengths and allocate memory for
     * it all.  The three strings are stored in one block right after the
     * class structure itself.
     */
    size_t const cbFile     = pSrcPos->pszFile ? strlen(pSrcPos->pszFile) + 1 : 0;
    size_t const cbFunction = pSrcPos->pszFunction ? strlen(pSrcPos->pszFunction) + 1 : 0;
    RTLOCKVALCLASSINT *pThis = (RTLOCKVALCLASSINT *)RTMemAllocVarTag(sizeof(*pThis) + cbFile + cbFunction + cbName,
                                                                     "may-leak:RTLockValidatorClassCreateExV");
    if (!pThis)
        return VERR_NO_MEMORY;
    RTMEM_MAY_LEAK(pThis); /* classes may intentionally outlive leak detection */

    /*
     * Initialize the class data.  The AVL key is the source position hash so
     * RTLockValidatorClassFindForSrcPos can look classes up again.
     */
    pThis->Core.Key             = rtLockValidatorSrcPosHash(pSrcPos);
    pThis->Core.uchHeight       = 0;
    pThis->Core.pLeft           = NULL;
    pThis->Core.pRight          = NULL;
    pThis->Core.pList           = NULL;
    pThis->u32Magic             = RTLOCKVALCLASS_MAGIC;
    pThis->cRefs                = 1;
    pThis->fAutodidact          = fAutodidact;
    pThis->fRecursionOk         = fRecursionOk;
    pThis->fStrictReleaseOrder  = fStrictReleaseOrder;
    pThis->fInTree              = false;
    pThis->fDonateRefToNextRetainer = false;
    pThis->afReserved[0]        = false;
    pThis->afReserved[1]        = false;
    pThis->afReserved[2]        = false;
    pThis->cMsMinDeadlock       = cMsMinDeadlock;
    pThis->cMsMinOrder          = cMsMinOrder;
    for (unsigned i = 0; i < RT_ELEMENTS(pThis->au32Reserved); i++)
        pThis->au32Reserved[i]  = 0;
    /* Mark all prior-lock reference slots as unused. */
    for (unsigned i = 0; i < RT_ELEMENTS(pThis->PriorLocks.aRefs); i++)
    {
        pThis->PriorLocks.aRefs[i].hClass = NIL_RTLOCKVALCLASS;
        pThis->PriorLocks.aRefs[i].cLookups = 0;
        pThis->PriorLocks.aRefs[i].fAutodidacticism = false;
        pThis->PriorLocks.aRefs[i].afReserved[0] = false;
        pThis->PriorLocks.aRefs[i].afReserved[1] = false;
        pThis->PriorLocks.aRefs[i].afReserved[2] = false;
    }
    pThis->PriorLocks.pNext     = NULL;
    for (unsigned i = 0; i < RT_ELEMENTS(pThis->apPriorLocksHash); i++)
        pThis->apPriorLocksHash[i] = NULL;
    /* Copy the name and source position strings into the trailing buffer
       (layout: name, file, function -- must match the size calc above). */
    char *pszDst = (char *)(pThis + 1);
    pThis->pszName              = (char *)memcpy(pszDst, szName, cbName);
    pszDst += cbName;
    rtLockValidatorSrcPosCopy(&pThis->CreatePos, pSrcPos);
    pThis->CreatePos.pszFile    = pSrcPos->pszFile ? (char *)memcpy(pszDst, pSrcPos->pszFile, cbFile) : NULL;
    pszDst += cbFile;
    pThis->CreatePos.pszFunction= pSrcPos->pszFunction ? (char *)memcpy(pszDst, pSrcPos->pszFunction, cbFunction) : NULL;
    Assert(rtLockValidatorSrcPosHash(&pThis->CreatePos) == pThis->Core.Key);
#ifdef RTLOCKVAL_WITH_CLASS_HASH_STATS
    pThis->cHashHits            = 0;
    pThis->cHashMisses          = 0;
#endif

    *phClass = pThis;
    return VINF_SUCCESS;
}
1104
1105
1106RTDECL(int) RTLockValidatorClassCreate(PRTLOCKVALCLASS phClass, bool fAutodidact, RT_SRC_POS_DECL, const char *pszNameFmt, ...)
1107{
1108 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_POS_NO_ID();
1109 va_list va;
1110 va_start(va, pszNameFmt);
1111 int rc = RTLockValidatorClassCreateExV(phClass, &SrcPos,
1112 fAutodidact, true /*fRecursionOk*/, false /*fStrictReleaseOrder*/,
1113 1 /*cMsMinDeadlock*/, 1 /*cMsMinOrder*/,
1114 pszNameFmt, va);
1115 va_end(va);
1116 return rc;
1117}
1118
1119
1120/**
1121 * Creates a new lock validator class with a reference that is consumed by the
1122 * first call to RTLockValidatorClassRetain.
1123 *
1124 * This is tailored for use in the parameter list of a semaphore constructor.
1125 *
1126 * @returns Class handle with a reference that is automatically consumed by the
1127 * first retainer. NIL_RTLOCKVALCLASS if we run into trouble.
1128 *
1129 * @param SRC_POS The source position where call is being made from.
1130 * Use RT_SRC_POS when possible. Optional.
1131 * @param pszNameFmt Class name format string, optional (NULL). Max
1132 * length is 32 bytes.
1133 * @param ... Format string arguments.
1134 */
1135RTDECL(RTLOCKVALCLASS) RTLockValidatorClassCreateUnique(RT_SRC_POS_DECL, const char *pszNameFmt, ...)
1136{
1137 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_POS_NO_ID();
1138 RTLOCKVALCLASSINT *pClass;
1139 va_list va;
1140 va_start(va, pszNameFmt);
1141 int rc = RTLockValidatorClassCreateExV(&pClass, &SrcPos,
1142 true /*fAutodidact*/, true /*fRecursionOk*/, false /*fStrictReleaseOrder*/,
1143 1 /*cMsMinDeadlock*/, 1 /*cMsMinOrder*/,
1144 pszNameFmt, va);
1145 va_end(va);
1146 if (RT_FAILURE(rc))
1147 return NIL_RTLOCKVALCLASS;
1148 ASMAtomicWriteBool(&pClass->fDonateRefToNextRetainer, true); /* see rtLockValidatorClassRetain */
1149 return pClass;
1150}
1151
1152
/**
 * Internal class retainer.
 * @returns The new reference count.
 * @param   pClass      The class.
 */
DECL_FORCE_INLINE(uint32_t) rtLockValidatorClassRetain(RTLOCKVALCLASSINT *pClass)
{
    uint32_t cRefs = ASMAtomicIncU32(&pClass->cRefs);
    if (cRefs > RTLOCKVALCLASS_MAX_REFS)
        /* Saturate the counter instead of letting it grow past the cap. */
        ASMAtomicWriteU32(&pClass->cRefs, RTLOCKVALCLASS_MAX_REFS);
    else if (   cRefs == 2
             && ASMAtomicXchgBool(&pClass->fDonateRefToNextRetainer, false))
        /* First retain after RTLockValidatorClassCreateUnique: consume the
           donated creation reference by undoing our increment. */
        cRefs = ASMAtomicDecU32(&pClass->cRefs);
    return cRefs;
}
1168
1169
1170/**
1171 * Validates and retains a lock validator class.
1172 *
1173 * @returns @a hClass on success, NIL_RTLOCKVALCLASS on failure.
1174 * @param hClass The class handle. NIL_RTLOCKVALCLASS is ok.
1175 */
1176DECL_FORCE_INLINE(RTLOCKVALCLASS) rtLockValidatorClassValidateAndRetain(RTLOCKVALCLASS hClass)
1177{
1178 if (hClass == NIL_RTLOCKVALCLASS)
1179 return hClass;
1180 AssertPtrReturn(hClass, NIL_RTLOCKVALCLASS);
1181 AssertReturn(hClass->u32Magic == RTLOCKVALCLASS_MAGIC, NIL_RTLOCKVALCLASS);
1182 rtLockValidatorClassRetain(hClass);
1183 return hClass;
1184}
1185
1186
/**
 * Internal class releaser.
 * @returns The new reference count.
 * @param   pClass      The class.
 */
DECLINLINE(uint32_t) rtLockValidatorClassRelease(RTLOCKVALCLASSINT *pClass)
{
    uint32_t cRefs = ASMAtomicDecU32(&pClass->cRefs);
    if (cRefs + 1 == RTLOCKVALCLASS_MAX_REFS)
        /* The counter is saturated (see rtLockValidatorClassRetain); pin it
           back at the cap so such classes are never destroyed. */
        ASMAtomicWriteU32(&pClass->cRefs, RTLOCKVALCLASS_MAX_REFS);
    else if (!cRefs)
        rtLockValidatorClassDestroy(pClass);
    return cRefs;
}
1201
1202
1203/**
1204 * Destroys a class once there are not more references to it.
1205 *
1206 * @param pClass The class.
1207 */
1208static void rtLockValidatorClassDestroy(RTLOCKVALCLASSINT *pClass)
1209{
1210 AssertReturnVoid(!pClass->fInTree);
1211 ASMAtomicWriteU32(&pClass->u32Magic, RTLOCKVALCLASS_MAGIC_DEAD);
1212
1213 PRTLOCKVALCLASSREFCHUNK pChunk = &pClass->PriorLocks;
1214 while (pChunk)
1215 {
1216 for (uint32_t i = 0; i < RT_ELEMENTS(pChunk->aRefs); i++)
1217 {
1218 RTLOCKVALCLASSINT *pClass2 = pChunk->aRefs[i].hClass;
1219 if (pClass2 != NIL_RTLOCKVALCLASS)
1220 {
1221 pChunk->aRefs[i].hClass = NIL_RTLOCKVALCLASS;
1222 rtLockValidatorClassRelease(pClass2);
1223 }
1224 }
1225
1226 PRTLOCKVALCLASSREFCHUNK pNext = pChunk->pNext;
1227 pChunk->pNext = NULL;
1228 if (pChunk != &pClass->PriorLocks)
1229 RTMemFree(pChunk);
1230 pChunk = pNext;
1231 }
1232
1233 RTMemFree(pClass);
1234}
1235
1236
1237RTDECL(RTLOCKVALCLASS) RTLockValidatorClassFindForSrcPos(PRTLOCKVALSRCPOS pSrcPos)
1238{
1239 if (g_hLockValClassTreeRWLock == NIL_RTSEMRW)
1240 rtLockValidatorLazyInit();
1241 int rcLock = RTSemRWRequestRead(g_hLockValClassTreeRWLock, RT_INDEFINITE_WAIT);
1242
1243 uint32_t uSrcPosHash = rtLockValidatorSrcPosHash(pSrcPos);
1244 RTLOCKVALCLASSINT *pClass = (RTLOCKVALCLASSINT *)RTAvllU32Get(&g_LockValClassTree, uSrcPosHash);
1245 while (pClass)
1246 {
1247 if (rtLockValidatorSrcPosCompare(&pClass->CreatePos, pSrcPos) == 0)
1248 break;
1249 pClass = (RTLOCKVALCLASSINT *)pClass->Core.pList;
1250 }
1251
1252 if (RT_SUCCESS(rcLock))
1253 RTSemRWReleaseRead(g_hLockValClassTreeRWLock);
1254 return pClass;
1255}
1256
1257
RTDECL(RTLOCKVALCLASS) RTLockValidatorClassForSrcPos(RT_SRC_POS_DECL, const char *pszNameFmt, ...)
{
    /* Reuse an existing class for this source position if there is one. */
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_POS_NO_ID();
    RTLOCKVALCLASS hClass = RTLockValidatorClassFindForSrcPos(&SrcPos);
    if (hClass == NIL_RTLOCKVALCLASS)
    {
        /*
         * Create a new class and insert it into the tree.
         */
        va_list va;
        va_start(va, pszNameFmt);
        int rc = RTLockValidatorClassCreateExV(&hClass, &SrcPos,
                                               true /*fAutodidact*/, true /*fRecursionOk*/, false /*fStrictReleaseOrder*/,
                                               1 /*cMsMinDeadlock*/, 1 /*cMsMinOrder*/,
                                               pszNameFmt, va);
        va_end(va);
        if (RT_SUCCESS(rc))
        {
            if (g_hLockValClassTreeRWLock == NIL_RTSEMRW)
                rtLockValidatorLazyInit();
            int rcLock = RTSemRWRequestWrite(g_hLockValClassTreeRWLock, RT_INDEFINITE_WAIT);

            /* NOTE(review): a racing caller may have inserted a class for the
               same position meanwhile; the insert would then fail and the
               second assertion fire -- presumably tolerated in practice. */
            Assert(!hClass->fInTree);
            hClass->fInTree = RTAvllU32Insert(&g_LockValClassTree, &hClass->Core);
            Assert(hClass->fInTree);

            if (RT_SUCCESS(rcLock))
                RTSemRWReleaseWrite(g_hLockValClassTreeRWLock);
            return hClass;
        }
        /* Creation failed: hClass is still NIL_RTLOCKVALCLASS here. */
    }
    return hClass;
}
1291
1292
1293RTDECL(uint32_t) RTLockValidatorClassRetain(RTLOCKVALCLASS hClass)
1294{
1295 RTLOCKVALCLASSINT *pClass = hClass;
1296 AssertPtrReturn(pClass, UINT32_MAX);
1297 AssertReturn(pClass->u32Magic == RTLOCKVALCLASS_MAGIC, UINT32_MAX);
1298 return rtLockValidatorClassRetain(pClass);
1299}
1300
1301
1302RTDECL(uint32_t) RTLockValidatorClassRelease(RTLOCKVALCLASS hClass)
1303{
1304 RTLOCKVALCLASSINT *pClass = hClass;
1305 if (pClass == NIL_RTLOCKVALCLASS)
1306 return 0;
1307 AssertPtrReturn(pClass, UINT32_MAX);
1308 AssertReturn(pClass->u32Magic == RTLOCKVALCLASS_MAGIC, UINT32_MAX);
1309 return rtLockValidatorClassRelease(pClass);
1310}
1311
1312
/**
 * Worker for rtLockValidatorClassIsPriorClass that does a linear search thru
 * all the chunks for @a pPriorClass.
 *
 * @returns true / false.
 * @param   pClass      The class to search.
 * @param   pPriorClass The class to search for.
 */
static bool rtLockValidatorClassIsPriorClassByLinearSearch(RTLOCKVALCLASSINT *pClass, RTLOCKVALCLASSINT *pPriorClass)
{
    for (PRTLOCKVALCLASSREFCHUNK pChunk = &pClass->PriorLocks; pChunk; pChunk = pChunk->pNext)
        for (uint32_t i = 0; i < RT_ELEMENTS(pChunk->aRefs); i++)
        {
            if (pChunk->aRefs[i].hClass == pPriorClass)
            {
                /* Count the lookup, saturating the counter at the cap. */
                uint32_t cLookups = ASMAtomicIncU32(&pChunk->aRefs[i].cLookups);
                if (RT_UNLIKELY(cLookups >= RTLOCKVALCLASSREF_MAX_LOOKUPS_FIX))
                {
                    ASMAtomicWriteU32(&pChunk->aRefs[i].cLookups, RTLOCKVALCLASSREF_MAX_LOOKUPS);
                    cLookups = RTLOCKVALCLASSREF_MAX_LOOKUPS;
                }

                /* update the hash table entry. */
                /* Promote this entry into the hash cache if the slot is empty
                   or its current occupant is clearly less popular (128 margin
                   avoids two hot entries thrashing the slot). */
                PRTLOCKVALCLASSREF *ppHashEntry = &pClass->apPriorLocksHash[RTLOCKVALCLASS_HASH(pPriorClass)];
                if (    !(*ppHashEntry)
                    ||  (*ppHashEntry)->cLookups + 128 < cLookups)
                    ASMAtomicWritePtr(ppHashEntry, &pChunk->aRefs[i]);

#ifdef RTLOCKVAL_WITH_CLASS_HASH_STATS
                ASMAtomicIncU32(&pClass->cHashMisses);
#endif
                return true;
            }
        }

    return false;
}
1350
1351
/**
 * Checks if @a pPriorClass is a known prior class.
 *
 * @returns true / false.
 * @param   pClass      The class to search.
 * @param   pPriorClass The class to search for.
 */
DECL_FORCE_INLINE(bool) rtLockValidatorClassIsPriorClass(RTLOCKVALCLASSINT *pClass, RTLOCKVALCLASSINT *pPriorClass)
{
    /*
     * Hash lookup here.
     */
    PRTLOCKVALCLASSREF pRef = pClass->apPriorLocksHash[RTLOCKVALCLASS_HASH(pPriorClass)];
    if (   pRef
        && pRef->hClass == pPriorClass)
    {
        /* Count the lookup, saturating the counter at the cap. */
        uint32_t cLookups = ASMAtomicIncU32(&pRef->cLookups);
        if (RT_UNLIKELY(cLookups >= RTLOCKVALCLASSREF_MAX_LOOKUPS_FIX))
            ASMAtomicWriteU32(&pRef->cLookups, RTLOCKVALCLASSREF_MAX_LOOKUPS);
#ifdef RTLOCKVAL_WITH_CLASS_HASH_STATS
        ASMAtomicIncU32(&pClass->cHashHits);
#endif
        return true;
    }

    /* Fall back on scanning all the chunks (also refreshes the hash cache). */
    return rtLockValidatorClassIsPriorClassByLinearSearch(pClass, pPriorClass);
}
1379
1380
/**
 * Adds a class to the prior list.
 *
 * @returns VINF_SUCCESS, VERR_NO_MEMORY or VERR_SEM_LV_WRONG_ORDER.
 * @param   pClass              The class to work on.
 * @param   pPriorClass         The class to add.
 * @param   fAutodidacticism    Whether we're teaching ourselves (true) or
 *                              somebody is teaching us via the API (false).
 * @param   pSrcPos             Where this rule was added (optional).
 */
static int rtLockValidatorClassAddPriorClass(RTLOCKVALCLASSINT *pClass, RTLOCKVALCLASSINT *pPriorClass,
                                             bool fAutodidacticism, PCRTLOCKVALSRCPOS pSrcPos)
{
    NOREF(pSrcPos);
    if (!RTCritSectIsInitialized(&g_LockValClassTeachCS))
        rtLockValidatorLazyInit();
    int rcLock = RTCritSectEnter(&g_LockValClassTeachCS);

    /*
     * Check that there are no conflict (no assert since we might race each other).
     */
    int rc = VERR_SEM_LV_INTERNAL_ERROR;
    /* Adding the rule in the reverse direction would create an ordering cycle. */
    if (!rtLockValidatorClassIsPriorClass(pPriorClass, pClass))
    {
        if (!rtLockValidatorClassIsPriorClass(pClass, pPriorClass))
        {
            /*
             * Scan the table for a free entry, allocating a new chunk if necessary.
             */
            for (PRTLOCKVALCLASSREFCHUNK pChunk = &pClass->PriorLocks; ; pChunk = pChunk->pNext)
            {
                bool fDone = false;
                for (uint32_t i = 0; i < RT_ELEMENTS(pChunk->aRefs); i++)
                {
                    /* Claim the slot atomically so lock-free readers never see
                       a half-initialized entry. */
                    ASMAtomicCmpXchgHandle(&pChunk->aRefs[i].hClass, pPriorClass, NIL_RTLOCKVALCLASS, fDone);
                    if (fDone)
                    {
                        pChunk->aRefs[i].fAutodidacticism = fAutodidacticism;
                        rtLockValidatorClassRetain(pPriorClass);
                        rc = VINF_SUCCESS;
                        break;
                    }
                }
                if (fDone)
                    break;

                /* If no more chunks, allocate a new one and insert the class before linking it. */
                if (!pChunk->pNext)
                {
                    PRTLOCKVALCLASSREFCHUNK pNew = (PRTLOCKVALCLASSREFCHUNK)RTMemAlloc(sizeof(*pNew));
                    if (!pNew)
                    {
                        rc = VERR_NO_MEMORY;
                        break;
                    }
                    RTMEM_MAY_LEAK(pNew);
                    pNew->pNext = NULL;
                    for (uint32_t i = 0; i < RT_ELEMENTS(pNew->aRefs); i++)
                    {
                        pNew->aRefs[i].hClass           = NIL_RTLOCKVALCLASS;
                        pNew->aRefs[i].cLookups         = 0;
                        pNew->aRefs[i].fAutodidacticism = false;
                        pNew->aRefs[i].afReserved[0]    = false;
                        pNew->aRefs[i].afReserved[1]    = false;
                        pNew->aRefs[i].afReserved[2]    = false;
                    }

                    /* Fill in the first slot before publishing the chunk, so
                       concurrent readers see it fully initialized. */
                    pNew->aRefs[0].hClass           = pPriorClass;
                    pNew->aRefs[0].fAutodidacticism = fAutodidacticism;

                    ASMAtomicWritePtr(&pChunk->pNext, pNew);
                    rtLockValidatorClassRetain(pPriorClass);
                    rc = VINF_SUCCESS;
                    break;
                }
            } /* chunk loop */
        }
        else
            /* Already a known prior class; nothing to do. */
            rc = VINF_SUCCESS;
    }
    else
        rc = !g_fLockValSoftWrongOrder ? VERR_SEM_LV_WRONG_ORDER : VINF_SUCCESS;

    if (RT_SUCCESS(rcLock))
        RTCritSectLeave(&g_LockValClassTeachCS);
    return rc;
}
1468
1469
1470RTDECL(int) RTLockValidatorClassAddPriorClass(RTLOCKVALCLASS hClass, RTLOCKVALCLASS hPriorClass)
1471{
1472 RTLOCKVALCLASSINT *pClass = hClass;
1473 AssertPtrReturn(pClass, VERR_INVALID_HANDLE);
1474 AssertReturn(pClass->u32Magic == RTLOCKVALCLASS_MAGIC, VERR_INVALID_HANDLE);
1475
1476 RTLOCKVALCLASSINT *pPriorClass = hPriorClass;
1477 AssertPtrReturn(pPriorClass, VERR_INVALID_HANDLE);
1478 AssertReturn(pPriorClass->u32Magic == RTLOCKVALCLASS_MAGIC, VERR_INVALID_HANDLE);
1479
1480 return rtLockValidatorClassAddPriorClass(pClass, pPriorClass, false /*fAutodidacticism*/, NULL);
1481}
1482
1483
1484RTDECL(int) RTLockValidatorClassEnforceStrictReleaseOrder(RTLOCKVALCLASS hClass, bool fEnabled)
1485{
1486 RTLOCKVALCLASSINT *pClass = hClass;
1487 AssertPtrReturn(pClass, VERR_INVALID_HANDLE);
1488 AssertReturn(pClass->u32Magic == RTLOCKVALCLASS_MAGIC, VERR_INVALID_HANDLE);
1489
1490 ASMAtomicWriteBool(&pClass->fStrictReleaseOrder, fEnabled);
1491 return VINF_SUCCESS;
1492}
1493
1494
1495/**
1496 * Unlinks all siblings.
1497 *
1498 * This is used during record deletion and assumes no races.
1499 *
1500 * @param pCore One of the siblings.
1501 */
1502static void rtLockValidatorUnlinkAllSiblings(PRTLOCKVALRECCORE pCore)
1503{
1504 /* ASSUMES sibling destruction doesn't involve any races and that all
1505 related records are to be disposed off now. */
1506 PRTLOCKVALRECUNION pSibling = (PRTLOCKVALRECUNION)pCore;
1507 while (pSibling)
1508 {
1509 PRTLOCKVALRECUNION volatile *ppCoreNext;
1510 switch (pSibling->Core.u32Magic)
1511 {
1512 case RTLOCKVALRECEXCL_MAGIC:
1513 case RTLOCKVALRECEXCL_MAGIC_DEAD:
1514 ppCoreNext = &pSibling->Excl.pSibling;
1515 break;
1516
1517 case RTLOCKVALRECSHRD_MAGIC:
1518 case RTLOCKVALRECSHRD_MAGIC_DEAD:
1519 ppCoreNext = &pSibling->Shared.pSibling;
1520 break;
1521
1522 default:
1523 AssertFailed();
1524 ppCoreNext = NULL;
1525 break;
1526 }
1527 if (RT_UNLIKELY(ppCoreNext))
1528 break;
1529 pSibling = ASMAtomicXchgPtrT(ppCoreNext, NULL, PRTLOCKVALRECUNION);
1530 }
1531}
1532
1533
1534RTDECL(int) RTLockValidatorRecMakeSiblings(PRTLOCKVALRECCORE pRec1, PRTLOCKVALRECCORE pRec2)
1535{
1536 /*
1537 * Validate input.
1538 */
1539 PRTLOCKVALRECUNION p1 = (PRTLOCKVALRECUNION)pRec1;
1540 PRTLOCKVALRECUNION p2 = (PRTLOCKVALRECUNION)pRec2;
1541
1542 AssertPtrReturn(p1, VERR_SEM_LV_INVALID_PARAMETER);
1543 AssertReturn( p1->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC
1544 || p1->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
1545 , VERR_SEM_LV_INVALID_PARAMETER);
1546
1547 AssertPtrReturn(p2, VERR_SEM_LV_INVALID_PARAMETER);
1548 AssertReturn( p2->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC
1549 || p2->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
1550 , VERR_SEM_LV_INVALID_PARAMETER);
1551
1552 /*
1553 * Link them (circular list).
1554 */
1555 if ( p1->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC
1556 && p2->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC)
1557 {
1558 p1->Excl.pSibling = p2;
1559 p2->Shared.pSibling = p1;
1560 }
1561 else if ( p1->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
1562 && p2->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC)
1563 {
1564 p1->Shared.pSibling = p2;
1565 p2->Excl.pSibling = p1;
1566 }
1567 else
1568 AssertFailedReturn(VERR_SEM_LV_INVALID_PARAMETER); /* unsupported mix */
1569
1570 return VINF_SUCCESS;
1571}
1572
1573
#if 0 /* unused */
/**
 * Gets the lock name for the given record.
 *
 * @returns Read-only lock name.
 * @param   pRec    The lock record.
 */
DECL_FORCE_INLINE(const char *) rtLockValidatorRecName(PRTLOCKVALRECUNION pRec)
{
    switch (pRec->Core.u32Magic)
    {
        case RTLOCKVALRECEXCL_MAGIC:
            return pRec->Excl.szName;
        case RTLOCKVALRECSHRD_MAGIC:
            return pRec->Shared.szName;
        case RTLOCKVALRECSHRDOWN_MAGIC:
            /* Owner records borrow the name from the shared record they belong to. */
            return pRec->ShrdOwner.pSharedRec ? pRec->ShrdOwner.pSharedRec->szName : "orphaned";
        case RTLOCKVALRECNEST_MAGIC:
            /* Recursion records: resolve the real record they wrap, guarding
               against it having been freed. */
            pRec = rtLockValidatorReadRecUnionPtr(&pRec->Nest.pRec);
            if (VALID_PTR(pRec))
            {
                switch (pRec->Core.u32Magic)
                {
                    case RTLOCKVALRECEXCL_MAGIC:
                        return pRec->Excl.szName;
                    case RTLOCKVALRECSHRD_MAGIC:
                        return pRec->Shared.szName;
                    case RTLOCKVALRECSHRDOWN_MAGIC:
                        return pRec->ShrdOwner.pSharedRec ? pRec->ShrdOwner.pSharedRec->szName : "orphaned";
                    default:
                        return "unknown-nested";
                }
            }
            return "orphaned-nested";
        default:
            return "unknown";
    }
}
#endif /* unused */
1613
1614
#if 0 /* unused */
/**
 * Gets the class for this locking record.
 *
 * @returns Pointer to the class or NIL_RTLOCKVALCLASS.
 * @param   pRec    The lock validator record.
 */
DECLINLINE(RTLOCKVALCLASSINT *) rtLockValidatorRecGetClass(PRTLOCKVALRECUNION pRec)
{
    switch (pRec->Core.u32Magic)
    {
        case RTLOCKVALRECEXCL_MAGIC:
            return pRec->Excl.hClass;

        case RTLOCKVALRECSHRD_MAGIC:
            return pRec->Shared.hClass;

        case RTLOCKVALRECSHRDOWN_MAGIC:
        {
            /* Owner records get the class from their shared record; validate
               it first since it may have been destroyed concurrently. */
            PRTLOCKVALRECSHRD pSharedRec = pRec->ShrdOwner.pSharedRec;
            if (RT_LIKELY(   VALID_PTR(pSharedRec)
                          && pSharedRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC))
                return pSharedRec->hClass;
            return NIL_RTLOCKVALCLASS;
        }

        case RTLOCKVALRECNEST_MAGIC:
        {
            /* Recursion records: resolve the real record they wrap. */
            PRTLOCKVALRECUNION pRealRec = pRec->Nest.pRec;
            if (VALID_PTR(pRealRec))
            {
                switch (pRealRec->Core.u32Magic)
                {
                    case RTLOCKVALRECEXCL_MAGIC:
                        return pRealRec->Excl.hClass;

                    case RTLOCKVALRECSHRDOWN_MAGIC:
                    {
                        PRTLOCKVALRECSHRD pSharedRec = pRealRec->ShrdOwner.pSharedRec;
                        if (RT_LIKELY(   VALID_PTR(pSharedRec)
                                      && pSharedRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC))
                            return pSharedRec->hClass;
                        break;
                    }

                    default:
                        AssertMsgFailed(("%p %p %#x\n", pRec, pRealRec, pRealRec->Core.u32Magic));
                        break;
                }
            }
            return NIL_RTLOCKVALCLASS;
        }

        default:
            AssertMsgFailed(("%#x\n", pRec->Core.u32Magic));
            return NIL_RTLOCKVALCLASS;
    }
}
#endif /* unused */
1674
/**
 * Gets the class for this locking record and the pointer to the one below it in
 * the stack.
 *
 * @returns Pointer to the class or NIL_RTLOCKVALCLASS.
 * @param   pRec            The lock validator record.
 * @param   puSubClass      Where to return the sub-class.
 * @param   ppDown          Where to return the pointer to the record below.
 */
DECL_FORCE_INLINE(RTLOCKVALCLASSINT *)
rtLockValidatorRecGetClassesAndDown(PRTLOCKVALRECUNION pRec, uint32_t *puSubClass, PRTLOCKVALRECUNION *ppDown)
{
    switch (pRec->Core.u32Magic)
    {
        case RTLOCKVALRECEXCL_MAGIC:
            *ppDown = pRec->Excl.pDown;
            *puSubClass = pRec->Excl.uSubClass;
            return pRec->Excl.hClass;

        case RTLOCKVALRECSHRD_MAGIC:
            /* Shared records aren't stack members themselves, so no down pointer. */
            *ppDown = NULL;
            *puSubClass = pRec->Shared.uSubClass;
            return pRec->Shared.hClass;

        case RTLOCKVALRECSHRDOWN_MAGIC:
        {
            *ppDown = pRec->ShrdOwner.pDown;

            /* Class and sub-class live in the shared record; validate it as
               it may be destroyed concurrently. */
            PRTLOCKVALRECSHRD pSharedRec = pRec->ShrdOwner.pSharedRec;
            if (RT_LIKELY(   VALID_PTR(pSharedRec)
                          && pSharedRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC))
            {
                *puSubClass = pSharedRec->uSubClass;
                return pSharedRec->hClass;
            }
            *puSubClass = RTLOCKVAL_SUB_CLASS_NONE;
            return NIL_RTLOCKVALCLASS;
        }

        case RTLOCKVALRECNEST_MAGIC:
        {
            *ppDown = pRec->Nest.pDown;

            /* Recursion records: resolve the wrapped record for class info. */
            PRTLOCKVALRECUNION pRealRec = pRec->Nest.pRec;
            if (VALID_PTR(pRealRec))
            {
                switch (pRealRec->Core.u32Magic)
                {
                    case RTLOCKVALRECEXCL_MAGIC:
                        *puSubClass = pRealRec->Excl.uSubClass;
                        return pRealRec->Excl.hClass;

                    case RTLOCKVALRECSHRDOWN_MAGIC:
                    {
                        PRTLOCKVALRECSHRD pSharedRec = pRealRec->ShrdOwner.pSharedRec;
                        if (RT_LIKELY(   VALID_PTR(pSharedRec)
                                      && pSharedRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC))
                        {
                            *puSubClass = pSharedRec->uSubClass;
                            return pSharedRec->hClass;
                        }
                        break;
                    }

                    default:
                        AssertMsgFailed(("%p %p %#x\n", pRec, pRealRec, pRealRec->Core.u32Magic));
                        break;
                }
            }
            *puSubClass = RTLOCKVAL_SUB_CLASS_NONE;
            return NIL_RTLOCKVALCLASS;
        }

        default:
            AssertMsgFailed(("%#x\n", pRec->Core.u32Magic));
            *ppDown = NULL;
            *puSubClass = RTLOCKVAL_SUB_CLASS_NONE;
            return NIL_RTLOCKVALCLASS;
    }
}
1755
1756
1757/**
1758 * Gets the sub-class for a lock record.
1759 *
1760 * @returns the sub-class.
1761 * @param pRec The lock validator record.
1762 */
1763DECLINLINE(uint32_t) rtLockValidatorRecGetSubClass(PRTLOCKVALRECUNION pRec)
1764{
1765 switch (pRec->Core.u32Magic)
1766 {
1767 case RTLOCKVALRECEXCL_MAGIC:
1768 return pRec->Excl.uSubClass;
1769
1770 case RTLOCKVALRECSHRD_MAGIC:
1771 return pRec->Shared.uSubClass;
1772
1773 case RTLOCKVALRECSHRDOWN_MAGIC:
1774 {
1775 PRTLOCKVALRECSHRD pSharedRec = pRec->ShrdOwner.pSharedRec;
1776 if (RT_LIKELY( VALID_PTR(pSharedRec)
1777 && pSharedRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC))
1778 return pSharedRec->uSubClass;
1779 return RTLOCKVAL_SUB_CLASS_NONE;
1780 }
1781
1782 case RTLOCKVALRECNEST_MAGIC:
1783 {
1784 PRTLOCKVALRECUNION pRealRec = pRec->Nest.pRec;
1785 if (VALID_PTR(pRealRec))
1786 {
1787 switch (pRealRec->Core.u32Magic)
1788 {
1789 case RTLOCKVALRECEXCL_MAGIC:
1790 return pRec->Excl.uSubClass;
1791
1792 case RTLOCKVALRECSHRDOWN_MAGIC:
1793 {
1794 PRTLOCKVALRECSHRD pSharedRec = pRealRec->ShrdOwner.pSharedRec;
1795 if (RT_LIKELY( VALID_PTR(pSharedRec)
1796 && pSharedRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC))
1797 return pSharedRec->uSubClass;
1798 break;
1799 }
1800
1801 default:
1802 AssertMsgFailed(("%p %p %#x\n", pRec, pRealRec, pRealRec->Core.u32Magic));
1803 break;
1804 }
1805 }
1806 return RTLOCKVAL_SUB_CLASS_NONE;
1807 }
1808
1809 default:
1810 AssertMsgFailed(("%#x\n", pRec->Core.u32Magic));
1811 return RTLOCKVAL_SUB_CLASS_NONE;
1812 }
1813}
1814
1815
1816
1817
/**
 * Calculates the depth of a lock stack.
 *
 * @returns Number of stack frames.
 * @param   pThread         The thread.
 */
static uint32_t rtLockValidatorStackDepth(PRTTHREADINT pThread)
{
    uint32_t            cEntries = 0;
    PRTLOCKVALRECUNION  pCur = rtLockValidatorReadRecUnionPtr(&pThread->LockValidator.pStackTop);
    /* Walk the down-pointer chain, counting records; the down pointer lives
       at a different offset for each record type. */
    while (VALID_PTR(pCur))
    {
        switch (pCur->Core.u32Magic)
        {
            case RTLOCKVALRECEXCL_MAGIC:
                pCur = rtLockValidatorReadRecUnionPtr(&pCur->Excl.pDown);
                break;

            case RTLOCKVALRECSHRDOWN_MAGIC:
                pCur = rtLockValidatorReadRecUnionPtr(&pCur->ShrdOwner.pDown);
                break;

            case RTLOCKVALRECNEST_MAGIC:
                pCur = rtLockValidatorReadRecUnionPtr(&pCur->Nest.pDown);
                break;

            default:
                /* Corrupt record: return the count so far. */
                AssertMsgFailedReturn(("%#x\n", pCur->Core.u32Magic), cEntries);
        }
        cEntries++;
    }
    return cEntries;
}
1851
1852
#ifdef RT_STRICT
/**
 * Checks if the stack contains @a pRec.
 *
 * @returns true / false.
 * @param   pThreadSelf     The current thread.
 * @param   pRec            The lock record.
 */
static bool rtLockValidatorStackContainsRec(PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION pRec)
{
    /* Walk the down-pointer chain looking for an identity match. */
    PRTLOCKVALRECUNION pCur = pThreadSelf->LockValidator.pStackTop;
    while (pCur)
    {
        AssertPtrReturn(pCur, false);
        if (pCur == pRec)
            return true;
        switch (pCur->Core.u32Magic)
        {
            case RTLOCKVALRECEXCL_MAGIC:
                Assert(pCur->Excl.cRecursion >= 1);
                pCur = pCur->Excl.pDown;
                break;

            case RTLOCKVALRECSHRDOWN_MAGIC:
                Assert(pCur->ShrdOwner.cRecursion >= 1);
                pCur = pCur->ShrdOwner.pDown;
                break;

            case RTLOCKVALRECNEST_MAGIC:
                /* Nest records only exist for recursion levels above the first. */
                Assert(pCur->Nest.cRecursion > 1);
                pCur = pCur->Nest.pDown;
                break;

            default:
                AssertMsgFailedReturn(("%#x\n", pCur->Core.u32Magic), false);
        }
    }
    return false;
}
#endif /* RT_STRICT */
1893
1894
/**
 * Pushes a lock record onto the stack.
 *
 * @param   pThreadSelf     The current thread.
 * @param   pRec            The lock record.
 */
static void rtLockValidatorStackPush(PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION pRec)
{
    Assert(pThreadSelf == RTThreadSelf());
    Assert(!rtLockValidatorStackContainsRec(pThreadSelf, pRec));

    /* Link the current stack top below the new record first, then publish
       the new record as the top. */
    switch (pRec->Core.u32Magic)
    {
        case RTLOCKVALRECEXCL_MAGIC:
            Assert(pRec->Excl.cRecursion == 1);
            Assert(pRec->Excl.pDown == NULL);
            rtLockValidatorWriteRecUnionPtr(&pRec->Excl.pDown, pThreadSelf->LockValidator.pStackTop);
            break;

        case RTLOCKVALRECSHRDOWN_MAGIC:
            Assert(pRec->ShrdOwner.cRecursion == 1);
            Assert(pRec->ShrdOwner.pDown == NULL);
            rtLockValidatorWriteRecUnionPtr(&pRec->ShrdOwner.pDown, pThreadSelf->LockValidator.pStackTop);
            break;

        default:
            /* Unknown record type: leave the stack untouched. */
            AssertMsgFailedReturnVoid(("%#x\n", pRec->Core.u32Magic));
    }
    rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pStackTop, pRec);
}
1925
1926
/**
 * Pops a lock record off the stack.
 *
 * @param   pThreadSelf     The current thread.
 * @param   pRec            The lock.
 */
static void rtLockValidatorStackPop(PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION pRec)
{
    Assert(pThreadSelf == RTThreadSelf());

    /* Detach the record from the chain, remembering what was below it. */
    PRTLOCKVALRECUNION pDown;
    switch (pRec->Core.u32Magic)
    {
        case RTLOCKVALRECEXCL_MAGIC:
            Assert(pRec->Excl.cRecursion == 0);
            pDown = pRec->Excl.pDown;
            rtLockValidatorWriteRecUnionPtr(&pRec->Excl.pDown, NULL); /* lazy bird */
            break;

        case RTLOCKVALRECSHRDOWN_MAGIC:
            Assert(pRec->ShrdOwner.cRecursion == 0);
            pDown = pRec->ShrdOwner.pDown;
            rtLockValidatorWriteRecUnionPtr(&pRec->ShrdOwner.pDown, NULL);
            break;

        default:
            AssertMsgFailedReturnVoid(("%#x\n", pRec->Core.u32Magic));
    }
    /* Common case: the record is the top of the stack. */
    if (pThreadSelf->LockValidator.pStackTop == pRec)
        rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pStackTop, pDown);
    else
    {
        /* Find the pointer to our record and unlink ourselves. */
        PRTLOCKVALRECUNION pCur = pThreadSelf->LockValidator.pStackTop;
        while (pCur)
        {
            PRTLOCKVALRECUNION volatile *ppDown;
            switch (pCur->Core.u32Magic)
            {
                case RTLOCKVALRECEXCL_MAGIC:
                    Assert(pCur->Excl.cRecursion >= 1);
                    ppDown = &pCur->Excl.pDown;
                    break;

                case RTLOCKVALRECSHRDOWN_MAGIC:
                    Assert(pCur->ShrdOwner.cRecursion >= 1);
                    ppDown = &pCur->ShrdOwner.pDown;
                    break;

                case RTLOCKVALRECNEST_MAGIC:
                    Assert(pCur->Nest.cRecursion >= 1);
                    ppDown = &pCur->Nest.pDown;
                    break;

                default:
                    AssertMsgFailedReturnVoid(("%#x\n", pCur->Core.u32Magic));
            }
            pCur = *ppDown;
            if (pCur == pRec)
            {
                /* Bypass the record being popped. */
                rtLockValidatorWriteRecUnionPtr(ppDown, pDown);
                return;
            }
        }
        /* Record wasn't on the stack at all -- caller error. */
        AssertMsgFailed(("%p %p\n", pRec, pThreadSelf));
    }
}
1994
1995
/**
 * Creates and pushes lock recursion record onto the stack.
 *
 * Recursion records (RTLOCKVALRECNEST) are interposed on the stack so each
 * recursive acquisition of a lock shows up as its own stack entry with its
 * own source position.  Allocation failure is silently tolerated - the
 * validator is best effort and just loses one stack frame.
 *
 * @param   pThreadSelf     The current thread.
 * @param   pRec            The lock record (must already be on the stack).
 * @param   pSrcPos         Where the recursion occurred.
 */
static void rtLockValidatorStackPushRecursion(PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION pRec, PCRTLOCKVALSRCPOS pSrcPos)
{
    Assert(pThreadSelf == RTThreadSelf());
    Assert(rtLockValidatorStackContainsRec(pThreadSelf, pRec));

#ifdef RTLOCKVAL_WITH_RECURSION_RECORDS
    /*
     * Allocate a new recursion record, preferring the thread-local free list
     * over the heap.
     */
    PRTLOCKVALRECNEST pRecursionRec = pThreadSelf->LockValidator.pFreeNestRecs;
    if (pRecursionRec)
        pThreadSelf->LockValidator.pFreeNestRecs = pRecursionRec->pNextFree;
    else
    {
        pRecursionRec = (PRTLOCKVALRECNEST)RTMemAlloc(sizeof(*pRecursionRec));
        if (!pRecursionRec)
            return; /* best effort - just skip the recursion record */
    }

    /*
     * Initialize it.  The magic is written last so a concurrent stack walker
     * never sees a valid-looking but half-initialized record.
     */
    switch (pRec->Core.u32Magic)
    {
        case RTLOCKVALRECEXCL_MAGIC:
            pRecursionRec->cRecursion = pRec->Excl.cRecursion;
            break;

        case RTLOCKVALRECSHRDOWN_MAGIC:
            pRecursionRec->cRecursion = pRec->ShrdOwner.cRecursion;
            break;

        default:
            AssertMsgFailed(("%#x\n", pRec->Core.u32Magic));
            /* NOTE(review): the enter/leave pair presumably flushes any
               concurrent deadlock-detection walkers that might still hold a
               stale pointer to a recycled free-list record before it is
               handed back to the heap - confirm against the serialization
               helpers defined earlier in this file. */
            rtLockValidatorSerializeDestructEnter();
            rtLockValidatorSerializeDestructLeave();
            RTMemFree(pRecursionRec);
            return;
    }
    Assert(pRecursionRec->cRecursion > 1); /* only called for actual recursions */
    pRecursionRec->pRec             = pRec;
    pRecursionRec->pDown            = NULL;
    pRecursionRec->pNextFree        = NULL;
    rtLockValidatorSrcPosCopy(&pRecursionRec->SrcPos, pSrcPos);
    pRecursionRec->Core.u32Magic    = RTLOCKVALRECNEST_MAGIC;

    /*
     * Link it onto the top of the stack.
     */
    pRecursionRec->pDown = pThreadSelf->LockValidator.pStackTop;
    rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pStackTop, (PRTLOCKVALRECUNION)pRecursionRec);
#endif /* RTLOCKVAL_WITH_RECURSION_RECORDS */
}
2056
2057
/**
 * Pops a lock recursion record off the stack.
 *
 * The caller has already decremented the lock's recursion counter, so the
 * nest record to remove is the one whose stored count is exactly one higher
 * than the current count.  The removed record is recycled via the
 * thread-local free list.
 *
 * @param   pThreadSelf     The current thread.
 * @param   pRec            The lock record.
 */
static void rtLockValidatorStackPopRecursion(PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION pRec)
{
    Assert(pThreadSelf == RTThreadSelf());
    Assert(rtLockValidatorStackContainsRec(pThreadSelf, pRec));

    /* Read the (already decremented) recursion count of the lock. */
    uint32_t cRecursion;
    switch (pRec->Core.u32Magic)
    {
        case RTLOCKVALRECEXCL_MAGIC:    cRecursion = pRec->Excl.cRecursion; break;
        case RTLOCKVALRECSHRDOWN_MAGIC: cRecursion = pRec->ShrdOwner.cRecursion; break;
        default:                        AssertMsgFailedReturnVoid(("%#x\n", pRec->Core.u32Magic));
    }
    Assert(cRecursion >= 1);

#ifdef RTLOCKVAL_WITH_RECURSION_RECORDS
    /*
     * Pop the recursion record.  Fast path: it sits right on top.
     */
    PRTLOCKVALRECUNION pNest = pThreadSelf->LockValidator.pStackTop;
    if (   pNest != NULL
        && pNest->Core.u32Magic == RTLOCKVALRECNEST_MAGIC
        && pNest->Nest.pRec == pRec
       )
    {
        Assert(pNest->Nest.cRecursion == cRecursion + 1);
        rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pStackTop, pNest->Nest.pDown);
    }
    else
    {
        /* Out-of-order release: walk down and find the record above ours. */
        PRTLOCKVALRECUNION volatile *ppDown = NULL;
        for (;;)
        {
            AssertMsgReturnVoid(pNest, ("%p %p\n", pRec, pThreadSelf));
            switch (pNest->Core.u32Magic)
            {
                case RTLOCKVALRECEXCL_MAGIC:
                    ppDown = &pNest->Excl.pDown;
                    pNest = *ppDown;
                    continue;
                case RTLOCKVALRECSHRDOWN_MAGIC:
                    ppDown = &pNest->ShrdOwner.pDown;
                    pNest = *ppDown;
                    continue;
                case RTLOCKVALRECNEST_MAGIC:
                    if (pNest->Nest.pRec == pRec)
                        break;  /* found it */
                    ppDown = &pNest->Nest.pDown;
                    pNest = *ppDown;
                    continue;
                default:
                    AssertMsgFailedReturnVoid(("%#x\n", pNest->Core.u32Magic));
            }
            break; /* ugly */
        }
        Assert(pNest->Nest.cRecursion == cRecursion + 1);
        rtLockValidatorWriteRecUnionPtr(ppDown, pNest->Nest.pDown);
    }

    /*
     * Invalidate the record and put it on the thread's free list for reuse.
     * NOTE(review): this writes the live RTLOCKVALRECNEST_MAGIC, not a _DEAD
     * value, despite the invalidation intent - confirm whether
     * RTLOCKVALRECNEST_MAGIC_DEAD was meant here.  The record only goes onto
     * the thread-private free list, so at worst this weakens sanity checks.
     */
    ASMAtomicWriteU32(&pNest->Core.u32Magic, RTLOCKVALRECNEST_MAGIC);
    rtLockValidatorWriteRecUnionPtr(&pNest->Nest.pDown, NULL);
    rtLockValidatorWriteRecUnionPtr(&pNest->Nest.pRec, NULL);
    pNest->Nest.cRecursion = 0;
    pNest->Nest.pNextFree  = pThreadSelf->LockValidator.pFreeNestRecs;
    pThreadSelf->LockValidator.pFreeNestRecs = &pNest->Nest;
#endif /* RTLOCKVAL_WITH_RECURSION_RECORDS */
}
2134
2135
/**
 * Helper for rtLockValidatorStackCheckLockingOrder that does the bitching and
 * returns VERR_SEM_LV_WRONG_ORDER.
 *
 * @returns VERR_SEM_LV_WRONG_ORDER, or VINF_SUCCESS when soft wrong-order
 *          mode (g_fLockValSoftWrongOrder) downgrades the failure to a
 *          complaint only.
 *
 * @param   pszWhat         Headline describing the kind of violation.
 * @param   pSrcPos         The source position of the locking operation.
 * @param   pThreadSelf     The current thread.
 * @param   pRec1           The lock being acquired.
 * @param   pRec2           The conflicting lock already held.
 * @param   pClass1         The class of @a pRec1.
 * @param   pClass2         The class of @a pRec2.
 */
static int rtLockValidatorStackWrongOrder(const char *pszWhat, PCRTLOCKVALSRCPOS pSrcPos, PRTTHREADINT pThreadSelf,
                                          PRTLOCKVALRECUNION pRec1, PRTLOCKVALRECUNION pRec2,
                                          RTLOCKVALCLASSINT *pClass1, RTLOCKVALCLASSINT *pClass2)


{
    /* Dump the whole story: both locks, both classes, and the stack up to
       the conflicting record, then optionally panic. */
    rtLockValComplainFirst(pszWhat, pSrcPos, pThreadSelf, pRec1, false);
    rtLockValComplainAboutLock("Other lock:   ", pRec2, "\n");
    rtLockValComplainAboutClass("My class:    ", pClass1, rtLockValidatorRecGetSubClass(pRec1), true /*fVerbose*/);
    rtLockValComplainAboutClass("Other class: ", pClass2, rtLockValidatorRecGetSubClass(pRec2), true /*fVerbose*/);
    rtLockValComplainAboutLockStack(pThreadSelf, 0, 0, pRec2);
    rtLockValComplainPanic();
    return !g_fLockValSoftWrongOrder ? VERR_SEM_LV_WRONG_ORDER : VINF_SUCCESS;
}
2154
2155
2156/**
2157 * Checks if the sub-class order is ok or not.
2158 *
2159 * Used to deal with two locks from the same class.
2160 *
2161 * @returns true if ok, false if not.
2162 * @param uSubClass1 The sub-class of the lock that is being
2163 * considered.
2164 * @param uSubClass2 The sub-class of the lock that is already being
2165 * held.
2166 */
2167DECL_FORCE_INLINE(bool) rtLockValidatorIsSubClassOrderOk(uint32_t uSubClass1, uint32_t uSubClass2)
2168{
2169 if (uSubClass1 > uSubClass2)
2170 {
2171 /* NONE kills ANY. */
2172 if (uSubClass2 == RTLOCKVAL_SUB_CLASS_NONE)
2173 return false;
2174 return true;
2175 }
2176
2177 /* ANY counters all USER values. (uSubClass1 == NONE only if they are equal) */
2178 AssertCompile(RTLOCKVAL_SUB_CLASS_ANY > RTLOCKVAL_SUB_CLASS_NONE);
2179 if (uSubClass1 == RTLOCKVAL_SUB_CLASS_ANY)
2180 return true;
2181 return false;
2182}
2183
2184
2185/**
2186 * Checks if the class and sub-class lock order is ok.
2187 *
2188 * @returns true if ok, false if not.
2189 * @param pClass1 The class of the lock that is being considered.
2190 * @param uSubClass1 The sub-class that goes with @a pClass1.
2191 * @param pClass2 The class of the lock that is already being
2192 * held.
2193 * @param uSubClass2 The sub-class that goes with @a pClass2.
2194 */
2195DECL_FORCE_INLINE(bool) rtLockValidatorIsClassOrderOk(RTLOCKVALCLASSINT *pClass1, uint32_t uSubClass1,
2196 RTLOCKVALCLASSINT *pClass2, uint32_t uSubClass2)
2197{
2198 if (pClass1 == pClass2)
2199 return rtLockValidatorIsSubClassOrderOk(uSubClass1, uSubClass2);
2200 return rtLockValidatorClassIsPriorClass(pClass1, pClass2);
2201}
2202
2203
/**
 * Checks the locking order, part two.
 *
 * Called by rtLockValidatorStackCheckLockingOrder when it spots an order
 * violation.  Either reports the violation, or - for autodidact classes -
 * teaches the class the new ordering rule(s) implied by the current stack.
 *
 * @returns VINF_SUCCESS, VERR_SEM_LV_WRONG_ORDER or VERR_SEM_LV_INTERNAL_ERROR.
 * @param   pClass          The lock class.
 * @param   uSubClass       The lock sub-class.
 * @param   pThreadSelf     The current thread.
 * @param   pRec            The lock record.
 * @param   pSrcPos         The source position of the locking operation.
 * @param   pFirstBadClass  The first bad class.
 * @param   pFirstBadRec    The first bad lock record.
 * @param   pFirstBadDown   The next record on the lock stack.
 */
static int rtLockValidatorStackCheckLockingOrder2(RTLOCKVALCLASSINT * const pClass, uint32_t const uSubClass,
                                                  PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION const pRec,
                                                  PCRTLOCKVALSRCPOS const pSrcPos,
                                                  RTLOCKVALCLASSINT * const pFirstBadClass,
                                                  PRTLOCKVALRECUNION const pFirstBadRec,
                                                  PRTLOCKVALRECUNION const pFirstBadDown)
{
    /*
     * Something went wrong, pCur is pointing to where.
     *
     * A genuine violation (the held class is equal to or a declared prior of
     * ours) is always reported; so is any violation when the class cannot
     * learn new rules on its own.
     */
    if (   pClass == pFirstBadClass
        || rtLockValidatorClassIsPriorClass(pFirstBadClass, pClass))
        return rtLockValidatorStackWrongOrder("Wrong locking order!", pSrcPos, pThreadSelf,
                                              pRec, pFirstBadRec, pClass, pFirstBadClass);
    if (!pClass->fAutodidact)
        return rtLockValidatorStackWrongOrder("Wrong locking order! (unknown)", pSrcPos, pThreadSelf,
                                              pRec, pFirstBadRec, pClass, pFirstBadClass);

    /*
     * This class is an autodidact, so we have to check out the rest of the stack
     * for direct violations.
     */
    uint32_t cNewRules = 1; /* the pFirstBadClass rule itself */
    PRTLOCKVALRECUNION pCur = pFirstBadDown;
    while (pCur)
    {
        AssertPtrReturn(pCur, VERR_SEM_LV_INTERNAL_ERROR);

        if (pCur->Core.u32Magic == RTLOCKVALRECNEST_MAGIC)
            pCur = pCur->Nest.pDown; /* recursion records carry no class info */
        else
        {
            PRTLOCKVALRECUNION  pDown;
            uint32_t            uPriorSubClass;
            RTLOCKVALCLASSINT  *pPriorClass = rtLockValidatorRecGetClassesAndDown(pCur, &uPriorSubClass, &pDown);
            if (pPriorClass != NIL_RTLOCKVALCLASS)
            {
                AssertPtrReturn(pPriorClass, VERR_SEM_LV_INTERNAL_ERROR);
                AssertReturn(pPriorClass->u32Magic == RTLOCKVALCLASS_MAGIC, VERR_SEM_LV_INTERNAL_ERROR);
                if (!rtLockValidatorIsClassOrderOk(pClass, uSubClass, pPriorClass, uPriorSubClass))
                {
                    /* An established contrary rule means a real violation;
                       otherwise it is just another rule we will be adding. */
                    if (   pClass == pPriorClass
                        || rtLockValidatorClassIsPriorClass(pPriorClass, pClass))
                        return rtLockValidatorStackWrongOrder("Wrong locking order! (more than one)", pSrcPos, pThreadSelf,
                                                              pRec, pCur, pClass, pPriorClass);
                    cNewRules++;
                }
            }
            pCur = pDown;
        }
    }

    if (cNewRules == 1)
    {
        /*
         * Special case the simple operation, hoping that it will be a
         * frequent case.
         */
        int rc = rtLockValidatorClassAddPriorClass(pClass, pFirstBadClass, true /*fAutodidacticism*/, pSrcPos);
        if (rc == VERR_SEM_LV_WRONG_ORDER)
            return rtLockValidatorStackWrongOrder("Wrong locking order! (race)", pSrcPos, pThreadSelf,
                                                  pRec, pFirstBadRec, pClass, pFirstBadClass);
        Assert(RT_SUCCESS(rc) || rc == VERR_NO_MEMORY);
    }
    else
    {
        /*
         * We may be adding more than one rule, so we have to take the lock
         * before starting to add the rules.  This means we have to check
         * the state after taking it since we might be racing someone adding
         * a conflicting rule.
         */
        if (!RTCritSectIsInitialized(&g_LockValClassTeachCS))
            rtLockValidatorLazyInit();
        int rcLock = RTCritSectEnter(&g_LockValClassTeachCS);

        /* Check (re-scan under the teach lock; someone may have raced us). */
        pCur = pFirstBadRec;
        while (pCur)
        {
            if (pCur->Core.u32Magic == RTLOCKVALRECNEST_MAGIC)
                pCur = pCur->Nest.pDown;
            else
            {
                uint32_t            uPriorSubClass;
                PRTLOCKVALRECUNION  pDown;
                RTLOCKVALCLASSINT  *pPriorClass = rtLockValidatorRecGetClassesAndDown(pCur, &uPriorSubClass, &pDown);
                if (pPriorClass != NIL_RTLOCKVALCLASS)
                {
                    if (!rtLockValidatorIsClassOrderOk(pClass, uSubClass, pPriorClass, uPriorSubClass))
                    {
                        if (   pClass == pPriorClass
                            || rtLockValidatorClassIsPriorClass(pPriorClass, pClass))
                        {
                            /* Only leave the crit sect if we actually managed to enter it. */
                            if (RT_SUCCESS(rcLock))
                                RTCritSectLeave(&g_LockValClassTeachCS);
                            return rtLockValidatorStackWrongOrder("Wrong locking order! (2nd)", pSrcPos, pThreadSelf,
                                                                  pRec, pCur, pClass, pPriorClass);
                        }
                    }
                }
                pCur = pDown;
            }
        }

        /* Iterate the stack yet again, adding new rules this time. */
        pCur = pFirstBadRec;
        while (pCur)
        {
            if (pCur->Core.u32Magic == RTLOCKVALRECNEST_MAGIC)
                pCur = pCur->Nest.pDown;
            else
            {
                uint32_t            uPriorSubClass;
                PRTLOCKVALRECUNION  pDown;
                RTLOCKVALCLASSINT  *pPriorClass = rtLockValidatorRecGetClassesAndDown(pCur, &uPriorSubClass, &pDown);
                if (pPriorClass != NIL_RTLOCKVALCLASS)
                {
                    if (!rtLockValidatorIsClassOrderOk(pClass, uSubClass, pPriorClass, uPriorSubClass))
                    {
                        Assert(   pClass != pPriorClass
                               && !rtLockValidatorClassIsPriorClass(pPriorClass, pClass));
                        int rc = rtLockValidatorClassAddPriorClass(pClass, pPriorClass, true /*fAutodidacticism*/, pSrcPos);
                        if (RT_FAILURE(rc))
                        {
                            /* Out of memory: give up teaching, keep going. */
                            Assert(rc == VERR_NO_MEMORY);
                            break;
                        }
                        Assert(rtLockValidatorClassIsPriorClass(pClass, pPriorClass));
                    }
                }
                pCur = pDown;
            }
        }

        if (RT_SUCCESS(rcLock))
            RTCritSectLeave(&g_LockValClassTeachCS);
    }

    return VINF_SUCCESS;
}
2358
2359
2360
/**
 * Checks the locking order.
 *
 * Walks the current thread's lock stack and verifies that acquiring @a pRec
 * (of class @a pClass / @a uSubClass) does not violate the ordering rules
 * against any lock already held.  Problems are delegated to
 * rtLockValidatorStackCheckLockingOrder2.
 *
 * @returns VINF_SUCCESS, VERR_SEM_LV_WRONG_ORDER or VERR_SEM_LV_INTERNAL_ERROR.
 * @param   pClass          The lock class.
 * @param   uSubClass       The lock sub-class.
 * @param   pThreadSelf     The current thread.
 * @param   pRec            The lock record.
 * @param   pSrcPos         The source position of the locking operation.
 */
static int rtLockValidatorStackCheckLockingOrder(RTLOCKVALCLASSINT * const pClass, uint32_t const uSubClass,
                                                 PRTTHREADINT pThreadSelf,  PRTLOCKVALRECUNION const pRec,
                                                 PCRTLOCKVALSRCPOS pSrcPos)
{
    /*
     * Some internal paranoia first.
     */
    AssertPtr(pClass);
    Assert(pClass->u32Magic == RTLOCKVALCLASS_MAGIC);
    AssertPtr(pThreadSelf);
    Assert(pThreadSelf->u32Magic == RTTHREADINT_MAGIC);
    AssertPtr(pRec);
    AssertPtrNull(pSrcPos);

    /*
     * Walk the stack, delegate problems to a worker routine.
     */
    PRTLOCKVALRECUNION pCur = pThreadSelf->LockValidator.pStackTop;
    if (!pCur)
        return VINF_SUCCESS; /* nothing held, nothing to violate */

    for (;;)
    {
        AssertPtrReturn(pCur, VERR_SEM_LV_INTERNAL_ERROR);

        if (pCur->Core.u32Magic == RTLOCKVALRECNEST_MAGIC)
            pCur = pCur->Nest.pDown; /* recursion records carry no class info */
        else
        {
            uint32_t            uPriorSubClass;
            PRTLOCKVALRECUNION  pDown;
            RTLOCKVALCLASSINT  *pPriorClass = rtLockValidatorRecGetClassesAndDown(pCur, &uPriorSubClass, &pDown);
            if (pPriorClass != NIL_RTLOCKVALCLASS)
            {
                AssertPtrReturn(pPriorClass, VERR_SEM_LV_INTERNAL_ERROR);
                AssertReturn(pPriorClass->u32Magic == RTLOCKVALCLASS_MAGIC, VERR_SEM_LV_INTERNAL_ERROR);
                if (RT_UNLIKELY(!rtLockValidatorIsClassOrderOk(pClass, uSubClass, pPriorClass, uPriorSubClass)))
                    return rtLockValidatorStackCheckLockingOrder2(pClass, uSubClass, pThreadSelf, pRec, pSrcPos,
                                                                  pPriorClass, pCur, pDown);
            }
            pCur = pDown;
        }
        if (!pCur)
            return VINF_SUCCESS;
    }
}
2417
2418
/**
 * Check that the lock record is the topmost one on the stack, complain and fail
 * if it isn't.
 *
 * @returns VINF_SUCCESS, VERR_SEM_LV_WRONG_RELEASE_ORDER or
 *          VERR_SEM_LV_INVALID_PARAMETER (may also be VINF_SUCCESS in soft
 *          wrong-order mode, see g_fLockValSoftWrongOrder).
 * @param   pThreadSelf     The current thread.
 * @param   pRec            The record.
 */
static int rtLockValidatorStackCheckReleaseOrder(PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION pRec)
{
    AssertReturn(pThreadSelf != NIL_RTTHREAD, VERR_SEM_LV_INVALID_PARAMETER);
    Assert(pThreadSelf == RTThreadSelf());

    /* OK when the record - or a recursion record wrapping it - is on top. */
    PRTLOCKVALRECUNION pTop = pThreadSelf->LockValidator.pStackTop;
    if (RT_LIKELY(   pTop == pRec
                  || (   pTop
                      && pTop->Core.u32Magic == RTLOCKVALRECNEST_MAGIC
                      && pTop->Nest.pRec == pRec) ))
        return VINF_SUCCESS;

#ifdef RTLOCKVAL_WITH_RECURSION_RECORDS
    /* Look for a recursion record so the right frame is dumped and marked. */
    while (pTop)
    {
        if (pTop->Core.u32Magic == RTLOCKVALRECNEST_MAGIC)
        {
            if (pTop->Nest.pRec == pRec)
            {
                /* Complain about the recursion record rather than the lock
                   itself so the report points at the right stack frame. */
                pRec = pTop;
                break;
            }
            pTop = pTop->Nest.pDown;
        }
        else if (pTop->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC)
            pTop = pTop->Excl.pDown;
        else if (pTop->Core.u32Magic == RTLOCKVALRECSHRDOWN_MAGIC)
            pTop = pTop->ShrdOwner.pDown;
        else
            break; /* unknown/dead record - give up the search */
    }
#endif

    rtLockValComplainFirst("Wrong release order!", NULL, pThreadSelf, pRec, true);
    rtLockValComplainPanic();
    return !g_fLockValSoftWrongOrder ? VERR_SEM_LV_WRONG_RELEASE_ORDER : VINF_SUCCESS;
}
2466
2467
/**
 * Checks if all owners are blocked - shared record operated in signaller mode.
 *
 * Reads the owner table with unordered atomics; entries may be concurrently
 * added, removed or recycled, which the magic/NULL checks tolerate.
 *
 * @returns true / false accordingly.
 * @param   pRec            The record.
 * @param   pThreadSelf     The current thread.
 */
DECL_FORCE_INLINE(bool) rtLockValidatorDdAreAllThreadsBlocked(PRTLOCKVALRECSHRD pRec, PRTTHREADINT pThreadSelf)
{
    /* Quick check for empty records - no owners means nobody is blocked. */
    PRTLOCKVALRECSHRDOWN volatile  *papOwners  = pRec->papOwners;
    uint32_t                        cAllocated = pRec->cAllocated;
    uint32_t                        cEntries   = ASMAtomicUoReadU32(&pRec->cEntries);
    if (cEntries == 0)
        return false;

    for (uint32_t i = 0; i < cAllocated; i++)
    {
        PRTLOCKVALRECSHRDOWN pEntry = rtLockValidatorUoReadSharedOwner(&papOwners[i]);
        if (   pEntry
            && pEntry->Core.u32Magic == RTLOCKVALRECSHRDOWN_MAGIC)
        {
            PRTTHREADINT pCurThread = rtLockValidatorReadThreadHandle(&pEntry->hThread);
            if (!pCurThread)
                return false;
            if (pCurThread->u32Magic != RTTHREADINT_MAGIC)
                return false;
            /* The current thread counts as "blocked" here since it is about
               to block on this record. */
            if (   !RTTHREAD_IS_SLEEPING(rtThreadGetState(pCurThread))
                && pCurThread != pThreadSelf)
                return false;
            /* Stop early once all live entries have been inspected. */
            if (--cEntries == 0)
                break;
        }
        else
            Assert(!pEntry || pEntry->Core.u32Magic == RTLOCKVALRECSHRDOWN_MAGIC_DEAD);
    }

    return true;
}
2506
2507
/**
 * Verifies the deadlock stack before calling it a deadlock.
 *
 * The detection ran lock-free, so re-check the whole chain a few times (with
 * yields in between) to make sure it was not a transient state before
 * reporting a real deadlock.
 *
 * @retval  VERR_SEM_LV_DEADLOCK if it's a deadlock.
 * @retval  VERR_SEM_LV_ILLEGAL_UPGRADE if it's a deadlock on the same lock.
 * @retval  VERR_TRY_AGAIN if something changed.
 *
 * @param   pStack          The deadlock detection stack.
 * @param   pThreadSelf     The current thread.
 */
static int rtLockValidatorDdVerifyDeadlock(PRTLOCKVALDDSTACK pStack, PRTTHREADINT pThreadSelf)
{
    uint32_t const c = pStack->c;
    for (uint32_t iPass = 0; iPass < 3; iPass++)
    {
        /* Entry 0 is the current thread itself, so start at 1. */
        for (uint32_t i = 1; i < c; i++)
        {
            PRTTHREADINT pThread = pStack->a[i].pThread;
            if (pThread->u32Magic != RTTHREADINT_MAGIC)
                return VERR_TRY_AGAIN; /* thread went away */
            if (rtThreadGetState(pThread) != pStack->a[i].enmState)
                return VERR_TRY_AGAIN; /* thread woke up */
            if (rtLockValidatorReadRecUnionPtr(&pThread->LockValidator.pRec) != pStack->a[i].pFirstSibling)
                return VERR_TRY_AGAIN; /* thread is blocking on something else now */
            /* ASSUMES the signaller records won't have siblings! */
            PRTLOCKVALRECUNION pRec = pStack->a[i].pRec;
            if (   pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
                && pRec->Shared.fSignaller
                && !rtLockValidatorDdAreAllThreadsBlocked(&pRec->Shared, pThreadSelf))
                return VERR_TRY_AGAIN;
        }
        RTThreadYield(); /* give the involved threads a chance to move */
    }

    /* A one-entry cycle means we are deadlocking against ourselves, i.e. an
       illegal read -> write lock upgrade attempt. */
    if (c == 1)
        return VERR_SEM_LV_ILLEGAL_UPGRADE;
    return VERR_SEM_LV_DEADLOCK;
}
2546
2547
2548/**
2549 * Checks for stack cycles caused by another deadlock before returning.
2550 *
2551 * @retval VINF_SUCCESS if the stack is simply too small.
2552 * @retval VERR_SEM_LV_EXISTING_DEADLOCK if a cycle was detected.
2553 *
2554 * @param pStack The deadlock detection stack.
2555 */
2556static int rtLockValidatorDdHandleStackOverflow(PRTLOCKVALDDSTACK pStack)
2557{
2558 for (size_t i = 0; i < RT_ELEMENTS(pStack->a) - 1; i++)
2559 {
2560 PRTTHREADINT pThread = pStack->a[i].pThread;
2561 for (size_t j = i + 1; j < RT_ELEMENTS(pStack->a); j++)
2562 if (pStack->a[j].pThread == pThread)
2563 return VERR_SEM_LV_EXISTING_DEADLOCK;
2564 }
2565 static bool volatile s_fComplained = false;
2566 if (!s_fComplained)
2567 {
2568 s_fComplained = true;
2569 rtLockValComplain(RT_SRC_POS, "lock validator stack is too small! (%zu entries)\n", RT_ELEMENTS(pStack->a));
2570 }
2571 return VINF_SUCCESS;
2572}
2573
2574
/**
 * Worker for rtLockValidatorDeadlockDetection that does the actual deadlock
 * detection.
 *
 * Performs an iterative depth-first search over the wait-for graph: starting
 * from the record the current thread wants, follow each (blocked) owner to
 * the record that owner is waiting on, and so on.  Reaching pThreadSelf
 * again closes a cycle, i.e. a (candidate) deadlock.  The search state is
 * kept in @a pStack so the graph can be unwound and siblings / further
 * shared owners explored without recursion.
 *
 * Runs lock-free against concurrently mutating records; stale reads are
 * tolerated and the final verdict is re-validated by
 * rtLockValidatorDdVerifyDeadlock.
 *
 * @retval  VINF_SUCCESS
 * @retval  VERR_SEM_LV_DEADLOCK
 * @retval  VERR_SEM_LV_EXISTING_DEADLOCK
 * @retval  VERR_SEM_LV_ILLEGAL_UPGRADE
 * @retval  VERR_TRY_AGAIN
 *
 * @param   pStack          The stack to use.
 * @param   pOriginalRec    The original record.
 * @param   pThreadSelf     The calling thread.
 */
static int rtLockValidatorDdDoDetection(PRTLOCKVALDDSTACK pStack, PRTLOCKVALRECUNION const pOriginalRec,
                                        PRTTHREADINT const pThreadSelf)
{
    pStack->c = 0;

    /* We could use a single RTLOCKVALDDENTRY variable here, but the
       compiler may make a better job of it when using individual variables. */
    PRTLOCKVALRECUNION  pRec            = pOriginalRec;
    PRTLOCKVALRECUNION  pFirstSibling   = pOriginalRec;
    uint32_t            iEntry          = UINT32_MAX;   /* owner-table cursor for shared records */
    PRTTHREADINT        pThread         = NIL_RTTHREAD;
    RTTHREADSTATE       enmState        = RTTHREADSTATE_RUNNING;
    for (uint32_t iLoop = 0; ; iLoop++)
    {
        /*
         * Process the current record.
         */
        RTLOCKVAL_ASSERT_PTR_ALIGN(pRec);

        /* Find the next relevant owner thread and record. */
        PRTLOCKVALRECUNION  pNextRec     = NULL;
        RTTHREADSTATE       enmNextState = RTTHREADSTATE_RUNNING;
        PRTTHREADINT        pNextThread  = NIL_RTTHREAD;
        switch (pRec->Core.u32Magic)
        {
            case RTLOCKVALRECEXCL_MAGIC:
                Assert(iEntry == UINT32_MAX);
                /* Retry loop: re-read until thread handle, state and pRec are
                   a consistent snapshot (the owner may be waking up). */
                for (;;)
                {
                    pNextThread = rtLockValidatorReadThreadHandle(&pRec->Excl.hThread);
                    if (   !pNextThread
                        || pNextThread->u32Magic != RTTHREADINT_MAGIC)
                        break;
                    enmNextState = rtThreadGetState(pNextThread);
                    if (   !RTTHREAD_IS_SLEEPING(enmNextState)
                        && pNextThread != pThreadSelf)
                        break; /* owner is running - no edge to follow */
                    pNextRec = rtLockValidatorReadRecUnionPtr(&pNextThread->LockValidator.pRec);
                    if (RT_LIKELY(   !pNextRec
                                  || enmNextState == rtThreadGetState(pNextThread)))
                        break;
                    pNextRec = NULL; /* state changed under us - retry */
                }
                if (!pNextRec)
                {
                    /* Dead end - try the next sibling record of the same lock. */
                    pRec = pRec->Excl.pSibling;
                    if (   pRec
                        && pRec != pFirstSibling)
                        continue;
                    pNextThread = NIL_RTTHREAD;
                }
                break;

            case RTLOCKVALRECSHRD_MAGIC:
                if (!pRec->Shared.fSignaller)
                {
                    /* Skip to the next sibling if same side.  ASSUMES reader priority. */
                    /** @todo The read side of a read-write lock is problematic if
                     * the implementation prioritizes writers over readers because
                     * that means we should could deadlock against current readers
                     * if a writer showed up.  If the RW sem implementation is
                     * wrapping some native API, it's not so easy to detect when we
                     * should do this and when we shouldn't.  Checking when we
                     * shouldn't is subject to wakeup scheduling and cannot easily
                     * be made reliable.
                     *
                     * At the moment we circumvent all this mess by declaring that
                     * readers has priority.  This is TRUE on linux, but probably
                     * isn't on Solaris and FreeBSD. */
                    if (   pRec == pFirstSibling
                        && pRec->Shared.pSibling != NULL
                        && pRec->Shared.pSibling != pFirstSibling)
                    {
                        pRec = pRec->Shared.pSibling;
                        Assert(iEntry == UINT32_MAX);
                        continue;
                    }
                }

                /* Scan the owner table for blocked owners.  For signaller-mode
                   records only continue when every owner is blocked. */
                if (    ASMAtomicUoReadU32(&pRec->Shared.cEntries) > 0
                    &&  (   !pRec->Shared.fSignaller
                         || iEntry != UINT32_MAX
                         || rtLockValidatorDdAreAllThreadsBlocked(&pRec->Shared, pThreadSelf)
                        )
                    )
                {
                    uint32_t                        cAllocated = pRec->Shared.cAllocated;
                    PRTLOCKVALRECSHRDOWN volatile  *papOwners  = pRec->Shared.papOwners;
                    while (++iEntry < cAllocated)
                    {
                        PRTLOCKVALRECSHRDOWN pEntry = rtLockValidatorUoReadSharedOwner(&papOwners[iEntry]);
                        if (pEntry)
                        {
                            /* Same consistent-snapshot retry loop as for the
                               exclusive case above. */
                            for (;;)
                            {
                                if (pEntry->Core.u32Magic != RTLOCKVALRECSHRDOWN_MAGIC)
                                    break;
                                pNextThread = rtLockValidatorReadThreadHandle(&pEntry->hThread);
                                if (   !pNextThread
                                    || pNextThread->u32Magic != RTTHREADINT_MAGIC)
                                    break;
                                enmNextState = rtThreadGetState(pNextThread);
                                if (   !RTTHREAD_IS_SLEEPING(enmNextState)
                                    && pNextThread != pThreadSelf)
                                    break;
                                pNextRec = rtLockValidatorReadRecUnionPtr(&pNextThread->LockValidator.pRec);
                                if (RT_LIKELY(   !pNextRec
                                              || enmNextState == rtThreadGetState(pNextThread)))
                                    break;
                                pNextRec = NULL;
                            }
                            if (pNextRec)
                                break;
                        }
                        else
                            Assert(!pEntry || pEntry->Core.u32Magic == RTLOCKVALRECSHRDOWN_MAGIC_DEAD);
                    }
                    if (pNextRec)
                        break;
                    pNextThread = NIL_RTTHREAD;
                }

                /* Advance to the next sibling, if any. */
                pRec = pRec->Shared.pSibling;
                if (   pRec != NULL
                    && pRec != pFirstSibling)
                {
                    iEntry = UINT32_MAX;
                    continue;
                }
                break;

            case RTLOCKVALRECEXCL_MAGIC_DEAD:
            case RTLOCKVALRECSHRD_MAGIC_DEAD:
                break; /* record was destroyed while we looked - dead end */

            case RTLOCKVALRECSHRDOWN_MAGIC:
            case RTLOCKVALRECSHRDOWN_MAGIC_DEAD:
            default:
                AssertMsgFailed(("%p: %#x\n", pRec, pRec->Core.u32Magic));
                break;
        }

        if (pNextRec)
        {
            /*
             * Recurse and check for deadlock.
             */
            uint32_t i = pStack->c;
            if (RT_UNLIKELY(i >= RT_ELEMENTS(pStack->a)))
                return rtLockValidatorDdHandleStackOverflow(pStack);

            pStack->c++;
            pStack->a[i].pRec           = pRec;
            pStack->a[i].iEntry         = iEntry;
            pStack->a[i].enmState       = enmState;
            pStack->a[i].pThread        = pThread;
            pStack->a[i].pFirstSibling  = pFirstSibling;

            /* Following the edge back to ourselves closes the cycle. */
            if (RT_UNLIKELY(    pNextThread == pThreadSelf
                            &&  (   i != 0
                                 || pRec->Core.u32Magic != RTLOCKVALRECSHRD_MAGIC
                                 || !pRec->Shared.fSignaller) /* ASSUMES signaller records have no siblings. */
                            )
                )
                return rtLockValidatorDdVerifyDeadlock(pStack, pThreadSelf);

            /* Descend into the record the next thread is waiting on. */
            pRec            = pNextRec;
            pFirstSibling   = pNextRec;
            iEntry          = UINT32_MAX;
            enmState        = enmNextState;
            pThread         = pNextThread;
        }
        else
        {
            /*
             * No deadlock here, unwind the stack and deal with any unfinished
             * business there.
             */
            uint32_t i = pStack->c;
            for (;;)
            {
                /* pop */
                if (i == 0)
                    return VINF_SUCCESS; /* whole graph explored, no cycle */
                i--;
                pRec    = pStack->a[i].pRec;
                iEntry  = pStack->a[i].iEntry;

                /* Examine it. */
                uint32_t u32Magic = pRec->Core.u32Magic;
                if (u32Magic == RTLOCKVALRECEXCL_MAGIC)
                    pRec = pRec->Excl.pSibling;
                else if (u32Magic == RTLOCKVALRECSHRD_MAGIC)
                {
                    if (iEntry + 1 < pRec->Shared.cAllocated)
                        break;  /* continue processing this record (more owners). */
                    pRec = pRec->Shared.pSibling;
                }
                else
                {
                    Assert(   u32Magic == RTLOCKVALRECEXCL_MAGIC_DEAD
                           || u32Magic == RTLOCKVALRECSHRD_MAGIC_DEAD);
                    continue; /* record died - keep popping */
                }

                /* Any next record to advance to? */
                if (   !pRec
                    || pRec == pStack->a[i].pFirstSibling)
                    continue;
                iEntry = UINT32_MAX;
                break;
            }

            /* Restore the rest of the state and update the stack. */
            pFirstSibling   = pStack->a[i].pFirstSibling;
            enmState        = pStack->a[i].enmState;
            pThread         = pStack->a[i].pThread;
            pStack->c       = i;
        }

        Assert(iLoop != 1000000); /* runaway-loop canary */
    }
}
2814
2815
2816/**
2817 * Check for the simple no-deadlock case.
2818 *
2819 * @returns true if no deadlock, false if further investigation is required.
2820 *
2821 * @param pOriginalRec The original record.
2822 */
2823DECLINLINE(int) rtLockValidatorIsSimpleNoDeadlockCase(PRTLOCKVALRECUNION pOriginalRec)
2824{
2825 if ( pOriginalRec->Excl.Core.u32Magic == RTLOCKVALRECEXCL_MAGIC
2826 && !pOriginalRec->Excl.pSibling)
2827 {
2828 PRTTHREADINT pThread = rtLockValidatorReadThreadHandle(&pOriginalRec->Excl.hThread);
2829 if ( !pThread
2830 || pThread->u32Magic != RTTHREADINT_MAGIC)
2831 return true;
2832 RTTHREADSTATE enmState = rtThreadGetState(pThread);
2833 if (!RTTHREAD_IS_SLEEPING(enmState))
2834 return true;
2835 }
2836 return false;
2837}
2838
2839
/**
 * Worker for rtLockValidatorDeadlockDetection that bitches about a deadlock.
 *
 * Honours the global quiet flag, then dumps the current operation and every
 * entry in the deadlock chain (with per-thread lock stacks), and finally
 * panics if configured to do so.
 *
 * @param   pStack          The chain of locks causing the deadlock.
 * @param   pRec            The record relating to the current thread's lock
 *                          operation.
 * @param   pThreadSelf     This thread.
 * @param   pSrcPos         Where we are going to deadlock.
 * @param   rc              The return code.
 */
static void rcLockValidatorDoDeadlockComplaining(PRTLOCKVALDDSTACK pStack, PRTLOCKVALRECUNION pRec,
                                                 PRTTHREADINT pThreadSelf, PCRTLOCKVALSRCPOS pSrcPos, int rc)
{
    if (!ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
    {
        /* Map the status code to a human readable headline. */
        const char *pszWhat;
        switch (rc)
        {
            case VERR_SEM_LV_DEADLOCK:          pszWhat = "Detected deadlock!"; break;
            case VERR_SEM_LV_EXISTING_DEADLOCK: pszWhat = "Found existing deadlock!"; break;
            case VERR_SEM_LV_ILLEGAL_UPGRADE:   pszWhat = "Illegal lock upgrade!"; break;
            default:                            AssertFailed(); pszWhat = "!unexpected rc!"; break;
        }
        /* Only mention pRec separately when it isn't the first chain entry. */
        rtLockValComplainFirst(pszWhat, pSrcPos, pThreadSelf, pStack->a[0].pRec != pRec ? pRec : NULL, true);
        rtLockValComplainMore("---- start of deadlock chain - %u entries ----\n", pStack->c);
        for (uint32_t i = 0; i < pStack->c; i++)
        {
            char szPrefix[24];
            RTStrPrintf(szPrefix, sizeof(szPrefix), "#%02u: ", i);
            /* For shared records, report the specific owner entry that forms
               this link of the chain.  The pointer is re-read without
               serialization, hence the VALID_PTR + magic re-check. */
            PRTLOCKVALRECUNION pShrdOwner = NULL;
            if (pStack->a[i].pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC)
                pShrdOwner = (PRTLOCKVALRECUNION)pStack->a[i].pRec->Shared.papOwners[pStack->a[i].iEntry];
            if (VALID_PTR(pShrdOwner) && pShrdOwner->Core.u32Magic == RTLOCKVALRECSHRDOWN_MAGIC)
            {
                rtLockValComplainAboutLock(szPrefix, pShrdOwner, "\n");
                rtLockValComplainAboutLockStack(pShrdOwner->ShrdOwner.hThread, 5, 2, pShrdOwner);
            }
            else
            {
                rtLockValComplainAboutLock(szPrefix, pStack->a[i].pRec, "\n");
                if (pStack->a[i].pRec->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC)
                    rtLockValComplainAboutLockStack(pStack->a[i].pRec->Excl.hThread, 5, 2, pStack->a[i].pRec);
            }
        }
        rtLockValComplainMore("---- end of deadlock chain ----\n");
    }

    rtLockValComplainPanic();
}
2889
2890
/**
 * Perform deadlock detection.
 *
 * Runs the lock-free graph search under the detection serializer and retries
 * a few times when the worker reports a transient/inconsistent state
 * (VERR_TRY_AGAIN).  A persistent VERR_TRY_AGAIN is treated as success since
 * the state keeps changing, i.e. somebody is making progress.
 *
 * @retval  VINF_SUCCESS
 * @retval  VERR_SEM_LV_DEADLOCK
 * @retval  VERR_SEM_LV_EXISTING_DEADLOCK
 * @retval  VERR_SEM_LV_ILLEGAL_UPGRADE
 *
 * @param   pRec            The record relating to the current thread's lock
 *                          operation.
 * @param   pThreadSelf     The current thread.
 * @param   pSrcPos         The position of the current lock operation.
 */
static int rtLockValidatorDeadlockDetection(PRTLOCKVALRECUNION pRec, PRTTHREADINT pThreadSelf, PCRTLOCKVALSRCPOS pSrcPos)
{
    RTLOCKVALDDSTACK Stack;
    rtLockValidatorSerializeDetectionEnter();
    int rc = rtLockValidatorDdDoDetection(&Stack, pRec, pThreadSelf);
    rtLockValidatorSerializeDetectionLeave();
    if (RT_SUCCESS(rc))
        return VINF_SUCCESS;

    if (rc == VERR_TRY_AGAIN)
    {
        /* Retry with yields; give up (as success) after a few attempts. */
        for (uint32_t iLoop = 0; ; iLoop++)
        {
            rtLockValidatorSerializeDetectionEnter();
            rc = rtLockValidatorDdDoDetection(&Stack, pRec, pThreadSelf);
            rtLockValidatorSerializeDetectionLeave();
            if (RT_SUCCESS_NP(rc))
                return VINF_SUCCESS;
            if (rc != VERR_TRY_AGAIN)
                break;
            RTThreadYield();
            if (iLoop >= 3)
                return VINF_SUCCESS; /* state keeps changing - assume no deadlock */
        }
    }

    /* Real (or existing) deadlock - report it and return the failure. */
    rcLockValidatorDoDeadlockComplaining(&Stack, pRec, pThreadSelf, pSrcPos, rc);
    return rc;
}
2933
2934
/**
 * Initializes an exclusive lock validator record (va_list version).
 *
 * Fills in all fields of @a pRec, formats the record name (or synthesizes an
 * "anon-excl-N" name when @a pszNameFmt is NULL), and makes sure the global
 * validator state is lazily initialized.
 *
 * @param   pRec        The record to initialize.
 * @param   hClass      The lock class (validated and retained).
 * @param   uSubClass   The sub-class; must be a USER value, NONE or ANY.
 * @param   hLock       The lock handle this record is associated with.
 * @param   fEnabled    Whether validation is enabled for this lock (further
 *                      gated by the global RTLockValidatorIsEnabled()).
 * @param   pszNameFmt  Name format string, optional (NULL = anonymous).
 * @param   va          Format arguments for @a pszNameFmt.
 */
RTDECL(void) RTLockValidatorRecExclInitV(PRTLOCKVALRECEXCL pRec, RTLOCKVALCLASS hClass, uint32_t uSubClass,
                                         void *hLock, bool fEnabled, const char *pszNameFmt, va_list va)
{
    RTLOCKVAL_ASSERT_PTR_ALIGN(pRec);
    RTLOCKVAL_ASSERT_PTR_ALIGN(hLock);
    Assert(   uSubClass >= RTLOCKVAL_SUB_CLASS_USER
           || uSubClass == RTLOCKVAL_SUB_CLASS_NONE
           || uSubClass == RTLOCKVAL_SUB_CLASS_ANY);

    pRec->Core.u32Magic = RTLOCKVALRECEXCL_MAGIC;
    pRec->fEnabled      = fEnabled && RTLockValidatorIsEnabled();
    pRec->afReserved[0] = 0;
    pRec->afReserved[1] = 0;
    pRec->afReserved[2] = 0;
    rtLockValidatorSrcPosInit(&pRec->SrcPos);
    pRec->hThread       = NIL_RTTHREAD;
    pRec->pDown         = NULL;
    pRec->hClass        = rtLockValidatorClassValidateAndRetain(hClass);
    pRec->uSubClass     = uSubClass;
    pRec->cRecursion    = 0;
    pRec->hLock         = hLock;
    pRec->pSibling      = NULL;
    if (pszNameFmt)
        RTStrPrintfV(pRec->szName, sizeof(pRec->szName), pszNameFmt, va);
    else
    {
        /* Give anonymous records a unique, stable name for reports. */
        static uint32_t volatile s_cAnonymous = 0;
        uint32_t i = ASMAtomicIncU32(&s_cAnonymous) - 1;
        RTStrPrintf(pRec->szName, sizeof(pRec->szName), "anon-excl-%u", i);
    }

    /* Lazy initialization. */
    if (RT_UNLIKELY(g_hLockValidatorXRoads == NIL_RTSEMXROADS))
        rtLockValidatorLazyInit();
}
2970
2971
2972RTDECL(void) RTLockValidatorRecExclInit(PRTLOCKVALRECEXCL pRec, RTLOCKVALCLASS hClass, uint32_t uSubClass,
2973 void *hLock, bool fEnabled, const char *pszNameFmt, ...)
2974{
2975 va_list va;
2976 va_start(va, pszNameFmt);
2977 RTLockValidatorRecExclInitV(pRec, hClass, uSubClass, hLock, fEnabled, pszNameFmt, va);
2978 va_end(va);
2979}
2980
2981
2982RTDECL(int) RTLockValidatorRecExclCreateV(PRTLOCKVALRECEXCL *ppRec, RTLOCKVALCLASS hClass,
2983 uint32_t uSubClass, void *pvLock, bool fEnabled,
2984 const char *pszNameFmt, va_list va)
2985{
2986 PRTLOCKVALRECEXCL pRec;
2987 *ppRec = pRec = (PRTLOCKVALRECEXCL)RTMemAlloc(sizeof(*pRec));
2988 if (!pRec)
2989 return VERR_NO_MEMORY;
2990 RTLockValidatorRecExclInitV(pRec, hClass, uSubClass, pvLock, fEnabled, pszNameFmt, va);
2991 return VINF_SUCCESS;
2992}
2993
2994
2995RTDECL(int) RTLockValidatorRecExclCreate(PRTLOCKVALRECEXCL *ppRec, RTLOCKVALCLASS hClass,
2996 uint32_t uSubClass, void *pvLock, bool fEnabled,
2997 const char *pszNameFmt, ...)
2998{
2999 va_list va;
3000 va_start(va, pszNameFmt);
3001 int rc = RTLockValidatorRecExclCreateV(ppRec, hClass, uSubClass, pvLock, fEnabled, pszNameFmt, va);
3002 va_end(va);
3003 return rc;
3004}
3005
3006
/**
 * Uninitializes an exclusive lock validator record (does not free it).
 */
RTDECL(void) RTLockValidatorRecExclDelete(PRTLOCKVALRECEXCL pRec)
{
    Assert(pRec->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC);

    /* Serialize against deadlock detection so nobody is walking this record
       while we invalidate it. */
    rtLockValidatorSerializeDestructEnter();

    /** @todo Check that it's not on our stack first. Need to make it
     * configurable whether deleting a owned lock is acceptable? */

    /* Kill the magic first so concurrent readers reject the record, then
       clear the owner, detach the class and unlink any siblings. */
    ASMAtomicWriteU32(&pRec->Core.u32Magic, RTLOCKVALRECEXCL_MAGIC_DEAD);
    ASMAtomicWriteHandle(&pRec->hThread, NIL_RTTHREAD);
    RTLOCKVALCLASS hClass;
    ASMAtomicXchgHandle(&pRec->hClass, NIL_RTLOCKVALCLASS, &hClass);
    if (pRec->pSibling)
        rtLockValidatorUnlinkAllSiblings(&pRec->Core);
    rtLockValidatorSerializeDestructLeave();
    /* Drop the class reference outside the destruction lock. */
    if (hClass != NIL_RTLOCKVALCLASS)
        RTLockValidatorClassRelease(hClass);
}
3026
3027
3028RTDECL(void) RTLockValidatorRecExclDestroy(PRTLOCKVALRECEXCL *ppRec)
3029{
3030 PRTLOCKVALRECEXCL pRec = *ppRec;
3031 *ppRec = NULL;
3032 if (pRec)
3033 {
3034 RTLockValidatorRecExclDelete(pRec);
3035 RTMemFree(pRec);
3036 }
3037}
3038
3039
3040RTDECL(uint32_t) RTLockValidatorRecExclSetSubClass(PRTLOCKVALRECEXCL pRec, uint32_t uSubClass)
3041{
3042 AssertPtrReturn(pRec, RTLOCKVAL_SUB_CLASS_INVALID);
3043 AssertReturn(pRec->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, RTLOCKVAL_SUB_CLASS_INVALID);
3044 AssertReturn( uSubClass >= RTLOCKVAL_SUB_CLASS_USER
3045 || uSubClass == RTLOCKVAL_SUB_CLASS_NONE
3046 || uSubClass == RTLOCKVAL_SUB_CLASS_ANY,
3047 RTLOCKVAL_SUB_CLASS_INVALID);
3048 return ASMAtomicXchgU32(&pRec->uSubClass, uSubClass);
3049}
3050
3051
/**
 * Records @a hThreadSelf as owner of the exclusive lock @a pRec, or bumps the
 * recursion count if it already owns it.  Pure accounting - no order or
 * deadlock checks are performed here.
 */
RTDECL(void) RTLockValidatorRecExclSetOwner(PRTLOCKVALRECEXCL pRec, RTTHREAD hThreadSelf,
                                            PCRTLOCKVALSRCPOS pSrcPos, bool fFirstRecursion)
{
    PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
    if (!pRecU)
        return;                     /* NULL record = validation not active for this lock. */
    AssertReturnVoid(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC);
    if (!pRecU->Excl.fEnabled)
        return;
    if (hThreadSelf == NIL_RTTHREAD)
    {
        /* Adopt alien (non-IPRT) threads on the fly. */
        hThreadSelf = RTThreadSelfAutoAdopt();
        AssertReturnVoid(hThreadSelf != NIL_RTTHREAD);
    }
    AssertReturnVoid(hThreadSelf->u32Magic == RTTHREADINT_MAGIC);
    Assert(hThreadSelf == RTThreadSelf());

    ASMAtomicIncS32(&hThreadSelf->LockValidator.cWriteLocks);

    if (pRecU->Excl.hThread == hThreadSelf)
    {
        /* Recursive entry: bump the count and push a recursion frame. */
        Assert(!fFirstRecursion); RT_NOREF_PV(fFirstRecursion);
        pRecU->Excl.cRecursion++;
        rtLockValidatorStackPushRecursion(hThreadSelf, pRecU, pSrcPos);
    }
    else
    {
        /* First acquisition: record position and recursion count before
           publishing the owner handle, then push onto the lock stack. */
        Assert(pRecU->Excl.hThread == NIL_RTTHREAD);

        rtLockValidatorSrcPosCopy(&pRecU->Excl.SrcPos, pSrcPos);
        ASMAtomicUoWriteU32(&pRecU->Excl.cRecursion, 1);
        ASMAtomicWriteHandle(&pRecU->Excl.hThread, hThreadSelf);

        rtLockValidatorStackPush(hThreadSelf, pRecU);
    }
}
3088
3089
3090/**
3091 * Internal worker for RTLockValidatorRecExclReleaseOwner and
3092 * RTLockValidatorRecExclReleaseOwnerUnchecked.
3093 */
3094static void rtLockValidatorRecExclReleaseOwnerUnchecked(PRTLOCKVALRECUNION pRec, bool fFinalRecursion)
3095{
3096 RTTHREADINT *pThread = pRec->Excl.hThread;
3097 AssertReturnVoid(pThread != NIL_RTTHREAD);
3098 Assert(pThread == RTThreadSelf());
3099
3100 ASMAtomicDecS32(&pThread->LockValidator.cWriteLocks);
3101 uint32_t c = ASMAtomicDecU32(&pRec->Excl.cRecursion);
3102 if (c == 0)
3103 {
3104 rtLockValidatorStackPop(pThread, pRec);
3105 ASMAtomicWriteHandle(&pRec->Excl.hThread, NIL_RTTHREAD);
3106 }
3107 else
3108 {
3109 Assert(c < UINT32_C(0xffff0000));
3110 Assert(!fFinalRecursion); RT_NOREF_PV(fFinalRecursion);
3111 rtLockValidatorStackPopRecursion(pThread, pRec);
3112 }
3113}
3114
/**
 * Releases ownership of the exclusive lock record, verifying the release
 * order first when the class demands it.
 */
RTDECL(int) RTLockValidatorRecExclReleaseOwner(PRTLOCKVALRECEXCL pRec, bool fFinalRecursion)
{
    PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
    if (!pRecU)
        return VINF_SUCCESS;        /* NULL record = validation not active. */
    AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    if (!pRecU->Excl.fEnabled)
        return VINF_SUCCESS;

    /*
     * Check the release order.
     */
    if (   pRecU->Excl.hClass != NIL_RTLOCKVALCLASS
        && pRecU->Excl.hClass->fStrictReleaseOrder
        && pRecU->Excl.hClass->cMsMinOrder != RT_INDEFINITE_WAIT
       )
    {
        int rc = rtLockValidatorStackCheckReleaseOrder(pRecU->Excl.hThread, pRecU);
        if (RT_FAILURE(rc))
            return rc;
    }

    /*
     * Join paths with RTLockValidatorRecExclReleaseOwnerUnchecked.
     */
    rtLockValidatorRecExclReleaseOwnerUnchecked(pRecU, fFinalRecursion);
    return VINF_SUCCESS;
}
3143
3144
3145RTDECL(void) RTLockValidatorRecExclReleaseOwnerUnchecked(PRTLOCKVALRECEXCL pRec)
3146{
3147 PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
3148 AssertReturnVoid(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC);
3149 if (pRecU->Excl.fEnabled)
3150 rtLockValidatorRecExclReleaseOwnerUnchecked(pRecU, false);
3151}
3152
3153
/**
 * Records a recursive (re-)entry of the exclusive lock by its current owner,
 * complaining if the lock class forbids recursion.
 */
RTDECL(int) RTLockValidatorRecExclRecursion(PRTLOCKVALRECEXCL pRec, PCRTLOCKVALSRCPOS pSrcPos)
{
    PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
    if (!pRecU)
        return VINF_SUCCESS;        /* NULL record = validation not active. */
    AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    if (!pRecU->Excl.fEnabled)
        return VINF_SUCCESS;
    /* Recursion only makes sense on a lock that is already owned. */
    AssertReturn(pRecU->Excl.hThread != NIL_RTTHREAD, VERR_SEM_LV_INVALID_PARAMETER);
    AssertReturn(pRecU->Excl.cRecursion > 0, VERR_SEM_LV_INVALID_PARAMETER);

    if (   pRecU->Excl.hClass != NIL_RTLOCKVALCLASS
        && !pRecU->Excl.hClass->fRecursionOk)
    {
        rtLockValComplainFirst("Recursion not allowed by the class!",
                               pSrcPos, pRecU->Excl.hThread, (PRTLOCKVALRECUNION)pRec, true);
        rtLockValComplainPanic();
        return VERR_SEM_LV_NESTED;
    }

    Assert(pRecU->Excl.cRecursion < _1M);   /* sanity: runaway recursion */
    pRecU->Excl.cRecursion++;
    rtLockValidatorStackPushRecursion(pRecU->Excl.hThread, pRecU, pSrcPos);
    return VINF_SUCCESS;
}
3179
3180
/**
 * Unwinds one level of recursion on the exclusive lock record (never the
 * final release - cRecursion must stay above zero).
 */
RTDECL(int) RTLockValidatorRecExclUnwind(PRTLOCKVALRECEXCL pRec)
{
    PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
    AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    if (!pRecU->Excl.fEnabled)
        return VINF_SUCCESS;
    AssertReturn(pRecU->Excl.hThread != NIL_RTTHREAD, VERR_SEM_LV_INVALID_PARAMETER);
    Assert(pRecU->Excl.hThread == RTThreadSelf());
    /* > 1 because unwinding must leave at least one acquisition behind. */
    AssertReturn(pRecU->Excl.cRecursion > 1, VERR_SEM_LV_INVALID_PARAMETER);

    /*
     * Check the release order.
     */
    if (   pRecU->Excl.hClass != NIL_RTLOCKVALCLASS
        && pRecU->Excl.hClass->fStrictReleaseOrder
        && pRecU->Excl.hClass->cMsMinOrder != RT_INDEFINITE_WAIT
       )
    {
        int rc = rtLockValidatorStackCheckReleaseOrder(pRecU->Excl.hThread, pRecU);
        if (RT_FAILURE(rc))
            return rc;
    }

    /*
     * Perform the unwind.
     */
    pRecU->Excl.cRecursion--;
    rtLockValidatorStackPopRecursion(pRecU->Excl.hThread, pRecU);
    return VINF_SUCCESS;
}
3211
3212
/**
 * Records a mixed recursion: re-entering the exclusive lock @a pRec while
 * the related record @a pRecMixed (shared or exclusive) is involved.
 * @a pRecMixed is only validated here, not modified.
 */
RTDECL(int) RTLockValidatorRecExclRecursionMixed(PRTLOCKVALRECEXCL pRec, PRTLOCKVALRECCORE pRecMixed, PCRTLOCKVALSRCPOS pSrcPos)
{
    PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
    AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    PRTLOCKVALRECUNION pRecMixedU = (PRTLOCKVALRECUNION)pRecMixed;
    AssertReturn(   pRecMixedU->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
                 || pRecMixedU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC
                 , VERR_SEM_LV_INVALID_PARAMETER);
    if (!pRecU->Excl.fEnabled)
        return VINF_SUCCESS;
    Assert(pRecU->Excl.hThread == RTThreadSelf());
    AssertReturn(pRecU->Excl.hThread != NIL_RTTHREAD, VERR_SEM_LV_INVALID_PARAMETER);
    AssertReturn(pRecU->Excl.cRecursion > 0, VERR_SEM_LV_INVALID_PARAMETER);

    if (   pRecU->Excl.hClass != NIL_RTLOCKVALCLASS
        && !pRecU->Excl.hClass->fRecursionOk)
    {
        rtLockValComplainFirst("Mixed recursion not allowed by the class!",
                               pSrcPos, pRecU->Excl.hThread, (PRTLOCKVALRECUNION)pRec, true);
        rtLockValComplainPanic();
        return VERR_SEM_LV_NESTED;
    }

    Assert(pRecU->Excl.cRecursion < _1M);   /* sanity: runaway recursion */
    pRecU->Excl.cRecursion++;
    rtLockValidatorStackPushRecursion(pRecU->Excl.hThread, pRecU, pSrcPos);

    return VINF_SUCCESS;
}
3242
3243
/**
 * Unwinds one level of a mixed recursion recorded via
 * RTLockValidatorRecExclRecursionMixed.  @a pRecMixed is only validated.
 */
RTDECL(int) RTLockValidatorRecExclUnwindMixed(PRTLOCKVALRECEXCL pRec, PRTLOCKVALRECCORE pRecMixed)
{
    PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
    AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    PRTLOCKVALRECUNION pRecMixedU = (PRTLOCKVALRECUNION)pRecMixed;
    AssertReturn(   pRecMixedU->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
                 || pRecMixedU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC
                 , VERR_SEM_LV_INVALID_PARAMETER);
    if (!pRecU->Excl.fEnabled)
        return VINF_SUCCESS;
    Assert(pRecU->Excl.hThread == RTThreadSelf());
    AssertReturn(pRecU->Excl.hThread != NIL_RTTHREAD, VERR_SEM_LV_INVALID_PARAMETER);
    /* > 1 because unwinding must leave at least one acquisition behind. */
    AssertReturn(pRecU->Excl.cRecursion > 1, VERR_SEM_LV_INVALID_PARAMETER);

    /*
     * Check the release order.
     */
    if (   pRecU->Excl.hClass != NIL_RTLOCKVALCLASS
        && pRecU->Excl.hClass->fStrictReleaseOrder
        && pRecU->Excl.hClass->cMsMinOrder != RT_INDEFINITE_WAIT
       )
    {
        int rc = rtLockValidatorStackCheckReleaseOrder(pRecU->Excl.hThread, pRecU);
        if (RT_FAILURE(rc))
            return rc;
    }

    /*
     * Perform the unwind.
     */
    pRecU->Excl.cRecursion--;
    rtLockValidatorStackPopRecursion(pRecU->Excl.hThread, pRecU);
    return VINF_SUCCESS;
}
3278
3279
/**
 * Checks the locking order before acquiring the exclusive lock.  Does nothing
 * when order validation is disabled for the record/class or when the wait is
 * shorter than the class's minimum (cMsMinOrder).
 */
RTDECL(int) RTLockValidatorRecExclCheckOrder(PRTLOCKVALRECEXCL pRec, RTTHREAD hThreadSelf,
                                             PCRTLOCKVALSRCPOS pSrcPos, RTMSINTERVAL cMillies)
{
    /*
     * Validate and adjust input. Quit early if order validation is disabled.
     */
    PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
    if (!pRecU)
        return VINF_SUCCESS;
    AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    if (   !pRecU->Excl.fEnabled
        || pRecU->Excl.hClass == NIL_RTLOCKVALCLASS
        || pRecU->Excl.hClass->cMsMinOrder == RT_INDEFINITE_WAIT
        || pRecU->Excl.hClass->cMsMinOrder > cMillies)
        return VINF_SUCCESS;

    if (hThreadSelf == NIL_RTTHREAD)
    {
        /* Adopt alien (non-IPRT) threads on the fly. */
        hThreadSelf = RTThreadSelfAutoAdopt();
        AssertReturn(hThreadSelf != NIL_RTTHREAD, VERR_SEM_LV_INTERNAL_ERROR);
    }
    AssertReturn(hThreadSelf->u32Magic == RTTHREADINT_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    Assert(hThreadSelf == RTThreadSelf());

    /*
     * Detect recursion as it isn't subject to order restrictions.
     */
    if (pRec->hThread == hThreadSelf)
        return VINF_SUCCESS;

    return rtLockValidatorStackCheckLockingOrder(pRecU->Excl.hClass, pRecU->Excl.uSubClass, hThreadSelf, pRecU, pSrcPos);
}
3312
3313
3314RTDECL(int) RTLockValidatorRecExclCheckBlocking(PRTLOCKVALRECEXCL pRec, RTTHREAD hThreadSelf,
3315 PCRTLOCKVALSRCPOS pSrcPos, bool fRecursiveOk, RTMSINTERVAL cMillies,
3316 RTTHREADSTATE enmSleepState, bool fReallySleeping)
3317{
3318 /*
3319 * Fend off wild life.
3320 */
3321 PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
3322 if (!pRecU)
3323 return VINF_SUCCESS;
3324 AssertPtrReturn(pRecU, VERR_SEM_LV_INVALID_PARAMETER);
3325 AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
3326 if (!pRec->fEnabled)
3327 return VINF_SUCCESS;
3328
3329 PRTTHREADINT pThreadSelf = hThreadSelf;
3330 AssertPtrReturn(pThreadSelf, VERR_SEM_LV_INVALID_PARAMETER);
3331 AssertReturn(pThreadSelf->u32Magic == RTTHREADINT_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
3332 Assert(pThreadSelf == RTThreadSelf());
3333
3334 AssertReturn(RTTHREAD_IS_SLEEPING(enmSleepState), VERR_SEM_LV_INVALID_PARAMETER);
3335
3336 RTTHREADSTATE enmThreadState = rtThreadGetState(pThreadSelf);
3337 if (RT_UNLIKELY(enmThreadState != RTTHREADSTATE_RUNNING))
3338 {
3339 AssertReturn( enmThreadState == RTTHREADSTATE_TERMINATED /* rtThreadRemove uses locks too */
3340 || enmThreadState == RTTHREADSTATE_INITIALIZING /* rtThreadInsert uses locks too */
3341 , VERR_SEM_LV_INVALID_PARAMETER);
3342 enmSleepState = enmThreadState;
3343 }
3344
3345 /*
3346 * Record the location.
3347 */
3348 rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pRec, pRecU);
3349 rtLockValidatorSrcPosCopy(&pThreadSelf->LockValidator.SrcPos, pSrcPos);
3350 ASMAtomicWriteBool(&pThreadSelf->LockValidator.fInValidator, true);
3351 pThreadSelf->LockValidator.enmRecState = enmSleepState;
3352 rtThreadSetState(pThreadSelf, enmSleepState);
3353
3354 /*
3355 * Don't do deadlock detection if we're recursing.
3356 *
3357 * On some hosts we don't do recursion accounting our selves and there
3358 * isn't any other place to check for this.
3359 */
3360 int rc = VINF_SUCCESS;
3361 if (rtLockValidatorReadThreadHandle(&pRecU->Excl.hThread) == pThreadSelf)
3362 {
3363 if ( !fRecursiveOk
3364 || ( pRecU->Excl.hClass != NIL_RTLOCKVALCLASS
3365 && !pRecU->Excl.hClass->fRecursionOk))
3366 {
3367 rtLockValComplainFirst("Recursion not allowed!", pSrcPos, pThreadSelf, pRecU, true);
3368 rtLockValComplainPanic();
3369 rc = VERR_SEM_LV_NESTED;
3370 }
3371 }
3372 /*
3373 * Perform deadlock detection.
3374 */
3375 else if ( pRecU->Excl.hClass != NIL_RTLOCKVALCLASS
3376 && ( pRecU->Excl.hClass->cMsMinDeadlock > cMillies
3377 || pRecU->Excl.hClass->cMsMinDeadlock > RT_INDEFINITE_WAIT))
3378 rc = VINF_SUCCESS;
3379 else if (!rtLockValidatorIsSimpleNoDeadlockCase(pRecU))
3380 rc = rtLockValidatorDeadlockDetection(pRecU, pThreadSelf, pSrcPos);
3381
3382 if (RT_SUCCESS(rc))
3383 ASMAtomicWriteBool(&pThreadSelf->fReallySleeping, fReallySleeping);
3384 else
3385 {
3386 rtThreadSetState(pThreadSelf, enmThreadState);
3387 rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pRec, NULL);
3388 }
3389 ASMAtomicWriteBool(&pThreadSelf->LockValidator.fInValidator, false);
3390 return rc;
3391}
3392RT_EXPORT_SYMBOL(RTLockValidatorRecExclCheckBlocking);
3393
3394
3395RTDECL(int) RTLockValidatorRecExclCheckOrderAndBlocking(PRTLOCKVALRECEXCL pRec, RTTHREAD hThreadSelf,
3396 PCRTLOCKVALSRCPOS pSrcPos, bool fRecursiveOk, RTMSINTERVAL cMillies,
3397 RTTHREADSTATE enmSleepState, bool fReallySleeping)
3398{
3399 int rc = RTLockValidatorRecExclCheckOrder(pRec, hThreadSelf, pSrcPos, cMillies);
3400 if (RT_SUCCESS(rc))
3401 rc = RTLockValidatorRecExclCheckBlocking(pRec, hThreadSelf, pSrcPos, fRecursiveOk, cMillies,
3402 enmSleepState, fReallySleeping);
3403 return rc;
3404}
3405RT_EXPORT_SYMBOL(RTLockValidatorRecExclCheckOrderAndBlocking);
3406
3407
3408RTDECL(void) RTLockValidatorRecSharedInitV(PRTLOCKVALRECSHRD pRec, RTLOCKVALCLASS hClass, uint32_t uSubClass,
3409 void *hLock, bool fSignaller, bool fEnabled, const char *pszNameFmt, va_list va)
3410{
3411 RTLOCKVAL_ASSERT_PTR_ALIGN(pRec);
3412 RTLOCKVAL_ASSERT_PTR_ALIGN(hLock);
3413 Assert( uSubClass >= RTLOCKVAL_SUB_CLASS_USER
3414 || uSubClass == RTLOCKVAL_SUB_CLASS_NONE
3415 || uSubClass == RTLOCKVAL_SUB_CLASS_ANY);
3416
3417 pRec->Core.u32Magic = RTLOCKVALRECSHRD_MAGIC;
3418 pRec->uSubClass = uSubClass;
3419 pRec->hClass = rtLockValidatorClassValidateAndRetain(hClass);
3420 pRec->hLock = hLock;
3421 pRec->fEnabled = fEnabled && RTLockValidatorIsEnabled();
3422 pRec->fSignaller = fSignaller;
3423 pRec->pSibling = NULL;
3424
3425 /* the table */
3426 pRec->cEntries = 0;
3427 pRec->iLastEntry = 0;
3428 pRec->cAllocated = 0;
3429 pRec->fReallocating = false;
3430 pRec->fPadding = false;
3431 pRec->papOwners = NULL;
3432
3433 /* the name */
3434 if (pszNameFmt)
3435 RTStrPrintfV(pRec->szName, sizeof(pRec->szName), pszNameFmt, va);
3436 else
3437 {
3438 static uint32_t volatile s_cAnonymous = 0;
3439 uint32_t i = ASMAtomicIncU32(&s_cAnonymous) - 1;
3440 RTStrPrintf(pRec->szName, sizeof(pRec->szName), "anon-shrd-%u", i);
3441 }
3442}
3443
3444
3445RTDECL(void) RTLockValidatorRecSharedInit(PRTLOCKVALRECSHRD pRec, RTLOCKVALCLASS hClass, uint32_t uSubClass,
3446 void *hLock, bool fSignaller, bool fEnabled, const char *pszNameFmt, ...)
3447{
3448 va_list va;
3449 va_start(va, pszNameFmt);
3450 RTLockValidatorRecSharedInitV(pRec, hClass, uSubClass, hLock, fSignaller, fEnabled, pszNameFmt, va);
3451 va_end(va);
3452}
3453
3454
3455RTDECL(int) RTLockValidatorRecSharedCreateV(PRTLOCKVALRECSHRD *ppRec, RTLOCKVALCLASS hClass,
3456 uint32_t uSubClass, void *pvLock, bool fSignaller, bool fEnabled,
3457 const char *pszNameFmt, va_list va)
3458{
3459 PRTLOCKVALRECSHRD pRec;
3460 *ppRec = pRec = (PRTLOCKVALRECSHRD)RTMemAlloc(sizeof(*pRec));
3461 if (!pRec)
3462 return VERR_NO_MEMORY;
3463 RTLockValidatorRecSharedInitV(pRec, hClass, uSubClass, pvLock, fSignaller, fEnabled, pszNameFmt, va);
3464 return VINF_SUCCESS;
3465}
3466
3467
3468RTDECL(int) RTLockValidatorRecSharedCreate(PRTLOCKVALRECSHRD *ppRec, RTLOCKVALCLASS hClass,
3469 uint32_t uSubClass, void *pvLock, bool fSignaller, bool fEnabled,
3470 const char *pszNameFmt, ...)
3471{
3472 va_list va;
3473 va_start(va, pszNameFmt);
3474 int rc = RTLockValidatorRecSharedCreateV(ppRec, hClass, uSubClass, pvLock, fSignaller, fEnabled, pszNameFmt, va);
3475 va_end(va);
3476 return rc;
3477}
3478
3479
/**
 * Uninitializes a shared lock validator record, freeing its owner table
 * (but not the record itself).
 */
RTDECL(void) RTLockValidatorRecSharedDelete(PRTLOCKVALRECSHRD pRec)
{
    Assert(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC);

    /** @todo Check that it's not on our stack first. Need to make it
     * configurable whether deleting a owned lock is acceptable? */

    /*
     * Flip it into table realloc mode and take the destruction lock.
     */
    rtLockValidatorSerializeDestructEnter();
    while (!ASMAtomicCmpXchgBool(&pRec->fReallocating, true, false))
    {
        /* Somebody else is reallocating the table: drop the destruction lock
           and cycle through the detection lock to let them finish, then retry. */
        rtLockValidatorSerializeDestructLeave();

        rtLockValidatorSerializeDetectionEnter();
        rtLockValidatorSerializeDetectionLeave();

        rtLockValidatorSerializeDestructEnter();
    }

    /* Kill the magic first so concurrent readers reject the record. */
    ASMAtomicWriteU32(&pRec->Core.u32Magic, RTLOCKVALRECSHRD_MAGIC_DEAD);
    RTLOCKVALCLASS hClass;
    ASMAtomicXchgHandle(&pRec->hClass, NIL_RTLOCKVALCLASS, &hClass);
    if (pRec->papOwners)
    {
        /* Detach the owner table before freeing it. */
        PRTLOCKVALRECSHRDOWN volatile *papOwners = pRec->papOwners;
        ASMAtomicUoWriteNullPtr(&pRec->papOwners);
        ASMAtomicUoWriteU32(&pRec->cAllocated, 0);

        RTMemFree((void *)papOwners);
    }
    if (pRec->pSibling)
        rtLockValidatorUnlinkAllSiblings(&pRec->Core);
    ASMAtomicWriteBool(&pRec->fReallocating, false);

    rtLockValidatorSerializeDestructLeave();

    /* Drop the class reference outside the destruction lock. */
    if (hClass != NIL_RTLOCKVALCLASS)
        RTLockValidatorClassRelease(hClass);
}
3521
3522
3523RTDECL(void) RTLockValidatorRecSharedDestroy(PRTLOCKVALRECSHRD *ppRec)
3524{
3525 PRTLOCKVALRECSHRD pRec = *ppRec;
3526 *ppRec = NULL;
3527 if (pRec)
3528 {
3529 RTLockValidatorRecSharedDelete(pRec);
3530 RTMemFree(pRec);
3531 }
3532}
3533
3534
3535RTDECL(uint32_t) RTLockValidatorRecSharedSetSubClass(PRTLOCKVALRECSHRD pRec, uint32_t uSubClass)
3536{
3537 AssertPtrReturn(pRec, RTLOCKVAL_SUB_CLASS_INVALID);
3538 AssertReturn(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, RTLOCKVAL_SUB_CLASS_INVALID);
3539 AssertReturn( uSubClass >= RTLOCKVAL_SUB_CLASS_USER
3540 || uSubClass == RTLOCKVAL_SUB_CLASS_NONE
3541 || uSubClass == RTLOCKVAL_SUB_CLASS_ANY,
3542 RTLOCKVAL_SUB_CLASS_INVALID);
3543 return ASMAtomicXchgU32(&pRec->uSubClass, uSubClass);
3544}
3545
3546
3547/**
3548 * Locates an owner (thread) in a shared lock record.
3549 *
3550 * @returns Pointer to the owner entry on success, NULL on failure..
3551 * @param pShared The shared lock record.
3552 * @param hThread The thread (owner) to find.
3553 * @param piEntry Where to optionally return the table in index.
3554 * Optional.
3555 */
3556DECLINLINE(PRTLOCKVALRECUNION)
3557rtLockValidatorRecSharedFindOwner(PRTLOCKVALRECSHRD pShared, RTTHREAD hThread, uint32_t *piEntry)
3558{
3559 rtLockValidatorSerializeDetectionEnter();
3560
3561 PRTLOCKVALRECSHRDOWN volatile *papOwners = pShared->papOwners;
3562 if (papOwners)
3563 {
3564 uint32_t const cMax = pShared->cAllocated;
3565 for (uint32_t iEntry = 0; iEntry < cMax; iEntry++)
3566 {
3567 PRTLOCKVALRECUNION pEntry = (PRTLOCKVALRECUNION)rtLockValidatorUoReadSharedOwner(&papOwners[iEntry]);
3568 if (pEntry && pEntry->ShrdOwner.hThread == hThread)
3569 {
3570 rtLockValidatorSerializeDetectionLeave();
3571 if (piEntry)
3572 *piEntry = iEntry;
3573 return pEntry;
3574 }
3575 }
3576 }
3577
3578 rtLockValidatorSerializeDetectionLeave();
3579 return NULL;
3580}
3581
3582
/**
 * Checks the locking order before acquiring the shared lock.  Does nothing
 * when order validation is disabled for the record/class or when the wait is
 * shorter than the class's minimum (cMsMinOrder).
 */
RTDECL(int) RTLockValidatorRecSharedCheckOrder(PRTLOCKVALRECSHRD pRec, RTTHREAD hThreadSelf,
                                               PCRTLOCKVALSRCPOS pSrcPos, RTMSINTERVAL cMillies)
{
    /*
     * Validate and adjust input. Quit early if order validation is disabled.
     */
    PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
    AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    if (   !pRecU->Shared.fEnabled
        || pRecU->Shared.hClass == NIL_RTLOCKVALCLASS
        || pRecU->Shared.hClass->cMsMinOrder == RT_INDEFINITE_WAIT
        || pRecU->Shared.hClass->cMsMinOrder > cMillies
        )
        return VINF_SUCCESS;

    if (hThreadSelf == NIL_RTTHREAD)
    {
        /* Adopt alien (non-IPRT) threads on the fly. */
        hThreadSelf = RTThreadSelfAutoAdopt();
        AssertReturn(hThreadSelf != NIL_RTTHREAD, VERR_SEM_LV_INTERNAL_ERROR);
    }
    AssertReturn(hThreadSelf->u32Magic == RTTHREADINT_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    Assert(hThreadSelf == RTThreadSelf());

    /*
     * Detect recursion as it isn't subject to order restrictions.
     */
    PRTLOCKVALRECUNION pEntry = rtLockValidatorRecSharedFindOwner(&pRecU->Shared, hThreadSelf, NULL);
    if (pEntry)
        return VINF_SUCCESS;

    return rtLockValidatorStackCheckLockingOrder(pRecU->Shared.hClass, pRecU->Shared.uSubClass, hThreadSelf, pRecU, pSrcPos);
}
3615
3616
/**
 * Checks whether it is ok for the calling thread to block on the shared lock
 * record, recording the blocking intent on the thread and performing deadlock
 * detection unless the class configuration disables it.
 */
RTDECL(int) RTLockValidatorRecSharedCheckBlocking(PRTLOCKVALRECSHRD pRec, RTTHREAD hThreadSelf,
                                                  PCRTLOCKVALSRCPOS pSrcPos, bool fRecursiveOk, RTMSINTERVAL cMillies,
                                                  RTTHREADSTATE enmSleepState, bool fReallySleeping)
{
    /*
     * Fend off wild life.
     */
    PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
    AssertPtrReturn(pRecU, VERR_SEM_LV_INVALID_PARAMETER);
    AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    if (!pRecU->Shared.fEnabled)
        return VINF_SUCCESS;

    PRTTHREADINT pThreadSelf = hThreadSelf;
    AssertPtrReturn(pThreadSelf, VERR_SEM_LV_INVALID_PARAMETER);
    AssertReturn(pThreadSelf->u32Magic == RTTHREADINT_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    Assert(pThreadSelf == RTThreadSelf());

    AssertReturn(RTTHREAD_IS_SLEEPING(enmSleepState), VERR_SEM_LV_INVALID_PARAMETER);

    RTTHREADSTATE enmThreadState = rtThreadGetState(pThreadSelf);
    if (RT_UNLIKELY(enmThreadState != RTTHREADSTATE_RUNNING))
    {
        AssertReturn(   enmThreadState == RTTHREADSTATE_TERMINATED   /* rtThreadRemove uses locks too */
                     || enmThreadState == RTTHREADSTATE_INITIALIZING /* rtThreadInsert uses locks too */
                     , VERR_SEM_LV_INVALID_PARAMETER);
        enmSleepState = enmThreadState;
    }

    /*
     * Record the location.
     */
    rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pRec, pRecU);
    rtLockValidatorSrcPosCopy(&pThreadSelf->LockValidator.SrcPos, pSrcPos);
    ASMAtomicWriteBool(&pThreadSelf->LockValidator.fInValidator, true);
    pThreadSelf->LockValidator.enmRecState = enmSleepState;
    rtThreadSetState(pThreadSelf, enmSleepState);

    /*
     * Don't do deadlock detection if we're recursing.
     */
    int rc = VINF_SUCCESS;
    /* Signaller records (event semaphores) have no owner-based recursion. */
    PRTLOCKVALRECUNION pEntry = !pRecU->Shared.fSignaller
                              ? rtLockValidatorRecSharedFindOwner(&pRecU->Shared, pThreadSelf, NULL)
                              : NULL;
    if (pEntry)
    {
        if (   !fRecursiveOk
            || (   pRec->hClass
                && !pRec->hClass->fRecursionOk)
            )
        {
            rtLockValComplainFirst("Recursion not allowed!", pSrcPos, pThreadSelf, pRecU, true);
            rtLockValComplainPanic();
            rc =  VERR_SEM_LV_NESTED;
        }
    }
    /*
     * Perform deadlock detection.
     */
    else if (   pRec->hClass
             && (   pRec->hClass->cMsMinDeadlock == RT_INDEFINITE_WAIT
                 || pRec->hClass->cMsMinDeadlock > cMillies))
        rc = VINF_SUCCESS;
    else if (!rtLockValidatorIsSimpleNoDeadlockCase(pRecU))
        rc = rtLockValidatorDeadlockDetection(pRecU, pThreadSelf, pSrcPos);

    if (RT_SUCCESS(rc))
        ASMAtomicWriteBool(&pThreadSelf->fReallySleeping, fReallySleeping);
    else
    {
        /* Roll back the blocking registration on failure. */
        rtThreadSetState(pThreadSelf, enmThreadState);
        rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pRec, NULL);
    }
    ASMAtomicWriteBool(&pThreadSelf->LockValidator.fInValidator, false);
    return rc;
}
RT_EXPORT_SYMBOL(RTLockValidatorRecSharedCheckBlocking);
3695
3696
3697RTDECL(int) RTLockValidatorRecSharedCheckOrderAndBlocking(PRTLOCKVALRECSHRD pRec, RTTHREAD hThreadSelf,
3698 PCRTLOCKVALSRCPOS pSrcPos, bool fRecursiveOk, RTMSINTERVAL cMillies,
3699 RTTHREADSTATE enmSleepState, bool fReallySleeping)
3700{
3701 int rc = RTLockValidatorRecSharedCheckOrder(pRec, hThreadSelf, pSrcPos, cMillies);
3702 if (RT_SUCCESS(rc))
3703 rc = RTLockValidatorRecSharedCheckBlocking(pRec, hThreadSelf, pSrcPos, fRecursiveOk, cMillies,
3704 enmSleepState, fReallySleeping);
3705 return rc;
3706}
3707RT_EXPORT_SYMBOL(RTLockValidatorRecSharedCheckOrderAndBlocking);
3708
3709
3710/**
3711 * Allocates and initializes an owner entry for the shared lock record.
3712 *
3713 * @returns The new owner entry.
3714 * @param pRec The shared lock record.
3715 * @param pThreadSelf The calling thread and owner. Used for record
3716 * initialization and allocation.
3717 * @param pSrcPos The source position.
3718 */
3719DECLINLINE(PRTLOCKVALRECUNION)
3720rtLockValidatorRecSharedAllocOwner(PRTLOCKVALRECSHRD pRec, PRTTHREADINT pThreadSelf, PCRTLOCKVALSRCPOS pSrcPos)
3721{
3722 PRTLOCKVALRECUNION pEntry;
3723
3724 /*
3725 * Check if the thread has any statically allocated records we can easily
3726 * make use of.
3727 */
3728 unsigned iEntry = ASMBitFirstSetU32(ASMAtomicUoReadU32(&pThreadSelf->LockValidator.bmFreeShrdOwners));
3729 if ( iEntry > 0
3730 && ASMAtomicBitTestAndClear(&pThreadSelf->LockValidator.bmFreeShrdOwners, iEntry - 1))
3731 {
3732 pEntry = (PRTLOCKVALRECUNION)&pThreadSelf->LockValidator.aShrdOwners[iEntry - 1];
3733 Assert(!pEntry->ShrdOwner.fReserved);
3734 pEntry->ShrdOwner.fStaticAlloc = true;
3735 rtThreadGet(pThreadSelf);
3736 }
3737 else
3738 {
3739 pEntry = (PRTLOCKVALRECUNION)RTMemAlloc(sizeof(RTLOCKVALRECSHRDOWN));
3740 if (RT_UNLIKELY(!pEntry))
3741 return NULL;
3742 pEntry->ShrdOwner.fStaticAlloc = false;
3743 }
3744
3745 pEntry->Core.u32Magic = RTLOCKVALRECSHRDOWN_MAGIC;
3746 pEntry->ShrdOwner.cRecursion = 1;
3747 pEntry->ShrdOwner.fReserved = true;
3748 pEntry->ShrdOwner.hThread = pThreadSelf;
3749 pEntry->ShrdOwner.pDown = NULL;
3750 pEntry->ShrdOwner.pSharedRec = pRec;
3751#if HC_ARCH_BITS == 32
3752 pEntry->ShrdOwner.pvReserved = NULL;
3753#endif
3754 if (pSrcPos)
3755 pEntry->ShrdOwner.SrcPos = *pSrcPos;
3756 else
3757 rtLockValidatorSrcPosInit(&pEntry->ShrdOwner.SrcPos);
3758 return pEntry;
3759}
3760
3761
3762/**
3763 * Frees an owner entry allocated by rtLockValidatorRecSharedAllocOwner.
3764 *
3765 * @param pEntry The owner entry.
3766 */
3767DECLINLINE(void) rtLockValidatorRecSharedFreeOwner(PRTLOCKVALRECSHRDOWN pEntry)
3768{
3769 if (pEntry)
3770 {
3771 Assert(pEntry->Core.u32Magic == RTLOCKVALRECSHRDOWN_MAGIC);
3772 ASMAtomicWriteU32(&pEntry->Core.u32Magic, RTLOCKVALRECSHRDOWN_MAGIC_DEAD);
3773
3774 PRTTHREADINT pThread;
3775 ASMAtomicXchgHandle(&pEntry->hThread, NIL_RTTHREAD, &pThread);
3776
3777 Assert(pEntry->fReserved);
3778 pEntry->fReserved = false;
3779
3780 if (pEntry->fStaticAlloc)
3781 {
3782 AssertPtrReturnVoid(pThread);
3783 AssertReturnVoid(pThread->u32Magic == RTTHREADINT_MAGIC);
3784
3785 uintptr_t iEntry = pEntry - &pThread->LockValidator.aShrdOwners[0];
3786 AssertReleaseReturnVoid(iEntry < RT_ELEMENTS(pThread->LockValidator.aShrdOwners));
3787
3788 Assert(!ASMBitTest(&pThread->LockValidator.bmFreeShrdOwners, (int32_t)iEntry));
3789 ASMAtomicBitSet(&pThread->LockValidator.bmFreeShrdOwners, (int32_t)iEntry);
3790
3791 rtThreadRelease(pThread);
3792 }
3793 else
3794 {
3795 rtLockValidatorSerializeDestructEnter();
3796 rtLockValidatorSerializeDestructLeave();
3797
3798 RTMemFree(pEntry);
3799 }
3800 }
3801}
3802
3803
/**
 * Make more room in the table.
 *
 * Called when cEntries has raced past cAllocated.  The caller holds the
 * detection lock on entry; this worker temporarily trades it for the
 * destruction lock while reallocating.
 *
 * @retval true on success.  The detection lock is held again on return.
 * @retval false if we're out of memory or running into a bad race condition
 *         (probably a bug somewhere).  No longer holding the lock.
 *
 * @param   pShared             The shared lock record.
 */
static bool rtLockValidatorRecSharedMakeRoom(PRTLOCKVALRECSHRD pShared)
{
    for (unsigned i = 0; i < 1000; i++)
    {
        /*
         * Switch to the other data access direction.
         */
        rtLockValidatorSerializeDetectionLeave();
        if (i >= 10)
        {
            Assert(i != 10 && i != 100); /* first time we get here; flag excessive looping */
            /* Back off: 0 ms (yield) for rounds 10..99, 1 ms from round 100 on. */
            RTThreadSleep(i >= 100);
        }
        rtLockValidatorSerializeDestructEnter();

        /*
         * Try grab the privilege to reallocating the table.
         */
        if (   pShared->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
            && ASMAtomicCmpXchgBool(&pShared->fReallocating, true, false))
        {
            uint32_t cAllocated = pShared->cAllocated;
            if (cAllocated < pShared->cEntries)
            {
                /*
                 * Ok, still not enough space.  Reallocate the table.
                 * Grow in batches of 16 pointers to limit realloc traffic.
                 */
                uint32_t cInc = RT_ALIGN_32(pShared->cEntries - cAllocated, 16);
                PRTLOCKVALRECSHRDOWN *papOwners;
                papOwners = (PRTLOCKVALRECSHRDOWN *)RTMemRealloc((void *)pShared->papOwners,
                                                                 (cAllocated + cInc) * sizeof(void *));
                if (!papOwners)
                {
                    ASMAtomicWriteBool(&pShared->fReallocating, false);
                    rtLockValidatorSerializeDestructLeave();
                    /* RTMemRealloc will assert */
                    return false;
                }

                /* NULL the new slots so they can be claimed with CmpXchg(NULL -> entry). */
                while (cInc-- > 0)
                {
                    papOwners[cAllocated] = NULL;
                    cAllocated++;
                }

                /* Publish pointer before the new size so readers never index past the table. */
                ASMAtomicWritePtr(&pShared->papOwners, papOwners);
                ASMAtomicWriteU32(&pShared->cAllocated, cAllocated);
            }
            ASMAtomicWriteBool(&pShared->fReallocating, false);
        }
        rtLockValidatorSerializeDestructLeave();

        rtLockValidatorSerializeDetectionEnter();
        if (RT_UNLIKELY(pShared->Core.u32Magic != RTLOCKVALRECSHRD_MAGIC))
            break; /* record destroyed while we were juggling locks */

        if (pShared->cAllocated >= pShared->cEntries)
            return true; /* someone (maybe us) made enough room; detection lock held */
    }

    rtLockValidatorSerializeDetectionLeave();
    AssertFailed(); /* too many iterations or destroyed while racing. */
    return false;
}
3877
3878
/**
 * Adds an owner entry to a shared lock record.
 *
 * @returns true on success, false on a serious race or if we're out of memory.
 * @param   pShared             The shared lock record.
 * @param   pEntry              The owner entry.
 */
DECLINLINE(bool) rtLockValidatorRecSharedAddOwner(PRTLOCKVALRECSHRD pShared, PRTLOCKVALRECSHRDOWN pEntry)
{
    rtLockValidatorSerializeDetectionEnter();
    if (RT_LIKELY(pShared->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC)) /* paranoia */
    {
        /* Bump the entry count first; grow the table if that pushed us past the allocation. */
        if (   ASMAtomicIncU32(&pShared->cEntries) > pShared->cAllocated /** @todo add fudge */
            && !rtLockValidatorRecSharedMakeRoom(pShared))
            return false; /* the worker has already left the detection lock */

        /* Claim the first free (NULL) slot via compare-exchange. */
        PRTLOCKVALRECSHRDOWN volatile *papOwners = pShared->papOwners;
        uint32_t const cMax = pShared->cAllocated;
        for (unsigned i = 0; i < 100; i++)
        {
            for (uint32_t iEntry = 0; iEntry < cMax; iEntry++)
            {
                if (ASMAtomicCmpXchgPtr(&papOwners[iEntry], pEntry, NULL))
                {
                    rtLockValidatorSerializeDetectionLeave();
                    return true;
                }
            }
            Assert(i != 25); /* flag excessive retries - shouldn't happen since we reserved a slot above */
        }
        AssertFailed();
    }
    rtLockValidatorSerializeDetectionLeave();
    return false;
}
3914
3915
/**
 * Remove an owner entry from a shared lock record and free it.
 *
 * @param   pShared             The shared lock record.
 * @param   pEntry              The owner entry to remove.
 * @param   iEntry              The last known index of the entry (a hint; the
 *                              table is rescanned if the hint is stale).
 */
DECLINLINE(void) rtLockValidatorRecSharedRemoveAndFreeOwner(PRTLOCKVALRECSHRD pShared, PRTLOCKVALRECSHRDOWN pEntry,
                                                            uint32_t iEntry)
{
    /*
     * Remove it from the table.
     */
    rtLockValidatorSerializeDetectionEnter();
    AssertReturnVoidStmt(pShared->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, rtLockValidatorSerializeDetectionLeave());
    if (RT_UNLIKELY(   iEntry >= pShared->cAllocated
                    || !ASMAtomicCmpXchgPtr(&pShared->papOwners[iEntry], NULL, pEntry)))
    {
        /* this shouldn't happen yet... fall back to scanning the whole table. */
        AssertFailed();
        PRTLOCKVALRECSHRDOWN volatile *papOwners = pShared->papOwners;
        uint32_t const cMax = pShared->cAllocated;
        for (iEntry = 0; iEntry < cMax; iEntry++)
            if (ASMAtomicCmpXchgPtr(&papOwners[iEntry], NULL, pEntry))
                break;
        AssertReturnVoidStmt(iEntry < cMax, rtLockValidatorSerializeDetectionLeave());
    }
    uint32_t cNow = ASMAtomicDecU32(&pShared->cEntries);
    Assert(!(cNow & RT_BIT_32(31))); NOREF(cNow); /* the counter must not underflow */
    rtLockValidatorSerializeDetectionLeave();

    /*
     * Successfully removed, now free it.
     */
    rtLockValidatorRecSharedFreeOwner(pEntry);
}
3952
3953
/**
 * Resets a signaller record: frees all current owner entries and, if
 * @a hThread is given, installs it as the sole owner.
 */
RTDECL(void) RTLockValidatorRecSharedResetOwner(PRTLOCKVALRECSHRD pRec, RTTHREAD hThread, PCRTLOCKVALSRCPOS pSrcPos)
{
    AssertReturnVoid(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC);
    if (!pRec->fEnabled)
        return;
    AssertReturnVoid(hThread == NIL_RTTHREAD || hThread->u32Magic == RTTHREADINT_MAGIC);
    AssertReturnVoid(pRec->fSignaller); /* only valid for signaller (event semaphore style) records */

    /*
     * Free all current owners.
     *
     * Note: the detection lock is dropped around each free, so cAllocated and
     * papOwners are re-read afterwards in case the table was reallocated.
     */
    rtLockValidatorSerializeDetectionEnter();
    while (ASMAtomicUoReadU32(&pRec->cEntries) > 0)
    {
        AssertReturnVoidStmt(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, rtLockValidatorSerializeDetectionLeave());
        uint32_t iEntry = 0;
        uint32_t cEntries = pRec->cAllocated;
        PRTLOCKVALRECSHRDOWN volatile *papEntries = pRec->papOwners;
        while (iEntry < cEntries)
        {
            /* Claim the slot by exchanging in NULL; only the winner frees the entry. */
            PRTLOCKVALRECSHRDOWN pEntry = ASMAtomicXchgPtrT(&papEntries[iEntry], NULL, PRTLOCKVALRECSHRDOWN);
            if (pEntry)
            {
                ASMAtomicDecU32(&pRec->cEntries);
                rtLockValidatorSerializeDetectionLeave();

                rtLockValidatorRecSharedFreeOwner(pEntry);

                rtLockValidatorSerializeDetectionEnter();
                if (ASMAtomicUoReadU32(&pRec->cEntries) == 0)
                    break;
                cEntries = pRec->cAllocated;
                papEntries = pRec->papOwners;
            }
            iEntry++;
        }
    }
    rtLockValidatorSerializeDetectionLeave();

    if (hThread != NIL_RTTHREAD)
    {
        /*
         * Allocate a new owner entry and insert it into the table.
         */
        PRTLOCKVALRECUNION pEntry = rtLockValidatorRecSharedAllocOwner(pRec, hThread, pSrcPos);
        if (   pEntry
            && !rtLockValidatorRecSharedAddOwner(pRec, &pEntry->ShrdOwner))
            rtLockValidatorRecSharedFreeOwner(&pEntry->ShrdOwner);
    }
}
RT_EXPORT_SYMBOL(RTLockValidatorRecSharedResetOwner);
4005
4006
/**
 * Registers @a hThread as an owner of the shared lock record, either by
 * bumping the recursion count of an existing entry or adding a new one.
 */
RTDECL(void) RTLockValidatorRecSharedAddOwner(PRTLOCKVALRECSHRD pRec, RTTHREAD hThread, PCRTLOCKVALSRCPOS pSrcPos)
{
    AssertReturnVoid(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC);
    if (!pRec->fEnabled)
        return;
    if (hThread == NIL_RTTHREAD)
    {
        /* Adopt alien (non-IPRT) threads so they can be tracked too. */
        hThread = RTThreadSelfAutoAdopt();
        AssertReturnVoid(hThread != NIL_RTTHREAD);
    }
    AssertReturnVoid(hThread->u32Magic == RTTHREADINT_MAGIC);

    /*
     * Recursive?
     *
     * Note! This code can be optimized to try avoid scanning the table on
     *       insert. However, that's annoying work that makes the code big,
     *       so it can wait til later sometime.
     */
    PRTLOCKVALRECUNION pEntry = rtLockValidatorRecSharedFindOwner(pRec, hThread, NULL);
    if (pEntry)
    {
        Assert(!pRec->fSignaller); /* signaller records are not expected to recurse */
        pEntry->ShrdOwner.cRecursion++;
        rtLockValidatorStackPushRecursion(hThread, pEntry, pSrcPos);
        return;
    }

    /*
     * Allocate a new owner entry and insert it into the table.
     */
    pEntry = rtLockValidatorRecSharedAllocOwner(pRec, hThread, pSrcPos);
    if (pEntry)
    {
        if (rtLockValidatorRecSharedAddOwner(pRec, &pEntry->ShrdOwner))
        {
            /* Only real lock acquisitions go on the per-thread lock stack. */
            if (!pRec->fSignaller)
                rtLockValidatorStackPush(hThread, pEntry);
        }
        else
            rtLockValidatorRecSharedFreeOwner(&pEntry->ShrdOwner);
    }
}
RT_EXPORT_SYMBOL(RTLockValidatorRecSharedAddOwner);
4051
4052
/**
 * Deregisters @a hThread as an owner of the shared lock record, unwinding one
 * level of recursion or removing (and freeing) the owner entry entirely.
 */
RTDECL(void) RTLockValidatorRecSharedRemoveOwner(PRTLOCKVALRECSHRD pRec, RTTHREAD hThread)
{
    AssertReturnVoid(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC);
    if (!pRec->fEnabled)
        return;
    if (hThread == NIL_RTTHREAD)
    {
        hThread = RTThreadSelfAutoAdopt();
        AssertReturnVoid(hThread != NIL_RTTHREAD);
    }
    AssertReturnVoid(hThread->u32Magic == RTTHREADINT_MAGIC);

    /*
     * Find the entry, hoping it's a recursive one.
     */
    uint32_t iEntry = UINT32_MAX; /* shuts up gcc */
    PRTLOCKVALRECUNION pEntry = rtLockValidatorRecSharedFindOwner(pRec, hThread, &iEntry);
    AssertReturnVoid(pEntry);
    AssertReturnVoid(pEntry->ShrdOwner.cRecursion > 0);

    uint32_t c = --pEntry->ShrdOwner.cRecursion;
    if (c == 0)
    {
        /* Last reference: pop it off the thread's lock stack and free the entry. */
        if (!pRec->fSignaller)
            rtLockValidatorStackPop(hThread, (PRTLOCKVALRECUNION)pEntry);
        rtLockValidatorRecSharedRemoveAndFreeOwner(pRec, &pEntry->ShrdOwner, iEntry);
    }
    else
    {
        Assert(!pRec->fSignaller);
        rtLockValidatorStackPopRecursion(hThread, pEntry);
    }
}
RT_EXPORT_SYMBOL(RTLockValidatorRecSharedRemoveOwner);
4087
4088
4089RTDECL(bool) RTLockValidatorRecSharedIsOwner(PRTLOCKVALRECSHRD pRec, RTTHREAD hThread)
4090{
4091 /* Validate and resolve input. */
4092 AssertReturn(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, false);
4093 if (!pRec->fEnabled)
4094 return false;
4095 if (hThread == NIL_RTTHREAD)
4096 {
4097 hThread = RTThreadSelfAutoAdopt();
4098 AssertReturn(hThread != NIL_RTTHREAD, false);
4099 }
4100 AssertReturn(hThread->u32Magic == RTTHREADINT_MAGIC, false);
4101
4102 /* Do the job. */
4103 PRTLOCKVALRECUNION pEntry = rtLockValidatorRecSharedFindOwner(pRec, hThread, NULL);
4104 return pEntry != NULL;
4105}
4106RT_EXPORT_SYMBOL(RTLockValidatorRecSharedIsOwner);
4107
4108
/**
 * Validates that the calling thread may release the shared lock and updates
 * the validator state accordingly.
 *
 * @returns VINF_SUCCESS, or VERR_SEM_LV_* on validation failure.
 */
RTDECL(int) RTLockValidatorRecSharedCheckAndRelease(PRTLOCKVALRECSHRD pRec, RTTHREAD hThreadSelf)
{
    AssertReturn(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    if (!pRec->fEnabled)
        return VINF_SUCCESS;
    if (hThreadSelf == NIL_RTTHREAD)
    {
        hThreadSelf = RTThreadSelfAutoAdopt();
        AssertReturn(hThreadSelf != NIL_RTTHREAD, VERR_SEM_LV_INTERNAL_ERROR);
    }
    Assert(hThreadSelf == RTThreadSelf());
    AssertReturn(hThreadSelf->u32Magic == RTTHREADINT_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);

    /*
     * Locate the entry for this thread in the table.
     */
    uint32_t iEntry = 0;
    PRTLOCKVALRECUNION pEntry = rtLockValidatorRecSharedFindOwner(pRec, hThreadSelf, &iEntry);
    if (RT_UNLIKELY(!pEntry))
    {
        rtLockValComplainFirst("Not owner (shared)!", NULL, hThreadSelf, (PRTLOCKVALRECUNION)pRec, true);
        rtLockValComplainPanic();
        return VERR_SEM_LV_NOT_OWNER;
    }

    /*
     * Check the release order.
     * (Only when the class demands strict ordering and does lock-order validation.)
     */
    if (   pRec->hClass != NIL_RTLOCKVALCLASS
        && pRec->hClass->fStrictReleaseOrder
        && pRec->hClass->cMsMinOrder != RT_INDEFINITE_WAIT
       )
    {
        int rc = rtLockValidatorStackCheckReleaseOrder(hThreadSelf, (PRTLOCKVALRECUNION)pEntry);
        if (RT_FAILURE(rc))
            return rc;
    }

    /*
     * Release the ownership or unwind a level of recursion.
     */
    Assert(pEntry->ShrdOwner.cRecursion > 0);
    uint32_t c = --pEntry->ShrdOwner.cRecursion;
    if (c == 0)
    {
        rtLockValidatorStackPop(hThreadSelf, pEntry);
        rtLockValidatorRecSharedRemoveAndFreeOwner(pRec, &pEntry->ShrdOwner, iEntry);
    }
    else
        rtLockValidatorStackPopRecursion(hThreadSelf, pEntry);

    return VINF_SUCCESS;
}
4162
4163
4164RTDECL(int) RTLockValidatorRecSharedCheckSignaller(PRTLOCKVALRECSHRD pRec, RTTHREAD hThreadSelf)
4165{
4166 AssertReturn(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
4167 if (!pRec->fEnabled)
4168 return VINF_SUCCESS;
4169 if (hThreadSelf == NIL_RTTHREAD)
4170 {
4171 hThreadSelf = RTThreadSelfAutoAdopt();
4172 AssertReturn(hThreadSelf != NIL_RTTHREAD, VERR_SEM_LV_INTERNAL_ERROR);
4173 }
4174 Assert(hThreadSelf == RTThreadSelf());
4175 AssertReturn(hThreadSelf->u32Magic == RTTHREADINT_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
4176
4177 /*
4178 * Locate the entry for this thread in the table.
4179 */
4180 uint32_t iEntry = 0;
4181 PRTLOCKVALRECUNION pEntry = rtLockValidatorRecSharedFindOwner(pRec, hThreadSelf, &iEntry);
4182 if (RT_UNLIKELY(!pEntry))
4183 {
4184 rtLockValComplainFirst("Invalid signaller!", NULL, hThreadSelf, (PRTLOCKVALRECUNION)pRec, true);
4185 rtLockValComplainPanic();
4186 return VERR_SEM_LV_NOT_SIGNALLER;
4187 }
4188 return VINF_SUCCESS;
4189}
4190
4191
4192RTDECL(int32_t) RTLockValidatorWriteLockGetCount(RTTHREAD Thread)
4193{
4194 if (Thread == NIL_RTTHREAD)
4195 return 0;
4196
4197 PRTTHREADINT pThread = rtThreadGet(Thread);
4198 if (!pThread)
4199 return VERR_INVALID_HANDLE;
4200 int32_t cWriteLocks = ASMAtomicReadS32(&pThread->LockValidator.cWriteLocks);
4201 rtThreadRelease(pThread);
4202 return cWriteLocks;
4203}
4204RT_EXPORT_SYMBOL(RTLockValidatorWriteLockGetCount);
4205
4206
/**
 * Increments the write lock count for the given thread.
 */
RTDECL(void) RTLockValidatorWriteLockInc(RTTHREAD Thread)
{
    PRTTHREADINT pThread = rtThreadGet(Thread);
    AssertReturnVoid(pThread); /* bail out (asserting) if the handle can't be resolved */
    ASMAtomicIncS32(&pThread->LockValidator.cWriteLocks);
    rtThreadRelease(pThread);
}
RT_EXPORT_SYMBOL(RTLockValidatorWriteLockInc);
4215
4216
/**
 * Decrements the write lock count for the given thread.
 */
RTDECL(void) RTLockValidatorWriteLockDec(RTTHREAD Thread)
{
    PRTTHREADINT pThread = rtThreadGet(Thread);
    AssertReturnVoid(pThread); /* bail out (asserting) if the handle can't be resolved */
    ASMAtomicDecS32(&pThread->LockValidator.cWriteLocks);
    rtThreadRelease(pThread);
}
RT_EXPORT_SYMBOL(RTLockValidatorWriteLockDec);
4225
4226
4227RTDECL(int32_t) RTLockValidatorReadLockGetCount(RTTHREAD Thread)
4228{
4229 if (Thread == NIL_RTTHREAD)
4230 return 0;
4231
4232 PRTTHREADINT pThread = rtThreadGet(Thread);
4233 if (!pThread)
4234 return VERR_INVALID_HANDLE;
4235 int32_t cReadLocks = ASMAtomicReadS32(&pThread->LockValidator.cReadLocks);
4236 rtThreadRelease(pThread);
4237 return cReadLocks;
4238}
4239RT_EXPORT_SYMBOL(RTLockValidatorReadLockGetCount);
4240
4241
4242RTDECL(void) RTLockValidatorReadLockInc(RTTHREAD Thread)
4243{
4244 PRTTHREADINT pThread = rtThreadGet(Thread);
4245 Assert(pThread);
4246 ASMAtomicIncS32(&pThread->LockValidator.cReadLocks);
4247 rtThreadRelease(pThread);
4248}
4249RT_EXPORT_SYMBOL(RTLockValidatorReadLockInc);
4250
4251
4252RTDECL(void) RTLockValidatorReadLockDec(RTTHREAD Thread)
4253{
4254 PRTTHREADINT pThread = rtThreadGet(Thread);
4255 Assert(pThread);
4256 ASMAtomicDecS32(&pThread->LockValidator.cReadLocks);
4257 rtThreadRelease(pThread);
4258}
4259RT_EXPORT_SYMBOL(RTLockValidatorReadLockDec);
4260
4261
/**
 * Queries which lock (if any) the given thread is currently blocking on.
 *
 * @returns The lock handle the thread is sleeping on, or NULL if it isn't
 *          sleeping or the record cannot be resolved.
 */
RTDECL(void *) RTLockValidatorQueryBlocking(RTTHREAD hThread)
{
    void           *pvLock  = NULL;
    PRTTHREADINT    pThread = rtThreadGet(hThread);
    if (pThread)
    {
        RTTHREADSTATE enmState = rtThreadGetState(pThread);
        if (RTTHREAD_IS_SLEEPING(enmState))
        {
            rtLockValidatorSerializeDetectionEnter();

            /* Re-check the state now that we hold the detection lock. */
            enmState = rtThreadGetState(pThread);
            if (RTTHREAD_IS_SLEEPING(enmState))
            {
                PRTLOCKVALRECUNION pRec = rtLockValidatorReadRecUnionPtr(&pThread->LockValidator.pRec);
                if (pRec)
                {
                    switch (pRec->Core.u32Magic)
                    {
                        case RTLOCKVALRECEXCL_MAGIC:
                            pvLock = pRec->Excl.hLock;
                            break;

                        case RTLOCKVALRECSHRDOWN_MAGIC:
                            /* Owner records point to the shared record holding the lock handle. */
                            pRec = (PRTLOCKVALRECUNION)pRec->ShrdOwner.pSharedRec;
                            if (!pRec || pRec->Core.u32Magic != RTLOCKVALRECSHRD_MAGIC)
                                break;
                            RT_FALL_THRU();
                        case RTLOCKVALRECSHRD_MAGIC:
                            pvLock = pRec->Shared.hLock;
                            break;
                    }
                    /* Discard the result if the thread state changed while we looked. */
                    if (RTThreadGetState(pThread) != enmState)
                        pvLock = NULL;
                }
            }

            rtLockValidatorSerializeDetectionLeave();
        }
        rtThreadRelease(pThread);
    }
    return pvLock;
}
RT_EXPORT_SYMBOL(RTLockValidatorQueryBlocking);
4306
4307
4308RTDECL(bool) RTLockValidatorIsBlockedThreadInValidator(RTTHREAD hThread)
4309{
4310 bool fRet = false;
4311 PRTTHREADINT pThread = rtThreadGet(hThread);
4312 if (pThread)
4313 {
4314 fRet = ASMAtomicReadBool(&pThread->LockValidator.fInValidator);
4315 rtThreadRelease(pThread);
4316 }
4317 return fRet;
4318}
4319RT_EXPORT_SYMBOL(RTLockValidatorIsBlockedThreadInValidator);
4320
4321
4322RTDECL(bool) RTLockValidatorHoldsLocksInClass(RTTHREAD hCurrentThread, RTLOCKVALCLASS hClass)
4323{
4324 bool fRet = false;
4325 if (hCurrentThread == NIL_RTTHREAD)
4326 hCurrentThread = RTThreadSelf();
4327 else
4328 Assert(hCurrentThread == RTThreadSelf());
4329 PRTTHREADINT pThread = rtThreadGet(hCurrentThread);
4330 if (pThread)
4331 {
4332 if (hClass != NIL_RTLOCKVALCLASS)
4333 {
4334 PRTLOCKVALRECUNION pCur = rtLockValidatorReadRecUnionPtr(&pThread->LockValidator.pStackTop);
4335 while (VALID_PTR(pCur) && !fRet)
4336 {
4337 switch (pCur->Core.u32Magic)
4338 {
4339 case RTLOCKVALRECEXCL_MAGIC:
4340 fRet = pCur->Excl.hClass == hClass;
4341 pCur = rtLockValidatorReadRecUnionPtr(&pCur->Excl.pDown);
4342 break;
4343 case RTLOCKVALRECSHRDOWN_MAGIC:
4344 fRet = VALID_PTR(pCur->ShrdOwner.pSharedRec)
4345 && pCur->ShrdOwner.pSharedRec->hClass == hClass;
4346 pCur = rtLockValidatorReadRecUnionPtr(&pCur->ShrdOwner.pDown);
4347 break;
4348 case RTLOCKVALRECNEST_MAGIC:
4349 switch (pCur->Nest.pRec->Core.u32Magic)
4350 {
4351 case RTLOCKVALRECEXCL_MAGIC:
4352 fRet = pCur->Nest.pRec->Excl.hClass == hClass;
4353 break;
4354 case RTLOCKVALRECSHRDOWN_MAGIC:
4355 fRet = VALID_PTR(pCur->ShrdOwner.pSharedRec)
4356 && pCur->Nest.pRec->ShrdOwner.pSharedRec->hClass == hClass;
4357 break;
4358 }
4359 pCur = rtLockValidatorReadRecUnionPtr(&pCur->Nest.pDown);
4360 break;
4361 default:
4362 pCur = NULL;
4363 break;
4364 }
4365 }
4366 }
4367
4368 rtThreadRelease(pThread);
4369 }
4370 return fRet;
4371}
4372RT_EXPORT_SYMBOL(RTLockValidatorHoldsLocksInClass);
4373
4374
4375RTDECL(bool) RTLockValidatorHoldsLocksInSubClass(RTTHREAD hCurrentThread, RTLOCKVALCLASS hClass, uint32_t uSubClass)
4376{
4377 bool fRet = false;
4378 if (hCurrentThread == NIL_RTTHREAD)
4379 hCurrentThread = RTThreadSelf();
4380 else
4381 Assert(hCurrentThread == RTThreadSelf());
4382 PRTTHREADINT pThread = rtThreadGet(hCurrentThread);
4383 if (pThread)
4384 {
4385 if (hClass != NIL_RTLOCKVALCLASS)
4386 {
4387 PRTLOCKVALRECUNION pCur = rtLockValidatorReadRecUnionPtr(&pThread->LockValidator.pStackTop);
4388 while (VALID_PTR(pCur) && !fRet)
4389 {
4390 switch (pCur->Core.u32Magic)
4391 {
4392 case RTLOCKVALRECEXCL_MAGIC:
4393 fRet = pCur->Excl.hClass == hClass
4394 && pCur->Excl.uSubClass == uSubClass;
4395 pCur = rtLockValidatorReadRecUnionPtr(&pCur->Excl.pDown);
4396 break;
4397 case RTLOCKVALRECSHRDOWN_MAGIC:
4398 fRet = VALID_PTR(pCur->ShrdOwner.pSharedRec)
4399 && pCur->ShrdOwner.pSharedRec->hClass == hClass
4400 && pCur->ShrdOwner.pSharedRec->uSubClass == uSubClass;
4401 pCur = rtLockValidatorReadRecUnionPtr(&pCur->ShrdOwner.pDown);
4402 break;
4403 case RTLOCKVALRECNEST_MAGIC:
4404 switch (pCur->Nest.pRec->Core.u32Magic)
4405 {
4406 case RTLOCKVALRECEXCL_MAGIC:
4407 fRet = pCur->Nest.pRec->Excl.hClass == hClass
4408 && pCur->Nest.pRec->Excl.uSubClass == uSubClass;
4409 break;
4410 case RTLOCKVALRECSHRDOWN_MAGIC:
4411 fRet = VALID_PTR(pCur->ShrdOwner.pSharedRec)
4412 && pCur->Nest.pRec->ShrdOwner.pSharedRec->hClass == hClass
4413 && pCur->Nest.pRec->ShrdOwner.pSharedRec->uSubClass == uSubClass;
4414 break;
4415 }
4416 pCur = rtLockValidatorReadRecUnionPtr(&pCur->Nest.pDown);
4417 break;
4418 default:
4419 pCur = NULL;
4420 break;
4421 }
4422 }
4423 }
4424
4425 rtThreadRelease(pThread);
4426 }
4427 return fRet;
4428}
4429RT_EXPORT_SYMBOL(RTLockValidatorHoldsLocksInClass);
4430
4431
4432RTDECL(bool) RTLockValidatorSetEnabled(bool fEnabled)
4433{
4434 return ASMAtomicXchgBool(&g_fLockValidatorEnabled, fEnabled);
4435}
4436RT_EXPORT_SYMBOL(RTLockValidatorSetEnabled);
4437
4438
4439RTDECL(bool) RTLockValidatorIsEnabled(void)
4440{
4441 return ASMAtomicUoReadBool(&g_fLockValidatorEnabled);
4442}
4443RT_EXPORT_SYMBOL(RTLockValidatorIsEnabled);
4444
4445
4446RTDECL(bool) RTLockValidatorSetQuiet(bool fQuiet)
4447{
4448 return ASMAtomicXchgBool(&g_fLockValidatorQuiet, fQuiet);
4449}
4450RT_EXPORT_SYMBOL(RTLockValidatorSetQuiet);
4451
4452
4453RTDECL(bool) RTLockValidatorIsQuiet(void)
4454{
4455 return ASMAtomicUoReadBool(&g_fLockValidatorQuiet);
4456}
4457RT_EXPORT_SYMBOL(RTLockValidatorIsQuiet);
4458
4459
4460RTDECL(bool) RTLockValidatorSetMayPanic(bool fMayPanic)
4461{
4462 return ASMAtomicXchgBool(&g_fLockValidatorMayPanic, fMayPanic);
4463}
4464RT_EXPORT_SYMBOL(RTLockValidatorSetMayPanic);
4465
4466
4467RTDECL(bool) RTLockValidatorMayPanic(void)
4468{
4469 return ASMAtomicUoReadBool(&g_fLockValidatorMayPanic);
4470}
4471RT_EXPORT_SYMBOL(RTLockValidatorMayPanic);
4472
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette