VirtualBox

source: vbox/trunk/src/VBox/Runtime/common/misc/lockvalidator.cpp@ 79786

Last change on this file since 79786 was 79786, checked in by vboxsync, 5 years ago

IPRT/lockvalidator: Must serialize for detection before calling rtLockValidatorDeadlockDetection since its worker (rtLockValidatorDdDoDetection) accesses volatile structures like RTLOCKVALRECSHRD::Shared.papOwners that can be reallocated to make room for more entries.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 160.4 KB
Line 
1/* $Id: lockvalidator.cpp 79786 2019-07-15 11:08:36Z vboxsync $ */
2/** @file
3 * IPRT - Lock Validator.
4 */
5
6/*
7 * Copyright (C) 2009-2019 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 */
26
27
28/*********************************************************************************************************************************
29* Header Files *
30*********************************************************************************************************************************/
31#include <iprt/lockvalidator.h>
32#include "internal/iprt.h"
33
34#include <iprt/asm.h>
35#include <iprt/assert.h>
36#include <iprt/env.h>
37#include <iprt/err.h>
38#include <iprt/mem.h>
39#include <iprt/once.h>
40#include <iprt/semaphore.h>
41#include <iprt/string.h>
42#include <iprt/thread.h>
43
44#include "internal/lockvalidator.h"
45#include "internal/magics.h"
46#include "internal/strhash.h"
47#include "internal/thread.h"
48
49
50/*********************************************************************************************************************************
51* Defined Constants And Macros *
52*********************************************************************************************************************************/
53/** Macro that asserts that a pointer is aligned correctly.
54 * Only used when fighting bugs. */
55#if 1
56# define RTLOCKVAL_ASSERT_PTR_ALIGN(p) \
57 AssertMsg(!((uintptr_t)(p) & (sizeof(uintptr_t) - 1)), ("%p\n", (p)));
58#else
59# define RTLOCKVAL_ASSERT_PTR_ALIGN(p) do { } while (0)
60#endif
61
62/** Hashes the class handle (pointer) into an apPriorLocksHash index. */
63#define RTLOCKVALCLASS_HASH(hClass) \
64 ( ((uintptr_t)(hClass) >> 6 ) \
65 % ( RT_SIZEOFMEMB(RTLOCKVALCLASSINT, apPriorLocksHash) \
66 / sizeof(PRTLOCKVALCLASSREF)) )
67
68/** The max value for RTLOCKVALCLASSINT::cRefs. */
69#define RTLOCKVALCLASS_MAX_REFS UINT32_C(0xffff0000)
70/** The max value for RTLOCKVALCLASSREF::cLookups. */
71#define RTLOCKVALCLASSREF_MAX_LOOKUPS UINT32_C(0xfffe0000)
72/** The absolute max value for RTLOCKVALCLASSREF::cLookups at which it will
73 * be set back to RTLOCKVALCLASSREF_MAX_LOOKUPS. */
74#define RTLOCKVALCLASSREF_MAX_LOOKUPS_FIX UINT32_C(0xffff0000)
75
76
77/** @def RTLOCKVAL_WITH_RECURSION_RECORDS
78 * Enable recursion records. */
79#if defined(IN_RING3) || defined(DOXYGEN_RUNNING)
80# define RTLOCKVAL_WITH_RECURSION_RECORDS 1
81#endif
82
83/** @def RTLOCKVAL_WITH_VERBOSE_DUMPS
84 * Enables some extra verbosity in the lock dumping. */
85#if defined(DOXYGEN_RUNNING)
86# define RTLOCKVAL_WITH_VERBOSE_DUMPS
87#endif
88
89/** @def RTLOCKVAL_WITH_CLASS_HASH_STATS
90 * Enables collection prior class hash lookup statistics, dumping them when
91 * complaining about the class. */
92#if defined(DEBUG) || defined(DOXYGEN_RUNNING)
93# define RTLOCKVAL_WITH_CLASS_HASH_STATS
94#endif
95
96
97/*********************************************************************************************************************************
98* Structures and Typedefs *
99*********************************************************************************************************************************/
100/**
101 * Deadlock detection stack entry.
102 */
103typedef struct RTLOCKVALDDENTRY
104{
105 /** The current record. */
106 PRTLOCKVALRECUNION pRec;
107 /** The current entry number if pRec is a shared one. */
108 uint32_t iEntry;
109 /** The thread state of the thread we followed to get to pFirstSibling.
110 * This is only used for validating a deadlock stack. */
111 RTTHREADSTATE enmState;
112 /** The thread we followed to get to pFirstSibling.
113 * This is only used for validating a deadlock stack. */
114 PRTTHREADINT pThread;
115 /** What pThread is waiting on, i.e. where we entered the circular list of
116 * siblings. This is used for validating a deadlock stack as well as
117 * terminating the sibling walk. */
118 PRTLOCKVALRECUNION pFirstSibling;
119} RTLOCKVALDDENTRY;
120
121
122/**
123 * Deadlock detection stack.
124 */
125typedef struct RTLOCKVALDDSTACK
126{
127 /** The number stack entries. */
128 uint32_t c;
129 /** The stack entries. */
130 RTLOCKVALDDENTRY a[32];
131} RTLOCKVALDDSTACK;
132/** Pointer to a deadlock detection stack. */
133typedef RTLOCKVALDDSTACK *PRTLOCKVALDDSTACK;
134
135
136/**
137 * Reference to another class.
138 */
139typedef struct RTLOCKVALCLASSREF
140{
141 /** The class. */
142 RTLOCKVALCLASS hClass;
143 /** The number of lookups of this class. */
144 uint32_t volatile cLookups;
145 /** Indicates whether the entry was added automatically during order checking
146 * (true) or manually via the API (false). */
147 bool fAutodidacticism;
148 /** Reserved / explicit alignment padding. */
149 bool afReserved[3];
150} RTLOCKVALCLASSREF;
151/** Pointer to a class reference. */
152typedef RTLOCKVALCLASSREF *PRTLOCKVALCLASSREF;
153
154
155/** Pointer to a chunk of class references. */
156typedef struct RTLOCKVALCLASSREFCHUNK *PRTLOCKVALCLASSREFCHUNK;
157/**
158 * Chunk of class references.
159 */
160typedef struct RTLOCKVALCLASSREFCHUNK
161{
162 /** Array of refs. */
163#if 0 /** @todo for testing allocation of new chunks. */
164 RTLOCKVALCLASSREF aRefs[ARCH_BITS == 32 ? 10 : 8];
165#else
166 RTLOCKVALCLASSREF aRefs[2];
167#endif
168 /** Pointer to the next chunk. */
169 PRTLOCKVALCLASSREFCHUNK volatile pNext;
170} RTLOCKVALCLASSREFCHUNK;
171
172
173/**
174 * Lock class.
175 */
176typedef struct RTLOCKVALCLASSINT
177{
178 /** AVL node core. */
179 AVLLU32NODECORE Core;
180 /** Magic value (RTLOCKVALCLASS_MAGIC). */
181 uint32_t volatile u32Magic;
182 /** Reference counter. See RTLOCKVALCLASS_MAX_REFS. */
183 uint32_t volatile cRefs;
184 /** Whether the class is allowed to teach it self new locking order rules. */
185 bool fAutodidact;
186 /** Whether to allow recursion. */
187 bool fRecursionOk;
188 /** Strict release order. */
189 bool fStrictReleaseOrder;
190 /** Whether this class is in the tree. */
191 bool fInTree;
192 /** Donate a reference to the next retainer. This is a hack to make
193 * RTLockValidatorClassCreateUnique work. */
194 bool volatile fDonateRefToNextRetainer;
195 /** Reserved future use / explicit alignment. */
196 bool afReserved[3];
197 /** The minimum wait interval for which we do deadlock detection
198 * (milliseconds). */
199 RTMSINTERVAL cMsMinDeadlock;
200 /** The minimum wait interval for which we do order checks (milliseconds). */
201 RTMSINTERVAL cMsMinOrder;
202 /** More padding. */
203 uint32_t au32Reserved[ARCH_BITS == 32 ? 5 : 2];
204 /** Classes that may be taken prior to this one.
205 * This is a linked list where each node contains a chunk of locks so that we
206 * reduce the number of allocations as well as localize the data. */
207 RTLOCKVALCLASSREFCHUNK PriorLocks;
208 /** Hash table containing frequently encountered prior locks. */
209 PRTLOCKVALCLASSREF apPriorLocksHash[17];
210 /** Class name. (Allocated after the end of the block as usual.) */
211 char const *pszName;
212 /** Where this class was created.
213 * This is mainly used for finding automatically created lock classes.
214 * @remarks The strings are stored after this structure so we won't crash
215 * if the class lives longer than the module (dll/so/dylib) that
216 * spawned it. */
217 RTLOCKVALSRCPOS CreatePos;
218#ifdef RTLOCKVAL_WITH_CLASS_HASH_STATS
219 /** Hash hits. */
220 uint32_t volatile cHashHits;
221 /** Hash misses. */
222 uint32_t volatile cHashMisses;
223#endif
224} RTLOCKVALCLASSINT;
225AssertCompileSize(AVLLU32NODECORE, ARCH_BITS == 32 ? 20 : 32);
226AssertCompileMemberOffset(RTLOCKVALCLASSINT, PriorLocks, 64);
227
228
229/*********************************************************************************************************************************
230* Global Variables *
231*********************************************************************************************************************************/
232/** Serializing object destruction and deadlock detection.
233 *
234 * This makes sure that none of the memory examined by the deadlock detection
235 * code will become invalid (reused for other purposes or made not present)
236 * while the detection is in progress.
237 *
 238 * NS: RTLOCKVALREC*, RTTHREADINT and RTLOCKVALRECSHRD::papOwners destruction.
239 * EW: Deadlock detection and some related activities.
240 */
241static RTSEMXROADS g_hLockValidatorXRoads = NIL_RTSEMXROADS;
242/** Serializing class tree insert and lookups. */
243static RTSEMRW g_hLockValClassTreeRWLock= NIL_RTSEMRW;
244/** Class tree. */
245static PAVLLU32NODECORE g_LockValClassTree = NULL;
246/** Critical section serializing the teaching new rules to the classes. */
247static RTCRITSECT g_LockValClassTeachCS;
248
249/** Whether the lock validator is enabled or disabled.
250 * Only applies to new locks. */
251static bool volatile g_fLockValidatorEnabled = true;
252/** Set if the lock validator is quiet. */
253#ifdef RT_STRICT
254static bool volatile g_fLockValidatorQuiet = false;
255#else
256static bool volatile g_fLockValidatorQuiet = true;
257#endif
258/** Set if the lock validator may panic. */
259#ifdef RT_STRICT
260static bool volatile g_fLockValidatorMayPanic = true;
261#else
262static bool volatile g_fLockValidatorMayPanic = false;
263#endif
264/** Whether to return an error status on wrong locking order. */
265static bool volatile g_fLockValSoftWrongOrder = false;
266
267
268/*********************************************************************************************************************************
269* Internal Functions *
270*********************************************************************************************************************************/
271static void rtLockValidatorClassDestroy(RTLOCKVALCLASSINT *pClass);
272static uint32_t rtLockValidatorStackDepth(PRTTHREADINT pThread);
273
274
275/**
276 * Lazy initialization of the lock validator globals.
277 */
278static void rtLockValidatorLazyInit(void)
279{
280 static uint32_t volatile s_fInitializing = false;
281 if (ASMAtomicCmpXchgU32(&s_fInitializing, true, false))
282 {
283 /*
284 * The locks.
285 */
286 if (!RTCritSectIsInitialized(&g_LockValClassTeachCS))
287 RTCritSectInitEx(&g_LockValClassTeachCS, RTCRITSECT_FLAGS_NO_LOCK_VAL, NIL_RTLOCKVALCLASS,
288 RTLOCKVAL_SUB_CLASS_ANY, "RTLockVal-Teach");
289
290 if (g_hLockValClassTreeRWLock == NIL_RTSEMRW)
291 {
292 RTSEMRW hSemRW;
293 int rc = RTSemRWCreateEx(&hSemRW, RTSEMRW_FLAGS_NO_LOCK_VAL, NIL_RTLOCKVALCLASS, RTLOCKVAL_SUB_CLASS_ANY, "RTLockVal-Tree");
294 if (RT_SUCCESS(rc))
295 ASMAtomicWriteHandle(&g_hLockValClassTreeRWLock, hSemRW);
296 }
297
298 if (g_hLockValidatorXRoads == NIL_RTSEMXROADS)
299 {
300 RTSEMXROADS hXRoads;
301 int rc = RTSemXRoadsCreate(&hXRoads);
302 if (RT_SUCCESS(rc))
303 ASMAtomicWriteHandle(&g_hLockValidatorXRoads, hXRoads);
304 }
305
306#ifdef IN_RING3
307 /*
308 * Check the environment for our config variables.
309 */
310 if (RTEnvExist("IPRT_LOCK_VALIDATOR_ENABLED"))
311 ASMAtomicWriteBool(&g_fLockValidatorEnabled, true);
312 if (RTEnvExist("IPRT_LOCK_VALIDATOR_DISABLED"))
313 ASMAtomicWriteBool(&g_fLockValidatorEnabled, false);
314
315 if (RTEnvExist("IPRT_LOCK_VALIDATOR_MAY_PANIC"))
316 ASMAtomicWriteBool(&g_fLockValidatorMayPanic, true);
317 if (RTEnvExist("IPRT_LOCK_VALIDATOR_MAY_NOT_PANIC"))
318 ASMAtomicWriteBool(&g_fLockValidatorMayPanic, false);
319
320 if (RTEnvExist("IPRT_LOCK_VALIDATOR_NOT_QUIET"))
321 ASMAtomicWriteBool(&g_fLockValidatorQuiet, false);
322 if (RTEnvExist("IPRT_LOCK_VALIDATOR_QUIET"))
323 ASMAtomicWriteBool(&g_fLockValidatorQuiet, true);
324
325 if (RTEnvExist("IPRT_LOCK_VALIDATOR_STRICT_ORDER"))
326 ASMAtomicWriteBool(&g_fLockValSoftWrongOrder, false);
327 if (RTEnvExist("IPRT_LOCK_VALIDATOR_SOFT_ORDER"))
328 ASMAtomicWriteBool(&g_fLockValSoftWrongOrder, true);
329#endif
330
331 /*
332 * Register cleanup
333 */
334 /** @todo register some cleanup callback if we care. */
335
336 ASMAtomicWriteU32(&s_fInitializing, false);
337 }
338}
339
340
341
342/** Wrapper around ASMAtomicReadPtr. */
343DECL_FORCE_INLINE(PRTLOCKVALRECUNION) rtLockValidatorReadRecUnionPtr(PRTLOCKVALRECUNION volatile *ppRec)
344{
345 PRTLOCKVALRECUNION p = ASMAtomicReadPtrT(ppRec, PRTLOCKVALRECUNION);
346 RTLOCKVAL_ASSERT_PTR_ALIGN(p);
347 return p;
348}
349
350
351/** Wrapper around ASMAtomicWritePtr. */
352DECL_FORCE_INLINE(void) rtLockValidatorWriteRecUnionPtr(PRTLOCKVALRECUNION volatile *ppRec, PRTLOCKVALRECUNION pRecNew)
353{
354 RTLOCKVAL_ASSERT_PTR_ALIGN(pRecNew);
355 ASMAtomicWritePtr(ppRec, pRecNew);
356}
357
358
359/** Wrapper around ASMAtomicReadPtr. */
360DECL_FORCE_INLINE(PRTTHREADINT) rtLockValidatorReadThreadHandle(RTTHREAD volatile *phThread)
361{
362 PRTTHREADINT p = ASMAtomicReadPtrT(phThread, PRTTHREADINT);
363 RTLOCKVAL_ASSERT_PTR_ALIGN(p);
364 return p;
365}
366
367
368/** Wrapper around ASMAtomicUoReadPtr. */
369DECL_FORCE_INLINE(PRTLOCKVALRECSHRDOWN) rtLockValidatorUoReadSharedOwner(PRTLOCKVALRECSHRDOWN volatile *ppOwner)
370{
371 PRTLOCKVALRECSHRDOWN p = ASMAtomicUoReadPtrT(ppOwner, PRTLOCKVALRECSHRDOWN);
372 RTLOCKVAL_ASSERT_PTR_ALIGN(p);
373 return p;
374}
375
376
377/**
378 * Reads a volatile thread handle field and returns the thread name.
379 *
380 * @returns Thread name (read only).
381 * @param phThread The thread handle field.
382 */
383static const char *rtLockValidatorNameThreadHandle(RTTHREAD volatile *phThread)
384{
385 PRTTHREADINT pThread = rtLockValidatorReadThreadHandle(phThread);
386 if (!pThread)
387 return "<NIL>";
388 if (!VALID_PTR(pThread))
389 return "<INVALID>";
390 if (pThread->u32Magic != RTTHREADINT_MAGIC)
391 return "<BAD-THREAD-MAGIC>";
392 return pThread->szName;
393}
394
395
396/**
397 * Launch a simple assertion like complaint w/ panic.
398 *
399 * @param SRC_POS The source position where call is being made from.
400 * @param pszWhat What we're complaining about.
401 * @param ... Format arguments.
402 */
403static void rtLockValComplain(RT_SRC_POS_DECL, const char *pszWhat, ...)
404{
405 if (!ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
406 {
407 RTAssertMsg1Weak("RTLockValidator", iLine, pszFile, pszFunction);
408 va_list va;
409 va_start(va, pszWhat);
410 RTAssertMsg2WeakV(pszWhat, va);
411 va_end(va);
412 }
413 if (!ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
414 RTAssertPanic();
415}
416
417
418/**
419 * Describes the class.
420 *
421 * @param pszPrefix Message prefix.
422 * @param pClass The class to complain about.
423 * @param uSubClass My sub-class.
424 * @param fVerbose Verbose description including relations to other
425 * classes.
426 */
427static void rtLockValComplainAboutClass(const char *pszPrefix, RTLOCKVALCLASSINT *pClass, uint32_t uSubClass, bool fVerbose)
428{
429 if (ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
430 return;
431
432 /* Stringify the sub-class. */
433 const char *pszSubClass;
434 char szSubClass[32];
435 if (uSubClass < RTLOCKVAL_SUB_CLASS_USER)
436 switch (uSubClass)
437 {
438 case RTLOCKVAL_SUB_CLASS_NONE: pszSubClass = "none"; break;
439 case RTLOCKVAL_SUB_CLASS_ANY: pszSubClass = "any"; break;
440 default:
441 RTStrPrintf(szSubClass, sizeof(szSubClass), "invl-%u", uSubClass);
442 pszSubClass = szSubClass;
443 break;
444 }
445 else
446 {
447 RTStrPrintf(szSubClass, sizeof(szSubClass), "%u", uSubClass);
448 pszSubClass = szSubClass;
449 }
450
451 /* Validate the class pointer. */
452 if (!VALID_PTR(pClass))
453 {
454 RTAssertMsg2AddWeak("%sbad class=%p sub-class=%s\n", pszPrefix, pClass, pszSubClass);
455 return;
456 }
457 if (pClass->u32Magic != RTLOCKVALCLASS_MAGIC)
458 {
459 RTAssertMsg2AddWeak("%sbad class=%p magic=%#x sub-class=%s\n", pszPrefix, pClass, pClass->u32Magic, pszSubClass);
460 return;
461 }
462
463 /* OK, dump the class info. */
464 RTAssertMsg2AddWeak("%sclass=%p %s created={%Rbn(%u) %Rfn %p} sub-class=%s\n", pszPrefix,
465 pClass,
466 pClass->pszName,
467 pClass->CreatePos.pszFile,
468 pClass->CreatePos.uLine,
469 pClass->CreatePos.pszFunction,
470 pClass->CreatePos.uId,
471 pszSubClass);
472 if (fVerbose)
473 {
474 uint32_t i = 0;
475 uint32_t cPrinted = 0;
476 for (PRTLOCKVALCLASSREFCHUNK pChunk = &pClass->PriorLocks; pChunk; pChunk = pChunk->pNext)
477 for (unsigned j = 0; j < RT_ELEMENTS(pChunk->aRefs); j++, i++)
478 {
479 RTLOCKVALCLASSINT *pCurClass = pChunk->aRefs[j].hClass;
480 if (pCurClass != NIL_RTLOCKVALCLASS)
481 {
482 RTAssertMsg2AddWeak("%s%s #%02u: %s, %s, %u lookup%s\n", pszPrefix,
483 cPrinted == 0
484 ? "Prior:"
485 : " ",
486 i,
487 pCurClass->pszName,
488 pChunk->aRefs[j].fAutodidacticism
489 ? "autodidactic"
490 : "manually ",
491 pChunk->aRefs[j].cLookups,
492 pChunk->aRefs[j].cLookups != 1 ? "s" : "");
493 cPrinted++;
494 }
495 }
496 if (!cPrinted)
497 RTAssertMsg2AddWeak("%sPrior: none\n", pszPrefix);
498#ifdef RTLOCKVAL_WITH_CLASS_HASH_STATS
499 RTAssertMsg2AddWeak("%sHash Stats: %u hits, %u misses\n", pszPrefix, pClass->cHashHits, pClass->cHashMisses);
500#endif
501 }
502 else
503 {
504 uint32_t cPrinted = 0;
505 for (PRTLOCKVALCLASSREFCHUNK pChunk = &pClass->PriorLocks; pChunk; pChunk = pChunk->pNext)
506 for (unsigned j = 0; j < RT_ELEMENTS(pChunk->aRefs); j++)
507 {
508 RTLOCKVALCLASSINT *pCurClass = pChunk->aRefs[j].hClass;
509 if (pCurClass != NIL_RTLOCKVALCLASS)
510 {
511 if ((cPrinted % 10) == 0)
512 RTAssertMsg2AddWeak("%sPrior classes: %s%s", pszPrefix, pCurClass->pszName,
513 pChunk->aRefs[j].fAutodidacticism ? "*" : "");
514 else if ((cPrinted % 10) != 9)
515 RTAssertMsg2AddWeak(", %s%s", pCurClass->pszName,
516 pChunk->aRefs[j].fAutodidacticism ? "*" : "");
517 else
518 RTAssertMsg2AddWeak(", %s%s\n", pCurClass->pszName,
519 pChunk->aRefs[j].fAutodidacticism ? "*" : "");
520 cPrinted++;
521 }
522 }
523 if (!cPrinted)
524 RTAssertMsg2AddWeak("%sPrior classes: none\n", pszPrefix);
525 else if ((cPrinted % 10) != 0)
526 RTAssertMsg2AddWeak("\n");
527 }
528}
529
530
531/**
532 * Helper for getting the class name.
533 * @returns Class name string.
534 * @param pClass The class.
535 */
536static const char *rtLockValComplainGetClassName(RTLOCKVALCLASSINT *pClass)
537{
538 if (!pClass)
539 return "<nil-class>";
540 if (!VALID_PTR(pClass))
541 return "<bad-class-ptr>";
542 if (pClass->u32Magic != RTLOCKVALCLASS_MAGIC)
543 return "<bad-class-magic>";
544 if (!pClass->pszName)
545 return "<no-class-name>";
546 return pClass->pszName;
547}
548
549/**
550 * Formats the sub-class.
551 *
552 * @returns Stringified sub-class.
553 * @param uSubClass The name.
554 * @param pszBuf Buffer that is big enough.
555 */
556static const char *rtLockValComplainGetSubClassName(uint32_t uSubClass, char *pszBuf)
557{
558 if (uSubClass < RTLOCKVAL_SUB_CLASS_USER)
559 switch (uSubClass)
560 {
561 case RTLOCKVAL_SUB_CLASS_NONE: return "none";
562 case RTLOCKVAL_SUB_CLASS_ANY: return "any";
563 default:
564 RTStrPrintf(pszBuf, 32, "invl-%u", uSubClass);
565 break;
566 }
567 else
568 RTStrPrintf(pszBuf, 32, "%x", uSubClass);
569 return pszBuf;
570}
571
572
573/**
574 * Helper for rtLockValComplainAboutLock.
575 */
576DECL_FORCE_INLINE(void) rtLockValComplainAboutLockHlp(const char *pszPrefix, PRTLOCKVALRECUNION pRec, const char *pszSuffix,
577 uint32_t u32Magic, PCRTLOCKVALSRCPOS pSrcPos, uint32_t cRecursion,
578 const char *pszFrameType)
579{
580 char szBuf[32];
581 switch (u32Magic)
582 {
583 case RTLOCKVALRECEXCL_MAGIC:
584#ifdef RTLOCKVAL_WITH_VERBOSE_DUMPS
585 RTAssertMsg2AddWeak("%s%p %s xrec=%p own=%s r=%u cls=%s/%s pos={%Rbn(%u) %Rfn %p} [x%s]%s", pszPrefix,
586 pRec->Excl.hLock, pRec->Excl.szName, pRec,
587 rtLockValidatorNameThreadHandle(&pRec->Excl.hThread), cRecursion,
588 rtLockValComplainGetClassName(pRec->Excl.hClass),
589 rtLockValComplainGetSubClassName(pRec->Excl.uSubClass, szBuf),
590 pSrcPos->pszFile, pSrcPos->uLine, pSrcPos->pszFunction, pSrcPos->uId,
591 pszFrameType, pszSuffix);
592#else
593 RTAssertMsg2AddWeak("%s%p %s own=%s r=%u cls=%s/%s pos={%Rbn(%u) %Rfn %p} [x%s]%s", pszPrefix,
594 pRec->Excl.hLock, pRec->Excl.szName,
595 rtLockValidatorNameThreadHandle(&pRec->Excl.hThread), cRecursion,
596 rtLockValComplainGetClassName(pRec->Excl.hClass),
597 rtLockValComplainGetSubClassName(pRec->Excl.uSubClass, szBuf),
598 pSrcPos->pszFile, pSrcPos->uLine, pSrcPos->pszFunction, pSrcPos->uId,
599 pszFrameType, pszSuffix);
600#endif
601 break;
602
603 case RTLOCKVALRECSHRD_MAGIC:
604 RTAssertMsg2AddWeak("%ss %p %s srec=%p cls=%s/%s [s%s]%s", pszPrefix,
605 pRec->Shared.hLock, pRec->Shared.szName, pRec,
606 rtLockValComplainGetClassName(pRec->Shared.hClass),
607 rtLockValComplainGetSubClassName(pRec->Shared.uSubClass, szBuf),
608 pszFrameType, pszSuffix);
609 break;
610
611 case RTLOCKVALRECSHRDOWN_MAGIC:
612 {
613 PRTLOCKVALRECSHRD pShared = pRec->ShrdOwner.pSharedRec;
614 if ( VALID_PTR(pShared)
615 && pShared->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC)
616#ifdef RTLOCKVAL_WITH_VERBOSE_DUMPS
617 RTAssertMsg2AddWeak("%s%p %s srec=%p trec=%p own=%s r=%u cls=%s/%s pos={%Rbn(%u) %Rfn %p} [o%s]%s", pszPrefix,
618 pShared->hLock, pShared->szName, pShared,
619 pRec, rtLockValidatorNameThreadHandle(&pRec->ShrdOwner.hThread), cRecursion,
620 rtLockValComplainGetClassName(pShared->hClass),
621 rtLockValComplainGetSubClassName(pShared->uSubClass, szBuf),
622 pSrcPos->pszFile, pSrcPos->uLine, pSrcPos->pszFunction, pSrcPos->uId,
623 pszSuffix, pszSuffix);
624#else
625 RTAssertMsg2AddWeak("%s%p %s own=%s r=%u cls=%s/%s pos={%Rbn(%u) %Rfn %p} [o%s]%s", pszPrefix,
626 pShared->hLock, pShared->szName,
627 rtLockValidatorNameThreadHandle(&pRec->ShrdOwner.hThread), cRecursion,
628 rtLockValComplainGetClassName(pShared->hClass),
629 rtLockValComplainGetSubClassName(pShared->uSubClass, szBuf),
630 pSrcPos->pszFile, pSrcPos->uLine, pSrcPos->pszFunction, pSrcPos->uId,
631 pszFrameType, pszSuffix);
632#endif
633 else
634 RTAssertMsg2AddWeak("%sbad srec=%p trec=%p own=%s r=%u pos={%Rbn(%u) %Rfn %p} [x%s]%s", pszPrefix,
635 pShared,
636 pRec, rtLockValidatorNameThreadHandle(&pRec->ShrdOwner.hThread), cRecursion,
637 pSrcPos->pszFile, pSrcPos->uLine, pSrcPos->pszFunction, pSrcPos->uId,
638 pszFrameType, pszSuffix);
639 break;
640 }
641
642 default:
643 AssertMsgFailed(("%#x\n", u32Magic));
644 }
645}
646
647
648/**
649 * Describes the lock.
650 *
651 * @param pszPrefix Message prefix.
652 * @param pRec The lock record we're working on.
653 * @param pszSuffix Message suffix.
654 */
655static void rtLockValComplainAboutLock(const char *pszPrefix, PRTLOCKVALRECUNION pRec, const char *pszSuffix)
656{
657#ifdef RTLOCKVAL_WITH_RECURSION_RECORDS
658# define FIX_REC(r) 1
659#else
660# define FIX_REC(r) (r)
661#endif
662 if ( VALID_PTR(pRec)
663 && !ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
664 {
665 switch (pRec->Core.u32Magic)
666 {
667 case RTLOCKVALRECEXCL_MAGIC:
668 rtLockValComplainAboutLockHlp(pszPrefix, pRec, pszSuffix, RTLOCKVALRECEXCL_MAGIC,
669 &pRec->Excl.SrcPos, FIX_REC(pRec->Excl.cRecursion), "");
670 break;
671
672 case RTLOCKVALRECSHRD_MAGIC:
673 rtLockValComplainAboutLockHlp(pszPrefix, pRec, pszSuffix, RTLOCKVALRECSHRD_MAGIC, NULL, 0, "");
674 break;
675
676 case RTLOCKVALRECSHRDOWN_MAGIC:
677 rtLockValComplainAboutLockHlp(pszPrefix, pRec, pszSuffix, RTLOCKVALRECSHRDOWN_MAGIC,
678 &pRec->ShrdOwner.SrcPos, FIX_REC(pRec->ShrdOwner.cRecursion), "");
679 break;
680
681 case RTLOCKVALRECNEST_MAGIC:
682 {
683 PRTLOCKVALRECUNION pRealRec = pRec->Nest.pRec;
684 uint32_t u32Magic;
685 if ( VALID_PTR(pRealRec)
686 && ( (u32Magic = pRealRec->Core.u32Magic) == RTLOCKVALRECEXCL_MAGIC
687 || u32Magic == RTLOCKVALRECSHRD_MAGIC
688 || u32Magic == RTLOCKVALRECSHRDOWN_MAGIC)
689 )
690 rtLockValComplainAboutLockHlp(pszPrefix, pRealRec, pszSuffix, u32Magic,
691 &pRec->Nest.SrcPos, pRec->Nest.cRecursion, "/r");
692 else
693 RTAssertMsg2AddWeak("%sbad rrec=%p nrec=%p r=%u pos={%Rbn(%u) %Rfn %p}%s", pszPrefix,
694 pRealRec, pRec, pRec->Nest.cRecursion,
695 pRec->Nest.SrcPos.pszFile, pRec->Nest.SrcPos.uLine, pRec->Nest.SrcPos.pszFunction, pRec->Nest.SrcPos.uId,
696 pszSuffix);
697 break;
698 }
699
700 default:
701 RTAssertMsg2AddWeak("%spRec=%p u32Magic=%#x (bad)%s", pszPrefix, pRec, pRec->Core.u32Magic, pszSuffix);
702 break;
703 }
704 }
705#undef FIX_REC
706}
707
708
709/**
710 * Dump the lock stack.
711 *
712 * @param pThread The thread which lock stack we're gonna dump.
713 * @param cchIndent The indentation in chars.
714 * @param cMinFrames The minimum number of frames to consider
715 * dumping.
716 * @param pHighightRec Record that should be marked specially in the
717 * dump.
718 */
719static void rtLockValComplainAboutLockStack(PRTTHREADINT pThread, unsigned cchIndent, uint32_t cMinFrames,
720 PRTLOCKVALRECUNION pHighightRec)
721{
722 if ( VALID_PTR(pThread)
723 && !ASMAtomicUoReadBool(&g_fLockValidatorQuiet)
724 && pThread->u32Magic == RTTHREADINT_MAGIC
725 )
726 {
727 uint32_t cEntries = rtLockValidatorStackDepth(pThread);
728 if (cEntries >= cMinFrames)
729 {
730 RTAssertMsg2AddWeak("%*s---- start of lock stack for %p %s - %u entr%s ----\n", cchIndent, "",
731 pThread, pThread->szName, cEntries, cEntries == 1 ? "y" : "ies");
732 PRTLOCKVALRECUNION pCur = rtLockValidatorReadRecUnionPtr(&pThread->LockValidator.pStackTop);
733 for (uint32_t i = 0; VALID_PTR(pCur); i++)
734 {
735 char szPrefix[80];
736 RTStrPrintf(szPrefix, sizeof(szPrefix), "%*s#%02u: ", cchIndent, "", i);
737 rtLockValComplainAboutLock(szPrefix, pCur, pHighightRec != pCur ? "\n" : " (*)\n");
738 switch (pCur->Core.u32Magic)
739 {
740 case RTLOCKVALRECEXCL_MAGIC: pCur = rtLockValidatorReadRecUnionPtr(&pCur->Excl.pDown); break;
741 case RTLOCKVALRECSHRDOWN_MAGIC: pCur = rtLockValidatorReadRecUnionPtr(&pCur->ShrdOwner.pDown); break;
742 case RTLOCKVALRECNEST_MAGIC: pCur = rtLockValidatorReadRecUnionPtr(&pCur->Nest.pDown); break;
743 default:
744 RTAssertMsg2AddWeak("%*s<bad stack frame>\n", cchIndent, "");
745 pCur = NULL;
746 break;
747 }
748 }
749 RTAssertMsg2AddWeak("%*s---- end of lock stack ----\n", cchIndent, "");
750 }
751 }
752}
753
754
755/**
756 * Launch the initial complaint.
757 *
758 * @param pszWhat What we're complaining about.
759 * @param pSrcPos Where we are complaining from, as it were.
760 * @param pThreadSelf The calling thread.
761 * @param pRec The main lock involved. Can be NULL.
762 * @param fDumpStack Whether to dump the lock stack (true) or not
763 * (false).
764 */
765static void rtLockValComplainFirst(const char *pszWhat, PCRTLOCKVALSRCPOS pSrcPos, PRTTHREADINT pThreadSelf,
766 PRTLOCKVALRECUNION pRec, bool fDumpStack)
767{
768 if (!ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
769 {
770 ASMCompilerBarrier(); /* paranoia */
771 RTAssertMsg1Weak("RTLockValidator", pSrcPos ? pSrcPos->uLine : 0, pSrcPos ? pSrcPos->pszFile : NULL, pSrcPos ? pSrcPos->pszFunction : NULL);
772 if (pSrcPos && pSrcPos->uId)
773 RTAssertMsg2Weak("%s [uId=%p thrd=%s]\n", pszWhat, pSrcPos->uId, VALID_PTR(pThreadSelf) ? pThreadSelf->szName : "<NIL>");
774 else
775 RTAssertMsg2Weak("%s [thrd=%s]\n", pszWhat, VALID_PTR(pThreadSelf) ? pThreadSelf->szName : "<NIL>");
776 rtLockValComplainAboutLock("Lock: ", pRec, "\n");
777 if (fDumpStack)
778 rtLockValComplainAboutLockStack(pThreadSelf, 0, 1, pRec);
779 }
780}
781
782
783/**
784 * Continue bitching.
785 *
786 * @param pszFormat Format string.
787 * @param ... Format arguments.
788 */
789static void rtLockValComplainMore(const char *pszFormat, ...)
790{
791 if (!ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
792 {
793 va_list va;
794 va_start(va, pszFormat);
795 RTAssertMsg2AddWeakV(pszFormat, va);
796 va_end(va);
797 }
798}
799
800
801/**
802 * Raise a panic if enabled.
803 */
804static void rtLockValComplainPanic(void)
805{
806 if (ASMAtomicUoReadBool(&g_fLockValidatorMayPanic))
807 RTAssertPanic();
808}
809
810
811/**
812 * Copy a source position record.
813 *
814 * @param pDst The destination.
815 * @param pSrc The source. Can be NULL.
816 */
817DECL_FORCE_INLINE(void) rtLockValidatorSrcPosCopy(PRTLOCKVALSRCPOS pDst, PCRTLOCKVALSRCPOS pSrc)
818{
819 if (pSrc)
820 {
821 ASMAtomicUoWriteU32(&pDst->uLine, pSrc->uLine);
822 ASMAtomicUoWritePtr(&pDst->pszFile, pSrc->pszFile);
823 ASMAtomicUoWritePtr(&pDst->pszFunction, pSrc->pszFunction);
824 ASMAtomicUoWritePtr((void * volatile *)&pDst->uId, (void *)pSrc->uId);
825 }
826 else
827 {
828 ASMAtomicUoWriteU32(&pDst->uLine, 0);
829 ASMAtomicUoWriteNullPtr(&pDst->pszFile);
830 ASMAtomicUoWriteNullPtr(&pDst->pszFunction);
831 ASMAtomicUoWritePtr(&pDst->uId, (RTHCUINTPTR)0);
832 }
833}
834
835
836/**
837 * Init a source position record.
838 *
839 * @param pSrcPos The source position record.
840 */
841DECL_FORCE_INLINE(void) rtLockValidatorSrcPosInit(PRTLOCKVALSRCPOS pSrcPos)
842{
843 pSrcPos->pszFile = NULL;
844 pSrcPos->pszFunction = NULL;
845 pSrcPos->uId = 0;
846 pSrcPos->uLine = 0;
847#if HC_ARCH_BITS == 64
848 pSrcPos->u32Padding = 0;
849#endif
850}
851
852
853/**
854 * Hashes the specified source position.
855 *
856 * @returns Hash.
857 * @param pSrcPos The source position record.
858 */
859static uint32_t rtLockValidatorSrcPosHash(PCRTLOCKVALSRCPOS pSrcPos)
860{
861 uint32_t uHash;
862 if ( ( pSrcPos->pszFile
863 || pSrcPos->pszFunction)
864 && pSrcPos->uLine != 0)
865 {
866 uHash = 0;
867 if (pSrcPos->pszFile)
868 uHash = sdbmInc(pSrcPos->pszFile, uHash);
869 if (pSrcPos->pszFunction)
870 uHash = sdbmInc(pSrcPos->pszFunction, uHash);
871 uHash += pSrcPos->uLine;
872 }
873 else
874 {
875 Assert(pSrcPos->uId);
876 uHash = (uint32_t)pSrcPos->uId;
877 }
878
879 return uHash;
880}
881
882
883/**
884 * Compares two source positions.
885 *
886 * @returns 0 if equal, < 0 if pSrcPos1 is smaller than pSrcPos2, > 0 if
887 * otherwise.
888 * @param pSrcPos1 The first source position.
889 * @param pSrcPos2 The second source position.
890 */
891static int rtLockValidatorSrcPosCompare(PCRTLOCKVALSRCPOS pSrcPos1, PCRTLOCKVALSRCPOS pSrcPos2)
892{
893 if (pSrcPos1->uLine != pSrcPos2->uLine)
894 return pSrcPos1->uLine < pSrcPos2->uLine ? -1 : 1;
895
896 int iDiff = RTStrCmp(pSrcPos1->pszFile, pSrcPos2->pszFile);
897 if (iDiff != 0)
898 return iDiff;
899
900 iDiff = RTStrCmp(pSrcPos1->pszFunction, pSrcPos2->pszFunction);
901 if (iDiff != 0)
902 return iDiff;
903
904 if (pSrcPos1->uId != pSrcPos2->uId)
905 return pSrcPos1->uId < pSrcPos2->uId ? -1 : 1;
906 return 0;
907}
908
909
910
911/**
912 * Serializes destruction of RTLOCKVALREC* and RTTHREADINT structures.
913 */
914DECLHIDDEN(void) rtLockValidatorSerializeDestructEnter(void)
915{
916 RTSEMXROADS hXRoads = g_hLockValidatorXRoads;
917 if (hXRoads != NIL_RTSEMXROADS)
918 RTSemXRoadsNSEnter(hXRoads);
919}
920
921
922/**
923 * Call after rtLockValidatorSerializeDestructEnter.
924 */
925DECLHIDDEN(void) rtLockValidatorSerializeDestructLeave(void)
926{
927 RTSEMXROADS hXRoads = g_hLockValidatorXRoads;
928 if (hXRoads != NIL_RTSEMXROADS)
929 RTSemXRoadsNSLeave(hXRoads);
930}
931
932
933/**
934 * Serializes deadlock detection against destruction of the objects being
935 * inspected.
936 */
937DECLINLINE(void) rtLockValidatorSerializeDetectionEnter(void)
938{
939 RTSEMXROADS hXRoads = g_hLockValidatorXRoads;
940 if (hXRoads != NIL_RTSEMXROADS)
941 RTSemXRoadsEWEnter(hXRoads);
942}
943
944
945/**
946 * Call after rtLockValidatorSerializeDetectionEnter.
947 */
948DECLHIDDEN(void) rtLockValidatorSerializeDetectionLeave(void)
949{
950 RTSEMXROADS hXRoads = g_hLockValidatorXRoads;
951 if (hXRoads != NIL_RTSEMXROADS)
952 RTSemXRoadsEWLeave(hXRoads);
953}
954
955
956/**
957 * Initializes the per thread lock validator data.
958 *
959 * @param pPerThread The data.
960 */
961DECLHIDDEN(void) rtLockValidatorInitPerThread(RTLOCKVALPERTHREAD *pPerThread)
962{
963 pPerThread->bmFreeShrdOwners = UINT32_MAX;
964
965 /* ASSUMES the rest has already been zeroed. */
966 Assert(pPerThread->pRec == NULL);
967 Assert(pPerThread->cWriteLocks == 0);
968 Assert(pPerThread->cReadLocks == 0);
969 Assert(pPerThread->fInValidator == false);
970 Assert(pPerThread->pStackTop == NULL);
971}
972
973
974/**
975 * Delete the per thread lock validator data.
976 *
977 * @param pPerThread The data.
978 */
979DECLHIDDEN(void) rtLockValidatorDeletePerThread(RTLOCKVALPERTHREAD *pPerThread)
980{
981 /*
982 * Check that the thread doesn't own any locks at this time.
983 */
984 if (pPerThread->pStackTop)
985 {
986 rtLockValComplainFirst("Thread terminating owning locks!", NULL,
987 RT_FROM_MEMBER(pPerThread, RTTHREADINT, LockValidator),
988 pPerThread->pStackTop, true);
989 rtLockValComplainPanic();
990 }
991
992 /*
993 * Free the recursion records.
994 */
995 PRTLOCKVALRECNEST pCur = pPerThread->pFreeNestRecs;
996 pPerThread->pFreeNestRecs = NULL;
997 while (pCur)
998 {
999 PRTLOCKVALRECNEST pNext = pCur->pNextFree;
1000 RTMemFree(pCur);
1001 pCur = pNext;
1002 }
1003}
1004
1005RTDECL(int) RTLockValidatorClassCreateEx(PRTLOCKVALCLASS phClass, PCRTLOCKVALSRCPOS pSrcPos,
1006 bool fAutodidact, bool fRecursionOk, bool fStrictReleaseOrder,
1007 RTMSINTERVAL cMsMinDeadlock, RTMSINTERVAL cMsMinOrder,
1008 const char *pszNameFmt, ...)
1009{
1010 va_list va;
1011 va_start(va, pszNameFmt);
1012 int rc = RTLockValidatorClassCreateExV(phClass, pSrcPos, fAutodidact, fRecursionOk, fStrictReleaseOrder,
1013 cMsMinDeadlock, cMsMinOrder, pszNameFmt, va);
1014 va_end(va);
1015 return rc;
1016}
1017
1018
1019RTDECL(int) RTLockValidatorClassCreateExV(PRTLOCKVALCLASS phClass, PCRTLOCKVALSRCPOS pSrcPos,
1020 bool fAutodidact, bool fRecursionOk, bool fStrictReleaseOrder,
1021 RTMSINTERVAL cMsMinDeadlock, RTMSINTERVAL cMsMinOrder,
1022 const char *pszNameFmt, va_list va)
1023{
1024 Assert(cMsMinDeadlock >= 1);
1025 Assert(cMsMinOrder >= 1);
1026 AssertPtr(pSrcPos);
1027
1028 /*
1029 * Format the name and calc its length.
1030 */
1031 size_t cbName;
1032 char szName[32];
1033 if (pszNameFmt && *pszNameFmt)
1034 cbName = RTStrPrintfV(szName, sizeof(szName), pszNameFmt, va) + 1;
1035 else
1036 {
1037 static uint32_t volatile s_cAnonymous = 0;
1038 uint32_t i = ASMAtomicIncU32(&s_cAnonymous);
1039 cbName = RTStrPrintf(szName, sizeof(szName), "anon-%u", i - 1) + 1;
1040 }
1041
1042 /*
1043 * Figure out the file and function name lengths and allocate memory for
1044 * it all.
1045 */
1046 size_t const cbFile = pSrcPos->pszFile ? strlen(pSrcPos->pszFile) + 1 : 0;
1047 size_t const cbFunction = pSrcPos->pszFile ? strlen(pSrcPos->pszFunction) + 1 : 0;
1048 RTLOCKVALCLASSINT *pThis = (RTLOCKVALCLASSINT *)RTMemAllocVarTag(sizeof(*pThis) + cbFile + cbFunction + cbName,
1049 "may-leak:RTLockValidatorClassCreateExV");
1050 if (!pThis)
1051 return VERR_NO_MEMORY;
1052 RTMEM_MAY_LEAK(pThis);
1053
1054 /*
1055 * Initialize the class data.
1056 */
1057 pThis->Core.Key = rtLockValidatorSrcPosHash(pSrcPos);
1058 pThis->Core.uchHeight = 0;
1059 pThis->Core.pLeft = NULL;
1060 pThis->Core.pRight = NULL;
1061 pThis->Core.pList = NULL;
1062 pThis->u32Magic = RTLOCKVALCLASS_MAGIC;
1063 pThis->cRefs = 1;
1064 pThis->fAutodidact = fAutodidact;
1065 pThis->fRecursionOk = fRecursionOk;
1066 pThis->fStrictReleaseOrder = fStrictReleaseOrder;
1067 pThis->fInTree = false;
1068 pThis->fDonateRefToNextRetainer = false;
1069 pThis->afReserved[0] = false;
1070 pThis->afReserved[1] = false;
1071 pThis->afReserved[2] = false;
1072 pThis->cMsMinDeadlock = cMsMinDeadlock;
1073 pThis->cMsMinOrder = cMsMinOrder;
1074 for (unsigned i = 0; i < RT_ELEMENTS(pThis->au32Reserved); i++)
1075 pThis->au32Reserved[i] = 0;
1076 for (unsigned i = 0; i < RT_ELEMENTS(pThis->PriorLocks.aRefs); i++)
1077 {
1078 pThis->PriorLocks.aRefs[i].hClass = NIL_RTLOCKVALCLASS;
1079 pThis->PriorLocks.aRefs[i].cLookups = 0;
1080 pThis->PriorLocks.aRefs[i].fAutodidacticism = false;
1081 pThis->PriorLocks.aRefs[i].afReserved[0] = false;
1082 pThis->PriorLocks.aRefs[i].afReserved[1] = false;
1083 pThis->PriorLocks.aRefs[i].afReserved[2] = false;
1084 }
1085 pThis->PriorLocks.pNext = NULL;
1086 for (unsigned i = 0; i < RT_ELEMENTS(pThis->apPriorLocksHash); i++)
1087 pThis->apPriorLocksHash[i] = NULL;
1088 char *pszDst = (char *)(pThis + 1);
1089 pThis->pszName = (char *)memcpy(pszDst, szName, cbName);
1090 pszDst += cbName;
1091 rtLockValidatorSrcPosCopy(&pThis->CreatePos, pSrcPos);
1092 pThis->CreatePos.pszFile = pSrcPos->pszFile ? (char *)memcpy(pszDst, pSrcPos->pszFile, cbFile) : NULL;
1093 pszDst += cbFile;
1094 pThis->CreatePos.pszFunction= pSrcPos->pszFunction ? (char *)memcpy(pszDst, pSrcPos->pszFunction, cbFunction) : NULL;
1095 Assert(rtLockValidatorSrcPosHash(&pThis->CreatePos) == pThis->Core.Key);
1096#ifdef RTLOCKVAL_WITH_CLASS_HASH_STATS
1097 pThis->cHashHits = 0;
1098 pThis->cHashMisses = 0;
1099#endif
1100
1101 *phClass = pThis;
1102 return VINF_SUCCESS;
1103}
1104
1105
1106RTDECL(int) RTLockValidatorClassCreate(PRTLOCKVALCLASS phClass, bool fAutodidact, RT_SRC_POS_DECL, const char *pszNameFmt, ...)
1107{
1108 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_POS_NO_ID();
1109 va_list va;
1110 va_start(va, pszNameFmt);
1111 int rc = RTLockValidatorClassCreateExV(phClass, &SrcPos,
1112 fAutodidact, true /*fRecursionOk*/, false /*fStrictReleaseOrder*/,
1113 1 /*cMsMinDeadlock*/, 1 /*cMsMinOrder*/,
1114 pszNameFmt, va);
1115 va_end(va);
1116 return rc;
1117}
1118
1119
1120/**
1121 * Creates a new lock validator class with a reference that is consumed by the
1122 * first call to RTLockValidatorClassRetain.
1123 *
1124 * This is tailored for use in the parameter list of a semaphore constructor.
1125 *
1126 * @returns Class handle with a reference that is automatically consumed by the
1127 * first retainer. NIL_RTLOCKVALCLASS if we run into trouble.
1128 *
1129 * @param SRC_POS The source position where call is being made from.
1130 * Use RT_SRC_POS when possible. Optional.
1131 * @param pszNameFmt Class name format string, optional (NULL). Max
1132 * length is 32 bytes.
1133 * @param ... Format string arguments.
1134 */
1135RTDECL(RTLOCKVALCLASS) RTLockValidatorClassCreateUnique(RT_SRC_POS_DECL, const char *pszNameFmt, ...)
1136{
1137 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_POS_NO_ID();
1138 RTLOCKVALCLASSINT *pClass;
1139 va_list va;
1140 va_start(va, pszNameFmt);
1141 int rc = RTLockValidatorClassCreateExV(&pClass, &SrcPos,
1142 true /*fAutodidact*/, true /*fRecursionOk*/, false /*fStrictReleaseOrder*/,
1143 1 /*cMsMinDeadlock*/, 1 /*cMsMinOrder*/,
1144 pszNameFmt, va);
1145 va_end(va);
1146 if (RT_FAILURE(rc))
1147 return NIL_RTLOCKVALCLASS;
1148 ASMAtomicWriteBool(&pClass->fDonateRefToNextRetainer, true); /* see rtLockValidatorClassRetain */
1149 return pClass;
1150}
1151
1152
/**
 * Internal class retainer.
 *
 * The reference counter saturates at RTLOCKVALCLASS_MAX_REFS; once pinned
 * there it is never decremented again (see rtLockValidatorClassRelease).
 * Also implements the "donated reference" handshake used by
 * RTLockValidatorClassCreateUnique: the first retainer after creation
 * consumes the creator's reference instead of adding its own.
 *
 * @returns The new reference count.
 * @param   pClass      The class.
 */
DECL_FORCE_INLINE(uint32_t) rtLockValidatorClassRetain(RTLOCKVALCLASSINT *pClass)
{
    uint32_t cRefs = ASMAtomicIncU32(&pClass->cRefs);
    if (cRefs > RTLOCKVALCLASS_MAX_REFS)
        /* Saturate: clamp back down so the counter cannot wrap. */
        ASMAtomicWriteU32(&pClass->cRefs, RTLOCKVALCLASS_MAX_REFS);
    else if (   cRefs == 2
             && ASMAtomicXchgBool(&pClass->fDonateRefToNextRetainer, false))
        /* First retainer of a "unique" class: consume the donated creation
           reference rather than keeping both (net count stays at 1). */
        cRefs = ASMAtomicDecU32(&pClass->cRefs);
    return cRefs;
}
1168
1169
1170/**
1171 * Validates and retains a lock validator class.
1172 *
1173 * @returns @a hClass on success, NIL_RTLOCKVALCLASS on failure.
1174 * @param hClass The class handle. NIL_RTLOCKVALCLASS is ok.
1175 */
1176DECL_FORCE_INLINE(RTLOCKVALCLASS) rtLockValidatorClassValidateAndRetain(RTLOCKVALCLASS hClass)
1177{
1178 if (hClass == NIL_RTLOCKVALCLASS)
1179 return hClass;
1180 AssertPtrReturn(hClass, NIL_RTLOCKVALCLASS);
1181 AssertReturn(hClass->u32Magic == RTLOCKVALCLASS_MAGIC, NIL_RTLOCKVALCLASS);
1182 rtLockValidatorClassRetain(hClass);
1183 return hClass;
1184}
1185
1186
/**
 * Internal class releaser.
 *
 * Counterpart to rtLockValidatorClassRetain: a class whose counter has
 * saturated at RTLOCKVALCLASS_MAX_REFS is pinned and never destroyed.
 *
 * @returns The new reference count.
 * @param   pClass      The class.
 */
DECLINLINE(uint32_t) rtLockValidatorClassRelease(RTLOCKVALCLASSINT *pClass)
{
    uint32_t cRefs = ASMAtomicDecU32(&pClass->cRefs);
    if (cRefs + 1 == RTLOCKVALCLASS_MAX_REFS)
        /* Was saturated before the decrement: restore the ceiling so the
           class stays pinned forever. */
        ASMAtomicWriteU32(&pClass->cRefs, RTLOCKVALCLASS_MAX_REFS);
    else if (!cRefs)
        rtLockValidatorClassDestroy(pClass);
    return cRefs;
}
1201
1202
1203/**
1204 * Destroys a class once there are not more references to it.
1205 *
1206 * @param pClass The class.
1207 */
1208static void rtLockValidatorClassDestroy(RTLOCKVALCLASSINT *pClass)
1209{
1210 AssertReturnVoid(!pClass->fInTree);
1211 ASMAtomicWriteU32(&pClass->u32Magic, RTLOCKVALCLASS_MAGIC_DEAD);
1212
1213 PRTLOCKVALCLASSREFCHUNK pChunk = &pClass->PriorLocks;
1214 while (pChunk)
1215 {
1216 for (uint32_t i = 0; i < RT_ELEMENTS(pChunk->aRefs); i++)
1217 {
1218 RTLOCKVALCLASSINT *pClass2 = pChunk->aRefs[i].hClass;
1219 if (pClass2 != NIL_RTLOCKVALCLASS)
1220 {
1221 pChunk->aRefs[i].hClass = NIL_RTLOCKVALCLASS;
1222 rtLockValidatorClassRelease(pClass2);
1223 }
1224 }
1225
1226 PRTLOCKVALCLASSREFCHUNK pNext = pChunk->pNext;
1227 pChunk->pNext = NULL;
1228 if (pChunk != &pClass->PriorLocks)
1229 RTMemFree(pChunk);
1230 pChunk = pNext;
1231 }
1232
1233 RTMemFree(pClass);
1234}
1235
1236
1237RTDECL(RTLOCKVALCLASS) RTLockValidatorClassFindForSrcPos(PRTLOCKVALSRCPOS pSrcPos)
1238{
1239 if (g_hLockValClassTreeRWLock == NIL_RTSEMRW)
1240 rtLockValidatorLazyInit();
1241 int rcLock = RTSemRWRequestRead(g_hLockValClassTreeRWLock, RT_INDEFINITE_WAIT);
1242
1243 uint32_t uSrcPosHash = rtLockValidatorSrcPosHash(pSrcPos);
1244 RTLOCKVALCLASSINT *pClass = (RTLOCKVALCLASSINT *)RTAvllU32Get(&g_LockValClassTree, uSrcPosHash);
1245 while (pClass)
1246 {
1247 if (rtLockValidatorSrcPosCompare(&pClass->CreatePos, pSrcPos) == 0)
1248 break;
1249 pClass = (RTLOCKVALCLASSINT *)pClass->Core.pList;
1250 }
1251
1252 if (RT_SUCCESS(rcLock))
1253 RTSemRWReleaseRead(g_hLockValClassTreeRWLock);
1254 return pClass;
1255}
1256
1257
/**
 * Looks up the class for a source position, creating and inserting a new
 * autodidact class into the global tree when none exists yet.
 *
 * @returns Class handle or NIL_RTLOCKVALCLASS on creation failure.
 *
 * NOTE(review): the find and the insert are not done under one lock, so two
 * racing threads may each create a class for the same position; both get
 * inserted on the same hash-key list - presumably benign, verify if relied on.
 */
RTDECL(RTLOCKVALCLASS) RTLockValidatorClassForSrcPos(RT_SRC_POS_DECL, const char *pszNameFmt, ...)
{
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_POS_NO_ID();
    RTLOCKVALCLASS hClass = RTLockValidatorClassFindForSrcPos(&SrcPos);
    if (hClass == NIL_RTLOCKVALCLASS)
    {
        /*
         * Create a new class and insert it into the tree.
         */
        va_list va;
        va_start(va, pszNameFmt);
        int rc = RTLockValidatorClassCreateExV(&hClass, &SrcPos,
                                               true /*fAutodidact*/, true /*fRecursionOk*/, false /*fStrictReleaseOrder*/,
                                               1 /*cMsMinDeadlock*/, 1 /*cMsMinOrder*/,
                                               pszNameFmt, va);
        va_end(va);
        if (RT_SUCCESS(rc))
        {
            if (g_hLockValClassTreeRWLock == NIL_RTSEMRW)
                rtLockValidatorLazyInit();
            int rcLock = RTSemRWRequestWrite(g_hLockValClassTreeRWLock, RT_INDEFINITE_WAIT);

            Assert(!hClass->fInTree);
            hClass->fInTree = RTAvllU32Insert(&g_LockValClassTree, &hClass->Core);
            Assert(hClass->fInTree);

            if (RT_SUCCESS(rcLock))
                RTSemRWReleaseWrite(g_hLockValClassTreeRWLock);
            return hClass;
        }
    }
    return hClass;
}
1291
1292
1293RTDECL(uint32_t) RTLockValidatorClassRetain(RTLOCKVALCLASS hClass)
1294{
1295 RTLOCKVALCLASSINT *pClass = hClass;
1296 AssertPtrReturn(pClass, UINT32_MAX);
1297 AssertReturn(pClass->u32Magic == RTLOCKVALCLASS_MAGIC, UINT32_MAX);
1298 return rtLockValidatorClassRetain(pClass);
1299}
1300
1301
1302RTDECL(uint32_t) RTLockValidatorClassRelease(RTLOCKVALCLASS hClass)
1303{
1304 RTLOCKVALCLASSINT *pClass = hClass;
1305 if (pClass == NIL_RTLOCKVALCLASS)
1306 return 0;
1307 AssertPtrReturn(pClass, UINT32_MAX);
1308 AssertReturn(pClass->u32Magic == RTLOCKVALCLASS_MAGIC, UINT32_MAX);
1309 return rtLockValidatorClassRelease(pClass);
1310}
1311
1312
/**
 * Worker for rtLockValidatorClassIsPriorClass that does a linear search thru
 * all the chunks for @a pPriorClass.
 *
 * On a hit it bumps the entry's (saturating) lookup counter and, when the
 * entry is clearly hotter than the currently cached one, promotes it into
 * the class's prior-lock hash table for faster future lookups.
 *
 * @returns true / false.
 * @param   pClass          The class to search.
 * @param   pPriorClass     The class to search for.
 */
static bool rtLockValidatorClassIsPriorClassByLinearSearch(RTLOCKVALCLASSINT *pClass, RTLOCKVALCLASSINT *pPriorClass)
{
    for (PRTLOCKVALCLASSREFCHUNK pChunk = &pClass->PriorLocks; pChunk; pChunk = pChunk->pNext)
        for (uint32_t i = 0; i < RT_ELEMENTS(pChunk->aRefs); i++)
        {
            if (pChunk->aRefs[i].hClass == pPriorClass)
            {
                /* Saturating lookup counter - clamp when it gets too big. */
                uint32_t cLookups = ASMAtomicIncU32(&pChunk->aRefs[i].cLookups);
                if (RT_UNLIKELY(cLookups >= RTLOCKVALCLASSREF_MAX_LOOKUPS_FIX))
                {
                    ASMAtomicWriteU32(&pChunk->aRefs[i].cLookups, RTLOCKVALCLASSREF_MAX_LOOKUPS);
                    cLookups = RTLOCKVALCLASSREF_MAX_LOOKUPS;
                }

                /* update the hash table entry (only when noticeably hotter,
                   to avoid two entries ping-ponging over one hash slot). */
                PRTLOCKVALCLASSREF *ppHashEntry = &pClass->apPriorLocksHash[RTLOCKVALCLASS_HASH(pPriorClass)];
                if (   !(*ppHashEntry)
                    || (*ppHashEntry)->cLookups + 128 < cLookups)
                    ASMAtomicWritePtr(ppHashEntry, &pChunk->aRefs[i]);

#ifdef RTLOCKVAL_WITH_CLASS_HASH_STATS
                ASMAtomicIncU32(&pClass->cHashMisses);
#endif
                return true;
            }
        }

    return false;
}
1350
1351
/**
 * Checks if @a pPriorClass is a known prior class.
 *
 * Fast path: consult the per-class hash table of hot prior-lock references.
 * Slow path: fall back on a linear search of all reference chunks (which
 * also maintains the hash table).
 *
 * @returns true / false.
 * @param   pClass          The class to search.
 * @param   pPriorClass     The class to search for.
 */
DECL_FORCE_INLINE(bool) rtLockValidatorClassIsPriorClass(RTLOCKVALCLASSINT *pClass, RTLOCKVALCLASSINT *pPriorClass)
{
    /*
     * Hash lookup here.
     */
    PRTLOCKVALCLASSREF pRef = pClass->apPriorLocksHash[RTLOCKVALCLASS_HASH(pPriorClass)];
    if (   pRef
        && pRef->hClass == pPriorClass)
    {
        /* Saturating lookup counter, same scheme as the linear-search worker. */
        uint32_t cLookups = ASMAtomicIncU32(&pRef->cLookups);
        if (RT_UNLIKELY(cLookups >= RTLOCKVALCLASSREF_MAX_LOOKUPS_FIX))
            ASMAtomicWriteU32(&pRef->cLookups, RTLOCKVALCLASSREF_MAX_LOOKUPS);
#ifdef RTLOCKVAL_WITH_CLASS_HASH_STATS
        ASMAtomicIncU32(&pClass->cHashHits);
#endif
        return true;
    }

    return rtLockValidatorClassIsPriorClassByLinearSearch(pClass, pPriorClass);
}
1379
1380
/**
 * Adds a class to the prior list.
 *
 * Serialized by g_LockValClassTeachCS against other teachers; readers
 * (rtLockValidatorClassIsPriorClass) are lock-free, which is why entries
 * and new chunks are published with atomic operations and fully initialized
 * before being linked.
 *
 * @returns VINF_SUCCESS, VERR_NO_MEMORY or VERR_SEM_LV_WRONG_ORDER.
 * @param   pClass              The class to work on.
 * @param   pPriorClass         The class to add.
 * @param   fAutodidacticism    Whether we're teaching ourselves (true) or
 *                              somebody is teaching us via the API (false).
 * @param   pSrcPos             Where this rule was added (optional).
 */
static int rtLockValidatorClassAddPriorClass(RTLOCKVALCLASSINT *pClass, RTLOCKVALCLASSINT *pPriorClass,
                                             bool fAutodidacticism, PCRTLOCKVALSRCPOS pSrcPos)
{
    NOREF(pSrcPos);
    if (!RTCritSectIsInitialized(&g_LockValClassTeachCS))
        rtLockValidatorLazyInit();
    int rcLock = RTCritSectEnter(&g_LockValClassTeachCS);

    /*
     * Check that there are no conflict (no assert since we might race each other).
     * A reverse rule (pClass prior to pPriorClass) means adding this rule would
     * create a lock-order cycle.
     */
    int rc = VERR_SEM_LV_INTERNAL_ERROR;
    if (!rtLockValidatorClassIsPriorClass(pPriorClass, pClass))
    {
        if (!rtLockValidatorClassIsPriorClass(pClass, pPriorClass))
        {
            /*
             * Scan the table for a free entry, allocating a new chunk if necessary.
             */
            for (PRTLOCKVALCLASSREFCHUNK pChunk = &pClass->PriorLocks; ; pChunk = pChunk->pNext)
            {
                bool fDone = false;
                for (uint32_t i = 0; i < RT_ELEMENTS(pChunk->aRefs); i++)
                {
                    /* Atomically claim a NIL slot so lock-free readers never
                       see a half-initialized hClass. */
                    ASMAtomicCmpXchgHandle(&pChunk->aRefs[i].hClass, pPriorClass, NIL_RTLOCKVALCLASS, fDone);
                    if (fDone)
                    {
                        pChunk->aRefs[i].fAutodidacticism = fAutodidacticism;
                        rtLockValidatorClassRetain(pPriorClass);
                        rc = VINF_SUCCESS;
                        break;
                    }
                }
                if (fDone)
                    break;

                /* If no more chunks, allocate a new one and insert the class before linking it. */
                if (!pChunk->pNext)
                {
                    PRTLOCKVALCLASSREFCHUNK pNew = (PRTLOCKVALCLASSREFCHUNK)RTMemAlloc(sizeof(*pNew));
                    if (!pNew)
                    {
                        rc = VERR_NO_MEMORY;
                        break;
                    }
                    RTMEM_MAY_LEAK(pNew);
                    pNew->pNext = NULL;
                    for (uint32_t i = 0; i < RT_ELEMENTS(pNew->aRefs); i++)
                    {
                        pNew->aRefs[i].hClass           = NIL_RTLOCKVALCLASS;
                        pNew->aRefs[i].cLookups         = 0;
                        pNew->aRefs[i].fAutodidacticism = false;
                        pNew->aRefs[i].afReserved[0]    = false;
                        pNew->aRefs[i].afReserved[1]    = false;
                        pNew->aRefs[i].afReserved[2]    = false;
                    }

                    pNew->aRefs[0].hClass           = pPriorClass;
                    pNew->aRefs[0].fAutodidacticism = fAutodidacticism;

                    /* Publish the fully initialized chunk with a single atomic
                       pointer write. */
                    ASMAtomicWritePtr(&pChunk->pNext, pNew);
                    rtLockValidatorClassRetain(pPriorClass);
                    rc = VINF_SUCCESS;
                    break;
                }
            } /* chunk loop */
        }
        else
            /* Rule already present - nothing to do. */
            rc = VINF_SUCCESS;
    }
    else
        /* Reverse rule exists: would create a cycle.  Report or tolerate
           depending on the soft-wrong-order setting. */
        rc = !g_fLockValSoftWrongOrder ? VERR_SEM_LV_WRONG_ORDER : VINF_SUCCESS;

    if (RT_SUCCESS(rcLock))
        RTCritSectLeave(&g_LockValClassTeachCS);
    return rc;
}
1468
1469
1470RTDECL(int) RTLockValidatorClassAddPriorClass(RTLOCKVALCLASS hClass, RTLOCKVALCLASS hPriorClass)
1471{
1472 RTLOCKVALCLASSINT *pClass = hClass;
1473 AssertPtrReturn(pClass, VERR_INVALID_HANDLE);
1474 AssertReturn(pClass->u32Magic == RTLOCKVALCLASS_MAGIC, VERR_INVALID_HANDLE);
1475
1476 RTLOCKVALCLASSINT *pPriorClass = hPriorClass;
1477 AssertPtrReturn(pPriorClass, VERR_INVALID_HANDLE);
1478 AssertReturn(pPriorClass->u32Magic == RTLOCKVALCLASS_MAGIC, VERR_INVALID_HANDLE);
1479
1480 return rtLockValidatorClassAddPriorClass(pClass, pPriorClass, false /*fAutodidacticism*/, NULL);
1481}
1482
1483
1484RTDECL(int) RTLockValidatorClassEnforceStrictReleaseOrder(RTLOCKVALCLASS hClass, bool fEnabled)
1485{
1486 RTLOCKVALCLASSINT *pClass = hClass;
1487 AssertPtrReturn(pClass, VERR_INVALID_HANDLE);
1488 AssertReturn(pClass->u32Magic == RTLOCKVALCLASS_MAGIC, VERR_INVALID_HANDLE);
1489
1490 ASMAtomicWriteBool(&pClass->fStrictReleaseOrder, fEnabled);
1491 return VINF_SUCCESS;
1492}
1493
1494
1495/**
1496 * Unlinks all siblings.
1497 *
1498 * This is used during record deletion and assumes no races.
1499 *
1500 * @param pCore One of the siblings.
1501 */
1502static void rtLockValidatorUnlinkAllSiblings(PRTLOCKVALRECCORE pCore)
1503{
1504 /* ASSUMES sibling destruction doesn't involve any races and that all
1505 related records are to be disposed off now. */
1506 PRTLOCKVALRECUNION pSibling = (PRTLOCKVALRECUNION)pCore;
1507 while (pSibling)
1508 {
1509 PRTLOCKVALRECUNION volatile *ppCoreNext;
1510 switch (pSibling->Core.u32Magic)
1511 {
1512 case RTLOCKVALRECEXCL_MAGIC:
1513 case RTLOCKVALRECEXCL_MAGIC_DEAD:
1514 ppCoreNext = &pSibling->Excl.pSibling;
1515 break;
1516
1517 case RTLOCKVALRECSHRD_MAGIC:
1518 case RTLOCKVALRECSHRD_MAGIC_DEAD:
1519 ppCoreNext = &pSibling->Shared.pSibling;
1520 break;
1521
1522 default:
1523 AssertFailed();
1524 ppCoreNext = NULL;
1525 break;
1526 }
1527 if (RT_UNLIKELY(ppCoreNext))
1528 break;
1529 pSibling = ASMAtomicXchgPtrT(ppCoreNext, NULL, PRTLOCKVALRECUNION);
1530 }
1531}
1532
1533
1534RTDECL(int) RTLockValidatorRecMakeSiblings(PRTLOCKVALRECCORE pRec1, PRTLOCKVALRECCORE pRec2)
1535{
1536 /*
1537 * Validate input.
1538 */
1539 PRTLOCKVALRECUNION p1 = (PRTLOCKVALRECUNION)pRec1;
1540 PRTLOCKVALRECUNION p2 = (PRTLOCKVALRECUNION)pRec2;
1541
1542 AssertPtrReturn(p1, VERR_SEM_LV_INVALID_PARAMETER);
1543 AssertReturn( p1->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC
1544 || p1->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
1545 , VERR_SEM_LV_INVALID_PARAMETER);
1546
1547 AssertPtrReturn(p2, VERR_SEM_LV_INVALID_PARAMETER);
1548 AssertReturn( p2->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC
1549 || p2->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
1550 , VERR_SEM_LV_INVALID_PARAMETER);
1551
1552 /*
1553 * Link them (circular list).
1554 */
1555 if ( p1->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC
1556 && p2->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC)
1557 {
1558 p1->Excl.pSibling = p2;
1559 p2->Shared.pSibling = p1;
1560 }
1561 else if ( p1->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
1562 && p2->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC)
1563 {
1564 p1->Shared.pSibling = p2;
1565 p2->Excl.pSibling = p1;
1566 }
1567 else
1568 AssertFailedReturn(VERR_SEM_LV_INVALID_PARAMETER); /* unsupported mix */
1569
1570 return VINF_SUCCESS;
1571}
1572
1573
#if 0 /* unused */
/**
 * Gets the lock name for the given record.
 *
 * Nested records are dereferenced one level; orphaned or unrecognized
 * records yield a descriptive placeholder string instead of NULL.
 *
 * @returns Read-only lock name.
 * @param   pRec    The lock record.
 */
DECL_FORCE_INLINE(const char *) rtLockValidatorRecName(PRTLOCKVALRECUNION pRec)
{
    switch (pRec->Core.u32Magic)
    {
        case RTLOCKVALRECEXCL_MAGIC:
            return pRec->Excl.szName;
        case RTLOCKVALRECSHRD_MAGIC:
            return pRec->Shared.szName;
        case RTLOCKVALRECSHRDOWN_MAGIC:
            return pRec->ShrdOwner.pSharedRec ? pRec->ShrdOwner.pSharedRec->szName : "orphaned";
        case RTLOCKVALRECNEST_MAGIC:
            /* Nested record: resolve the record it refers to and recurse
               one level (manually, to avoid actual recursion). */
            pRec = rtLockValidatorReadRecUnionPtr(&pRec->Nest.pRec);
            if (VALID_PTR(pRec))
            {
                switch (pRec->Core.u32Magic)
                {
                    case RTLOCKVALRECEXCL_MAGIC:
                        return pRec->Excl.szName;
                    case RTLOCKVALRECSHRD_MAGIC:
                        return pRec->Shared.szName;
                    case RTLOCKVALRECSHRDOWN_MAGIC:
                        return pRec->ShrdOwner.pSharedRec ? pRec->ShrdOwner.pSharedRec->szName : "orphaned";
                    default:
                        return "unknown-nested";
                }
            }
            return "orphaned-nested";
        default:
            return "unknown";
    }
}
#endif /* unused */
1613
1614
#if 0 /* unused */
/**
 * Gets the class for this locking record.
 *
 * Shared-owner and nested records are followed to the record actually
 * carrying the class; validation failures yield NIL_RTLOCKVALCLASS.
 *
 * @returns Pointer to the class or NIL_RTLOCKVALCLASS.
 * @param   pRec    The lock validator record.
 */
DECLINLINE(RTLOCKVALCLASSINT *) rtLockValidatorRecGetClass(PRTLOCKVALRECUNION pRec)
{
    switch (pRec->Core.u32Magic)
    {
        case RTLOCKVALRECEXCL_MAGIC:
            return pRec->Excl.hClass;

        case RTLOCKVALRECSHRD_MAGIC:
            return pRec->Shared.hClass;

        case RTLOCKVALRECSHRDOWN_MAGIC:
        {
            /* Owner record: the class lives on the shared record it points to;
               validate that record before dereferencing further. */
            PRTLOCKVALRECSHRD pSharedRec = pRec->ShrdOwner.pSharedRec;
            if (RT_LIKELY(   VALID_PTR(pSharedRec)
                          && pSharedRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC))
                return pSharedRec->hClass;
            return NIL_RTLOCKVALCLASS;
        }

        case RTLOCKVALRECNEST_MAGIC:
        {
            /* Nested record: resolve the referenced record, which may in turn
               be exclusive or a shared-owner record. */
            PRTLOCKVALRECUNION pRealRec = pRec->Nest.pRec;
            if (VALID_PTR(pRealRec))
            {
                switch (pRealRec->Core.u32Magic)
                {
                    case RTLOCKVALRECEXCL_MAGIC:
                        return pRealRec->Excl.hClass;

                    case RTLOCKVALRECSHRDOWN_MAGIC:
                    {
                        PRTLOCKVALRECSHRD pSharedRec = pRealRec->ShrdOwner.pSharedRec;
                        if (RT_LIKELY(   VALID_PTR(pSharedRec)
                                      && pSharedRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC))
                            return pSharedRec->hClass;
                        break;
                    }

                    default:
                        AssertMsgFailed(("%p %p %#x\n", pRec, pRealRec, pRealRec->Core.u32Magic));
                        break;
                }
            }
            return NIL_RTLOCKVALCLASS;
        }

        default:
            AssertMsgFailed(("%#x\n", pRec->Core.u32Magic));
            return NIL_RTLOCKVALCLASS;
    }
}
#endif /* unused */
1674
/**
 * Gets the class for this locking record and the pointer to the one below it in
 * the stack.
 *
 * Shared-owner and nested records are followed to the record actually
 * carrying the class/sub-class; on validation failure the outputs are set to
 * NIL / RTLOCKVAL_SUB_CLASS_NONE.  All three outputs are always assigned.
 *
 * @returns Pointer to the class or NIL_RTLOCKVALCLASS.
 * @param   pRec            The lock validator record.
 * @param   puSubClass      Where to return the sub-class.
 * @param   ppDown          Where to return the pointer to the record below.
 */
DECL_FORCE_INLINE(RTLOCKVALCLASSINT *)
rtLockValidatorRecGetClassesAndDown(PRTLOCKVALRECUNION pRec, uint32_t *puSubClass, PRTLOCKVALRECUNION *ppDown)
{
    switch (pRec->Core.u32Magic)
    {
        case RTLOCKVALRECEXCL_MAGIC:
            *ppDown = pRec->Excl.pDown;
            *puSubClass = pRec->Excl.uSubClass;
            return pRec->Excl.hClass;

        case RTLOCKVALRECSHRD_MAGIC:
            /* Shared records themselves are not stack members, so no down
               pointer. */
            *ppDown = NULL;
            *puSubClass = pRec->Shared.uSubClass;
            return pRec->Shared.hClass;

        case RTLOCKVALRECSHRDOWN_MAGIC:
        {
            *ppDown = pRec->ShrdOwner.pDown;

            /* Class info lives on the shared record; validate before use. */
            PRTLOCKVALRECSHRD pSharedRec = pRec->ShrdOwner.pSharedRec;
            if (RT_LIKELY(   VALID_PTR(pSharedRec)
                          && pSharedRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC))
            {
                *puSubClass = pSharedRec->uSubClass;
                return pSharedRec->hClass;
            }
            *puSubClass = RTLOCKVAL_SUB_CLASS_NONE;
            return NIL_RTLOCKVALCLASS;
        }

        case RTLOCKVALRECNEST_MAGIC:
        {
            *ppDown = pRec->Nest.pDown;

            /* Resolve the record this nesting record refers to (exclusive or
               shared-owner). */
            PRTLOCKVALRECUNION pRealRec = pRec->Nest.pRec;
            if (VALID_PTR(pRealRec))
            {
                switch (pRealRec->Core.u32Magic)
                {
                    case RTLOCKVALRECEXCL_MAGIC:
                        *puSubClass = pRealRec->Excl.uSubClass;
                        return pRealRec->Excl.hClass;

                    case RTLOCKVALRECSHRDOWN_MAGIC:
                    {
                        PRTLOCKVALRECSHRD pSharedRec = pRealRec->ShrdOwner.pSharedRec;
                        if (RT_LIKELY(   VALID_PTR(pSharedRec)
                                      && pSharedRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC))
                        {
                            *puSubClass = pSharedRec->uSubClass;
                            return pSharedRec->hClass;
                        }
                        break;
                    }

                    default:
                        AssertMsgFailed(("%p %p %#x\n", pRec, pRealRec, pRealRec->Core.u32Magic));
                        break;
                }
            }
            *puSubClass = RTLOCKVAL_SUB_CLASS_NONE;
            return NIL_RTLOCKVALCLASS;
        }

        default:
            AssertMsgFailed(("%#x\n", pRec->Core.u32Magic));
            *ppDown = NULL;
            *puSubClass = RTLOCKVAL_SUB_CLASS_NONE;
            return NIL_RTLOCKVALCLASS;
    }
}
1755
1756
1757/**
1758 * Gets the sub-class for a lock record.
1759 *
1760 * @returns the sub-class.
1761 * @param pRec The lock validator record.
1762 */
1763DECLINLINE(uint32_t) rtLockValidatorRecGetSubClass(PRTLOCKVALRECUNION pRec)
1764{
1765 switch (pRec->Core.u32Magic)
1766 {
1767 case RTLOCKVALRECEXCL_MAGIC:
1768 return pRec->Excl.uSubClass;
1769
1770 case RTLOCKVALRECSHRD_MAGIC:
1771 return pRec->Shared.uSubClass;
1772
1773 case RTLOCKVALRECSHRDOWN_MAGIC:
1774 {
1775 PRTLOCKVALRECSHRD pSharedRec = pRec->ShrdOwner.pSharedRec;
1776 if (RT_LIKELY( VALID_PTR(pSharedRec)
1777 && pSharedRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC))
1778 return pSharedRec->uSubClass;
1779 return RTLOCKVAL_SUB_CLASS_NONE;
1780 }
1781
1782 case RTLOCKVALRECNEST_MAGIC:
1783 {
1784 PRTLOCKVALRECUNION pRealRec = pRec->Nest.pRec;
1785 if (VALID_PTR(pRealRec))
1786 {
1787 switch (pRealRec->Core.u32Magic)
1788 {
1789 case RTLOCKVALRECEXCL_MAGIC:
1790 return pRec->Excl.uSubClass;
1791
1792 case RTLOCKVALRECSHRDOWN_MAGIC:
1793 {
1794 PRTLOCKVALRECSHRD pSharedRec = pRealRec->ShrdOwner.pSharedRec;
1795 if (RT_LIKELY( VALID_PTR(pSharedRec)
1796 && pSharedRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC))
1797 return pSharedRec->uSubClass;
1798 break;
1799 }
1800
1801 default:
1802 AssertMsgFailed(("%p %p %#x\n", pRec, pRealRec, pRealRec->Core.u32Magic));
1803 break;
1804 }
1805 }
1806 return RTLOCKVAL_SUB_CLASS_NONE;
1807 }
1808
1809 default:
1810 AssertMsgFailed(("%#x\n", pRec->Core.u32Magic));
1811 return RTLOCKVAL_SUB_CLASS_NONE;
1812 }
1813}
1814
1815
1816
1817
1818/**
1819 * Calculates the depth of a lock stack.
1820 *
1821 * @returns Number of stack frames.
1822 * @param pThread The thread.
1823 */
1824static uint32_t rtLockValidatorStackDepth(PRTTHREADINT pThread)
1825{
1826 uint32_t cEntries = 0;
1827 PRTLOCKVALRECUNION pCur = rtLockValidatorReadRecUnionPtr(&pThread->LockValidator.pStackTop);
1828 while (VALID_PTR(pCur))
1829 {
1830 switch (pCur->Core.u32Magic)
1831 {
1832 case RTLOCKVALRECEXCL_MAGIC:
1833 pCur = rtLockValidatorReadRecUnionPtr(&pCur->Excl.pDown);
1834 break;
1835
1836 case RTLOCKVALRECSHRDOWN_MAGIC:
1837 pCur = rtLockValidatorReadRecUnionPtr(&pCur->ShrdOwner.pDown);
1838 break;
1839
1840 case RTLOCKVALRECNEST_MAGIC:
1841 pCur = rtLockValidatorReadRecUnionPtr(&pCur->Nest.pDown);
1842 break;
1843
1844 default:
1845 AssertMsgFailedReturn(("%#x\n", pCur->Core.u32Magic), cEntries);
1846 }
1847 cEntries++;
1848 }
1849 return cEntries;
1850}
1851
1852
#ifdef RT_STRICT
/**
 * Checks if the stack contains @a pRec.
 *
 * Strict-build helper used from assertions only.
 *
 * @returns true / false.
 * @param   pThreadSelf         The current thread.
 * @param   pRec                The lock record.
 */
static bool rtLockValidatorStackContainsRec(PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION pRec)
{
    for (PRTLOCKVALRECUNION pFrame = pThreadSelf->LockValidator.pStackTop; pFrame; /* advanced in switch */)
    {
        AssertPtrReturn(pFrame, false);
        if (pFrame == pRec)
            return true;

        switch (pFrame->Core.u32Magic)
        {
            case RTLOCKVALRECEXCL_MAGIC:
                Assert(pFrame->Excl.cRecursion >= 1);
                pFrame = pFrame->Excl.pDown;
                break;

            case RTLOCKVALRECSHRDOWN_MAGIC:
                Assert(pFrame->ShrdOwner.cRecursion >= 1);
                pFrame = pFrame->ShrdOwner.pDown;
                break;

            case RTLOCKVALRECNEST_MAGIC:
                Assert(pFrame->Nest.cRecursion > 1);
                pFrame = pFrame->Nest.pDown;
                break;

            default:
                AssertMsgFailedReturn(("%#x\n", pFrame->Core.u32Magic), false);
        }
    }
    return false;
}
#endif /* RT_STRICT */
1893
1894
1895/**
1896 * Pushes a lock record onto the stack.
1897 *
1898 * @param pThreadSelf The current thread.
1899 * @param pRec The lock record.
1900 */
1901static void rtLockValidatorStackPush(PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION pRec)
1902{
1903 Assert(pThreadSelf == RTThreadSelf());
1904 Assert(!rtLockValidatorStackContainsRec(pThreadSelf, pRec));
1905
1906 switch (pRec->Core.u32Magic)
1907 {
1908 case RTLOCKVALRECEXCL_MAGIC:
1909 Assert(pRec->Excl.cRecursion == 1);
1910 Assert(pRec->Excl.pDown == NULL);
1911 rtLockValidatorWriteRecUnionPtr(&pRec->Excl.pDown, pThreadSelf->LockValidator.pStackTop);
1912 break;
1913
1914 case RTLOCKVALRECSHRDOWN_MAGIC:
1915 Assert(pRec->ShrdOwner.cRecursion == 1);
1916 Assert(pRec->ShrdOwner.pDown == NULL);
1917 rtLockValidatorWriteRecUnionPtr(&pRec->ShrdOwner.pDown, pThreadSelf->LockValidator.pStackTop);
1918 break;
1919
1920 default:
1921 AssertMsgFailedReturnVoid(("%#x\n", pRec->Core.u32Magic));
1922 }
1923 rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pStackTop, pRec);
1924}
1925
1926
/**
 * Pops a lock record off the stack.
 *
 * The record is usually at the top; when it is not (out-of-order release),
 * the stack is walked to find the link pointing at it and the record is
 * unlinked from the middle.
 *
 * @param   pThreadSelf         The current thread.
 * @param   pRec                The lock.
 */
static void rtLockValidatorStackPop(PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION pRec)
{
    Assert(pThreadSelf == RTThreadSelf());

    /* Detach the record: remember what it pointed down to and clear the link. */
    PRTLOCKVALRECUNION pDown;
    switch (pRec->Core.u32Magic)
    {
        case RTLOCKVALRECEXCL_MAGIC:
            Assert(pRec->Excl.cRecursion == 0);
            pDown = pRec->Excl.pDown;
            rtLockValidatorWriteRecUnionPtr(&pRec->Excl.pDown, NULL); /* lazy bird */
            break;

        case RTLOCKVALRECSHRDOWN_MAGIC:
            Assert(pRec->ShrdOwner.cRecursion == 0);
            pDown = pRec->ShrdOwner.pDown;
            rtLockValidatorWriteRecUnionPtr(&pRec->ShrdOwner.pDown, NULL);
            break;

        default:
            AssertMsgFailedReturnVoid(("%#x\n", pRec->Core.u32Magic));
    }
    if (pThreadSelf->LockValidator.pStackTop == pRec)
        /* Common case: popping the top of the stack. */
        rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pStackTop, pDown);
    else
    {
        /* Find the pointer to our record and unlink ourselves. */
        PRTLOCKVALRECUNION pCur = pThreadSelf->LockValidator.pStackTop;
        while (pCur)
        {
            PRTLOCKVALRECUNION volatile *ppDown;
            switch (pCur->Core.u32Magic)
            {
                case RTLOCKVALRECEXCL_MAGIC:
                    Assert(pCur->Excl.cRecursion >= 1);
                    ppDown = &pCur->Excl.pDown;
                    break;

                case RTLOCKVALRECSHRDOWN_MAGIC:
                    Assert(pCur->ShrdOwner.cRecursion >= 1);
                    ppDown = &pCur->ShrdOwner.pDown;
                    break;

                case RTLOCKVALRECNEST_MAGIC:
                    Assert(pCur->Nest.cRecursion >= 1);
                    ppDown = &pCur->Nest.pDown;
                    break;

                default:
                    AssertMsgFailedReturnVoid(("%#x\n", pCur->Core.u32Magic));
            }
            pCur = *ppDown;
            if (pCur == pRec)
            {
                /* Found it: bypass the record in the chain. */
                rtLockValidatorWriteRecUnionPtr(ppDown, pDown);
                return;
            }
        }
        /* Record was not on the stack at all - should not happen. */
        AssertMsgFailed(("%p %p\n", pRec, pThreadSelf));
    }
}
1994
1995
1996/**
1997 * Creates and pushes lock recursion record onto the stack.
1998 *
1999 * @param pThreadSelf The current thread.
2000 * @param pRec The lock record.
2001 * @param pSrcPos Where the recursion occurred.
2002 */
2003static void rtLockValidatorStackPushRecursion(PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION pRec, PCRTLOCKVALSRCPOS pSrcPos)
2004{
2005 Assert(pThreadSelf == RTThreadSelf());
2006 Assert(rtLockValidatorStackContainsRec(pThreadSelf, pRec));
2007
2008#ifdef RTLOCKVAL_WITH_RECURSION_RECORDS
2009 /*
2010 * Allocate a new recursion record
2011 */
2012 PRTLOCKVALRECNEST pRecursionRec = pThreadSelf->LockValidator.pFreeNestRecs;
2013 if (pRecursionRec)
2014 pThreadSelf->LockValidator.pFreeNestRecs = pRecursionRec->pNextFree;
2015 else
2016 {
2017 pRecursionRec = (PRTLOCKVALRECNEST)RTMemAlloc(sizeof(*pRecursionRec));
2018 if (!pRecursionRec)
2019 return;
2020 }
2021
2022 /*
2023 * Initialize it.
2024 */
2025 switch (pRec->Core.u32Magic)
2026 {
2027 case RTLOCKVALRECEXCL_MAGIC:
2028 pRecursionRec->cRecursion = pRec->Excl.cRecursion;
2029 break;
2030
2031 case RTLOCKVALRECSHRDOWN_MAGIC:
2032 pRecursionRec->cRecursion = pRec->ShrdOwner.cRecursion;
2033 break;
2034
2035 default:
2036 AssertMsgFailed(("%#x\n", pRec->Core.u32Magic));
2037 rtLockValidatorSerializeDestructEnter();
2038 rtLockValidatorSerializeDestructLeave();
2039 RTMemFree(pRecursionRec);
2040 return;
2041 }
2042 Assert(pRecursionRec->cRecursion > 1);
2043 pRecursionRec->pRec = pRec;
2044 pRecursionRec->pDown = NULL;
2045 pRecursionRec->pNextFree = NULL;
2046 rtLockValidatorSrcPosCopy(&pRecursionRec->SrcPos, pSrcPos);
2047 pRecursionRec->Core.u32Magic = RTLOCKVALRECNEST_MAGIC;
2048
2049 /*
2050 * Link it.
2051 */
2052 pRecursionRec->pDown = pThreadSelf->LockValidator.pStackTop;
2053 rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pStackTop, (PRTLOCKVALRECUNION)pRecursionRec);
2054#endif /* RTLOCKVAL_WITH_RECURSION_RECORDS */
2055}
2056
2057
2058/**
2059 * Pops a lock recursion record off the stack.
2060 *
2061 * @param pThreadSelf The current thread.
2062 * @param pRec The lock record.
2063 */
2064static void rtLockValidatorStackPopRecursion(PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION pRec)
2065{
2066 Assert(pThreadSelf == RTThreadSelf());
2067 Assert(rtLockValidatorStackContainsRec(pThreadSelf, pRec));
2068
2069 uint32_t cRecursion;
2070 switch (pRec->Core.u32Magic)
2071 {
2072 case RTLOCKVALRECEXCL_MAGIC: cRecursion = pRec->Excl.cRecursion; break;
2073 case RTLOCKVALRECSHRDOWN_MAGIC: cRecursion = pRec->ShrdOwner.cRecursion; break;
2074 default: AssertMsgFailedReturnVoid(("%#x\n", pRec->Core.u32Magic));
2075 }
2076 Assert(cRecursion >= 1);
2077
2078#ifdef RTLOCKVAL_WITH_RECURSION_RECORDS
2079 /*
2080 * Pop the recursion record.
2081 */
2082 PRTLOCKVALRECUNION pNest = pThreadSelf->LockValidator.pStackTop;
2083 if ( pNest != NULL
2084 && pNest->Core.u32Magic == RTLOCKVALRECNEST_MAGIC
2085 && pNest->Nest.pRec == pRec
2086 )
2087 {
2088 Assert(pNest->Nest.cRecursion == cRecursion + 1);
2089 rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pStackTop, pNest->Nest.pDown);
2090 }
2091 else
2092 {
2093 /* Find the record above ours. */
2094 PRTLOCKVALRECUNION volatile *ppDown = NULL;
2095 for (;;)
2096 {
2097 AssertMsgReturnVoid(pNest, ("%p %p\n", pRec, pThreadSelf));
2098 switch (pNest->Core.u32Magic)
2099 {
2100 case RTLOCKVALRECEXCL_MAGIC:
2101 ppDown = &pNest->Excl.pDown;
2102 pNest = *ppDown;
2103 continue;
2104 case RTLOCKVALRECSHRDOWN_MAGIC:
2105 ppDown = &pNest->ShrdOwner.pDown;
2106 pNest = *ppDown;
2107 continue;
2108 case RTLOCKVALRECNEST_MAGIC:
2109 if (pNest->Nest.pRec == pRec)
2110 break;
2111 ppDown = &pNest->Nest.pDown;
2112 pNest = *ppDown;
2113 continue;
2114 default:
2115 AssertMsgFailedReturnVoid(("%#x\n", pNest->Core.u32Magic));
2116 }
2117 break; /* ugly */
2118 }
2119 Assert(pNest->Nest.cRecursion == cRecursion + 1);
2120 rtLockValidatorWriteRecUnionPtr(ppDown, pNest->Nest.pDown);
2121 }
2122
2123 /*
2124 * Invalidate and free the record.
2125 */
2126 ASMAtomicWriteU32(&pNest->Core.u32Magic, RTLOCKVALRECNEST_MAGIC);
2127 rtLockValidatorWriteRecUnionPtr(&pNest->Nest.pDown, NULL);
2128 rtLockValidatorWriteRecUnionPtr(&pNest->Nest.pRec, NULL);
2129 pNest->Nest.cRecursion = 0;
2130 pNest->Nest.pNextFree = pThreadSelf->LockValidator.pFreeNestRecs;
2131 pThreadSelf->LockValidator.pFreeNestRecs = &pNest->Nest;
2132#endif /* RTLOCKVAL_WITH_RECURSION_RECORDS */
2133}
2134
2135
2136/**
2137 * Helper for rtLockValidatorStackCheckLockingOrder that does the bitching and
2138 * returns VERR_SEM_LV_WRONG_ORDER.
2139 */
2140static int rtLockValidatorStackWrongOrder(const char *pszWhat, PCRTLOCKVALSRCPOS pSrcPos, PRTTHREADINT pThreadSelf,
2141 PRTLOCKVALRECUNION pRec1, PRTLOCKVALRECUNION pRec2,
2142 RTLOCKVALCLASSINT *pClass1, RTLOCKVALCLASSINT *pClass2)
2143
2144
2145{
2146 rtLockValComplainFirst(pszWhat, pSrcPos, pThreadSelf, pRec1, false);
2147 rtLockValComplainAboutLock("Other lock: ", pRec2, "\n");
2148 rtLockValComplainAboutClass("My class: ", pClass1, rtLockValidatorRecGetSubClass(pRec1), true /*fVerbose*/);
2149 rtLockValComplainAboutClass("Other class: ", pClass2, rtLockValidatorRecGetSubClass(pRec2), true /*fVerbose*/);
2150 rtLockValComplainAboutLockStack(pThreadSelf, 0, 0, pRec2);
2151 rtLockValComplainPanic();
2152 return !g_fLockValSoftWrongOrder ? VERR_SEM_LV_WRONG_ORDER : VINF_SUCCESS;
2153}
2154
2155
2156/**
2157 * Checks if the sub-class order is ok or not.
2158 *
2159 * Used to deal with two locks from the same class.
2160 *
2161 * @returns true if ok, false if not.
2162 * @param uSubClass1 The sub-class of the lock that is being
2163 * considered.
2164 * @param uSubClass2 The sub-class of the lock that is already being
2165 * held.
2166 */
2167DECL_FORCE_INLINE(bool) rtLockValidatorIsSubClassOrderOk(uint32_t uSubClass1, uint32_t uSubClass2)
2168{
2169 if (uSubClass1 > uSubClass2)
2170 {
2171 /* NONE kills ANY. */
2172 if (uSubClass2 == RTLOCKVAL_SUB_CLASS_NONE)
2173 return false;
2174 return true;
2175 }
2176
2177 /* ANY counters all USER values. (uSubClass1 == NONE only if they are equal) */
2178 AssertCompile(RTLOCKVAL_SUB_CLASS_ANY > RTLOCKVAL_SUB_CLASS_NONE);
2179 if (uSubClass1 == RTLOCKVAL_SUB_CLASS_ANY)
2180 return true;
2181 return false;
2182}
2183
2184
2185/**
2186 * Checks if the class and sub-class lock order is ok.
2187 *
2188 * @returns true if ok, false if not.
2189 * @param pClass1 The class of the lock that is being considered.
2190 * @param uSubClass1 The sub-class that goes with @a pClass1.
2191 * @param pClass2 The class of the lock that is already being
2192 * held.
2193 * @param uSubClass2 The sub-class that goes with @a pClass2.
2194 */
2195DECL_FORCE_INLINE(bool) rtLockValidatorIsClassOrderOk(RTLOCKVALCLASSINT *pClass1, uint32_t uSubClass1,
2196 RTLOCKVALCLASSINT *pClass2, uint32_t uSubClass2)
2197{
2198 if (pClass1 == pClass2)
2199 return rtLockValidatorIsSubClassOrderOk(uSubClass1, uSubClass2);
2200 return rtLockValidatorClassIsPriorClass(pClass1, pClass2);
2201}
2202
2203
2204/**
2205 * Checks the locking order, part two.
2206 *
2207 * @returns VINF_SUCCESS, VERR_SEM_LV_WRONG_ORDER or VERR_SEM_LV_INTERNAL_ERROR.
2208 * @param pClass The lock class.
2209 * @param uSubClass The lock sub-class.
2210 * @param pThreadSelf The current thread.
2211 * @param pRec The lock record.
2212 * @param pSrcPos The source position of the locking operation.
2213 * @param pFirstBadClass The first bad class.
2214 * @param pFirstBadRec The first bad lock record.
2215 * @param pFirstBadDown The next record on the lock stack.
2216 */
2217static int rtLockValidatorStackCheckLockingOrder2(RTLOCKVALCLASSINT * const pClass, uint32_t const uSubClass,
2218 PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION const pRec,
2219 PCRTLOCKVALSRCPOS const pSrcPos,
2220 RTLOCKVALCLASSINT * const pFirstBadClass,
2221 PRTLOCKVALRECUNION const pFirstBadRec,
2222 PRTLOCKVALRECUNION const pFirstBadDown)
2223{
2224 /*
2225 * Something went wrong, pCur is pointing to where.
2226 */
2227 if ( pClass == pFirstBadClass
2228 || rtLockValidatorClassIsPriorClass(pFirstBadClass, pClass))
2229 return rtLockValidatorStackWrongOrder("Wrong locking order!", pSrcPos, pThreadSelf,
2230 pRec, pFirstBadRec, pClass, pFirstBadClass);
2231 if (!pClass->fAutodidact)
2232 return rtLockValidatorStackWrongOrder("Wrong locking order! (unknown)", pSrcPos, pThreadSelf,
2233 pRec, pFirstBadRec, pClass, pFirstBadClass);
2234
2235 /*
2236 * This class is an autodidact, so we have to check out the rest of the stack
2237 * for direct violations.
2238 */
2239 uint32_t cNewRules = 1;
2240 PRTLOCKVALRECUNION pCur = pFirstBadDown;
2241 while (pCur)
2242 {
2243 AssertPtrReturn(pCur, VERR_SEM_LV_INTERNAL_ERROR);
2244
2245 if (pCur->Core.u32Magic == RTLOCKVALRECNEST_MAGIC)
2246 pCur = pCur->Nest.pDown;
2247 else
2248 {
2249 PRTLOCKVALRECUNION pDown;
2250 uint32_t uPriorSubClass;
2251 RTLOCKVALCLASSINT *pPriorClass = rtLockValidatorRecGetClassesAndDown(pCur, &uPriorSubClass, &pDown);
2252 if (pPriorClass != NIL_RTLOCKVALCLASS)
2253 {
2254 AssertPtrReturn(pPriorClass, VERR_SEM_LV_INTERNAL_ERROR);
2255 AssertReturn(pPriorClass->u32Magic == RTLOCKVALCLASS_MAGIC, VERR_SEM_LV_INTERNAL_ERROR);
2256 if (!rtLockValidatorIsClassOrderOk(pClass, uSubClass, pPriorClass, uPriorSubClass))
2257 {
2258 if ( pClass == pPriorClass
2259 || rtLockValidatorClassIsPriorClass(pPriorClass, pClass))
2260 return rtLockValidatorStackWrongOrder("Wrong locking order! (more than one)", pSrcPos, pThreadSelf,
2261 pRec, pCur, pClass, pPriorClass);
2262 cNewRules++;
2263 }
2264 }
2265 pCur = pDown;
2266 }
2267 }
2268
2269 if (cNewRules == 1)
2270 {
2271 /*
2272 * Special case the simple operation, hoping that it will be a
2273 * frequent case.
2274 */
2275 int rc = rtLockValidatorClassAddPriorClass(pClass, pFirstBadClass, true /*fAutodidacticism*/, pSrcPos);
2276 if (rc == VERR_SEM_LV_WRONG_ORDER)
2277 return rtLockValidatorStackWrongOrder("Wrong locking order! (race)", pSrcPos, pThreadSelf,
2278 pRec, pFirstBadRec, pClass, pFirstBadClass);
2279 Assert(RT_SUCCESS(rc) || rc == VERR_NO_MEMORY);
2280 }
2281 else
2282 {
2283 /*
2284 * We may be adding more than one rule, so we have to take the lock
2285 * before starting to add the rules. This means we have to check
2286 * the state after taking it since we might be racing someone adding
2287 * a conflicting rule.
2288 */
2289 if (!RTCritSectIsInitialized(&g_LockValClassTeachCS))
2290 rtLockValidatorLazyInit();
2291 int rcLock = RTCritSectEnter(&g_LockValClassTeachCS);
2292
2293 /* Check */
2294 pCur = pFirstBadRec;
2295 while (pCur)
2296 {
2297 if (pCur->Core.u32Magic == RTLOCKVALRECNEST_MAGIC)
2298 pCur = pCur->Nest.pDown;
2299 else
2300 {
2301 uint32_t uPriorSubClass;
2302 PRTLOCKVALRECUNION pDown;
2303 RTLOCKVALCLASSINT *pPriorClass = rtLockValidatorRecGetClassesAndDown(pCur, &uPriorSubClass, &pDown);
2304 if (pPriorClass != NIL_RTLOCKVALCLASS)
2305 {
2306 if (!rtLockValidatorIsClassOrderOk(pClass, uSubClass, pPriorClass, uPriorSubClass))
2307 {
2308 if ( pClass == pPriorClass
2309 || rtLockValidatorClassIsPriorClass(pPriorClass, pClass))
2310 {
2311 if (RT_SUCCESS(rcLock))
2312 RTCritSectLeave(&g_LockValClassTeachCS);
2313 return rtLockValidatorStackWrongOrder("Wrong locking order! (2nd)", pSrcPos, pThreadSelf,
2314 pRec, pCur, pClass, pPriorClass);
2315 }
2316 }
2317 }
2318 pCur = pDown;
2319 }
2320 }
2321
2322 /* Iterate the stack yet again, adding new rules this time. */
2323 pCur = pFirstBadRec;
2324 while (pCur)
2325 {
2326 if (pCur->Core.u32Magic == RTLOCKVALRECNEST_MAGIC)
2327 pCur = pCur->Nest.pDown;
2328 else
2329 {
2330 uint32_t uPriorSubClass;
2331 PRTLOCKVALRECUNION pDown;
2332 RTLOCKVALCLASSINT *pPriorClass = rtLockValidatorRecGetClassesAndDown(pCur, &uPriorSubClass, &pDown);
2333 if (pPriorClass != NIL_RTLOCKVALCLASS)
2334 {
2335 if (!rtLockValidatorIsClassOrderOk(pClass, uSubClass, pPriorClass, uPriorSubClass))
2336 {
2337 Assert( pClass != pPriorClass
2338 && !rtLockValidatorClassIsPriorClass(pPriorClass, pClass));
2339 int rc = rtLockValidatorClassAddPriorClass(pClass, pPriorClass, true /*fAutodidacticism*/, pSrcPos);
2340 if (RT_FAILURE(rc))
2341 {
2342 Assert(rc == VERR_NO_MEMORY);
2343 break;
2344 }
2345 Assert(rtLockValidatorClassIsPriorClass(pClass, pPriorClass));
2346 }
2347 }
2348 pCur = pDown;
2349 }
2350 }
2351
2352 if (RT_SUCCESS(rcLock))
2353 RTCritSectLeave(&g_LockValClassTeachCS);
2354 }
2355
2356 return VINF_SUCCESS;
2357}
2358
2359
2360
2361/**
2362 * Checks the locking order.
2363 *
2364 * @returns VINF_SUCCESS, VERR_SEM_LV_WRONG_ORDER or VERR_SEM_LV_INTERNAL_ERROR.
2365 * @param pClass The lock class.
2366 * @param uSubClass The lock sub-class.
2367 * @param pThreadSelf The current thread.
2368 * @param pRec The lock record.
2369 * @param pSrcPos The source position of the locking operation.
2370 */
2371static int rtLockValidatorStackCheckLockingOrder(RTLOCKVALCLASSINT * const pClass, uint32_t const uSubClass,
2372 PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION const pRec,
2373 PCRTLOCKVALSRCPOS pSrcPos)
2374{
2375 /*
2376 * Some internal paranoia first.
2377 */
2378 AssertPtr(pClass);
2379 Assert(pClass->u32Magic == RTLOCKVALCLASS_MAGIC);
2380 AssertPtr(pThreadSelf);
2381 Assert(pThreadSelf->u32Magic == RTTHREADINT_MAGIC);
2382 AssertPtr(pRec);
2383 AssertPtrNull(pSrcPos);
2384
2385 /*
2386 * Walk the stack, delegate problems to a worker routine.
2387 */
2388 PRTLOCKVALRECUNION pCur = pThreadSelf->LockValidator.pStackTop;
2389 if (!pCur)
2390 return VINF_SUCCESS;
2391
2392 for (;;)
2393 {
2394 AssertPtrReturn(pCur, VERR_SEM_LV_INTERNAL_ERROR);
2395
2396 if (pCur->Core.u32Magic == RTLOCKVALRECNEST_MAGIC)
2397 pCur = pCur->Nest.pDown;
2398 else
2399 {
2400 uint32_t uPriorSubClass;
2401 PRTLOCKVALRECUNION pDown;
2402 RTLOCKVALCLASSINT *pPriorClass = rtLockValidatorRecGetClassesAndDown(pCur, &uPriorSubClass, &pDown);
2403 if (pPriorClass != NIL_RTLOCKVALCLASS)
2404 {
2405 AssertPtrReturn(pPriorClass, VERR_SEM_LV_INTERNAL_ERROR);
2406 AssertReturn(pPriorClass->u32Magic == RTLOCKVALCLASS_MAGIC, VERR_SEM_LV_INTERNAL_ERROR);
2407 if (RT_UNLIKELY(!rtLockValidatorIsClassOrderOk(pClass, uSubClass, pPriorClass, uPriorSubClass)))
2408 return rtLockValidatorStackCheckLockingOrder2(pClass, uSubClass, pThreadSelf, pRec, pSrcPos,
2409 pPriorClass, pCur, pDown);
2410 }
2411 pCur = pDown;
2412 }
2413 if (!pCur)
2414 return VINF_SUCCESS;
2415 }
2416}
2417
2418
2419/**
2420 * Check that the lock record is the topmost one on the stack, complain and fail
2421 * if it isn't.
2422 *
2423 * @returns VINF_SUCCESS, VERR_SEM_LV_WRONG_RELEASE_ORDER or
2424 * VERR_SEM_LV_INVALID_PARAMETER.
2425 * @param pThreadSelf The current thread.
2426 * @param pRec The record.
2427 */
2428static int rtLockValidatorStackCheckReleaseOrder(PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION pRec)
2429{
2430 AssertReturn(pThreadSelf != NIL_RTTHREAD, VERR_SEM_LV_INVALID_PARAMETER);
2431 Assert(pThreadSelf == RTThreadSelf());
2432
2433 PRTLOCKVALRECUNION pTop = pThreadSelf->LockValidator.pStackTop;
2434 if (RT_LIKELY( pTop == pRec
2435 || ( pTop
2436 && pTop->Core.u32Magic == RTLOCKVALRECNEST_MAGIC
2437 && pTop->Nest.pRec == pRec) ))
2438 return VINF_SUCCESS;
2439
2440#ifdef RTLOCKVAL_WITH_RECURSION_RECORDS
2441 /* Look for a recursion record so the right frame is dumped and marked. */
2442 while (pTop)
2443 {
2444 if (pTop->Core.u32Magic == RTLOCKVALRECNEST_MAGIC)
2445 {
2446 if (pTop->Nest.pRec == pRec)
2447 {
2448 pRec = pTop;
2449 break;
2450 }
2451 pTop = pTop->Nest.pDown;
2452 }
2453 else if (pTop->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC)
2454 pTop = pTop->Excl.pDown;
2455 else if (pTop->Core.u32Magic == RTLOCKVALRECSHRDOWN_MAGIC)
2456 pTop = pTop->ShrdOwner.pDown;
2457 else
2458 break;
2459 }
2460#endif
2461
2462 rtLockValComplainFirst("Wrong release order!", NULL, pThreadSelf, pRec, true);
2463 rtLockValComplainPanic();
2464 return !g_fLockValSoftWrongOrder ? VERR_SEM_LV_WRONG_RELEASE_ORDER : VINF_SUCCESS;
2465}
2466
2467
2468/**
2469 * Checks if all owners are blocked - shared record operated in signaller mode.
2470 *
2471 * @returns true / false accordingly.
2472 * @param pRec The record.
2473 * @param pThreadSelf The current thread.
2474 */
2475DECL_FORCE_INLINE(bool) rtLockValidatorDdAreAllThreadsBlocked(PRTLOCKVALRECSHRD pRec, PRTTHREADINT pThreadSelf)
2476{
2477 PRTLOCKVALRECSHRDOWN volatile *papOwners = pRec->papOwners;
2478 uint32_t cAllocated = pRec->cAllocated;
2479 uint32_t cEntries = ASMAtomicUoReadU32(&pRec->cEntries);
2480 if (cEntries == 0)
2481 return false;
2482
2483 for (uint32_t i = 0; i < cAllocated; i++)
2484 {
2485 PRTLOCKVALRECSHRDOWN pEntry = rtLockValidatorUoReadSharedOwner(&papOwners[i]);
2486 if ( pEntry
2487 && pEntry->Core.u32Magic == RTLOCKVALRECSHRDOWN_MAGIC)
2488 {
2489 PRTTHREADINT pCurThread = rtLockValidatorReadThreadHandle(&pEntry->hThread);
2490 if (!pCurThread)
2491 return false;
2492 if (pCurThread->u32Magic != RTTHREADINT_MAGIC)
2493 return false;
2494 if ( !RTTHREAD_IS_SLEEPING(rtThreadGetState(pCurThread))
2495 && pCurThread != pThreadSelf)
2496 return false;
2497 if (--cEntries == 0)
2498 break;
2499 }
2500 else
2501 Assert(!pEntry || pEntry->Core.u32Magic == RTLOCKVALRECSHRDOWN_MAGIC_DEAD);
2502 }
2503
2504 return true;
2505}
2506
2507
2508/**
2509 * Verifies the deadlock stack before calling it a deadlock.
2510 *
2511 * @retval VERR_SEM_LV_DEADLOCK if it's a deadlock.
2512 * @retval VERR_SEM_LV_ILLEGAL_UPGRADE if it's a deadlock on the same lock.
2513 * @retval VERR_TRY_AGAIN if something changed.
2514 *
2515 * @param pStack The deadlock detection stack.
2516 * @param pThreadSelf The current thread.
2517 */
2518static int rtLockValidatorDdVerifyDeadlock(PRTLOCKVALDDSTACK pStack, PRTTHREADINT pThreadSelf)
2519{
2520 uint32_t const c = pStack->c;
2521 for (uint32_t iPass = 0; iPass < 3; iPass++)
2522 {
2523 for (uint32_t i = 1; i < c; i++)
2524 {
2525 PRTTHREADINT pThread = pStack->a[i].pThread;
2526 if (pThread->u32Magic != RTTHREADINT_MAGIC)
2527 return VERR_TRY_AGAIN;
2528 if (rtThreadGetState(pThread) != pStack->a[i].enmState)
2529 return VERR_TRY_AGAIN;
2530 if (rtLockValidatorReadRecUnionPtr(&pThread->LockValidator.pRec) != pStack->a[i].pFirstSibling)
2531 return VERR_TRY_AGAIN;
2532 /* ASSUMES the signaller records won't have siblings! */
2533 PRTLOCKVALRECUNION pRec = pStack->a[i].pRec;
2534 if ( pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
2535 && pRec->Shared.fSignaller
2536 && !rtLockValidatorDdAreAllThreadsBlocked(&pRec->Shared, pThreadSelf))
2537 return VERR_TRY_AGAIN;
2538 }
2539 RTThreadYield();
2540 }
2541
2542 if (c == 1)
2543 return VERR_SEM_LV_ILLEGAL_UPGRADE;
2544 return VERR_SEM_LV_DEADLOCK;
2545}
2546
2547
2548/**
2549 * Checks for stack cycles caused by another deadlock before returning.
2550 *
2551 * @retval VINF_SUCCESS if the stack is simply too small.
2552 * @retval VERR_SEM_LV_EXISTING_DEADLOCK if a cycle was detected.
2553 *
2554 * @param pStack The deadlock detection stack.
2555 */
2556static int rtLockValidatorDdHandleStackOverflow(PRTLOCKVALDDSTACK pStack)
2557{
2558 for (size_t i = 0; i < RT_ELEMENTS(pStack->a) - 1; i++)
2559 {
2560 PRTTHREADINT pThread = pStack->a[i].pThread;
2561 for (size_t j = i + 1; j < RT_ELEMENTS(pStack->a); j++)
2562 if (pStack->a[j].pThread == pThread)
2563 return VERR_SEM_LV_EXISTING_DEADLOCK;
2564 }
2565 static bool volatile s_fComplained = false;
2566 if (!s_fComplained)
2567 {
2568 s_fComplained = true;
2569 rtLockValComplain(RT_SRC_POS, "lock validator stack is too small! (%zu entries)\n", RT_ELEMENTS(pStack->a));
2570 }
2571 return VINF_SUCCESS;
2572}
2573
2574
2575/**
2576 * Worker for rtLockValidatorDeadlockDetection that does the actual deadlock
2577 * detection.
2578 *
2579 * @retval VINF_SUCCESS
2580 * @retval VERR_SEM_LV_DEADLOCK
2581 * @retval VERR_SEM_LV_EXISTING_DEADLOCK
2582 * @retval VERR_SEM_LV_ILLEGAL_UPGRADE
2583 * @retval VERR_TRY_AGAIN
2584 *
2585 * @param pStack The stack to use.
2586 * @param pOriginalRec The original record.
2587 * @param pThreadSelf The calling thread.
2588 */
2589static int rtLockValidatorDdDoDetection(PRTLOCKVALDDSTACK pStack, PRTLOCKVALRECUNION const pOriginalRec,
2590 PRTTHREADINT const pThreadSelf)
2591{
2592 pStack->c = 0;
2593
2594 /* We could use a single RTLOCKVALDDENTRY variable here, but the
2595 compiler may make a better job of it when using individual variables. */
2596 PRTLOCKVALRECUNION pRec = pOriginalRec;
2597 PRTLOCKVALRECUNION pFirstSibling = pOriginalRec;
2598 uint32_t iEntry = UINT32_MAX;
2599 PRTTHREADINT pThread = NIL_RTTHREAD;
2600 RTTHREADSTATE enmState = RTTHREADSTATE_RUNNING;
2601 for (uint32_t iLoop = 0; ; iLoop++)
2602 {
2603 /*
2604 * Process the current record.
2605 */
2606 RTLOCKVAL_ASSERT_PTR_ALIGN(pRec);
2607
2608 /* Find the next relevant owner thread and record. */
2609 PRTLOCKVALRECUNION pNextRec = NULL;
2610 RTTHREADSTATE enmNextState = RTTHREADSTATE_RUNNING;
2611 PRTTHREADINT pNextThread = NIL_RTTHREAD;
2612 switch (pRec->Core.u32Magic)
2613 {
2614 case RTLOCKVALRECEXCL_MAGIC:
2615 Assert(iEntry == UINT32_MAX);
2616 for (;;)
2617 {
2618 pNextThread = rtLockValidatorReadThreadHandle(&pRec->Excl.hThread);
2619 if ( !pNextThread
2620 || pNextThread->u32Magic != RTTHREADINT_MAGIC)
2621 break;
2622 enmNextState = rtThreadGetState(pNextThread);
2623 if ( !RTTHREAD_IS_SLEEPING(enmNextState)
2624 && pNextThread != pThreadSelf)
2625 break;
2626 pNextRec = rtLockValidatorReadRecUnionPtr(&pNextThread->LockValidator.pRec);
2627 if (RT_LIKELY( !pNextRec
2628 || enmNextState == rtThreadGetState(pNextThread)))
2629 break;
2630 pNextRec = NULL;
2631 }
2632 if (!pNextRec)
2633 {
2634 pRec = pRec->Excl.pSibling;
2635 if ( pRec
2636 && pRec != pFirstSibling)
2637 continue;
2638 pNextThread = NIL_RTTHREAD;
2639 }
2640 break;
2641
2642 case RTLOCKVALRECSHRD_MAGIC:
2643 if (!pRec->Shared.fSignaller)
2644 {
2645 /* Skip to the next sibling if same side. ASSUMES reader priority. */
2646 /** @todo The read side of a read-write lock is problematic if
2647 * the implementation prioritizes writers over readers because
2648 * that means we should could deadlock against current readers
2649 * if a writer showed up. If the RW sem implementation is
2650 * wrapping some native API, it's not so easy to detect when we
2651 * should do this and when we shouldn't. Checking when we
2652 * shouldn't is subject to wakeup scheduling and cannot easily
2653 * be made reliable.
2654 *
2655 * At the moment we circumvent all this mess by declaring that
2656 * readers has priority. This is TRUE on linux, but probably
2657 * isn't on Solaris and FreeBSD. */
2658 if ( pRec == pFirstSibling
2659 && pRec->Shared.pSibling != NULL
2660 && pRec->Shared.pSibling != pFirstSibling)
2661 {
2662 pRec = pRec->Shared.pSibling;
2663 Assert(iEntry == UINT32_MAX);
2664 continue;
2665 }
2666 }
2667
2668 /* Scan the owner table for blocked owners. */
2669 if ( ASMAtomicUoReadU32(&pRec->Shared.cEntries) > 0
2670 && ( !pRec->Shared.fSignaller
2671 || iEntry != UINT32_MAX
2672 || rtLockValidatorDdAreAllThreadsBlocked(&pRec->Shared, pThreadSelf)
2673 )
2674 )
2675 {
2676 uint32_t cAllocated = pRec->Shared.cAllocated;
2677 PRTLOCKVALRECSHRDOWN volatile *papOwners = pRec->Shared.papOwners;
2678 while (++iEntry < cAllocated)
2679 {
2680 PRTLOCKVALRECSHRDOWN pEntry = rtLockValidatorUoReadSharedOwner(&papOwners[iEntry]);
2681 if (pEntry)
2682 {
2683 for (;;)
2684 {
2685 if (pEntry->Core.u32Magic != RTLOCKVALRECSHRDOWN_MAGIC)
2686 break;
2687 pNextThread = rtLockValidatorReadThreadHandle(&pEntry->hThread);
2688 if ( !pNextThread
2689 || pNextThread->u32Magic != RTTHREADINT_MAGIC)
2690 break;
2691 enmNextState = rtThreadGetState(pNextThread);
2692 if ( !RTTHREAD_IS_SLEEPING(enmNextState)
2693 && pNextThread != pThreadSelf)
2694 break;
2695 pNextRec = rtLockValidatorReadRecUnionPtr(&pNextThread->LockValidator.pRec);
2696 if (RT_LIKELY( !pNextRec
2697 || enmNextState == rtThreadGetState(pNextThread)))
2698 break;
2699 pNextRec = NULL;
2700 }
2701 if (pNextRec)
2702 break;
2703 }
2704 else
2705 Assert(!pEntry || pEntry->Core.u32Magic == RTLOCKVALRECSHRDOWN_MAGIC_DEAD);
2706 }
2707 if (pNextRec)
2708 break;
2709 pNextThread = NIL_RTTHREAD;
2710 }
2711
2712 /* Advance to the next sibling, if any. */
2713 pRec = pRec->Shared.pSibling;
2714 if ( pRec != NULL
2715 && pRec != pFirstSibling)
2716 {
2717 iEntry = UINT32_MAX;
2718 continue;
2719 }
2720 break;
2721
2722 case RTLOCKVALRECEXCL_MAGIC_DEAD:
2723 case RTLOCKVALRECSHRD_MAGIC_DEAD:
2724 break;
2725
2726 case RTLOCKVALRECSHRDOWN_MAGIC:
2727 case RTLOCKVALRECSHRDOWN_MAGIC_DEAD:
2728 default:
2729 AssertMsgFailed(("%p: %#x\n", pRec, pRec->Core));
2730 break;
2731 }
2732
2733 if (pNextRec)
2734 {
2735 /*
2736 * Recurse and check for deadlock.
2737 */
2738 uint32_t i = pStack->c;
2739 if (RT_UNLIKELY(i >= RT_ELEMENTS(pStack->a)))
2740 return rtLockValidatorDdHandleStackOverflow(pStack);
2741
2742 pStack->c++;
2743 pStack->a[i].pRec = pRec;
2744 pStack->a[i].iEntry = iEntry;
2745 pStack->a[i].enmState = enmState;
2746 pStack->a[i].pThread = pThread;
2747 pStack->a[i].pFirstSibling = pFirstSibling;
2748
2749 if (RT_UNLIKELY( pNextThread == pThreadSelf
2750 && ( i != 0
2751 || pRec->Core.u32Magic != RTLOCKVALRECSHRD_MAGIC
2752 || !pRec->Shared.fSignaller) /* ASSUMES signaller records have no siblings. */
2753 )
2754 )
2755 return rtLockValidatorDdVerifyDeadlock(pStack, pThreadSelf);
2756
2757 pRec = pNextRec;
2758 pFirstSibling = pNextRec;
2759 iEntry = UINT32_MAX;
2760 enmState = enmNextState;
2761 pThread = pNextThread;
2762 }
2763 else
2764 {
2765 /*
2766 * No deadlock here, unwind the stack and deal with any unfinished
2767 * business there.
2768 */
2769 uint32_t i = pStack->c;
2770 for (;;)
2771 {
2772 /* pop */
2773 if (i == 0)
2774 return VINF_SUCCESS;
2775 i--;
2776 pRec = pStack->a[i].pRec;
2777 iEntry = pStack->a[i].iEntry;
2778
2779 /* Examine it. */
2780 uint32_t u32Magic = pRec->Core.u32Magic;
2781 if (u32Magic == RTLOCKVALRECEXCL_MAGIC)
2782 pRec = pRec->Excl.pSibling;
2783 else if (u32Magic == RTLOCKVALRECSHRD_MAGIC)
2784 {
2785 if (iEntry + 1 < pRec->Shared.cAllocated)
2786 break; /* continue processing this record. */
2787 pRec = pRec->Shared.pSibling;
2788 }
2789 else
2790 {
2791 Assert( u32Magic == RTLOCKVALRECEXCL_MAGIC_DEAD
2792 || u32Magic == RTLOCKVALRECSHRD_MAGIC_DEAD);
2793 continue;
2794 }
2795
2796 /* Any next record to advance to? */
2797 if ( !pRec
2798 || pRec == pStack->a[i].pFirstSibling)
2799 continue;
2800 iEntry = UINT32_MAX;
2801 break;
2802 }
2803
2804 /* Restore the rest of the state and update the stack. */
2805 pFirstSibling = pStack->a[i].pFirstSibling;
2806 enmState = pStack->a[i].enmState;
2807 pThread = pStack->a[i].pThread;
2808 pStack->c = i;
2809 }
2810
2811 Assert(iLoop != 1000000);
2812 }
2813}
2814
2815
2816/**
2817 * Check for the simple no-deadlock case.
2818 *
2819 * @returns true if no deadlock, false if further investigation is required.
2820 *
2821 * @param pOriginalRec The original record.
2822 */
2823DECLINLINE(int) rtLockValidatorIsSimpleNoDeadlockCase(PRTLOCKVALRECUNION pOriginalRec)
2824{
2825 if ( pOriginalRec->Excl.Core.u32Magic == RTLOCKVALRECEXCL_MAGIC
2826 && !pOriginalRec->Excl.pSibling)
2827 {
2828 PRTTHREADINT pThread = rtLockValidatorReadThreadHandle(&pOriginalRec->Excl.hThread);
2829 if ( !pThread
2830 || pThread->u32Magic != RTTHREADINT_MAGIC)
2831 return true;
2832 RTTHREADSTATE enmState = rtThreadGetState(pThread);
2833 if (!RTTHREAD_IS_SLEEPING(enmState))
2834 return true;
2835 }
2836 return false;
2837}
2838
2839
2840/**
2841 * Worker for rtLockValidatorDeadlockDetection that bitches about a deadlock.
2842 *
2843 * @param pStack The chain of locks causing the deadlock.
2844 * @param pRec The record relating to the current thread's lock
2845 * operation.
2846 * @param pThreadSelf This thread.
2847 * @param pSrcPos Where we are going to deadlock.
2848 * @param rc The return code.
2849 */
2850static void rcLockValidatorDoDeadlockComplaining(PRTLOCKVALDDSTACK pStack, PRTLOCKVALRECUNION pRec,
2851 PRTTHREADINT pThreadSelf, PCRTLOCKVALSRCPOS pSrcPos, int rc)
2852{
2853 if (!ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
2854 {
2855 const char *pszWhat;
2856 switch (rc)
2857 {
2858 case VERR_SEM_LV_DEADLOCK: pszWhat = "Detected deadlock!"; break;
2859 case VERR_SEM_LV_EXISTING_DEADLOCK: pszWhat = "Found existing deadlock!"; break;
2860 case VERR_SEM_LV_ILLEGAL_UPGRADE: pszWhat = "Illegal lock upgrade!"; break;
2861 default: AssertFailed(); pszWhat = "!unexpected rc!"; break;
2862 }
2863 rtLockValComplainFirst(pszWhat, pSrcPos, pThreadSelf, pStack->a[0].pRec != pRec ? pRec : NULL, true);
2864 rtLockValComplainMore("---- start of deadlock chain - %u entries ----\n", pStack->c);
2865 for (uint32_t i = 0; i < pStack->c; i++)
2866 {
2867 char szPrefix[24];
2868 RTStrPrintf(szPrefix, sizeof(szPrefix), "#%02u: ", i);
2869 PRTLOCKVALRECUNION pShrdOwner = NULL;
2870 if (pStack->a[i].pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC)
2871 pShrdOwner = (PRTLOCKVALRECUNION)pStack->a[i].pRec->Shared.papOwners[pStack->a[i].iEntry];
2872 if (VALID_PTR(pShrdOwner) && pShrdOwner->Core.u32Magic == RTLOCKVALRECSHRDOWN_MAGIC)
2873 {
2874 rtLockValComplainAboutLock(szPrefix, pShrdOwner, "\n");
2875 rtLockValComplainAboutLockStack(pShrdOwner->ShrdOwner.hThread, 5, 2, pShrdOwner);
2876 }
2877 else
2878 {
2879 rtLockValComplainAboutLock(szPrefix, pStack->a[i].pRec, "\n");
2880 if (pStack->a[i].pRec->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC)
2881 rtLockValComplainAboutLockStack(pStack->a[i].pRec->Excl.hThread, 5, 2, pStack->a[i].pRec);
2882 }
2883 }
2884 rtLockValComplainMore("---- end of deadlock chain ----\n");
2885 }
2886
2887 rtLockValComplainPanic();
2888}
2889
2890
2891/**
2892 * Perform deadlock detection.
2893 *
2894 * @retval VINF_SUCCESS
2895 * @retval VERR_SEM_LV_DEADLOCK
2896 * @retval VERR_SEM_LV_EXISTING_DEADLOCK
2897 * @retval VERR_SEM_LV_ILLEGAL_UPGRADE
2898 *
2899 * @param pRec The record relating to the current thread's lock
2900 * operation.
2901 * @param pThreadSelf The current thread.
2902 * @param pSrcPos The position of the current lock operation.
2903 */
2904static int rtLockValidatorDeadlockDetection(PRTLOCKVALRECUNION pRec, PRTTHREADINT pThreadSelf, PCRTLOCKVALSRCPOS pSrcPos)
2905{
2906 RTLOCKVALDDSTACK Stack;
2907 int rc = rtLockValidatorDdDoDetection(&Stack, pRec, pThreadSelf);
2908 if (RT_SUCCESS(rc))
2909 return VINF_SUCCESS;
2910
2911 if (rc == VERR_TRY_AGAIN)
2912 {
2913 for (uint32_t iLoop = 0; ; iLoop++)
2914 {
2915 rc = rtLockValidatorDdDoDetection(&Stack, pRec, pThreadSelf);
2916 if (RT_SUCCESS_NP(rc))
2917 return VINF_SUCCESS;
2918 if (rc != VERR_TRY_AGAIN)
2919 break;
2920 RTThreadYield();
2921 if (iLoop >= 3)
2922 return VINF_SUCCESS;
2923 }
2924 }
2925
2926 rcLockValidatorDoDeadlockComplaining(&Stack, pRec, pThreadSelf, pSrcPos, rc);
2927 return rc;
2928}
2929
2930
RTDECL(void) RTLockValidatorRecExclInitV(PRTLOCKVALRECEXCL pRec, RTLOCKVALCLASS hClass, uint32_t uSubClass,
                                         void *hLock, bool fEnabled, const char *pszNameFmt, va_list va)
{
    RTLOCKVAL_ASSERT_PTR_ALIGN(pRec);
    RTLOCKVAL_ASSERT_PTR_ALIGN(hLock);
    Assert(   uSubClass >= RTLOCKVAL_SUB_CLASS_USER
           || uSubClass == RTLOCKVAL_SUB_CLASS_NONE
           || uSubClass == RTLOCKVAL_SUB_CLASS_ANY);

    /* Initialize every field of the record. */
    pRec->Core.u32Magic = RTLOCKVALRECEXCL_MAGIC;
    /* Honour both the caller's flag and the global enable switch. */
    pRec->fEnabled = fEnabled && RTLockValidatorIsEnabled();
    pRec->afReserved[0] = 0;
    pRec->afReserved[1] = 0;
    pRec->afReserved[2] = 0;
    rtLockValidatorSrcPosInit(&pRec->SrcPos);
    pRec->hThread = NIL_RTTHREAD;
    pRec->pDown = NULL;
    /* Validates the class handle and takes a reference on it (if any). */
    pRec->hClass = rtLockValidatorClassValidateAndRetain(hClass);
    pRec->uSubClass = uSubClass;
    pRec->cRecursion = 0;
    pRec->hLock = hLock;
    pRec->pSibling = NULL;
    /* Format the record name; fall back to a unique anonymous name. */
    if (pszNameFmt)
        RTStrPrintfV(pRec->szName, sizeof(pRec->szName), pszNameFmt, va);
    else
    {
        static uint32_t volatile s_cAnonymous = 0;
        uint32_t i = ASMAtomicIncU32(&s_cAnonymous) - 1;
        RTStrPrintf(pRec->szName, sizeof(pRec->szName), "anon-excl-%u", i);
    }

    /* Lazy initialization. */
    if (RT_UNLIKELY(g_hLockValidatorXRoads == NIL_RTSEMXROADS))
        rtLockValidatorLazyInit();
}
2966
2967
/**
 * Initializes a caller-allocated exclusive lock validator record,
 * ellipsis variant of RTLockValidatorRecExclInitV.
 */
RTDECL(void) RTLockValidatorRecExclInit(PRTLOCKVALRECEXCL pRec, RTLOCKVALCLASS hClass, uint32_t uSubClass,
                                        void *hLock, bool fEnabled, const char *pszNameFmt, ...)
{
    va_list va;
    va_start(va, pszNameFmt);
    RTLockValidatorRecExclInitV(pRec, hClass, uSubClass, hLock, fEnabled, pszNameFmt, va);
    va_end(va);
}
2976
2977
2978RTDECL(int) RTLockValidatorRecExclCreateV(PRTLOCKVALRECEXCL *ppRec, RTLOCKVALCLASS hClass,
2979 uint32_t uSubClass, void *pvLock, bool fEnabled,
2980 const char *pszNameFmt, va_list va)
2981{
2982 PRTLOCKVALRECEXCL pRec;
2983 *ppRec = pRec = (PRTLOCKVALRECEXCL)RTMemAlloc(sizeof(*pRec));
2984 if (!pRec)
2985 return VERR_NO_MEMORY;
2986 RTLockValidatorRecExclInitV(pRec, hClass, uSubClass, pvLock, fEnabled, pszNameFmt, va);
2987 return VINF_SUCCESS;
2988}
2989
2990
/**
 * Allocates and initializes an exclusive lock validator record,
 * ellipsis variant of RTLockValidatorRecExclCreateV.
 *
 * @returns VINF_SUCCESS or VERR_NO_MEMORY.
 */
RTDECL(int) RTLockValidatorRecExclCreate(PRTLOCKVALRECEXCL *ppRec, RTLOCKVALCLASS hClass,
                                         uint32_t uSubClass, void *pvLock, bool fEnabled,
                                         const char *pszNameFmt, ...)
{
    va_list va;
    va_start(va, pszNameFmt);
    int rc = RTLockValidatorRecExclCreateV(ppRec, hClass, uSubClass, pvLock, fEnabled, pszNameFmt, va);
    va_end(va);
    return rc;
}
3001
3002
/**
 * Uninitializes an exclusive lock validator record previously initialized by
 * RTLockValidatorRecExclInit/InitV.  Does not free the record memory itself.
 */
RTDECL(void) RTLockValidatorRecExclDelete(PRTLOCKVALRECEXCL pRec)
{
    Assert(pRec->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC);

    rtLockValidatorSerializeDestructEnter();

    /** @todo Check that it's not on our stack first.  Need to make it
     *        configurable whether deleting a owned lock is acceptable? */

    /* Kill the magic first so concurrent readers immediately see the record
       as dead, then clear the owner and detach the class reference. */
    ASMAtomicWriteU32(&pRec->Core.u32Magic, RTLOCKVALRECEXCL_MAGIC_DEAD);
    ASMAtomicWriteHandle(&pRec->hThread, NIL_RTTHREAD);
    RTLOCKVALCLASS hClass;
    ASMAtomicXchgHandle(&pRec->hClass, NIL_RTLOCKVALCLASS, &hClass);
    if (pRec->pSibling)
        rtLockValidatorUnlinkAllSiblings(&pRec->Core);
    rtLockValidatorSerializeDestructLeave();
    /* Release the class reference outside the destruction lock. */
    if (hClass != NIL_RTLOCKVALCLASS)
        RTLockValidatorClassRelease(hClass);
}
3022
3023
3024RTDECL(void) RTLockValidatorRecExclDestroy(PRTLOCKVALRECEXCL *ppRec)
3025{
3026 PRTLOCKVALRECEXCL pRec = *ppRec;
3027 *ppRec = NULL;
3028 if (pRec)
3029 {
3030 RTLockValidatorRecExclDelete(pRec);
3031 RTMemFree(pRec);
3032 }
3033}
3034
3035
/**
 * Atomically changes the sub-class of an exclusive lock record.
 *
 * @returns The previous sub-class, or RTLOCKVAL_SUB_CLASS_INVALID on bad input.
 */
RTDECL(uint32_t) RTLockValidatorRecExclSetSubClass(PRTLOCKVALRECEXCL pRec, uint32_t uSubClass)
{
    AssertPtrReturn(pRec, RTLOCKVAL_SUB_CLASS_INVALID);
    AssertReturn(pRec->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, RTLOCKVAL_SUB_CLASS_INVALID);
    AssertReturn(   uSubClass >= RTLOCKVAL_SUB_CLASS_USER
                 || uSubClass == RTLOCKVAL_SUB_CLASS_NONE
                 || uSubClass == RTLOCKVAL_SUB_CLASS_ANY,
                 RTLOCKVAL_SUB_CLASS_INVALID);
    return ASMAtomicXchgU32(&pRec->uSubClass, uSubClass);
}
3046
3047
/**
 * Records hThreadSelf as the owner of @a pRec, or bumps the recursion count
 * when it already owns it.  NULL records and disabled validation are no-ops.
 */
RTDECL(void) RTLockValidatorRecExclSetOwner(PRTLOCKVALRECEXCL pRec, RTTHREAD hThreadSelf,
                                            PCRTLOCKVALSRCPOS pSrcPos, bool fFirstRecursion)
{
    PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
    if (!pRecU)
        return;
    AssertReturnVoid(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC);
    if (!pRecU->Excl.fEnabled)
        return;
    if (hThreadSelf == NIL_RTTHREAD)
    {
        /* Adopt alien (non-IPRT) threads on the fly. */
        hThreadSelf = RTThreadSelfAutoAdopt();
        AssertReturnVoid(hThreadSelf != NIL_RTTHREAD);
    }
    AssertReturnVoid(hThreadSelf->u32Magic == RTTHREADINT_MAGIC);
    Assert(hThreadSelf == RTThreadSelf());

    ASMAtomicIncS32(&hThreadSelf->LockValidator.cWriteLocks);

    if (pRecU->Excl.hThread == hThreadSelf)
    {
        /* Recursive acquisition by the current owner. */
        Assert(!fFirstRecursion); RT_NOREF_PV(fFirstRecursion);
        pRecU->Excl.cRecursion++;
        rtLockValidatorStackPushRecursion(hThreadSelf, pRecU, pSrcPos);
    }
    else
    {
        Assert(pRecU->Excl.hThread == NIL_RTTHREAD);

        /* Set source position and recursion count before publishing the
           owner handle; other code treats hThread != NIL as "owned". */
        rtLockValidatorSrcPosCopy(&pRecU->Excl.SrcPos, pSrcPos);
        ASMAtomicUoWriteU32(&pRecU->Excl.cRecursion, 1);
        ASMAtomicWriteHandle(&pRecU->Excl.hThread, hThreadSelf);

        rtLockValidatorStackPush(hThreadSelf, pRecU);
    }
}
3084
3085
/**
 * Internal worker for RTLockValidatorRecExclReleaseOwner and
 * RTLockValidatorRecExclReleaseOwnerUnchecked.
 *
 * Decrements the recursion count; on the final release it also clears the
 * owner handle and pops the record off the thread's lock stack.
 *
 * @param   pRec            The exclusive lock record being released.
 * @param   fFinalRecursion Caller's claim that this is the final release;
 *                          asserted against the actual recursion count.
 */
static void rtLockValidatorRecExclReleaseOwnerUnchecked(PRTLOCKVALRECUNION pRec, bool fFinalRecursion)
{
    RTTHREADINT *pThread = pRec->Excl.hThread;
    AssertReturnVoid(pThread != NIL_RTTHREAD);
    Assert(pThread == RTThreadSelf());

    ASMAtomicDecS32(&pThread->LockValidator.cWriteLocks);
    uint32_t c = ASMAtomicDecU32(&pRec->Excl.cRecursion);
    if (c == 0)
    {
        /* Final release: pop first, then clear the owner handle. */
        rtLockValidatorStackPop(pThread, pRec);
        ASMAtomicWriteHandle(&pRec->Excl.hThread, NIL_RTTHREAD);
    }
    else
    {
        Assert(c < UINT32_C(0xffff0000)); /* catch underflow */
        Assert(!fFinalRecursion); RT_NOREF_PV(fFinalRecursion);
        rtLockValidatorStackPopRecursion(pThread, pRec);
    }
}
3110
/**
 * Releases (one level of) exclusive ownership, validating the release order
 * first when the class demands it.
 *
 * @returns VINF_SUCCESS, or a release-order violation status.
 */
RTDECL(int) RTLockValidatorRecExclReleaseOwner(PRTLOCKVALRECEXCL pRec, bool fFinalRecursion)
{
    PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
    if (!pRecU)
        return VINF_SUCCESS;
    AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    if (!pRecU->Excl.fEnabled)
        return VINF_SUCCESS;

    /*
     * Check the release order.
     * NOTE(review): the cMsMinOrder != RT_INDEFINITE_WAIT gate presumably
     * means "order checking is active for this class" - confirm against the
     * class setup code.
     */
    if (   pRecU->Excl.hClass != NIL_RTLOCKVALCLASS
        && pRecU->Excl.hClass->fStrictReleaseOrder
        && pRecU->Excl.hClass->cMsMinOrder != RT_INDEFINITE_WAIT
       )
    {
        int rc = rtLockValidatorStackCheckReleaseOrder(pRecU->Excl.hThread, pRecU);
        if (RT_FAILURE(rc))
            return rc;
    }

    /*
     * Join paths with RTLockValidatorRecExclReleaseOwnerUnchecked.
     */
    rtLockValidatorRecExclReleaseOwnerUnchecked(pRecU, fFinalRecursion);
    return VINF_SUCCESS;
}
3139
3140
/**
 * Releases (one level of) exclusive ownership without any release-order
 * validation.
 */
RTDECL(void) RTLockValidatorRecExclReleaseOwnerUnchecked(PRTLOCKVALRECEXCL pRec)
{
    PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
    AssertReturnVoid(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC);
    if (pRecU->Excl.fEnabled)
        rtLockValidatorRecExclReleaseOwnerUnchecked(pRecU, false);
}
3148
3149
/**
 * Records an explicit recursive entry into an already-owned exclusive lock.
 *
 * @returns VINF_SUCCESS, VERR_SEM_LV_NESTED when the class forbids recursion,
 *          or VERR_SEM_LV_INVALID_PARAMETER.
 */
RTDECL(int) RTLockValidatorRecExclRecursion(PRTLOCKVALRECEXCL pRec, PCRTLOCKVALSRCPOS pSrcPos)
{
    PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
    if (!pRecU)
        return VINF_SUCCESS;
    AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    if (!pRecU->Excl.fEnabled)
        return VINF_SUCCESS;
    /* Must already be owned for a recursion to make sense. */
    AssertReturn(pRecU->Excl.hThread != NIL_RTTHREAD, VERR_SEM_LV_INVALID_PARAMETER);
    AssertReturn(pRecU->Excl.cRecursion > 0, VERR_SEM_LV_INVALID_PARAMETER);

    if (   pRecU->Excl.hClass != NIL_RTLOCKVALCLASS
        && !pRecU->Excl.hClass->fRecursionOk)
    {
        rtLockValComplainFirst("Recursion not allowed by the class!",
                               pSrcPos, pRecU->Excl.hThread, (PRTLOCKVALRECUNION)pRec, true);
        rtLockValComplainPanic();
        return VERR_SEM_LV_NESTED;
    }

    Assert(pRecU->Excl.cRecursion < _1M); /* sanity cap */
    pRecU->Excl.cRecursion++;
    rtLockValidatorStackPushRecursion(pRecU->Excl.hThread, pRecU, pSrcPos);
    return VINF_SUCCESS;
}
3175
3176
/**
 * Unwinds one level of recursion on an exclusive lock (counterpart to
 * RTLockValidatorRecExclRecursion); never releases the last level.
 *
 * @returns VINF_SUCCESS, a release-order violation status, or
 *          VERR_SEM_LV_INVALID_PARAMETER.
 */
RTDECL(int) RTLockValidatorRecExclUnwind(PRTLOCKVALRECEXCL pRec)
{
    PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
    AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    if (!pRecU->Excl.fEnabled)
        return VINF_SUCCESS;
    AssertReturn(pRecU->Excl.hThread != NIL_RTTHREAD, VERR_SEM_LV_INVALID_PARAMETER);
    Assert(pRecU->Excl.hThread == RTThreadSelf());
    /* > 1: unwinding must leave at least one level of ownership behind. */
    AssertReturn(pRecU->Excl.cRecursion > 1, VERR_SEM_LV_INVALID_PARAMETER);

    /*
     * Check the release order.
     */
    if (   pRecU->Excl.hClass != NIL_RTLOCKVALCLASS
        && pRecU->Excl.hClass->fStrictReleaseOrder
        && pRecU->Excl.hClass->cMsMinOrder != RT_INDEFINITE_WAIT
       )
    {
        int rc = rtLockValidatorStackCheckReleaseOrder(pRecU->Excl.hThread, pRecU);
        if (RT_FAILURE(rc))
            return rc;
    }

    /*
     * Perform the unwind.
     */
    pRecU->Excl.cRecursion--;
    rtLockValidatorStackPopRecursion(pRecU->Excl.hThread, pRecU);
    return VINF_SUCCESS;
}
3207
3208
/**
 * Records a mixed recursion: re-entering exclusive lock @a pRec via another
 * record @a pRecMixed (shared or exclusive).  @a pRecMixed is only validated
 * here, not otherwise used.
 *
 * @returns VINF_SUCCESS, VERR_SEM_LV_NESTED or VERR_SEM_LV_INVALID_PARAMETER.
 */
RTDECL(int) RTLockValidatorRecExclRecursionMixed(PRTLOCKVALRECEXCL pRec, PRTLOCKVALRECCORE pRecMixed, PCRTLOCKVALSRCPOS pSrcPos)
{
    PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
    AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    PRTLOCKVALRECUNION pRecMixedU = (PRTLOCKVALRECUNION)pRecMixed;
    AssertReturn(   pRecMixedU->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
                 || pRecMixedU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC
                 , VERR_SEM_LV_INVALID_PARAMETER);
    if (!pRecU->Excl.fEnabled)
        return VINF_SUCCESS;
    Assert(pRecU->Excl.hThread == RTThreadSelf());
    AssertReturn(pRecU->Excl.hThread != NIL_RTTHREAD, VERR_SEM_LV_INVALID_PARAMETER);
    AssertReturn(pRecU->Excl.cRecursion > 0, VERR_SEM_LV_INVALID_PARAMETER);

    if (   pRecU->Excl.hClass != NIL_RTLOCKVALCLASS
        && !pRecU->Excl.hClass->fRecursionOk)
    {
        rtLockValComplainFirst("Mixed recursion not allowed by the class!",
                               pSrcPos, pRecU->Excl.hThread, (PRTLOCKVALRECUNION)pRec, true);
        rtLockValComplainPanic();
        return VERR_SEM_LV_NESTED;
    }

    Assert(pRecU->Excl.cRecursion < _1M); /* sanity cap */
    pRecU->Excl.cRecursion++;
    rtLockValidatorStackPushRecursion(pRecU->Excl.hThread, pRecU, pSrcPos);

    return VINF_SUCCESS;
}
3238
3239
/**
 * Unwinds a mixed recursion recorded by RTLockValidatorRecExclRecursionMixed.
 * @a pRecMixed is only validated here, not otherwise used.
 *
 * @returns VINF_SUCCESS, a release-order violation status, or
 *          VERR_SEM_LV_INVALID_PARAMETER.
 */
RTDECL(int) RTLockValidatorRecExclUnwindMixed(PRTLOCKVALRECEXCL pRec, PRTLOCKVALRECCORE pRecMixed)
{
    PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
    AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    PRTLOCKVALRECUNION pRecMixedU = (PRTLOCKVALRECUNION)pRecMixed;
    AssertReturn(   pRecMixedU->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
                 || pRecMixedU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC
                 , VERR_SEM_LV_INVALID_PARAMETER);
    if (!pRecU->Excl.fEnabled)
        return VINF_SUCCESS;
    Assert(pRecU->Excl.hThread == RTThreadSelf());
    AssertReturn(pRecU->Excl.hThread != NIL_RTTHREAD, VERR_SEM_LV_INVALID_PARAMETER);
    /* > 1: unwinding must leave at least one level of ownership behind. */
    AssertReturn(pRecU->Excl.cRecursion > 1, VERR_SEM_LV_INVALID_PARAMETER);

    /*
     * Check the release order.
     */
    if (   pRecU->Excl.hClass != NIL_RTLOCKVALCLASS
        && pRecU->Excl.hClass->fStrictReleaseOrder
        && pRecU->Excl.hClass->cMsMinOrder != RT_INDEFINITE_WAIT
       )
    {
        int rc = rtLockValidatorStackCheckReleaseOrder(pRecU->Excl.hThread, pRecU);
        if (RT_FAILURE(rc))
            return rc;
    }

    /*
     * Perform the unwind.
     */
    pRecU->Excl.cRecursion--;
    rtLockValidatorStackPopRecursion(pRecU->Excl.hThread, pRecU);
    return VINF_SUCCESS;
}
3274
3275
/**
 * Checks the locking order before acquiring an exclusive lock.
 *
 * @returns VINF_SUCCESS, a lock order violation status, or
 *          VERR_SEM_LV_INVALID_PARAMETER / VERR_SEM_LV_INTERNAL_ERROR.
 * @param   pRec        The lock record, NULL is tolerated.
 * @param   hThreadSelf The current thread, NIL triggers auto-adoption.
 * @param   pSrcPos     The source position of the lock operation.
 * @param   cMillies    The intended wait time; waits shorter than the class
 *                      cMsMinOrder threshold skip order validation.
 */
RTDECL(int) RTLockValidatorRecExclCheckOrder(PRTLOCKVALRECEXCL pRec, RTTHREAD hThreadSelf,
                                             PCRTLOCKVALSRCPOS pSrcPos, RTMSINTERVAL cMillies)
{
    /*
     * Validate and adjust input.  Quit early if order validation is disabled.
     */
    PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
    if (!pRecU)
        return VINF_SUCCESS;
    AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    if (   !pRecU->Excl.fEnabled
        || pRecU->Excl.hClass == NIL_RTLOCKVALCLASS
        || pRecU->Excl.hClass->cMsMinOrder == RT_INDEFINITE_WAIT
        || pRecU->Excl.hClass->cMsMinOrder > cMillies)
        return VINF_SUCCESS;

    if (hThreadSelf == NIL_RTTHREAD)
    {
        hThreadSelf = RTThreadSelfAutoAdopt();
        AssertReturn(hThreadSelf != NIL_RTTHREAD, VERR_SEM_LV_INTERNAL_ERROR);
    }
    AssertReturn(hThreadSelf->u32Magic == RTTHREADINT_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    Assert(hThreadSelf == RTThreadSelf());

    /*
     * Detect recursion as it isn't subject to order restrictions.
     */
    if (pRec->hThread == hThreadSelf)
        return VINF_SUCCESS;

    return rtLockValidatorStackCheckLockingOrder(pRecU->Excl.hClass, pRecU->Excl.uSubClass, hThreadSelf, pRecU, pSrcPos);
}
3308
3309
3310RTDECL(int) RTLockValidatorRecExclCheckBlocking(PRTLOCKVALRECEXCL pRec, RTTHREAD hThreadSelf,
3311 PCRTLOCKVALSRCPOS pSrcPos, bool fRecursiveOk, RTMSINTERVAL cMillies,
3312 RTTHREADSTATE enmSleepState, bool fReallySleeping)
3313{
3314 /*
3315 * Fend off wild life.
3316 */
3317 PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
3318 if (!pRecU)
3319 return VINF_SUCCESS;
3320 AssertPtrReturn(pRecU, VERR_SEM_LV_INVALID_PARAMETER);
3321 AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
3322 if (!pRec->fEnabled)
3323 return VINF_SUCCESS;
3324
3325 PRTTHREADINT pThreadSelf = hThreadSelf;
3326 AssertPtrReturn(pThreadSelf, VERR_SEM_LV_INVALID_PARAMETER);
3327 AssertReturn(pThreadSelf->u32Magic == RTTHREADINT_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
3328 Assert(pThreadSelf == RTThreadSelf());
3329
3330 AssertReturn(RTTHREAD_IS_SLEEPING(enmSleepState), VERR_SEM_LV_INVALID_PARAMETER);
3331
3332 RTTHREADSTATE enmThreadState = rtThreadGetState(pThreadSelf);
3333 if (RT_UNLIKELY(enmThreadState != RTTHREADSTATE_RUNNING))
3334 {
3335 AssertReturn( enmThreadState == RTTHREADSTATE_TERMINATED /* rtThreadRemove uses locks too */
3336 || enmThreadState == RTTHREADSTATE_INITIALIZING /* rtThreadInsert uses locks too */
3337 , VERR_SEM_LV_INVALID_PARAMETER);
3338 enmSleepState = enmThreadState;
3339 }
3340
3341 /*
3342 * Record the location.
3343 */
3344 rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pRec, pRecU);
3345 rtLockValidatorSrcPosCopy(&pThreadSelf->LockValidator.SrcPos, pSrcPos);
3346 ASMAtomicWriteBool(&pThreadSelf->LockValidator.fInValidator, true);
3347 pThreadSelf->LockValidator.enmRecState = enmSleepState;
3348 rtThreadSetState(pThreadSelf, enmSleepState);
3349
3350 /*
3351 * Don't do deadlock detection if we're recursing.
3352 *
3353 * On some hosts we don't do recursion accounting our selves and there
3354 * isn't any other place to check for this.
3355 */
3356 int rc = VINF_SUCCESS;
3357 if (rtLockValidatorReadThreadHandle(&pRecU->Excl.hThread) == pThreadSelf)
3358 {
3359 if ( !fRecursiveOk
3360 || ( pRecU->Excl.hClass != NIL_RTLOCKVALCLASS
3361 && !pRecU->Excl.hClass->fRecursionOk))
3362 {
3363 rtLockValComplainFirst("Recursion not allowed!", pSrcPos, pThreadSelf, pRecU, true);
3364 rtLockValComplainPanic();
3365 rc = VERR_SEM_LV_NESTED;
3366 }
3367 }
3368 /*
3369 * Perform deadlock detection.
3370 */
3371 else if ( pRecU->Excl.hClass != NIL_RTLOCKVALCLASS
3372 && ( pRecU->Excl.hClass->cMsMinDeadlock > cMillies
3373 || pRecU->Excl.hClass->cMsMinDeadlock > RT_INDEFINITE_WAIT))
3374 rc = VINF_SUCCESS;
3375 else
3376 {
3377 rtLockValidatorSerializeDetectionEnter();
3378 if (!rtLockValidatorIsSimpleNoDeadlockCase(pRecU))
3379 rc = rtLockValidatorDeadlockDetection(pRecU, pThreadSelf, pSrcPos);
3380 rtLockValidatorSerializeDetectionLeave();
3381 }
3382
3383 if (RT_SUCCESS(rc))
3384 ASMAtomicWriteBool(&pThreadSelf->fReallySleeping, fReallySleeping);
3385 else
3386 {
3387 rtThreadSetState(pThreadSelf, enmThreadState);
3388 rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pRec, NULL);
3389 }
3390 ASMAtomicWriteBool(&pThreadSelf->LockValidator.fInValidator, false);
3391 return rc;
3392}
3393RT_EXPORT_SYMBOL(RTLockValidatorRecExclCheckBlocking);
3394
3395
3396RTDECL(int) RTLockValidatorRecExclCheckOrderAndBlocking(PRTLOCKVALRECEXCL pRec, RTTHREAD hThreadSelf,
3397 PCRTLOCKVALSRCPOS pSrcPos, bool fRecursiveOk, RTMSINTERVAL cMillies,
3398 RTTHREADSTATE enmSleepState, bool fReallySleeping)
3399{
3400 int rc = RTLockValidatorRecExclCheckOrder(pRec, hThreadSelf, pSrcPos, cMillies);
3401 if (RT_SUCCESS(rc))
3402 rc = RTLockValidatorRecExclCheckBlocking(pRec, hThreadSelf, pSrcPos, fRecursiveOk, cMillies,
3403 enmSleepState, fReallySleeping);
3404 return rc;
3405}
3406RT_EXPORT_SYMBOL(RTLockValidatorRecExclCheckOrderAndBlocking);
3407
3408
/**
 * Initializes a caller-allocated shared (read) lock validator record.
 *
 * @param   pRec        The record to initialize (pointer aligned).
 * @param   hClass      The lock class, may be NIL; a reference is retained.
 * @param   uSubClass   Sub-class used for lock order validation.
 * @param   hLock       The lock handle/pointer this record is associated with.
 * @param   fSignaller  Whether this is a signaller record (event style) rather
 *                      than a shared-ownership record.
 * @param   fEnabled    Whether validation is enabled; ANDed with the global
 *                      RTLockValidatorIsEnabled() setting.
 * @param   pszNameFmt  Name format string, optional.  NULL yields an
 *                      auto-generated "anon-shrd-N" name.
 * @param   va          Format arguments.
 */
RTDECL(void) RTLockValidatorRecSharedInitV(PRTLOCKVALRECSHRD pRec, RTLOCKVALCLASS hClass, uint32_t uSubClass,
                                           void *hLock, bool fSignaller, bool fEnabled, const char *pszNameFmt, va_list va)
{
    RTLOCKVAL_ASSERT_PTR_ALIGN(pRec);
    RTLOCKVAL_ASSERT_PTR_ALIGN(hLock);
    Assert(   uSubClass >= RTLOCKVAL_SUB_CLASS_USER
           || uSubClass == RTLOCKVAL_SUB_CLASS_NONE
           || uSubClass == RTLOCKVAL_SUB_CLASS_ANY);

    pRec->Core.u32Magic = RTLOCKVALRECSHRD_MAGIC;
    pRec->uSubClass     = uSubClass;
    pRec->hClass        = rtLockValidatorClassValidateAndRetain(hClass);
    pRec->hLock         = hLock;
    pRec->fEnabled      = fEnabled && RTLockValidatorIsEnabled();
    pRec->fSignaller    = fSignaller;
    pRec->pSibling      = NULL;

    /* the owner table (grown on demand, see rtLockValidatorRecSharedMakeRoom) */
    pRec->cEntries      = 0;
    pRec->iLastEntry    = 0;
    pRec->cAllocated    = 0;
    pRec->fReallocating = false;
    pRec->fPadding      = false;
    pRec->papOwners     = NULL;

    /* the name */
    if (pszNameFmt)
        RTStrPrintfV(pRec->szName, sizeof(pRec->szName), pszNameFmt, va);
    else
    {
        /* Generate a unique fallback name for anonymous records. */
        static uint32_t volatile s_cAnonymous = 0;
        uint32_t i = ASMAtomicIncU32(&s_cAnonymous) - 1;
        RTStrPrintf(pRec->szName, sizeof(pRec->szName), "anon-shrd-%u", i);
    }
}
3444
3445
/**
 * Initializes a caller-allocated shared lock validator record,
 * ellipsis variant of RTLockValidatorRecSharedInitV.
 */
RTDECL(void) RTLockValidatorRecSharedInit(PRTLOCKVALRECSHRD pRec, RTLOCKVALCLASS hClass, uint32_t uSubClass,
                                          void *hLock, bool fSignaller, bool fEnabled, const char *pszNameFmt, ...)
{
    va_list va;
    va_start(va, pszNameFmt);
    RTLockValidatorRecSharedInitV(pRec, hClass, uSubClass, hLock, fSignaller, fEnabled, pszNameFmt, va);
    va_end(va);
}
3454
3455
3456RTDECL(int) RTLockValidatorRecSharedCreateV(PRTLOCKVALRECSHRD *ppRec, RTLOCKVALCLASS hClass,
3457 uint32_t uSubClass, void *pvLock, bool fSignaller, bool fEnabled,
3458 const char *pszNameFmt, va_list va)
3459{
3460 PRTLOCKVALRECSHRD pRec;
3461 *ppRec = pRec = (PRTLOCKVALRECSHRD)RTMemAlloc(sizeof(*pRec));
3462 if (!pRec)
3463 return VERR_NO_MEMORY;
3464 RTLockValidatorRecSharedInitV(pRec, hClass, uSubClass, pvLock, fSignaller, fEnabled, pszNameFmt, va);
3465 return VINF_SUCCESS;
3466}
3467
3468
/**
 * Allocates and initializes a shared lock validator record,
 * ellipsis variant of RTLockValidatorRecSharedCreateV.
 *
 * @returns VINF_SUCCESS or VERR_NO_MEMORY.
 */
RTDECL(int) RTLockValidatorRecSharedCreate(PRTLOCKVALRECSHRD *ppRec, RTLOCKVALCLASS hClass,
                                           uint32_t uSubClass, void *pvLock, bool fSignaller, bool fEnabled,
                                           const char *pszNameFmt, ...)
{
    va_list va;
    va_start(va, pszNameFmt);
    int rc = RTLockValidatorRecSharedCreateV(ppRec, hClass, uSubClass, pvLock, fSignaller, fEnabled, pszNameFmt, va);
    va_end(va);
    return rc;
}
3479
3480
/**
 * Uninitializes a shared lock validator record.  Frees the owner table but
 * not the record structure itself.
 */
RTDECL(void) RTLockValidatorRecSharedDelete(PRTLOCKVALRECSHRD pRec)
{
    Assert(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC);

    /** @todo Check that it's not on our stack first.  Need to make it
     *        configurable whether deleting a owned lock is acceptable? */

    /*
     * Flip it into table realloc mode and take the destruction lock.
     */
    rtLockValidatorSerializeDestructEnter();
    while (!ASMAtomicCmpXchgBool(&pRec->fReallocating, true, false))
    {
        /* Someone else is reallocating the owner table.  Drop the destruction
           lock and cycle through the detection lock to let them finish, then
           try claim fReallocating again. */
        rtLockValidatorSerializeDestructLeave();

        rtLockValidatorSerializeDetectionEnter();
        rtLockValidatorSerializeDetectionLeave();

        rtLockValidatorSerializeDestructEnter();
    }

    /* Kill the magic first so concurrent readers see the record as dead. */
    ASMAtomicWriteU32(&pRec->Core.u32Magic, RTLOCKVALRECSHRD_MAGIC_DEAD);
    RTLOCKVALCLASS hClass;
    ASMAtomicXchgHandle(&pRec->hClass, NIL_RTLOCKVALCLASS, &hClass);
    if (pRec->papOwners)
    {
        /* Detach the owner table before freeing it. */
        PRTLOCKVALRECSHRDOWN volatile *papOwners = pRec->papOwners;
        ASMAtomicUoWriteNullPtr(&pRec->papOwners);
        ASMAtomicUoWriteU32(&pRec->cAllocated, 0);

        RTMemFree((void *)papOwners);
    }
    if (pRec->pSibling)
        rtLockValidatorUnlinkAllSiblings(&pRec->Core);
    ASMAtomicWriteBool(&pRec->fReallocating, false);

    rtLockValidatorSerializeDestructLeave();

    /* Release the class reference outside the destruction lock. */
    if (hClass != NIL_RTLOCKVALCLASS)
        RTLockValidatorClassRelease(hClass);
}
3522
3523
3524RTDECL(void) RTLockValidatorRecSharedDestroy(PRTLOCKVALRECSHRD *ppRec)
3525{
3526 PRTLOCKVALRECSHRD pRec = *ppRec;
3527 *ppRec = NULL;
3528 if (pRec)
3529 {
3530 RTLockValidatorRecSharedDelete(pRec);
3531 RTMemFree(pRec);
3532 }
3533}
3534
3535
/**
 * Atomically changes the sub-class of a shared lock record.
 *
 * @returns The previous sub-class, or RTLOCKVAL_SUB_CLASS_INVALID on bad input.
 */
RTDECL(uint32_t) RTLockValidatorRecSharedSetSubClass(PRTLOCKVALRECSHRD pRec, uint32_t uSubClass)
{
    AssertPtrReturn(pRec, RTLOCKVAL_SUB_CLASS_INVALID);
    AssertReturn(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, RTLOCKVAL_SUB_CLASS_INVALID);
    AssertReturn(   uSubClass >= RTLOCKVAL_SUB_CLASS_USER
                 || uSubClass == RTLOCKVAL_SUB_CLASS_NONE
                 || uSubClass == RTLOCKVAL_SUB_CLASS_ANY,
                 RTLOCKVAL_SUB_CLASS_INVALID);
    return ASMAtomicXchgU32(&pRec->uSubClass, uSubClass);
}
3546
3547
/**
 * Locates an owner (thread) in a shared lock record.
 *
 * Takes the detection serialization lock while scanning so the owner table
 * cannot be reallocated underneath us.
 *
 * @returns Pointer to the owner entry on success, NULL on failure.
 * @param   pShared     The shared lock record.
 * @param   hThread     The thread (owner) to find.
 * @param   piEntry     Where to optionally return the table index.  Optional.
 */
DECLINLINE(PRTLOCKVALRECUNION)
rtLockValidatorRecSharedFindOwner(PRTLOCKVALRECSHRD pShared, RTTHREAD hThread, uint32_t *piEntry)
{
    rtLockValidatorSerializeDetectionEnter();

    PRTLOCKVALRECSHRDOWN volatile *papOwners = pShared->papOwners;
    if (papOwners)
    {
        uint32_t const cMax = pShared->cAllocated;
        for (uint32_t iEntry = 0; iEntry < cMax; iEntry++)
        {
            /* Entries may be NULL (free slots); skip those. */
            PRTLOCKVALRECUNION pEntry = (PRTLOCKVALRECUNION)rtLockValidatorUoReadSharedOwner(&papOwners[iEntry]);
            if (pEntry && pEntry->ShrdOwner.hThread == hThread)
            {
                rtLockValidatorSerializeDetectionLeave();
                if (piEntry)
                    *piEntry = iEntry;
                return pEntry;
            }
        }
    }

    rtLockValidatorSerializeDetectionLeave();
    return NULL;
}
3582
3583
/**
 * Checks the locking order before acquiring a shared lock.
 *
 * @returns VINF_SUCCESS, a lock order violation status, or
 *          VERR_SEM_LV_INVALID_PARAMETER / VERR_SEM_LV_INTERNAL_ERROR.
 * @param   pRec        The shared lock record.
 * @param   hThreadSelf The current thread, NIL triggers auto-adoption.
 * @param   pSrcPos     The source position of the lock operation.
 * @param   cMillies    The intended wait time; waits shorter than the class
 *                      cMsMinOrder threshold skip order validation.
 */
RTDECL(int) RTLockValidatorRecSharedCheckOrder(PRTLOCKVALRECSHRD pRec, RTTHREAD hThreadSelf,
                                               PCRTLOCKVALSRCPOS pSrcPos, RTMSINTERVAL cMillies)
{
    /*
     * Validate and adjust input.  Quit early if order validation is disabled.
     */
    PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
    AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    if (   !pRecU->Shared.fEnabled
        || pRecU->Shared.hClass == NIL_RTLOCKVALCLASS
        || pRecU->Shared.hClass->cMsMinOrder == RT_INDEFINITE_WAIT
        || pRecU->Shared.hClass->cMsMinOrder > cMillies
       )
        return VINF_SUCCESS;

    if (hThreadSelf == NIL_RTTHREAD)
    {
        hThreadSelf = RTThreadSelfAutoAdopt();
        AssertReturn(hThreadSelf != NIL_RTTHREAD, VERR_SEM_LV_INTERNAL_ERROR);
    }
    AssertReturn(hThreadSelf->u32Magic == RTTHREADINT_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    Assert(hThreadSelf == RTThreadSelf());

    /*
     * Detect recursion as it isn't subject to order restrictions.
     */
    PRTLOCKVALRECUNION pEntry = rtLockValidatorRecSharedFindOwner(&pRecU->Shared, hThreadSelf, NULL);
    if (pEntry)
        return VINF_SUCCESS;

    return rtLockValidatorStackCheckLockingOrder(pRecU->Shared.hClass, pRecU->Shared.uSubClass, hThreadSelf, pRecU, pSrcPos);
}
3616
3617
/**
 * Checks for recursion violations and deadlocks before blocking on a shared
 * lock, and records the pending-wait state on the thread.
 *
 * @returns VINF_SUCCESS to go ahead with the wait, VERR_SEM_LV_NESTED on
 *          illegal recursion, deadlock statuses from the detector, or
 *          VERR_SEM_LV_INVALID_PARAMETER.
 * @param   pRec            The shared lock record.
 * @param   hThreadSelf     The current thread (must be valid, not NIL).
 * @param   pSrcPos         The source position of the lock operation.
 * @param   fRecursiveOk    Whether recursive entry is acceptable here.
 * @param   cMillies        The intended wait time; waits shorter than the
 *                          class cMsMinDeadlock threshold skip detection.
 * @param   enmSleepState   The sleep state to put the thread into.
 * @param   fReallySleeping Whether the caller will really block afterwards.
 */
RTDECL(int) RTLockValidatorRecSharedCheckBlocking(PRTLOCKVALRECSHRD pRec, RTTHREAD hThreadSelf,
                                                  PCRTLOCKVALSRCPOS pSrcPos, bool fRecursiveOk, RTMSINTERVAL cMillies,
                                                  RTTHREADSTATE enmSleepState, bool fReallySleeping)
{
    /*
     * Fend off wild life.
     */
    PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
    AssertPtrReturn(pRecU, VERR_SEM_LV_INVALID_PARAMETER);
    AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    if (!pRecU->Shared.fEnabled)
        return VINF_SUCCESS;

    PRTTHREADINT pThreadSelf = hThreadSelf;
    AssertPtrReturn(pThreadSelf, VERR_SEM_LV_INVALID_PARAMETER);
    AssertReturn(pThreadSelf->u32Magic == RTTHREADINT_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    Assert(pThreadSelf == RTThreadSelf());

    AssertReturn(RTTHREAD_IS_SLEEPING(enmSleepState), VERR_SEM_LV_INVALID_PARAMETER);

    RTTHREADSTATE enmThreadState = rtThreadGetState(pThreadSelf);
    if (RT_UNLIKELY(enmThreadState != RTTHREADSTATE_RUNNING))
    {
        AssertReturn(   enmThreadState == RTTHREADSTATE_TERMINATED   /* rtThreadRemove uses locks too */
                     || enmThreadState == RTTHREADSTATE_INITIALIZING /* rtThreadInsert uses locks too */
                     , VERR_SEM_LV_INVALID_PARAMETER);
        enmSleepState = enmThreadState; /* preserve the special state */
    }

    /*
     * Record the location.
     */
    rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pRec, pRecU);
    rtLockValidatorSrcPosCopy(&pThreadSelf->LockValidator.SrcPos, pSrcPos);
    ASMAtomicWriteBool(&pThreadSelf->LockValidator.fInValidator, true);
    pThreadSelf->LockValidator.enmRecState = enmSleepState;
    rtThreadSetState(pThreadSelf, enmSleepState);

    /*
     * Don't do deadlock detection if we're recursing.
     * (Signaller records have no owner-based recursion, so skip the lookup.)
     */
    int rc = VINF_SUCCESS;
    PRTLOCKVALRECUNION pEntry = !pRecU->Shared.fSignaller
                              ? rtLockValidatorRecSharedFindOwner(&pRecU->Shared, pThreadSelf, NULL)
                              : NULL;
    if (pEntry)
    {
        if (   !fRecursiveOk
            || (   pRec->hClass
                && !pRec->hClass->fRecursionOk)
           )
        {
            rtLockValComplainFirst("Recursion not allowed!", pSrcPos, pThreadSelf, pRecU, true);
            rtLockValComplainPanic();
            rc = VERR_SEM_LV_NESTED;
        }
    }
    /*
     * Perform deadlock detection.
     * (cMsMinDeadlock == RT_INDEFINITE_WAIT means detection is disabled for
     * the class; shorter-than-threshold waits also skip it.)
     */
    else if (   pRec->hClass
             && (   pRec->hClass->cMsMinDeadlock == RT_INDEFINITE_WAIT
                 || pRec->hClass->cMsMinDeadlock > cMillies))
        rc = VINF_SUCCESS;
    else
    {
        /* Serialize: the detection worker reads volatile structures (e.g. the
           papOwners table) that may be reallocated concurrently. */
        rtLockValidatorSerializeDetectionEnter();
        if (!rtLockValidatorIsSimpleNoDeadlockCase(pRecU))
            rc = rtLockValidatorDeadlockDetection(pRecU, pThreadSelf, pSrcPos);
        rtLockValidatorSerializeDetectionLeave();
    }

    if (RT_SUCCESS(rc))
        ASMAtomicWriteBool(&pThreadSelf->fReallySleeping, fReallySleeping);
    else
    {
        /* Roll the thread state back so the caller can bail out. */
        rtThreadSetState(pThreadSelf, enmThreadState);
        rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pRec, NULL);
    }
    ASMAtomicWriteBool(&pThreadSelf->LockValidator.fInValidator, false);
    return rc;
}
RT_EXPORT_SYMBOL(RTLockValidatorRecSharedCheckBlocking);
3701
3702
3703RTDECL(int) RTLockValidatorRecSharedCheckOrderAndBlocking(PRTLOCKVALRECSHRD pRec, RTTHREAD hThreadSelf,
3704 PCRTLOCKVALSRCPOS pSrcPos, bool fRecursiveOk, RTMSINTERVAL cMillies,
3705 RTTHREADSTATE enmSleepState, bool fReallySleeping)
3706{
3707 int rc = RTLockValidatorRecSharedCheckOrder(pRec, hThreadSelf, pSrcPos, cMillies);
3708 if (RT_SUCCESS(rc))
3709 rc = RTLockValidatorRecSharedCheckBlocking(pRec, hThreadSelf, pSrcPos, fRecursiveOk, cMillies,
3710 enmSleepState, fReallySleeping);
3711 return rc;
3712}
3713RT_EXPORT_SYMBOL(RTLockValidatorRecSharedCheckOrderAndBlocking);
3714
3715
/**
 * Allocates and initializes an owner entry for the shared lock record.
 *
 * Prefers a free slot from the thread's statically allocated pool
 * (aShrdOwners) and only falls back to the heap when the pool is full.
 *
 * @returns The new owner entry.  NULL if out of memory.
 * @param   pRec            The shared lock record.
 * @param   pThreadSelf     The calling thread and owner.  Used for record
 *                          initialization and allocation.
 * @param   pSrcPos         The source position.
 */
DECLINLINE(PRTLOCKVALRECUNION)
rtLockValidatorRecSharedAllocOwner(PRTLOCKVALRECSHRD pRec, PRTTHREADINT pThreadSelf, PCRTLOCKVALSRCPOS pSrcPos)
{
    PRTLOCKVALRECUNION pEntry;

    /*
     * Check if the thread has any statically allocated records we can easily
     * make use of.
     */
    unsigned iEntry = ASMBitFirstSetU32(ASMAtomicUoReadU32(&pThreadSelf->LockValidator.bmFreeShrdOwners));
    if (   iEntry > 0
        && ASMAtomicBitTestAndClear(&pThreadSelf->LockValidator.bmFreeShrdOwners, iEntry - 1))
    {
        /* Claimed a static slot; keep the thread alive while it's in use. */
        pEntry = (PRTLOCKVALRECUNION)&pThreadSelf->LockValidator.aShrdOwners[iEntry - 1];
        Assert(!pEntry->ShrdOwner.fReserved);
        pEntry->ShrdOwner.fStaticAlloc = true;
        rtThreadGet(pThreadSelf);
    }
    else
    {
        pEntry = (PRTLOCKVALRECUNION)RTMemAlloc(sizeof(RTLOCKVALRECSHRDOWN));
        if (RT_UNLIKELY(!pEntry))
            return NULL;
        pEntry->ShrdOwner.fStaticAlloc = false;
    }

    pEntry->Core.u32Magic        = RTLOCKVALRECSHRDOWN_MAGIC;
    pEntry->ShrdOwner.cRecursion = 1;
    pEntry->ShrdOwner.fReserved  = true;
    pEntry->ShrdOwner.hThread    = pThreadSelf;
    pEntry->ShrdOwner.pDown      = NULL;
    pEntry->ShrdOwner.pSharedRec = pRec;
#if HC_ARCH_BITS == 32
    pEntry->ShrdOwner.pvReserved = NULL;
#endif
    if (pSrcPos)
        pEntry->ShrdOwner.SrcPos = *pSrcPos;
    else
        rtLockValidatorSrcPosInit(&pEntry->ShrdOwner.SrcPos);
    return pEntry;
}
3766
3767
/**
 * Frees an owner entry allocated by rtLockValidatorRecSharedAllocOwner.
 *
 * Static-pool entries are returned to the owning thread's free bitmap (and
 * the thread reference taken at allocation time is released); heap entries
 * are freed after cycling through the destruction lock.
 *
 * @param   pEntry      The owner entry.  NULL is quietly ignored.
 */
DECLINLINE(void) rtLockValidatorRecSharedFreeOwner(PRTLOCKVALRECSHRDOWN pEntry)
{
    if (pEntry)
    {
        Assert(pEntry->Core.u32Magic == RTLOCKVALRECSHRDOWN_MAGIC);
        ASMAtomicWriteU32(&pEntry->Core.u32Magic, RTLOCKVALRECSHRDOWN_MAGIC_DEAD);

        PRTTHREADINT pThread;
        ASMAtomicXchgHandle(&pEntry->hThread, NIL_RTTHREAD, &pThread);

        Assert(pEntry->fReserved);
        pEntry->fReserved = false;

        if (pEntry->fStaticAlloc)
        {
            AssertPtrReturnVoid(pThread);
            AssertReturnVoid(pThread->u32Magic == RTTHREADINT_MAGIC);

            /* Give the slot back to the thread's static pool. */
            uintptr_t iEntry = pEntry - &pThread->LockValidator.aShrdOwners[0];
            AssertReleaseReturnVoid(iEntry < RT_ELEMENTS(pThread->LockValidator.aShrdOwners));

            Assert(!ASMBitTest(&pThread->LockValidator.bmFreeShrdOwners, (int32_t)iEntry));
            ASMAtomicBitSet(&pThread->LockValidator.bmFreeShrdOwners, (int32_t)iEntry);

            rtThreadRelease(pThread);
        }
        else
        {
            /* NOTE(review): cycling through the destruction lock presumably
               waits out concurrent code still referencing the entry before
               freeing it - confirm against the serialization scheme. */
            rtLockValidatorSerializeDestructEnter();
            rtLockValidatorSerializeDestructLeave();

            RTMemFree(pEntry);
        }
    }
}
3808
3809
/**
 * Make more room in the table.
 *
 * Caller holds the detection lock on entry.  The function temporarily drops
 * it and takes the destruction lock to reallocate the owner table, then
 * re-acquires the detection lock on the success path.
 *
 * @retval  true on success (detection lock held again).
 * @retval  false if we're out of memory or running into a bad race condition
 *          (probably a bug somewhere).  No longer holding the lock.
 *
 * @param   pShared     The shared lock record.
 */
static bool rtLockValidatorRecSharedMakeRoom(PRTLOCKVALRECSHRD pShared)
{
    for (unsigned i = 0; i < 1000; i++)
    {
        /*
         * Switch to the other data access direction.
         */
        rtLockValidatorSerializeDetectionLeave();
        if (i >= 10)
        {
            /* NOTE(review): this Assert fires in debug builds exactly when i
               first reaches 10 resp. 100 - presumably a deliberate "spinning
               too long" breadcrumb; confirm intent. */
            Assert(i != 10 && i != 100);
            /* Sleeps 0 ms (yield) for 10 <= i < 100, 1 ms from i >= 100 on. */
            RTThreadSleep(i >= 100);
        }
        rtLockValidatorSerializeDestructEnter();

        /*
         * Try grab the privilege to reallocating the table.
         */
        if (   pShared->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
            && ASMAtomicCmpXchgBool(&pShared->fReallocating, true, false))
        {
            uint32_t cAllocated = pShared->cAllocated;
            if (cAllocated < pShared->cEntries)
            {
                /*
                 * Ok, still not enough space.  Reallocate the table.
                 */
#if 0  /** @todo enable this after making sure growing works flawlessly. */
                uint32_t cInc = RT_ALIGN_32(pShared->cEntries - cAllocated, 16);
#else
                uint32_t cInc = RT_ALIGN_32(pShared->cEntries - cAllocated, 1);
#endif
                PRTLOCKVALRECSHRDOWN *papOwners;
                papOwners = (PRTLOCKVALRECSHRDOWN *)RTMemRealloc((void *)pShared->papOwners,
                                                                 (cAllocated + cInc) * sizeof(void *));
                if (!papOwners)
                {
                    ASMAtomicWriteBool(&pShared->fReallocating, false);
                    rtLockValidatorSerializeDestructLeave();
                    /* RTMemRealloc will assert */
                    return false;
                }

                /* NULL the new slots before publishing the table. */
                while (cInc-- > 0)
                {
                    papOwners[cAllocated] = NULL;
                    cAllocated++;
                }

                ASMAtomicWritePtr(&pShared->papOwners, papOwners);
                ASMAtomicWriteU32(&pShared->cAllocated, cAllocated);
            }
            ASMAtomicWriteBool(&pShared->fReallocating, false);
        }
        rtLockValidatorSerializeDestructLeave();

        rtLockValidatorSerializeDetectionEnter();
        if (RT_UNLIKELY(pShared->Core.u32Magic != RTLOCKVALRECSHRD_MAGIC))
            break; /* record destroyed while we were juggling locks */

        if (pShared->cAllocated >= pShared->cEntries)
            return true;
    }

    rtLockValidatorSerializeDetectionLeave();
    AssertFailed(); /* too many iterations or destroyed while racing. */
    return false;
}
3887
3888
/**
 * Adds an owner entry to a shared lock record.
 *
 * Reserves a slot by bumping cEntries first (growing the table via
 * rtLockValidatorRecSharedMakeRoom if needed), then claims any free (NULL)
 * slot with a compare-and-exchange.  All of this happens while holding the
 * detection lock.
 *
 * @returns true on success, false on serious race or if we're out of memory.
 * @param   pShared     The shared lock record.
 * @param   pEntry      The owner entry.
 */
DECLINLINE(bool) rtLockValidatorRecSharedAddOwner(PRTLOCKVALRECSHRD pShared, PRTLOCKVALRECSHRDOWN pEntry)
{
    rtLockValidatorSerializeDetectionEnter();
    if (RT_LIKELY(pShared->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC)) /* paranoia */
    {
        /* Reserve our entry; grow the table if the count went past the
           allocation.  Note: the increment is kept even on the success
           path -- the count is our reservation. */
        if (   ASMAtomicIncU32(&pShared->cEntries) > pShared->cAllocated /** @todo add fudge */
            && !rtLockValidatorRecSharedMakeRoom(pShared))
            return false; /* the worker leaves the lock on failure */

        /* Claim the first free slot via CAS; since cEntries <= cAllocated
           at this point, a NULL slot should exist. */
        PRTLOCKVALRECSHRDOWN volatile *papOwners = pShared->papOwners;
        uint32_t const cMax = pShared->cAllocated;
        for (unsigned i = 0; i < 100; i++)
        {
            for (uint32_t iEntry = 0; iEntry < cMax; iEntry++)
            {
                if (ASMAtomicCmpXchgPtr(&papOwners[iEntry], pEntry, NULL))
                {
                    rtLockValidatorSerializeDetectionLeave();
                    return true;
                }
            }
            Assert(i != 25);
        }
        AssertFailed();
    }
    rtLockValidatorSerializeDetectionLeave();
    return false;
}
3924
3925
/**
 * Remove an owner entry from a shared lock record and free it.
 *
 * @param   pShared     The shared lock record.
 * @param   pEntry      The owner entry to remove.
 * @param   iEntry      The last known index (may be stale; a full table scan
 *                      is used as fallback).
 */
DECLINLINE(void) rtLockValidatorRecSharedRemoveAndFreeOwner(PRTLOCKVALRECSHRD pShared, PRTLOCKVALRECSHRDOWN pEntry,
                                                            uint32_t iEntry)
{
    /*
     * Remove it from the table.
     */
    rtLockValidatorSerializeDetectionEnter();
    AssertReturnVoidStmt(pShared->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, rtLockValidatorSerializeDetectionLeave());
    if (RT_UNLIKELY(   iEntry >= pShared->cAllocated
                    || !ASMAtomicCmpXchgPtr(&pShared->papOwners[iEntry], NULL, pEntry)))
    {
        /* The cached index didn't pan out; scan the whole table for the
           entry instead.  This shouldn't happen yet... */
        AssertFailed();
        PRTLOCKVALRECSHRDOWN volatile *papOwners = pShared->papOwners;
        uint32_t const cMax = pShared->cAllocated;
        for (iEntry = 0; iEntry < cMax; iEntry++)
            if (ASMAtomicCmpXchgPtr(&papOwners[iEntry], NULL, pEntry))
                break;
        AssertReturnVoidStmt(iEntry < cMax, rtLockValidatorSerializeDetectionLeave());
    }
    /* A set bit 31 would indicate an underflowed counter. */
    uint32_t cNow = ASMAtomicDecU32(&pShared->cEntries);
    Assert(!(cNow & RT_BIT_32(31))); NOREF(cNow);
    rtLockValidatorSerializeDetectionLeave();

    /*
     * Successfully removed, now free it (outside the detection lock).
     */
    rtLockValidatorRecSharedFreeOwner(pEntry);
}
3962
3963
/**
 * Resets the owner table of a signaller record: frees all current owner
 * entries and, when @a hThread is given, installs it as the sole owner.
 * Only valid for records with fSignaller set.
 */
RTDECL(void) RTLockValidatorRecSharedResetOwner(PRTLOCKVALRECSHRD pRec, RTTHREAD hThread, PCRTLOCKVALSRCPOS pSrcPos)
{
    AssertReturnVoid(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC);
    if (!pRec->fEnabled)
        return;
    AssertReturnVoid(hThread == NIL_RTTHREAD || hThread->u32Magic == RTTHREADINT_MAGIC);
    AssertReturnVoid(pRec->fSignaller);

    /*
     * Free all current owners.
     */
    rtLockValidatorSerializeDetectionEnter();
    while (ASMAtomicUoReadU32(&pRec->cEntries) > 0)
    {
        AssertReturnVoidStmt(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, rtLockValidatorSerializeDetectionLeave());
        uint32_t iEntry = 0;
        uint32_t cEntries = pRec->cAllocated;
        PRTLOCKVALRECSHRDOWN volatile *papEntries = pRec->papOwners;
        while (iEntry < cEntries)
        {
            /* Detach the entry atomically so nobody else sees it while we
               drop the detection lock to free it. */
            PRTLOCKVALRECSHRDOWN pEntry = ASMAtomicXchgPtrT(&papEntries[iEntry], NULL, PRTLOCKVALRECSHRDOWN);
            if (pEntry)
            {
                ASMAtomicDecU32(&pRec->cEntries);
                rtLockValidatorSerializeDetectionLeave();

                rtLockValidatorRecSharedFreeOwner(pEntry);

                /* Re-enter and re-read the table pointers: papOwners may
                   have been reallocated while the lock wasn't held. */
                rtLockValidatorSerializeDetectionEnter();
                if (ASMAtomicUoReadU32(&pRec->cEntries) == 0)
                    break;
                cEntries = pRec->cAllocated;
                papEntries = pRec->papOwners;
            }
            iEntry++;
        }
    }
    rtLockValidatorSerializeDetectionLeave();

    if (hThread != NIL_RTTHREAD)
    {
        /*
         * Allocate a new owner entry and insert it into the table.
         */
        PRTLOCKVALRECUNION pEntry = rtLockValidatorRecSharedAllocOwner(pRec, hThread, pSrcPos);
        if (   pEntry
            && !rtLockValidatorRecSharedAddOwner(pRec, &pEntry->ShrdOwner))
            rtLockValidatorRecSharedFreeOwner(&pEntry->ShrdOwner);
    }
}
RT_EXPORT_SYMBOL(RTLockValidatorRecSharedResetOwner);
4015
4016
4017RTDECL(void) RTLockValidatorRecSharedAddOwner(PRTLOCKVALRECSHRD pRec, RTTHREAD hThread, PCRTLOCKVALSRCPOS pSrcPos)
4018{
4019 AssertReturnVoid(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC);
4020 if (!pRec->fEnabled)
4021 return;
4022 if (hThread == NIL_RTTHREAD)
4023 {
4024 hThread = RTThreadSelfAutoAdopt();
4025 AssertReturnVoid(hThread != NIL_RTTHREAD);
4026 }
4027 AssertReturnVoid(hThread->u32Magic == RTTHREADINT_MAGIC);
4028
4029 /*
4030 * Recursive?
4031 *
4032 * Note! This code can be optimized to try avoid scanning the table on
4033 * insert. However, that's annoying work that makes the code big,
4034 * so it can wait til later sometime.
4035 */
4036 PRTLOCKVALRECUNION pEntry = rtLockValidatorRecSharedFindOwner(pRec, hThread, NULL);
4037 if (pEntry)
4038 {
4039 Assert(!pRec->fSignaller);
4040 pEntry->ShrdOwner.cRecursion++;
4041 rtLockValidatorStackPushRecursion(hThread, pEntry, pSrcPos);
4042 return;
4043 }
4044
4045 /*
4046 * Allocate a new owner entry and insert it into the table.
4047 */
4048 pEntry = rtLockValidatorRecSharedAllocOwner(pRec, hThread, pSrcPos);
4049 if (pEntry)
4050 {
4051 if (rtLockValidatorRecSharedAddOwner(pRec, &pEntry->ShrdOwner))
4052 {
4053 if (!pRec->fSignaller)
4054 rtLockValidatorStackPush(hThread, pEntry);
4055 }
4056 else
4057 rtLockValidatorRecSharedFreeOwner(&pEntry->ShrdOwner);
4058 }
4059}
4060RT_EXPORT_SYMBOL(RTLockValidatorRecSharedAddOwner);
4061
4062
4063RTDECL(void) RTLockValidatorRecSharedRemoveOwner(PRTLOCKVALRECSHRD pRec, RTTHREAD hThread)
4064{
4065 AssertReturnVoid(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC);
4066 if (!pRec->fEnabled)
4067 return;
4068 if (hThread == NIL_RTTHREAD)
4069 {
4070 hThread = RTThreadSelfAutoAdopt();
4071 AssertReturnVoid(hThread != NIL_RTTHREAD);
4072 }
4073 AssertReturnVoid(hThread->u32Magic == RTTHREADINT_MAGIC);
4074
4075 /*
4076 * Find the entry hope it's a recursive one.
4077 */
4078 uint32_t iEntry = UINT32_MAX; /* shuts up gcc */
4079 PRTLOCKVALRECUNION pEntry = rtLockValidatorRecSharedFindOwner(pRec, hThread, &iEntry);
4080 AssertReturnVoid(pEntry);
4081 AssertReturnVoid(pEntry->ShrdOwner.cRecursion > 0);
4082
4083 uint32_t c = --pEntry->ShrdOwner.cRecursion;
4084 if (c == 0)
4085 {
4086 if (!pRec->fSignaller)
4087 rtLockValidatorStackPop(hThread, (PRTLOCKVALRECUNION)pEntry);
4088 rtLockValidatorRecSharedRemoveAndFreeOwner(pRec, &pEntry->ShrdOwner, iEntry);
4089 }
4090 else
4091 {
4092 Assert(!pRec->fSignaller);
4093 rtLockValidatorStackPopRecursion(hThread, pEntry);
4094 }
4095}
4096RT_EXPORT_SYMBOL(RTLockValidatorRecSharedRemoveOwner);
4097
4098
4099RTDECL(bool) RTLockValidatorRecSharedIsOwner(PRTLOCKVALRECSHRD pRec, RTTHREAD hThread)
4100{
4101 /* Validate and resolve input. */
4102 AssertReturn(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, false);
4103 if (!pRec->fEnabled)
4104 return false;
4105 if (hThread == NIL_RTTHREAD)
4106 {
4107 hThread = RTThreadSelfAutoAdopt();
4108 AssertReturn(hThread != NIL_RTTHREAD, false);
4109 }
4110 AssertReturn(hThread->u32Magic == RTTHREADINT_MAGIC, false);
4111
4112 /* Do the job. */
4113 PRTLOCKVALRECUNION pEntry = rtLockValidatorRecSharedFindOwner(pRec, hThread, NULL);
4114 return pEntry != NULL;
4115}
4116RT_EXPORT_SYMBOL(RTLockValidatorRecSharedIsOwner);
4117
4118
4119RTDECL(int) RTLockValidatorRecSharedCheckAndRelease(PRTLOCKVALRECSHRD pRec, RTTHREAD hThreadSelf)
4120{
4121 AssertReturn(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
4122 if (!pRec->fEnabled)
4123 return VINF_SUCCESS;
4124 if (hThreadSelf == NIL_RTTHREAD)
4125 {
4126 hThreadSelf = RTThreadSelfAutoAdopt();
4127 AssertReturn(hThreadSelf != NIL_RTTHREAD, VERR_SEM_LV_INTERNAL_ERROR);
4128 }
4129 Assert(hThreadSelf == RTThreadSelf());
4130 AssertReturn(hThreadSelf->u32Magic == RTTHREADINT_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
4131
4132 /*
4133 * Locate the entry for this thread in the table.
4134 */
4135 uint32_t iEntry = 0;
4136 PRTLOCKVALRECUNION pEntry = rtLockValidatorRecSharedFindOwner(pRec, hThreadSelf, &iEntry);
4137 if (RT_UNLIKELY(!pEntry))
4138 {
4139 rtLockValComplainFirst("Not owner (shared)!", NULL, hThreadSelf, (PRTLOCKVALRECUNION)pRec, true);
4140 rtLockValComplainPanic();
4141 return VERR_SEM_LV_NOT_OWNER;
4142 }
4143
4144 /*
4145 * Check the release order.
4146 */
4147 if ( pRec->hClass != NIL_RTLOCKVALCLASS
4148 && pRec->hClass->fStrictReleaseOrder
4149 && pRec->hClass->cMsMinOrder != RT_INDEFINITE_WAIT
4150 )
4151 {
4152 int rc = rtLockValidatorStackCheckReleaseOrder(hThreadSelf, (PRTLOCKVALRECUNION)pEntry);
4153 if (RT_FAILURE(rc))
4154 return rc;
4155 }
4156
4157 /*
4158 * Release the ownership or unwind a level of recursion.
4159 */
4160 Assert(pEntry->ShrdOwner.cRecursion > 0);
4161 uint32_t c = --pEntry->ShrdOwner.cRecursion;
4162 if (c == 0)
4163 {
4164 rtLockValidatorStackPop(hThreadSelf, pEntry);
4165 rtLockValidatorRecSharedRemoveAndFreeOwner(pRec, &pEntry->ShrdOwner, iEntry);
4166 }
4167 else
4168 rtLockValidatorStackPopRecursion(hThreadSelf, pEntry);
4169
4170 return VINF_SUCCESS;
4171}
4172
4173
4174RTDECL(int) RTLockValidatorRecSharedCheckSignaller(PRTLOCKVALRECSHRD pRec, RTTHREAD hThreadSelf)
4175{
4176 AssertReturn(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
4177 if (!pRec->fEnabled)
4178 return VINF_SUCCESS;
4179 if (hThreadSelf == NIL_RTTHREAD)
4180 {
4181 hThreadSelf = RTThreadSelfAutoAdopt();
4182 AssertReturn(hThreadSelf != NIL_RTTHREAD, VERR_SEM_LV_INTERNAL_ERROR);
4183 }
4184 Assert(hThreadSelf == RTThreadSelf());
4185 AssertReturn(hThreadSelf->u32Magic == RTTHREADINT_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
4186
4187 /*
4188 * Locate the entry for this thread in the table.
4189 */
4190 uint32_t iEntry = 0;
4191 PRTLOCKVALRECUNION pEntry = rtLockValidatorRecSharedFindOwner(pRec, hThreadSelf, &iEntry);
4192 if (RT_UNLIKELY(!pEntry))
4193 {
4194 rtLockValComplainFirst("Invalid signaller!", NULL, hThreadSelf, (PRTLOCKVALRECUNION)pRec, true);
4195 rtLockValComplainPanic();
4196 return VERR_SEM_LV_NOT_SIGNALLER;
4197 }
4198 return VINF_SUCCESS;
4199}
4200
4201
4202RTDECL(int32_t) RTLockValidatorWriteLockGetCount(RTTHREAD Thread)
4203{
4204 if (Thread == NIL_RTTHREAD)
4205 return 0;
4206
4207 PRTTHREADINT pThread = rtThreadGet(Thread);
4208 if (!pThread)
4209 return VERR_INVALID_HANDLE;
4210 int32_t cWriteLocks = ASMAtomicReadS32(&pThread->LockValidator.cWriteLocks);
4211 rtThreadRelease(pThread);
4212 return cWriteLocks;
4213}
4214RT_EXPORT_SYMBOL(RTLockValidatorWriteLockGetCount);
4215
4216
4217RTDECL(void) RTLockValidatorWriteLockInc(RTTHREAD Thread)
4218{
4219 PRTTHREADINT pThread = rtThreadGet(Thread);
4220 AssertReturnVoid(pThread);
4221 ASMAtomicIncS32(&pThread->LockValidator.cWriteLocks);
4222 rtThreadRelease(pThread);
4223}
4224RT_EXPORT_SYMBOL(RTLockValidatorWriteLockInc);
4225
4226
4227RTDECL(void) RTLockValidatorWriteLockDec(RTTHREAD Thread)
4228{
4229 PRTTHREADINT pThread = rtThreadGet(Thread);
4230 AssertReturnVoid(pThread);
4231 ASMAtomicDecS32(&pThread->LockValidator.cWriteLocks);
4232 rtThreadRelease(pThread);
4233}
4234RT_EXPORT_SYMBOL(RTLockValidatorWriteLockDec);
4235
4236
4237RTDECL(int32_t) RTLockValidatorReadLockGetCount(RTTHREAD Thread)
4238{
4239 if (Thread == NIL_RTTHREAD)
4240 return 0;
4241
4242 PRTTHREADINT pThread = rtThreadGet(Thread);
4243 if (!pThread)
4244 return VERR_INVALID_HANDLE;
4245 int32_t cReadLocks = ASMAtomicReadS32(&pThread->LockValidator.cReadLocks);
4246 rtThreadRelease(pThread);
4247 return cReadLocks;
4248}
4249RT_EXPORT_SYMBOL(RTLockValidatorReadLockGetCount);
4250
4251
4252RTDECL(void) RTLockValidatorReadLockInc(RTTHREAD Thread)
4253{
4254 PRTTHREADINT pThread = rtThreadGet(Thread);
4255 Assert(pThread);
4256 ASMAtomicIncS32(&pThread->LockValidator.cReadLocks);
4257 rtThreadRelease(pThread);
4258}
4259RT_EXPORT_SYMBOL(RTLockValidatorReadLockInc);
4260
4261
4262RTDECL(void) RTLockValidatorReadLockDec(RTTHREAD Thread)
4263{
4264 PRTTHREADINT pThread = rtThreadGet(Thread);
4265 Assert(pThread);
4266 ASMAtomicDecS32(&pThread->LockValidator.cReadLocks);
4267 rtThreadRelease(pThread);
4268}
4269RT_EXPORT_SYMBOL(RTLockValidatorReadLockDec);
4270
4271
/**
 * Returns the lock handle the thread is currently blocking on, or NULL when
 * it is not sleeping on a tracked lock record.
 */
RTDECL(void *) RTLockValidatorQueryBlocking(RTTHREAD hThread)
{
    void *pvLock = NULL;
    PRTTHREADINT pThread = rtThreadGet(hThread);
    if (pThread)
    {
        /* Cheap unlocked pre-check before taking the detection lock. */
        RTTHREADSTATE enmState = rtThreadGetState(pThread);
        if (RTTHREAD_IS_SLEEPING(enmState))
        {
            rtLockValidatorSerializeDetectionEnter();

            /* Re-check now that we hold the detection lock. */
            enmState = rtThreadGetState(pThread);
            if (RTTHREAD_IS_SLEEPING(enmState))
            {
                PRTLOCKVALRECUNION pRec = rtLockValidatorReadRecUnionPtr(&pThread->LockValidator.pRec);
                if (pRec)
                {
                    switch (pRec->Core.u32Magic)
                    {
                        case RTLOCKVALRECEXCL_MAGIC:
                            pvLock = pRec->Excl.hLock;
                            break;

                        case RTLOCKVALRECSHRDOWN_MAGIC:
                            /* Owner entry: follow it to the shared record,
                               validating the magic before falling through. */
                            pRec = (PRTLOCKVALRECUNION)pRec->ShrdOwner.pSharedRec;
                            if (!pRec || pRec->Core.u32Magic != RTLOCKVALRECSHRD_MAGIC)
                                break;
                            RT_FALL_THRU();
                        case RTLOCKVALRECSHRD_MAGIC:
                            pvLock = pRec->Shared.hLock;
                            break;
                    }
                    /* Discard the result if the thread state changed while
                       we were poking around. */
                    if (RTThreadGetState(pThread) != enmState)
                        pvLock = NULL;
                }
            }

            rtLockValidatorSerializeDetectionLeave();
        }
        rtThreadRelease(pThread);
    }
    return pvLock;
}
RT_EXPORT_SYMBOL(RTLockValidatorQueryBlocking);
4316
4317
4318RTDECL(bool) RTLockValidatorIsBlockedThreadInValidator(RTTHREAD hThread)
4319{
4320 bool fRet = false;
4321 PRTTHREADINT pThread = rtThreadGet(hThread);
4322 if (pThread)
4323 {
4324 fRet = ASMAtomicReadBool(&pThread->LockValidator.fInValidator);
4325 rtThreadRelease(pThread);
4326 }
4327 return fRet;
4328}
4329RT_EXPORT_SYMBOL(RTLockValidatorIsBlockedThreadInValidator);
4330
4331
4332RTDECL(bool) RTLockValidatorHoldsLocksInClass(RTTHREAD hCurrentThread, RTLOCKVALCLASS hClass)
4333{
4334 bool fRet = false;
4335 if (hCurrentThread == NIL_RTTHREAD)
4336 hCurrentThread = RTThreadSelf();
4337 else
4338 Assert(hCurrentThread == RTThreadSelf());
4339 PRTTHREADINT pThread = rtThreadGet(hCurrentThread);
4340 if (pThread)
4341 {
4342 if (hClass != NIL_RTLOCKVALCLASS)
4343 {
4344 PRTLOCKVALRECUNION pCur = rtLockValidatorReadRecUnionPtr(&pThread->LockValidator.pStackTop);
4345 while (VALID_PTR(pCur) && !fRet)
4346 {
4347 switch (pCur->Core.u32Magic)
4348 {
4349 case RTLOCKVALRECEXCL_MAGIC:
4350 fRet = pCur->Excl.hClass == hClass;
4351 pCur = rtLockValidatorReadRecUnionPtr(&pCur->Excl.pDown);
4352 break;
4353 case RTLOCKVALRECSHRDOWN_MAGIC:
4354 fRet = VALID_PTR(pCur->ShrdOwner.pSharedRec)
4355 && pCur->ShrdOwner.pSharedRec->hClass == hClass;
4356 pCur = rtLockValidatorReadRecUnionPtr(&pCur->ShrdOwner.pDown);
4357 break;
4358 case RTLOCKVALRECNEST_MAGIC:
4359 switch (pCur->Nest.pRec->Core.u32Magic)
4360 {
4361 case RTLOCKVALRECEXCL_MAGIC:
4362 fRet = pCur->Nest.pRec->Excl.hClass == hClass;
4363 break;
4364 case RTLOCKVALRECSHRDOWN_MAGIC:
4365 fRet = VALID_PTR(pCur->ShrdOwner.pSharedRec)
4366 && pCur->Nest.pRec->ShrdOwner.pSharedRec->hClass == hClass;
4367 break;
4368 }
4369 pCur = rtLockValidatorReadRecUnionPtr(&pCur->Nest.pDown);
4370 break;
4371 default:
4372 pCur = NULL;
4373 break;
4374 }
4375 }
4376 }
4377
4378 rtThreadRelease(pThread);
4379 }
4380 return fRet;
4381}
4382RT_EXPORT_SYMBOL(RTLockValidatorHoldsLocksInClass);
4383
4384
4385RTDECL(bool) RTLockValidatorHoldsLocksInSubClass(RTTHREAD hCurrentThread, RTLOCKVALCLASS hClass, uint32_t uSubClass)
4386{
4387 bool fRet = false;
4388 if (hCurrentThread == NIL_RTTHREAD)
4389 hCurrentThread = RTThreadSelf();
4390 else
4391 Assert(hCurrentThread == RTThreadSelf());
4392 PRTTHREADINT pThread = rtThreadGet(hCurrentThread);
4393 if (pThread)
4394 {
4395 if (hClass != NIL_RTLOCKVALCLASS)
4396 {
4397 PRTLOCKVALRECUNION pCur = rtLockValidatorReadRecUnionPtr(&pThread->LockValidator.pStackTop);
4398 while (VALID_PTR(pCur) && !fRet)
4399 {
4400 switch (pCur->Core.u32Magic)
4401 {
4402 case RTLOCKVALRECEXCL_MAGIC:
4403 fRet = pCur->Excl.hClass == hClass
4404 && pCur->Excl.uSubClass == uSubClass;
4405 pCur = rtLockValidatorReadRecUnionPtr(&pCur->Excl.pDown);
4406 break;
4407 case RTLOCKVALRECSHRDOWN_MAGIC:
4408 fRet = VALID_PTR(pCur->ShrdOwner.pSharedRec)
4409 && pCur->ShrdOwner.pSharedRec->hClass == hClass
4410 && pCur->ShrdOwner.pSharedRec->uSubClass == uSubClass;
4411 pCur = rtLockValidatorReadRecUnionPtr(&pCur->ShrdOwner.pDown);
4412 break;
4413 case RTLOCKVALRECNEST_MAGIC:
4414 switch (pCur->Nest.pRec->Core.u32Magic)
4415 {
4416 case RTLOCKVALRECEXCL_MAGIC:
4417 fRet = pCur->Nest.pRec->Excl.hClass == hClass
4418 && pCur->Nest.pRec->Excl.uSubClass == uSubClass;
4419 break;
4420 case RTLOCKVALRECSHRDOWN_MAGIC:
4421 fRet = VALID_PTR(pCur->ShrdOwner.pSharedRec)
4422 && pCur->Nest.pRec->ShrdOwner.pSharedRec->hClass == hClass
4423 && pCur->Nest.pRec->ShrdOwner.pSharedRec->uSubClass == uSubClass;
4424 break;
4425 }
4426 pCur = rtLockValidatorReadRecUnionPtr(&pCur->Nest.pDown);
4427 break;
4428 default:
4429 pCur = NULL;
4430 break;
4431 }
4432 }
4433 }
4434
4435 rtThreadRelease(pThread);
4436 }
4437 return fRet;
4438}
4439RT_EXPORT_SYMBOL(RTLockValidatorHoldsLocksInClass);
4440
4441
4442RTDECL(bool) RTLockValidatorSetEnabled(bool fEnabled)
4443{
4444 return ASMAtomicXchgBool(&g_fLockValidatorEnabled, fEnabled);
4445}
4446RT_EXPORT_SYMBOL(RTLockValidatorSetEnabled);
4447
4448
4449RTDECL(bool) RTLockValidatorIsEnabled(void)
4450{
4451 return ASMAtomicUoReadBool(&g_fLockValidatorEnabled);
4452}
4453RT_EXPORT_SYMBOL(RTLockValidatorIsEnabled);
4454
4455
4456RTDECL(bool) RTLockValidatorSetQuiet(bool fQuiet)
4457{
4458 return ASMAtomicXchgBool(&g_fLockValidatorQuiet, fQuiet);
4459}
4460RT_EXPORT_SYMBOL(RTLockValidatorSetQuiet);
4461
4462
4463RTDECL(bool) RTLockValidatorIsQuiet(void)
4464{
4465 return ASMAtomicUoReadBool(&g_fLockValidatorQuiet);
4466}
4467RT_EXPORT_SYMBOL(RTLockValidatorIsQuiet);
4468
4469
4470RTDECL(bool) RTLockValidatorSetMayPanic(bool fMayPanic)
4471{
4472 return ASMAtomicXchgBool(&g_fLockValidatorMayPanic, fMayPanic);
4473}
4474RT_EXPORT_SYMBOL(RTLockValidatorSetMayPanic);
4475
4476
4477RTDECL(bool) RTLockValidatorMayPanic(void)
4478{
4479 return ASMAtomicUoReadBool(&g_fLockValidatorMayPanic);
4480}
4481RT_EXPORT_SYMBOL(RTLockValidatorMayPanic);
4482
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette