VirtualBox

source: vbox/trunk/src/VBox/Runtime/common/misc/lockvalidator.cpp@ 62559

Last change on this file since 62559 was 62477, checked in by vboxsync, 8 years ago

(C) 2016

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 159.9 KB
Line 
1/* $Id: lockvalidator.cpp 62477 2016-07-22 18:27:37Z vboxsync $ */
2/** @file
3 * IPRT - Lock Validator.
4 */
5
6/*
7 * Copyright (C) 2009-2016 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 */
26
27
28/*********************************************************************************************************************************
29* Header Files *
30*********************************************************************************************************************************/
#include <iprt/lockvalidator.h>
#include "internal/iprt.h"

#include <iprt/asm.h>
#include <iprt/assert.h>
#include <iprt/avl.h>
#include <iprt/env.h>
#include <iprt/err.h>
#include <iprt/mem.h>
#include <iprt/once.h>
#include <iprt/semaphore.h>
#include <iprt/string.h>
#include <iprt/thread.h>

#include "internal/lockvalidator.h"
#include "internal/magics.h"
#include "internal/strhash.h"
#include "internal/thread.h"
48
49
50/*********************************************************************************************************************************
51* Defined Constants And Macros *
52*********************************************************************************************************************************/
/** Macro that asserts that a pointer is aligned correctly.
 * Only used when fighting bugs.
 * @note Both variants expand to a single do/while(0) statement so the macro
 *       is safe in unbraced if/else bodies; the enabled variant previously
 *       carried a trailing semicolon and was a bare statement, which made
 *       callers' semicolons produce stray empty statements. */
#if 1
# define RTLOCKVAL_ASSERT_PTR_ALIGN(p) \
    do { AssertMsg(!((uintptr_t)(p) & (sizeof(uintptr_t) - 1)), ("%p\n", (p))); } while (0)
#else
# define RTLOCKVAL_ASSERT_PTR_ALIGN(p) do { } while (0)
#endif
61
/** Hashes the class handle (pointer) into an apPriorLocksHash index.
 * The low 6 bits are discarded first since allocator alignment makes them
 * carry little information (presumably mostly zero -- heuristic, not a
 * correctness requirement: collisions only cost a slow-path list walk). */
#define RTLOCKVALCLASS_HASH(hClass) \
    (   ((uintptr_t)(hClass) >> 6 ) \
      % (  RT_SIZEOFMEMB(RTLOCKVALCLASSINT, apPriorLocksHash) \
         / sizeof(PRTLOCKVALCLASSREF)) )

/** The max value for RTLOCKVALCLASSINT::cRefs.
 * Kept well below UINT32_MAX so runaway retain/release bugs are detectable. */
#define RTLOCKVALCLASS_MAX_REFS                 UINT32_C(0xffff0000)
/** The max value for RTLOCKVALCLASSREF::cLookups. */
#define RTLOCKVALCLASSREF_MAX_LOOKUPS           UINT32_C(0xfffe0000)
/** The absolute max value for RTLOCKVALCLASSREF::cLookups at which it will
 * be set back to RTLOCKVALCLASSREF_MAX_LOOKUPS. */
#define RTLOCKVALCLASSREF_MAX_LOOKUPS_FIX       UINT32_C(0xffff0000)


/** @def RTLOCKVAL_WITH_RECURSION_RECORDS
 * Enable recursion records. */
#if defined(IN_RING3) || defined(DOXYGEN_RUNNING)
# define RTLOCKVAL_WITH_RECURSION_RECORDS  1
#endif

/** @def RTLOCKVAL_WITH_VERBOSE_DUMPS
 * Enables some extra verbosity in the lock dumping.
 * Note: only defined for doxygen by default, so code guarded by it is prone
 * to bitrot. */
#if defined(DOXYGEN_RUNNING)
# define RTLOCKVAL_WITH_VERBOSE_DUMPS
#endif

/** @def RTLOCKVAL_WITH_CLASS_HASH_STATS
 * Enables collection prior class hash lookup statistics, dumping them when
 * complaining about the class. */
#if defined(DEBUG) || defined(DOXYGEN_RUNNING)
# define RTLOCKVAL_WITH_CLASS_HASH_STATS
#endif
95
96
97/*********************************************************************************************************************************
98* Structures and Typedefs *
99*********************************************************************************************************************************/
100/**
101 * Deadlock detection stack entry.
102 */
103typedef struct RTLOCKVALDDENTRY
104{
105 /** The current record. */
106 PRTLOCKVALRECUNION pRec;
107 /** The current entry number if pRec is a shared one. */
108 uint32_t iEntry;
109 /** The thread state of the thread we followed to get to pFirstSibling.
110 * This is only used for validating a deadlock stack. */
111 RTTHREADSTATE enmState;
112 /** The thread we followed to get to pFirstSibling.
113 * This is only used for validating a deadlock stack. */
114 PRTTHREADINT pThread;
115 /** What pThread is waiting on, i.e. where we entered the circular list of
116 * siblings. This is used for validating a deadlock stack as well as
117 * terminating the sibling walk. */
118 PRTLOCKVALRECUNION pFirstSibling;
119} RTLOCKVALDDENTRY;
120
121
122/**
123 * Deadlock detection stack.
124 */
125typedef struct RTLOCKVALDDSTACK
126{
127 /** The number stack entries. */
128 uint32_t c;
129 /** The stack entries. */
130 RTLOCKVALDDENTRY a[32];
131} RTLOCKVALDDSTACK;
132/** Pointer to a deadlock detection stack. */
133typedef RTLOCKVALDDSTACK *PRTLOCKVALDDSTACK;
134
135
136/**
137 * Reference to another class.
138 */
139typedef struct RTLOCKVALCLASSREF
140{
141 /** The class. */
142 RTLOCKVALCLASS hClass;
143 /** The number of lookups of this class. */
144 uint32_t volatile cLookups;
145 /** Indicates whether the entry was added automatically during order checking
146 * (true) or manually via the API (false). */
147 bool fAutodidacticism;
148 /** Reserved / explicit alignment padding. */
149 bool afReserved[3];
150} RTLOCKVALCLASSREF;
151/** Pointer to a class reference. */
152typedef RTLOCKVALCLASSREF *PRTLOCKVALCLASSREF;
153
154
/** Pointer to a chunk of class references. */
typedef struct RTLOCKVALCLASSREFCHUNK *PRTLOCKVALCLASSREFCHUNK;
/**
 * Chunk of class references.
 *
 * Chunks form a singly linked list so new references can be appended without
 * reallocating or moving existing entries (readers may walk the list without
 * holding the teaching lock).
 */
typedef struct RTLOCKVALCLASSREFCHUNK
{
    /** Array of refs. */
#if 0 /** @todo for testing allocation of new chunks. */
    RTLOCKVALCLASSREF   aRefs[ARCH_BITS == 32 ? 10 : 8];
#else
    RTLOCKVALCLASSREF   aRefs[2];   /* deliberately small to exercise chunk allocation */
#endif
    /** Pointer to the next chunk. */
    PRTLOCKVALCLASSREFCHUNK volatile pNext;
} RTLOCKVALCLASSREFCHUNK;
171
172
173/**
174 * Lock class.
175 */
176typedef struct RTLOCKVALCLASSINT
177{
178 /** AVL node core. */
179 AVLLU32NODECORE Core;
180 /** Magic value (RTLOCKVALCLASS_MAGIC). */
181 uint32_t volatile u32Magic;
182 /** Reference counter. See RTLOCKVALCLASS_MAX_REFS. */
183 uint32_t volatile cRefs;
184 /** Whether the class is allowed to teach it self new locking order rules. */
185 bool fAutodidact;
186 /** Whether to allow recursion. */
187 bool fRecursionOk;
188 /** Strict release order. */
189 bool fStrictReleaseOrder;
190 /** Whether this class is in the tree. */
191 bool fInTree;
192 /** Donate a reference to the next retainer. This is a hack to make
193 * RTLockValidatorClassCreateUnique work. */
194 bool volatile fDonateRefToNextRetainer;
195 /** Reserved future use / explicit alignment. */
196 bool afReserved[3];
197 /** The minimum wait interval for which we do deadlock detection
198 * (milliseconds). */
199 RTMSINTERVAL cMsMinDeadlock;
200 /** The minimum wait interval for which we do order checks (milliseconds). */
201 RTMSINTERVAL cMsMinOrder;
202 /** More padding. */
203 uint32_t au32Reserved[ARCH_BITS == 32 ? 5 : 2];
204 /** Classes that may be taken prior to this one.
205 * This is a linked list where each node contains a chunk of locks so that we
206 * reduce the number of allocations as well as localize the data. */
207 RTLOCKVALCLASSREFCHUNK PriorLocks;
208 /** Hash table containing frequently encountered prior locks. */
209 PRTLOCKVALCLASSREF apPriorLocksHash[17];
210 /** Class name. (Allocated after the end of the block as usual.) */
211 char const *pszName;
212 /** Where this class was created.
213 * This is mainly used for finding automatically created lock classes.
214 * @remarks The strings are stored after this structure so we won't crash
215 * if the class lives longer than the module (dll/so/dylib) that
216 * spawned it. */
217 RTLOCKVALSRCPOS CreatePos;
218#ifdef RTLOCKVAL_WITH_CLASS_HASH_STATS
219 /** Hash hits. */
220 uint32_t volatile cHashHits;
221 /** Hash misses. */
222 uint32_t volatile cHashMisses;
223#endif
224} RTLOCKVALCLASSINT;
225AssertCompileSize(AVLLU32NODECORE, ARCH_BITS == 32 ? 20 : 32);
226AssertCompileMemberOffset(RTLOCKVALCLASSINT, PriorLocks, 64);
227
228
229/*********************************************************************************************************************************
230* Global Variables *
231*********************************************************************************************************************************/
/** Serializing object destruction and deadlock detection.
 *
 * This makes sure that none of the memory examined by the deadlock detection
 * code will become invalid (reused for other purposes or made not present)
 * while the detection is in progress.
 *
 * NS: RTLOCKVALREC*, RTTHREADINT and RTLOCKVALDRECSHRD::papOwners destruction.
 * EW: Deadlock detection and some related activities.
 *
 * Created lazily by rtLockValidatorLazyInit(); all users must cope with a
 * NIL handle. */
static RTSEMXROADS      g_hLockValidatorXRoads   = NIL_RTSEMXROADS;
/** Serializing class tree insert and lookups. */
static RTSEMRW          g_hLockValClassTreeRWLock = NIL_RTSEMRW;
/** Class tree. */
static PAVLLU32NODECORE g_LockValClassTree       = NULL;
/** Critical section serializing the teaching new rules to the classes. */
static RTCRITSECT       g_LockValClassTeachCS;

/** Whether the lock validator is enabled or disabled.
 * Only applies to new locks.  */
static bool volatile    g_fLockValidatorEnabled  = true;
/** Set if the lock validator is quiet.
 * Defaults depend on build type: strict builds complain, others stay quiet. */
#ifdef RT_STRICT
static bool volatile    g_fLockValidatorQuiet    = false;
#else
static bool volatile    g_fLockValidatorQuiet    = true;
#endif
/** Set if the lock validator may panic. */
#ifdef RT_STRICT
static bool volatile    g_fLockValidatorMayPanic = true;
#else
static bool volatile    g_fLockValidatorMayPanic = false;
#endif
/** Whether to return an error status on wrong locking order. */
static bool volatile    g_fLockValSoftWrongOrder = false;
266
267
268/*********************************************************************************************************************************
269* Internal Functions *
270*********************************************************************************************************************************/
271static void rtLockValidatorClassDestroy(RTLOCKVALCLASSINT *pClass);
272static uint32_t rtLockValidatorStackDepth(PRTTHREADINT pThread);
273
274
275/**
276 * Lazy initialization of the lock validator globals.
277 */
278static void rtLockValidatorLazyInit(void)
279{
280 static uint32_t volatile s_fInitializing = false;
281 if (ASMAtomicCmpXchgU32(&s_fInitializing, true, false))
282 {
283 /*
284 * The locks.
285 */
286 if (!RTCritSectIsInitialized(&g_LockValClassTeachCS))
287 RTCritSectInitEx(&g_LockValClassTeachCS, RTCRITSECT_FLAGS_NO_LOCK_VAL, NIL_RTLOCKVALCLASS,
288 RTLOCKVAL_SUB_CLASS_ANY, "RTLockVal-Teach");
289
290 if (g_hLockValClassTreeRWLock == NIL_RTSEMRW)
291 {
292 RTSEMRW hSemRW;
293 int rc = RTSemRWCreateEx(&hSemRW, RTSEMRW_FLAGS_NO_LOCK_VAL, NIL_RTLOCKVALCLASS, RTLOCKVAL_SUB_CLASS_ANY, "RTLockVal-Tree");
294 if (RT_SUCCESS(rc))
295 ASMAtomicWriteHandle(&g_hLockValClassTreeRWLock, hSemRW);
296 }
297
298 if (g_hLockValidatorXRoads == NIL_RTSEMXROADS)
299 {
300 RTSEMXROADS hXRoads;
301 int rc = RTSemXRoadsCreate(&hXRoads);
302 if (RT_SUCCESS(rc))
303 ASMAtomicWriteHandle(&g_hLockValidatorXRoads, hXRoads);
304 }
305
306#ifdef IN_RING3
307 /*
308 * Check the environment for our config variables.
309 */
310 if (RTEnvExist("IPRT_LOCK_VALIDATOR_ENABLED"))
311 ASMAtomicWriteBool(&g_fLockValidatorEnabled, true);
312 if (RTEnvExist("IPRT_LOCK_VALIDATOR_DISABLED"))
313 ASMAtomicWriteBool(&g_fLockValidatorEnabled, false);
314
315 if (RTEnvExist("IPRT_LOCK_VALIDATOR_MAY_PANIC"))
316 ASMAtomicWriteBool(&g_fLockValidatorMayPanic, true);
317 if (RTEnvExist("IPRT_LOCK_VALIDATOR_MAY_NOT_PANIC"))
318 ASMAtomicWriteBool(&g_fLockValidatorMayPanic, false);
319
320 if (RTEnvExist("IPRT_LOCK_VALIDATOR_NOT_QUIET"))
321 ASMAtomicWriteBool(&g_fLockValidatorQuiet, false);
322 if (RTEnvExist("IPRT_LOCK_VALIDATOR_QUIET"))
323 ASMAtomicWriteBool(&g_fLockValidatorQuiet, true);
324
325 if (RTEnvExist("IPRT_LOCK_VALIDATOR_STRICT_ORDER"))
326 ASMAtomicWriteBool(&g_fLockValSoftWrongOrder, false);
327 if (RTEnvExist("IPRT_LOCK_VALIDATOR_SOFT_ORDER"))
328 ASMAtomicWriteBool(&g_fLockValSoftWrongOrder, true);
329#endif
330
331 /*
332 * Register cleanup
333 */
334 /** @todo register some cleanup callback if we care. */
335
336 ASMAtomicWriteU32(&s_fInitializing, false);
337 }
338}
339
340
341
342/** Wrapper around ASMAtomicReadPtr. */
343DECL_FORCE_INLINE(PRTLOCKVALRECUNION) rtLockValidatorReadRecUnionPtr(PRTLOCKVALRECUNION volatile *ppRec)
344{
345 PRTLOCKVALRECUNION p = ASMAtomicReadPtrT(ppRec, PRTLOCKVALRECUNION);
346 RTLOCKVAL_ASSERT_PTR_ALIGN(p);
347 return p;
348}
349
350
351/** Wrapper around ASMAtomicWritePtr. */
352DECL_FORCE_INLINE(void) rtLockValidatorWriteRecUnionPtr(PRTLOCKVALRECUNION volatile *ppRec, PRTLOCKVALRECUNION pRecNew)
353{
354 RTLOCKVAL_ASSERT_PTR_ALIGN(pRecNew);
355 ASMAtomicWritePtr(ppRec, pRecNew);
356}
357
358
359/** Wrapper around ASMAtomicReadPtr. */
360DECL_FORCE_INLINE(PRTTHREADINT) rtLockValidatorReadThreadHandle(RTTHREAD volatile *phThread)
361{
362 PRTTHREADINT p = ASMAtomicReadPtrT(phThread, PRTTHREADINT);
363 RTLOCKVAL_ASSERT_PTR_ALIGN(p);
364 return p;
365}
366
367
368/** Wrapper around ASMAtomicUoReadPtr. */
369DECL_FORCE_INLINE(PRTLOCKVALRECSHRDOWN) rtLockValidatorUoReadSharedOwner(PRTLOCKVALRECSHRDOWN volatile *ppOwner)
370{
371 PRTLOCKVALRECSHRDOWN p = ASMAtomicUoReadPtrT(ppOwner, PRTLOCKVALRECSHRDOWN);
372 RTLOCKVAL_ASSERT_PTR_ALIGN(p);
373 return p;
374}
375
376
377/**
378 * Reads a volatile thread handle field and returns the thread name.
379 *
380 * @returns Thread name (read only).
381 * @param phThread The thread handle field.
382 */
383static const char *rtLockValidatorNameThreadHandle(RTTHREAD volatile *phThread)
384{
385 PRTTHREADINT pThread = rtLockValidatorReadThreadHandle(phThread);
386 if (!pThread)
387 return "<NIL>";
388 if (!VALID_PTR(pThread))
389 return "<INVALID>";
390 if (pThread->u32Magic != RTTHREADINT_MAGIC)
391 return "<BAD-THREAD-MAGIC>";
392 return pThread->szName;
393}
394
395
396/**
397 * Launch a simple assertion like complaint w/ panic.
398 *
399 * @param SRC_POS The source position where call is being made from.
400 * @param pszWhat What we're complaining about.
401 * @param ... Format arguments.
402 */
403static void rtLockValComplain(RT_SRC_POS_DECL, const char *pszWhat, ...)
404{
405 if (!ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
406 {
407 RTAssertMsg1Weak("RTLockValidator", iLine, pszFile, pszFunction);
408 va_list va;
409 va_start(va, pszWhat);
410 RTAssertMsg2WeakV(pszWhat, va);
411 va_end(va);
412 }
413 if (!ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
414 RTAssertPanic();
415}
416
417
418/**
419 * Describes the class.
420 *
421 * @param pszPrefix Message prefix.
422 * @param pClass The class to complain about.
423 * @param uSubClass My sub-class.
424 * @param fVerbose Verbose description including relations to other
425 * classes.
426 */
427static void rtLockValComplainAboutClass(const char *pszPrefix, RTLOCKVALCLASSINT *pClass, uint32_t uSubClass, bool fVerbose)
428{
429 if (ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
430 return;
431
432 /* Stringify the sub-class. */
433 const char *pszSubClass;
434 char szSubClass[32];
435 if (uSubClass < RTLOCKVAL_SUB_CLASS_USER)
436 switch (uSubClass)
437 {
438 case RTLOCKVAL_SUB_CLASS_NONE: pszSubClass = "none"; break;
439 case RTLOCKVAL_SUB_CLASS_ANY: pszSubClass = "any"; break;
440 default:
441 RTStrPrintf(szSubClass, sizeof(szSubClass), "invl-%u", uSubClass);
442 pszSubClass = szSubClass;
443 break;
444 }
445 else
446 {
447 RTStrPrintf(szSubClass, sizeof(szSubClass), "%u", uSubClass);
448 pszSubClass = szSubClass;
449 }
450
451 /* Validate the class pointer. */
452 if (!VALID_PTR(pClass))
453 {
454 RTAssertMsg2AddWeak("%sbad class=%p sub-class=%s\n", pszPrefix, pClass, pszSubClass);
455 return;
456 }
457 if (pClass->u32Magic != RTLOCKVALCLASS_MAGIC)
458 {
459 RTAssertMsg2AddWeak("%sbad class=%p magic=%#x sub-class=%s\n", pszPrefix, pClass, pClass->u32Magic, pszSubClass);
460 return;
461 }
462
463 /* OK, dump the class info. */
464 RTAssertMsg2AddWeak("%sclass=%p %s created={%Rbn(%u) %Rfn %p} sub-class=%s\n", pszPrefix,
465 pClass,
466 pClass->pszName,
467 pClass->CreatePos.pszFile,
468 pClass->CreatePos.uLine,
469 pClass->CreatePos.pszFunction,
470 pClass->CreatePos.uId,
471 pszSubClass);
472 if (fVerbose)
473 {
474 uint32_t i = 0;
475 uint32_t cPrinted = 0;
476 for (PRTLOCKVALCLASSREFCHUNK pChunk = &pClass->PriorLocks; pChunk; pChunk = pChunk->pNext)
477 for (unsigned j = 0; j < RT_ELEMENTS(pChunk->aRefs); j++, i++)
478 {
479 RTLOCKVALCLASSINT *pCurClass = pChunk->aRefs[j].hClass;
480 if (pCurClass != NIL_RTLOCKVALCLASS)
481 {
482 RTAssertMsg2AddWeak("%s%s #%02u: %s, %s, %u lookup%s\n", pszPrefix,
483 cPrinted == 0
484 ? "Prior:"
485 : " ",
486 i,
487 pCurClass->pszName,
488 pChunk->aRefs[j].fAutodidacticism
489 ? "autodidactic"
490 : "manually ",
491 pChunk->aRefs[j].cLookups,
492 pChunk->aRefs[j].cLookups != 1 ? "s" : "");
493 cPrinted++;
494 }
495 }
496 if (!cPrinted)
497 RTAssertMsg2AddWeak("%sPrior: none\n", pszPrefix);
498#ifdef RTLOCKVAL_WITH_CLASS_HASH_STATS
499 RTAssertMsg2AddWeak("%sHash Stats: %u hits, %u misses\n", pszPrefix, pClass->cHashHits, pClass->cHashMisses);
500#endif
501 }
502 else
503 {
504 uint32_t cPrinted = 0;
505 for (PRTLOCKVALCLASSREFCHUNK pChunk = &pClass->PriorLocks; pChunk; pChunk = pChunk->pNext)
506 for (unsigned j = 0; j < RT_ELEMENTS(pChunk->aRefs); j++)
507 {
508 RTLOCKVALCLASSINT *pCurClass = pChunk->aRefs[j].hClass;
509 if (pCurClass != NIL_RTLOCKVALCLASS)
510 {
511 if ((cPrinted % 10) == 0)
512 RTAssertMsg2AddWeak("%sPrior classes: %s%s", pszPrefix, pCurClass->pszName,
513 pChunk->aRefs[j].fAutodidacticism ? "*" : "");
514 else if ((cPrinted % 10) != 9)
515 RTAssertMsg2AddWeak(", %s%s", pCurClass->pszName,
516 pChunk->aRefs[j].fAutodidacticism ? "*" : "");
517 else
518 RTAssertMsg2AddWeak(", %s%s\n", pCurClass->pszName,
519 pChunk->aRefs[j].fAutodidacticism ? "*" : "");
520 cPrinted++;
521 }
522 }
523 if (!cPrinted)
524 RTAssertMsg2AddWeak("%sPrior classes: none\n", pszPrefix);
525 else if ((cPrinted % 10) != 0)
526 RTAssertMsg2AddWeak("\n");
527 }
528}
529
530
531/**
532 * Helper for getting the class name.
533 * @returns Class name string.
534 * @param pClass The class.
535 */
536static const char *rtLockValComplainGetClassName(RTLOCKVALCLASSINT *pClass)
537{
538 if (!pClass)
539 return "<nil-class>";
540 if (!VALID_PTR(pClass))
541 return "<bad-class-ptr>";
542 if (pClass->u32Magic != RTLOCKVALCLASS_MAGIC)
543 return "<bad-class-magic>";
544 if (!pClass->pszName)
545 return "<no-class-name>";
546 return pClass->pszName;
547}
548
549/**
550 * Formats the sub-class.
551 *
552 * @returns Stringified sub-class.
553 * @param uSubClass The name.
554 * @param pszBuf Buffer that is big enough.
555 */
556static const char *rtLockValComplainGetSubClassName(uint32_t uSubClass, char *pszBuf)
557{
558 if (uSubClass < RTLOCKVAL_SUB_CLASS_USER)
559 switch (uSubClass)
560 {
561 case RTLOCKVAL_SUB_CLASS_NONE: return "none";
562 case RTLOCKVAL_SUB_CLASS_ANY: return "any";
563 default:
564 RTStrPrintf(pszBuf, 32, "invl-%u", uSubClass);
565 break;
566 }
567 else
568 RTStrPrintf(pszBuf, 32, "%x", uSubClass);
569 return pszBuf;
570}
571
572
573/**
574 * Helper for rtLockValComplainAboutLock.
575 */
576DECL_FORCE_INLINE(void) rtLockValComplainAboutLockHlp(const char *pszPrefix, PRTLOCKVALRECUNION pRec, const char *pszSuffix,
577 uint32_t u32Magic, PCRTLOCKVALSRCPOS pSrcPos, uint32_t cRecursion,
578 const char *pszFrameType)
579{
580 char szBuf[32];
581 switch (u32Magic)
582 {
583 case RTLOCKVALRECEXCL_MAGIC:
584#ifdef RTLOCKVAL_WITH_VERBOSE_DUMPS
585 RTAssertMsg2AddWeak("%s%p %s xrec=%p own=%s r=%u cls=%s/%s pos={%Rbn(%u) %Rfn %p} [x%s]%s", pszPrefix,
586 pRec->Excl.hLock, pRec->Excl.pszName, pRec,
587 rtLockValidatorNameThreadHandle(&pRec->Excl.hThread), cRecursion,
588 rtLockValComplainGetClassName(pRec->Excl.hClass),
589 rtLockValComplainGetSubClassName(pRec->Excl.uSubClass, szBuf),
590 pSrcPos->pszFile, pSrcPos->uLine, pSrcPos->pszFunction, pSrcPos->uId,
591 pszFrameType, pszSuffix);
592#else
593 RTAssertMsg2AddWeak("%s%p %s own=%s r=%u cls=%s/%s pos={%Rbn(%u) %Rfn %p} [x%s]%s", pszPrefix,
594 pRec->Excl.hLock, pRec->Excl.szName,
595 rtLockValidatorNameThreadHandle(&pRec->Excl.hThread), cRecursion,
596 rtLockValComplainGetClassName(pRec->Excl.hClass),
597 rtLockValComplainGetSubClassName(pRec->Excl.uSubClass, szBuf),
598 pSrcPos->pszFile, pSrcPos->uLine, pSrcPos->pszFunction, pSrcPos->uId,
599 pszFrameType, pszSuffix);
600#endif
601 break;
602
603 case RTLOCKVALRECSHRD_MAGIC:
604 RTAssertMsg2AddWeak("%ss %p %s srec=%p cls=%s/%s [s%s]%s", pszPrefix,
605 pRec->Shared.hLock, pRec->Shared.szName, pRec,
606 rtLockValComplainGetClassName(pRec->Shared.hClass),
607 rtLockValComplainGetSubClassName(pRec->Shared.uSubClass, szBuf),
608 pszFrameType, pszSuffix);
609 break;
610
611 case RTLOCKVALRECSHRDOWN_MAGIC:
612 {
613 PRTLOCKVALRECSHRD pShared = pRec->ShrdOwner.pSharedRec;
614 if ( VALID_PTR(pShared)
615 && pShared->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC)
616#ifdef RTLOCKVAL_WITH_VERBOSE_DUMPS
617 RTAssertMsg2AddWeak("%s%p %s srec=%p trec=%p own=%s r=%u cls=%s/%s pos={%Rbn(%u) %Rfn %p} [o%s]%s", pszPrefix,
618 pShared->hLock, pShared->pszName, pShared,
619 pRec, rtLockValidatorNameThreadHandle(&pRec->ShrdOwner.hThread), cRecursion,
620 rtLockValComplainGetClassName(pShared->hClass),
621 rtLockValComplainGetSubClassName(pShared->uSubClass, szBuf),
622 pSrcPos->pszFile, pSrcPos->uLine, pSrcPos->pszFunction, pSrcPos->uId,
623 pszSuffix2, pszSuffix);
624#else
625 RTAssertMsg2AddWeak("%s%p %s own=%s r=%u cls=%s/%s pos={%Rbn(%u) %Rfn %p} [o%s]%s", pszPrefix,
626 pShared->hLock, pShared->szName,
627 rtLockValidatorNameThreadHandle(&pRec->ShrdOwner.hThread), cRecursion,
628 rtLockValComplainGetClassName(pShared->hClass),
629 rtLockValComplainGetSubClassName(pShared->uSubClass, szBuf),
630 pSrcPos->pszFile, pSrcPos->uLine, pSrcPos->pszFunction, pSrcPos->uId,
631 pszFrameType, pszSuffix);
632#endif
633 else
634 RTAssertMsg2AddWeak("%sbad srec=%p trec=%p own=%s r=%u pos={%Rbn(%u) %Rfn %p} [x%s]%s", pszPrefix,
635 pShared,
636 pRec, rtLockValidatorNameThreadHandle(&pRec->ShrdOwner.hThread), cRecursion,
637 pSrcPos->pszFile, pSrcPos->uLine, pSrcPos->pszFunction, pSrcPos->uId,
638 pszFrameType, pszSuffix);
639 break;
640 }
641
642 default:
643 AssertMsgFailed(("%#x\n", u32Magic));
644 }
645}
646
647
648/**
649 * Describes the lock.
650 *
651 * @param pszPrefix Message prefix.
652 * @param pRec The lock record we're working on.
653 * @param pszSuffix Message suffix.
654 */
655static void rtLockValComplainAboutLock(const char *pszPrefix, PRTLOCKVALRECUNION pRec, const char *pszSuffix)
656{
657#ifdef RTLOCKVAL_WITH_RECURSION_RECORDS
658# define FIX_REC(r) 1
659#else
660# define FIX_REC(r) (r)
661#endif
662 if ( VALID_PTR(pRec)
663 && !ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
664 {
665 switch (pRec->Core.u32Magic)
666 {
667 case RTLOCKVALRECEXCL_MAGIC:
668 rtLockValComplainAboutLockHlp(pszPrefix, pRec, pszSuffix, RTLOCKVALRECEXCL_MAGIC,
669 &pRec->Excl.SrcPos, FIX_REC(pRec->Excl.cRecursion), "");
670 break;
671
672 case RTLOCKVALRECSHRD_MAGIC:
673 rtLockValComplainAboutLockHlp(pszPrefix, pRec, pszSuffix, RTLOCKVALRECSHRD_MAGIC, NULL, 0, "");
674 break;
675
676 case RTLOCKVALRECSHRDOWN_MAGIC:
677 rtLockValComplainAboutLockHlp(pszPrefix, pRec, pszSuffix, RTLOCKVALRECSHRDOWN_MAGIC,
678 &pRec->ShrdOwner.SrcPos, FIX_REC(pRec->ShrdOwner.cRecursion), "");
679 break;
680
681 case RTLOCKVALRECNEST_MAGIC:
682 {
683 PRTLOCKVALRECUNION pRealRec = pRec->Nest.pRec;
684 uint32_t u32Magic;
685 if ( VALID_PTR(pRealRec)
686 && ( (u32Magic = pRealRec->Core.u32Magic) == RTLOCKVALRECEXCL_MAGIC
687 || u32Magic == RTLOCKVALRECSHRD_MAGIC
688 || u32Magic == RTLOCKVALRECSHRDOWN_MAGIC)
689 )
690 rtLockValComplainAboutLockHlp(pszPrefix, pRealRec, pszSuffix, u32Magic,
691 &pRec->Nest.SrcPos, pRec->Nest.cRecursion, "/r");
692 else
693 RTAssertMsg2AddWeak("%sbad rrec=%p nrec=%p r=%u pos={%Rbn(%u) %Rfn %p}%s", pszPrefix,
694 pRealRec, pRec, pRec->Nest.cRecursion,
695 pRec->Nest.SrcPos.pszFile, pRec->Nest.SrcPos.uLine, pRec->Nest.SrcPos.pszFunction, pRec->Nest.SrcPos.uId,
696 pszSuffix);
697 break;
698 }
699
700 default:
701 RTAssertMsg2AddWeak("%spRec=%p u32Magic=%#x (bad)%s", pszPrefix, pRec, pRec->Core.u32Magic, pszSuffix);
702 break;
703 }
704 }
705#undef FIX_REC
706}
707
708
709/**
710 * Dump the lock stack.
711 *
712 * @param pThread The thread which lock stack we're gonna dump.
713 * @param cchIndent The indentation in chars.
714 * @param cMinFrames The minimum number of frames to consider
715 * dumping.
716 * @param pHighightRec Record that should be marked specially in the
717 * dump.
718 */
719static void rtLockValComplainAboutLockStack(PRTTHREADINT pThread, unsigned cchIndent, uint32_t cMinFrames,
720 PRTLOCKVALRECUNION pHighightRec)
721{
722 if ( VALID_PTR(pThread)
723 && !ASMAtomicUoReadBool(&g_fLockValidatorQuiet)
724 && pThread->u32Magic == RTTHREADINT_MAGIC
725 )
726 {
727 uint32_t cEntries = rtLockValidatorStackDepth(pThread);
728 if (cEntries >= cMinFrames)
729 {
730 RTAssertMsg2AddWeak("%*s---- start of lock stack for %p %s - %u entr%s ----\n", cchIndent, "",
731 pThread, pThread->szName, cEntries, cEntries == 1 ? "y" : "ies");
732 PRTLOCKVALRECUNION pCur = rtLockValidatorReadRecUnionPtr(&pThread->LockValidator.pStackTop);
733 for (uint32_t i = 0; VALID_PTR(pCur); i++)
734 {
735 char szPrefix[80];
736 RTStrPrintf(szPrefix, sizeof(szPrefix), "%*s#%02u: ", cchIndent, "", i);
737 rtLockValComplainAboutLock(szPrefix, pCur, pHighightRec != pCur ? "\n" : " (*)\n");
738 switch (pCur->Core.u32Magic)
739 {
740 case RTLOCKVALRECEXCL_MAGIC: pCur = rtLockValidatorReadRecUnionPtr(&pCur->Excl.pDown); break;
741 case RTLOCKVALRECSHRDOWN_MAGIC: pCur = rtLockValidatorReadRecUnionPtr(&pCur->ShrdOwner.pDown); break;
742 case RTLOCKVALRECNEST_MAGIC: pCur = rtLockValidatorReadRecUnionPtr(&pCur->Nest.pDown); break;
743 default:
744 RTAssertMsg2AddWeak("%*s<bad stack frame>\n", cchIndent, "");
745 pCur = NULL;
746 break;
747 }
748 }
749 RTAssertMsg2AddWeak("%*s---- end of lock stack ----\n", cchIndent, "");
750 }
751 }
752}
753
754
755/**
756 * Launch the initial complaint.
757 *
758 * @param pszWhat What we're complaining about.
759 * @param pSrcPos Where we are complaining from, as it were.
760 * @param pThreadSelf The calling thread.
761 * @param pRec The main lock involved. Can be NULL.
762 * @param fDumpStack Whether to dump the lock stack (true) or not
763 * (false).
764 */
765static void rtLockValComplainFirst(const char *pszWhat, PCRTLOCKVALSRCPOS pSrcPos, PRTTHREADINT pThreadSelf,
766 PRTLOCKVALRECUNION pRec, bool fDumpStack)
767{
768 if (!ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
769 {
770 ASMCompilerBarrier(); /* paranoia */
771 RTAssertMsg1Weak("RTLockValidator", pSrcPos ? pSrcPos->uLine : 0, pSrcPos ? pSrcPos->pszFile : NULL, pSrcPos ? pSrcPos->pszFunction : NULL);
772 if (pSrcPos && pSrcPos->uId)
773 RTAssertMsg2Weak("%s [uId=%p thrd=%s]\n", pszWhat, pSrcPos->uId, VALID_PTR(pThreadSelf) ? pThreadSelf->szName : "<NIL>");
774 else
775 RTAssertMsg2Weak("%s [thrd=%s]\n", pszWhat, VALID_PTR(pThreadSelf) ? pThreadSelf->szName : "<NIL>");
776 rtLockValComplainAboutLock("Lock: ", pRec, "\n");
777 if (fDumpStack)
778 rtLockValComplainAboutLockStack(pThreadSelf, 0, 1, pRec);
779 }
780}
781
782
783/**
784 * Continue bitching.
785 *
786 * @param pszFormat Format string.
787 * @param ... Format arguments.
788 */
789static void rtLockValComplainMore(const char *pszFormat, ...)
790{
791 if (!ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
792 {
793 va_list va;
794 va_start(va, pszFormat);
795 RTAssertMsg2AddWeakV(pszFormat, va);
796 va_end(va);
797 }
798}
799
800
801/**
802 * Raise a panic if enabled.
803 */
804static void rtLockValComplainPanic(void)
805{
806 if (ASMAtomicUoReadBool(&g_fLockValidatorMayPanic))
807 RTAssertPanic();
808}
809
810
811/**
812 * Copy a source position record.
813 *
814 * @param pDst The destination.
815 * @param pSrc The source. Can be NULL.
816 */
817DECL_FORCE_INLINE(void) rtLockValidatorSrcPosCopy(PRTLOCKVALSRCPOS pDst, PCRTLOCKVALSRCPOS pSrc)
818{
819 if (pSrc)
820 {
821 ASMAtomicUoWriteU32(&pDst->uLine, pSrc->uLine);
822 ASMAtomicUoWritePtr(&pDst->pszFile, pSrc->pszFile);
823 ASMAtomicUoWritePtr(&pDst->pszFunction, pSrc->pszFunction);
824 ASMAtomicUoWritePtr((void * volatile *)&pDst->uId, (void *)pSrc->uId);
825 }
826 else
827 {
828 ASMAtomicUoWriteU32(&pDst->uLine, 0);
829 ASMAtomicUoWriteNullPtr(&pDst->pszFile);
830 ASMAtomicUoWriteNullPtr(&pDst->pszFunction);
831 ASMAtomicUoWritePtr(&pDst->uId, (RTHCUINTPTR)0);
832 }
833}
834
835
836/**
837 * Init a source position record.
838 *
839 * @param pSrcPos The source position record.
840 */
841DECL_FORCE_INLINE(void) rtLockValidatorSrcPosInit(PRTLOCKVALSRCPOS pSrcPos)
842{
843 pSrcPos->pszFile = NULL;
844 pSrcPos->pszFunction = NULL;
845 pSrcPos->uId = 0;
846 pSrcPos->uLine = 0;
847#if HC_ARCH_BITS == 64
848 pSrcPos->u32Padding = 0;
849#endif
850}
851
852
853/**
854 * Hashes the specified source position.
855 *
856 * @returns Hash.
857 * @param pSrcPos The source position record.
858 */
859static uint32_t rtLockValidatorSrcPosHash(PCRTLOCKVALSRCPOS pSrcPos)
860{
861 uint32_t uHash;
862 if ( ( pSrcPos->pszFile
863 || pSrcPos->pszFunction)
864 && pSrcPos->uLine != 0)
865 {
866 uHash = 0;
867 if (pSrcPos->pszFile)
868 uHash = sdbmInc(pSrcPos->pszFile, uHash);
869 if (pSrcPos->pszFunction)
870 uHash = sdbmInc(pSrcPos->pszFunction, uHash);
871 uHash += pSrcPos->uLine;
872 }
873 else
874 {
875 Assert(pSrcPos->uId);
876 uHash = (uint32_t)pSrcPos->uId;
877 }
878
879 return uHash;
880}
881
882
883/**
884 * Compares two source positions.
885 *
886 * @returns 0 if equal, < 0 if pSrcPos1 is smaller than pSrcPos2, > 0 if
887 * otherwise.
888 * @param pSrcPos1 The first source position.
889 * @param pSrcPos2 The second source position.
890 */
891static int rtLockValidatorSrcPosCompare(PCRTLOCKVALSRCPOS pSrcPos1, PCRTLOCKVALSRCPOS pSrcPos2)
892{
893 if (pSrcPos1->uLine != pSrcPos2->uLine)
894 return pSrcPos1->uLine < pSrcPos2->uLine ? -1 : 1;
895
896 int iDiff = RTStrCmp(pSrcPos1->pszFile, pSrcPos2->pszFile);
897 if (iDiff != 0)
898 return iDiff;
899
900 iDiff = RTStrCmp(pSrcPos1->pszFunction, pSrcPos2->pszFunction);
901 if (iDiff != 0)
902 return iDiff;
903
904 if (pSrcPos1->uId != pSrcPos2->uId)
905 return pSrcPos1->uId < pSrcPos2->uId ? -1 : 1;
906 return 0;
907}
908
909
910
911/**
912 * Serializes destruction of RTLOCKVALREC* and RTTHREADINT structures.
913 */
914DECLHIDDEN(void) rtLockValidatorSerializeDestructEnter(void)
915{
916 RTSEMXROADS hXRoads = g_hLockValidatorXRoads;
917 if (hXRoads != NIL_RTSEMXROADS)
918 RTSemXRoadsNSEnter(hXRoads);
919}
920
921
922/**
923 * Call after rtLockValidatorSerializeDestructEnter.
924 */
925DECLHIDDEN(void) rtLockValidatorSerializeDestructLeave(void)
926{
927 RTSEMXROADS hXRoads = g_hLockValidatorXRoads;
928 if (hXRoads != NIL_RTSEMXROADS)
929 RTSemXRoadsNSLeave(hXRoads);
930}
931
932
933/**
934 * Serializes deadlock detection against destruction of the objects being
935 * inspected.
936 */
937DECLINLINE(void) rtLockValidatorSerializeDetectionEnter(void)
938{
939 RTSEMXROADS hXRoads = g_hLockValidatorXRoads;
940 if (hXRoads != NIL_RTSEMXROADS)
941 RTSemXRoadsEWEnter(hXRoads);
942}
943
944
945/**
946 * Call after rtLockValidatorSerializeDetectionEnter.
947 */
948DECLHIDDEN(void) rtLockValidatorSerializeDetectionLeave(void)
949{
950 RTSEMXROADS hXRoads = g_hLockValidatorXRoads;
951 if (hXRoads != NIL_RTSEMXROADS)
952 RTSemXRoadsEWLeave(hXRoads);
953}
954
955
956/**
957 * Initializes the per thread lock validator data.
958 *
959 * @param pPerThread The data.
960 */
961DECLHIDDEN(void) rtLockValidatorInitPerThread(RTLOCKVALPERTHREAD *pPerThread)
962{
963 pPerThread->bmFreeShrdOwners = UINT32_MAX;
964
965 /* ASSUMES the rest has already been zeroed. */
966 Assert(pPerThread->pRec == NULL);
967 Assert(pPerThread->cWriteLocks == 0);
968 Assert(pPerThread->cReadLocks == 0);
969 Assert(pPerThread->fInValidator == false);
970 Assert(pPerThread->pStackTop == NULL);
971}
972
973
974/**
975 * Delete the per thread lock validator data.
976 *
977 * @param pPerThread The data.
978 */
979DECLHIDDEN(void) rtLockValidatorDeletePerThread(RTLOCKVALPERTHREAD *pPerThread)
980{
981 /*
982 * Check that the thread doesn't own any locks at this time.
983 */
984 if (pPerThread->pStackTop)
985 {
986 rtLockValComplainFirst("Thread terminating owning locks!", NULL,
987 RT_FROM_MEMBER(pPerThread, RTTHREADINT, LockValidator),
988 pPerThread->pStackTop, true);
989 rtLockValComplainPanic();
990 }
991
992 /*
993 * Free the recursion records.
994 */
995 PRTLOCKVALRECNEST pCur = pPerThread->pFreeNestRecs;
996 pPerThread->pFreeNestRecs = NULL;
997 while (pCur)
998 {
999 PRTLOCKVALRECNEST pNext = pCur->pNextFree;
1000 RTMemFree(pCur);
1001 pCur = pNext;
1002 }
1003}
1004
1005RTDECL(int) RTLockValidatorClassCreateEx(PRTLOCKVALCLASS phClass, PCRTLOCKVALSRCPOS pSrcPos,
1006 bool fAutodidact, bool fRecursionOk, bool fStrictReleaseOrder,
1007 RTMSINTERVAL cMsMinDeadlock, RTMSINTERVAL cMsMinOrder,
1008 const char *pszNameFmt, ...)
1009{
1010 va_list va;
1011 va_start(va, pszNameFmt);
1012 int rc = RTLockValidatorClassCreateExV(phClass, pSrcPos, fAutodidact, fRecursionOk, fStrictReleaseOrder,
1013 cMsMinDeadlock, cMsMinOrder, pszNameFmt, va);
1014 va_end(va);
1015 return rc;
1016}
1017
1018
1019RTDECL(int) RTLockValidatorClassCreateExV(PRTLOCKVALCLASS phClass, PCRTLOCKVALSRCPOS pSrcPos,
1020 bool fAutodidact, bool fRecursionOk, bool fStrictReleaseOrder,
1021 RTMSINTERVAL cMsMinDeadlock, RTMSINTERVAL cMsMinOrder,
1022 const char *pszNameFmt, va_list va)
1023{
1024 Assert(cMsMinDeadlock >= 1);
1025 Assert(cMsMinOrder >= 1);
1026 AssertPtr(pSrcPos);
1027
1028 /*
1029 * Format the name and calc its length.
1030 */
1031 size_t cbName;
1032 char szName[32];
1033 if (pszNameFmt && *pszNameFmt)
1034 cbName = RTStrPrintfV(szName, sizeof(szName), pszNameFmt, va) + 1;
1035 else
1036 {
1037 static uint32_t volatile s_cAnonymous = 0;
1038 uint32_t i = ASMAtomicIncU32(&s_cAnonymous);
1039 cbName = RTStrPrintf(szName, sizeof(szName), "anon-%u", i - 1) + 1;
1040 }
1041
1042 /*
1043 * Figure out the file and function name lengths and allocate memory for
1044 * it all.
1045 */
1046 size_t const cbFile = pSrcPos->pszFile ? strlen(pSrcPos->pszFile) + 1 : 0;
1047 size_t const cbFunction = pSrcPos->pszFile ? strlen(pSrcPos->pszFunction) + 1 : 0;
1048 RTLOCKVALCLASSINT *pThis = (RTLOCKVALCLASSINT *)RTMemAllocVar(sizeof(*pThis) + cbFile + cbFunction + cbName);
1049 if (!pThis)
1050 return VERR_NO_MEMORY;
1051
1052 /*
1053 * Initialize the class data.
1054 */
1055 pThis->Core.Key = rtLockValidatorSrcPosHash(pSrcPos);
1056 pThis->Core.uchHeight = 0;
1057 pThis->Core.pLeft = NULL;
1058 pThis->Core.pRight = NULL;
1059 pThis->Core.pList = NULL;
1060 pThis->u32Magic = RTLOCKVALCLASS_MAGIC;
1061 pThis->cRefs = 1;
1062 pThis->fAutodidact = fAutodidact;
1063 pThis->fRecursionOk = fRecursionOk;
1064 pThis->fStrictReleaseOrder = fStrictReleaseOrder;
1065 pThis->fInTree = false;
1066 pThis->fDonateRefToNextRetainer = false;
1067 pThis->afReserved[0] = false;
1068 pThis->afReserved[1] = false;
1069 pThis->afReserved[2] = false;
1070 pThis->cMsMinDeadlock = cMsMinDeadlock;
1071 pThis->cMsMinOrder = cMsMinOrder;
1072 for (unsigned i = 0; i < RT_ELEMENTS(pThis->au32Reserved); i++)
1073 pThis->au32Reserved[i] = 0;
1074 for (unsigned i = 0; i < RT_ELEMENTS(pThis->PriorLocks.aRefs); i++)
1075 {
1076 pThis->PriorLocks.aRefs[i].hClass = NIL_RTLOCKVALCLASS;
1077 pThis->PriorLocks.aRefs[i].cLookups = 0;
1078 pThis->PriorLocks.aRefs[i].fAutodidacticism = false;
1079 pThis->PriorLocks.aRefs[i].afReserved[0] = false;
1080 pThis->PriorLocks.aRefs[i].afReserved[1] = false;
1081 pThis->PriorLocks.aRefs[i].afReserved[2] = false;
1082 }
1083 pThis->PriorLocks.pNext = NULL;
1084 for (unsigned i = 0; i < RT_ELEMENTS(pThis->apPriorLocksHash); i++)
1085 pThis->apPriorLocksHash[i] = NULL;
1086 char *pszDst = (char *)(pThis + 1);
1087 pThis->pszName = (char *)memcpy(pszDst, szName, cbName);
1088 pszDst += cbName;
1089 rtLockValidatorSrcPosCopy(&pThis->CreatePos, pSrcPos);
1090 pThis->CreatePos.pszFile = pSrcPos->pszFile ? (char *)memcpy(pszDst, pSrcPos->pszFile, cbFile) : NULL;
1091 pszDst += cbFile;
1092 pThis->CreatePos.pszFunction= pSrcPos->pszFunction ? (char *)memcpy(pszDst, pSrcPos->pszFunction, cbFunction) : NULL;
1093 Assert(rtLockValidatorSrcPosHash(&pThis->CreatePos) == pThis->Core.Key);
1094#ifdef RTLOCKVAL_WITH_CLASS_HASH_STATS
1095 pThis->cHashHits = 0;
1096 pThis->cHashMisses = 0;
1097#endif
1098
1099 *phClass = pThis;
1100 return VINF_SUCCESS;
1101}
1102
1103
1104RTDECL(int) RTLockValidatorClassCreate(PRTLOCKVALCLASS phClass, bool fAutodidact, RT_SRC_POS_DECL, const char *pszNameFmt, ...)
1105{
1106 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_POS_NO_ID();
1107 va_list va;
1108 va_start(va, pszNameFmt);
1109 int rc = RTLockValidatorClassCreateExV(phClass, &SrcPos,
1110 fAutodidact, true /*fRecursionOk*/, false /*fStrictReleaseOrder*/,
1111 1 /*cMsMinDeadlock*/, 1 /*cMsMinOrder*/,
1112 pszNameFmt, va);
1113 va_end(va);
1114 return rc;
1115}
1116
1117
1118/**
1119 * Creates a new lock validator class with a reference that is consumed by the
1120 * first call to RTLockValidatorClassRetain.
1121 *
1122 * This is tailored for use in the parameter list of a semaphore constructor.
1123 *
1124 * @returns Class handle with a reference that is automatically consumed by the
1125 * first retainer. NIL_RTLOCKVALCLASS if we run into trouble.
1126 *
1127 * @param SRC_POS The source position where call is being made from.
1128 * Use RT_SRC_POS when possible. Optional.
1129 * @param pszNameFmt Class name format string, optional (NULL). Max
1130 * length is 32 bytes.
1131 * @param ... Format string arguments.
1132 */
1133RTDECL(RTLOCKVALCLASS) RTLockValidatorClassCreateUnique(RT_SRC_POS_DECL, const char *pszNameFmt, ...)
1134{
1135 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_POS_NO_ID();
1136 RTLOCKVALCLASSINT *pClass;
1137 va_list va;
1138 va_start(va, pszNameFmt);
1139 int rc = RTLockValidatorClassCreateExV(&pClass, &SrcPos,
1140 true /*fAutodidact*/, true /*fRecursionOk*/, false /*fStrictReleaseOrder*/,
1141 1 /*cMsMinDeadlock*/, 1 /*cMsMinOrder*/,
1142 pszNameFmt, va);
1143 va_end(va);
1144 if (RT_FAILURE(rc))
1145 return NIL_RTLOCKVALCLASS;
1146 ASMAtomicWriteBool(&pClass->fDonateRefToNextRetainer, true); /* see rtLockValidatorClassRetain */
1147 return pClass;
1148}
1149
1150
/**
 * Internal class retainer.
 * @returns The new reference count.
 * @param   pClass              The class.
 */
DECL_FORCE_INLINE(uint32_t) rtLockValidatorClassRetain(RTLOCKVALCLASSINT *pClass)
{
    uint32_t cRefs = ASMAtomicIncU32(&pClass->cRefs);
    if (cRefs > RTLOCKVALCLASS_MAX_REFS)
        /* Saturate the reference counter rather than letting it wrap. */
        ASMAtomicWriteU32(&pClass->cRefs, RTLOCKVALCLASS_MAX_REFS);
    else if (   cRefs == 2
             && ASMAtomicXchgBool(&pClass->fDonateRefToNextRetainer, false))
        /* Consume the reference donated by RTLockValidatorClassCreateUnique:
           the first retain takes the count 1 -> 2, then right back to 1.  The
           atomic xchg ensures only one retainer wins the donation. */
        cRefs = ASMAtomicDecU32(&pClass->cRefs);
    return cRefs;
}
1166
1167
1168/**
1169 * Validates and retains a lock validator class.
1170 *
1171 * @returns @a hClass on success, NIL_RTLOCKVALCLASS on failure.
1172 * @param hClass The class handle. NIL_RTLOCKVALCLASS is ok.
1173 */
1174DECL_FORCE_INLINE(RTLOCKVALCLASS) rtLockValidatorClassValidateAndRetain(RTLOCKVALCLASS hClass)
1175{
1176 if (hClass == NIL_RTLOCKVALCLASS)
1177 return hClass;
1178 AssertPtrReturn(hClass, NIL_RTLOCKVALCLASS);
1179 AssertReturn(hClass->u32Magic == RTLOCKVALCLASS_MAGIC, NIL_RTLOCKVALCLASS);
1180 rtLockValidatorClassRetain(hClass);
1181 return hClass;
1182}
1183
1184
/**
 * Internal class releaser.
 * @returns The new reference count.
 * @param   pClass              The class.
 */
DECLINLINE(uint32_t) rtLockValidatorClassRelease(RTLOCKVALCLASSINT *pClass)
{
    uint32_t cRefs = ASMAtomicDecU32(&pClass->cRefs);
    if (cRefs + 1 == RTLOCKVALCLASS_MAX_REFS)
        /* The counter was saturated by rtLockValidatorClassRetain; keep it
           pegged at the maximum so such classes are never destroyed. */
        ASMAtomicWriteU32(&pClass->cRefs, RTLOCKVALCLASS_MAX_REFS);
    else if (!cRefs)
        /* Last reference - tear the class down. */
        rtLockValidatorClassDestroy(pClass);
    return cRefs;
}
1199
1200
1201/**
1202 * Destroys a class once there are not more references to it.
1203 *
1204 * @param pClass The class.
1205 */
1206static void rtLockValidatorClassDestroy(RTLOCKVALCLASSINT *pClass)
1207{
1208 AssertReturnVoid(!pClass->fInTree);
1209 ASMAtomicWriteU32(&pClass->u32Magic, RTLOCKVALCLASS_MAGIC_DEAD);
1210
1211 PRTLOCKVALCLASSREFCHUNK pChunk = &pClass->PriorLocks;
1212 while (pChunk)
1213 {
1214 for (uint32_t i = 0; i < RT_ELEMENTS(pChunk->aRefs); i++)
1215 {
1216 RTLOCKVALCLASSINT *pClass2 = pChunk->aRefs[i].hClass;
1217 if (pClass2 != NIL_RTLOCKVALCLASS)
1218 {
1219 pChunk->aRefs[i].hClass = NIL_RTLOCKVALCLASS;
1220 rtLockValidatorClassRelease(pClass2);
1221 }
1222 }
1223
1224 PRTLOCKVALCLASSREFCHUNK pNext = pChunk->pNext;
1225 pChunk->pNext = NULL;
1226 if (pChunk != &pClass->PriorLocks)
1227 RTMemFree(pChunk);
1228 pChunk = pNext;
1229 }
1230
1231 RTMemFree(pClass);
1232}
1233
1234
1235RTDECL(RTLOCKVALCLASS) RTLockValidatorClassFindForSrcPos(PRTLOCKVALSRCPOS pSrcPos)
1236{
1237 if (g_hLockValClassTreeRWLock == NIL_RTSEMRW)
1238 rtLockValidatorLazyInit();
1239 int rcLock = RTSemRWRequestRead(g_hLockValClassTreeRWLock, RT_INDEFINITE_WAIT);
1240
1241 uint32_t uSrcPosHash = rtLockValidatorSrcPosHash(pSrcPos);
1242 RTLOCKVALCLASSINT *pClass = (RTLOCKVALCLASSINT *)RTAvllU32Get(&g_LockValClassTree, uSrcPosHash);
1243 while (pClass)
1244 {
1245 if (rtLockValidatorSrcPosCompare(&pClass->CreatePos, pSrcPos) == 0)
1246 break;
1247 pClass = (RTLOCKVALCLASSINT *)pClass->Core.pList;
1248 }
1249
1250 if (RT_SUCCESS(rcLock))
1251 RTSemRWReleaseRead(g_hLockValClassTreeRWLock);
1252 return pClass;
1253}
1254
1255
/**
 * Looks up the class associated with the given source position, creating and
 * inserting a new autodidact class into the lookup tree if none exists yet.
 *
 * NOTE(review): two threads racing on the same new position may each create a
 * class; both get inserted under the same hash key and later lookups return
 * the first list match - presumably an accepted benign race; confirm.
 */
RTDECL(RTLOCKVALCLASS) RTLockValidatorClassForSrcPos(RT_SRC_POS_DECL, const char *pszNameFmt, ...)
{
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_POS_NO_ID();
    RTLOCKVALCLASS hClass = RTLockValidatorClassFindForSrcPos(&SrcPos);
    if (hClass == NIL_RTLOCKVALCLASS)
    {
        /*
         * Create a new class and insert it into the tree.
         */
        va_list va;
        va_start(va, pszNameFmt);
        int rc = RTLockValidatorClassCreateExV(&hClass, &SrcPos,
                                               true /*fAutodidact*/, true /*fRecursionOk*/, false /*fStrictReleaseOrder*/,
                                               1 /*cMsMinDeadlock*/, 1 /*cMsMinOrder*/,
                                               pszNameFmt, va);
        va_end(va);
        if (RT_SUCCESS(rc))
        {
            /* Lazily create the tree lock before taking it for writing. */
            if (g_hLockValClassTreeRWLock == NIL_RTSEMRW)
                rtLockValidatorLazyInit();
            int rcLock = RTSemRWRequestWrite(g_hLockValClassTreeRWLock, RT_INDEFINITE_WAIT);

            Assert(!hClass->fInTree);
            hClass->fInTree = RTAvllU32Insert(&g_LockValClassTree, &hClass->Core);
            Assert(hClass->fInTree);

            if (RT_SUCCESS(rcLock))
                RTSemRWReleaseWrite(g_hLockValClassTreeRWLock);
            return hClass;
        }
    }
    /* Returns the found class, or NIL on creation failure. */
    return hClass;
}
1289
1290
1291RTDECL(uint32_t) RTLockValidatorClassRetain(RTLOCKVALCLASS hClass)
1292{
1293 RTLOCKVALCLASSINT *pClass = hClass;
1294 AssertPtrReturn(pClass, UINT32_MAX);
1295 AssertReturn(pClass->u32Magic == RTLOCKVALCLASS_MAGIC, UINT32_MAX);
1296 return rtLockValidatorClassRetain(pClass);
1297}
1298
1299
1300RTDECL(uint32_t) RTLockValidatorClassRelease(RTLOCKVALCLASS hClass)
1301{
1302 RTLOCKVALCLASSINT *pClass = hClass;
1303 if (pClass == NIL_RTLOCKVALCLASS)
1304 return 0;
1305 AssertPtrReturn(pClass, UINT32_MAX);
1306 AssertReturn(pClass->u32Magic == RTLOCKVALCLASS_MAGIC, UINT32_MAX);
1307 return rtLockValidatorClassRelease(pClass);
1308}
1309
1310
/**
 * Worker for rtLockValidatorClassIsPriorClass that does a linear search thru
 * all the chunks for @a pPriorClass.
 *
 * @returns true / false.
 * @param   pClass              The class to search.
 * @param   pPriorClass         The class to search for.
 */
static bool rtLockValidatorClassIsPriorClassByLinearSearch(RTLOCKVALCLASSINT *pClass, RTLOCKVALCLASSINT *pPriorClass)
{
    for (PRTLOCKVALCLASSREFCHUNK pChunk = &pClass->PriorLocks; pChunk; pChunk = pChunk->pNext)
        for (uint32_t i = 0; i < RT_ELEMENTS(pChunk->aRefs); i++)
        {
            if (pChunk->aRefs[i].hClass == pPriorClass)
            {
                /* Bump the lookup counter, saturating at the maximum value. */
                uint32_t cLookups = ASMAtomicIncU32(&pChunk->aRefs[i].cLookups);
                if (RT_UNLIKELY(cLookups >= RTLOCKVALCLASSREF_MAX_LOOKUPS_FIX))
                {
                    ASMAtomicWriteU32(&pChunk->aRefs[i].cLookups, RTLOCKVALCLASSREF_MAX_LOOKUPS);
                    cLookups = RTLOCKVALCLASSREF_MAX_LOOKUPS;
                }

                /* update the hash table entry.  Only displace an existing
                   entry if this reference is looked up noticeably more often
                   (128+ lookups) to keep the cache stable. */
                PRTLOCKVALCLASSREF *ppHashEntry = &pClass->apPriorLocksHash[RTLOCKVALCLASS_HASH(pPriorClass)];
                if (   !(*ppHashEntry)
                    || (*ppHashEntry)->cLookups + 128 < cLookups)
                    ASMAtomicWritePtr(ppHashEntry, &pChunk->aRefs[i]);

#ifdef RTLOCKVAL_WITH_CLASS_HASH_STATS
                ASMAtomicIncU32(&pClass->cHashMisses);
#endif
                return true;
            }
        }

    return false;
}
1348
1349
/**
 * Checks if @a pPriorClass is a known prior class.
 *
 * @returns true / false.
 * @param   pClass              The class to search.
 * @param   pPriorClass         The class to search for.
 */
DECL_FORCE_INLINE(bool) rtLockValidatorClassIsPriorClass(RTLOCKVALCLASSINT *pClass, RTLOCKVALCLASSINT *pPriorClass)
{
    /*
     * Hash lookup here.
     */
    PRTLOCKVALCLASSREF pRef = pClass->apPriorLocksHash[RTLOCKVALCLASS_HASH(pPriorClass)];
    if (   pRef
        && pRef->hClass == pPriorClass)
    {
        /* Hash hit: bump the lookup counter, saturating at the maximum. */
        uint32_t cLookups = ASMAtomicIncU32(&pRef->cLookups);
        if (RT_UNLIKELY(cLookups >= RTLOCKVALCLASSREF_MAX_LOOKUPS_FIX))
            ASMAtomicWriteU32(&pRef->cLookups, RTLOCKVALCLASSREF_MAX_LOOKUPS);
#ifdef RTLOCKVAL_WITH_CLASS_HASH_STATS
        ASMAtomicIncU32(&pClass->cHashHits);
#endif
        return true;
    }

    /* Hash miss: fall back on a linear search through all ref chunks. */
    return rtLockValidatorClassIsPriorClassByLinearSearch(pClass, pPriorClass);
}
1377
1378
/**
 * Adds a class to the prior list.
 *
 * @returns VINF_SUCCESS, VERR_NO_MEMORY or VERR_SEM_LV_WRONG_ORDER.
 * @param   pClass              The class to work on.
 * @param   pPriorClass         The class to add.
 * @param   fAutodidacticism    Whether we're teaching ourselves (true) or
 *                              somebody is teaching us via the API (false).
 * @param   pSrcPos             Where this rule was added (optional).
 */
static int rtLockValidatorClassAddPriorClass(RTLOCKVALCLASSINT *pClass, RTLOCKVALCLASSINT *pPriorClass,
                                             bool fAutodidacticism, PCRTLOCKVALSRCPOS pSrcPos)
{
    NOREF(pSrcPos);
    /* Serialize all teaching via the (lazily created) teach critsect. */
    if (!RTCritSectIsInitialized(&g_LockValClassTeachCS))
        rtLockValidatorLazyInit();
    int rcLock = RTCritSectEnter(&g_LockValClassTeachCS);

    /*
     * Check that there are no conflict (no assert since we might race each other).
     */
    int rc = VERR_SEM_LV_INTERNAL_ERROR;
    /* Adding pPriorClass before pClass while the reverse rule exists would
       create a lock order cycle - reject (or tolerate in soft mode). */
    if (!rtLockValidatorClassIsPriorClass(pPriorClass, pClass))
    {
        if (!rtLockValidatorClassIsPriorClass(pClass, pPriorClass))
        {
            /*
             * Scan the table for a free entry, allocating a new chunk if necessary.
             */
            for (PRTLOCKVALCLASSREFCHUNK pChunk = &pClass->PriorLocks; ; pChunk = pChunk->pNext)
            {
                bool fDone = false;
                for (uint32_t i = 0; i < RT_ELEMENTS(pChunk->aRefs); i++)
                {
                    /* Claim a NIL slot atomically so lock-free readers never
                       see a half-initialized entry. */
                    ASMAtomicCmpXchgHandle(&pChunk->aRefs[i].hClass, pPriorClass, NIL_RTLOCKVALCLASS, fDone);
                    if (fDone)
                    {
                        pChunk->aRefs[i].fAutodidacticism = fAutodidacticism;
                        rtLockValidatorClassRetain(pPriorClass);
                        rc = VINF_SUCCESS;
                        break;
                    }
                }
                if (fDone)
                    break;

                /* If no more chunks, allocate a new one and insert the class before linking it. */
                if (!pChunk->pNext)
                {
                    PRTLOCKVALCLASSREFCHUNK pNew = (PRTLOCKVALCLASSREFCHUNK)RTMemAlloc(sizeof(*pNew));
                    if (!pNew)
                    {
                        rc = VERR_NO_MEMORY;
                        break;
                    }
                    pNew->pNext = NULL;
                    for (uint32_t i = 0; i < RT_ELEMENTS(pNew->aRefs); i++)
                    {
                        pNew->aRefs[i].hClass           = NIL_RTLOCKVALCLASS;
                        pNew->aRefs[i].cLookups         = 0;
                        pNew->aRefs[i].fAutodidacticism = false;
                        pNew->aRefs[i].afReserved[0]    = false;
                        pNew->aRefs[i].afReserved[1]    = false;
                        pNew->aRefs[i].afReserved[2]    = false;
                    }

                    pNew->aRefs[0].hClass           = pPriorClass;
                    pNew->aRefs[0].fAutodidacticism = fAutodidacticism;

                    /* Publish the fully initialized chunk with an atomic
                       pointer write (readers scan lock-free). */
                    ASMAtomicWritePtr(&pChunk->pNext, pNew);
                    rtLockValidatorClassRetain(pPriorClass);
                    rc = VINF_SUCCESS;
                    break;
                }
            } /* chunk loop */
        }
        else
            /* Already a known prior class - nothing to do. */
            rc = VINF_SUCCESS;
    }
    else
        rc = !g_fLockValSoftWrongOrder ? VERR_SEM_LV_WRONG_ORDER : VINF_SUCCESS;

    if (RT_SUCCESS(rcLock))
        RTCritSectLeave(&g_LockValClassTeachCS);
    return rc;
}
1465
1466
1467RTDECL(int) RTLockValidatorClassAddPriorClass(RTLOCKVALCLASS hClass, RTLOCKVALCLASS hPriorClass)
1468{
1469 RTLOCKVALCLASSINT *pClass = hClass;
1470 AssertPtrReturn(pClass, VERR_INVALID_HANDLE);
1471 AssertReturn(pClass->u32Magic == RTLOCKVALCLASS_MAGIC, VERR_INVALID_HANDLE);
1472
1473 RTLOCKVALCLASSINT *pPriorClass = hPriorClass;
1474 AssertPtrReturn(pPriorClass, VERR_INVALID_HANDLE);
1475 AssertReturn(pPriorClass->u32Magic == RTLOCKVALCLASS_MAGIC, VERR_INVALID_HANDLE);
1476
1477 return rtLockValidatorClassAddPriorClass(pClass, pPriorClass, false /*fAutodidacticism*/, NULL);
1478}
1479
1480
1481RTDECL(int) RTLockValidatorClassEnforceStrictReleaseOrder(RTLOCKVALCLASS hClass, bool fEnabled)
1482{
1483 RTLOCKVALCLASSINT *pClass = hClass;
1484 AssertPtrReturn(pClass, VERR_INVALID_HANDLE);
1485 AssertReturn(pClass->u32Magic == RTLOCKVALCLASS_MAGIC, VERR_INVALID_HANDLE);
1486
1487 ASMAtomicWriteBool(&pClass->fStrictReleaseOrder, fEnabled);
1488 return VINF_SUCCESS;
1489}
1490
1491
1492/**
1493 * Unlinks all siblings.
1494 *
1495 * This is used during record deletion and assumes no races.
1496 *
1497 * @param pCore One of the siblings.
1498 */
1499static void rtLockValidatorUnlinkAllSiblings(PRTLOCKVALRECCORE pCore)
1500{
1501 /* ASSUMES sibling destruction doesn't involve any races and that all
1502 related records are to be disposed off now. */
1503 PRTLOCKVALRECUNION pSibling = (PRTLOCKVALRECUNION)pCore;
1504 while (pSibling)
1505 {
1506 PRTLOCKVALRECUNION volatile *ppCoreNext;
1507 switch (pSibling->Core.u32Magic)
1508 {
1509 case RTLOCKVALRECEXCL_MAGIC:
1510 case RTLOCKVALRECEXCL_MAGIC_DEAD:
1511 ppCoreNext = &pSibling->Excl.pSibling;
1512 break;
1513
1514 case RTLOCKVALRECSHRD_MAGIC:
1515 case RTLOCKVALRECSHRD_MAGIC_DEAD:
1516 ppCoreNext = &pSibling->Shared.pSibling;
1517 break;
1518
1519 default:
1520 AssertFailed();
1521 ppCoreNext = NULL;
1522 break;
1523 }
1524 if (RT_UNLIKELY(ppCoreNext))
1525 break;
1526 pSibling = ASMAtomicXchgPtrT(ppCoreNext, NULL, PRTLOCKVALRECUNION);
1527 }
1528}
1529
1530
1531RTDECL(int) RTLockValidatorRecMakeSiblings(PRTLOCKVALRECCORE pRec1, PRTLOCKVALRECCORE pRec2)
1532{
1533 /*
1534 * Validate input.
1535 */
1536 PRTLOCKVALRECUNION p1 = (PRTLOCKVALRECUNION)pRec1;
1537 PRTLOCKVALRECUNION p2 = (PRTLOCKVALRECUNION)pRec2;
1538
1539 AssertPtrReturn(p1, VERR_SEM_LV_INVALID_PARAMETER);
1540 AssertReturn( p1->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC
1541 || p1->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
1542 , VERR_SEM_LV_INVALID_PARAMETER);
1543
1544 AssertPtrReturn(p2, VERR_SEM_LV_INVALID_PARAMETER);
1545 AssertReturn( p2->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC
1546 || p2->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
1547 , VERR_SEM_LV_INVALID_PARAMETER);
1548
1549 /*
1550 * Link them (circular list).
1551 */
1552 if ( p1->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC
1553 && p2->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC)
1554 {
1555 p1->Excl.pSibling = p2;
1556 p2->Shared.pSibling = p1;
1557 }
1558 else if ( p1->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
1559 && p2->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC)
1560 {
1561 p1->Shared.pSibling = p2;
1562 p2->Excl.pSibling = p1;
1563 }
1564 else
1565 AssertFailedReturn(VERR_SEM_LV_INVALID_PARAMETER); /* unsupported mix */
1566
1567 return VINF_SUCCESS;
1568}
1569
1570
/**
 * Gets the lock name for the given record.
 *
 * @returns Read-only lock name.
 * @param   pRec                The lock record.
 */
DECL_FORCE_INLINE(const char *) rtLockValidatorRecName(PRTLOCKVALRECUNION pRec)
{
    switch (pRec->Core.u32Magic)
    {
        case RTLOCKVALRECEXCL_MAGIC:
            return pRec->Excl.szName;
        case RTLOCKVALRECSHRD_MAGIC:
            return pRec->Shared.szName;
        case RTLOCKVALRECSHRDOWN_MAGIC:
            /* Shared-owner records get their name from the parent shared record. */
            return pRec->ShrdOwner.pSharedRec ? pRec->ShrdOwner.pSharedRec->szName : "orphaned";
        case RTLOCKVALRECNEST_MAGIC:
            /* Nest (recursion) records delegate to the record they wrap. */
            pRec = rtLockValidatorReadRecUnionPtr(&pRec->Nest.pRec);
            if (VALID_PTR(pRec))
            {
                switch (pRec->Core.u32Magic)
                {
                    case RTLOCKVALRECEXCL_MAGIC:
                        return pRec->Excl.szName;
                    case RTLOCKVALRECSHRD_MAGIC:
                        return pRec->Shared.szName;
                    case RTLOCKVALRECSHRDOWN_MAGIC:
                        return pRec->ShrdOwner.pSharedRec ? pRec->ShrdOwner.pSharedRec->szName : "orphaned";
                    default:
                        return "unknown-nested";
                }
            }
            return "orphaned-nested";
        default:
            return "unknown";
    }
}
1608
1609
/**
 * Gets the class for this locking record.
 *
 * @returns Pointer to the class or NIL_RTLOCKVALCLASS.
 * @param   pRec                The lock validator record.
 */
DECLINLINE(RTLOCKVALCLASSINT *) rtLockValidatorRecGetClass(PRTLOCKVALRECUNION pRec)
{
    switch (pRec->Core.u32Magic)
    {
        case RTLOCKVALRECEXCL_MAGIC:
            return pRec->Excl.hClass;

        case RTLOCKVALRECSHRD_MAGIC:
            return pRec->Shared.hClass;

        case RTLOCKVALRECSHRDOWN_MAGIC:
        {
            /* The class lives in the parent shared record; validate it first
               since it may be in the process of being destroyed. */
            PRTLOCKVALRECSHRD pSharedRec = pRec->ShrdOwner.pSharedRec;
            if (RT_LIKELY(   VALID_PTR(pSharedRec)
                          && pSharedRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC))
                return pSharedRec->hClass;
            return NIL_RTLOCKVALCLASS;
        }

        case RTLOCKVALRECNEST_MAGIC:
        {
            /* Nest records delegate to the record they wrap (exclusive or
               shared-owner; a plain shared record is never nested). */
            PRTLOCKVALRECUNION pRealRec = pRec->Nest.pRec;
            if (VALID_PTR(pRealRec))
            {
                switch (pRealRec->Core.u32Magic)
                {
                    case RTLOCKVALRECEXCL_MAGIC:
                        return pRealRec->Excl.hClass;

                    case RTLOCKVALRECSHRDOWN_MAGIC:
                    {
                        PRTLOCKVALRECSHRD pSharedRec = pRealRec->ShrdOwner.pSharedRec;
                        if (RT_LIKELY(   VALID_PTR(pSharedRec)
                                      && pSharedRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC))
                            return pSharedRec->hClass;
                        break;
                    }

                    default:
                        AssertMsgFailed(("%p %p %#x\n", pRec, pRealRec, pRealRec->Core.u32Magic));
                        break;
                }
            }
            return NIL_RTLOCKVALCLASS;
        }

        default:
            AssertMsgFailed(("%#x\n", pRec->Core.u32Magic));
            return NIL_RTLOCKVALCLASS;
    }
}
1667
1668
/**
 * Gets the class for this locking record and the pointer to the one below it in
 * the stack.
 *
 * @returns Pointer to the class or NIL_RTLOCKVALCLASS.
 * @param   pRec                The lock validator record.
 * @param   puSubClass          Where to return the sub-class.
 * @param   ppDown              Where to return the pointer to the record below.
 */
DECL_FORCE_INLINE(RTLOCKVALCLASSINT *)
rtLockValidatorRecGetClassesAndDown(PRTLOCKVALRECUNION pRec, uint32_t *puSubClass, PRTLOCKVALRECUNION *ppDown)
{
    switch (pRec->Core.u32Magic)
    {
        case RTLOCKVALRECEXCL_MAGIC:
            *ppDown = pRec->Excl.pDown;
            *puSubClass = pRec->Excl.uSubClass;
            return pRec->Excl.hClass;

        case RTLOCKVALRECSHRD_MAGIC:
            /* Plain shared records are not stack members, so no down pointer. */
            *ppDown = NULL;
            *puSubClass = pRec->Shared.uSubClass;
            return pRec->Shared.hClass;

        case RTLOCKVALRECSHRDOWN_MAGIC:
        {
            *ppDown = pRec->ShrdOwner.pDown;

            /* Class and sub-class live in the parent shared record; validate
               it since it may be getting destroyed concurrently. */
            PRTLOCKVALRECSHRD pSharedRec = pRec->ShrdOwner.pSharedRec;
            if (RT_LIKELY(   VALID_PTR(pSharedRec)
                          && pSharedRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC))
            {
                *puSubClass = pSharedRec->uSubClass;
                return pSharedRec->hClass;
            }
            *puSubClass = RTLOCKVAL_SUB_CLASS_NONE;
            return NIL_RTLOCKVALCLASS;
        }

        case RTLOCKVALRECNEST_MAGIC:
        {
            *ppDown = pRec->Nest.pDown;

            /* Nest records delegate class/sub-class to the wrapped record. */
            PRTLOCKVALRECUNION pRealRec = pRec->Nest.pRec;
            if (VALID_PTR(pRealRec))
            {
                switch (pRealRec->Core.u32Magic)
                {
                    case RTLOCKVALRECEXCL_MAGIC:
                        *puSubClass = pRealRec->Excl.uSubClass;
                        return pRealRec->Excl.hClass;

                    case RTLOCKVALRECSHRDOWN_MAGIC:
                    {
                        PRTLOCKVALRECSHRD pSharedRec = pRealRec->ShrdOwner.pSharedRec;
                        if (RT_LIKELY(   VALID_PTR(pSharedRec)
                                      && pSharedRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC))
                        {
                            *puSubClass = pSharedRec->uSubClass;
                            return pSharedRec->hClass;
                        }
                        break;
                    }

                    default:
                        AssertMsgFailed(("%p %p %#x\n", pRec, pRealRec, pRealRec->Core.u32Magic));
                        break;
                }
            }
            *puSubClass = RTLOCKVAL_SUB_CLASS_NONE;
            return NIL_RTLOCKVALCLASS;
        }

        default:
            AssertMsgFailed(("%#x\n", pRec->Core.u32Magic));
            *ppDown = NULL;
            *puSubClass = RTLOCKVAL_SUB_CLASS_NONE;
            return NIL_RTLOCKVALCLASS;
    }
}
1749
1750
1751/**
1752 * Gets the sub-class for a lock record.
1753 *
1754 * @returns the sub-class.
1755 * @param pRec The lock validator record.
1756 */
1757DECLINLINE(uint32_t) rtLockValidatorRecGetSubClass(PRTLOCKVALRECUNION pRec)
1758{
1759 switch (pRec->Core.u32Magic)
1760 {
1761 case RTLOCKVALRECEXCL_MAGIC:
1762 return pRec->Excl.uSubClass;
1763
1764 case RTLOCKVALRECSHRD_MAGIC:
1765 return pRec->Shared.uSubClass;
1766
1767 case RTLOCKVALRECSHRDOWN_MAGIC:
1768 {
1769 PRTLOCKVALRECSHRD pSharedRec = pRec->ShrdOwner.pSharedRec;
1770 if (RT_LIKELY( VALID_PTR(pSharedRec)
1771 && pSharedRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC))
1772 return pSharedRec->uSubClass;
1773 return RTLOCKVAL_SUB_CLASS_NONE;
1774 }
1775
1776 case RTLOCKVALRECNEST_MAGIC:
1777 {
1778 PRTLOCKVALRECUNION pRealRec = pRec->Nest.pRec;
1779 if (VALID_PTR(pRealRec))
1780 {
1781 switch (pRealRec->Core.u32Magic)
1782 {
1783 case RTLOCKVALRECEXCL_MAGIC:
1784 return pRec->Excl.uSubClass;
1785
1786 case RTLOCKVALRECSHRDOWN_MAGIC:
1787 {
1788 PRTLOCKVALRECSHRD pSharedRec = pRealRec->ShrdOwner.pSharedRec;
1789 if (RT_LIKELY( VALID_PTR(pSharedRec)
1790 && pSharedRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC))
1791 return pSharedRec->uSubClass;
1792 break;
1793 }
1794
1795 default:
1796 AssertMsgFailed(("%p %p %#x\n", pRec, pRealRec, pRealRec->Core.u32Magic));
1797 break;
1798 }
1799 }
1800 return RTLOCKVAL_SUB_CLASS_NONE;
1801 }
1802
1803 default:
1804 AssertMsgFailed(("%#x\n", pRec->Core.u32Magic));
1805 return RTLOCKVAL_SUB_CLASS_NONE;
1806 }
1807}
1808
1809
1810
1811
/**
 * Calculates the depth of a lock stack.
 *
 * @returns Number of stack frames.
 * @param   pThread             The thread.
 */
static uint32_t rtLockValidatorStackDepth(PRTTHREADINT pThread)
{
    uint32_t cEntries = 0;
    PRTLOCKVALRECUNION pCur = rtLockValidatorReadRecUnionPtr(&pThread->LockValidator.pStackTop);
    while (VALID_PTR(pCur))
    {
        /* Follow the type-specific down pointer to the next stack entry. */
        switch (pCur->Core.u32Magic)
        {
            case RTLOCKVALRECEXCL_MAGIC:
                pCur = rtLockValidatorReadRecUnionPtr(&pCur->Excl.pDown);
                break;

            case RTLOCKVALRECSHRDOWN_MAGIC:
                pCur = rtLockValidatorReadRecUnionPtr(&pCur->ShrdOwner.pDown);
                break;

            case RTLOCKVALRECNEST_MAGIC:
                pCur = rtLockValidatorReadRecUnionPtr(&pCur->Nest.pDown);
                break;

            default:
                /* Corrupt record: return the count found so far. */
                AssertMsgFailedReturn(("%#x\n", pCur->Core.u32Magic), cEntries);
        }
        cEntries++;
    }
    return cEntries;
}
1845
1846
1847#ifdef RT_STRICT
/**
 * Checks if the stack contains @a pRec.
 *
 * @returns true / false.
 * @param   pThreadSelf         The current thread.
 * @param   pRec                The lock record.
 */
static bool rtLockValidatorStackContainsRec(PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION pRec)
{
    PRTLOCKVALRECUNION pCur = pThreadSelf->LockValidator.pStackTop;
    while (pCur)
    {
        AssertPtrReturn(pCur, false);
        if (pCur == pRec)
            return true;
        /* Walk down the stack, sanity-checking recursion counts as we go. */
        switch (pCur->Core.u32Magic)
        {
            case RTLOCKVALRECEXCL_MAGIC:
                Assert(pCur->Excl.cRecursion >= 1);
                pCur = pCur->Excl.pDown;
                break;

            case RTLOCKVALRECSHRDOWN_MAGIC:
                Assert(pCur->ShrdOwner.cRecursion >= 1);
                pCur = pCur->ShrdOwner.pDown;
                break;

            case RTLOCKVALRECNEST_MAGIC:
                /* Nest records only exist for actual recursion, so > 1. */
                Assert(pCur->Nest.cRecursion > 1);
                pCur = pCur->Nest.pDown;
                break;

            default:
                AssertMsgFailedReturn(("%#x\n", pCur->Core.u32Magic), false);
        }
    }
    return false;
}
1886#endif /* RT_STRICT */
1887
1888
/**
 * Pushes a lock record onto the stack.
 *
 * @param   pThreadSelf         The current thread.
 * @param   pRec                The lock record.
 */
static void rtLockValidatorStackPush(PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION pRec)
{
    Assert(pThreadSelf == RTThreadSelf());
    Assert(!rtLockValidatorStackContainsRec(pThreadSelf, pRec));

    /* Link the current stack top below the new record... */
    switch (pRec->Core.u32Magic)
    {
        case RTLOCKVALRECEXCL_MAGIC:
            Assert(pRec->Excl.cRecursion == 1);
            Assert(pRec->Excl.pDown == NULL);
            rtLockValidatorWriteRecUnionPtr(&pRec->Excl.pDown, pThreadSelf->LockValidator.pStackTop);
            break;

        case RTLOCKVALRECSHRDOWN_MAGIC:
            Assert(pRec->ShrdOwner.cRecursion == 1);
            Assert(pRec->ShrdOwner.pDown == NULL);
            rtLockValidatorWriteRecUnionPtr(&pRec->ShrdOwner.pDown, pThreadSelf->LockValidator.pStackTop);
            break;

        default:
            AssertMsgFailedReturnVoid(("%#x\n", pRec->Core.u32Magic));
    }
    /* ... and make the new record the stack top. */
    rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pStackTop, pRec);
}
1919
1920
/**
 * Pops a lock record off the stack.
 *
 * @param   pThreadSelf         The current thread.
 * @param   pRec                The lock.
 */
static void rtLockValidatorStackPop(PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION pRec)
{
    Assert(pThreadSelf == RTThreadSelf());

    /* Detach the record, remembering what was below it. */
    PRTLOCKVALRECUNION pDown;
    switch (pRec->Core.u32Magic)
    {
        case RTLOCKVALRECEXCL_MAGIC:
            Assert(pRec->Excl.cRecursion == 0);
            pDown = pRec->Excl.pDown;
            rtLockValidatorWriteRecUnionPtr(&pRec->Excl.pDown, NULL); /* lazy bird */
            break;

        case RTLOCKVALRECSHRDOWN_MAGIC:
            Assert(pRec->ShrdOwner.cRecursion == 0);
            pDown = pRec->ShrdOwner.pDown;
            rtLockValidatorWriteRecUnionPtr(&pRec->ShrdOwner.pDown, NULL);
            break;

        default:
            AssertMsgFailedReturnVoid(("%#x\n", pRec->Core.u32Magic));
    }
    /* Common case: the record is at the top of the stack. */
    if (pThreadSelf->LockValidator.pStackTop == pRec)
        rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pStackTop, pDown);
    else
    {
        /* Find the pointer to our record and unlink ourselves.  (Out-of-order
           release is permitted unless the class enforces strict order.) */
        PRTLOCKVALRECUNION pCur = pThreadSelf->LockValidator.pStackTop;
        while (pCur)
        {
            PRTLOCKVALRECUNION volatile *ppDown;
            switch (pCur->Core.u32Magic)
            {
                case RTLOCKVALRECEXCL_MAGIC:
                    Assert(pCur->Excl.cRecursion >= 1);
                    ppDown = &pCur->Excl.pDown;
                    break;

                case RTLOCKVALRECSHRDOWN_MAGIC:
                    Assert(pCur->ShrdOwner.cRecursion >= 1);
                    ppDown = &pCur->ShrdOwner.pDown;
                    break;

                case RTLOCKVALRECNEST_MAGIC:
                    Assert(pCur->Nest.cRecursion >= 1);
                    ppDown = &pCur->Nest.pDown;
                    break;

                default:
                    AssertMsgFailedReturnVoid(("%#x\n", pCur->Core.u32Magic));
            }
            pCur = *ppDown;
            if (pCur == pRec)
            {
                /* Splice the record out of the chain. */
                rtLockValidatorWriteRecUnionPtr(ppDown, pDown);
                return;
            }
        }
        /* The record was not on the stack at all - should not happen. */
        AssertMsgFailed(("%p %p\n", pRec, pThreadSelf));
    }
}
1988
1989
/**
 * Creates and pushes lock recursion record onto the stack.
 *
 * Allocation failures are quietly ignored (the function simply returns
 * without pushing anything).
 *
 * @param   pThreadSelf The current thread.
 * @param   pRec        The lock record.
 * @param   pSrcPos     Where the recursion occurred.
 */
static void rtLockValidatorStackPushRecursion(PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION pRec, PCRTLOCKVALSRCPOS pSrcPos)
{
    Assert(pThreadSelf == RTThreadSelf());
    Assert(rtLockValidatorStackContainsRec(pThreadSelf, pRec));

#ifdef RTLOCKVAL_WITH_RECURSION_RECORDS
    /*
     * Allocate a new recursion record, preferably recycling one off the
     * thread's free list.
     */
    PRTLOCKVALRECNEST pRecursionRec = pThreadSelf->LockValidator.pFreeNestRecs;
    if (pRecursionRec)
        pThreadSelf->LockValidator.pFreeNestRecs = pRecursionRec->pNextFree;
    else
    {
        pRecursionRec = (PRTLOCKVALRECNEST)RTMemAlloc(sizeof(*pRecursionRec));
        if (!pRecursionRec)
            return;
    }

    /*
     * Initialize it.
     */
    switch (pRec->Core.u32Magic)
    {
        case RTLOCKVALRECEXCL_MAGIC:
            pRecursionRec->cRecursion = pRec->Excl.cRecursion;
            break;

        case RTLOCKVALRECSHRDOWN_MAGIC:
            pRecursionRec->cRecursion = pRec->ShrdOwner.cRecursion;
            break;

        default:
            AssertMsgFailed(("%#x\n", pRec->Core.u32Magic));
            /* Synchronize with any concurrent destructors before freeing, then bail. */
            rtLockValidatorSerializeDestructEnter();
            rtLockValidatorSerializeDestructLeave();
            RTMemFree(pRecursionRec);
            return;
    }
    Assert(pRecursionRec->cRecursion > 1);
    pRecursionRec->pRec = pRec;
    pRecursionRec->pDown = NULL;
    pRecursionRec->pNextFree = NULL;
    rtLockValidatorSrcPosCopy(&pRecursionRec->SrcPos, pSrcPos);
    /* The magic is written last so the record never looks valid while half initialized. */
    pRecursionRec->Core.u32Magic = RTLOCKVALRECNEST_MAGIC;

    /*
     * Link it.
     */
    pRecursionRec->pDown = pThreadSelf->LockValidator.pStackTop;
    rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pStackTop, (PRTLOCKVALRECUNION)pRecursionRec);
#endif /* RTLOCKVAL_WITH_RECURSION_RECORDS */
}
2050
2051
2052/**
2053 * Pops a lock recursion record off the stack.
2054 *
2055 * @param pThreadSelf The current thread.
2056 * @param pRec The lock record.
2057 */
2058static void rtLockValidatorStackPopRecursion(PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION pRec)
2059{
2060 Assert(pThreadSelf == RTThreadSelf());
2061 Assert(rtLockValidatorStackContainsRec(pThreadSelf, pRec));
2062
2063 uint32_t cRecursion;
2064 switch (pRec->Core.u32Magic)
2065 {
2066 case RTLOCKVALRECEXCL_MAGIC: cRecursion = pRec->Excl.cRecursion; break;
2067 case RTLOCKVALRECSHRDOWN_MAGIC: cRecursion = pRec->ShrdOwner.cRecursion; break;
2068 default: AssertMsgFailedReturnVoid(("%#x\n", pRec->Core.u32Magic));
2069 }
2070 Assert(cRecursion >= 1);
2071
2072#ifdef RTLOCKVAL_WITH_RECURSION_RECORDS
2073 /*
2074 * Pop the recursion record.
2075 */
2076 PRTLOCKVALRECUNION pNest = pThreadSelf->LockValidator.pStackTop;
2077 if ( pNest != NULL
2078 && pNest->Core.u32Magic == RTLOCKVALRECNEST_MAGIC
2079 && pNest->Nest.pRec == pRec
2080 )
2081 {
2082 Assert(pNest->Nest.cRecursion == cRecursion + 1);
2083 rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pStackTop, pNest->Nest.pDown);
2084 }
2085 else
2086 {
2087 /* Find the record above ours. */
2088 PRTLOCKVALRECUNION volatile *ppDown = NULL;
2089 for (;;)
2090 {
2091 AssertMsgReturnVoid(pNest, ("%p %p\n", pRec, pThreadSelf));
2092 switch (pNest->Core.u32Magic)
2093 {
2094 case RTLOCKVALRECEXCL_MAGIC:
2095 ppDown = &pNest->Excl.pDown;
2096 pNest = *ppDown;
2097 continue;
2098 case RTLOCKVALRECSHRDOWN_MAGIC:
2099 ppDown = &pNest->ShrdOwner.pDown;
2100 pNest = *ppDown;
2101 continue;
2102 case RTLOCKVALRECNEST_MAGIC:
2103 if (pNest->Nest.pRec == pRec)
2104 break;
2105 ppDown = &pNest->Nest.pDown;
2106 pNest = *ppDown;
2107 continue;
2108 default:
2109 AssertMsgFailedReturnVoid(("%#x\n", pNest->Core.u32Magic));
2110 }
2111 break; /* ugly */
2112 }
2113 Assert(pNest->Nest.cRecursion == cRecursion + 1);
2114 rtLockValidatorWriteRecUnionPtr(ppDown, pNest->Nest.pDown);
2115 }
2116
2117 /*
2118 * Invalidate and free the record.
2119 */
2120 ASMAtomicWriteU32(&pNest->Core.u32Magic, RTLOCKVALRECNEST_MAGIC);
2121 rtLockValidatorWriteRecUnionPtr(&pNest->Nest.pDown, NULL);
2122 rtLockValidatorWriteRecUnionPtr(&pNest->Nest.pRec, NULL);
2123 pNest->Nest.cRecursion = 0;
2124 pNest->Nest.pNextFree = pThreadSelf->LockValidator.pFreeNestRecs;
2125 pThreadSelf->LockValidator.pFreeNestRecs = &pNest->Nest;
2126#endif /* RTLOCKVAL_WITH_RECURSION_RECORDS */
2127}
2128
2129
2130/**
2131 * Helper for rtLockValidatorStackCheckLockingOrder that does the bitching and
2132 * returns VERR_SEM_LV_WRONG_ORDER.
2133 */
2134static int rtLockValidatorStackWrongOrder(const char *pszWhat, PCRTLOCKVALSRCPOS pSrcPos, PRTTHREADINT pThreadSelf,
2135 PRTLOCKVALRECUNION pRec1, PRTLOCKVALRECUNION pRec2,
2136 RTLOCKVALCLASSINT *pClass1, RTLOCKVALCLASSINT *pClass2)
2137
2138
2139{
2140 rtLockValComplainFirst(pszWhat, pSrcPos, pThreadSelf, pRec1, false);
2141 rtLockValComplainAboutLock("Other lock: ", pRec2, "\n");
2142 rtLockValComplainAboutClass("My class: ", pClass1, rtLockValidatorRecGetSubClass(pRec1), true /*fVerbose*/);
2143 rtLockValComplainAboutClass("Other class: ", pClass2, rtLockValidatorRecGetSubClass(pRec2), true /*fVerbose*/);
2144 rtLockValComplainAboutLockStack(pThreadSelf, 0, 0, pRec2);
2145 rtLockValComplainPanic();
2146 return !g_fLockValSoftWrongOrder ? VERR_SEM_LV_WRONG_ORDER : VINF_SUCCESS;
2147}
2148
2149
2150/**
2151 * Checks if the sub-class order is ok or not.
2152 *
2153 * Used to deal with two locks from the same class.
2154 *
2155 * @returns true if ok, false if not.
2156 * @param uSubClass1 The sub-class of the lock that is being
2157 * considered.
2158 * @param uSubClass2 The sub-class of the lock that is already being
2159 * held.
2160 */
2161DECL_FORCE_INLINE(bool) rtLockValidatorIsSubClassOrderOk(uint32_t uSubClass1, uint32_t uSubClass2)
2162{
2163 if (uSubClass1 > uSubClass2)
2164 {
2165 /* NONE kills ANY. */
2166 if (uSubClass2 == RTLOCKVAL_SUB_CLASS_NONE)
2167 return false;
2168 return true;
2169 }
2170
2171 /* ANY counters all USER values. (uSubClass1 == NONE only if they are equal) */
2172 AssertCompile(RTLOCKVAL_SUB_CLASS_ANY > RTLOCKVAL_SUB_CLASS_NONE);
2173 if (uSubClass1 == RTLOCKVAL_SUB_CLASS_ANY)
2174 return true;
2175 return false;
2176}
2177
2178
2179/**
2180 * Checks if the class and sub-class lock order is ok.
2181 *
2182 * @returns true if ok, false if not.
2183 * @param pClass1 The class of the lock that is being considered.
2184 * @param uSubClass1 The sub-class that goes with @a pClass1.
2185 * @param pClass2 The class of the lock that is already being
2186 * held.
2187 * @param uSubClass2 The sub-class that goes with @a pClass2.
2188 */
2189DECL_FORCE_INLINE(bool) rtLockValidatorIsClassOrderOk(RTLOCKVALCLASSINT *pClass1, uint32_t uSubClass1,
2190 RTLOCKVALCLASSINT *pClass2, uint32_t uSubClass2)
2191{
2192 if (pClass1 == pClass2)
2193 return rtLockValidatorIsSubClassOrderOk(uSubClass1, uSubClass2);
2194 return rtLockValidatorClassIsPriorClass(pClass1, pClass2);
2195}
2196
2197
/**
 * Checks the locking order, part two.
 *
 * Called by rtLockValidatorStackCheckLockingOrder once it has found a lock on
 * the stack that violates the order.  Either complains about the violation,
 * or - for autodidactic classes - teaches the class the new order rules.
 *
 * @returns VINF_SUCCESS, VERR_SEM_LV_WRONG_ORDER or VERR_SEM_LV_INTERNAL_ERROR.
 * @param   pClass          The lock class.
 * @param   uSubClass       The lock sub-class.
 * @param   pThreadSelf     The current thread.
 * @param   pRec            The lock record.
 * @param   pSrcPos         The source position of the locking operation.
 * @param   pFirstBadClass  The first bad class.
 * @param   pFirstBadRec    The first bad lock record.
 * @param   pFirstBadDown   The next record on the lock stack.
 */
static int rtLockValidatorStackCheckLockingOrder2(RTLOCKVALCLASSINT * const pClass, uint32_t const uSubClass,
                                                  PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION const pRec,
                                                  PCRTLOCKVALSRCPOS const pSrcPos,
                                                  RTLOCKVALCLASSINT * const pFirstBadClass,
                                                  PRTLOCKVALRECUNION const pFirstBadRec,
                                                  PRTLOCKVALRECUNION const pFirstBadDown)
{
    /*
     * Something went wrong, pCur is pointing to where.
     *
     * A genuine order conflict (the reverse order is already established) or
     * a non-learning class is reported immediately.
     */
    if (   pClass == pFirstBadClass
        || rtLockValidatorClassIsPriorClass(pFirstBadClass, pClass))
        return rtLockValidatorStackWrongOrder("Wrong locking order!", pSrcPos, pThreadSelf,
                                              pRec, pFirstBadRec, pClass, pFirstBadClass);
    if (!pClass->fAutodidact)
        return rtLockValidatorStackWrongOrder("Wrong locking order! (unknown)", pSrcPos, pThreadSelf,
                                              pRec, pFirstBadRec, pClass, pFirstBadClass);

    /*
     * This class is an autodidact, so we have to check out the rest of the stack
     * for direct violations.
     */
    uint32_t cNewRules = 1;
    PRTLOCKVALRECUNION pCur = pFirstBadDown;
    while (pCur)
    {
        AssertPtrReturn(pCur, VERR_SEM_LV_INTERNAL_ERROR);

        if (pCur->Core.u32Magic == RTLOCKVALRECNEST_MAGIC)
            pCur = pCur->Nest.pDown;    /* recursion records carry no class info; skip. */
        else
        {
            PRTLOCKVALRECUNION pDown;
            uint32_t uPriorSubClass;
            RTLOCKVALCLASSINT *pPriorClass = rtLockValidatorRecGetClassesAndDown(pCur, &uPriorSubClass, &pDown);
            if (pPriorClass != NIL_RTLOCKVALCLASS)
            {
                AssertPtrReturn(pPriorClass, VERR_SEM_LV_INTERNAL_ERROR);
                AssertReturn(pPriorClass->u32Magic == RTLOCKVALCLASS_MAGIC, VERR_SEM_LV_INTERNAL_ERROR);
                if (!rtLockValidatorIsClassOrderOk(pClass, uSubClass, pPriorClass, uPriorSubClass))
                {
                    /* An already established reverse order cannot be learned away. */
                    if (   pClass == pPriorClass
                        || rtLockValidatorClassIsPriorClass(pPriorClass, pClass))
                        return rtLockValidatorStackWrongOrder("Wrong locking order! (more than one)", pSrcPos, pThreadSelf,
                                                              pRec, pCur, pClass, pPriorClass);
                    cNewRules++;
                }
            }
            pCur = pDown;
        }
    }

    if (cNewRules == 1)
    {
        /*
         * Special case the simple operation, hoping that it will be a
         * frequent case.
         */
        int rc = rtLockValidatorClassAddPriorClass(pClass, pFirstBadClass, true /*fAutodidacticism*/, pSrcPos);
        if (rc == VERR_SEM_LV_WRONG_ORDER)
            return rtLockValidatorStackWrongOrder("Wrong locking order! (race)", pSrcPos, pThreadSelf,
                                                  pRec, pFirstBadRec, pClass, pFirstBadClass);
        Assert(RT_SUCCESS(rc) || rc == VERR_NO_MEMORY);
    }
    else
    {
        /*
         * We may be adding more than one rule, so we have to take the lock
         * before starting to add the rules.  This means we have to check
         * the state after taking it since we might be racing someone adding
         * a conflicting rule.
         */
        if (!RTCritSectIsInitialized(&g_LockValClassTeachCS))
            rtLockValidatorLazyInit();
        int rcLock = RTCritSectEnter(&g_LockValClassTeachCS);

        /* Check (first pass: re-validate everything now that we hold the teaching lock). */
        pCur = pFirstBadRec;
        while (pCur)
        {
            if (pCur->Core.u32Magic == RTLOCKVALRECNEST_MAGIC)
                pCur = pCur->Nest.pDown;
            else
            {
                uint32_t uPriorSubClass;
                PRTLOCKVALRECUNION pDown;
                RTLOCKVALCLASSINT *pPriorClass = rtLockValidatorRecGetClassesAndDown(pCur, &uPriorSubClass, &pDown);
                if (pPriorClass != NIL_RTLOCKVALCLASS)
                {
                    if (!rtLockValidatorIsClassOrderOk(pClass, uSubClass, pPriorClass, uPriorSubClass))
                    {
                        if (   pClass == pPriorClass
                            || rtLockValidatorClassIsPriorClass(pPriorClass, pClass))
                        {
                            /* Lost the race - someone taught the reverse order meanwhile. */
                            if (RT_SUCCESS(rcLock))
                                RTCritSectLeave(&g_LockValClassTeachCS);
                            return rtLockValidatorStackWrongOrder("Wrong locking order! (2nd)", pSrcPos, pThreadSelf,
                                                                  pRec, pCur, pClass, pPriorClass);
                        }
                    }
                }
                pCur = pDown;
            }
        }

        /* Iterate the stack yet again, adding new rules this time. */
        pCur = pFirstBadRec;
        while (pCur)
        {
            if (pCur->Core.u32Magic == RTLOCKVALRECNEST_MAGIC)
                pCur = pCur->Nest.pDown;
            else
            {
                uint32_t uPriorSubClass;
                PRTLOCKVALRECUNION pDown;
                RTLOCKVALCLASSINT *pPriorClass = rtLockValidatorRecGetClassesAndDown(pCur, &uPriorSubClass, &pDown);
                if (pPriorClass != NIL_RTLOCKVALCLASS)
                {
                    if (!rtLockValidatorIsClassOrderOk(pClass, uSubClass, pPriorClass, uPriorSubClass))
                    {
                        Assert(   pClass != pPriorClass
                               && !rtLockValidatorClassIsPriorClass(pPriorClass, pClass));
                        int rc = rtLockValidatorClassAddPriorClass(pClass, pPriorClass, true /*fAutodidacticism*/, pSrcPos);
                        if (RT_FAILURE(rc))
                        {
                            /* Out of memory: give up teaching, but don't fail the lock operation. */
                            Assert(rc == VERR_NO_MEMORY);
                            break;
                        }
                        Assert(rtLockValidatorClassIsPriorClass(pClass, pPriorClass));
                    }
                }
                pCur = pDown;
            }
        }

        if (RT_SUCCESS(rcLock))
            RTCritSectLeave(&g_LockValClassTeachCS);
    }

    return VINF_SUCCESS;
}
2352
2353
2354
2355/**
2356 * Checks the locking order.
2357 *
2358 * @returns VINF_SUCCESS, VERR_SEM_LV_WRONG_ORDER or VERR_SEM_LV_INTERNAL_ERROR.
2359 * @param pClass The lock class.
2360 * @param uSubClass The lock sub-class.
2361 * @param pThreadSelf The current thread.
2362 * @param pRec The lock record.
2363 * @param pSrcPos The source position of the locking operation.
2364 */
2365static int rtLockValidatorStackCheckLockingOrder(RTLOCKVALCLASSINT * const pClass, uint32_t const uSubClass,
2366 PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION const pRec,
2367 PCRTLOCKVALSRCPOS pSrcPos)
2368{
2369 /*
2370 * Some internal paranoia first.
2371 */
2372 AssertPtr(pClass);
2373 Assert(pClass->u32Magic == RTLOCKVALCLASS_MAGIC);
2374 AssertPtr(pThreadSelf);
2375 Assert(pThreadSelf->u32Magic == RTTHREADINT_MAGIC);
2376 AssertPtr(pRec);
2377 AssertPtrNull(pSrcPos);
2378
2379 /*
2380 * Walk the stack, delegate problems to a worker routine.
2381 */
2382 PRTLOCKVALRECUNION pCur = pThreadSelf->LockValidator.pStackTop;
2383 if (!pCur)
2384 return VINF_SUCCESS;
2385
2386 for (;;)
2387 {
2388 AssertPtrReturn(pCur, VERR_SEM_LV_INTERNAL_ERROR);
2389
2390 if (pCur->Core.u32Magic == RTLOCKVALRECNEST_MAGIC)
2391 pCur = pCur->Nest.pDown;
2392 else
2393 {
2394 uint32_t uPriorSubClass;
2395 PRTLOCKVALRECUNION pDown;
2396 RTLOCKVALCLASSINT *pPriorClass = rtLockValidatorRecGetClassesAndDown(pCur, &uPriorSubClass, &pDown);
2397 if (pPriorClass != NIL_RTLOCKVALCLASS)
2398 {
2399 AssertPtrReturn(pPriorClass, VERR_SEM_LV_INTERNAL_ERROR);
2400 AssertReturn(pPriorClass->u32Magic == RTLOCKVALCLASS_MAGIC, VERR_SEM_LV_INTERNAL_ERROR);
2401 if (RT_UNLIKELY(!rtLockValidatorIsClassOrderOk(pClass, uSubClass, pPriorClass, uPriorSubClass)))
2402 return rtLockValidatorStackCheckLockingOrder2(pClass, uSubClass, pThreadSelf, pRec, pSrcPos,
2403 pPriorClass, pCur, pDown);
2404 }
2405 pCur = pDown;
2406 }
2407 if (!pCur)
2408 return VINF_SUCCESS;
2409 }
2410}
2411
2412
/**
 * Check that the lock record is the topmost one on the stack, complain and fail
 * if it isn't.
 *
 * A recursion record wrapping @a pRec at the top also counts as correct order.
 *
 * @returns VINF_SUCCESS, VERR_SEM_LV_WRONG_RELEASE_ORDER or
 *          VERR_SEM_LV_INVALID_PARAMETER.
 * @param   pThreadSelf The current thread.
 * @param   pRec        The record.
 */
static int rtLockValidatorStackCheckReleaseOrder(PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION pRec)
{
    AssertReturn(pThreadSelf != NIL_RTTHREAD, VERR_SEM_LV_INVALID_PARAMETER);
    Assert(pThreadSelf == RTThreadSelf());

    /* Fast path: pRec (or its top-level recursion record) is on top. */
    PRTLOCKVALRECUNION pTop = pThreadSelf->LockValidator.pStackTop;
    if (RT_LIKELY(   pTop == pRec
                  || (   pTop
                      && pTop->Core.u32Magic == RTLOCKVALRECNEST_MAGIC
                      && pTop->Nest.pRec == pRec) ))
        return VINF_SUCCESS;

#ifdef RTLOCKVAL_WITH_RECURSION_RECORDS
    /* Look for a recursion record so the right frame is dumped and marked. */
    while (pTop)
    {
        if (pTop->Core.u32Magic == RTLOCKVALRECNEST_MAGIC)
        {
            if (pTop->Nest.pRec == pRec)
            {
                /* Complain about the recursion record rather than the lock itself. */
                pRec = pTop;
                break;
            }
            pTop = pTop->Nest.pDown;
        }
        else if (pTop->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC)
            pTop = pTop->Excl.pDown;
        else if (pTop->Core.u32Magic == RTLOCKVALRECSHRDOWN_MAGIC)
            pTop = pTop->ShrdOwner.pDown;
        else
            break;
    }
#endif

    rtLockValComplainFirst("Wrong release order!", NULL, pThreadSelf, pRec, true);
    rtLockValComplainPanic();
    /* Soft mode only warns about release order violations. */
    return !g_fLockValSoftWrongOrder ? VERR_SEM_LV_WRONG_RELEASE_ORDER : VINF_SUCCESS;
}
2460
2461
/**
 * Checks if all owners are blocked - shared record operated in signaller mode.
 *
 * Reads the owner table without locking, so all entries are re-validated via
 * their magic values as we go.
 *
 * @returns true / false accordingly.
 * @param   pRec        The record.
 * @param   pThreadSelf The current thread.
 */
DECL_FORCE_INLINE(bool) rtLockValidatorDdAreAllThreadsBlocked(PRTLOCKVALRECSHRD pRec, PRTTHREADINT pThreadSelf)
{
    PRTLOCKVALRECSHRDOWN volatile *papOwners = pRec->papOwners;
    uint32_t cAllocated = pRec->cAllocated;
    uint32_t cEntries = ASMAtomicUoReadU32(&pRec->cEntries);
    if (cEntries == 0)
        return false;   /* no owners at all -> nobody is blocked. */

    for (uint32_t i = 0; i < cAllocated; i++)
    {
        /* The table is sparse; dead/empty slots are skipped below. */
        PRTLOCKVALRECSHRDOWN pEntry = rtLockValidatorUoReadSharedOwner(&papOwners[i]);
        if (   pEntry
            && pEntry->Core.u32Magic == RTLOCKVALRECSHRDOWN_MAGIC)
        {
            PRTTHREADINT pCurThread = rtLockValidatorReadThreadHandle(&pEntry->hThread);
            if (!pCurThread)
                return false;
            if (pCurThread->u32Magic != RTTHREADINT_MAGIC)
                return false;
            /* The calling thread itself counts as "blocked" here. */
            if (   !RTTHREAD_IS_SLEEPING(rtThreadGetState(pCurThread))
                && pCurThread != pThreadSelf)
                return false;
            if (--cEntries == 0)
                break;  /* all live entries have been examined. */
        }
        else
            Assert(!pEntry || pEntry->Core.u32Magic == RTLOCKVALRECSHRDOWN_MAGIC_DEAD);
    }

    return true;
}
2500
2501
/**
 * Verifies the deadlock stack before calling it a deadlock.
 *
 * Since the detection runs without locks, the cycle may have been a transient
 * artifact of concurrent state changes.  Re-checking every entry three times
 * (with yields in between) weeds out such false positives.
 *
 * @retval  VERR_SEM_LV_DEADLOCK if it's a deadlock.
 * @retval  VERR_SEM_LV_ILLEGAL_UPGRADE if it's a deadlock on the same lock.
 * @retval  VERR_TRY_AGAIN if something changed.
 *
 * @param   pStack      The deadlock detection stack.
 * @param   pThreadSelf The current thread.
 */
static int rtLockValidatorDdVerifyDeadlock(PRTLOCKVALDDSTACK pStack, PRTTHREADINT pThreadSelf)
{
    uint32_t const c = pStack->c;
    for (uint32_t iPass = 0; iPass < 3; iPass++)
    {
        /* Entry 0 is skipped; only the chained-to entries are re-validated. */
        for (uint32_t i = 1; i < c; i++)
        {
            PRTTHREADINT pThread = pStack->a[i].pThread;
            if (pThread->u32Magic != RTTHREADINT_MAGIC)
                return VERR_TRY_AGAIN;  /* thread died meanwhile. */
            if (rtThreadGetState(pThread) != pStack->a[i].enmState)
                return VERR_TRY_AGAIN;  /* thread changed state (may have woken up). */
            if (rtLockValidatorReadRecUnionPtr(&pThread->LockValidator.pRec) != pStack->a[i].pFirstSibling)
                return VERR_TRY_AGAIN;  /* thread now waits on something else. */
            /* ASSUMES the signaller records won't have siblings! */
            PRTLOCKVALRECUNION pRec = pStack->a[i].pRec;
            if (   pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
                && pRec->Shared.fSignaller
                && !rtLockValidatorDdAreAllThreadsBlocked(&pRec->Shared, pThreadSelf))
                return VERR_TRY_AGAIN;
        }
        RTThreadYield();
    }

    /* A one-entry cycle means we'd deadlock on the very lock we hold. */
    if (c == 1)
        return VERR_SEM_LV_ILLEGAL_UPGRADE;
    return VERR_SEM_LV_DEADLOCK;
}
2540
2541
2542/**
2543 * Checks for stack cycles caused by another deadlock before returning.
2544 *
2545 * @retval VINF_SUCCESS if the stack is simply too small.
2546 * @retval VERR_SEM_LV_EXISTING_DEADLOCK if a cycle was detected.
2547 *
2548 * @param pStack The deadlock detection stack.
2549 */
2550static int rtLockValidatorDdHandleStackOverflow(PRTLOCKVALDDSTACK pStack)
2551{
2552 for (size_t i = 0; i < RT_ELEMENTS(pStack->a) - 1; i++)
2553 {
2554 PRTTHREADINT pThread = pStack->a[i].pThread;
2555 for (size_t j = i + 1; j < RT_ELEMENTS(pStack->a); j++)
2556 if (pStack->a[j].pThread == pThread)
2557 return VERR_SEM_LV_EXISTING_DEADLOCK;
2558 }
2559 static bool volatile s_fComplained = false;
2560 if (!s_fComplained)
2561 {
2562 s_fComplained = true;
2563 rtLockValComplain(RT_SRC_POS, "lock validator stack is too small! (%zu entries)\n", RT_ELEMENTS(pStack->a));
2564 }
2565 return VINF_SUCCESS;
2566}
2567
2568
/**
 * Worker for rtLockValidatorDeadlockDetection that does the actual deadlock
 * detection.
 *
 * Performs an iterative depth-first walk of the waits-on graph using an
 * explicit stack (@a pStack), following lock owners to the records they in
 * turn are waiting on.  A path leading back to @a pThreadSelf indicates a
 * deadlock; it is double-checked by rtLockValidatorDdVerifyDeadlock since the
 * whole walk is performed without taking any locks.
 *
 * @retval  VINF_SUCCESS
 * @retval  VERR_SEM_LV_DEADLOCK
 * @retval  VERR_SEM_LV_EXISTING_DEADLOCK
 * @retval  VERR_SEM_LV_ILLEGAL_UPGRADE
 * @retval  VERR_TRY_AGAIN
 *
 * @param   pStack          The stack to use.
 * @param   pOriginalRec    The original record.
 * @param   pThreadSelf     The calling thread.
 */
static int rtLockValidatorDdDoDetection(PRTLOCKVALDDSTACK pStack, PRTLOCKVALRECUNION const pOriginalRec,
                                        PRTTHREADINT const pThreadSelf)
{
    pStack->c = 0;

    /* We could use a single RTLOCKVALDDENTRY variable here, but the
       compiler may make a better job of it when using individual variables. */
    PRTLOCKVALRECUNION pRec = pOriginalRec;
    PRTLOCKVALRECUNION pFirstSibling = pOriginalRec;
    uint32_t iEntry = UINT32_MAX;               /* current index into a shared-owner table, UINT32_MAX = none. */
    PRTTHREADINT pThread = NIL_RTTHREAD;
    RTTHREADSTATE enmState = RTTHREADSTATE_RUNNING;
    for (uint32_t iLoop = 0; ; iLoop++)
    {
        /*
         * Process the current record.
         */
        RTLOCKVAL_ASSERT_PTR_ALIGN(pRec);

        /* Find the next relevant owner thread and record. */
        PRTLOCKVALRECUNION pNextRec = NULL;
        RTTHREADSTATE enmNextState = RTTHREADSTATE_RUNNING;
        PRTTHREADINT pNextThread = NIL_RTTHREAD;
        switch (pRec->Core.u32Magic)
        {
            case RTLOCKVALRECEXCL_MAGIC:
                Assert(iEntry == UINT32_MAX);
                /* Exclusive lock: single owner.  The retry loop below guards
                   against the owner's record changing under us (state is
                   re-read and compared). */
                for (;;)
                {
                    pNextThread = rtLockValidatorReadThreadHandle(&pRec->Excl.hThread);
                    if (   !pNextThread
                        || pNextThread->u32Magic != RTTHREADINT_MAGIC)
                        break;
                    enmNextState = rtThreadGetState(pNextThread);
                    if (   !RTTHREAD_IS_SLEEPING(enmNextState)
                        && pNextThread != pThreadSelf)
                        break;
                    pNextRec = rtLockValidatorReadRecUnionPtr(&pNextThread->LockValidator.pRec);
                    if (RT_LIKELY(   !pNextRec
                                  || enmNextState == rtThreadGetState(pNextThread)))
                        break;
                    pNextRec = NULL;    /* raced a state change; retry. */
                }
                if (!pNextRec)
                {
                    /* Owner isn't blocking; try the next sibling record, if any. */
                    pRec = pRec->Excl.pSibling;
                    if (   pRec
                        && pRec != pFirstSibling)
                        continue;
                    pNextThread = NIL_RTTHREAD;
                }
                break;

            case RTLOCKVALRECSHRD_MAGIC:
                if (!pRec->Shared.fSignaller)
                {
                    /* Skip to the next sibling if same side. ASSUMES reader priority. */
                    /** @todo The read side of a read-write lock is problematic if
                     * the implementation prioritizes writers over readers because
                     * that means we should could deadlock against current readers
                     * if a writer showed up.  If the RW sem implementation is
                     * wrapping some native API, it's not so easy to detect when we
                     * should do this and when we shouldn't.  Checking when we
                     * shouldn't is subject to wakeup scheduling and cannot easily
                     * be made reliable.
                     *
                     * At the moment we circumvent all this mess by declaring that
                     * readers has priority.  This is TRUE on linux, but probably
                     * isn't on Solaris and FreeBSD. */
                    if (   pRec == pFirstSibling
                        && pRec->Shared.pSibling != NULL
                        && pRec->Shared.pSibling != pFirstSibling)
                    {
                        pRec = pRec->Shared.pSibling;
                        Assert(iEntry == UINT32_MAX);
                        continue;
                    }
                }

                /* Scan the owner table for blocked owners. */
                if (   ASMAtomicUoReadU32(&pRec->Shared.cEntries) > 0
                    && (   !pRec->Shared.fSignaller
                        || iEntry != UINT32_MAX         /* resuming a partially scanned table. */
                        || rtLockValidatorDdAreAllThreadsBlocked(&pRec->Shared, pThreadSelf)
                       )
                   )
                {
                    uint32_t cAllocated = pRec->Shared.cAllocated;
                    PRTLOCKVALRECSHRDOWN volatile *papOwners = pRec->Shared.papOwners;
                    while (++iEntry < cAllocated)
                    {
                        PRTLOCKVALRECSHRDOWN pEntry = rtLockValidatorUoReadSharedOwner(&papOwners[iEntry]);
                        if (pEntry)
                        {
                            /* Same retry pattern as for the exclusive case above. */
                            for (;;)
                            {
                                if (pEntry->Core.u32Magic != RTLOCKVALRECSHRDOWN_MAGIC)
                                    break;
                                pNextThread = rtLockValidatorReadThreadHandle(&pEntry->hThread);
                                if (   !pNextThread
                                    || pNextThread->u32Magic != RTTHREADINT_MAGIC)
                                    break;
                                enmNextState = rtThreadGetState(pNextThread);
                                if (   !RTTHREAD_IS_SLEEPING(enmNextState)
                                    && pNextThread != pThreadSelf)
                                    break;
                                pNextRec = rtLockValidatorReadRecUnionPtr(&pNextThread->LockValidator.pRec);
                                if (RT_LIKELY(   !pNextRec
                                              || enmNextState == rtThreadGetState(pNextThread)))
                                    break;
                                pNextRec = NULL;
                            }
                            if (pNextRec)
                                break;
                        }
                        else
                            Assert(!pEntry || pEntry->Core.u32Magic == RTLOCKVALRECSHRDOWN_MAGIC_DEAD);
                    }
                    if (pNextRec)
                        break;
                    pNextThread = NIL_RTTHREAD;
                }

                /* Advance to the next sibling, if any. */
                pRec = pRec->Shared.pSibling;
                if (   pRec != NULL
                    && pRec != pFirstSibling)
                {
                    iEntry = UINT32_MAX;
                    continue;
                }
                break;

            case RTLOCKVALRECEXCL_MAGIC_DEAD:
            case RTLOCKVALRECSHRD_MAGIC_DEAD:
                /* Record was destroyed while we walked; treat as dead end. */
                break;

            case RTLOCKVALRECSHRDOWN_MAGIC:
            case RTLOCKVALRECSHRDOWN_MAGIC_DEAD:
            default:
                /* NOTE(review): 'pRec->Core' as a %#x vararg looks like it was
                   meant to be 'pRec->Core.u32Magic' - confirm against upstream. */
                AssertMsgFailed(("%p: %#x\n", pRec, pRec->Core));
                break;
        }

        if (pNextRec)
        {
            /*
             * Recurse and check for deadlock.
             */
            uint32_t i = pStack->c;
            if (RT_UNLIKELY(i >= RT_ELEMENTS(pStack->a)))
                return rtLockValidatorDdHandleStackOverflow(pStack);

            /* Push the current position so we can resume it when unwinding. */
            pStack->c++;
            pStack->a[i].pRec = pRec;
            pStack->a[i].iEntry = iEntry;
            pStack->a[i].enmState = enmState;
            pStack->a[i].pThread = pThread;
            pStack->a[i].pFirstSibling = pFirstSibling;

            /* Cycle back to the caller => candidate deadlock; verify it. */
            if (RT_UNLIKELY(   pNextThread == pThreadSelf
                            && (   i != 0
                                || pRec->Core.u32Magic != RTLOCKVALRECSHRD_MAGIC
                                || !pRec->Shared.fSignaller) /* ASSUMES signaller records have no siblings. */
                           )
               )
                return rtLockValidatorDdVerifyDeadlock(pStack, pThreadSelf);

            /* Descend into the record the next thread is waiting on. */
            pRec = pNextRec;
            pFirstSibling = pNextRec;
            iEntry = UINT32_MAX;
            enmState = enmNextState;
            pThread = pNextThread;
        }
        else
        {
            /*
             * No deadlock here, unwind the stack and deal with any unfinished
             * business there.
             */
            uint32_t i = pStack->c;
            for (;;)
            {
                /* pop */
                if (i == 0)
                    return VINF_SUCCESS;
                i--;
                pRec = pStack->a[i].pRec;
                iEntry = pStack->a[i].iEntry;

                /* Examine it. */
                uint32_t u32Magic = pRec->Core.u32Magic;
                if (u32Magic == RTLOCKVALRECEXCL_MAGIC)
                    pRec = pRec->Excl.pSibling;
                else if (u32Magic == RTLOCKVALRECSHRD_MAGIC)
                {
                    if (iEntry + 1 < pRec->Shared.cAllocated)
                        break; /* continue processing this record. */
                    pRec = pRec->Shared.pSibling;
                }
                else
                {
                    Assert(   u32Magic == RTLOCKVALRECEXCL_MAGIC_DEAD
                           || u32Magic == RTLOCKVALRECSHRD_MAGIC_DEAD);
                    continue;   /* record died; keep popping. */
                }

                /* Any next record to advance to? */
                if (   !pRec
                    || pRec == pStack->a[i].pFirstSibling)
                    continue;
                iEntry = UINT32_MAX;
                break;
            }

            /* Restore the rest of the state and update the stack. */
            pFirstSibling = pStack->a[i].pFirstSibling;
            enmState = pStack->a[i].enmState;
            pThread = pStack->a[i].pThread;
            pStack->c = i;
        }

        /* Sanity guard against the walk never terminating. */
        Assert(iLoop != 1000000);
    }
}
2808
2809
2810/**
2811 * Check for the simple no-deadlock case.
2812 *
2813 * @returns true if no deadlock, false if further investigation is required.
2814 *
2815 * @param pOriginalRec The original record.
2816 */
2817DECLINLINE(int) rtLockValidatorIsSimpleNoDeadlockCase(PRTLOCKVALRECUNION pOriginalRec)
2818{
2819 if ( pOriginalRec->Excl.Core.u32Magic == RTLOCKVALRECEXCL_MAGIC
2820 && !pOriginalRec->Excl.pSibling)
2821 {
2822 PRTTHREADINT pThread = rtLockValidatorReadThreadHandle(&pOriginalRec->Excl.hThread);
2823 if ( !pThread
2824 || pThread->u32Magic != RTTHREADINT_MAGIC)
2825 return true;
2826 RTTHREADSTATE enmState = rtThreadGetState(pThread);
2827 if (!RTTHREAD_IS_SLEEPING(enmState))
2828 return true;
2829 }
2830 return false;
2831}
2832
2833
/**
 * Worker for rtLockValidatorDeadlockDetection that bitches about a deadlock.
 *
 * Dumps the whole deadlock chain collected in @a pStack (unless the validator
 * is in quiet mode) and then invokes the panic handler.
 *
 * @param   pStack          The chain of locks causing the deadlock.
 * @param   pRec            The record relating to the current thread's lock
 *                          operation.
 * @param   pThreadSelf     This thread.
 * @param   pSrcPos         Where we are going to deadlock.
 * @param   rc              The return code.
 */
static void rcLockValidatorDoDeadlockComplaining(PRTLOCKVALDDSTACK pStack, PRTLOCKVALRECUNION pRec,
                                                 PRTTHREADINT pThreadSelf, PCRTLOCKVALSRCPOS pSrcPos, int rc)
{
    if (!ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
    {
        /* Pick a headline matching the failure kind. */
        const char *pszWhat;
        switch (rc)
        {
            case VERR_SEM_LV_DEADLOCK: pszWhat = "Detected deadlock!"; break;
            case VERR_SEM_LV_EXISTING_DEADLOCK: pszWhat = "Found existing deadlock!"; break;
            case VERR_SEM_LV_ILLEGAL_UPGRADE: pszWhat = "Illegal lock upgrade!"; break;
            default: AssertFailed(); pszWhat = "!unexpected rc!"; break;
        }
        /* Only mention pRec separately when it isn't already the chain's first entry. */
        rtLockValComplainFirst(pszWhat, pSrcPos, pThreadSelf, pStack->a[0].pRec != pRec ? pRec : NULL, true);
        rtLockValComplainMore("---- start of deadlock chain - %u entries ----\n", pStack->c);
        for (uint32_t i = 0; i < pStack->c; i++)
        {
            char szPrefix[24];
            RTStrPrintf(szPrefix, sizeof(szPrefix), "#%02u: ", i);
            /* For shared records, complain about the specific owner entry that
               the detection walk followed, if it is still valid. */
            PRTLOCKVALRECUNION pShrdOwner = NULL;
            if (pStack->a[i].pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC)
                pShrdOwner = (PRTLOCKVALRECUNION)pStack->a[i].pRec->Shared.papOwners[pStack->a[i].iEntry];
            if (VALID_PTR(pShrdOwner) && pShrdOwner->Core.u32Magic == RTLOCKVALRECSHRDOWN_MAGIC)
            {
                rtLockValComplainAboutLock(szPrefix, pShrdOwner, "\n");
                rtLockValComplainAboutLockStack(pShrdOwner->ShrdOwner.hThread, 5, 2, pShrdOwner);
            }
            else
            {
                rtLockValComplainAboutLock(szPrefix, pStack->a[i].pRec, "\n");
                if (pStack->a[i].pRec->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC)
                    rtLockValComplainAboutLockStack(pStack->a[i].pRec->Excl.hThread, 5, 2, pStack->a[i].pRec);
            }
        }
        rtLockValComplainMore("---- end of deadlock chain ----\n");
    }

    rtLockValComplainPanic();
}
2883
2884
2885/**
2886 * Perform deadlock detection.
2887 *
2888 * @retval VINF_SUCCESS
2889 * @retval VERR_SEM_LV_DEADLOCK
2890 * @retval VERR_SEM_LV_EXISTING_DEADLOCK
2891 * @retval VERR_SEM_LV_ILLEGAL_UPGRADE
2892 *
2893 * @param pRec The record relating to the current thread's lock
2894 * operation.
2895 * @param pThreadSelf The current thread.
2896 * @param pSrcPos The position of the current lock operation.
2897 */
2898static int rtLockValidatorDeadlockDetection(PRTLOCKVALRECUNION pRec, PRTTHREADINT pThreadSelf, PCRTLOCKVALSRCPOS pSrcPos)
2899{
2900 RTLOCKVALDDSTACK Stack;
2901 int rc = rtLockValidatorDdDoDetection(&Stack, pRec, pThreadSelf);
2902 if (RT_SUCCESS(rc))
2903 return VINF_SUCCESS;
2904
2905 if (rc == VERR_TRY_AGAIN)
2906 {
2907 for (uint32_t iLoop = 0; ; iLoop++)
2908 {
2909 rc = rtLockValidatorDdDoDetection(&Stack, pRec, pThreadSelf);
2910 if (RT_SUCCESS_NP(rc))
2911 return VINF_SUCCESS;
2912 if (rc != VERR_TRY_AGAIN)
2913 break;
2914 RTThreadYield();
2915 if (iLoop >= 3)
2916 return VINF_SUCCESS;
2917 }
2918 }
2919
2920 rcLockValidatorDoDeadlockComplaining(&Stack, pRec, pThreadSelf, pSrcPos, rc);
2921 return rc;
2922}
2923
2924
/**
 * Initializes an exclusive lock validator record (va_list variant).
 *
 * @param   pRec        The record to initialize.
 * @param   hClass      The class handle (retained; may be NIL).
 * @param   uSubClass   The sub-class (NONE, ANY or >= USER).
 * @param   hLock       The lock handle the record covers.
 * @param   fEnabled    Whether validation is requested for this lock; it is
 *                      additionally gated on RTLockValidatorIsEnabled().
 * @param   pszNameFmt  Name format string, optional (NULL = auto-generated
 *                      "anon-excl-N" name).
 * @param   va          Format arguments.
 */
RTDECL(void) RTLockValidatorRecExclInitV(PRTLOCKVALRECEXCL pRec, RTLOCKVALCLASS hClass, uint32_t uSubClass,
                                         void *hLock, bool fEnabled, const char *pszNameFmt, va_list va)
{
    RTLOCKVAL_ASSERT_PTR_ALIGN(pRec);
    RTLOCKVAL_ASSERT_PTR_ALIGN(hLock);
    Assert(   uSubClass >= RTLOCKVAL_SUB_CLASS_USER
           || uSubClass == RTLOCKVAL_SUB_CLASS_NONE
           || uSubClass == RTLOCKVAL_SUB_CLASS_ANY);

    pRec->Core.u32Magic = RTLOCKVALRECEXCL_MAGIC;
    pRec->fEnabled = fEnabled && RTLockValidatorIsEnabled();
    pRec->afReserved[0] = 0;
    pRec->afReserved[1] = 0;
    pRec->afReserved[2] = 0;
    rtLockValidatorSrcPosInit(&pRec->SrcPos);
    pRec->hThread = NIL_RTTHREAD;               /* not owned yet. */
    pRec->pDown = NULL;
    pRec->hClass = rtLockValidatorClassValidateAndRetain(hClass);
    pRec->uSubClass = uSubClass;
    pRec->cRecursion = 0;
    pRec->hLock = hLock;
    pRec->pSibling = NULL;
    if (pszNameFmt)
        RTStrPrintfV(pRec->szName, sizeof(pRec->szName), pszNameFmt, va);
    else
    {
        /* No name given: hand out a unique anonymous one. */
        static uint32_t volatile s_cAnonymous = 0;
        uint32_t i = ASMAtomicIncU32(&s_cAnonymous) - 1;
        RTStrPrintf(pRec->szName, sizeof(pRec->szName), "anon-excl-%u", i);
    }

    /* Lazy initialization. */
    if (RT_UNLIKELY(g_hLockValidatorXRoads == NIL_RTSEMXROADS))
        rtLockValidatorLazyInit();
}
2960
2961
/**
 * Ellipsis variant of RTLockValidatorRecExclInitV; simply forwards the
 * variable arguments.
 */
RTDECL(void) RTLockValidatorRecExclInit(PRTLOCKVALRECEXCL pRec, RTLOCKVALCLASS hClass, uint32_t uSubClass,
                                        void *hLock, bool fEnabled, const char *pszNameFmt, ...)
{
    va_list va;
    va_start(va, pszNameFmt);
    RTLockValidatorRecExclInitV(pRec, hClass, uSubClass, hLock, fEnabled, pszNameFmt, va);
    va_end(va);
}
2970
2971
2972RTDECL(int) RTLockValidatorRecExclCreateV(PRTLOCKVALRECEXCL *ppRec, RTLOCKVALCLASS hClass,
2973 uint32_t uSubClass, void *pvLock, bool fEnabled,
2974 const char *pszNameFmt, va_list va)
2975{
2976 PRTLOCKVALRECEXCL pRec;
2977 *ppRec = pRec = (PRTLOCKVALRECEXCL)RTMemAlloc(sizeof(*pRec));
2978 if (!pRec)
2979 return VERR_NO_MEMORY;
2980 RTLockValidatorRecExclInitV(pRec, hClass, uSubClass, pvLock, fEnabled, pszNameFmt, va);
2981 return VINF_SUCCESS;
2982}
2983
2984
/**
 * Ellipsis variant of RTLockValidatorRecExclCreateV.
 *
 * @returns VINF_SUCCESS or VERR_NO_MEMORY (see the V variant).
 */
RTDECL(int) RTLockValidatorRecExclCreate(PRTLOCKVALRECEXCL *ppRec, RTLOCKVALCLASS hClass,
                                         uint32_t uSubClass, void *pvLock, bool fEnabled,
                                         const char *pszNameFmt, ...)
{
    va_list va;
    va_start(va, pszNameFmt);
    int rc = RTLockValidatorRecExclCreateV(ppRec, hClass, uSubClass, pvLock, fEnabled, pszNameFmt, va);
    va_end(va);
    return rc;
}
2995
2996
/**
 * Uninitializes an exclusive lock validator record.
 *
 * Marks the record dead and detaches it under the destruction lock so that
 * concurrent deadlock-detection scans cannot trip over a half-torn-down
 * record.  The class reference is released only after leaving the lock.
 */
RTDECL(void) RTLockValidatorRecExclDelete(PRTLOCKVALRECEXCL pRec)
{
    Assert(pRec->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC);

    rtLockValidatorSerializeDestructEnter();

    /** @todo Check that it's not on our stack first. Need to make it
     * configurable whether deleting a owned lock is acceptable? */

    /* Invalidate the magic first so concurrent readers treat the record as dead. */
    ASMAtomicWriteU32(&pRec->Core.u32Magic, RTLOCKVALRECEXCL_MAGIC_DEAD);
    ASMAtomicWriteHandle(&pRec->hThread, NIL_RTTHREAD);
    RTLOCKVALCLASS hClass;
    ASMAtomicXchgHandle(&pRec->hClass, NIL_RTLOCKVALCLASS, &hClass);
    if (pRec->pSibling)
        rtLockValidatorUnlinkAllSiblings(&pRec->Core);
    rtLockValidatorSerializeDestructLeave();
    /* Release the class reference outside the destruction lock. */
    if (hClass != NIL_RTLOCKVALCLASS)
        RTLockValidatorClassRelease(hClass);
}
3016
3017
3018RTDECL(void) RTLockValidatorRecExclDestroy(PRTLOCKVALRECEXCL *ppRec)
3019{
3020 PRTLOCKVALRECEXCL pRec = *ppRec;
3021 *ppRec = NULL;
3022 if (pRec)
3023 {
3024 RTLockValidatorRecExclDelete(pRec);
3025 RTMemFree(pRec);
3026 }
3027}
3028
3029
3030RTDECL(uint32_t) RTLockValidatorRecExclSetSubClass(PRTLOCKVALRECEXCL pRec, uint32_t uSubClass)
3031{
3032 AssertPtrReturn(pRec, RTLOCKVAL_SUB_CLASS_INVALID);
3033 AssertReturn(pRec->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, RTLOCKVAL_SUB_CLASS_INVALID);
3034 AssertReturn( uSubClass >= RTLOCKVAL_SUB_CLASS_USER
3035 || uSubClass == RTLOCKVAL_SUB_CLASS_NONE
3036 || uSubClass == RTLOCKVAL_SUB_CLASS_ANY,
3037 RTLOCKVAL_SUB_CLASS_INVALID);
3038 return ASMAtomicXchgU32(&pRec->uSubClass, uSubClass);
3039}
3040
3041
/**
 * Records the calling thread as the owner of an exclusive lock.
 *
 * Handles both the first acquisition (pushes the record on the thread's lock
 * stack) and recursive acquisitions (bumps the recursion count and pushes a
 * recursion entry).  Quietly returns for NULL or disabled records.
 */
RTDECL(void) RTLockValidatorRecExclSetOwner(PRTLOCKVALRECEXCL pRec, RTTHREAD hThreadSelf,
                                            PCRTLOCKVALSRCPOS pSrcPos, bool fFirstRecursion)
{
    PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
    if (!pRecU)
        return;
    AssertReturnVoid(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC);
    if (!pRecU->Excl.fEnabled)
        return;
    if (hThreadSelf == NIL_RTTHREAD)
    {
        /* Adopt foreign (alien) threads on the fly. */
        hThreadSelf = RTThreadSelfAutoAdopt();
        AssertReturnVoid(hThreadSelf != NIL_RTTHREAD);
    }
    AssertReturnVoid(hThreadSelf->u32Magic == RTTHREADINT_MAGIC);
    Assert(hThreadSelf == RTThreadSelf());

    ASMAtomicIncS32(&hThreadSelf->LockValidator.cWriteLocks);

    if (pRecU->Excl.hThread == hThreadSelf)
    {
        /* Recursive acquisition by the current owner. */
        Assert(!fFirstRecursion);
        pRecU->Excl.cRecursion++;
        rtLockValidatorStackPushRecursion(hThreadSelf, pRecU, pSrcPos);
    }
    else
    {
        /* First acquisition; the record must currently be unowned. */
        Assert(pRecU->Excl.hThread == NIL_RTTHREAD);

        /* Set the recursion count before publishing the owner handle, so a
           concurrent reader never sees an owned record with count zero. */
        rtLockValidatorSrcPosCopy(&pRecU->Excl.SrcPos, pSrcPos);
        ASMAtomicUoWriteU32(&pRecU->Excl.cRecursion, 1);
        ASMAtomicWriteHandle(&pRecU->Excl.hThread, hThreadSelf);

        rtLockValidatorStackPush(hThreadSelf, pRecU);
    }
}
3078
3079
3080/**
3081 * Internal worker for RTLockValidatorRecExclReleaseOwner and
3082 * RTLockValidatorRecExclReleaseOwnerUnchecked.
3083 */
3084static void rtLockValidatorRecExclReleaseOwnerUnchecked(PRTLOCKVALRECUNION pRec, bool fFinalRecursion)
3085{
3086 RTTHREADINT *pThread = pRec->Excl.hThread;
3087 AssertReturnVoid(pThread != NIL_RTTHREAD);
3088 Assert(pThread == RTThreadSelf());
3089
3090 ASMAtomicDecS32(&pThread->LockValidator.cWriteLocks);
3091 uint32_t c = ASMAtomicDecU32(&pRec->Excl.cRecursion);
3092 if (c == 0)
3093 {
3094 rtLockValidatorStackPop(pThread, pRec);
3095 ASMAtomicWriteHandle(&pRec->Excl.hThread, NIL_RTTHREAD);
3096 }
3097 else
3098 {
3099 Assert(c < UINT32_C(0xffff0000));
3100 Assert(!fFinalRecursion);
3101 rtLockValidatorStackPopRecursion(pThread, pRec);
3102 }
3103}
3104
/**
 * Releases (one level of) exclusive lock ownership, optionally enforcing the
 * class's strict release order before doing so.
 *
 * @returns VINF_SUCCESS, VERR_SEM_LV_INVALID_PARAMETER, or a release-order
 *          error from rtLockValidatorStackCheckReleaseOrder.
 */
RTDECL(int) RTLockValidatorRecExclReleaseOwner(PRTLOCKVALRECEXCL pRec, bool fFinalRecursion)
{
    PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
    if (!pRecU)
        return VINF_SUCCESS;
    AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    if (!pRecU->Excl.fEnabled)
        return VINF_SUCCESS;

    /*
     * Check the release order.
     */
    if (   pRecU->Excl.hClass != NIL_RTLOCKVALCLASS
        && pRecU->Excl.hClass->fStrictReleaseOrder
        && pRecU->Excl.hClass->cMsMinOrder != RT_INDEFINITE_WAIT
       )
    {
        int rc = rtLockValidatorStackCheckReleaseOrder(pRecU->Excl.hThread, pRecU);
        if (RT_FAILURE(rc))
            return rc;
    }

    /*
     * Join paths with RTLockValidatorRecExclReleaseOwnerUnchecked.
     */
    rtLockValidatorRecExclReleaseOwnerUnchecked(pRecU, fFinalRecursion);
    return VINF_SUCCESS;
}
3133
3134
3135RTDECL(void) RTLockValidatorRecExclReleaseOwnerUnchecked(PRTLOCKVALRECEXCL pRec)
3136{
3137 PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
3138 AssertReturnVoid(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC);
3139 if (pRecU->Excl.fEnabled)
3140 rtLockValidatorRecExclReleaseOwnerUnchecked(pRecU, false);
3141}
3142
3143
/**
 * Records a recursive (re-)entry of an already-owned exclusive lock.
 *
 * @returns VINF_SUCCESS, VERR_SEM_LV_INVALID_PARAMETER, or VERR_SEM_LV_NESTED
 *          when the lock class forbids recursion.
 */
RTDECL(int) RTLockValidatorRecExclRecursion(PRTLOCKVALRECEXCL pRec, PCRTLOCKVALSRCPOS pSrcPos)
{
    PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
    if (!pRecU)
        return VINF_SUCCESS;
    AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    if (!pRecU->Excl.fEnabled)
        return VINF_SUCCESS;
    /* Recursion only makes sense on a record that is currently owned. */
    AssertReturn(pRecU->Excl.hThread != NIL_RTTHREAD, VERR_SEM_LV_INVALID_PARAMETER);
    AssertReturn(pRecU->Excl.cRecursion > 0, VERR_SEM_LV_INVALID_PARAMETER);

    if (   pRecU->Excl.hClass != NIL_RTLOCKVALCLASS
        && !pRecU->Excl.hClass->fRecursionOk)
    {
        rtLockValComplainFirst("Recursion not allowed by the class!",
                               pSrcPos, pRecU->Excl.hThread, (PRTLOCKVALRECUNION)pRec, true);
        rtLockValComplainPanic();
        return VERR_SEM_LV_NESTED;
    }

    /* Sanity limit on the recursion depth. */
    Assert(pRecU->Excl.cRecursion < _1M);
    pRecU->Excl.cRecursion++;
    rtLockValidatorStackPushRecursion(pRecU->Excl.hThread, pRecU, pSrcPos);
    return VINF_SUCCESS;
}
3169
3170
/**
 * Unwinds one level of exclusive lock recursion (the count must stay above
 * zero afterwards; use the release APIs for the final release).
 *
 * @returns VINF_SUCCESS, VERR_SEM_LV_INVALID_PARAMETER, or a release-order
 *          error.
 */
RTDECL(int) RTLockValidatorRecExclUnwind(PRTLOCKVALRECEXCL pRec)
{
    PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
    AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    if (!pRecU->Excl.fEnabled)
        return VINF_SUCCESS;
    AssertReturn(pRecU->Excl.hThread != NIL_RTTHREAD, VERR_SEM_LV_INVALID_PARAMETER);
    Assert(pRecU->Excl.hThread == RTThreadSelf());
    /* Must be a recursive entry; cRecursion == 1 means final release instead. */
    AssertReturn(pRecU->Excl.cRecursion > 1, VERR_SEM_LV_INVALID_PARAMETER);

    /*
     * Check the release order.
     */
    if (   pRecU->Excl.hClass != NIL_RTLOCKVALCLASS
        && pRecU->Excl.hClass->fStrictReleaseOrder
        && pRecU->Excl.hClass->cMsMinOrder != RT_INDEFINITE_WAIT
       )
    {
        int rc = rtLockValidatorStackCheckReleaseOrder(pRecU->Excl.hThread, pRecU);
        if (RT_FAILURE(rc))
            return rc;
    }

    /*
     * Perform the unwind.
     */
    pRecU->Excl.cRecursion--;
    rtLockValidatorStackPopRecursion(pRecU->Excl.hThread, pRecU);
    return VINF_SUCCESS;
}
3201
3202
/**
 * Records a mixed recursion: re-entering an exclusive lock via a different
 * (shared or exclusive) record.
 *
 * Note that pRecMixed is only validated here; the recursion itself is
 * accounted on the exclusive record's stack.
 *
 * @returns VINF_SUCCESS, VERR_SEM_LV_INVALID_PARAMETER, or
 *          VERR_SEM_LV_NESTED when the class forbids recursion.
 */
RTDECL(int) RTLockValidatorRecExclRecursionMixed(PRTLOCKVALRECEXCL pRec, PRTLOCKVALRECCORE pRecMixed, PCRTLOCKVALSRCPOS pSrcPos)
{
    PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
    AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    PRTLOCKVALRECUNION pRecMixedU = (PRTLOCKVALRECUNION)pRecMixed;
    AssertReturn(   pRecMixedU->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
                 || pRecMixedU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC
                 , VERR_SEM_LV_INVALID_PARAMETER);
    if (!pRecU->Excl.fEnabled)
        return VINF_SUCCESS;
    Assert(pRecU->Excl.hThread == RTThreadSelf());
    AssertReturn(pRecU->Excl.hThread != NIL_RTTHREAD, VERR_SEM_LV_INVALID_PARAMETER);
    AssertReturn(pRecU->Excl.cRecursion > 0, VERR_SEM_LV_INVALID_PARAMETER);

    if (   pRecU->Excl.hClass != NIL_RTLOCKVALCLASS
        && !pRecU->Excl.hClass->fRecursionOk)
    {
        rtLockValComplainFirst("Mixed recursion not allowed by the class!",
                               pSrcPos, pRecU->Excl.hThread, (PRTLOCKVALRECUNION)pRec, true);
        rtLockValComplainPanic();
        return VERR_SEM_LV_NESTED;
    }

    /* Sanity limit on the recursion depth. */
    Assert(pRecU->Excl.cRecursion < _1M);
    pRecU->Excl.cRecursion++;
    rtLockValidatorStackPushRecursion(pRecU->Excl.hThread, pRecU, pSrcPos);

    return VINF_SUCCESS;
}
3232
3233
/**
 * Unwinds one level of mixed recursion recorded by
 * RTLockValidatorRecExclRecursionMixed.
 *
 * @returns VINF_SUCCESS, VERR_SEM_LV_INVALID_PARAMETER, or a release-order
 *          error.
 */
RTDECL(int) RTLockValidatorRecExclUnwindMixed(PRTLOCKVALRECEXCL pRec, PRTLOCKVALRECCORE pRecMixed)
{
    PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
    AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    PRTLOCKVALRECUNION pRecMixedU = (PRTLOCKVALRECUNION)pRecMixed;
    AssertReturn(   pRecMixedU->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
                 || pRecMixedU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC
                 , VERR_SEM_LV_INVALID_PARAMETER);
    if (!pRecU->Excl.fEnabled)
        return VINF_SUCCESS;
    Assert(pRecU->Excl.hThread == RTThreadSelf());
    AssertReturn(pRecU->Excl.hThread != NIL_RTTHREAD, VERR_SEM_LV_INVALID_PARAMETER);
    /* Must be a recursive entry; the last reference is released elsewhere. */
    AssertReturn(pRecU->Excl.cRecursion > 1, VERR_SEM_LV_INVALID_PARAMETER);

    /*
     * Check the release order.
     */
    if (   pRecU->Excl.hClass != NIL_RTLOCKVALCLASS
        && pRecU->Excl.hClass->fStrictReleaseOrder
        && pRecU->Excl.hClass->cMsMinOrder != RT_INDEFINITE_WAIT
       )
    {
        int rc = rtLockValidatorStackCheckReleaseOrder(pRecU->Excl.hThread, pRecU);
        if (RT_FAILURE(rc))
            return rc;
    }

    /*
     * Perform the unwind.
     */
    pRecU->Excl.cRecursion--;
    rtLockValidatorStackPopRecursion(pRecU->Excl.hThread, pRecU);
    return VINF_SUCCESS;
}
3268
3269
/**
 * Checks the locking order before acquiring an exclusive lock.
 *
 * Order validation is skipped for NULL/disabled records, classless records,
 * classes with an indefinite minimum order wait, and waits shorter than the
 * class's minimum order interval.
 *
 * @returns VINF_SUCCESS or a lock-order violation status.
 */
RTDECL(int) RTLockValidatorRecExclCheckOrder(PRTLOCKVALRECEXCL pRec, RTTHREAD hThreadSelf,
                                             PCRTLOCKVALSRCPOS pSrcPos, RTMSINTERVAL cMillies)
{
    /*
     * Validate and adjust input.  Quit early if order validation is disabled.
     */
    PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
    if (!pRecU)
        return VINF_SUCCESS;
    AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    if (   !pRecU->Excl.fEnabled
        || pRecU->Excl.hClass == NIL_RTLOCKVALCLASS
        || pRecU->Excl.hClass->cMsMinOrder == RT_INDEFINITE_WAIT
        || pRecU->Excl.hClass->cMsMinOrder > cMillies)
        return VINF_SUCCESS;

    if (hThreadSelf == NIL_RTTHREAD)
    {
        /* Adopt foreign (alien) threads on the fly. */
        hThreadSelf = RTThreadSelfAutoAdopt();
        AssertReturn(hThreadSelf != NIL_RTTHREAD, VERR_SEM_LV_INTERNAL_ERROR);
    }
    AssertReturn(hThreadSelf->u32Magic == RTTHREADINT_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    Assert(hThreadSelf == RTThreadSelf());

    /*
     * Detect recursion as it isn't subject to order restrictions.
     */
    if (pRec->hThread == hThreadSelf)
        return VINF_SUCCESS;

    return rtLockValidatorStackCheckLockingOrder(pRecU->Excl.hClass, pRecU->Excl.uSubClass, hThreadSelf, pRecU, pSrcPos);
}
3302
3303
3304RTDECL(int) RTLockValidatorRecExclCheckBlocking(PRTLOCKVALRECEXCL pRec, RTTHREAD hThreadSelf,
3305 PCRTLOCKVALSRCPOS pSrcPos, bool fRecursiveOk, RTMSINTERVAL cMillies,
3306 RTTHREADSTATE enmSleepState, bool fReallySleeping)
3307{
3308 /*
3309 * Fend off wild life.
3310 */
3311 PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
3312 if (!pRecU)
3313 return VINF_SUCCESS;
3314 AssertPtrReturn(pRecU, VERR_SEM_LV_INVALID_PARAMETER);
3315 AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
3316 if (!pRec->fEnabled)
3317 return VINF_SUCCESS;
3318
3319 PRTTHREADINT pThreadSelf = hThreadSelf;
3320 AssertPtrReturn(pThreadSelf, VERR_SEM_LV_INVALID_PARAMETER);
3321 AssertReturn(pThreadSelf->u32Magic == RTTHREADINT_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
3322 Assert(pThreadSelf == RTThreadSelf());
3323
3324 AssertReturn(RTTHREAD_IS_SLEEPING(enmSleepState), VERR_SEM_LV_INVALID_PARAMETER);
3325
3326 RTTHREADSTATE enmThreadState = rtThreadGetState(pThreadSelf);
3327 if (RT_UNLIKELY(enmThreadState != RTTHREADSTATE_RUNNING))
3328 {
3329 AssertReturn( enmThreadState == RTTHREADSTATE_TERMINATED /* rtThreadRemove uses locks too */
3330 || enmThreadState == RTTHREADSTATE_INITIALIZING /* rtThreadInsert uses locks too */
3331 , VERR_SEM_LV_INVALID_PARAMETER);
3332 enmSleepState = enmThreadState;
3333 }
3334
3335 /*
3336 * Record the location.
3337 */
3338 rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pRec, pRecU);
3339 rtLockValidatorSrcPosCopy(&pThreadSelf->LockValidator.SrcPos, pSrcPos);
3340 ASMAtomicWriteBool(&pThreadSelf->LockValidator.fInValidator, true);
3341 pThreadSelf->LockValidator.enmRecState = enmSleepState;
3342 rtThreadSetState(pThreadSelf, enmSleepState);
3343
3344 /*
3345 * Don't do deadlock detection if we're recursing.
3346 *
3347 * On some hosts we don't do recursion accounting our selves and there
3348 * isn't any other place to check for this.
3349 */
3350 int rc = VINF_SUCCESS;
3351 if (rtLockValidatorReadThreadHandle(&pRecU->Excl.hThread) == pThreadSelf)
3352 {
3353 if ( !fRecursiveOk
3354 || ( pRecU->Excl.hClass != NIL_RTLOCKVALCLASS
3355 && !pRecU->Excl.hClass->fRecursionOk))
3356 {
3357 rtLockValComplainFirst("Recursion not allowed!", pSrcPos, pThreadSelf, pRecU, true);
3358 rtLockValComplainPanic();
3359 rc = VERR_SEM_LV_NESTED;
3360 }
3361 }
3362 /*
3363 * Perform deadlock detection.
3364 */
3365 else if ( pRecU->Excl.hClass != NIL_RTLOCKVALCLASS
3366 && ( pRecU->Excl.hClass->cMsMinDeadlock > cMillies
3367 || pRecU->Excl.hClass->cMsMinDeadlock > RT_INDEFINITE_WAIT))
3368 rc = VINF_SUCCESS;
3369 else if (!rtLockValidatorIsSimpleNoDeadlockCase(pRecU))
3370 rc = rtLockValidatorDeadlockDetection(pRecU, pThreadSelf, pSrcPos);
3371
3372 if (RT_SUCCESS(rc))
3373 ASMAtomicWriteBool(&pThreadSelf->fReallySleeping, fReallySleeping);
3374 else
3375 {
3376 rtThreadSetState(pThreadSelf, enmThreadState);
3377 rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pRec, NULL);
3378 }
3379 ASMAtomicWriteBool(&pThreadSelf->LockValidator.fInValidator, false);
3380 return rc;
3381}
3382RT_EXPORT_SYMBOL(RTLockValidatorRecExclCheckBlocking);
3383
3384
3385RTDECL(int) RTLockValidatorRecExclCheckOrderAndBlocking(PRTLOCKVALRECEXCL pRec, RTTHREAD hThreadSelf,
3386 PCRTLOCKVALSRCPOS pSrcPos, bool fRecursiveOk, RTMSINTERVAL cMillies,
3387 RTTHREADSTATE enmSleepState, bool fReallySleeping)
3388{
3389 int rc = RTLockValidatorRecExclCheckOrder(pRec, hThreadSelf, pSrcPos, cMillies);
3390 if (RT_SUCCESS(rc))
3391 rc = RTLockValidatorRecExclCheckBlocking(pRec, hThreadSelf, pSrcPos, fRecursiveOk, cMillies,
3392 enmSleepState, fReallySleeping);
3393 return rc;
3394}
3395RT_EXPORT_SYMBOL(RTLockValidatorRecExclCheckOrderAndBlocking);
3396
3397
3398RTDECL(void) RTLockValidatorRecSharedInitV(PRTLOCKVALRECSHRD pRec, RTLOCKVALCLASS hClass, uint32_t uSubClass,
3399 void *hLock, bool fSignaller, bool fEnabled, const char *pszNameFmt, va_list va)
3400{
3401 RTLOCKVAL_ASSERT_PTR_ALIGN(pRec);
3402 RTLOCKVAL_ASSERT_PTR_ALIGN(hLock);
3403 Assert( uSubClass >= RTLOCKVAL_SUB_CLASS_USER
3404 || uSubClass == RTLOCKVAL_SUB_CLASS_NONE
3405 || uSubClass == RTLOCKVAL_SUB_CLASS_ANY);
3406
3407 pRec->Core.u32Magic = RTLOCKVALRECSHRD_MAGIC;
3408 pRec->uSubClass = uSubClass;
3409 pRec->hClass = rtLockValidatorClassValidateAndRetain(hClass);
3410 pRec->hLock = hLock;
3411 pRec->fEnabled = fEnabled && RTLockValidatorIsEnabled();
3412 pRec->fSignaller = fSignaller;
3413 pRec->pSibling = NULL;
3414
3415 /* the table */
3416 pRec->cEntries = 0;
3417 pRec->iLastEntry = 0;
3418 pRec->cAllocated = 0;
3419 pRec->fReallocating = false;
3420 pRec->fPadding = false;
3421 pRec->papOwners = NULL;
3422
3423 /* the name */
3424 if (pszNameFmt)
3425 RTStrPrintfV(pRec->szName, sizeof(pRec->szName), pszNameFmt, va);
3426 else
3427 {
3428 static uint32_t volatile s_cAnonymous = 0;
3429 uint32_t i = ASMAtomicIncU32(&s_cAnonymous) - 1;
3430 RTStrPrintf(pRec->szName, sizeof(pRec->szName), "anon-shrd-%u", i);
3431 }
3432}
3433
3434
/**
 * Ellipsis variant of RTLockValidatorRecSharedInitV; simply forwards the
 * variable arguments.
 */
RTDECL(void) RTLockValidatorRecSharedInit(PRTLOCKVALRECSHRD pRec, RTLOCKVALCLASS hClass, uint32_t uSubClass,
                                          void *hLock, bool fSignaller, bool fEnabled, const char *pszNameFmt, ...)
{
    va_list va;
    va_start(va, pszNameFmt);
    RTLockValidatorRecSharedInitV(pRec, hClass, uSubClass, hLock, fSignaller, fEnabled, pszNameFmt, va);
    va_end(va);
}
3443
3444
3445RTDECL(int) RTLockValidatorRecSharedCreateV(PRTLOCKVALRECSHRD *ppRec, RTLOCKVALCLASS hClass,
3446 uint32_t uSubClass, void *pvLock, bool fSignaller, bool fEnabled,
3447 const char *pszNameFmt, va_list va)
3448{
3449 PRTLOCKVALRECSHRD pRec;
3450 *ppRec = pRec = (PRTLOCKVALRECSHRD)RTMemAlloc(sizeof(*pRec));
3451 if (!pRec)
3452 return VERR_NO_MEMORY;
3453 RTLockValidatorRecSharedInitV(pRec, hClass, uSubClass, pvLock, fSignaller, fEnabled, pszNameFmt, va);
3454 return VINF_SUCCESS;
3455}
3456
3457
/**
 * Ellipsis variant of RTLockValidatorRecSharedCreateV.
 *
 * @returns VINF_SUCCESS or VERR_NO_MEMORY (see the V variant).
 */
RTDECL(int) RTLockValidatorRecSharedCreate(PRTLOCKVALRECSHRD *ppRec, RTLOCKVALCLASS hClass,
                                           uint32_t uSubClass, void *pvLock, bool fSignaller, bool fEnabled,
                                           const char *pszNameFmt, ...)
{
    va_list va;
    va_start(va, pszNameFmt);
    int rc = RTLockValidatorRecSharedCreateV(ppRec, hClass, uSubClass, pvLock, fSignaller, fEnabled, pszNameFmt, va);
    va_end(va);
    return rc;
}
3468
3469
/**
 * Uninitializes a shared lock validator record, freeing the owner table.
 *
 * Claims the table-reallocation flag under the destruction lock so that it
 * cannot race rtLockValidatorRecSharedMakeRoom, then marks the record dead
 * before tearing down its state.
 */
RTDECL(void) RTLockValidatorRecSharedDelete(PRTLOCKVALRECSHRD pRec)
{
    Assert(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC);

    /** @todo Check that it's not on our stack first. Need to make it
     * configurable whether deleting a owned lock is acceptable? */

    /*
     * Flip it into table realloc mode and take the destruction lock.
     */
    rtLockValidatorSerializeDestructEnter();
    while (!ASMAtomicCmpXchgBool(&pRec->fReallocating, true, false))
    {
        /* Someone else is reallocating the table; drop the destruct lock,
           bounce through the detection lock to let them finish, and retry. */
        rtLockValidatorSerializeDestructLeave();

        rtLockValidatorSerializeDetectionEnter();
        rtLockValidatorSerializeDetectionLeave();

        rtLockValidatorSerializeDestructEnter();
    }

    /* Invalidate the magic first so concurrent readers treat the record as dead. */
    ASMAtomicWriteU32(&pRec->Core.u32Magic, RTLOCKVALRECSHRD_MAGIC_DEAD);
    RTLOCKVALCLASS hClass;
    ASMAtomicXchgHandle(&pRec->hClass, NIL_RTLOCKVALCLASS, &hClass);
    if (pRec->papOwners)
    {
        /* Detach the owner table before freeing it. */
        PRTLOCKVALRECSHRDOWN volatile *papOwners = pRec->papOwners;
        ASMAtomicUoWriteNullPtr(&pRec->papOwners);
        ASMAtomicUoWriteU32(&pRec->cAllocated, 0);

        RTMemFree((void *)papOwners);
    }
    if (pRec->pSibling)
        rtLockValidatorUnlinkAllSiblings(&pRec->Core);
    ASMAtomicWriteBool(&pRec->fReallocating, false);

    rtLockValidatorSerializeDestructLeave();

    /* Release the class reference outside the destruction lock. */
    if (hClass != NIL_RTLOCKVALCLASS)
        RTLockValidatorClassRelease(hClass);
}
3511
3512
3513RTDECL(void) RTLockValidatorRecSharedDestroy(PRTLOCKVALRECSHRD *ppRec)
3514{
3515 PRTLOCKVALRECSHRD pRec = *ppRec;
3516 *ppRec = NULL;
3517 if (pRec)
3518 {
3519 RTLockValidatorRecSharedDelete(pRec);
3520 RTMemFree(pRec);
3521 }
3522}
3523
3524
3525RTDECL(uint32_t) RTLockValidatorRecSharedSetSubClass(PRTLOCKVALRECSHRD pRec, uint32_t uSubClass)
3526{
3527 AssertPtrReturn(pRec, RTLOCKVAL_SUB_CLASS_INVALID);
3528 AssertReturn(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, RTLOCKVAL_SUB_CLASS_INVALID);
3529 AssertReturn( uSubClass >= RTLOCKVAL_SUB_CLASS_USER
3530 || uSubClass == RTLOCKVAL_SUB_CLASS_NONE
3531 || uSubClass == RTLOCKVAL_SUB_CLASS_ANY,
3532 RTLOCKVAL_SUB_CLASS_INVALID);
3533 return ASMAtomicXchgU32(&pRec->uSubClass, uSubClass);
3534}
3535
3536
3537/**
3538 * Locates an owner (thread) in a shared lock record.
3539 *
3540 * @returns Pointer to the owner entry on success, NULL on failure..
3541 * @param pShared The shared lock record.
3542 * @param hThread The thread (owner) to find.
3543 * @param piEntry Where to optionally return the table in index.
3544 * Optional.
3545 */
3546DECLINLINE(PRTLOCKVALRECUNION)
3547rtLockValidatorRecSharedFindOwner(PRTLOCKVALRECSHRD pShared, RTTHREAD hThread, uint32_t *piEntry)
3548{
3549 rtLockValidatorSerializeDetectionEnter();
3550
3551 PRTLOCKVALRECSHRDOWN volatile *papOwners = pShared->papOwners;
3552 if (papOwners)
3553 {
3554 uint32_t const cMax = pShared->cAllocated;
3555 for (uint32_t iEntry = 0; iEntry < cMax; iEntry++)
3556 {
3557 PRTLOCKVALRECUNION pEntry = (PRTLOCKVALRECUNION)rtLockValidatorUoReadSharedOwner(&papOwners[iEntry]);
3558 if (pEntry && pEntry->ShrdOwner.hThread == hThread)
3559 {
3560 rtLockValidatorSerializeDetectionLeave();
3561 if (piEntry)
3562 *piEntry = iEntry;
3563 return pEntry;
3564 }
3565 }
3566 }
3567
3568 rtLockValidatorSerializeDetectionLeave();
3569 return NULL;
3570}
3571
3572
/**
 * Checks the locking order before acquiring a shared lock.
 *
 * Order validation is skipped for disabled records, classless records,
 * classes with an indefinite minimum order wait, and waits shorter than the
 * class's minimum order interval.
 *
 * @returns VINF_SUCCESS or a lock-order violation status.
 */
RTDECL(int) RTLockValidatorRecSharedCheckOrder(PRTLOCKVALRECSHRD pRec, RTTHREAD hThreadSelf,
                                               PCRTLOCKVALSRCPOS pSrcPos, RTMSINTERVAL cMillies)
{
    /*
     * Validate and adjust input.  Quit early if order validation is disabled.
     */
    PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
    AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    if (   !pRecU->Shared.fEnabled
        || pRecU->Shared.hClass == NIL_RTLOCKVALCLASS
        || pRecU->Shared.hClass->cMsMinOrder == RT_INDEFINITE_WAIT
        || pRecU->Shared.hClass->cMsMinOrder > cMillies
       )
        return VINF_SUCCESS;

    if (hThreadSelf == NIL_RTTHREAD)
    {
        /* Adopt foreign (alien) threads on the fly. */
        hThreadSelf = RTThreadSelfAutoAdopt();
        AssertReturn(hThreadSelf != NIL_RTTHREAD, VERR_SEM_LV_INTERNAL_ERROR);
    }
    AssertReturn(hThreadSelf->u32Magic == RTTHREADINT_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    Assert(hThreadSelf == RTThreadSelf());

    /*
     * Detect recursion as it isn't subject to order restrictions.
     */
    PRTLOCKVALRECUNION pEntry = rtLockValidatorRecSharedFindOwner(&pRecU->Shared, hThreadSelf, NULL);
    if (pEntry)
        return VINF_SUCCESS;

    return rtLockValidatorStackCheckLockingOrder(pRecU->Shared.hClass, pRecU->Shared.uSubClass, hThreadSelf, pRecU, pSrcPos);
}
3605
3606
/**
 * Checks that it is safe for the calling thread to block on the given shared
 * lock record: flags disallowed recursion and runs deadlock detection,
 * recording the wait state on the thread while doing so.
 *
 * @returns VINF_SUCCESS if blocking is OK; VERR_SEM_LV_INVALID_PARAMETER,
 *          VERR_SEM_LV_NESTED, or a deadlock status otherwise.
 */
RTDECL(int) RTLockValidatorRecSharedCheckBlocking(PRTLOCKVALRECSHRD pRec, RTTHREAD hThreadSelf,
                                                  PCRTLOCKVALSRCPOS pSrcPos, bool fRecursiveOk, RTMSINTERVAL cMillies,
                                                  RTTHREADSTATE enmSleepState, bool fReallySleeping)
{
    /*
     * Fend off wild life.
     */
    PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
    AssertPtrReturn(pRecU, VERR_SEM_LV_INVALID_PARAMETER);
    AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    if (!pRecU->Shared.fEnabled)
        return VINF_SUCCESS;

    PRTTHREADINT pThreadSelf = hThreadSelf;
    AssertPtrReturn(pThreadSelf, VERR_SEM_LV_INVALID_PARAMETER);
    AssertReturn(pThreadSelf->u32Magic == RTTHREADINT_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    Assert(pThreadSelf == RTThreadSelf());

    AssertReturn(RTTHREAD_IS_SLEEPING(enmSleepState), VERR_SEM_LV_INVALID_PARAMETER);

    /* Threads being inserted/removed may legitimately be in a special state. */
    RTTHREADSTATE enmThreadState = rtThreadGetState(pThreadSelf);
    if (RT_UNLIKELY(enmThreadState != RTTHREADSTATE_RUNNING))
    {
        AssertReturn(   enmThreadState == RTTHREADSTATE_TERMINATED   /* rtThreadRemove uses locks too */
                     || enmThreadState == RTTHREADSTATE_INITIALIZING /* rtThreadInsert uses locks too */
                     , VERR_SEM_LV_INVALID_PARAMETER);
        enmSleepState = enmThreadState;
    }

    /*
     * Record the location.
     */
    rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pRec, pRecU);
    rtLockValidatorSrcPosCopy(&pThreadSelf->LockValidator.SrcPos, pSrcPos);
    ASMAtomicWriteBool(&pThreadSelf->LockValidator.fInValidator, true);
    pThreadSelf->LockValidator.enmRecState = enmSleepState;
    rtThreadSetState(pThreadSelf, enmSleepState);

    /*
     * Don't do deadlock detection if we're recursing.
     * (Signaller records never record owners, so skip the lookup for them.)
     */
    int rc = VINF_SUCCESS;
    PRTLOCKVALRECUNION pEntry = !pRecU->Shared.fSignaller
                              ? rtLockValidatorRecSharedFindOwner(&pRecU->Shared, pThreadSelf, NULL)
                              : NULL;
    if (pEntry)
    {
        if (   !fRecursiveOk
            || (   pRec->hClass
                && !pRec->hClass->fRecursionOk)
            )
        {
            rtLockValComplainFirst("Recursion not allowed!", pSrcPos, pThreadSelf, pRecU, true);
            rtLockValComplainPanic();
            rc = VERR_SEM_LV_NESTED;
        }
    }
    /*
     * Perform deadlock detection.
     */
    else if (   pRec->hClass
             && (   pRec->hClass->cMsMinDeadlock == RT_INDEFINITE_WAIT
                 || pRec->hClass->cMsMinDeadlock > cMillies))
        rc = VINF_SUCCESS;
    else if (!rtLockValidatorIsSimpleNoDeadlockCase(pRecU))
        rc = rtLockValidatorDeadlockDetection(pRecU, pThreadSelf, pSrcPos);

    if (RT_SUCCESS(rc))
        ASMAtomicWriteBool(&pThreadSelf->fReallySleeping, fReallySleeping);
    else
    {
        /* Roll back the wait state on failure. */
        rtThreadSetState(pThreadSelf, enmThreadState);
        rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pRec, NULL);
    }
    ASMAtomicWriteBool(&pThreadSelf->LockValidator.fInValidator, false);
    return rc;
}
3684RT_EXPORT_SYMBOL(RTLockValidatorRecSharedCheckBlocking);
3685
3686
3687RTDECL(int) RTLockValidatorRecSharedCheckOrderAndBlocking(PRTLOCKVALRECSHRD pRec, RTTHREAD hThreadSelf,
3688 PCRTLOCKVALSRCPOS pSrcPos, bool fRecursiveOk, RTMSINTERVAL cMillies,
3689 RTTHREADSTATE enmSleepState, bool fReallySleeping)
3690{
3691 int rc = RTLockValidatorRecSharedCheckOrder(pRec, hThreadSelf, pSrcPos, cMillies);
3692 if (RT_SUCCESS(rc))
3693 rc = RTLockValidatorRecSharedCheckBlocking(pRec, hThreadSelf, pSrcPos, fRecursiveOk, cMillies,
3694 enmSleepState, fReallySleeping);
3695 return rc;
3696}
3697RT_EXPORT_SYMBOL(RTLockValidatorRecSharedCheckOrderAndBlocking);
3698
3699
3700/**
3701 * Allocates and initializes an owner entry for the shared lock record.
3702 *
3703 * @returns The new owner entry.
3704 * @param pRec The shared lock record.
3705 * @param pThreadSelf The calling thread and owner. Used for record
3706 * initialization and allocation.
3707 * @param pSrcPos The source position.
3708 */
3709DECLINLINE(PRTLOCKVALRECUNION)
3710rtLockValidatorRecSharedAllocOwner(PRTLOCKVALRECSHRD pRec, PRTTHREADINT pThreadSelf, PCRTLOCKVALSRCPOS pSrcPos)
3711{
3712 PRTLOCKVALRECUNION pEntry;
3713
3714 /*
3715 * Check if the thread has any statically allocated records we can easily
3716 * make use of.
3717 */
3718 unsigned iEntry = ASMBitFirstSetU32(ASMAtomicUoReadU32(&pThreadSelf->LockValidator.bmFreeShrdOwners));
3719 if ( iEntry > 0
3720 && ASMAtomicBitTestAndClear(&pThreadSelf->LockValidator.bmFreeShrdOwners, iEntry - 1))
3721 {
3722 pEntry = (PRTLOCKVALRECUNION)&pThreadSelf->LockValidator.aShrdOwners[iEntry - 1];
3723 Assert(!pEntry->ShrdOwner.fReserved);
3724 pEntry->ShrdOwner.fStaticAlloc = true;
3725 rtThreadGet(pThreadSelf);
3726 }
3727 else
3728 {
3729 pEntry = (PRTLOCKVALRECUNION)RTMemAlloc(sizeof(RTLOCKVALRECSHRDOWN));
3730 if (RT_UNLIKELY(!pEntry))
3731 return NULL;
3732 pEntry->ShrdOwner.fStaticAlloc = false;
3733 }
3734
3735 pEntry->Core.u32Magic = RTLOCKVALRECSHRDOWN_MAGIC;
3736 pEntry->ShrdOwner.cRecursion = 1;
3737 pEntry->ShrdOwner.fReserved = true;
3738 pEntry->ShrdOwner.hThread = pThreadSelf;
3739 pEntry->ShrdOwner.pDown = NULL;
3740 pEntry->ShrdOwner.pSharedRec = pRec;
3741#if HC_ARCH_BITS == 32
3742 pEntry->ShrdOwner.pvReserved = NULL;
3743#endif
3744 if (pSrcPos)
3745 pEntry->ShrdOwner.SrcPos = *pSrcPos;
3746 else
3747 rtLockValidatorSrcPosInit(&pEntry->ShrdOwner.SrcPos);
3748 return pEntry;
3749}
3750
3751
3752/**
3753 * Frees an owner entry allocated by rtLockValidatorRecSharedAllocOwner.
3754 *
3755 * @param pEntry The owner entry.
3756 */
3757DECLINLINE(void) rtLockValidatorRecSharedFreeOwner(PRTLOCKVALRECSHRDOWN pEntry)
3758{
3759 if (pEntry)
3760 {
3761 Assert(pEntry->Core.u32Magic == RTLOCKVALRECSHRDOWN_MAGIC);
3762 ASMAtomicWriteU32(&pEntry->Core.u32Magic, RTLOCKVALRECSHRDOWN_MAGIC_DEAD);
3763
3764 PRTTHREADINT pThread;
3765 ASMAtomicXchgHandle(&pEntry->hThread, NIL_RTTHREAD, &pThread);
3766
3767 Assert(pEntry->fReserved);
3768 pEntry->fReserved = false;
3769
3770 if (pEntry->fStaticAlloc)
3771 {
3772 AssertPtrReturnVoid(pThread);
3773 AssertReturnVoid(pThread->u32Magic == RTTHREADINT_MAGIC);
3774
3775 uintptr_t iEntry = pEntry - &pThread->LockValidator.aShrdOwners[0];
3776 AssertReleaseReturnVoid(iEntry < RT_ELEMENTS(pThread->LockValidator.aShrdOwners));
3777
3778 Assert(!ASMBitTest(&pThread->LockValidator.bmFreeShrdOwners, (int32_t)iEntry));
3779 ASMAtomicBitSet(&pThread->LockValidator.bmFreeShrdOwners, (int32_t)iEntry);
3780
3781 rtThreadRelease(pThread);
3782 }
3783 else
3784 {
3785 rtLockValidatorSerializeDestructEnter();
3786 rtLockValidatorSerializeDestructLeave();
3787
3788 RTMemFree(pEntry);
3789 }
3790 }
3791}
3792
3793
3794/**
3795 * Make more room in the table.
3796 *
3797 * @retval true on success
3798 * @retval false if we're out of memory or running into a bad race condition
3799 * (probably a bug somewhere). No longer holding the lock.
3800 *
3801 * @param pShared The shared lock record.
3802 */
3803static bool rtLockValidatorRecSharedMakeRoom(PRTLOCKVALRECSHRD pShared)
3804{
3805 for (unsigned i = 0; i < 1000; i++)
3806 {
3807 /*
3808 * Switch to the other data access direction.
3809 */
3810 rtLockValidatorSerializeDetectionLeave();
3811 if (i >= 10)
3812 {
3813 Assert(i != 10 && i != 100);
3814 RTThreadSleep(i >= 100);
3815 }
3816 rtLockValidatorSerializeDestructEnter();
3817
3818 /*
3819 * Try grab the privilege to reallocating the table.
3820 */
3821 if ( pShared->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
3822 && ASMAtomicCmpXchgBool(&pShared->fReallocating, true, false))
3823 {
3824 uint32_t cAllocated = pShared->cAllocated;
3825 if (cAllocated < pShared->cEntries)
3826 {
3827 /*
3828 * Ok, still not enough space. Reallocate the table.
3829 */
3830#if 0 /** @todo enable this after making sure growing works flawlessly. */
3831 uint32_t cInc = RT_ALIGN_32(pShared->cEntries - cAllocated, 16);
3832#else
3833 uint32_t cInc = RT_ALIGN_32(pShared->cEntries - cAllocated, 1);
3834#endif
3835 PRTLOCKVALRECSHRDOWN *papOwners;
3836 papOwners = (PRTLOCKVALRECSHRDOWN *)RTMemRealloc((void *)pShared->papOwners,
3837 (cAllocated + cInc) * sizeof(void *));
3838 if (!papOwners)
3839 {
3840 ASMAtomicWriteBool(&pShared->fReallocating, false);
3841 rtLockValidatorSerializeDestructLeave();
3842 /* RTMemRealloc will assert */
3843 return false;
3844 }
3845
3846 while (cInc-- > 0)
3847 {
3848 papOwners[cAllocated] = NULL;
3849 cAllocated++;
3850 }
3851
3852 ASMAtomicWritePtr(&pShared->papOwners, papOwners);
3853 ASMAtomicWriteU32(&pShared->cAllocated, cAllocated);
3854 }
3855 ASMAtomicWriteBool(&pShared->fReallocating, false);
3856 }
3857 rtLockValidatorSerializeDestructLeave();
3858
3859 rtLockValidatorSerializeDetectionEnter();
3860 if (RT_UNLIKELY(pShared->Core.u32Magic != RTLOCKVALRECSHRD_MAGIC))
3861 break;
3862
3863 if (pShared->cAllocated >= pShared->cEntries)
3864 return true;
3865 }
3866
3867 rtLockValidatorSerializeDetectionLeave();
3868 AssertFailed(); /* too many iterations or destroyed while racing. */
3869 return false;
3870}
3871
3872
3873/**
3874 * Adds an owner entry to a shared lock record.
3875 *
 * @returns true on success, false on a serious race or if we're out of memory.
3877 * @param pShared The shared lock record.
3878 * @param pEntry The owner entry.
3879 */
DECLINLINE(bool) rtLockValidatorRecSharedAddOwner(PRTLOCKVALRECSHRD pShared, PRTLOCKVALRECSHRDOWN pEntry)
{
    rtLockValidatorSerializeDetectionEnter();
    if (RT_LIKELY(pShared->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC)) /* paranoia */
    {
        /* Reserve our entry up front; grow the table if the new count exceeds
           the current allocation. */
        if (   ASMAtomicIncU32(&pShared->cEntries) > pShared->cAllocated /** @todo add fudge */
            && !rtLockValidatorRecSharedMakeRoom(pShared))
            return false; /* the worker leaves the lock on failure */

        /* Claim the first free (NULL) slot via compare-and-swap; rescan a few
           times since concurrent adders may grab slots under our feet. */
        PRTLOCKVALRECSHRDOWN volatile *papOwners = pShared->papOwners;
        uint32_t const cMax = pShared->cAllocated;
        for (unsigned i = 0; i < 100; i++)
        {
            for (uint32_t iEntry = 0; iEntry < cMax; iEntry++)
            {
                if (ASMAtomicCmpXchgPtr(&papOwners[iEntry], pEntry, NULL))
                {
                    rtLockValidatorSerializeDetectionLeave();
                    return true;
                }
            }
            Assert(i != 25); /* debug canary: this many rescans hints at a race or bug */
        }
        AssertFailed();
    }
    rtLockValidatorSerializeDetectionLeave();
    return false;
}
3908
3909
3910/**
3911 * Remove an owner entry from a shared lock record and free it.
3912 *
3913 * @param pShared The shared lock record.
3914 * @param pEntry The owner entry to remove.
3915 * @param iEntry The last known index.
3916 */
DECLINLINE(void) rtLockValidatorRecSharedRemoveAndFreeOwner(PRTLOCKVALRECSHRD pShared, PRTLOCKVALRECSHRDOWN pEntry,
                                                            uint32_t iEntry)
{
    /*
     * Remove it from the table.
     */
    rtLockValidatorSerializeDetectionEnter();
    AssertReturnVoidStmt(pShared->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, rtLockValidatorSerializeDetectionLeave());
    if (RT_UNLIKELY(   iEntry >= pShared->cAllocated
                    || !ASMAtomicCmpXchgPtr(&pShared->papOwners[iEntry], NULL, pEntry)))
    {
        /* The cached index was stale; fall back to a full table scan.
           This shouldn't happen yet... */
        AssertFailed();
        PRTLOCKVALRECSHRDOWN volatile *papOwners = pShared->papOwners;
        uint32_t const cMax = pShared->cAllocated;
        for (iEntry = 0; iEntry < cMax; iEntry++)
            if (ASMAtomicCmpXchgPtr(&papOwners[iEntry], NULL, pEntry))
                break;
        AssertReturnVoidStmt(iEntry < cMax, rtLockValidatorSerializeDetectionLeave());
    }
    uint32_t cNow = ASMAtomicDecU32(&pShared->cEntries);
    Assert(!(cNow & RT_BIT_32(31))); NOREF(cNow); /* sanity: the counter must never go negative (wrap) */
    rtLockValidatorSerializeDetectionLeave();

    /*
     * Successfully removed, now free it.
     */
    rtLockValidatorRecSharedFreeOwner(pEntry);
}
3946
3947
/**
 * Resets the owner table of a signaller record, optionally installing
 * @a hThread as the sole owner afterwards.
 */
RTDECL(void) RTLockValidatorRecSharedResetOwner(PRTLOCKVALRECSHRD pRec, RTTHREAD hThread, PCRTLOCKVALSRCPOS pSrcPos)
{
    AssertReturnVoid(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC);
    if (!pRec->fEnabled)
        return;
    AssertReturnVoid(hThread == NIL_RTTHREAD || hThread->u32Magic == RTTHREADINT_MAGIC);
    AssertReturnVoid(pRec->fSignaller); /* only meaningful for signaller-style records */

    /*
     * Free all current owners.
     */
    rtLockValidatorSerializeDetectionEnter();
    while (ASMAtomicUoReadU32(&pRec->cEntries) > 0)
    {
        AssertReturnVoidStmt(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, rtLockValidatorSerializeDetectionLeave());
        uint32_t iEntry = 0;
        uint32_t cEntries = pRec->cAllocated;
        PRTLOCKVALRECSHRDOWN volatile *papEntries = pRec->papOwners;
        while (iEntry < cEntries)
        {
            /* Detach the entry first; the actual free is done outside the
               detection lock. */
            PRTLOCKVALRECSHRDOWN pEntry = ASMAtomicXchgPtrT(&papEntries[iEntry], NULL, PRTLOCKVALRECSHRDOWN);
            if (pEntry)
            {
                ASMAtomicDecU32(&pRec->cEntries);
                rtLockValidatorSerializeDetectionLeave();

                rtLockValidatorRecSharedFreeOwner(pEntry);

                /* Re-enter and reload the table pointer/size; the table may
                   have been reallocated while the lock was dropped. */
                rtLockValidatorSerializeDetectionEnter();
                if (ASMAtomicUoReadU32(&pRec->cEntries) == 0)
                    break;
                cEntries = pRec->cAllocated;
                papEntries = pRec->papOwners;
            }
            iEntry++;
        }
    }
    rtLockValidatorSerializeDetectionLeave();

    if (hThread != NIL_RTTHREAD)
    {
        /*
         * Allocate a new owner entry and insert it into the table.
         */
        PRTLOCKVALRECUNION pEntry = rtLockValidatorRecSharedAllocOwner(pRec, hThread, pSrcPos);
        if (   pEntry
            && !rtLockValidatorRecSharedAddOwner(pRec, &pEntry->ShrdOwner))
            rtLockValidatorRecSharedFreeOwner(&pEntry->ShrdOwner);
    }
}
RT_EXPORT_SYMBOL(RTLockValidatorRecSharedResetOwner);
3999
4000
/**
 * Adds @a hThread as an owner of the shared lock record, or bumps the
 * recursion count if it is already listed as an owner.
 */
RTDECL(void) RTLockValidatorRecSharedAddOwner(PRTLOCKVALRECSHRD pRec, RTTHREAD hThread, PCRTLOCKVALSRCPOS pSrcPos)
{
    AssertReturnVoid(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC);
    if (!pRec->fEnabled)
        return;
    if (hThread == NIL_RTTHREAD)
    {
        hThread = RTThreadSelfAutoAdopt();
        AssertReturnVoid(hThread != NIL_RTTHREAD);
    }
    AssertReturnVoid(hThread->u32Magic == RTTHREADINT_MAGIC);

    /*
     * Recursive?
     *
     * Note! This code can be optimized to try avoid scanning the table on
     *       insert.  However, that's annoying work that makes the code big,
     *       so it can wait til later sometime.
     */
    PRTLOCKVALRECUNION pEntry = rtLockValidatorRecSharedFindOwner(pRec, hThread, NULL);
    if (pEntry)
    {
        Assert(!pRec->fSignaller); /* signaller records do not do recursion */
        pEntry->ShrdOwner.cRecursion++;
        rtLockValidatorStackPushRecursion(hThread, pEntry, pSrcPos);
        return;
    }

    /*
     * Allocate a new owner entry and insert it into the table.
     */
    pEntry = rtLockValidatorRecSharedAllocOwner(pRec, hThread, pSrcPos);
    if (pEntry)
    {
        if (rtLockValidatorRecSharedAddOwner(pRec, &pEntry->ShrdOwner))
        {
            if (!pRec->fSignaller) /* signaller owners are not pushed on the lock stack */
                rtLockValidatorStackPush(hThread, pEntry);
        }
        else
            rtLockValidatorRecSharedFreeOwner(&pEntry->ShrdOwner);
    }
}
RT_EXPORT_SYMBOL(RTLockValidatorRecSharedAddOwner);
4045
4046
/**
 * Removes @a hThread as an owner of the shared lock record, unwinding one
 * level of recursion if it holds the lock recursively.
 */
RTDECL(void) RTLockValidatorRecSharedRemoveOwner(PRTLOCKVALRECSHRD pRec, RTTHREAD hThread)
{
    AssertReturnVoid(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC);
    if (!pRec->fEnabled)
        return;
    if (hThread == NIL_RTTHREAD)
    {
        hThread = RTThreadSelfAutoAdopt();
        AssertReturnVoid(hThread != NIL_RTTHREAD);
    }
    AssertReturnVoid(hThread->u32Magic == RTTHREADINT_MAGIC);

    /*
     * Find the entry; the thread must be listed as an owner.
     */
    uint32_t iEntry = UINT32_MAX; /* shuts up gcc */
    PRTLOCKVALRECUNION pEntry = rtLockValidatorRecSharedFindOwner(pRec, hThread, &iEntry);
    AssertReturnVoid(pEntry);
    AssertReturnVoid(pEntry->ShrdOwner.cRecursion > 0);

    uint32_t c = --pEntry->ShrdOwner.cRecursion;
    if (c == 0)
    {
        /* Last reference: pop it off the lock stack and free the entry. */
        if (!pRec->fSignaller)
            rtLockValidatorStackPop(hThread, (PRTLOCKVALRECUNION)pEntry);
        rtLockValidatorRecSharedRemoveAndFreeOwner(pRec, &pEntry->ShrdOwner, iEntry);
    }
    else
    {
        /* Just unwind one level of recursion. */
        Assert(!pRec->fSignaller);
        rtLockValidatorStackPopRecursion(hThread, pEntry);
    }
}
RT_EXPORT_SYMBOL(RTLockValidatorRecSharedRemoveOwner);
4081
4082
4083RTDECL(bool) RTLockValidatorRecSharedIsOwner(PRTLOCKVALRECSHRD pRec, RTTHREAD hThread)
4084{
4085 /* Validate and resolve input. */
4086 AssertReturn(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, false);
4087 if (!pRec->fEnabled)
4088 return false;
4089 if (hThread == NIL_RTTHREAD)
4090 {
4091 hThread = RTThreadSelfAutoAdopt();
4092 AssertReturn(hThread != NIL_RTTHREAD, false);
4093 }
4094 AssertReturn(hThread->u32Magic == RTTHREADINT_MAGIC, false);
4095
4096 /* Do the job. */
4097 PRTLOCKVALRECUNION pEntry = rtLockValidatorRecSharedFindOwner(pRec, hThread, NULL);
4098 return pEntry != NULL;
4099}
4100RT_EXPORT_SYMBOL(RTLockValidatorRecSharedIsOwner);
4101
4102
/**
 * Validates that the calling thread may release the shared lock, checks the
 * release order, and then releases one level of ownership.
 *
 * @returns VINF_SUCCESS, VERR_SEM_LV_NOT_OWNER or a release order status.
 */
RTDECL(int) RTLockValidatorRecSharedCheckAndRelease(PRTLOCKVALRECSHRD pRec, RTTHREAD hThreadSelf)
{
    AssertReturn(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    if (!pRec->fEnabled)
        return VINF_SUCCESS;
    if (hThreadSelf == NIL_RTTHREAD)
    {
        hThreadSelf = RTThreadSelfAutoAdopt();
        AssertReturn(hThreadSelf != NIL_RTTHREAD, VERR_SEM_LV_INTERNAL_ERROR);
    }
    Assert(hThreadSelf == RTThreadSelf());
    AssertReturn(hThreadSelf->u32Magic == RTTHREADINT_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);

    /*
     * Locate the entry for this thread in the table.
     */
    uint32_t iEntry = 0;
    PRTLOCKVALRECUNION pEntry = rtLockValidatorRecSharedFindOwner(pRec, hThreadSelf, &iEntry);
    if (RT_UNLIKELY(!pEntry))
    {
        rtLockValComplainFirst("Not owner (shared)!", NULL, hThreadSelf, (PRTLOCKVALRECUNION)pRec, true);
        rtLockValComplainPanic();
        return VERR_SEM_LV_NOT_OWNER;
    }

    /*
     * Check the release order.
     */
    if (   pRec->hClass != NIL_RTLOCKVALCLASS
        && pRec->hClass->fStrictReleaseOrder
        && pRec->hClass->cMsMinOrder != RT_INDEFINITE_WAIT
       )
    {
        int rc = rtLockValidatorStackCheckReleaseOrder(hThreadSelf, (PRTLOCKVALRECUNION)pEntry);
        if (RT_FAILURE(rc))
            return rc;
    }

    /*
     * Release the ownership or unwind a level of recursion.
     */
    Assert(pEntry->ShrdOwner.cRecursion > 0);
    uint32_t c = --pEntry->ShrdOwner.cRecursion;
    if (c == 0)
    {
        rtLockValidatorStackPop(hThreadSelf, pEntry);
        rtLockValidatorRecSharedRemoveAndFreeOwner(pRec, &pEntry->ShrdOwner, iEntry);
    }
    else
        rtLockValidatorStackPopRecursion(hThreadSelf, pEntry);

    return VINF_SUCCESS;
}
4156
4157
4158RTDECL(int) RTLockValidatorRecSharedCheckSignaller(PRTLOCKVALRECSHRD pRec, RTTHREAD hThreadSelf)
4159{
4160 AssertReturn(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
4161 if (!pRec->fEnabled)
4162 return VINF_SUCCESS;
4163 if (hThreadSelf == NIL_RTTHREAD)
4164 {
4165 hThreadSelf = RTThreadSelfAutoAdopt();
4166 AssertReturn(hThreadSelf != NIL_RTTHREAD, VERR_SEM_LV_INTERNAL_ERROR);
4167 }
4168 Assert(hThreadSelf == RTThreadSelf());
4169 AssertReturn(hThreadSelf->u32Magic == RTTHREADINT_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
4170
4171 /*
4172 * Locate the entry for this thread in the table.
4173 */
4174 uint32_t iEntry = 0;
4175 PRTLOCKVALRECUNION pEntry = rtLockValidatorRecSharedFindOwner(pRec, hThreadSelf, &iEntry);
4176 if (RT_UNLIKELY(!pEntry))
4177 {
4178 rtLockValComplainFirst("Invalid signaller!", NULL, hThreadSelf, (PRTLOCKVALRECUNION)pRec, true);
4179 rtLockValComplainPanic();
4180 return VERR_SEM_LV_NOT_SIGNALLER;
4181 }
4182 return VINF_SUCCESS;
4183}
4184
4185
4186RTDECL(int32_t) RTLockValidatorWriteLockGetCount(RTTHREAD Thread)
4187{
4188 if (Thread == NIL_RTTHREAD)
4189 return 0;
4190
4191 PRTTHREADINT pThread = rtThreadGet(Thread);
4192 if (!pThread)
4193 return VERR_INVALID_HANDLE;
4194 int32_t cWriteLocks = ASMAtomicReadS32(&pThread->LockValidator.cWriteLocks);
4195 rtThreadRelease(pThread);
4196 return cWriteLocks;
4197}
4198RT_EXPORT_SYMBOL(RTLockValidatorWriteLockGetCount);
4199
4200
4201RTDECL(void) RTLockValidatorWriteLockInc(RTTHREAD Thread)
4202{
4203 PRTTHREADINT pThread = rtThreadGet(Thread);
4204 AssertReturnVoid(pThread);
4205 ASMAtomicIncS32(&pThread->LockValidator.cWriteLocks);
4206 rtThreadRelease(pThread);
4207}
4208RT_EXPORT_SYMBOL(RTLockValidatorWriteLockInc);
4209
4210
4211RTDECL(void) RTLockValidatorWriteLockDec(RTTHREAD Thread)
4212{
4213 PRTTHREADINT pThread = rtThreadGet(Thread);
4214 AssertReturnVoid(pThread);
4215 ASMAtomicDecS32(&pThread->LockValidator.cWriteLocks);
4216 rtThreadRelease(pThread);
4217}
4218RT_EXPORT_SYMBOL(RTLockValidatorWriteLockDec);
4219
4220
4221RTDECL(int32_t) RTLockValidatorReadLockGetCount(RTTHREAD Thread)
4222{
4223 if (Thread == NIL_RTTHREAD)
4224 return 0;
4225
4226 PRTTHREADINT pThread = rtThreadGet(Thread);
4227 if (!pThread)
4228 return VERR_INVALID_HANDLE;
4229 int32_t cReadLocks = ASMAtomicReadS32(&pThread->LockValidator.cReadLocks);
4230 rtThreadRelease(pThread);
4231 return cReadLocks;
4232}
4233RT_EXPORT_SYMBOL(RTLockValidatorReadLockGetCount);
4234
4235
4236RTDECL(void) RTLockValidatorReadLockInc(RTTHREAD Thread)
4237{
4238 PRTTHREADINT pThread = rtThreadGet(Thread);
4239 Assert(pThread);
4240 ASMAtomicIncS32(&pThread->LockValidator.cReadLocks);
4241 rtThreadRelease(pThread);
4242}
4243RT_EXPORT_SYMBOL(RTLockValidatorReadLockInc);
4244
4245
4246RTDECL(void) RTLockValidatorReadLockDec(RTTHREAD Thread)
4247{
4248 PRTTHREADINT pThread = rtThreadGet(Thread);
4249 Assert(pThread);
4250 ASMAtomicDecS32(&pThread->LockValidator.cReadLocks);
4251 rtThreadRelease(pThread);
4252}
4253RT_EXPORT_SYMBOL(RTLockValidatorReadLockDec);
4254
4255
/**
 * Queries the lock (handle) a sleeping thread is blocked on, if any.
 *
 * @returns The lock handle, or NULL if the thread isn't sleeping on a
 *          recognized lock record (or woke up while we looked).
 */
RTDECL(void *) RTLockValidatorQueryBlocking(RTTHREAD hThread)
{
    void *pvLock = NULL;
    PRTTHREADINT pThread = rtThreadGet(hThread);
    if (pThread)
    {
        RTTHREADSTATE enmState = rtThreadGetState(pThread);
        if (RTTHREAD_IS_SLEEPING(enmState))
        {
            rtLockValidatorSerializeDetectionEnter();

            /* Re-check the state now that we hold the detection lock. */
            enmState = rtThreadGetState(pThread);
            if (RTTHREAD_IS_SLEEPING(enmState))
            {
                PRTLOCKVALRECUNION pRec = rtLockValidatorReadRecUnionPtr(&pThread->LockValidator.pRec);
                if (pRec)
                {
                    switch (pRec->Core.u32Magic)
                    {
                        case RTLOCKVALRECEXCL_MAGIC:
                            pvLock = pRec->Excl.hLock;
                            break;

                        case RTLOCKVALRECSHRDOWN_MAGIC:
                            /* Follow the owner entry to its shared record. */
                            pRec = (PRTLOCKVALRECUNION)pRec->ShrdOwner.pSharedRec;
                            if (!pRec || pRec->Core.u32Magic != RTLOCKVALRECSHRD_MAGIC)
                                break;
                            /* fall thru */
                        case RTLOCKVALRECSHRD_MAGIC:
                            pvLock = pRec->Shared.hLock;
                            break;
                    }
                    /* Discard the result if the thread changed state meanwhile. */
                    if (RTThreadGetState(pThread) != enmState)
                        pvLock = NULL;
                }
            }

            rtLockValidatorSerializeDetectionLeave();
        }
        rtThreadRelease(pThread);
    }
    return pvLock;
}
RT_EXPORT_SYMBOL(RTLockValidatorQueryBlocking);
4299
4300
4301RTDECL(bool) RTLockValidatorIsBlockedThreadInValidator(RTTHREAD hThread)
4302{
4303 bool fRet = false;
4304 PRTTHREADINT pThread = rtThreadGet(hThread);
4305 if (pThread)
4306 {
4307 fRet = ASMAtomicReadBool(&pThread->LockValidator.fInValidator);
4308 rtThreadRelease(pThread);
4309 }
4310 return fRet;
4311}
4312RT_EXPORT_SYMBOL(RTLockValidatorIsBlockedThreadInValidator);
4313
4314
4315RTDECL(bool) RTLockValidatorHoldsLocksInClass(RTTHREAD hCurrentThread, RTLOCKVALCLASS hClass)
4316{
4317 bool fRet = false;
4318 if (hCurrentThread == NIL_RTTHREAD)
4319 hCurrentThread = RTThreadSelf();
4320 else
4321 Assert(hCurrentThread == RTThreadSelf());
4322 PRTTHREADINT pThread = rtThreadGet(hCurrentThread);
4323 if (pThread)
4324 {
4325 if (hClass != NIL_RTLOCKVALCLASS)
4326 {
4327 PRTLOCKVALRECUNION pCur = rtLockValidatorReadRecUnionPtr(&pThread->LockValidator.pStackTop);
4328 while (VALID_PTR(pCur) && !fRet)
4329 {
4330 switch (pCur->Core.u32Magic)
4331 {
4332 case RTLOCKVALRECEXCL_MAGIC:
4333 fRet = pCur->Excl.hClass == hClass;
4334 pCur = rtLockValidatorReadRecUnionPtr(&pCur->Excl.pDown);
4335 break;
4336 case RTLOCKVALRECSHRDOWN_MAGIC:
4337 fRet = VALID_PTR(pCur->ShrdOwner.pSharedRec)
4338 && pCur->ShrdOwner.pSharedRec->hClass == hClass;
4339 pCur = rtLockValidatorReadRecUnionPtr(&pCur->ShrdOwner.pDown);
4340 break;
4341 case RTLOCKVALRECNEST_MAGIC:
4342 switch (pCur->Nest.pRec->Core.u32Magic)
4343 {
4344 case RTLOCKVALRECEXCL_MAGIC:
4345 fRet = pCur->Nest.pRec->Excl.hClass == hClass;
4346 break;
4347 case RTLOCKVALRECSHRDOWN_MAGIC:
4348 fRet = VALID_PTR(pCur->ShrdOwner.pSharedRec)
4349 && pCur->Nest.pRec->ShrdOwner.pSharedRec->hClass == hClass;
4350 break;
4351 }
4352 pCur = rtLockValidatorReadRecUnionPtr(&pCur->Nest.pDown);
4353 break;
4354 default:
4355 pCur = NULL;
4356 break;
4357 }
4358 }
4359 }
4360
4361 rtThreadRelease(pThread);
4362 }
4363 return fRet;
4364}
4365RT_EXPORT_SYMBOL(RTLockValidatorHoldsLocksInClass);
4366
4367
4368RTDECL(bool) RTLockValidatorHoldsLocksInSubClass(RTTHREAD hCurrentThread, RTLOCKVALCLASS hClass, uint32_t uSubClass)
4369{
4370 bool fRet = false;
4371 if (hCurrentThread == NIL_RTTHREAD)
4372 hCurrentThread = RTThreadSelf();
4373 else
4374 Assert(hCurrentThread == RTThreadSelf());
4375 PRTTHREADINT pThread = rtThreadGet(hCurrentThread);
4376 if (pThread)
4377 {
4378 if (hClass != NIL_RTLOCKVALCLASS)
4379 {
4380 PRTLOCKVALRECUNION pCur = rtLockValidatorReadRecUnionPtr(&pThread->LockValidator.pStackTop);
4381 while (VALID_PTR(pCur) && !fRet)
4382 {
4383 switch (pCur->Core.u32Magic)
4384 {
4385 case RTLOCKVALRECEXCL_MAGIC:
4386 fRet = pCur->Excl.hClass == hClass
4387 && pCur->Excl.uSubClass == uSubClass;
4388 pCur = rtLockValidatorReadRecUnionPtr(&pCur->Excl.pDown);
4389 break;
4390 case RTLOCKVALRECSHRDOWN_MAGIC:
4391 fRet = VALID_PTR(pCur->ShrdOwner.pSharedRec)
4392 && pCur->ShrdOwner.pSharedRec->hClass == hClass
4393 && pCur->ShrdOwner.pSharedRec->uSubClass == uSubClass;
4394 pCur = rtLockValidatorReadRecUnionPtr(&pCur->ShrdOwner.pDown);
4395 break;
4396 case RTLOCKVALRECNEST_MAGIC:
4397 switch (pCur->Nest.pRec->Core.u32Magic)
4398 {
4399 case RTLOCKVALRECEXCL_MAGIC:
4400 fRet = pCur->Nest.pRec->Excl.hClass == hClass
4401 && pCur->Nest.pRec->Excl.uSubClass == uSubClass;
4402 break;
4403 case RTLOCKVALRECSHRDOWN_MAGIC:
4404 fRet = VALID_PTR(pCur->ShrdOwner.pSharedRec)
4405 && pCur->Nest.pRec->ShrdOwner.pSharedRec->hClass == hClass
4406 && pCur->Nest.pRec->ShrdOwner.pSharedRec->uSubClass == uSubClass;
4407 break;
4408 }
4409 pCur = rtLockValidatorReadRecUnionPtr(&pCur->Nest.pDown);
4410 break;
4411 default:
4412 pCur = NULL;
4413 break;
4414 }
4415 }
4416 }
4417
4418 rtThreadRelease(pThread);
4419 }
4420 return fRet;
4421}
4422RT_EXPORT_SYMBOL(RTLockValidatorHoldsLocksInClass);
4423
4424
4425RTDECL(bool) RTLockValidatorSetEnabled(bool fEnabled)
4426{
4427 return ASMAtomicXchgBool(&g_fLockValidatorEnabled, fEnabled);
4428}
4429RT_EXPORT_SYMBOL(RTLockValidatorSetEnabled);
4430
4431
4432RTDECL(bool) RTLockValidatorIsEnabled(void)
4433{
4434 return ASMAtomicUoReadBool(&g_fLockValidatorEnabled);
4435}
4436RT_EXPORT_SYMBOL(RTLockValidatorIsEnabled);
4437
4438
4439RTDECL(bool) RTLockValidatorSetQuiet(bool fQuiet)
4440{
4441 return ASMAtomicXchgBool(&g_fLockValidatorQuiet, fQuiet);
4442}
4443RT_EXPORT_SYMBOL(RTLockValidatorSetQuiet);
4444
4445
4446RTDECL(bool) RTLockValidatorIsQuiet(void)
4447{
4448 return ASMAtomicUoReadBool(&g_fLockValidatorQuiet);
4449}
4450RT_EXPORT_SYMBOL(RTLockValidatorIsQuiet);
4451
4452
4453RTDECL(bool) RTLockValidatorSetMayPanic(bool fMayPanic)
4454{
4455 return ASMAtomicXchgBool(&g_fLockValidatorMayPanic, fMayPanic);
4456}
4457RT_EXPORT_SYMBOL(RTLockValidatorSetMayPanic);
4458
4459
4460RTDECL(bool) RTLockValidatorMayPanic(void)
4461{
4462 return ASMAtomicUoReadBool(&g_fLockValidatorMayPanic);
4463}
4464RT_EXPORT_SYMBOL(RTLockValidatorMayPanic);
4465
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette