VirtualBox

source: vbox/trunk/src/VBox/Runtime/common/misc/lockvalidator.cpp@ 36597

Last change on this file since 36597 was 36597, checked in by vboxsync, 14 years ago

IPRT: Implemented the memory tracker.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 157.5 KB
Line 
1/* $Id: lockvalidator.cpp 36597 2011-04-06 19:46:15Z vboxsync $ */
2/** @file
3 * IPRT - Lock Validator.
4 */
5
6/*
7 * Copyright (C) 2009-2010 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 */
26
27/*******************************************************************************
28* Header Files *
29*******************************************************************************/
30#include <iprt/lockvalidator.h>
31#include "internal/iprt.h"
32
33#include <iprt/asm.h>
34#include <iprt/assert.h>
35#include <iprt/env.h>
36#include <iprt/err.h>
37#include <iprt/mem.h>
38#include <iprt/once.h>
39#include <iprt/semaphore.h>
40#include <iprt/string.h>
41#include <iprt/thread.h>
42
43#include "internal/lockvalidator.h"
44#include "internal/magics.h"
45#include "internal/strhash.h"
46#include "internal/thread.h"
47
48
49/*******************************************************************************
50* Defined Constants And Macros *
51*******************************************************************************/
/** Macro that asserts that a pointer is aligned correctly.
 * Only used when fighting bugs.
 * @note Both variants expand to a single statement: the enabled variant used
 *       to carry a trailing ';', which made the call-site ';' a second (empty)
 *       statement and broke un-braced if/else use; it is now wrapped in the
 *       standard do/while(0) idiom, consistent with the disabled variant. */
#if 1
# define RTLOCKVAL_ASSERT_PTR_ALIGN(p) \
    do { AssertMsg(!((uintptr_t)(p) & (sizeof(uintptr_t) - 1)), ("%p\n", (p))); } while (0)
#else
# define RTLOCKVAL_ASSERT_PTR_ALIGN(p)   do { } while (0)
#endif
60
/** Hashes the class handle (pointer) into an apPriorLocksHash index.
 * Discards the low 6 bits of the pointer first (presumably near-constant due
 * to allocation alignment -- unverified), then takes the remainder modulo the
 * hash table size computed from RTLOCKVALCLASSINT::apPriorLocksHash. */
#define RTLOCKVALCLASS_HASH(hClass) \
    (   ((uintptr_t)(hClass) >> 6 ) \
      % (   RT_SIZEOFMEMB(RTLOCKVALCLASSINT, apPriorLocksHash) \
          / sizeof(PRTLOCKVALCLASSREF)) )

/** The max value for RTLOCKVALCLASSINT::cRefs.
 * Kept well below UINT32_MAX so an overrun can be noticed before the counter
 * wraps -- presumably; confirm against the retain/release code. */
#define RTLOCKVALCLASS_MAX_REFS                 UINT32_C(0xffff0000)
/** The max value for RTLOCKVALCLASSREF::cLookups. */
#define RTLOCKVALCLASSREF_MAX_LOOKUPS           UINT32_C(0xfffe0000)
/** The absolute max value for RTLOCKVALCLASSREF::cLookups at which it will
 * be set back to RTLOCKVALCLASSREF_MAX_LOOKUPS. */
#define RTLOCKVALCLASSREF_MAX_LOOKUPS_FIX       UINT32_C(0xffff0000)
74
75
/** @def RTLOCKVAL_WITH_RECURSION_RECORDS
 * Enable recursion records.  Only compiled in for ring-3 (and doxygen). */
#if defined(IN_RING3) || defined(DOXYGEN_RUNNING)
# define RTLOCKVAL_WITH_RECURSION_RECORDS  1
#endif

/** @def RTLOCKVAL_WITH_VERBOSE_DUMPS
 * Enables some extra verbosity in the lock dumping.
 * Not enabled in any real build configuration, only for doxygen. */
#if defined(DOXYGEN_RUNNING)
# define RTLOCKVAL_WITH_VERBOSE_DUMPS
#endif

/** @def RTLOCKVAL_WITH_CLASS_HASH_STATS
 * Enables collection prior class hash lookup statistics, dumping them when
 * complaining about the class.  Debug builds only. */
#if defined(DEBUG) || defined(DOXYGEN_RUNNING)
# define RTLOCKVAL_WITH_CLASS_HASH_STATS
#endif
94
95
96/*******************************************************************************
97* Structures and Typedefs *
98*******************************************************************************/
/**
 * Deadlock detection stack entry.
 *
 * One of these is pushed for each level of the wait chain being walked; they
 * live in the fixed-size array of RTLOCKVALDDSTACK.
 */
typedef struct RTLOCKVALDDENTRY
{
    /** The current record. */
    PRTLOCKVALRECUNION      pRec;
    /** The current entry number if pRec is a shared one. */
    uint32_t                iEntry;
    /** The thread state of the thread we followed to get to pFirstSibling.
     * This is only used for validating a deadlock stack. */
    RTTHREADSTATE           enmState;
    /** The thread we followed to get to pFirstSibling.
     * This is only used for validating a deadlock stack. */
    PRTTHREADINT            pThread;
    /** What pThread is waiting on, i.e. where we entered the circular list of
     * siblings.  This is used for validating a deadlock stack as well as
     * terminating the sibling walk. */
    PRTLOCKVALRECUNION      pFirstSibling;
} RTLOCKVALDDENTRY;
119
120
/**
 * Deadlock detection stack.
 *
 * The fixed 32-entry array caps how deep a wait chain the detector can follow.
 */
typedef struct RTLOCKVALDDSTACK
{
    /** The number of used stack entries. */
    uint32_t                c;
    /** The stack entries. */
    RTLOCKVALDDENTRY        a[32];
} RTLOCKVALDDSTACK;
/** Pointer to a deadlock detection stack. */
typedef RTLOCKVALDDSTACK *PRTLOCKVALDDSTACK;
133
134
/**
 * Reference to another class.
 *
 * Stored in RTLOCKVALCLASSREFCHUNK chains and pointed to from the prior-lock
 * hash of RTLOCKVALCLASSINT.
 */
typedef struct RTLOCKVALCLASSREF
{
    /** The class. */
    RTLOCKVALCLASS          hClass;
    /** The number of lookups of this class.  See
     *  RTLOCKVALCLASSREF_MAX_LOOKUPS / RTLOCKVALCLASSREF_MAX_LOOKUPS_FIX. */
    uint32_t volatile       cLookups;
    /** Indicates whether the entry was added automatically during order checking
     * (true) or manually via the API (false). */
    bool                    fAutodidacticism;
    /** Reserved / explicit alignment padding. */
    bool                    afReserved[3];
} RTLOCKVALCLASSREF;
/** Pointer to a class reference. */
typedef RTLOCKVALCLASSREF *PRTLOCKVALCLASSREF;
152
153
/** Pointer to a chunk of class references. */
typedef struct RTLOCKVALCLASSREFCHUNK *PRTLOCKVALCLASSREFCHUNK;
/**
 * Chunk of class references.
 *
 * Linked list node; chunks are chained via pNext so the prior-lock list can
 * grow without reallocating existing entries.
 */
typedef struct RTLOCKVALCLASSREFCHUNK
{
    /** Array of refs. */
#if 0 /** @todo for testing allocation of new chunks. */
    RTLOCKVALCLASSREF   aRefs[ARCH_BITS == 32 ? 10 : 8];
#else
    /* NOTE(review): the active variant holds only 2 refs per chunk, which
       forces frequent chunk allocation -- looks like the chunk-allocation
       test configuration is the one enabled; confirm this is intended. */
    RTLOCKVALCLASSREF   aRefs[2];
#endif
    /** Pointer to the next chunk. */
    PRTLOCKVALCLASSREFCHUNK volatile pNext;
} RTLOCKVALCLASSREFCHUNK;
170
171
/**
 * Lock class.
 *
 * Reference counted (cRefs) and kept in the global class tree
 * (g_LockValClassTree) when fInTree is set.  The layout is checked by the
 * compile-time assertions following the structure.
 */
typedef struct RTLOCKVALCLASSINT
{
    /** AVL node core. */
    AVLLU32NODECORE         Core;
    /** Magic value (RTLOCKVALCLASS_MAGIC). */
    uint32_t volatile       u32Magic;
    /** Reference counter.  See RTLOCKVALCLASS_MAX_REFS. */
    uint32_t volatile       cRefs;
    /** Whether the class is allowed to teach it self new locking order rules. */
    bool                    fAutodidact;
    /** Whether to allow recursion. */
    bool                    fRecursionOk;
    /** Strict release order. */
    bool                    fStrictReleaseOrder;
    /** Whether this class is in the tree. */
    bool                    fInTree;
    /** Donate a reference to the next retainer.  This is a hack to make
     * RTLockValidatorClassCreateUnique work. */
    bool volatile           fDonateRefToNextRetainer;
    /** Reserved future use / explicit alignment. */
    bool                    afReserved[3];
    /** The minimum wait interval for which we do deadlock detection
     * (milliseconds). */
    RTMSINTERVAL            cMsMinDeadlock;
    /** The minimum wait interval for which we do order checks (milliseconds). */
    RTMSINTERVAL            cMsMinOrder;
    /** More padding. */
    uint32_t                au32Reserved[ARCH_BITS == 32 ? 5 : 2];
    /** Classes that may be taken prior to this one.
     * This is a linked list where each node contains a chunk of locks so that we
     * reduce the number of allocations as well as localize the data. */
    RTLOCKVALCLASSREFCHUNK  PriorLocks;
    /** Hash table containing frequently encountered prior locks. */
    PRTLOCKVALCLASSREF      apPriorLocksHash[17];
    /** Class name. (Allocated after the end of the block as usual.) */
    char const             *pszName;
    /** Where this class was created.
     * This is mainly used for finding automatically created lock classes.
     * @remarks The strings are stored after this structure so we won't crash
     *          if the class lives longer than the module (dll/so/dylib) that
     *          spawned it. */
    RTLOCKVALSRCPOS         CreatePos;
#ifdef RTLOCKVAL_WITH_CLASS_HASH_STATS
    /** Hash hits. */
    uint32_t volatile       cHashHits;
    /** Hash misses. */
    uint32_t volatile       cHashMisses;
#endif
} RTLOCKVALCLASSINT;
/* Layout guards: PriorLocks must sit at offset 64 (cache line boundary,
   presumably -- the assertion only proves the offset). */
AssertCompileSize(AVLLU32NODECORE, ARCH_BITS == 32 ? 20 : 32);
AssertCompileMemberOffset(RTLOCKVALCLASSINT, PriorLocks, 64);
226
227
228/*******************************************************************************
229* Global Variables *
230*******************************************************************************/
/** Serializing object destruction and deadlock detection.
 *
 * This makes sure that none of the memory examined by the deadlock detection
 * code will become invalid (reused for other purposes or made not present)
 * while the detection is in progress.
 *
 * NS: RTLOCKVALREC*, RTTHREADINT and RTLOCKVALDRECSHRD::papOwners destruction.
 * EW: Deadlock detection and some related activities.
 */
static RTSEMXROADS          g_hLockValidatorXRoads = NIL_RTSEMXROADS;
/** Serializing class tree insert and lookups. */
static RTSEMRW              g_hLockValClassTreeRWLock = NIL_RTSEMRW;
/** Class tree. */
static PAVLLU32NODECORE     g_LockValClassTree = NULL;
/** Critical section serializing the teaching new rules to the classes. */
static RTCRITSECT           g_LockValClassTeachCS;

/** Whether the lock validator is enabled or disabled.
 * Only applies to new locks.  Overridable via IPRT_LOCK_VALIDATOR_ENABLED /
 * IPRT_LOCK_VALIDATOR_DISABLED in ring-3 (see rtLockValidatorLazyInit). */
static bool volatile        g_fLockValidatorEnabled = true;
/** Set if the lock validator is quiet.  Defaults to noisy in strict builds. */
#ifdef RT_STRICT
static bool volatile        g_fLockValidatorQuiet = false;
#else
static bool volatile        g_fLockValidatorQuiet = true;
#endif
/** Set if the lock validator may panic.  Defaults to yes in strict builds. */
#ifdef RT_STRICT
static bool volatile        g_fLockValidatorMayPanic = true;
#else
static bool volatile        g_fLockValidatorMayPanic = false;
#endif
/** Whether to return an error status on wrong locking order. */
static bool volatile        g_fLockValSoftWrongOrder = false;
265
266
267/*******************************************************************************
268* Internal Functions *
269*******************************************************************************/
270static void rtLockValidatorClassDestroy(RTLOCKVALCLASSINT *pClass);
271static uint32_t rtLockValidatorStackDepth(PRTTHREADINT pThread);
272
273
/**
 * Lazy initialization of the lock validator globals.
 *
 * Creates the teaching critical section, the class tree RW-semaphore and the
 * crossroads semaphore (each only if not already created), then applies the
 * IPRT_LOCK_VALIDATOR_* environment overrides in ring-3.
 *
 * @note    Only the caller that wins the s_fInitializing compare-exchange
 *          performs the work; concurrent callers return immediately and may
 *          observe partially initialized globals for a short window.
 *          NOTE(review): confirm all users tolerate that window (the handle
 *          checks against NIL in the accessors suggest they do).
 */
static void rtLockValidatorLazyInit(void)
{
    static uint32_t volatile s_fInitializing = false;
    if (ASMAtomicCmpXchgU32(&s_fInitializing, true, false))
    {
        /*
         * The locks.
         */
        if (!RTCritSectIsInitialized(&g_LockValClassTeachCS))
            RTCritSectInitEx(&g_LockValClassTeachCS, RTCRITSECT_FLAGS_NO_LOCK_VAL, NIL_RTLOCKVALCLASS,
                             RTLOCKVAL_SUB_CLASS_ANY, "RTLockVal-Teach");

        if (g_hLockValClassTreeRWLock == NIL_RTSEMRW)
        {
            RTSEMRW hSemRW;
            int rc = RTSemRWCreateEx(&hSemRW, RTSEMRW_FLAGS_NO_LOCK_VAL, NIL_RTLOCKVALCLASS, RTLOCKVAL_SUB_CLASS_ANY, "RTLockVal-Tree");
            if (RT_SUCCESS(rc))
                ASMAtomicWriteHandle(&g_hLockValClassTreeRWLock, hSemRW);
        }

        if (g_hLockValidatorXRoads == NIL_RTSEMXROADS)
        {
            RTSEMXROADS hXRoads;
            int rc = RTSemXRoadsCreate(&hXRoads);
            if (RT_SUCCESS(rc))
                ASMAtomicWriteHandle(&g_hLockValidatorXRoads, hXRoads);
        }

#ifdef IN_RING3
        /*
         * Check the environment for our config variables.
         *
         * Note: for each pair the second check is evaluated last, so when both
         * variables of a pair are set, DISABLED wins over ENABLED, MAY_NOT_PANIC
         * over MAY_PANIC, QUIET over NOT_QUIET and SOFT_ORDER over STRICT_ORDER.
         */
        if (RTEnvExist("IPRT_LOCK_VALIDATOR_ENABLED"))
            ASMAtomicWriteBool(&g_fLockValidatorEnabled, true);
        if (RTEnvExist("IPRT_LOCK_VALIDATOR_DISABLED"))
            ASMAtomicWriteBool(&g_fLockValidatorEnabled, false);

        if (RTEnvExist("IPRT_LOCK_VALIDATOR_MAY_PANIC"))
            ASMAtomicWriteBool(&g_fLockValidatorMayPanic, true);
        if (RTEnvExist("IPRT_LOCK_VALIDATOR_MAY_NOT_PANIC"))
            ASMAtomicWriteBool(&g_fLockValidatorMayPanic, false);

        if (RTEnvExist("IPRT_LOCK_VALIDATOR_NOT_QUIET"))
            ASMAtomicWriteBool(&g_fLockValidatorQuiet, false);
        if (RTEnvExist("IPRT_LOCK_VALIDATOR_QUIET"))
            ASMAtomicWriteBool(&g_fLockValidatorQuiet, true);

        if (RTEnvExist("IPRT_LOCK_VALIDATOR_STRICT_ORDER"))
            ASMAtomicWriteBool(&g_fLockValSoftWrongOrder, false);
        if (RTEnvExist("IPRT_LOCK_VALIDATOR_SOFT_ORDER"))
            ASMAtomicWriteBool(&g_fLockValSoftWrongOrder, true);
#endif

        /*
         * Register cleanup
         */
        /** @todo register some cleanup callback if we care. */

        ASMAtomicWriteU32(&s_fInitializing, false);
    }
}
338
339
340
341/** Wrapper around ASMAtomicReadPtr. */
342DECL_FORCE_INLINE(PRTLOCKVALRECUNION) rtLockValidatorReadRecUnionPtr(PRTLOCKVALRECUNION volatile *ppRec)
343{
344 PRTLOCKVALRECUNION p = ASMAtomicReadPtrT(ppRec, PRTLOCKVALRECUNION);
345 RTLOCKVAL_ASSERT_PTR_ALIGN(p);
346 return p;
347}
348
349
/** Wrapper around ASMAtomicWritePtr.
 * Asserts the alignment of the new value before storing it atomically. */
DECL_FORCE_INLINE(void) rtLockValidatorWriteRecUnionPtr(PRTLOCKVALRECUNION volatile *ppRec, PRTLOCKVALRECUNION pRecNew)
{
    RTLOCKVAL_ASSERT_PTR_ALIGN(pRecNew);
    ASMAtomicWritePtr(ppRec, pRecNew);
}
356
357
358/** Wrapper around ASMAtomicReadPtr. */
359DECL_FORCE_INLINE(PRTTHREADINT) rtLockValidatorReadThreadHandle(RTTHREAD volatile *phThread)
360{
361 PRTTHREADINT p = ASMAtomicReadPtrT(phThread, PRTTHREADINT);
362 RTLOCKVAL_ASSERT_PTR_ALIGN(p);
363 return p;
364}
365
366
367/** Wrapper around ASMAtomicUoReadPtr. */
368DECL_FORCE_INLINE(PRTLOCKVALRECSHRDOWN) rtLockValidatorUoReadSharedOwner(PRTLOCKVALRECSHRDOWN volatile *ppOwner)
369{
370 PRTLOCKVALRECSHRDOWN p = ASMAtomicUoReadPtrT(ppOwner, PRTLOCKVALRECSHRDOWN);
371 RTLOCKVAL_ASSERT_PTR_ALIGN(p);
372 return p;
373}
374
375
376/**
377 * Reads a volatile thread handle field and returns the thread name.
378 *
379 * @returns Thread name (read only).
380 * @param phThread The thread handle field.
381 */
382static const char *rtLockValidatorNameThreadHandle(RTTHREAD volatile *phThread)
383{
384 PRTTHREADINT pThread = rtLockValidatorReadThreadHandle(phThread);
385 if (!pThread)
386 return "<NIL>";
387 if (!VALID_PTR(pThread))
388 return "<INVALID>";
389 if (pThread->u32Magic != RTTHREADINT_MAGIC)
390 return "<BAD-THREAD-MAGIC>";
391 return pThread->szName;
392}
393
394
/**
 * Launch a simple assertion like complaint w/ panic.
 *
 * @param   pszFile         Where from - file.
 * @param   iLine           Where from - line.
 * @param   pszFunction     Where from - function.
 * @param   pszWhat         What we're complaining about.
 * @param   ...             Format arguments.
 *
 * @note    g_fLockValidatorQuiet is deliberately re-read for the panic so a
 *          concurrent quiet-toggle is honoured.  NOTE(review): the panic here
 *          is gated on the quiet flag only, not on g_fLockValidatorMayPanic
 *          like rtLockValComplainPanic -- confirm this asymmetry is intended.
 */
static void rtLockValComplain(RT_SRC_POS_DECL, const char *pszWhat, ...)
{
    if (!ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
    {
        RTAssertMsg1Weak("RTLockValidator", iLine, pszFile, pszFunction);
        va_list va;
        va_start(va, pszWhat);
        RTAssertMsg2WeakV(pszWhat, va);
        va_end(va);
    }
    if (!ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
        RTAssertPanic();
}
417
418
/**
 * Describes the class.
 *
 * Does nothing when the validator is quiet.  Output is appended to the
 * current assertion message via RTAssertMsg2AddWeak.
 *
 * @param   pszPrefix       Message prefix.
 * @param   pClass          The class to complain about.
 * @param   uSubClass       My sub-class.
 * @param   fVerbose        Verbose description including relations to other
 *                          classes.
 */
static void rtLockValComplainAboutClass(const char *pszPrefix, RTLOCKVALCLASSINT *pClass, uint32_t uSubClass, bool fVerbose)
{
    if (ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
        return;

    /* Stringify the sub-class. */
    const char *pszSubClass;
    char        szSubClass[32];
    if (uSubClass < RTLOCKVAL_SUB_CLASS_USER)
        switch (uSubClass)
        {
            case RTLOCKVAL_SUB_CLASS_NONE: pszSubClass = "none"; break;
            case RTLOCKVAL_SUB_CLASS_ANY:  pszSubClass = "any"; break;
            default:
                RTStrPrintf(szSubClass, sizeof(szSubClass), "invl-%u", uSubClass);
                pszSubClass = szSubClass;
                break;
        }
    else
    {
        RTStrPrintf(szSubClass, sizeof(szSubClass), "%u", uSubClass);
        pszSubClass = szSubClass;
    }

    /* Validate the class pointer. */
    if (!VALID_PTR(pClass))
    {
        RTAssertMsg2AddWeak("%sbad class=%p sub-class=%s\n", pszPrefix, pClass, pszSubClass);
        return;
    }
    if (pClass->u32Magic != RTLOCKVALCLASS_MAGIC)
    {
        RTAssertMsg2AddWeak("%sbad class=%p magic=%#x sub-class=%s\n", pszPrefix, pClass, pClass->u32Magic, pszSubClass);
        return;
    }

    /* OK, dump the class info. */
    RTAssertMsg2AddWeak("%sclass=%p %s created={%Rbn(%u) %Rfn %p} sub-class=%s\n", pszPrefix,
                        pClass,
                        pClass->pszName,
                        pClass->CreatePos.pszFile,
                        pClass->CreatePos.uLine,
                        pClass->CreatePos.pszFunction,
                        pClass->CreatePos.uId,
                        pszSubClass);
    if (fVerbose)
    {
        /* One line per prior class, walking all chunks of the prior-lock list.
           i is the global entry index, cPrinted counts non-empty slots. */
        uint32_t i       = 0;
        uint32_t cPrinted = 0;
        for (PRTLOCKVALCLASSREFCHUNK pChunk = &pClass->PriorLocks; pChunk; pChunk = pChunk->pNext)
            for (unsigned j = 0; j < RT_ELEMENTS(pChunk->aRefs); j++, i++)
            {
                RTLOCKVALCLASSINT *pCurClass = pChunk->aRefs[j].hClass;
                if (pCurClass != NIL_RTLOCKVALCLASS)
                {
                    RTAssertMsg2AddWeak("%s%s #%02u: %s, %s, %u lookup%s\n", pszPrefix,
                                        cPrinted == 0
                                        ? "Prior:"
                                        : "      ",
                                        i,
                                        pCurClass->pszName,
                                        pChunk->aRefs[j].fAutodidacticism
                                        ? "autodidactic"
                                        : "manually    ",
                                        pChunk->aRefs[j].cLookups,
                                        pChunk->aRefs[j].cLookups != 1 ? "s" : "");
                    cPrinted++;
                }
            }
        if (!cPrinted)
            RTAssertMsg2AddWeak("%sPrior: none\n", pszPrefix);
#ifdef RTLOCKVAL_WITH_CLASS_HASH_STATS
        RTAssertMsg2AddWeak("%sHash Stats: %u hits, %u misses\n", pszPrefix, pClass->cHashHits, pClass->cHashMisses);
#endif
    }
    else
    {
        /* Compact form: prior class names, ten per output line; autodidactic
           entries are marked with a '*'. */
        uint32_t cPrinted = 0;
        for (PRTLOCKVALCLASSREFCHUNK pChunk = &pClass->PriorLocks; pChunk; pChunk = pChunk->pNext)
            for (unsigned j = 0; j < RT_ELEMENTS(pChunk->aRefs); j++)
            {
                RTLOCKVALCLASSINT *pCurClass = pChunk->aRefs[j].hClass;
                if (pCurClass != NIL_RTLOCKVALCLASS)
                {
                    if ((cPrinted % 10) == 0)
                        RTAssertMsg2AddWeak("%sPrior classes: %s%s", pszPrefix, pCurClass->pszName,
                                            pChunk->aRefs[j].fAutodidacticism ? "*" : "");
                    else if ((cPrinted % 10) != 9)
                        RTAssertMsg2AddWeak(", %s%s", pCurClass->pszName,
                                            pChunk->aRefs[j].fAutodidacticism ? "*" : "");
                    else
                        RTAssertMsg2AddWeak(", %s%s\n", pCurClass->pszName,
                                            pChunk->aRefs[j].fAutodidacticism ? "*" : "");
                    cPrinted++;
                }
            }
        if (!cPrinted)
            RTAssertMsg2AddWeak("%sPrior classes: none\n", pszPrefix);
        else if ((cPrinted % 10) != 0)
            RTAssertMsg2AddWeak("\n");
    }
}
530
531
532/**
533 * Helper for getting the class name.
534 * @returns Class name string.
535 * @param pClass The class.
536 */
537static const char *rtLockValComplainGetClassName(RTLOCKVALCLASSINT *pClass)
538{
539 if (!pClass)
540 return "<nil-class>";
541 if (!VALID_PTR(pClass))
542 return "<bad-class-ptr>";
543 if (pClass->u32Magic != RTLOCKVALCLASS_MAGIC)
544 return "<bad-class-magic>";
545 if (!pClass->pszName)
546 return "<no-class-name>";
547 return pClass->pszName;
548}
549
550/**
551 * Formats the sub-class.
552 *
553 * @returns Stringified sub-class.
554 * @param uSubClass The name.
555 * @param pszBuf Buffer that is big enough.
556 */
557static const char *rtLockValComplainGetSubClassName(uint32_t uSubClass, char *pszBuf)
558{
559 if (uSubClass < RTLOCKVAL_SUB_CLASS_USER)
560 switch (uSubClass)
561 {
562 case RTLOCKVAL_SUB_CLASS_NONE: return "none";
563 case RTLOCKVAL_SUB_CLASS_ANY: return "any";
564 default:
565 RTStrPrintf(pszBuf, 32, "invl-%u", uSubClass);
566 break;
567 }
568 else
569 RTStrPrintf(pszBuf, 32, "%x", uSubClass);
570 return pszBuf;
571}
572
573
574/**
575 * Helper for rtLockValComplainAboutLock.
576 */
577DECL_FORCE_INLINE(void) rtLockValComplainAboutLockHlp(const char *pszPrefix, PRTLOCKVALRECUNION pRec, const char *pszSuffix,
578 uint32_t u32Magic, PCRTLOCKVALSRCPOS pSrcPos, uint32_t cRecursion,
579 const char *pszFrameType)
580{
581 char szBuf[32];
582 switch (u32Magic)
583 {
584 case RTLOCKVALRECEXCL_MAGIC:
585#ifdef RTLOCKVAL_WITH_VERBOSE_DUMPS
586 RTAssertMsg2AddWeak("%s%p %s xrec=%p own=%s r=%u cls=%s/%s pos={%Rbn(%u) %Rfn %p} [x%s]%s", pszPrefix,
587 pRec->Excl.hLock, pRec->Excl.pszName, pRec,
588 rtLockValidatorNameThreadHandle(&pRec->Excl.hThread), cRecursion,
589 rtLockValComplainGetClassName(pRec->Excl.hClass),
590 rtLockValComplainGetSubClassName(pRec->Excl.uSubClass, szBuf),
591 pSrcPos->pszFile, pSrcPos->uLine, pSrcPos->pszFunction, pSrcPos->uId,
592 pszFrameType, pszSuffix);
593#else
594 RTAssertMsg2AddWeak("%s%p %s own=%s r=%u cls=%s/%s pos={%Rbn(%u) %Rfn %p} [x%s]%s", pszPrefix,
595 pRec->Excl.hLock, pRec->Excl.szName,
596 rtLockValidatorNameThreadHandle(&pRec->Excl.hThread), cRecursion,
597 rtLockValComplainGetClassName(pRec->Excl.hClass),
598 rtLockValComplainGetSubClassName(pRec->Excl.uSubClass, szBuf),
599 pSrcPos->pszFile, pSrcPos->uLine, pSrcPos->pszFunction, pSrcPos->uId,
600 pszFrameType, pszSuffix);
601#endif
602 break;
603
604 case RTLOCKVALRECSHRD_MAGIC:
605 RTAssertMsg2AddWeak("%ss %p %s srec=%p cls=%s/%s [s%s]%s", pszPrefix,
606 pRec->Shared.hLock, pRec->Shared.szName, pRec,
607 rtLockValComplainGetClassName(pRec->Shared.hClass),
608 rtLockValComplainGetSubClassName(pRec->Shared.uSubClass, szBuf),
609 pszFrameType, pszSuffix);
610 break;
611
612 case RTLOCKVALRECSHRDOWN_MAGIC:
613 {
614 PRTLOCKVALRECSHRD pShared = pRec->ShrdOwner.pSharedRec;
615 if ( VALID_PTR(pShared)
616 && pShared->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC)
617#ifdef RTLOCKVAL_WITH_VERBOSE_DUMPS
618 RTAssertMsg2AddWeak("%s%p %s srec=%p trec=%p own=%s r=%u cls=%s/%s pos={%Rbn(%u) %Rfn %p} [o%s]%s", pszPrefix,
619 pShared->hLock, pShared->pszName, pShared,
620 pRec, rtLockValidatorNameThreadHandle(&pRec->ShrdOwner.hThread), cRecursion,
621 rtLockValComplainGetClassName(pShared->hClass),
622 rtLockValComplainGetSubClassName(pShared->uSubClass, szBuf),
623 pSrcPos->pszFile, pSrcPos->uLine, pSrcPos->pszFunction, pSrcPos->uId,
624 pszSuffix2, pszSuffix);
625#else
626 RTAssertMsg2AddWeak("%s%p %s own=%s r=%u cls=%s/%s pos={%Rbn(%u) %Rfn %p} [o%s]%s", pszPrefix,
627 pShared->hLock, pShared->szName,
628 rtLockValidatorNameThreadHandle(&pRec->ShrdOwner.hThread), cRecursion,
629 rtLockValComplainGetClassName(pShared->hClass),
630 rtLockValComplainGetSubClassName(pShared->uSubClass, szBuf),
631 pSrcPos->pszFile, pSrcPos->uLine, pSrcPos->pszFunction, pSrcPos->uId,
632 pszFrameType, pszSuffix);
633#endif
634 else
635 RTAssertMsg2AddWeak("%sbad srec=%p trec=%p own=%s r=%u pos={%Rbn(%u) %Rfn %p} [x%s]%s", pszPrefix,
636 pShared,
637 pRec, rtLockValidatorNameThreadHandle(&pRec->ShrdOwner.hThread), cRecursion,
638 pSrcPos->pszFile, pSrcPos->uLine, pSrcPos->pszFunction, pSrcPos->uId,
639 pszFrameType, pszSuffix);
640 break;
641 }
642
643 default:
644 AssertMsgFailed(("%#x\n", u32Magic));
645 }
646}
647
648
/**
 * Describes the lock.
 *
 * Dispatches on the record magic and forwards to rtLockValComplainAboutLockHlp,
 * unwrapping recursion (nest) records to the real record first.  Silent when
 * the validator is quiet or pRec is invalid.
 *
 * @param   pszPrefix   Message prefix.
 * @param   pRec        The lock record we're working on.
 * @param   pszSuffix   Message suffix.
 */
static void rtLockValComplainAboutLock(const char *pszPrefix, PRTLOCKVALRECUNION pRec, const char *pszSuffix)
{
    /* With recursion records enabled the per-record recursion counter is
       meaningless (nest records carry it), so report 1 instead. */
#ifdef RTLOCKVAL_WITH_RECURSION_RECORDS
# define FIX_REC(r)     1
#else
# define FIX_REC(r)     (r)
#endif
    if (   VALID_PTR(pRec)
        && !ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
    {
        switch (pRec->Core.u32Magic)
        {
            case RTLOCKVALRECEXCL_MAGIC:
                rtLockValComplainAboutLockHlp(pszPrefix, pRec, pszSuffix, RTLOCKVALRECEXCL_MAGIC,
                                              &pRec->Excl.SrcPos, FIX_REC(pRec->Excl.cRecursion), "");
                break;

            case RTLOCKVALRECSHRD_MAGIC:
                rtLockValComplainAboutLockHlp(pszPrefix, pRec, pszSuffix, RTLOCKVALRECSHRD_MAGIC, NULL, 0, "");
                break;

            case RTLOCKVALRECSHRDOWN_MAGIC:
                rtLockValComplainAboutLockHlp(pszPrefix, pRec, pszSuffix, RTLOCKVALRECSHRDOWN_MAGIC,
                                              &pRec->ShrdOwner.SrcPos, FIX_REC(pRec->ShrdOwner.cRecursion), "");
                break;

            case RTLOCKVALRECNEST_MAGIC:
            {
                /* Recursion record: validate and describe the record it wraps,
                   tagging the output with "/r". */
                PRTLOCKVALRECUNION pRealRec = pRec->Nest.pRec;
                uint32_t u32Magic;
                if (   VALID_PTR(pRealRec)
                    && (   (u32Magic = pRealRec->Core.u32Magic) == RTLOCKVALRECEXCL_MAGIC
                        || u32Magic == RTLOCKVALRECSHRD_MAGIC
                        || u32Magic == RTLOCKVALRECSHRDOWN_MAGIC)
                   )
                    rtLockValComplainAboutLockHlp(pszPrefix, pRealRec, pszSuffix, u32Magic,
                                                  &pRec->Nest.SrcPos, pRec->Nest.cRecursion, "/r");
                else
                    RTAssertMsg2AddWeak("%sbad rrec=%p nrec=%p r=%u pos={%Rbn(%u) %Rfn %p}%s", pszPrefix,
                                        pRealRec, pRec, pRec->Nest.cRecursion,
                                        pRec->Nest.SrcPos.pszFile, pRec->Nest.SrcPos.uLine, pRec->Nest.SrcPos.pszFunction, pRec->Nest.SrcPos.uId,
                                        pszSuffix);
                break;
            }

            default:
                RTAssertMsg2AddWeak("%spRec=%p u32Magic=%#x (bad)%s", pszPrefix, pRec, pRec->Core.u32Magic, pszSuffix);
                break;
        }
    }
#undef FIX_REC
}
708
709
/**
 * Dump the lock stack.
 *
 * Walks the thread's lock stack from pStackTop via the per-record pDown
 * pointers, printing one line per frame.  Silent when quiet, the thread
 * pointer is invalid, or fewer than cMinFrames entries are on the stack.
 *
 * @param   pThread         The thread which lock stack we're gonna dump.
 * @param   cchIndent       The indentation in chars.
 * @param   cMinFrames      The minimum number of frames to consider
 *                          dumping.
 * @param   pHighightRec    Record that should be marked specially ("(*)") in
 *                          the dump.  (Name misspelled "Highight" in the
 *                          original -- kept for consistency.)
 */
static void rtLockValComplainAboutLockStack(PRTTHREADINT pThread, unsigned cchIndent, uint32_t cMinFrames,
                                            PRTLOCKVALRECUNION pHighightRec)
{
    if (   VALID_PTR(pThread)
        && !ASMAtomicUoReadBool(&g_fLockValidatorQuiet)
        && pThread->u32Magic == RTTHREADINT_MAGIC
       )
    {
        uint32_t cEntries = rtLockValidatorStackDepth(pThread);
        if (cEntries >= cMinFrames)
        {
            RTAssertMsg2AddWeak("%*s---- start of lock stack for %p %s - %u entr%s ----\n", cchIndent, "",
                                pThread, pThread->szName, cEntries, cEntries == 1 ? "y" : "ies");
            PRTLOCKVALRECUNION pCur = rtLockValidatorReadRecUnionPtr(&pThread->LockValidator.pStackTop);
            for (uint32_t i = 0; VALID_PTR(pCur); i++)
            {
                char szPrefix[80];
                RTStrPrintf(szPrefix, sizeof(szPrefix), "%*s#%02u: ", cchIndent, "", i);
                rtLockValComplainAboutLock(szPrefix, pCur, pHighightRec != pCur ? "\n" : " (*)\n");
                /* Advance to the next frame; an unknown magic terminates the
                   walk after flagging the bad frame. */
                switch (pCur->Core.u32Magic)
                {
                    case RTLOCKVALRECEXCL_MAGIC:    pCur = rtLockValidatorReadRecUnionPtr(&pCur->Excl.pDown); break;
                    case RTLOCKVALRECSHRDOWN_MAGIC: pCur = rtLockValidatorReadRecUnionPtr(&pCur->ShrdOwner.pDown); break;
                    case RTLOCKVALRECNEST_MAGIC:    pCur = rtLockValidatorReadRecUnionPtr(&pCur->Nest.pDown); break;
                    default:
                        RTAssertMsg2AddWeak("%*s<bad stack frame>\n", cchIndent, "");
                        pCur = NULL;
                        break;
                }
            }
            RTAssertMsg2AddWeak("%*s---- end of lock stack ----\n", cchIndent, "");
        }
    }
}
754
755
/**
 * Launch the initial complaint.
 *
 * Prints the assertion location, the complaint text with thread context, the
 * main lock record and optionally the calling thread's whole lock stack.
 * Does nothing when the validator is quiet.
 *
 * @param   pszWhat         What we're complaining about.
 * @param   pSrcPos         Where we are complaining from, as it were.  Can be
 *                          NULL (all fields degrade to 0/NULL).
 * @param   pThreadSelf     The calling thread.  Validated before use.
 * @param   pRec            The main lock involved.  Can be NULL.
 * @param   fDumpStack      Whether to dump the lock stack (true) or not
 *                          (false).
 */
static void rtLockValComplainFirst(const char *pszWhat, PCRTLOCKVALSRCPOS pSrcPos, PRTTHREADINT pThreadSelf,
                                   PRTLOCKVALRECUNION pRec, bool fDumpStack)
{
    if (!ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
    {
        ASMCompilerBarrier(); /* paranoia */
        RTAssertMsg1Weak("RTLockValidator", pSrcPos ? pSrcPos->uLine : 0, pSrcPos ? pSrcPos->pszFile : NULL, pSrcPos ? pSrcPos->pszFunction : NULL);
        if (pSrcPos && pSrcPos->uId)
            RTAssertMsg2Weak("%s  [uId=%p  thrd=%s]\n", pszWhat, pSrcPos->uId, VALID_PTR(pThreadSelf) ? pThreadSelf->szName : "<NIL>");
        else
            RTAssertMsg2Weak("%s  [thrd=%s]\n", pszWhat, VALID_PTR(pThreadSelf) ? pThreadSelf->szName : "<NIL>");
        rtLockValComplainAboutLock("Lock: ", pRec, "\n");
        if (fDumpStack)
            rtLockValComplainAboutLockStack(pThreadSelf, 0, 1, pRec);
    }
}
782
783
784/**
785 * Continue bitching.
786 *
787 * @param pszFormat Format string.
788 * @param ... Format arguments.
789 */
790static void rtLockValComplainMore(const char *pszFormat, ...)
791{
792 if (!ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
793 {
794 va_list va;
795 va_start(va, pszFormat);
796 RTAssertMsg2AddWeakV(pszFormat, va);
797 va_end(va);
798 }
799}
800
801
802/**
803 * Raise a panic if enabled.
804 */
805static void rtLockValComplainPanic(void)
806{
807 if (ASMAtomicUoReadBool(&g_fLockValidatorMayPanic))
808 RTAssertPanic();
809}
810
811
/**
 * Copy a source position record.
 *
 * Each field is written with an unordered atomic store, so individual fields
 * are torn-write free but the record as a whole is NOT copied atomically --
 * a concurrent reader can observe a mix of old and new fields.
 *
 * @param   pDst    The destination.
 * @param   pSrc    The source.  Can be NULL, in which case pDst is zeroed.
 */
DECL_FORCE_INLINE(void) rtLockValidatorSrcPosCopy(PRTLOCKVALSRCPOS pDst, PCRTLOCKVALSRCPOS pSrc)
{
    if (pSrc)
    {
        ASMAtomicUoWriteU32(&pDst->uLine, pSrc->uLine);
        ASMAtomicUoWritePtr(&pDst->pszFile, pSrc->pszFile);
        ASMAtomicUoWritePtr(&pDst->pszFunction, pSrc->pszFunction);
        ASMAtomicUoWritePtr((void * volatile *)&pDst->uId, (void *)pSrc->uId);
    }
    else
    {
        ASMAtomicUoWriteU32(&pDst->uLine, 0);
        ASMAtomicUoWriteNullPtr(&pDst->pszFile);
        ASMAtomicUoWriteNullPtr(&pDst->pszFunction);
        ASMAtomicUoWritePtr(&pDst->uId, (RTHCUINTPTR)0);
    }
}
835
836
837/**
838 * Init a source position record.
839 *
840 * @param pSrcPos The source position record.
841 */
842DECL_FORCE_INLINE(void) rtLockValidatorSrcPosInit(PRTLOCKVALSRCPOS pSrcPos)
843{
844 pSrcPos->pszFile = NULL;
845 pSrcPos->pszFunction = NULL;
846 pSrcPos->uId = 0;
847 pSrcPos->uLine = 0;
848#if HC_ARCH_BITS == 64
849 pSrcPos->u32Padding = 0;
850#endif
851}
852
853
854/**
855 * Hashes the specified source position.
856 *
857 * @returns Hash.
858 * @param pSrcPos The source position record.
859 */
860static uint32_t rtLockValidatorSrcPosHash(PCRTLOCKVALSRCPOS pSrcPos)
861{
862 uint32_t uHash;
863 if ( ( pSrcPos->pszFile
864 || pSrcPos->pszFunction)
865 && pSrcPos->uLine != 0)
866 {
867 uHash = 0;
868 if (pSrcPos->pszFile)
869 uHash = sdbmInc(pSrcPos->pszFile, uHash);
870 if (pSrcPos->pszFunction)
871 uHash = sdbmInc(pSrcPos->pszFunction, uHash);
872 uHash += pSrcPos->uLine;
873 }
874 else
875 {
876 Assert(pSrcPos->uId);
877 uHash = (uint32_t)pSrcPos->uId;
878 }
879
880 return uHash;
881}
882
883
884/**
885 * Compares two source positions.
886 *
887 * @returns 0 if equal, < 0 if pSrcPos1 is smaller than pSrcPos2, > 0 if
888 * otherwise.
889 * @param pSrcPos1 The first source position.
890 * @param pSrcPos2 The second source position.
891 */
892static int rtLockValidatorSrcPosCompare(PCRTLOCKVALSRCPOS pSrcPos1, PCRTLOCKVALSRCPOS pSrcPos2)
893{
894 if (pSrcPos1->uLine != pSrcPos2->uLine)
895 return pSrcPos1->uLine < pSrcPos2->uLine ? -1 : 1;
896
897 int iDiff = RTStrCmp(pSrcPos1->pszFile, pSrcPos2->pszFile);
898 if (iDiff != 0)
899 return iDiff;
900
901 iDiff = RTStrCmp(pSrcPos1->pszFunction, pSrcPos2->pszFunction);
902 if (iDiff != 0)
903 return iDiff;
904
905 if (pSrcPos1->uId != pSrcPos2->uId)
906 return pSrcPos1->uId < pSrcPos2->uId ? -1 : 1;
907 return 0;
908}
909
910
911
912/**
913 * Serializes destruction of RTLOCKVALREC* and RTTHREADINT structures.
914 */
915DECLHIDDEN(void) rtLockValidatorSerializeDestructEnter(void)
916{
917 RTSEMXROADS hXRoads = g_hLockValidatorXRoads;
918 if (hXRoads != NIL_RTSEMXROADS)
919 RTSemXRoadsNSEnter(hXRoads);
920}
921
922
923/**
924 * Call after rtLockValidatorSerializeDestructEnter.
925 */
926DECLHIDDEN(void) rtLockValidatorSerializeDestructLeave(void)
927{
928 RTSEMXROADS hXRoads = g_hLockValidatorXRoads;
929 if (hXRoads != NIL_RTSEMXROADS)
930 RTSemXRoadsNSLeave(hXRoads);
931}
932
933
934/**
935 * Serializes deadlock detection against destruction of the objects being
936 * inspected.
937 */
938DECLINLINE(void) rtLockValidatorSerializeDetectionEnter(void)
939{
940 RTSEMXROADS hXRoads = g_hLockValidatorXRoads;
941 if (hXRoads != NIL_RTSEMXROADS)
942 RTSemXRoadsEWEnter(hXRoads);
943}
944
945
946/**
947 * Call after rtLockValidatorSerializeDetectionEnter.
948 */
949DECLHIDDEN(void) rtLockValidatorSerializeDetectionLeave(void)
950{
951 RTSEMXROADS hXRoads = g_hLockValidatorXRoads;
952 if (hXRoads != NIL_RTSEMXROADS)
953 RTSemXRoadsEWLeave(hXRoads);
954}
955
956
/**
 * Initializes the per thread lock validator data.
 *
 * Only the shared-owner slot bitmap needs an explicit non-zero start value;
 * everything else is expected to be zero already.
 *
 * @param   pPerThread      The data.
 */
DECLHIDDEN(void) rtLockValidatorInitPerThread(RTLOCKVALPERTHREAD *pPerThread)
{
    /* All shared-owner record slots start out free (bit set = free). */
    pPerThread->bmFreeShrdOwners = UINT32_MAX;

    /* ASSUMES the rest has already been zeroed. */
    Assert(pPerThread->pRec == NULL);
    Assert(pPerThread->cWriteLocks == 0);
    Assert(pPerThread->cReadLocks == 0);
    Assert(pPerThread->fInValidator == false);
    Assert(pPerThread->pStackTop == NULL);
}
973
974
975/**
976 * Delete the per thread lock validator data.
977 *
978 * @param pPerThread The data.
979 */
980DECLHIDDEN(void) rtLockValidatorDeletePerThread(RTLOCKVALPERTHREAD *pPerThread)
981{
982 /*
983 * Check that the thread doesn't own any locks at this time.
984 */
985 if (pPerThread->pStackTop)
986 {
987 rtLockValComplainFirst("Thread terminating owning locks!", NULL,
988 RT_FROM_MEMBER(pPerThread, RTTHREADINT, LockValidator),
989 pPerThread->pStackTop, true);
990 rtLockValComplainPanic();
991 }
992
993 /*
994 * Free the recursion records.
995 */
996 PRTLOCKVALRECNEST pCur = pPerThread->pFreeNestRecs;
997 pPerThread->pFreeNestRecs = NULL;
998 while (pCur)
999 {
1000 PRTLOCKVALRECNEST pNext = pCur->pNextFree;
1001 RTMemFree(pCur);
1002 pCur = pNext;
1003 }
1004}
1005
1006RTDECL(int) RTLockValidatorClassCreateEx(PRTLOCKVALCLASS phClass, PCRTLOCKVALSRCPOS pSrcPos,
1007 bool fAutodidact, bool fRecursionOk, bool fStrictReleaseOrder,
1008 RTMSINTERVAL cMsMinDeadlock, RTMSINTERVAL cMsMinOrder,
1009 const char *pszNameFmt, ...)
1010{
1011 va_list va;
1012 va_start(va, pszNameFmt);
1013 int rc = RTLockValidatorClassCreateExV(phClass, pSrcPos, fAutodidact, fRecursionOk, fStrictReleaseOrder,
1014 cMsMinDeadlock, cMsMinOrder, pszNameFmt, va);
1015 va_end(va);
1016 return rc;
1017}
1018
1019
1020RTDECL(int) RTLockValidatorClassCreateExV(PRTLOCKVALCLASS phClass, PCRTLOCKVALSRCPOS pSrcPos,
1021 bool fAutodidact, bool fRecursionOk, bool fStrictReleaseOrder,
1022 RTMSINTERVAL cMsMinDeadlock, RTMSINTERVAL cMsMinOrder,
1023 const char *pszNameFmt, va_list va)
1024{
1025 Assert(cMsMinDeadlock >= 1);
1026 Assert(cMsMinOrder >= 1);
1027 AssertPtr(pSrcPos);
1028
1029 /*
1030 * Format the name and calc its length.
1031 */
1032 size_t cbName;
1033 char szName[32];
1034 if (pszNameFmt && *pszNameFmt)
1035 cbName = RTStrPrintfV(szName, sizeof(szName), pszNameFmt, va) + 1;
1036 else
1037 {
1038 static uint32_t volatile s_cAnonymous = 0;
1039 uint32_t i = ASMAtomicIncU32(&s_cAnonymous);
1040 cbName = RTStrPrintf(szName, sizeof(szName), "anon-%u", i - 1) + 1;
1041 }
1042
1043 /*
1044 * Figure out the file and function name lengths and allocate memory for
1045 * it all.
1046 */
1047 size_t const cbFile = pSrcPos->pszFile ? strlen(pSrcPos->pszFile) + 1 : 0;
1048 size_t const cbFunction = pSrcPos->pszFile ? strlen(pSrcPos->pszFunction) + 1 : 0;
1049 RTLOCKVALCLASSINT *pThis = (RTLOCKVALCLASSINT *)RTMemAllocVar(sizeof(*pThis) + cbFile + cbFunction + cbName);
1050 if (!pThis)
1051 return VERR_NO_MEMORY;
1052
1053 /*
1054 * Initialize the class data.
1055 */
1056 pThis->Core.Key = rtLockValidatorSrcPosHash(pSrcPos);
1057 pThis->Core.uchHeight = 0;
1058 pThis->Core.pLeft = NULL;
1059 pThis->Core.pRight = NULL;
1060 pThis->Core.pList = NULL;
1061 pThis->u32Magic = RTLOCKVALCLASS_MAGIC;
1062 pThis->cRefs = 1;
1063 pThis->fAutodidact = fAutodidact;
1064 pThis->fRecursionOk = fRecursionOk;
1065 pThis->fStrictReleaseOrder = fStrictReleaseOrder;
1066 pThis->fInTree = false;
1067 pThis->fDonateRefToNextRetainer = false;
1068 pThis->afReserved[0] = false;
1069 pThis->afReserved[1] = false;
1070 pThis->afReserved[2] = false;
1071 pThis->cMsMinDeadlock = cMsMinDeadlock;
1072 pThis->cMsMinOrder = cMsMinOrder;
1073 for (unsigned i = 0; i < RT_ELEMENTS(pThis->au32Reserved); i++)
1074 pThis->au32Reserved[i] = 0;
1075 for (unsigned i = 0; i < RT_ELEMENTS(pThis->au32Reserved); i++)
1076 {
1077 pThis->PriorLocks.aRefs[i].hClass = NIL_RTLOCKVALCLASS;
1078 pThis->PriorLocks.aRefs[i].cLookups = 0;
1079 pThis->PriorLocks.aRefs[i].fAutodidacticism = false;
1080 pThis->PriorLocks.aRefs[i].afReserved[0] = false;
1081 pThis->PriorLocks.aRefs[i].afReserved[1] = false;
1082 pThis->PriorLocks.aRefs[i].afReserved[2] = false;
1083 }
1084 pThis->PriorLocks.pNext = NULL;
1085 for (unsigned i = 0; i < RT_ELEMENTS(pThis->apPriorLocksHash); i++)
1086 pThis->apPriorLocksHash[i] = NULL;
1087 char *pszDst = (char *)(pThis + 1);
1088 pThis->pszName = (char *)memcpy(pszDst, szName, cbName);
1089 pszDst += cbName;
1090 rtLockValidatorSrcPosCopy(&pThis->CreatePos, pSrcPos);
1091 pThis->CreatePos.pszFile = pSrcPos->pszFile ? (char *)memcpy(pszDst, pSrcPos->pszFile, cbFile) : NULL;
1092 pszDst += cbFile;
1093 pThis->CreatePos.pszFunction= pSrcPos->pszFunction ? (char *)memcpy(pszDst, pSrcPos->pszFunction, cbFunction) : NULL;
1094 Assert(rtLockValidatorSrcPosHash(&pThis->CreatePos) == pThis->Core.Key);
1095#ifdef RTLOCKVAL_WITH_CLASS_HASH_STATS
1096 pThis->cHashHits = 0;
1097 pThis->cHashMisses = 0;
1098#endif
1099
1100 *phClass = pThis;
1101 return VINF_SUCCESS;
1102}
1103
1104
1105RTDECL(int) RTLockValidatorClassCreate(PRTLOCKVALCLASS phClass, bool fAutodidact, RT_SRC_POS_DECL, const char *pszNameFmt, ...)
1106{
1107 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_POS_NO_ID();
1108 va_list va;
1109 va_start(va, pszNameFmt);
1110 int rc = RTLockValidatorClassCreateExV(phClass, &SrcPos,
1111 fAutodidact, true /*fRecursionOk*/, false /*fStrictReleaseOrder*/,
1112 1 /*cMsMinDeadlock*/, 1 /*cMsMinOrder*/,
1113 pszNameFmt, va);
1114 va_end(va);
1115 return rc;
1116}
1117
1118
1119/**
1120 * Creates a new lock validator class with a reference that is consumed by the
1121 * first call to RTLockValidatorClassRetain.
1122 *
1123 * This is tailored for use in the parameter list of a semaphore constructor.
1124 *
1125 * @returns Class handle with a reference that is automatically consumed by the
1126 * first retainer. NIL_RTLOCKVALCLASS if we run into trouble.
1127 *
1128 * @param pszFile The source position of the call, file.
1129 * @param iLine The source position of the call, line.
1130 * @param pszFunction The source position of the call, function.
1131 * @param pszNameFmt Class name format string, optional (NULL). Max
1132 * length is 32 bytes.
1133 * @param ... Format string arguments.
1134 */
1135RTDECL(RTLOCKVALCLASS) RTLockValidatorClassCreateUnique(RT_SRC_POS_DECL, const char *pszNameFmt, ...)
1136{
1137 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_POS_NO_ID();
1138 RTLOCKVALCLASSINT *pClass;
1139 va_list va;
1140 va_start(va, pszNameFmt);
1141 int rc = RTLockValidatorClassCreateExV(&pClass, &SrcPos,
1142 true /*fAutodidact*/, true /*fRecursionOk*/, false /*fStrictReleaseOrder*/,
1143 1 /*cMsMinDeadlock*/, 1 /*cMsMinOrder*/,
1144 pszNameFmt, va);
1145 va_end(va);
1146 if (RT_FAILURE(rc))
1147 return NIL_RTLOCKVALCLASS;
1148 ASMAtomicWriteBool(&pClass->fDonateRefToNextRetainer, true); /* see rtLockValidatorClassRetain */
1149 return pClass;
1150}
1151
1152
/**
 * Internal class retainer.
 *
 * Saturates the reference counter at RTLOCKVALCLASS_MAX_REFS, and consumes
 * the reference donated via fDonateRefToNextRetainer (set by
 * RTLockValidatorClassCreateUnique) on the first retain after creation.
 *
 * @returns The new reference count.
 * @param   pClass      The class.
 */
DECL_FORCE_INLINE(uint32_t) rtLockValidatorClassRetain(RTLOCKVALCLASSINT *pClass)
{
    uint32_t cRefs = ASMAtomicIncU32(&pClass->cRefs);
    if (cRefs > RTLOCKVALCLASS_MAX_REFS)
        ASMAtomicWriteU32(&pClass->cRefs, RTLOCKVALCLASS_MAX_REFS); /* clamp back to the saturation value */
    else if (   cRefs == 2
             && ASMAtomicXchgBool(&pClass->fDonateRefToNextRetainer, false))
        cRefs = ASMAtomicDecU32(&pClass->cRefs); /* consume the donated creation reference */
    return cRefs;
}
1168
1169
1170/**
1171 * Validates and retains a lock validator class.
1172 *
1173 * @returns @a hClass on success, NIL_RTLOCKVALCLASS on failure.
1174 * @param hClass The class handle. NIL_RTLOCKVALCLASS is ok.
1175 */
1176DECL_FORCE_INLINE(RTLOCKVALCLASS) rtLockValidatorClassValidateAndRetain(RTLOCKVALCLASS hClass)
1177{
1178 if (hClass == NIL_RTLOCKVALCLASS)
1179 return hClass;
1180 AssertPtrReturn(hClass, NIL_RTLOCKVALCLASS);
1181 AssertReturn(hClass->u32Magic == RTLOCKVALCLASS_MAGIC, NIL_RTLOCKVALCLASS);
1182 rtLockValidatorClassRetain(hClass);
1183 return hClass;
1184}
1185
1186
/**
 * Internal class releaser.
 *
 * Keeps the counter saturated once it has reached RTLOCKVALCLASS_MAX_REFS
 * (such classes are never destroyed), otherwise destroys the class when the
 * last reference is dropped.
 *
 * @returns The new reference count.
 * @param   pClass      The class.
 */
DECLINLINE(uint32_t) rtLockValidatorClassRelease(RTLOCKVALCLASSINT *pClass)
{
    uint32_t cRefs = ASMAtomicDecU32(&pClass->cRefs);
    if (cRefs + 1 == RTLOCKVALCLASS_MAX_REFS)
        ASMAtomicWriteU32(&pClass->cRefs, RTLOCKVALCLASS_MAX_REFS); /* sticky: restore the saturation value */
    else if (!cRefs)
        rtLockValidatorClassDestroy(pClass); /* last reference gone */
    return cRefs;
}
1201
1202
1203/**
1204 * Destroys a class once there are not more references to it.
1205 *
1206 * @param Class The class.
1207 */
1208static void rtLockValidatorClassDestroy(RTLOCKVALCLASSINT *pClass)
1209{
1210 AssertReturnVoid(!pClass->fInTree);
1211 ASMAtomicWriteU32(&pClass->u32Magic, RTLOCKVALCLASS_MAGIC_DEAD);
1212
1213 PRTLOCKVALCLASSREFCHUNK pChunk = &pClass->PriorLocks;
1214 while (pChunk)
1215 {
1216 for (uint32_t i = 0; i < RT_ELEMENTS(pChunk->aRefs); i++)
1217 {
1218 RTLOCKVALCLASSINT *pClass2 = pChunk->aRefs[i].hClass;
1219 if (pClass2 != NIL_RTLOCKVALCLASS)
1220 {
1221 pChunk->aRefs[i].hClass = NIL_RTLOCKVALCLASS;
1222 rtLockValidatorClassRelease(pClass2);
1223 }
1224 }
1225
1226 PRTLOCKVALCLASSREFCHUNK pNext = pChunk->pNext;
1227 pChunk->pNext = NULL;
1228 if (pChunk != &pClass->PriorLocks)
1229 RTMemFree(pChunk);
1230 pChunk = pNext;
1231 }
1232
1233 RTMemFree(pClass);
1234}
1235
1236
1237RTDECL(RTLOCKVALCLASS) RTLockValidatorClassFindForSrcPos(PRTLOCKVALSRCPOS pSrcPos)
1238{
1239 if (g_hLockValClassTreeRWLock == NIL_RTSEMRW)
1240 rtLockValidatorLazyInit();
1241 int rcLock = RTSemRWRequestRead(g_hLockValClassTreeRWLock, RT_INDEFINITE_WAIT);
1242
1243 uint32_t uSrcPosHash = rtLockValidatorSrcPosHash(pSrcPos);
1244 RTLOCKVALCLASSINT *pClass = (RTLOCKVALCLASSINT *)RTAvllU32Get(&g_LockValClassTree, uSrcPosHash);
1245 while (pClass)
1246 {
1247 if (rtLockValidatorSrcPosCompare(&pClass->CreatePos, pSrcPos) == 0)
1248 break;
1249 pClass = (RTLOCKVALCLASSINT *)pClass->Core.pList;
1250 }
1251
1252 if (RT_SUCCESS(rcLock))
1253 RTSemRWReleaseRead(g_hLockValClassTreeRWLock);
1254 return pClass;
1255}
1256
1257
RTDECL(RTLOCKVALCLASS) RTLockValidatorClassForSrcPos(RT_SRC_POS_DECL, const char *pszNameFmt, ...)
{
    /* Try find an existing class for this source position first. */
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_POS_NO_ID();
    RTLOCKVALCLASS hClass = RTLockValidatorClassFindForSrcPos(&SrcPos);
    if (hClass == NIL_RTLOCKVALCLASS)
    {
        /*
         * Create a new class and insert it into the tree.
         */
        va_list va;
        va_start(va, pszNameFmt);
        int rc = RTLockValidatorClassCreateExV(&hClass, &SrcPos,
                                               true /*fAutodidact*/, true /*fRecursionOk*/, false /*fStrictReleaseOrder*/,
                                               1 /*cMsMinDeadlock*/, 1 /*cMsMinOrder*/,
                                               pszNameFmt, va);
        va_end(va);
        if (RT_SUCCESS(rc))
        {
            /* Lazily create the tree lock, then insert under the write lock. */
            if (g_hLockValClassTreeRWLock == NIL_RTSEMRW)
                rtLockValidatorLazyInit();
            int rcLock = RTSemRWRequestWrite(g_hLockValClassTreeRWLock, RT_INDEFINITE_WAIT);

            Assert(!hClass->fInTree);
            hClass->fInTree = RTAvllU32Insert(&g_LockValClassTree, &hClass->Core);
            Assert(hClass->fInTree);

            if (RT_SUCCESS(rcLock))
                RTSemRWReleaseWrite(g_hLockValClassTreeRWLock);
            return hClass;
        }
        /* On creation failure we fall through returning what the lookup gave
           us, i.e. NIL_RTLOCKVALCLASS. */
    }
    return hClass;
}
1291
1292
RTDECL(uint32_t) RTLockValidatorClassRetain(RTLOCKVALCLASS hClass)
{
    /* Validate the handle (nil is NOT ok here), then defer to the internal retainer. */
    RTLOCKVALCLASSINT *pClass = hClass;
    AssertPtrReturn(pClass, UINT32_MAX);
    AssertReturn(pClass->u32Magic == RTLOCKVALCLASS_MAGIC, UINT32_MAX);
    return rtLockValidatorClassRetain(pClass);
}
1300
1301
RTDECL(uint32_t) RTLockValidatorClassRelease(RTLOCKVALCLASS hClass)
{
    /* A nil handle is a no-op returning 0; otherwise validate and release. */
    RTLOCKVALCLASSINT *pClass = hClass;
    if (pClass == NIL_RTLOCKVALCLASS)
        return 0;
    AssertPtrReturn(pClass, UINT32_MAX);
    AssertReturn(pClass->u32Magic == RTLOCKVALCLASS_MAGIC, UINT32_MAX);
    return rtLockValidatorClassRelease(pClass);
}
1311
1312
/**
 * Worker for rtLockValidatorClassIsPriorClass that does a linear search thru
 * all the chunks for @a pPriorClass.
 *
 * On a hit the lookup counter of the entry is bumped and the per-class hash
 * table is updated so future lookups of frequently used rules hit the fast
 * path.
 *
 * @returns true / false.
 * @param   pClass          The class to search.
 * @param   pPriorClass     The class to search for.
 */
static bool rtLockValidatorClassIsPriorClassByLinearSearch(RTLOCKVALCLASSINT *pClass, RTLOCKVALCLASSINT *pPriorClass)
{
    for (PRTLOCKVALCLASSREFCHUNK pChunk = &pClass->PriorLocks; pChunk; pChunk = pChunk->pNext)
        for (uint32_t i = 0; i < RT_ELEMENTS(pChunk->aRefs); i++)
        {
            if (pChunk->aRefs[i].hClass == pPriorClass)
            {
                /* Count the lookup, clamping the counter so it cannot wrap. */
                uint32_t cLookups = ASMAtomicIncU32(&pChunk->aRefs[i].cLookups);
                if (RT_UNLIKELY(cLookups >= RTLOCKVALCLASSREF_MAX_LOOKUPS_FIX))
                {
                    ASMAtomicWriteU32(&pChunk->aRefs[i].cLookups, RTLOCKVALCLASSREF_MAX_LOOKUPS);
                    cLookups = RTLOCKVALCLASSREF_MAX_LOOKUPS;
                }

                /* update the hash table entry (only replace entries that are
                   looked up substantially less often). */
                PRTLOCKVALCLASSREF *ppHashEntry = &pClass->apPriorLocksHash[RTLOCKVALCLASS_HASH(pPriorClass)];
                if (    !(*ppHashEntry)
                    ||  (*ppHashEntry)->cLookups + 128 < cLookups)
                    ASMAtomicWritePtr(ppHashEntry, &pChunk->aRefs[i]);

#ifdef RTLOCKVAL_WITH_CLASS_HASH_STATS
                ASMAtomicIncU32(&pClass->cHashMisses);
#endif
                return true;
            }
        }

    return false;
}
1350
1351
/**
 * Checks if @a pPriorClass is a known prior class.
 *
 * Tries the per-class hash table first and falls back on a linear search
 * thru the prior-lock chunks on a miss.
 *
 * @returns true / false.
 * @param   pClass          The class to search.
 * @param   pPriorClass     The class to search for.
 */
DECL_FORCE_INLINE(bool) rtLockValidatorClassIsPriorClass(RTLOCKVALCLASSINT *pClass, RTLOCKVALCLASSINT *pPriorClass)
{
    /*
     * Hash lookup here.
     */
    PRTLOCKVALCLASSREF pRef = pClass->apPriorLocksHash[RTLOCKVALCLASS_HASH(pPriorClass)];
    if (   pRef
        && pRef->hClass == pPriorClass)
    {
        /* Count the lookup, clamping the counter at its maximum. */
        uint32_t cLookups = ASMAtomicIncU32(&pRef->cLookups);
        if (RT_UNLIKELY(cLookups >= RTLOCKVALCLASSREF_MAX_LOOKUPS_FIX))
            ASMAtomicWriteU32(&pRef->cLookups, RTLOCKVALCLASSREF_MAX_LOOKUPS);
#ifdef RTLOCKVAL_WITH_CLASS_HASH_STATS
        ASMAtomicIncU32(&pClass->cHashHits);
#endif
        return true;
    }

    return rtLockValidatorClassIsPriorClassByLinearSearch(pClass, pPriorClass);
}
1379
1380
/**
 * Adds a class to the prior list.
 *
 * @returns VINF_SUCCESS, VERR_NO_MEMORY or VERR_SEM_LV_WRONG_ORDER.
 * @param   pClass              The class to work on.
 * @param   pPriorClass         The class to add.
 * @param   fAutodidacticism    Whether we're teaching ourselves (true) or
 *                              somebody is teaching us via the API (false).
 * @param   pSrcPos             Where this rule was added (optional).
 */
static int rtLockValidatorClassAddPriorClass(RTLOCKVALCLASSINT *pClass, RTLOCKVALCLASSINT *pPriorClass,
                                             bool fAutodidacticism, PCRTLOCKVALSRCPOS pSrcPos)
{
    /* Serialize all teaching thru the (lazily created) global teach critsect. */
    if (!RTCritSectIsInitialized(&g_LockValClassTeachCS))
        rtLockValidatorLazyInit();
    int rcLock = RTCritSectEnter(&g_LockValClassTeachCS);

    /*
     * Check that there are no conflict (no assert since we might race each other).
     */
    int rc = VERR_SEM_LV_INTERNAL_ERROR;
    if (!rtLockValidatorClassIsPriorClass(pPriorClass, pClass))
    {
        if (!rtLockValidatorClassIsPriorClass(pClass, pPriorClass))
        {
            /*
             * Scan the table for a free entry, allocating a new chunk if necessary.
             */
            for (PRTLOCKVALCLASSREFCHUNK pChunk = &pClass->PriorLocks; ; pChunk = pChunk->pNext)
            {
                bool fDone = false;
                for (uint32_t i = 0; i < RT_ELEMENTS(pChunk->aRefs); i++)
                {
                    /* Claim the slot atomically; lookups scan without holding the critsect. */
                    ASMAtomicCmpXchgHandle(&pChunk->aRefs[i].hClass, pPriorClass, NIL_RTLOCKVALCLASS, fDone);
                    if (fDone)
                    {
                        pChunk->aRefs[i].fAutodidacticism = fAutodidacticism;
                        rtLockValidatorClassRetain(pPriorClass);
                        rc = VINF_SUCCESS;
                        break;
                    }
                }
                if (fDone)
                    break;

                /* If no more chunks, allocate a new one and insert the class before linking it. */
                if (!pChunk->pNext)
                {
                    PRTLOCKVALCLASSREFCHUNK pNew = (PRTLOCKVALCLASSREFCHUNK)RTMemAlloc(sizeof(*pNew));
                    if (!pNew)
                    {
                        rc = VERR_NO_MEMORY;
                        break;
                    }
                    pNew->pNext = NULL;
                    for (uint32_t i = 0; i < RT_ELEMENTS(pNew->aRefs); i++)
                    {
                        pNew->aRefs[i].hClass = NIL_RTLOCKVALCLASS;
                        pNew->aRefs[i].cLookups = 0;
                        pNew->aRefs[i].fAutodidacticism = false;
                        pNew->aRefs[i].afReserved[0] = false;
                        pNew->aRefs[i].afReserved[1] = false;
                        pNew->aRefs[i].afReserved[2] = false;
                    }

                    pNew->aRefs[0].hClass = pPriorClass;
                    pNew->aRefs[0].fAutodidacticism = fAutodidacticism;

                    /* Publish the fully initialized chunk with a single pointer write. */
                    ASMAtomicWritePtr(&pChunk->pNext, pNew);
                    rtLockValidatorClassRetain(pPriorClass);
                    rc = VINF_SUCCESS;
                    break;
                }
            } /* chunk loop */
        }
        else
            rc = VINF_SUCCESS; /* the rule is already present */
    }
    else
        /* The reverse rule exists, so adding this one would be a lock order
           violation - unless soft wrong-order mode is enabled. */
        rc = !g_fLockValSoftWrongOrder ? VERR_SEM_LV_WRONG_ORDER : VINF_SUCCESS;

    if (RT_SUCCESS(rcLock))
        RTCritSectLeave(&g_LockValClassTeachCS);
    return rc;
}
1466
1467
RTDECL(int) RTLockValidatorClassAddPriorClass(RTLOCKVALCLASS hClass, RTLOCKVALCLASS hPriorClass)
{
    /* Validate both handles, then hand over to the internal worker. */
    RTLOCKVALCLASSINT *pClass = hClass;
    AssertPtrReturn(pClass, VERR_INVALID_HANDLE);
    AssertReturn(pClass->u32Magic == RTLOCKVALCLASS_MAGIC, VERR_INVALID_HANDLE);

    RTLOCKVALCLASSINT *pPriorClass = hPriorClass;
    AssertPtrReturn(pPriorClass, VERR_INVALID_HANDLE);
    AssertReturn(pPriorClass->u32Magic == RTLOCKVALCLASS_MAGIC, VERR_INVALID_HANDLE);

    return rtLockValidatorClassAddPriorClass(pClass, pPriorClass, false /*fAutodidacticism*/, NULL);
}
1480
1481
RTDECL(int) RTLockValidatorClassEnforceStrictReleaseOrder(RTLOCKVALCLASS hClass, bool fEnabled)
{
    /* Validate the handle and atomically flip the strict release order flag. */
    RTLOCKVALCLASSINT *pClass = hClass;
    AssertPtrReturn(pClass, VERR_INVALID_HANDLE);
    AssertReturn(pClass->u32Magic == RTLOCKVALCLASS_MAGIC, VERR_INVALID_HANDLE);

    ASMAtomicWriteBool(&pClass->fStrictReleaseOrder, fEnabled);
    return VINF_SUCCESS;
}
1491
1492
1493/**
1494 * Unlinks all siblings.
1495 *
1496 * This is used during record deletion and assumes no races.
1497 *
1498 * @param pCore One of the siblings.
1499 */
1500static void rtLockValidatorUnlinkAllSiblings(PRTLOCKVALRECCORE pCore)
1501{
1502 /* ASSUMES sibling destruction doesn't involve any races and that all
1503 related records are to be disposed off now. */
1504 PRTLOCKVALRECUNION pSibling = (PRTLOCKVALRECUNION)pCore;
1505 while (pSibling)
1506 {
1507 PRTLOCKVALRECUNION volatile *ppCoreNext;
1508 switch (pSibling->Core.u32Magic)
1509 {
1510 case RTLOCKVALRECEXCL_MAGIC:
1511 case RTLOCKVALRECEXCL_MAGIC_DEAD:
1512 ppCoreNext = &pSibling->Excl.pSibling;
1513 break;
1514
1515 case RTLOCKVALRECSHRD_MAGIC:
1516 case RTLOCKVALRECSHRD_MAGIC_DEAD:
1517 ppCoreNext = &pSibling->Shared.pSibling;
1518 break;
1519
1520 default:
1521 AssertFailed();
1522 ppCoreNext = NULL;
1523 break;
1524 }
1525 if (RT_UNLIKELY(ppCoreNext))
1526 break;
1527 pSibling = ASMAtomicXchgPtrT(ppCoreNext, NULL, PRTLOCKVALRECUNION);
1528 }
1529}
1530
1531
1532RTDECL(int) RTLockValidatorRecMakeSiblings(PRTLOCKVALRECCORE pRec1, PRTLOCKVALRECCORE pRec2)
1533{
1534 /*
1535 * Validate input.
1536 */
1537 PRTLOCKVALRECUNION p1 = (PRTLOCKVALRECUNION)pRec1;
1538 PRTLOCKVALRECUNION p2 = (PRTLOCKVALRECUNION)pRec2;
1539
1540 AssertPtrReturn(p1, VERR_SEM_LV_INVALID_PARAMETER);
1541 AssertReturn( p1->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC
1542 || p1->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
1543 , VERR_SEM_LV_INVALID_PARAMETER);
1544
1545 AssertPtrReturn(p2, VERR_SEM_LV_INVALID_PARAMETER);
1546 AssertReturn( p2->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC
1547 || p2->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
1548 , VERR_SEM_LV_INVALID_PARAMETER);
1549
1550 /*
1551 * Link them (circular list).
1552 */
1553 if ( p1->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC
1554 && p2->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC)
1555 {
1556 p1->Excl.pSibling = p2;
1557 p2->Shared.pSibling = p1;
1558 }
1559 else if ( p1->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
1560 && p2->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC)
1561 {
1562 p1->Shared.pSibling = p2;
1563 p2->Excl.pSibling = p1;
1564 }
1565 else
1566 AssertFailedReturn(VERR_SEM_LV_INVALID_PARAMETER); /* unsupported mix */
1567
1568 return VINF_SUCCESS;
1569}
1570
1571
/**
 * Gets the lock name for the given record.
 *
 * @returns Read-only lock name.
 * @param   pRec    The lock record.
 */
DECL_FORCE_INLINE(const char *) rtLockValidatorRecName(PRTLOCKVALRECUNION pRec)
{
    switch (pRec->Core.u32Magic)
    {
        case RTLOCKVALRECEXCL_MAGIC:
            return pRec->Excl.szName;
        case RTLOCKVALRECSHRD_MAGIC:
            return pRec->Shared.szName;
        case RTLOCKVALRECSHRDOWN_MAGIC:
            /* Shared-owner records borrow the name of their parent shared record. */
            return pRec->ShrdOwner.pSharedRec ? pRec->ShrdOwner.pSharedRec->szName : "orphaned";
        case RTLOCKVALRECNEST_MAGIC:
            /* Recursion records defer to the record they wrap; re-run the type
               dispatch on that record. */
            pRec = rtLockValidatorReadRecUnionPtr(&pRec->Nest.pRec);
            if (VALID_PTR(pRec))
            {
                switch (pRec->Core.u32Magic)
                {
                    case RTLOCKVALRECEXCL_MAGIC:
                        return pRec->Excl.szName;
                    case RTLOCKVALRECSHRD_MAGIC:
                        return pRec->Shared.szName;
                    case RTLOCKVALRECSHRDOWN_MAGIC:
                        return pRec->ShrdOwner.pSharedRec ? pRec->ShrdOwner.pSharedRec->szName : "orphaned";
                    default:
                        return "unknown-nested";
                }
            }
            return "orphaned-nested";
        default:
            return "unknown";
    }
}
1609
1610
/**
 * Gets the class for this locking record.
 *
 * @returns Pointer to the class or NIL_RTLOCKVALCLASS.
 * @param   pRec    The lock validator record.
 */
DECLINLINE(RTLOCKVALCLASSINT *) rtLockValidatorRecGetClass(PRTLOCKVALRECUNION pRec)
{
    switch (pRec->Core.u32Magic)
    {
        case RTLOCKVALRECEXCL_MAGIC:
            return pRec->Excl.hClass;

        case RTLOCKVALRECSHRD_MAGIC:
            return pRec->Shared.hClass;

        case RTLOCKVALRECSHRDOWN_MAGIC:
        {
            /* Owner records take the class from their parent shared record,
               provided the parent pointer still looks valid. */
            PRTLOCKVALRECSHRD pSharedRec = pRec->ShrdOwner.pSharedRec;
            if (RT_LIKELY(   VALID_PTR(pSharedRec)
                          && pSharedRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC))
                return pSharedRec->hClass;
            return NIL_RTLOCKVALCLASS;
        }

        case RTLOCKVALRECNEST_MAGIC:
        {
            /* Recursion records defer to the record they wrap. */
            PRTLOCKVALRECUNION pRealRec = pRec->Nest.pRec;
            if (VALID_PTR(pRealRec))
            {
                switch (pRealRec->Core.u32Magic)
                {
                    case RTLOCKVALRECEXCL_MAGIC:
                        return pRealRec->Excl.hClass;

                    case RTLOCKVALRECSHRDOWN_MAGIC:
                    {
                        PRTLOCKVALRECSHRD pSharedRec = pRealRec->ShrdOwner.pSharedRec;
                        if (RT_LIKELY(   VALID_PTR(pSharedRec)
                                      && pSharedRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC))
                            return pSharedRec->hClass;
                        break;
                    }

                    default:
                        AssertMsgFailed(("%p %p %#x\n", pRec, pRealRec, pRealRec->Core.u32Magic));
                        break;
                }
            }
            return NIL_RTLOCKVALCLASS;
        }

        default:
            AssertMsgFailed(("%#x\n", pRec->Core.u32Magic));
            return NIL_RTLOCKVALCLASS;
    }
}
1668
1669
/**
 * Gets the class for this locking record and the pointer to the one below it in
 * the stack.
 *
 * @returns Pointer to the class or NIL_RTLOCKVALCLASS.
 * @param   pRec        The lock validator record.
 * @param   puSubClass  Where to return the sub-class.
 * @param   ppDown      Where to return the pointer to the record below.
 */
DECL_FORCE_INLINE(RTLOCKVALCLASSINT *)
rtLockValidatorRecGetClassesAndDown(PRTLOCKVALRECUNION pRec, uint32_t *puSubClass, PRTLOCKVALRECUNION *ppDown)
{
    switch (pRec->Core.u32Magic)
    {
        case RTLOCKVALRECEXCL_MAGIC:
            *ppDown = pRec->Excl.pDown;
            *puSubClass = pRec->Excl.uSubClass;
            return pRec->Excl.hClass;

        case RTLOCKVALRECSHRD_MAGIC:
            /* Shared records carry no down pointer of their own. */
            *ppDown = NULL;
            *puSubClass = pRec->Shared.uSubClass;
            return pRec->Shared.hClass;

        case RTLOCKVALRECSHRDOWN_MAGIC:
        {
            *ppDown = pRec->ShrdOwner.pDown;

            /* Class and sub-class come from the parent shared record, if it
               still looks valid. */
            PRTLOCKVALRECSHRD pSharedRec = pRec->ShrdOwner.pSharedRec;
            if (RT_LIKELY(   VALID_PTR(pSharedRec)
                          && pSharedRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC))
            {
                *puSubClass = pSharedRec->uSubClass;
                return pSharedRec->hClass;
            }
            *puSubClass = RTLOCKVAL_SUB_CLASS_NONE;
            return NIL_RTLOCKVALCLASS;
        }

        case RTLOCKVALRECNEST_MAGIC:
        {
            *ppDown = pRec->Nest.pDown;

            /* Recursion records defer to the record they wrap. */
            PRTLOCKVALRECUNION pRealRec = pRec->Nest.pRec;
            if (VALID_PTR(pRealRec))
            {
                switch (pRealRec->Core.u32Magic)
                {
                    case RTLOCKVALRECEXCL_MAGIC:
                        *puSubClass = pRealRec->Excl.uSubClass;
                        return pRealRec->Excl.hClass;

                    case RTLOCKVALRECSHRDOWN_MAGIC:
                    {
                        PRTLOCKVALRECSHRD pSharedRec = pRealRec->ShrdOwner.pSharedRec;
                        if (RT_LIKELY(   VALID_PTR(pSharedRec)
                                      && pSharedRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC))
                        {
                            *puSubClass = pSharedRec->uSubClass;
                            return pSharedRec->hClass;
                        }
                        break;
                    }

                    default:
                        AssertMsgFailed(("%p %p %#x\n", pRec, pRealRec, pRealRec->Core.u32Magic));
                        break;
                }
            }
            *puSubClass = RTLOCKVAL_SUB_CLASS_NONE;
            return NIL_RTLOCKVALCLASS;
        }

        default:
            AssertMsgFailed(("%#x\n", pRec->Core.u32Magic));
            *ppDown = NULL;
            *puSubClass = RTLOCKVAL_SUB_CLASS_NONE;
            return NIL_RTLOCKVALCLASS;
    }
}
1750
1751
1752/**
1753 * Gets the sub-class for a lock record.
1754 *
1755 * @returns the sub-class.
1756 * @param pRec The lock validator record.
1757 */
1758DECLINLINE(uint32_t) rtLockValidatorRecGetSubClass(PRTLOCKVALRECUNION pRec)
1759{
1760 switch (pRec->Core.u32Magic)
1761 {
1762 case RTLOCKVALRECEXCL_MAGIC:
1763 return pRec->Excl.uSubClass;
1764
1765 case RTLOCKVALRECSHRD_MAGIC:
1766 return pRec->Shared.uSubClass;
1767
1768 case RTLOCKVALRECSHRDOWN_MAGIC:
1769 {
1770 PRTLOCKVALRECSHRD pSharedRec = pRec->ShrdOwner.pSharedRec;
1771 if (RT_LIKELY( VALID_PTR(pSharedRec)
1772 && pSharedRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC))
1773 return pSharedRec->uSubClass;
1774 return RTLOCKVAL_SUB_CLASS_NONE;
1775 }
1776
1777 case RTLOCKVALRECNEST_MAGIC:
1778 {
1779 PRTLOCKVALRECUNION pRealRec = pRec->Nest.pRec;
1780 if (VALID_PTR(pRealRec))
1781 {
1782 switch (pRealRec->Core.u32Magic)
1783 {
1784 case RTLOCKVALRECEXCL_MAGIC:
1785 return pRec->Excl.uSubClass;
1786
1787 case RTLOCKVALRECSHRDOWN_MAGIC:
1788 {
1789 PRTLOCKVALRECSHRD pSharedRec = pRealRec->ShrdOwner.pSharedRec;
1790 if (RT_LIKELY( VALID_PTR(pSharedRec)
1791 && pSharedRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC))
1792 return pSharedRec->uSubClass;
1793 break;
1794 }
1795
1796 default:
1797 AssertMsgFailed(("%p %p %#x\n", pRec, pRealRec, pRealRec->Core.u32Magic));
1798 break;
1799 }
1800 }
1801 return RTLOCKVAL_SUB_CLASS_NONE;
1802 }
1803
1804 default:
1805 AssertMsgFailed(("%#x\n", pRec->Core.u32Magic));
1806 return RTLOCKVAL_SUB_CLASS_NONE;
1807 }
1808}
1809
1810
1811
1812
1813/**
1814 * Calculates the depth of a lock stack.
1815 *
1816 * @returns Number of stack frames.
1817 * @param pThread The thread.
1818 */
1819static uint32_t rtLockValidatorStackDepth(PRTTHREADINT pThread)
1820{
1821 uint32_t cEntries = 0;
1822 PRTLOCKVALRECUNION pCur = rtLockValidatorReadRecUnionPtr(&pThread->LockValidator.pStackTop);
1823 while (VALID_PTR(pCur))
1824 {
1825 switch (pCur->Core.u32Magic)
1826 {
1827 case RTLOCKVALRECEXCL_MAGIC:
1828 pCur = rtLockValidatorReadRecUnionPtr(&pCur->Excl.pDown);
1829 break;
1830
1831 case RTLOCKVALRECSHRDOWN_MAGIC:
1832 pCur = rtLockValidatorReadRecUnionPtr(&pCur->ShrdOwner.pDown);
1833 break;
1834
1835 case RTLOCKVALRECNEST_MAGIC:
1836 pCur = rtLockValidatorReadRecUnionPtr(&pCur->Nest.pDown);
1837 break;
1838
1839 default:
1840 AssertMsgFailedReturn(("%#x\n", pCur->Core.u32Magic), cEntries);
1841 }
1842 cEntries++;
1843 }
1844 return cEntries;
1845}
1846
1847
1848/**
1849 * Checks if the stack contains @a pRec.
1850 *
1851 * @returns true / false.
1852 * @param pThreadSelf The current thread.
1853 * @param pRec The lock record.
1854 */
1855static bool rtLockValidatorStackContainsRec(PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION pRec)
1856{
1857 PRTLOCKVALRECUNION pCur = pThreadSelf->LockValidator.pStackTop;
1858 while (pCur)
1859 {
1860 AssertPtrReturn(pCur, false);
1861 if (pCur == pRec)
1862 return true;
1863 switch (pCur->Core.u32Magic)
1864 {
1865 case RTLOCKVALRECEXCL_MAGIC:
1866 Assert(pCur->Excl.cRecursion >= 1);
1867 pCur = pCur->Excl.pDown;
1868 break;
1869
1870 case RTLOCKVALRECSHRDOWN_MAGIC:
1871 Assert(pCur->ShrdOwner.cRecursion >= 1);
1872 pCur = pCur->ShrdOwner.pDown;
1873 break;
1874
1875 case RTLOCKVALRECNEST_MAGIC:
1876 Assert(pCur->Nest.cRecursion > 1);
1877 pCur = pCur->Nest.pDown;
1878 break;
1879
1880 default:
1881 AssertMsgFailedReturn(("%#x\n", pCur->Core.u32Magic), false);
1882 }
1883 }
1884 return false;
1885}
1886
1887
/**
 * Pushes a lock record onto the stack.
 *
 * The record must not already be on the stack and must have a recursion
 * count of exactly one.
 *
 * @param   pThreadSelf     The current thread.
 * @param   pRec            The lock record.
 */
static void rtLockValidatorStackPush(PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION pRec)
{
    Assert(pThreadSelf == RTThreadSelf());
    Assert(!rtLockValidatorStackContainsRec(pThreadSelf, pRec));

    /* First link the current stack top below the new record... */
    switch (pRec->Core.u32Magic)
    {
        case RTLOCKVALRECEXCL_MAGIC:
            Assert(pRec->Excl.cRecursion == 1);
            Assert(pRec->Excl.pDown == NULL);
            rtLockValidatorWriteRecUnionPtr(&pRec->Excl.pDown, pThreadSelf->LockValidator.pStackTop);
            break;

        case RTLOCKVALRECSHRDOWN_MAGIC:
            Assert(pRec->ShrdOwner.cRecursion == 1);
            Assert(pRec->ShrdOwner.pDown == NULL);
            rtLockValidatorWriteRecUnionPtr(&pRec->ShrdOwner.pDown, pThreadSelf->LockValidator.pStackTop);
            break;

        default:
            AssertMsgFailedReturnVoid(("%#x\n", pRec->Core.u32Magic));
    }
    /* ... then make it the new stack top. */
    rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pStackTop, pRec);
}
1918
1919
/**
 * Pops a lock record off the stack.
 *
 * The record need not be at the top; when it is not, the stack is walked to
 * find and splice it out.
 *
 * @param   pThreadSelf     The current thread.
 * @param   pRec            The lock.
 */
static void rtLockValidatorStackPop(PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION pRec)
{
    Assert(pThreadSelf == RTThreadSelf());

    /* Detach pRec, remembering what was below it on the stack. */
    PRTLOCKVALRECUNION pDown;
    switch (pRec->Core.u32Magic)
    {
        case RTLOCKVALRECEXCL_MAGIC:
            Assert(pRec->Excl.cRecursion == 0);
            pDown = pRec->Excl.pDown;
            rtLockValidatorWriteRecUnionPtr(&pRec->Excl.pDown, NULL); /* lazy bird */
            break;

        case RTLOCKVALRECSHRDOWN_MAGIC:
            Assert(pRec->ShrdOwner.cRecursion == 0);
            pDown = pRec->ShrdOwner.pDown;
            rtLockValidatorWriteRecUnionPtr(&pRec->ShrdOwner.pDown, NULL);
            break;

        default:
            AssertMsgFailedReturnVoid(("%#x\n", pRec->Core.u32Magic));
    }
    /* Common case: pRec is the top of the stack. */
    if (pThreadSelf->LockValidator.pStackTop == pRec)
        rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pStackTop, pDown);
    else
    {
        /* Find the pointer to our record and unlink ourselves. */
        PRTLOCKVALRECUNION pCur = pThreadSelf->LockValidator.pStackTop;
        while (pCur)
        {
            PRTLOCKVALRECUNION volatile *ppDown;
            switch (pCur->Core.u32Magic)
            {
                case RTLOCKVALRECEXCL_MAGIC:
                    Assert(pCur->Excl.cRecursion >= 1);
                    ppDown = &pCur->Excl.pDown;
                    break;

                case RTLOCKVALRECSHRDOWN_MAGIC:
                    Assert(pCur->ShrdOwner.cRecursion >= 1);
                    ppDown = &pCur->ShrdOwner.pDown;
                    break;

                case RTLOCKVALRECNEST_MAGIC:
                    Assert(pCur->Nest.cRecursion >= 1);
                    ppDown = &pCur->Nest.pDown;
                    break;

                default:
                    AssertMsgFailedReturnVoid(("%#x\n", pCur->Core.u32Magic));
            }
            pCur = *ppDown;
            if (pCur == pRec)
            {
                /* Splice pRec out by pointing its parent at what was below it. */
                rtLockValidatorWriteRecUnionPtr(ppDown, pDown);
                return;
            }
        }
        AssertMsgFailed(("%p %p\n", pRec, pThreadSelf)); /* record not found on the stack */
    }
}
1987
1988
1989/**
1990 * Creates and pushes lock recursion record onto the stack.
1991 *
1992 * @param pThreadSelf The current thread.
1993 * @param pRec The lock record.
1994 * @param pSrcPos Where the recursion occurred.
1995 */
static void rtLockValidatorStackPushRecursion(PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION pRec, PCRTLOCKVALSRCPOS pSrcPos)
{
    Assert(pThreadSelf == RTThreadSelf());
    Assert(rtLockValidatorStackContainsRec(pThreadSelf, pRec));

#ifdef RTLOCKVAL_WITH_RECURSION_RECORDS
    /*
     * Allocate a new recursion record, preferring the thread's free list over
     * the heap.  Allocation failure is silently tolerated: recording the
     * recursion is best effort, the validator simply loses this level.
     */
    PRTLOCKVALRECNEST pRecursionRec = pThreadSelf->LockValidator.pFreeNestRecs;
    if (pRecursionRec)
        pThreadSelf->LockValidator.pFreeNestRecs = pRecursionRec->pNextFree;
    else
    {
        pRecursionRec = (PRTLOCKVALRECNEST)RTMemAlloc(sizeof(*pRecursionRec));
        if (!pRecursionRec)
            return;
    }

    /*
     * Initialize it.  The recursion count is copied from the lock record and
     * the magic is written last, so the record never looks valid half-done.
     */
    switch (pRec->Core.u32Magic)
    {
        case RTLOCKVALRECEXCL_MAGIC:
            pRecursionRec->cRecursion = pRec->Excl.cRecursion;
            break;

        case RTLOCKVALRECSHRDOWN_MAGIC:
            pRecursionRec->cRecursion = pRec->ShrdOwner.cRecursion;
            break;

        default:
            AssertMsgFailed(("%#x\n", pRec->Core.u32Magic));
            /* Serialize with any concurrent lock destruction before freeing. */
            rtLockValidatorSerializeDestructEnter();
            rtLockValidatorSerializeDestructLeave();
            RTMemFree(pRecursionRec);
            return;
    }
    Assert(pRecursionRec->cRecursion > 1);
    pRecursionRec->pRec = pRec;
    pRecursionRec->pDown = NULL;
    pRecursionRec->pNextFree = NULL;
    rtLockValidatorSrcPosCopy(&pRecursionRec->SrcPos, pSrcPos);
    pRecursionRec->Core.u32Magic = RTLOCKVALRECNEST_MAGIC;

    /*
     * Link it onto the top of the thread's lock stack.
     */
    pRecursionRec->pDown = pThreadSelf->LockValidator.pStackTop;
    rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pStackTop, (PRTLOCKVALRECUNION)pRecursionRec);
#endif /* RTLOCKVAL_WITH_RECURSION_RECORDS */
}
2049
2050
2051/**
2052 * Pops a lock recursion record off the stack.
2053 *
2054 * @param pThreadSelf The current thread.
2055 * @param pRec The lock record.
2056 */
static void rtLockValidatorStackPopRecursion(PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION pRec)
{
    Assert(pThreadSelf == RTThreadSelf());
    Assert(rtLockValidatorStackContainsRec(pThreadSelf, pRec));

    /* Read the lock record's current recursion count; the matching nest
       record is expected to hold this value + 1 (asserted below). */
    uint32_t cRecursion;
    switch (pRec->Core.u32Magic)
    {
        case RTLOCKVALRECEXCL_MAGIC:    cRecursion = pRec->Excl.cRecursion; break;
        case RTLOCKVALRECSHRDOWN_MAGIC: cRecursion = pRec->ShrdOwner.cRecursion; break;
        default:                        AssertMsgFailedReturnVoid(("%#x\n", pRec->Core.u32Magic));
    }
    Assert(cRecursion >= 1);

#ifdef RTLOCKVAL_WITH_RECURSION_RECORDS
    /*
     * Pop the recursion record.  Fast path: it sits right on top of the stack.
     */
    PRTLOCKVALRECUNION pNest = pThreadSelf->LockValidator.pStackTop;
    if (   pNest != NULL
        && pNest->Core.u32Magic == RTLOCKVALRECNEST_MAGIC
        && pNest->Nest.pRec == pRec
       )
    {
        Assert(pNest->Nest.cRecursion == cRecursion + 1);
        rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pStackTop, pNest->Nest.pDown);
    }
    else
    {
        /* Find the record above ours. */
        PRTLOCKVALRECUNION volatile *ppDown = NULL;
        for (;;)
        {
            AssertMsgReturnVoid(pNest, ("%p %p\n", pRec, pThreadSelf));
            switch (pNest->Core.u32Magic)
            {
                case RTLOCKVALRECEXCL_MAGIC:
                    ppDown = &pNest->Excl.pDown;
                    pNest = *ppDown;
                    continue;
                case RTLOCKVALRECSHRDOWN_MAGIC:
                    ppDown = &pNest->ShrdOwner.pDown;
                    pNest = *ppDown;
                    continue;
                case RTLOCKVALRECNEST_MAGIC:
                    if (pNest->Nest.pRec == pRec)
                        break; /* found it - leaves the switch, then the loop below. */
                    ppDown = &pNest->Nest.pDown;
                    pNest = *ppDown;
                    continue;
                default:
                    AssertMsgFailedReturnVoid(("%#x\n", pNest->Core.u32Magic));
            }
            break; /* ugly */
        }
        /* ppDown cannot be NULL here: a top-of-stack match was handled above,
           so at least one record was traversed before the match. */
        Assert(pNest->Nest.cRecursion == cRecursion + 1);
        rtLockValidatorWriteRecUnionPtr(ppDown, pNest->Nest.pDown);
    }

    /*
     * Invalidate and free the record (onto the per-thread free list).
     * NOTE(review): this writes the live RTLOCKVALRECNEST_MAGIC rather than a
     * _DEAD value despite the "invalidate" intent; the push code re-sets the
     * magic on reuse, but confirm a dead magic wasn't intended here.
     */
    ASMAtomicWriteU32(&pNest->Core.u32Magic, RTLOCKVALRECNEST_MAGIC);
    rtLockValidatorWriteRecUnionPtr(&pNest->Nest.pDown, NULL);
    rtLockValidatorWriteRecUnionPtr(&pNest->Nest.pRec, NULL);
    pNest->Nest.cRecursion = 0;
    pNest->Nest.pNextFree = pThreadSelf->LockValidator.pFreeNestRecs;
    pThreadSelf->LockValidator.pFreeNestRecs = &pNest->Nest;
#endif /* RTLOCKVAL_WITH_RECURSION_RECORDS */
}
2127
2128
2129/**
2130 * Helper for rtLockValidatorStackCheckLockingOrder that does the bitching and
2131 * returns VERR_SEM_LV_WRONG_ORDER.
2132 */
2133static int rtLockValidatorStackWrongOrder(const char *pszWhat, PCRTLOCKVALSRCPOS pSrcPos, PRTTHREADINT pThreadSelf,
2134 PRTLOCKVALRECUNION pRec1, PRTLOCKVALRECUNION pRec2,
2135 RTLOCKVALCLASSINT *pClass1, RTLOCKVALCLASSINT *pClass2)
2136
2137
2138{
2139 rtLockValComplainFirst(pszWhat, pSrcPos, pThreadSelf, pRec1, false);
2140 rtLockValComplainAboutLock("Other lock: ", pRec2, "\n");
2141 rtLockValComplainAboutClass("My class: ", pClass1, rtLockValidatorRecGetSubClass(pRec1), true /*fVerbose*/);
2142 rtLockValComplainAboutClass("Other class: ", pClass2, rtLockValidatorRecGetSubClass(pRec2), true /*fVerbose*/);
2143 rtLockValComplainAboutLockStack(pThreadSelf, 0, 0, pRec2);
2144 rtLockValComplainPanic();
2145 return !g_fLockValSoftWrongOrder ? VERR_SEM_LV_WRONG_ORDER : VINF_SUCCESS;
2146}
2147
2148
2149/**
2150 * Checks if the sub-class order is ok or not.
2151 *
2152 * Used to deal with two locks from the same class.
2153 *
2154 * @returns true if ok, false if not.
2155 * @param uSubClass1 The sub-class of the lock that is being
2156 * considered.
2157 * @param uSubClass2 The sub-class of the lock that is already being
2158 * held.
2159 */
2160DECL_FORCE_INLINE(bool) rtLockValidatorIsSubClassOrderOk(uint32_t uSubClass1, uint32_t uSubClass2)
2161{
2162 if (uSubClass1 > uSubClass2)
2163 {
2164 /* NONE kills ANY. */
2165 if (uSubClass2 == RTLOCKVAL_SUB_CLASS_NONE)
2166 return false;
2167 return true;
2168 }
2169
2170 /* ANY counters all USER values. (uSubClass1 == NONE only if they are equal) */
2171 AssertCompile(RTLOCKVAL_SUB_CLASS_ANY > RTLOCKVAL_SUB_CLASS_NONE);
2172 if (uSubClass1 == RTLOCKVAL_SUB_CLASS_ANY)
2173 return true;
2174 return false;
2175}
2176
2177
2178/**
2179 * Checks if the class and sub-class lock order is ok.
2180 *
2181 * @returns true if ok, false if not.
2182 * @param pClass1 The class of the lock that is being considered.
2183 * @param uSubClass1 The sub-class that goes with @a pClass1.
2184 * @param pClass2 The class of the lock that is already being
2185 * held.
2186 * @param uSubClass2 The sub-class that goes with @a pClass2.
2187 */
2188DECL_FORCE_INLINE(bool) rtLockValidatorIsClassOrderOk(RTLOCKVALCLASSINT *pClass1, uint32_t uSubClass1,
2189 RTLOCKVALCLASSINT *pClass2, uint32_t uSubClass2)
2190{
2191 if (pClass1 == pClass2)
2192 return rtLockValidatorIsSubClassOrderOk(uSubClass1, uSubClass2);
2193 return rtLockValidatorClassIsPriorClass(pClass1, pClass2);
2194}
2195
2196
2197/**
2198 * Checks the locking order, part two.
2199 *
2200 * @returns VINF_SUCCESS, VERR_SEM_LV_WRONG_ORDER or VERR_SEM_LV_INTERNAL_ERROR.
2201 * @param pClass The lock class.
2202 * @param uSubClass The lock sub-class.
2203 * @param pThreadSelf The current thread.
2204 * @param pRec The lock record.
2205 * @param pSrcPos The source position of the locking operation.
2206 */
static int rtLockValidatorStackCheckLockingOrder2(RTLOCKVALCLASSINT * const pClass, uint32_t const uSubClass,
                                                  PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION const pRec,
                                                  PCRTLOCKVALSRCPOS const pSrcPos,
                                                  RTLOCKVALCLASSINT * const pFirstBadClass,
                                                  PRTLOCKVALRECUNION const pFirstBadRec,
                                                  PRTLOCKVALRECUNION const pFirstBadDown)
{
    /*
     * Something went wrong, pCur is pointing to where.
     *
     * If there is already an ordering relation between the two classes (or
     * they are the same class), or if the class doesn't learn new rules on
     * the fly (not autodidact), this is a definite violation.
     */
    if (   pClass == pFirstBadClass
        || rtLockValidatorClassIsPriorClass(pFirstBadClass, pClass))
        return rtLockValidatorStackWrongOrder("Wrong locking order!", pSrcPos, pThreadSelf,
                                              pRec, pFirstBadRec, pClass, pFirstBadClass);
    if (!pClass->fAutodidact)
        return rtLockValidatorStackWrongOrder("Wrong locking order! (unknown)", pSrcPos, pThreadSelf,
                                              pRec, pFirstBadRec, pClass, pFirstBadClass);

    /*
     * This class is an autodidact, so we have to check out the rest of the stack
     * for direct violations.  Count how many new rules would be needed.
     */
    uint32_t cNewRules = 1;
    PRTLOCKVALRECUNION pCur = pFirstBadDown;
    while (pCur)
    {
        AssertPtrReturn(pCur, VERR_SEM_LV_INTERNAL_ERROR);

        if (pCur->Core.u32Magic == RTLOCKVALRECNEST_MAGIC)
            pCur = pCur->Nest.pDown;        /* skip recursion records */
        else
        {
            PRTLOCKVALRECUNION pDown;
            uint32_t uPriorSubClass;
            RTLOCKVALCLASSINT *pPriorClass = rtLockValidatorRecGetClassesAndDown(pCur, &uPriorSubClass, &pDown);
            if (pPriorClass != NIL_RTLOCKVALCLASS)
            {
                AssertPtrReturn(pPriorClass, VERR_SEM_LV_INTERNAL_ERROR);
                AssertReturn(pPriorClass->u32Magic == RTLOCKVALCLASS_MAGIC, VERR_SEM_LV_INTERNAL_ERROR);
                if (!rtLockValidatorIsClassOrderOk(pClass, uSubClass, pPriorClass, uPriorSubClass))
                {
                    /* A conflicting rule in the other direction cannot be taught around. */
                    if (   pClass == pPriorClass
                        || rtLockValidatorClassIsPriorClass(pPriorClass, pClass))
                        return rtLockValidatorStackWrongOrder("Wrong locking order! (more than one)", pSrcPos, pThreadSelf,
                                                              pRec, pCur, pClass, pPriorClass);
                    cNewRules++;
                }
            }
            pCur = pDown;
        }
    }

    if (cNewRules == 1)
    {
        /*
         * Special case the simple operation, hoping that it will be a
         * frequent case.  A race adding the reverse rule shows up as
         * VERR_SEM_LV_WRONG_ORDER from the add.
         */
        int rc = rtLockValidatorClassAddPriorClass(pClass, pFirstBadClass, true /*fAutodidacticism*/, pSrcPos);
        if (rc == VERR_SEM_LV_WRONG_ORDER)
            return rtLockValidatorStackWrongOrder("Wrong locking order! (race)", pSrcPos, pThreadSelf,
                                                  pRec, pFirstBadRec, pClass, pFirstBadClass);
        Assert(RT_SUCCESS(rc) || rc == VERR_NO_MEMORY);
    }
    else
    {
        /*
         * We may be adding more than one rule, so we have to take the lock
         * before starting to add the rules.  This means we have to check
         * the state after taking it since we might be racing someone adding
         * a conflicting rule.
         */
        if (!RTCritSectIsInitialized(&g_LockValClassTeachCS))
            rtLockValidatorLazyInit();
        int rcLock = RTCritSectEnter(&g_LockValClassTeachCS);

        /* Check (first pass, now under the teaching lock): bail out if a
           conflicting rule appeared while we were unlocked. */
        pCur = pFirstBadRec;
        while (pCur)
        {
            if (pCur->Core.u32Magic == RTLOCKVALRECNEST_MAGIC)
                pCur = pCur->Nest.pDown;
            else
            {
                uint32_t uPriorSubClass;
                PRTLOCKVALRECUNION pDown;
                RTLOCKVALCLASSINT *pPriorClass = rtLockValidatorRecGetClassesAndDown(pCur, &uPriorSubClass, &pDown);
                if (pPriorClass != NIL_RTLOCKVALCLASS)
                {
                    if (!rtLockValidatorIsClassOrderOk(pClass, uSubClass, pPriorClass, uPriorSubClass))
                    {
                        if (   pClass == pPriorClass
                            || rtLockValidatorClassIsPriorClass(pPriorClass, pClass))
                        {
                            if (RT_SUCCESS(rcLock))
                                RTCritSectLeave(&g_LockValClassTeachCS);
                            return rtLockValidatorStackWrongOrder("Wrong locking order! (2nd)", pSrcPos, pThreadSelf,
                                                                  pRec, pCur, pClass, pPriorClass);
                        }
                    }
                }
                pCur = pDown;
            }
        }

        /* Iterate the stack yet again, adding new rules this time. */
        pCur = pFirstBadRec;
        while (pCur)
        {
            if (pCur->Core.u32Magic == RTLOCKVALRECNEST_MAGIC)
                pCur = pCur->Nest.pDown;
            else
            {
                uint32_t uPriorSubClass;
                PRTLOCKVALRECUNION pDown;
                RTLOCKVALCLASSINT *pPriorClass = rtLockValidatorRecGetClassesAndDown(pCur, &uPriorSubClass, &pDown);
                if (pPriorClass != NIL_RTLOCKVALCLASS)
                {
                    if (!rtLockValidatorIsClassOrderOk(pClass, uSubClass, pPriorClass, uPriorSubClass))
                    {
                        Assert(   pClass != pPriorClass
                               && !rtLockValidatorClassIsPriorClass(pPriorClass, pClass));
                        int rc = rtLockValidatorClassAddPriorClass(pClass, pPriorClass, true /*fAutodidacticism*/, pSrcPos);
                        if (RT_FAILURE(rc))
                        {
                            /* Out of memory: give up teaching, not an order error. */
                            Assert(rc == VERR_NO_MEMORY);
                            break;
                        }
                        Assert(rtLockValidatorClassIsPriorClass(pClass, pPriorClass));
                    }
                }
                pCur = pDown;
            }
        }

        if (RT_SUCCESS(rcLock))
            RTCritSectLeave(&g_LockValClassTeachCS);
    }

    return VINF_SUCCESS;
}
2348
2349
2350
2351/**
2352 * Checks the locking order.
2353 *
2354 * @returns VINF_SUCCESS, VERR_SEM_LV_WRONG_ORDER or VERR_SEM_LV_INTERNAL_ERROR.
2355 * @param pClass The lock class.
2356 * @param uSubClass The lock sub-class.
2357 * @param pThreadSelf The current thread.
2358 * @param pRec The lock record.
2359 * @param pSrcPos The source position of the locking operation.
2360 */
2361static int rtLockValidatorStackCheckLockingOrder(RTLOCKVALCLASSINT * const pClass, uint32_t const uSubClass,
2362 PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION const pRec,
2363 PCRTLOCKVALSRCPOS pSrcPos)
2364{
2365 /*
2366 * Some internal paranoia first.
2367 */
2368 AssertPtr(pClass);
2369 Assert(pClass->u32Magic == RTLOCKVALCLASS_MAGIC);
2370 AssertPtr(pThreadSelf);
2371 Assert(pThreadSelf->u32Magic == RTTHREADINT_MAGIC);
2372 AssertPtr(pRec);
2373 AssertPtrNull(pSrcPos);
2374
2375 /*
2376 * Walk the stack, delegate problems to a worker routine.
2377 */
2378 PRTLOCKVALRECUNION pCur = pThreadSelf->LockValidator.pStackTop;
2379 if (!pCur)
2380 return VINF_SUCCESS;
2381
2382 for (;;)
2383 {
2384 AssertPtrReturn(pCur, VERR_SEM_LV_INTERNAL_ERROR);
2385
2386 if (pCur->Core.u32Magic == RTLOCKVALRECNEST_MAGIC)
2387 pCur = pCur->Nest.pDown;
2388 else
2389 {
2390 uint32_t uPriorSubClass;
2391 PRTLOCKVALRECUNION pDown;
2392 RTLOCKVALCLASSINT *pPriorClass = rtLockValidatorRecGetClassesAndDown(pCur, &uPriorSubClass, &pDown);
2393 if (pPriorClass != NIL_RTLOCKVALCLASS)
2394 {
2395 AssertPtrReturn(pPriorClass, VERR_SEM_LV_INTERNAL_ERROR);
2396 AssertReturn(pPriorClass->u32Magic == RTLOCKVALCLASS_MAGIC, VERR_SEM_LV_INTERNAL_ERROR);
2397 if (RT_UNLIKELY(!rtLockValidatorIsClassOrderOk(pClass, uSubClass, pPriorClass, uPriorSubClass)))
2398 return rtLockValidatorStackCheckLockingOrder2(pClass, uSubClass, pThreadSelf, pRec, pSrcPos,
2399 pPriorClass, pCur, pDown);
2400 }
2401 pCur = pDown;
2402 }
2403 if (!pCur)
2404 return VINF_SUCCESS;
2405 }
2406}
2407
2408
2409/**
2410 * Check that the lock record is the topmost one on the stack, complain and fail
2411 * if it isn't.
2412 *
2413 * @returns VINF_SUCCESS, VERR_SEM_LV_WRONG_RELEASE_ORDER or
2414 * VERR_SEM_LV_INVALID_PARAMETER.
2415 * @param pThreadSelf The current thread.
2416 * @param pRec The record.
2417 */
2418static int rtLockValidatorStackCheckReleaseOrder(PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION pRec)
2419{
2420 AssertReturn(pThreadSelf != NIL_RTTHREAD, VERR_SEM_LV_INVALID_PARAMETER);
2421 Assert(pThreadSelf == RTThreadSelf());
2422
2423 PRTLOCKVALRECUNION pTop = pThreadSelf->LockValidator.pStackTop;
2424 if (RT_LIKELY( pTop == pRec
2425 || ( pTop
2426 && pTop->Core.u32Magic == RTLOCKVALRECNEST_MAGIC
2427 && pTop->Nest.pRec == pRec) ))
2428 return VINF_SUCCESS;
2429
2430#ifdef RTLOCKVAL_WITH_RECURSION_RECORDS
2431 /* Look for a recursion record so the right frame is dumped and marked. */
2432 while (pTop)
2433 {
2434 if (pTop->Core.u32Magic == RTLOCKVALRECNEST_MAGIC)
2435 {
2436 if (pTop->Nest.pRec == pRec)
2437 {
2438 pRec = pTop;
2439 break;
2440 }
2441 pTop = pTop->Nest.pDown;
2442 }
2443 else if (pTop->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC)
2444 pTop = pTop->Excl.pDown;
2445 else if (pTop->Core.u32Magic == RTLOCKVALRECSHRDOWN_MAGIC)
2446 pTop = pTop->ShrdOwner.pDown;
2447 else
2448 break;
2449 }
2450#endif
2451
2452 rtLockValComplainFirst("Wrong release order!", NULL, pThreadSelf, pRec, true);
2453 rtLockValComplainPanic();
2454 return !g_fLockValSoftWrongOrder ? VERR_SEM_LV_WRONG_RELEASE_ORDER : VINF_SUCCESS;
2455}
2456
2457
2458/**
2459 * Checks if all owners are blocked - shared record operated in signaller mode.
2460 *
2461 * @returns true / false accordingly.
2462 * @param pRec The record.
2463 * @param pThreadSelf The current thread.
2464 */
DECL_FORCE_INLINE(bool) rtLockValidatorDdAreAllThreadsBlocked(PRTLOCKVALRECSHRD pRec, PRTTHREADINT pThreadSelf)
{
    /* Snapshot the owner table geometry; entries can change concurrently. */
    PRTLOCKVALRECSHRDOWN volatile *papOwners = pRec->papOwners;
    uint32_t cAllocated = pRec->cAllocated;
    uint32_t cEntries = ASMAtomicUoReadU32(&pRec->cEntries);
    if (cEntries == 0)
        return false;   /* no owners at all -> nobody is blocked */

    for (uint32_t i = 0; i < cAllocated; i++)
    {
        PRTLOCKVALRECSHRDOWN pEntry = rtLockValidatorUoReadSharedOwner(&papOwners[i]);
        if (   pEntry
            && pEntry->Core.u32Magic == RTLOCKVALRECSHRDOWN_MAGIC)
        {
            PRTTHREADINT pCurThread = rtLockValidatorReadThreadHandle(&pEntry->hThread);
            if (!pCurThread)
                return false;
            if (pCurThread->u32Magic != RTTHREADINT_MAGIC)
                return false;
            /* An owner that is neither sleeping nor the calling thread means
               not all owners are blocked. */
            if (   !RTTHREAD_IS_SLEEPING(rtThreadGetState(pCurThread))
                && pCurThread != pThreadSelf)
                return false;
            /* Stop early once all live entries have been accounted for. */
            if (--cEntries == 0)
                break;
        }
        else
            Assert(!pEntry || pEntry->Core.u32Magic == RTLOCKVALRECSHRDOWN_MAGIC_DEAD);
    }

    return true;
}
2496
2497
2498/**
2499 * Verifies the deadlock stack before calling it a deadlock.
2500 *
2501 * @retval VERR_SEM_LV_DEADLOCK if it's a deadlock.
2502 * @retval VERR_SEM_LV_ILLEGAL_UPGRADE if it's a deadlock on the same lock.
2503 * @retval VERR_TRY_AGAIN if something changed.
2504 *
2505 * @param pStack The deadlock detection stack.
2506 * @param pThreadSelf The current thread.
2507 */
static int rtLockValidatorDdVerifyDeadlock(PRTLOCKVALDDSTACK pStack, PRTTHREADINT pThreadSelf)
{
    /*
     * Re-check the whole chain three times, yielding between passes, so that
     * transient states (threads waking up, records being reassigned) get a
     * chance to disprove a false positive.
     */
    uint32_t const c = pStack->c;
    for (uint32_t iPass = 0; iPass < 3; iPass++)
    {
        for (uint32_t i = 1; i < c; i++)
        {
            PRTTHREADINT pThread = pStack->a[i].pThread;
            if (pThread->u32Magic != RTTHREADINT_MAGIC)
                return VERR_TRY_AGAIN;      /* thread went away - retry detection */
            if (rtThreadGetState(pThread) != pStack->a[i].enmState)
                return VERR_TRY_AGAIN;      /* thread state changed - retry */
            if (rtLockValidatorReadRecUnionPtr(&pThread->LockValidator.pRec) != pStack->a[i].pFirstSibling)
                return VERR_TRY_AGAIN;      /* thread now waits on something else */
            /* ASSUMES the signaller records won't have siblings! */
            PRTLOCKVALRECUNION pRec = pStack->a[i].pRec;
            if (   pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
                && pRec->Shared.fSignaller
                && !rtLockValidatorDdAreAllThreadsBlocked(&pRec->Shared, pThreadSelf))
                return VERR_TRY_AGAIN;
        }
        RTThreadYield();
    }

    /* A one-entry chain means we're blocking on a lock we already own. */
    if (c == 1)
        return VERR_SEM_LV_ILLEGAL_UPGRADE;
    return VERR_SEM_LV_DEADLOCK;
}
2536
2537
2538/**
2539 * Checks for stack cycles caused by another deadlock before returning.
2540 *
2541 * @retval VINF_SUCCESS if the stack is simply too small.
2542 * @retval VERR_SEM_LV_EXISTING_DEADLOCK if a cycle was detected.
2543 *
2544 * @param pStack The deadlock detection stack.
2545 */
2546static int rtLockValidatorDdHandleStackOverflow(PRTLOCKVALDDSTACK pStack)
2547{
2548 for (size_t i = 0; i < RT_ELEMENTS(pStack->a) - 1; i++)
2549 {
2550 PRTTHREADINT pThread = pStack->a[i].pThread;
2551 for (size_t j = i + 1; j < RT_ELEMENTS(pStack->a); j++)
2552 if (pStack->a[j].pThread == pThread)
2553 return VERR_SEM_LV_EXISTING_DEADLOCK;
2554 }
2555 static bool volatile s_fComplained = false;
2556 if (!s_fComplained)
2557 {
2558 s_fComplained = true;
2559 rtLockValComplain(RT_SRC_POS, "lock validator stack is too small! (%zu entries)\n", RT_ELEMENTS(pStack->a));
2560 }
2561 return VINF_SUCCESS;
2562}
2563
2564
2565/**
2566 * Worker for rtLockValidatorDeadlockDetection that does the actual deadlock
2567 * detection.
2568 *
2569 * @retval VINF_SUCCESS
2570 * @retval VERR_SEM_LV_DEADLOCK
2571 * @retval VERR_SEM_LV_EXISTING_DEADLOCK
2572 * @retval VERR_SEM_LV_ILLEGAL_UPGRADE
2573 * @retval VERR_TRY_AGAIN
2574 *
2575 * @param pStack The stack to use.
2576 * @param pOriginalRec The original record.
2577 * @param pThreadSelf The calling thread.
2578 */
2579static int rtLockValidatorDdDoDetection(PRTLOCKVALDDSTACK pStack, PRTLOCKVALRECUNION const pOriginalRec,
2580 PRTTHREADINT const pThreadSelf)
2581{
2582 pStack->c = 0;
2583
2584 /* We could use a single RTLOCKVALDDENTRY variable here, but the
2585 compiler may make a better job of it when using individual variables. */
2586 PRTLOCKVALRECUNION pRec = pOriginalRec;
2587 PRTLOCKVALRECUNION pFirstSibling = pOriginalRec;
2588 uint32_t iEntry = UINT32_MAX;
2589 PRTTHREADINT pThread = NIL_RTTHREAD;
2590 RTTHREADSTATE enmState = RTTHREADSTATE_RUNNING;
2591 for (uint32_t iLoop = 0; ; iLoop++)
2592 {
2593 /*
2594 * Process the current record.
2595 */
2596 RTLOCKVAL_ASSERT_PTR_ALIGN(pRec);
2597
2598 /* Find the next relevant owner thread and record. */
2599 PRTLOCKVALRECUNION pNextRec = NULL;
2600 RTTHREADSTATE enmNextState = RTTHREADSTATE_RUNNING;
2601 PRTTHREADINT pNextThread = NIL_RTTHREAD;
2602 switch (pRec->Core.u32Magic)
2603 {
2604 case RTLOCKVALRECEXCL_MAGIC:
2605 Assert(iEntry == UINT32_MAX);
2606 for (;;)
2607 {
2608 pNextThread = rtLockValidatorReadThreadHandle(&pRec->Excl.hThread);
2609 if ( !pNextThread
2610 || pNextThread->u32Magic != RTTHREADINT_MAGIC)
2611 break;
2612 enmNextState = rtThreadGetState(pNextThread);
2613 if ( !RTTHREAD_IS_SLEEPING(enmNextState)
2614 && pNextThread != pThreadSelf)
2615 break;
2616 pNextRec = rtLockValidatorReadRecUnionPtr(&pNextThread->LockValidator.pRec);
2617 if (RT_LIKELY( !pNextRec
2618 || enmNextState == rtThreadGetState(pNextThread)))
2619 break;
2620 pNextRec = NULL;
2621 }
2622 if (!pNextRec)
2623 {
2624 pRec = pRec->Excl.pSibling;
2625 if ( pRec
2626 && pRec != pFirstSibling)
2627 continue;
2628 pNextThread = NIL_RTTHREAD;
2629 }
2630 break;
2631
2632 case RTLOCKVALRECSHRD_MAGIC:
2633 if (!pRec->Shared.fSignaller)
2634 {
2635 /* Skip to the next sibling if same side. ASSUMES reader priority. */
2636 /** @todo The read side of a read-write lock is problematic if
2637 * the implementation prioritizes writers over readers because
2638 * that means we should could deadlock against current readers
2639 * if a writer showed up. If the RW sem implementation is
2640 * wrapping some native API, it's not so easy to detect when we
2641 * should do this and when we shouldn't. Checking when we
2642 * shouldn't is subject to wakeup scheduling and cannot easily
2643 * be made reliable.
2644 *
2645 * At the moment we circumvent all this mess by declaring that
2646 * readers has priority. This is TRUE on linux, but probably
2647 * isn't on Solaris and FreeBSD. */
2648 if ( pRec == pFirstSibling
2649 && pRec->Shared.pSibling != NULL
2650 && pRec->Shared.pSibling != pFirstSibling)
2651 {
2652 pRec = pRec->Shared.pSibling;
2653 Assert(iEntry == UINT32_MAX);
2654 continue;
2655 }
2656 }
2657
2658 /* Scan the owner table for blocked owners. */
2659 if ( ASMAtomicUoReadU32(&pRec->Shared.cEntries) > 0
2660 && ( !pRec->Shared.fSignaller
2661 || iEntry != UINT32_MAX
2662 || rtLockValidatorDdAreAllThreadsBlocked(&pRec->Shared, pThreadSelf)
2663 )
2664 )
2665 {
2666 uint32_t cAllocated = pRec->Shared.cAllocated;
2667 PRTLOCKVALRECSHRDOWN volatile *papOwners = pRec->Shared.papOwners;
2668 while (++iEntry < cAllocated)
2669 {
2670 PRTLOCKVALRECSHRDOWN pEntry = rtLockValidatorUoReadSharedOwner(&papOwners[iEntry]);
2671 if (pEntry)
2672 {
2673 for (;;)
2674 {
2675 if (pEntry->Core.u32Magic != RTLOCKVALRECSHRDOWN_MAGIC)
2676 break;
2677 pNextThread = rtLockValidatorReadThreadHandle(&pEntry->hThread);
2678 if ( !pNextThread
2679 || pNextThread->u32Magic != RTTHREADINT_MAGIC)
2680 break;
2681 enmNextState = rtThreadGetState(pNextThread);
2682 if ( !RTTHREAD_IS_SLEEPING(enmNextState)
2683 && pNextThread != pThreadSelf)
2684 break;
2685 pNextRec = rtLockValidatorReadRecUnionPtr(&pNextThread->LockValidator.pRec);
2686 if (RT_LIKELY( !pNextRec
2687 || enmNextState == rtThreadGetState(pNextThread)))
2688 break;
2689 pNextRec = NULL;
2690 }
2691 if (pNextRec)
2692 break;
2693 }
2694 else
2695 Assert(!pEntry || pEntry->Core.u32Magic == RTLOCKVALRECSHRDOWN_MAGIC_DEAD);
2696 }
2697 if (pNextRec)
2698 break;
2699 pNextThread = NIL_RTTHREAD;
2700 }
2701
2702 /* Advance to the next sibling, if any. */
2703 pRec = pRec->Shared.pSibling;
2704 if ( pRec != NULL
2705 && pRec != pFirstSibling)
2706 {
2707 iEntry = UINT32_MAX;
2708 continue;
2709 }
2710 break;
2711
2712 case RTLOCKVALRECEXCL_MAGIC_DEAD:
2713 case RTLOCKVALRECSHRD_MAGIC_DEAD:
2714 break;
2715
2716 case RTLOCKVALRECSHRDOWN_MAGIC:
2717 case RTLOCKVALRECSHRDOWN_MAGIC_DEAD:
2718 default:
2719 AssertMsgFailed(("%p: %#x\n", pRec, pRec->Core));
2720 break;
2721 }
2722
2723 if (pNextRec)
2724 {
2725 /*
2726 * Recurse and check for deadlock.
2727 */
2728 uint32_t i = pStack->c;
2729 if (RT_UNLIKELY(i >= RT_ELEMENTS(pStack->a)))
2730 return rtLockValidatorDdHandleStackOverflow(pStack);
2731
2732 pStack->c++;
2733 pStack->a[i].pRec = pRec;
2734 pStack->a[i].iEntry = iEntry;
2735 pStack->a[i].enmState = enmState;
2736 pStack->a[i].pThread = pThread;
2737 pStack->a[i].pFirstSibling = pFirstSibling;
2738
2739 if (RT_UNLIKELY( pNextThread == pThreadSelf
2740 && ( i != 0
2741 || pRec->Core.u32Magic != RTLOCKVALRECSHRD_MAGIC
2742 || !pRec->Shared.fSignaller) /* ASSUMES signaller records have no siblings. */
2743 )
2744 )
2745 return rtLockValidatorDdVerifyDeadlock(pStack, pThreadSelf);
2746
2747 pRec = pNextRec;
2748 pFirstSibling = pNextRec;
2749 iEntry = UINT32_MAX;
2750 enmState = enmNextState;
2751 pThread = pNextThread;
2752 }
2753 else
2754 {
2755 /*
2756 * No deadlock here, unwind the stack and deal with any unfinished
2757 * business there.
2758 */
2759 uint32_t i = pStack->c;
2760 for (;;)
2761 {
2762 /* pop */
2763 if (i == 0)
2764 return VINF_SUCCESS;
2765 i--;
2766 pRec = pStack->a[i].pRec;
2767 iEntry = pStack->a[i].iEntry;
2768
2769 /* Examine it. */
2770 uint32_t u32Magic = pRec->Core.u32Magic;
2771 if (u32Magic == RTLOCKVALRECEXCL_MAGIC)
2772 pRec = pRec->Excl.pSibling;
2773 else if (u32Magic == RTLOCKVALRECSHRD_MAGIC)
2774 {
2775 if (iEntry + 1 < pRec->Shared.cAllocated)
2776 break; /* continue processing this record. */
2777 pRec = pRec->Shared.pSibling;
2778 }
2779 else
2780 {
2781 Assert( u32Magic == RTLOCKVALRECEXCL_MAGIC_DEAD
2782 || u32Magic == RTLOCKVALRECSHRD_MAGIC_DEAD);
2783 continue;
2784 }
2785
2786 /* Any next record to advance to? */
2787 if ( !pRec
2788 || pRec == pStack->a[i].pFirstSibling)
2789 continue;
2790 iEntry = UINT32_MAX;
2791 break;
2792 }
2793
2794 /* Restore the rest of the state and update the stack. */
2795 pFirstSibling = pStack->a[i].pFirstSibling;
2796 enmState = pStack->a[i].enmState;
2797 pThread = pStack->a[i].pThread;
2798 pStack->c = i;
2799 }
2800
2801 Assert(iLoop != 1000000);
2802 }
2803}
2804
2805
2806/**
2807 * Check for the simple no-deadlock case.
2808 *
2809 * @returns true if no deadlock, false if further investigation is required.
2810 *
2811 * @param pOriginalRec The original record.
2812 */
2813DECLINLINE(int) rtLockValidatorIsSimpleNoDeadlockCase(PRTLOCKVALRECUNION pOriginalRec)
2814{
2815 if ( pOriginalRec->Excl.Core.u32Magic == RTLOCKVALRECEXCL_MAGIC
2816 && !pOriginalRec->Excl.pSibling)
2817 {
2818 PRTTHREADINT pThread = rtLockValidatorReadThreadHandle(&pOriginalRec->Excl.hThread);
2819 if ( !pThread
2820 || pThread->u32Magic != RTTHREADINT_MAGIC)
2821 return true;
2822 RTTHREADSTATE enmState = rtThreadGetState(pThread);
2823 if (!RTTHREAD_IS_SLEEPING(enmState))
2824 return true;
2825 }
2826 return false;
2827}
2828
2829
2830/**
2831 * Worker for rtLockValidatorDeadlockDetection that bitches about a deadlock.
2832 *
2833 * @param pStack The chain of locks causing the deadlock.
2834 * @param pRec The record relating to the current thread's lock
2835 * operation.
2836 * @param pThreadSelf This thread.
2837 * @param pSrcPos Where we are going to deadlock.
2838 * @param rc The return code.
2839 */
static void rcLockValidatorDoDeadlockComplaining(PRTLOCKVALDDSTACK pStack, PRTLOCKVALRECUNION pRec,
                                                 PRTTHREADINT pThreadSelf, PCRTLOCKVALSRCPOS pSrcPos, int rc)
{
    if (!ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
    {
        /* Turn the status code into a headline for the complaint. */
        const char *pszWhat;
        switch (rc)
        {
            case VERR_SEM_LV_DEADLOCK:          pszWhat = "Detected deadlock!"; break;
            case VERR_SEM_LV_EXISTING_DEADLOCK: pszWhat = "Found existing deadlock!"; break;
            case VERR_SEM_LV_ILLEGAL_UPGRADE:   pszWhat = "Illegal lock upgrade!"; break;
            default:                            AssertFailed(); pszWhat = "!unexpected rc!"; break;
        }
        /* Only pass pRec separately when it isn't already the chain's first entry. */
        rtLockValComplainFirst(pszWhat, pSrcPos, pThreadSelf, pStack->a[0].pRec != pRec ? pRec : NULL, true);
        rtLockValComplainMore("---- start of deadlock chain - %u entries ----\n", pStack->c);
        for (uint32_t i = 0; i < pStack->c; i++)
        {
            char szPrefix[24];
            RTStrPrintf(szPrefix, sizeof(szPrefix), "#%02u: ", i);
            /* For shared records, dump the specific owner entry the chain went
               through rather than the shared record itself (when still valid). */
            PRTLOCKVALRECUNION pShrdOwner = NULL;
            if (pStack->a[i].pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC)
                pShrdOwner = (PRTLOCKVALRECUNION)pStack->a[i].pRec->Shared.papOwners[pStack->a[i].iEntry];
            if (VALID_PTR(pShrdOwner) && pShrdOwner->Core.u32Magic == RTLOCKVALRECSHRDOWN_MAGIC)
            {
                rtLockValComplainAboutLock(szPrefix, pShrdOwner, "\n");
                rtLockValComplainAboutLockStack(pShrdOwner->ShrdOwner.hThread, 5, 2, pShrdOwner);
            }
            else
            {
                rtLockValComplainAboutLock(szPrefix, pStack->a[i].pRec, "\n");
                if (pStack->a[i].pRec->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC)
                    rtLockValComplainAboutLockStack(pStack->a[i].pRec->Excl.hThread, 5, 2, pStack->a[i].pRec);
            }
        }
        rtLockValComplainMore("---- end of deadlock chain ----\n");
    }

    rtLockValComplainPanic();
}
2879
2880
2881/**
2882 * Perform deadlock detection.
2883 *
2884 * @retval VINF_SUCCESS
2885 * @retval VERR_SEM_LV_DEADLOCK
2886 * @retval VERR_SEM_LV_EXISTING_DEADLOCK
2887 * @retval VERR_SEM_LV_ILLEGAL_UPGRADE
2888 *
2889 * @param pRec The record relating to the current thread's lock
2890 * operation.
2891 * @param pThreadSelf The current thread.
2892 * @param pSrcPos The position of the current lock operation.
2893 */
2894static int rtLockValidatorDeadlockDetection(PRTLOCKVALRECUNION pRec, PRTTHREADINT pThreadSelf, PCRTLOCKVALSRCPOS pSrcPos)
2895{
2896 RTLOCKVALDDSTACK Stack;
2897 int rc = rtLockValidatorDdDoDetection(&Stack, pRec, pThreadSelf);
2898 if (RT_SUCCESS(rc))
2899 return VINF_SUCCESS;
2900
2901 if (rc == VERR_TRY_AGAIN)
2902 {
2903 for (uint32_t iLoop = 0; ; iLoop++)
2904 {
2905 rc = rtLockValidatorDdDoDetection(&Stack, pRec, pThreadSelf);
2906 if (RT_SUCCESS_NP(rc))
2907 return VINF_SUCCESS;
2908 if (rc != VERR_TRY_AGAIN)
2909 break;
2910 RTThreadYield();
2911 if (iLoop >= 3)
2912 return VINF_SUCCESS;
2913 }
2914 }
2915
2916 rcLockValidatorDoDeadlockComplaining(&Stack, pRec, pThreadSelf, pSrcPos, rc);
2917 return rc;
2918}
2919
2920
2921RTDECL(void) RTLockValidatorRecExclInitV(PRTLOCKVALRECEXCL pRec, RTLOCKVALCLASS hClass, uint32_t uSubClass,
2922 void *hLock, bool fEnabled, const char *pszNameFmt, va_list va)
2923{
2924 RTLOCKVAL_ASSERT_PTR_ALIGN(pRec);
2925 RTLOCKVAL_ASSERT_PTR_ALIGN(hLock);
2926 Assert( uSubClass >= RTLOCKVAL_SUB_CLASS_USER
2927 || uSubClass == RTLOCKVAL_SUB_CLASS_NONE
2928 || uSubClass == RTLOCKVAL_SUB_CLASS_ANY);
2929
2930 pRec->Core.u32Magic = RTLOCKVALRECEXCL_MAGIC;
2931 pRec->fEnabled = fEnabled && RTLockValidatorIsEnabled();
2932 pRec->afReserved[0] = 0;
2933 pRec->afReserved[1] = 0;
2934 pRec->afReserved[2] = 0;
2935 rtLockValidatorSrcPosInit(&pRec->SrcPos);
2936 pRec->hThread = NIL_RTTHREAD;
2937 pRec->pDown = NULL;
2938 pRec->hClass = rtLockValidatorClassValidateAndRetain(hClass);
2939 pRec->uSubClass = uSubClass;
2940 pRec->cRecursion = 0;
2941 pRec->hLock = hLock;
2942 pRec->pSibling = NULL;
2943 if (pszNameFmt)
2944 RTStrPrintfV(pRec->szName, sizeof(pRec->szName), pszNameFmt, va);
2945 else
2946 {
2947 static uint32_t volatile s_cAnonymous = 0;
2948 uint32_t i = ASMAtomicIncU32(&s_cAnonymous) - 1;
2949 RTStrPrintf(pRec->szName, sizeof(pRec->szName), "anon-excl-%u", i);
2950 }
2951
2952 /* Lazy initialization. */
2953 if (RT_UNLIKELY(g_hLockValidatorXRoads == NIL_RTSEMXROADS))
2954 rtLockValidatorLazyInit();
2955}
2956
2957
/**
 * Variadic front-end for RTLockValidatorRecExclInitV; simply packages the
 * ellipsis arguments into a va_list and forwards everything.
 */
RTDECL(void) RTLockValidatorRecExclInit(PRTLOCKVALRECEXCL pRec, RTLOCKVALCLASS hClass, uint32_t uSubClass,
                                        void *hLock, bool fEnabled, const char *pszNameFmt, ...)
{
    va_list va;
    va_start(va, pszNameFmt);
    RTLockValidatorRecExclInitV(pRec, hClass, uSubClass, hLock, fEnabled, pszNameFmt, va);
    va_end(va);
}
2966
2967
2968RTDECL(int) RTLockValidatorRecExclCreateV(PRTLOCKVALRECEXCL *ppRec, RTLOCKVALCLASS hClass,
2969 uint32_t uSubClass, void *pvLock, bool fEnabled,
2970 const char *pszNameFmt, va_list va)
2971{
2972 PRTLOCKVALRECEXCL pRec;
2973 *ppRec = pRec = (PRTLOCKVALRECEXCL)RTMemAlloc(sizeof(*pRec));
2974 if (!pRec)
2975 return VERR_NO_MEMORY;
2976 RTLockValidatorRecExclInitV(pRec, hClass, uSubClass, pvLock, fEnabled, pszNameFmt, va);
2977 return VINF_SUCCESS;
2978}
2979
2980
/**
 * Variadic front-end for RTLockValidatorRecExclCreateV.
 *
 * @returns VINF_SUCCESS or VERR_NO_MEMORY (from the V worker).
 */
RTDECL(int) RTLockValidatorRecExclCreate(PRTLOCKVALRECEXCL *ppRec, RTLOCKVALCLASS hClass,
                                         uint32_t uSubClass, void *pvLock, bool fEnabled,
                                         const char *pszNameFmt, ...)
{
    va_list va;
    va_start(va, pszNameFmt);
    int rc = RTLockValidatorRecExclCreateV(ppRec, hClass, uSubClass, pvLock, fEnabled, pszNameFmt, va);
    va_end(va);
    return rc;
}
2991
2992
/**
 * Uninitializes an exclusive lock record (does not free the memory).
 *
 * Marks the record dead and detaches it from the class and sibling list while
 * holding the destruction serialization lock, so concurrent deadlock
 * detection cannot wander into a half-torn-down record.
 */
RTDECL(void) RTLockValidatorRecExclDelete(PRTLOCKVALRECEXCL pRec)
{
    Assert(pRec->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC);

    rtLockValidatorSerializeDestructEnter();

    /* Kill the magic first so other code recognizes the record as dead. */
    ASMAtomicWriteU32(&pRec->Core.u32Magic, RTLOCKVALRECEXCL_MAGIC_DEAD);
    ASMAtomicWriteHandle(&pRec->hThread, NIL_RTTHREAD);
    /* Exchange the class handle out so it can be released after dropping the lock. */
    RTLOCKVALCLASS hClass;
    ASMAtomicXchgHandle(&pRec->hClass, NIL_RTLOCKVALCLASS, &hClass);
    if (pRec->pSibling)
        rtLockValidatorUnlinkAllSiblings(&pRec->Core);
    rtLockValidatorSerializeDestructLeave();
    /* Release the retained class reference outside the destruct lock. */
    if (hClass != NIL_RTLOCKVALCLASS)
        RTLockValidatorClassRelease(hClass);
}
3009
3010
3011RTDECL(void) RTLockValidatorRecExclDestroy(PRTLOCKVALRECEXCL *ppRec)
3012{
3013 PRTLOCKVALRECEXCL pRec = *ppRec;
3014 *ppRec = NULL;
3015 if (pRec)
3016 {
3017 RTLockValidatorRecExclDelete(pRec);
3018 RTMemFree(pRec);
3019 }
3020}
3021
3022
/**
 * Atomically replaces the record's sub-class value.
 *
 * @returns The previous sub-class, or RTLOCKVAL_SUB_CLASS_INVALID on bad input.
 * @param   pRec        The exclusive lock record.
 * @param   uSubClass   The new sub-class; must be a USER value or one of the
 *                      NONE/ANY specials.
 */
RTDECL(uint32_t) RTLockValidatorRecExclSetSubClass(PRTLOCKVALRECEXCL pRec, uint32_t uSubClass)
{
    AssertPtrReturn(pRec, RTLOCKVAL_SUB_CLASS_INVALID);
    AssertReturn(pRec->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, RTLOCKVAL_SUB_CLASS_INVALID);
    AssertReturn(   uSubClass >= RTLOCKVAL_SUB_CLASS_USER
                 || uSubClass == RTLOCKVAL_SUB_CLASS_NONE
                 || uSubClass == RTLOCKVAL_SUB_CLASS_ANY,
                 RTLOCKVAL_SUB_CLASS_INVALID);
    return ASMAtomicXchgU32(&pRec->uSubClass, uSubClass);
}
3033
3034
/**
 * Records the calling thread as the owner of the exclusive lock.
 *
 * Handles both the first acquisition and recursive re-acquisition by the
 * current owner.  NIL/NULL inputs and disabled records are quietly ignored.
 *
 * @param   pRec            The record (may be NULL).
 * @param   hThreadSelf     The calling thread, NIL_RTTHREAD to auto-adopt.
 * @param   pSrcPos         Source position of the acquisition.
 * @param   fFirstRecursion Caller asserts this is not a recursion; checked.
 */
RTDECL(void) RTLockValidatorRecExclSetOwner(PRTLOCKVALRECEXCL pRec, RTTHREAD hThreadSelf,
                                            PCRTLOCKVALSRCPOS pSrcPos, bool fFirstRecursion)
{
    PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
    if (!pRecU)
        return;
    AssertReturnVoid(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC);
    if (!pRecU->Excl.fEnabled)
        return;
    if (hThreadSelf == NIL_RTTHREAD)
    {
        /* Adopt alien (non-IPRT) threads on the fly. */
        hThreadSelf = RTThreadSelfAutoAdopt();
        AssertReturnVoid(hThreadSelf != NIL_RTTHREAD);
    }
    AssertReturnVoid(hThreadSelf->u32Magic == RTTHREADINT_MAGIC);
    Assert(hThreadSelf == RTThreadSelf());

    ASMAtomicIncS32(&hThreadSelf->LockValidator.cWriteLocks);

    if (pRecU->Excl.hThread == hThreadSelf)
    {
        /* Recursive acquisition by the current owner. */
        Assert(!fFirstRecursion);
        pRecU->Excl.cRecursion++;
        rtLockValidatorStackPushRecursion(hThreadSelf, pRecU, pSrcPos);
    }
    else
    {
        /* First acquisition: set recursion count before publishing the owner
           handle so observers never see an owner with zero recursion. */
        Assert(pRecU->Excl.hThread == NIL_RTTHREAD);

        rtLockValidatorSrcPosCopy(&pRecU->Excl.SrcPos, pSrcPos);
        ASMAtomicUoWriteU32(&pRecU->Excl.cRecursion, 1);
        ASMAtomicWriteHandle(&pRecU->Excl.hThread, hThreadSelf);

        rtLockValidatorStackPush(hThreadSelf, pRecU);
    }
}
3071
3072
3073/**
3074 * Internal worker for RTLockValidatorRecExclReleaseOwner and
3075 * RTLockValidatorRecExclReleaseOwnerUnchecked.
3076 */
3077static void rtLockValidatorRecExclReleaseOwnerUnchecked(PRTLOCKVALRECUNION pRec, bool fFinalRecursion)
3078{
3079 RTTHREADINT *pThread = pRec->Excl.hThread;
3080 AssertReturnVoid(pThread != NIL_RTTHREAD);
3081 Assert(pThread == RTThreadSelf());
3082
3083 ASMAtomicDecS32(&pThread->LockValidator.cWriteLocks);
3084 uint32_t c = ASMAtomicDecU32(&pRec->Excl.cRecursion);
3085 if (c == 0)
3086 {
3087 rtLockValidatorStackPop(pThread, pRec);
3088 ASMAtomicWriteHandle(&pRec->Excl.hThread, NIL_RTTHREAD);
3089 }
3090 else
3091 {
3092 Assert(c < UINT32_C(0xffff0000));
3093 Assert(!fFinalRecursion);
3094 rtLockValidatorStackPopRecursion(pThread, pRec);
3095 }
3096}
3097
/**
 * Validated release of exclusive lock ownership.
 *
 * Performs a release-order check for strict-order classes before handing the
 * actual bookkeeping to the common unchecked worker.
 *
 * @returns VINF_SUCCESS, VERR_SEM_LV_INVALID_PARAMETER, or a release-order
 *          failure status from rtLockValidatorStackCheckReleaseOrder.
 */
RTDECL(int) RTLockValidatorRecExclReleaseOwner(PRTLOCKVALRECEXCL pRec, bool fFinalRecursion)
{
    PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
    if (!pRecU)
        return VINF_SUCCESS;
    AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    if (!pRecU->Excl.fEnabled)
        return VINF_SUCCESS;

    /*
     * Check the release order.
     */
    if (   pRecU->Excl.hClass != NIL_RTLOCKVALCLASS
        && pRecU->Excl.hClass->fStrictReleaseOrder
        && pRecU->Excl.hClass->cMsMinOrder != RT_INDEFINITE_WAIT
       )
    {
        int rc = rtLockValidatorStackCheckReleaseOrder(pRecU->Excl.hThread, pRecU);
        if (RT_FAILURE(rc))
            return rc;
    }

    /*
     * Join paths with RTLockValidatorRecExclReleaseOwnerUnchecked.
     */
    rtLockValidatorRecExclReleaseOwnerUnchecked(pRecU, fFinalRecursion);
    return VINF_SUCCESS;
}
3126
3127
3128RTDECL(void) RTLockValidatorRecExclReleaseOwnerUnchecked(PRTLOCKVALRECEXCL pRec)
3129{
3130 PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
3131 AssertReturnVoid(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC);
3132 if (pRecU->Excl.fEnabled)
3133 rtLockValidatorRecExclReleaseOwnerUnchecked(pRecU, false);
3134}
3135
3136
/**
 * Records a recursive (re-)entry of an already-owned exclusive lock.
 *
 * @returns VINF_SUCCESS; VERR_SEM_LV_NESTED if the class forbids recursion;
 *          VERR_SEM_LV_INVALID_PARAMETER on bad record state.
 */
RTDECL(int) RTLockValidatorRecExclRecursion(PRTLOCKVALRECEXCL pRec, PCRTLOCKVALSRCPOS pSrcPos)
{
    PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
    if (!pRecU)
        return VINF_SUCCESS;
    AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    if (!pRecU->Excl.fEnabled)
        return VINF_SUCCESS;
    /* Must already be owned for a recursion to make sense. */
    AssertReturn(pRecU->Excl.hThread != NIL_RTTHREAD, VERR_SEM_LV_INVALID_PARAMETER);
    AssertReturn(pRecU->Excl.cRecursion > 0, VERR_SEM_LV_INVALID_PARAMETER);

    if (   pRecU->Excl.hClass != NIL_RTLOCKVALCLASS
        && !pRecU->Excl.hClass->fRecursionOk)
    {
        rtLockValComplainFirst("Recursion not allowed by the class!",
                               pSrcPos, pRecU->Excl.hThread, (PRTLOCKVALRECUNION)pRec, true);
        rtLockValComplainPanic();
        return VERR_SEM_LV_NESTED;
    }

    /* Sanity-bound the recursion depth before bumping it. */
    Assert(pRecU->Excl.cRecursion < _1M);
    pRecU->Excl.cRecursion++;
    rtLockValidatorStackPushRecursion(pRecU->Excl.hThread, pRecU, pSrcPos);
    return VINF_SUCCESS;
}
3162
3163
/**
 * Unwinds one level of recursion on an exclusive lock (counterpart to
 * RTLockValidatorRecExclRecursion); must not be used for the final release.
 *
 * @returns VINF_SUCCESS, VERR_SEM_LV_INVALID_PARAMETER, or a release-order
 *          failure status.
 */
RTDECL(int) RTLockValidatorRecExclUnwind(PRTLOCKVALRECEXCL pRec)
{
    PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
    AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    if (!pRecU->Excl.fEnabled)
        return VINF_SUCCESS;
    AssertReturn(pRecU->Excl.hThread != NIL_RTTHREAD, VERR_SEM_LV_INVALID_PARAMETER);
    Assert(pRecU->Excl.hThread == RTThreadSelf());
    /* > 1 because the final release must go via the ReleaseOwner APIs. */
    AssertReturn(pRecU->Excl.cRecursion > 1, VERR_SEM_LV_INVALID_PARAMETER);

    /*
     * Check the release order.
     */
    if (   pRecU->Excl.hClass != NIL_RTLOCKVALCLASS
        && pRecU->Excl.hClass->fStrictReleaseOrder
        && pRecU->Excl.hClass->cMsMinOrder != RT_INDEFINITE_WAIT
       )
    {
        int rc = rtLockValidatorStackCheckReleaseOrder(pRecU->Excl.hThread, pRecU);
        if (RT_FAILURE(rc))
            return rc;
    }

    /*
     * Perform the unwind.
     */
    pRecU->Excl.cRecursion--;
    rtLockValidatorStackPopRecursion(pRecU->Excl.hThread, pRecU);
    return VINF_SUCCESS;
}
3194
3195
/**
 * Records a mixed recursion: re-entering an owned exclusive lock via another
 * (shared or exclusive) record.  pRecMixed is only validated here; the
 * recursion is accounted on pRec.
 *
 * @returns VINF_SUCCESS, VERR_SEM_LV_NESTED, or
 *          VERR_SEM_LV_INVALID_PARAMETER.
 */
RTDECL(int) RTLockValidatorRecExclRecursionMixed(PRTLOCKVALRECEXCL pRec, PRTLOCKVALRECCORE pRecMixed, PCRTLOCKVALSRCPOS pSrcPos)
{
    PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
    AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    PRTLOCKVALRECUNION pRecMixedU = (PRTLOCKVALRECUNION)pRecMixed;
    AssertReturn(   pRecMixedU->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
                 || pRecMixedU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC
                 , VERR_SEM_LV_INVALID_PARAMETER);
    if (!pRecU->Excl.fEnabled)
        return VINF_SUCCESS;
    Assert(pRecU->Excl.hThread == RTThreadSelf());
    AssertReturn(pRecU->Excl.hThread != NIL_RTTHREAD, VERR_SEM_LV_INVALID_PARAMETER);
    AssertReturn(pRecU->Excl.cRecursion > 0, VERR_SEM_LV_INVALID_PARAMETER);

    if (   pRecU->Excl.hClass != NIL_RTLOCKVALCLASS
        && !pRecU->Excl.hClass->fRecursionOk)
    {
        rtLockValComplainFirst("Mixed recursion not allowed by the class!",
                               pSrcPos, pRecU->Excl.hThread, (PRTLOCKVALRECUNION)pRec, true);
        rtLockValComplainPanic();
        return VERR_SEM_LV_NESTED;
    }

    /* Sanity-bound the recursion depth before bumping it. */
    Assert(pRecU->Excl.cRecursion < _1M);
    pRecU->Excl.cRecursion++;
    rtLockValidatorStackPushRecursion(pRecU->Excl.hThread, pRecU, pSrcPos);

    return VINF_SUCCESS;
}
3225
3226
/**
 * Unwinds a mixed recursion recorded by RTLockValidatorRecExclRecursionMixed.
 *
 * @returns VINF_SUCCESS, VERR_SEM_LV_INVALID_PARAMETER, or a release-order
 *          failure status.
 */
RTDECL(int) RTLockValidatorRecExclUnwindMixed(PRTLOCKVALRECEXCL pRec, PRTLOCKVALRECCORE pRecMixed)
{
    PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
    AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    PRTLOCKVALRECUNION pRecMixedU = (PRTLOCKVALRECUNION)pRecMixed;
    AssertReturn(   pRecMixedU->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
                 || pRecMixedU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC
                 , VERR_SEM_LV_INVALID_PARAMETER);
    if (!pRecU->Excl.fEnabled)
        return VINF_SUCCESS;
    Assert(pRecU->Excl.hThread == RTThreadSelf());
    AssertReturn(pRecU->Excl.hThread != NIL_RTTHREAD, VERR_SEM_LV_INVALID_PARAMETER);
    /* > 1 because the final release must go via the ReleaseOwner APIs. */
    AssertReturn(pRecU->Excl.cRecursion > 1, VERR_SEM_LV_INVALID_PARAMETER);

    /*
     * Check the release order.
     */
    if (   pRecU->Excl.hClass != NIL_RTLOCKVALCLASS
        && pRecU->Excl.hClass->fStrictReleaseOrder
        && pRecU->Excl.hClass->cMsMinOrder != RT_INDEFINITE_WAIT
       )
    {
        int rc = rtLockValidatorStackCheckReleaseOrder(pRecU->Excl.hThread, pRecU);
        if (RT_FAILURE(rc))
            return rc;
    }

    /*
     * Perform the unwind.
     */
    pRecU->Excl.cRecursion--;
    rtLockValidatorStackPopRecursion(pRecU->Excl.hThread, pRecU);
    return VINF_SUCCESS;
}
3261
3262
/**
 * Checks the locking order before blocking on an exclusive lock.
 *
 * @returns VINF_SUCCESS when validation is disabled/not applicable or the
 *          order is fine; otherwise a status from
 *          rtLockValidatorStackCheckLockingOrder.
 * @param   hThreadSelf     The calling thread, NIL_RTTHREAD to auto-adopt.
 * @param   cMillies        The intended wait; waits shorter than the class'
 *                          cMsMinOrder are exempt from order checking.
 */
RTDECL(int) RTLockValidatorRecExclCheckOrder(PRTLOCKVALRECEXCL pRec, RTTHREAD hThreadSelf,
                                             PCRTLOCKVALSRCPOS pSrcPos, RTMSINTERVAL cMillies)
{
    /*
     * Validate and adjust input.  Quit early if order validation is disabled.
     */
    PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
    if (!pRecU)
        return VINF_SUCCESS;
    AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    if (   !pRecU->Excl.fEnabled
        || pRecU->Excl.hClass == NIL_RTLOCKVALCLASS
        || pRecU->Excl.hClass->cMsMinOrder == RT_INDEFINITE_WAIT
        || pRecU->Excl.hClass->cMsMinOrder > cMillies)
        return VINF_SUCCESS;

    if (hThreadSelf == NIL_RTTHREAD)
    {
        hThreadSelf = RTThreadSelfAutoAdopt();
        AssertReturn(hThreadSelf != NIL_RTTHREAD, VERR_SEM_LV_INTERNAL_ERROR);
    }
    AssertReturn(hThreadSelf->u32Magic == RTTHREADINT_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    Assert(hThreadSelf == RTThreadSelf());

    /*
     * Detect recursion as it isn't subject to order restrictions.
     * NOTE(review): plain (non-atomic) read of hThread here, unlike the
     * rtLockValidatorReadThreadHandle() used in CheckBlocking - presumably
     * fine since only the owner matches itself; confirm intent.
     */
    if (pRec->hThread == hThreadSelf)
        return VINF_SUCCESS;

    return rtLockValidatorStackCheckLockingOrder(pRecU->Excl.hClass, pRecU->Excl.uSubClass, hThreadSelf, pRecU, pSrcPos);
}
3295
3296
3297RTDECL(int) RTLockValidatorRecExclCheckBlocking(PRTLOCKVALRECEXCL pRec, RTTHREAD hThreadSelf,
3298 PCRTLOCKVALSRCPOS pSrcPos, bool fRecursiveOk, RTMSINTERVAL cMillies,
3299 RTTHREADSTATE enmSleepState, bool fReallySleeping)
3300{
3301 /*
3302 * Fend off wild life.
3303 */
3304 PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
3305 if (!pRecU)
3306 return VINF_SUCCESS;
3307 AssertPtrReturn(pRecU, VERR_SEM_LV_INVALID_PARAMETER);
3308 AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
3309 if (!pRec->fEnabled)
3310 return VINF_SUCCESS;
3311
3312 PRTTHREADINT pThreadSelf = hThreadSelf;
3313 AssertPtrReturn(pThreadSelf, VERR_SEM_LV_INVALID_PARAMETER);
3314 AssertReturn(pThreadSelf->u32Magic == RTTHREADINT_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
3315 Assert(pThreadSelf == RTThreadSelf());
3316
3317 AssertReturn(RTTHREAD_IS_SLEEPING(enmSleepState), VERR_SEM_LV_INVALID_PARAMETER);
3318
3319 RTTHREADSTATE enmThreadState = rtThreadGetState(pThreadSelf);
3320 if (RT_UNLIKELY(enmThreadState != RTTHREADSTATE_RUNNING))
3321 {
3322 AssertReturn( enmThreadState == RTTHREADSTATE_TERMINATED /* rtThreadRemove uses locks too */
3323 || enmThreadState == RTTHREADSTATE_INITIALIZING /* rtThreadInsert uses locks too */
3324 , VERR_SEM_LV_INVALID_PARAMETER);
3325 enmSleepState = enmThreadState;
3326 }
3327
3328 /*
3329 * Record the location.
3330 */
3331 rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pRec, pRecU);
3332 rtLockValidatorSrcPosCopy(&pThreadSelf->LockValidator.SrcPos, pSrcPos);
3333 ASMAtomicWriteBool(&pThreadSelf->LockValidator.fInValidator, true);
3334 pThreadSelf->LockValidator.enmRecState = enmSleepState;
3335 rtThreadSetState(pThreadSelf, enmSleepState);
3336
3337 /*
3338 * Don't do deadlock detection if we're recursing.
3339 *
3340 * On some hosts we don't do recursion accounting our selves and there
3341 * isn't any other place to check for this.
3342 */
3343 int rc = VINF_SUCCESS;
3344 if (rtLockValidatorReadThreadHandle(&pRecU->Excl.hThread) == pThreadSelf)
3345 {
3346 if ( !fRecursiveOk
3347 || ( pRecU->Excl.hClass != NIL_RTLOCKVALCLASS
3348 && !pRecU->Excl.hClass->fRecursionOk))
3349 {
3350 rtLockValComplainFirst("Recursion not allowed!", pSrcPos, pThreadSelf, pRecU, true);
3351 rtLockValComplainPanic();
3352 rc = VERR_SEM_LV_NESTED;
3353 }
3354 }
3355 /*
3356 * Perform deadlock detection.
3357 */
3358 else if ( pRecU->Excl.hClass != NIL_RTLOCKVALCLASS
3359 && ( pRecU->Excl.hClass->cMsMinDeadlock > cMillies
3360 || pRecU->Excl.hClass->cMsMinDeadlock > RT_INDEFINITE_WAIT))
3361 rc = VINF_SUCCESS;
3362 else if (!rtLockValidatorIsSimpleNoDeadlockCase(pRecU))
3363 rc = rtLockValidatorDeadlockDetection(pRecU, pThreadSelf, pSrcPos);
3364
3365 if (RT_SUCCESS(rc))
3366 ASMAtomicWriteBool(&pThreadSelf->fReallySleeping, fReallySleeping);
3367 else
3368 {
3369 rtThreadSetState(pThreadSelf, enmThreadState);
3370 rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pRec, NULL);
3371 }
3372 ASMAtomicWriteBool(&pThreadSelf->LockValidator.fInValidator, false);
3373 return rc;
3374}
3375RT_EXPORT_SYMBOL(RTLockValidatorRecExclCheckBlocking);
3376
3377
3378RTDECL(int) RTLockValidatorRecExclCheckOrderAndBlocking(PRTLOCKVALRECEXCL pRec, RTTHREAD hThreadSelf,
3379 PCRTLOCKVALSRCPOS pSrcPos, bool fRecursiveOk, RTMSINTERVAL cMillies,
3380 RTTHREADSTATE enmSleepState, bool fReallySleeping)
3381{
3382 int rc = RTLockValidatorRecExclCheckOrder(pRec, hThreadSelf, pSrcPos, cMillies);
3383 if (RT_SUCCESS(rc))
3384 rc = RTLockValidatorRecExclCheckBlocking(pRec, hThreadSelf, pSrcPos, fRecursiveOk, cMillies,
3385 enmSleepState, fReallySleeping);
3386 return rc;
3387}
3388RT_EXPORT_SYMBOL(RTLockValidatorRecExclCheckOrderAndBlocking);
3389
3390
3391RTDECL(void) RTLockValidatorRecSharedInitV(PRTLOCKVALRECSHRD pRec, RTLOCKVALCLASS hClass, uint32_t uSubClass,
3392 void *hLock, bool fSignaller, bool fEnabled, const char *pszNameFmt, va_list va)
3393{
3394 RTLOCKVAL_ASSERT_PTR_ALIGN(pRec);
3395 RTLOCKVAL_ASSERT_PTR_ALIGN(hLock);
3396 Assert( uSubClass >= RTLOCKVAL_SUB_CLASS_USER
3397 || uSubClass == RTLOCKVAL_SUB_CLASS_NONE
3398 || uSubClass == RTLOCKVAL_SUB_CLASS_ANY);
3399
3400 pRec->Core.u32Magic = RTLOCKVALRECSHRD_MAGIC;
3401 pRec->uSubClass = uSubClass;
3402 pRec->hClass = rtLockValidatorClassValidateAndRetain(hClass);
3403 pRec->hLock = hLock;
3404 pRec->fEnabled = fEnabled && RTLockValidatorIsEnabled();
3405 pRec->fSignaller = fSignaller;
3406 pRec->pSibling = NULL;
3407
3408 /* the table */
3409 pRec->cEntries = 0;
3410 pRec->iLastEntry = 0;
3411 pRec->cAllocated = 0;
3412 pRec->fReallocating = false;
3413 pRec->fPadding = false;
3414 pRec->papOwners = NULL;
3415
3416 /* the name */
3417 if (pszNameFmt)
3418 RTStrPrintfV(pRec->szName, sizeof(pRec->szName), pszNameFmt, va);
3419 else
3420 {
3421 static uint32_t volatile s_cAnonymous = 0;
3422 uint32_t i = ASMAtomicIncU32(&s_cAnonymous) - 1;
3423 RTStrPrintf(pRec->szName, sizeof(pRec->szName), "anon-shrd-%u", i);
3424 }
3425}
3426
3427
/**
 * Variadic front-end for RTLockValidatorRecSharedInitV.
 */
RTDECL(void) RTLockValidatorRecSharedInit(PRTLOCKVALRECSHRD pRec, RTLOCKVALCLASS hClass, uint32_t uSubClass,
                                          void *hLock, bool fSignaller, bool fEnabled, const char *pszNameFmt, ...)
{
    va_list va;
    va_start(va, pszNameFmt);
    RTLockValidatorRecSharedInitV(pRec, hClass, uSubClass, hLock, fSignaller, fEnabled, pszNameFmt, va);
    va_end(va);
}
3436
3437
3438RTDECL(void) RTLockValidatorRecSharedDelete(PRTLOCKVALRECSHRD pRec)
3439{
3440 Assert(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC);
3441
3442 /*
3443 * Flip it into table realloc mode and take the destruction lock.
3444 */
3445 rtLockValidatorSerializeDestructEnter();
3446 while (!ASMAtomicCmpXchgBool(&pRec->fReallocating, true, false))
3447 {
3448 rtLockValidatorSerializeDestructLeave();
3449
3450 rtLockValidatorSerializeDetectionEnter();
3451 rtLockValidatorSerializeDetectionLeave();
3452
3453 rtLockValidatorSerializeDestructEnter();
3454 }
3455
3456 ASMAtomicWriteU32(&pRec->Core.u32Magic, RTLOCKVALRECSHRD_MAGIC_DEAD);
3457 RTLOCKVALCLASS hClass;
3458 ASMAtomicXchgHandle(&pRec->hClass, NIL_RTLOCKVALCLASS, &hClass);
3459 if (pRec->papOwners)
3460 {
3461 PRTLOCKVALRECSHRDOWN volatile *papOwners = pRec->papOwners;
3462 ASMAtomicUoWriteNullPtr(&pRec->papOwners);
3463 ASMAtomicUoWriteU32(&pRec->cAllocated, 0);
3464
3465 RTMemFree((void *)pRec->papOwners);
3466 }
3467 if (pRec->pSibling)
3468 rtLockValidatorUnlinkAllSiblings(&pRec->Core);
3469 ASMAtomicWriteBool(&pRec->fReallocating, false);
3470
3471 rtLockValidatorSerializeDestructLeave();
3472
3473 if (hClass != NIL_RTLOCKVALCLASS)
3474 RTLockValidatorClassRelease(hClass);
3475}
3476
3477
/**
 * Atomically replaces a shared record's sub-class value.
 *
 * @returns The previous sub-class, or RTLOCKVAL_SUB_CLASS_INVALID on bad input.
 */
RTDECL(uint32_t) RTLockValidatorRecSharedSetSubClass(PRTLOCKVALRECSHRD pRec, uint32_t uSubClass)
{
    AssertPtrReturn(pRec, RTLOCKVAL_SUB_CLASS_INVALID);
    AssertReturn(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, RTLOCKVAL_SUB_CLASS_INVALID);
    AssertReturn(   uSubClass >= RTLOCKVAL_SUB_CLASS_USER
                 || uSubClass == RTLOCKVAL_SUB_CLASS_NONE
                 || uSubClass == RTLOCKVAL_SUB_CLASS_ANY,
                 RTLOCKVAL_SUB_CLASS_INVALID);
    return ASMAtomicXchgU32(&pRec->uSubClass, uSubClass);
}
3488
3489
3490/**
3491 * Locates an owner (thread) in a shared lock record.
3492 *
3493 * @returns Pointer to the owner entry on success, NULL on failure..
3494 * @param pShared The shared lock record.
3495 * @param hThread The thread (owner) to find.
3496 * @param piEntry Where to optionally return the table in index.
3497 * Optional.
3498 */
3499DECLINLINE(PRTLOCKVALRECUNION)
3500rtLockValidatorRecSharedFindOwner(PRTLOCKVALRECSHRD pShared, RTTHREAD hThread, uint32_t *piEntry)
3501{
3502 rtLockValidatorSerializeDetectionEnter();
3503
3504 PRTLOCKVALRECSHRDOWN volatile *papOwners = pShared->papOwners;
3505 if (papOwners)
3506 {
3507 uint32_t const cMax = pShared->cAllocated;
3508 for (uint32_t iEntry = 0; iEntry < cMax; iEntry++)
3509 {
3510 PRTLOCKVALRECUNION pEntry = (PRTLOCKVALRECUNION)rtLockValidatorUoReadSharedOwner(&papOwners[iEntry]);
3511 if (pEntry && pEntry->ShrdOwner.hThread == hThread)
3512 {
3513 rtLockValidatorSerializeDetectionLeave();
3514 if (piEntry)
3515 *piEntry = iEntry;
3516 return pEntry;
3517 }
3518 }
3519 }
3520
3521 rtLockValidatorSerializeDetectionLeave();
3522 return NULL;
3523}
3524
3525
/**
 * Checks the locking order before blocking on a shared lock.
 *
 * @returns VINF_SUCCESS when validation is disabled/not applicable or the
 *          order is fine; otherwise a status from
 *          rtLockValidatorStackCheckLockingOrder.
 *
 * NOTE(review): unlike RTLockValidatorRecExclCheckOrder this variant does not
 * tolerate a NULL pRec - confirm callers never pass NULL here.
 */
RTDECL(int) RTLockValidatorRecSharedCheckOrder(PRTLOCKVALRECSHRD pRec, RTTHREAD hThreadSelf,
                                               PCRTLOCKVALSRCPOS pSrcPos, RTMSINTERVAL cMillies)
{
    /*
     * Validate and adjust input.  Quit early if order validation is disabled.
     */
    PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
    AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    if (   !pRecU->Shared.fEnabled
        || pRecU->Shared.hClass == NIL_RTLOCKVALCLASS
        || pRecU->Shared.hClass->cMsMinOrder == RT_INDEFINITE_WAIT
        || pRecU->Shared.hClass->cMsMinOrder > cMillies
       )
        return VINF_SUCCESS;

    if (hThreadSelf == NIL_RTTHREAD)
    {
        /* Adopt alien (non-IPRT) threads on the fly. */
        hThreadSelf = RTThreadSelfAutoAdopt();
        AssertReturn(hThreadSelf != NIL_RTTHREAD, VERR_SEM_LV_INTERNAL_ERROR);
    }
    AssertReturn(hThreadSelf->u32Magic == RTTHREADINT_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    Assert(hThreadSelf == RTThreadSelf());

    /*
     * Detect recursion as it isn't subject to order restrictions.
     */
    PRTLOCKVALRECUNION pEntry = rtLockValidatorRecSharedFindOwner(&pRecU->Shared, hThreadSelf, NULL);
    if (pEntry)
        return VINF_SUCCESS;

    return rtLockValidatorStackCheckLockingOrder(pRecU->Shared.hClass, pRecU->Shared.uSubClass, hThreadSelf, pRecU, pSrcPos);
}
3558
3559
/**
 * Validates that the calling thread may block on the shared lock and runs
 * deadlock detection.
 *
 * Mirrors RTLockValidatorRecExclCheckBlocking: records the blocking location
 * on the thread, enters the sleep state, then flags illegal recursion or
 * performs deadlock detection; on failure the thread state is rolled back.
 *
 * @returns VINF_SUCCESS, VERR_SEM_LV_NESTED, a deadlock status, or
 *          VERR_SEM_LV_INVALID_PARAMETER.
 */
RTDECL(int) RTLockValidatorRecSharedCheckBlocking(PRTLOCKVALRECSHRD pRec, RTTHREAD hThreadSelf,
                                                  PCRTLOCKVALSRCPOS pSrcPos, bool fRecursiveOk, RTMSINTERVAL cMillies,
                                                  RTTHREADSTATE enmSleepState, bool fReallySleeping)
{
    /*
     * Fend off wild life.
     */
    PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
    AssertPtrReturn(pRecU, VERR_SEM_LV_INVALID_PARAMETER);
    AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    if (!pRecU->Shared.fEnabled)
        return VINF_SUCCESS;

    PRTTHREADINT pThreadSelf = hThreadSelf;
    AssertPtrReturn(pThreadSelf, VERR_SEM_LV_INVALID_PARAMETER);
    AssertReturn(pThreadSelf->u32Magic == RTTHREADINT_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    Assert(pThreadSelf == RTThreadSelf());

    AssertReturn(RTTHREAD_IS_SLEEPING(enmSleepState), VERR_SEM_LV_INVALID_PARAMETER);

    RTTHREADSTATE enmThreadState = rtThreadGetState(pThreadSelf);
    if (RT_UNLIKELY(enmThreadState != RTTHREADSTATE_RUNNING))
    {
        AssertReturn(   enmThreadState == RTTHREADSTATE_TERMINATED   /* rtThreadRemove uses locks too */
                     || enmThreadState == RTTHREADSTATE_INITIALIZING /* rtThreadInsert uses locks too */
                     , VERR_SEM_LV_INVALID_PARAMETER);
        enmSleepState = enmThreadState;
    }

    /*
     * Record the location.
     */
    rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pRec, pRecU);
    rtLockValidatorSrcPosCopy(&pThreadSelf->LockValidator.SrcPos, pSrcPos);
    ASMAtomicWriteBool(&pThreadSelf->LockValidator.fInValidator, true);
    pThreadSelf->LockValidator.enmRecState = enmSleepState;
    rtThreadSetState(pThreadSelf, enmSleepState);

    /*
     * Don't do deadlock detection if we're recursing.
     * (Signaller records have no meaningful owner entry for the waiter.)
     */
    int rc = VINF_SUCCESS;
    PRTLOCKVALRECUNION pEntry = !pRecU->Shared.fSignaller
                              ? rtLockValidatorRecSharedFindOwner(&pRecU->Shared, pThreadSelf, NULL)
                              : NULL;
    if (pEntry)
    {
        if (   !fRecursiveOk
            || (   pRec->hClass
                && !pRec->hClass->fRecursionOk)
            )
        {
            rtLockValComplainFirst("Recursion not allowed!", pSrcPos, pThreadSelf, pRecU, true);
            rtLockValComplainPanic();
            rc =  VERR_SEM_LV_NESTED;
        }
    }
    /*
     * Perform deadlock detection.
     */
    else if (   pRec->hClass
             && (   pRec->hClass->cMsMinDeadlock == RT_INDEFINITE_WAIT
                 || pRec->hClass->cMsMinDeadlock > cMillies))
        rc = VINF_SUCCESS;
    else if (!rtLockValidatorIsSimpleNoDeadlockCase(pRecU))
        rc = rtLockValidatorDeadlockDetection(pRecU, pThreadSelf, pSrcPos);

    if (RT_SUCCESS(rc))
        ASMAtomicWriteBool(&pThreadSelf->fReallySleeping, fReallySleeping);
    else
    {
        /* Roll back the blocking state on failure. */
        rtThreadSetState(pThreadSelf, enmThreadState);
        rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pRec, NULL);
    }
    ASMAtomicWriteBool(&pThreadSelf->LockValidator.fInValidator, false);
    return rc;
}
RT_EXPORT_SYMBOL(RTLockValidatorRecSharedCheckBlocking);
3638
3639
3640RTDECL(int) RTLockValidatorRecSharedCheckOrderAndBlocking(PRTLOCKVALRECSHRD pRec, RTTHREAD hThreadSelf,
3641 PCRTLOCKVALSRCPOS pSrcPos, bool fRecursiveOk, RTMSINTERVAL cMillies,
3642 RTTHREADSTATE enmSleepState, bool fReallySleeping)
3643{
3644 int rc = RTLockValidatorRecSharedCheckOrder(pRec, hThreadSelf, pSrcPos, cMillies);
3645 if (RT_SUCCESS(rc))
3646 rc = RTLockValidatorRecSharedCheckBlocking(pRec, hThreadSelf, pSrcPos, fRecursiveOk, cMillies,
3647 enmSleepState, fReallySleeping);
3648 return rc;
3649}
3650RT_EXPORT_SYMBOL(RTLockValidatorRecSharedCheckOrderAndBlocking);
3651
3652
3653/**
3654 * Allocates and initializes an owner entry for the shared lock record.
3655 *
3656 * @returns The new owner entry.
3657 * @param pRec The shared lock record.
3658 * @param pThreadSelf The calling thread and owner. Used for record
3659 * initialization and allocation.
3660 * @param pSrcPos The source position.
3661 */
3662DECLINLINE(PRTLOCKVALRECUNION)
3663rtLockValidatorRecSharedAllocOwner(PRTLOCKVALRECSHRD pRec, PRTTHREADINT pThreadSelf, PCRTLOCKVALSRCPOS pSrcPos)
3664{
3665 PRTLOCKVALRECUNION pEntry;
3666
3667 /*
3668 * Check if the thread has any statically allocated records we can easily
3669 * make use of.
3670 */
3671 unsigned iEntry = ASMBitFirstSetU32(ASMAtomicUoReadU32(&pThreadSelf->LockValidator.bmFreeShrdOwners));
3672 if ( iEntry > 0
3673 && ASMAtomicBitTestAndClear(&pThreadSelf->LockValidator.bmFreeShrdOwners, iEntry - 1))
3674 {
3675 pEntry = (PRTLOCKVALRECUNION)&pThreadSelf->LockValidator.aShrdOwners[iEntry - 1];
3676 Assert(!pEntry->ShrdOwner.fReserved);
3677 pEntry->ShrdOwner.fStaticAlloc = true;
3678 rtThreadGet(pThreadSelf);
3679 }
3680 else
3681 {
3682 pEntry = (PRTLOCKVALRECUNION)RTMemAlloc(sizeof(RTLOCKVALRECSHRDOWN));
3683 if (RT_UNLIKELY(!pEntry))
3684 return NULL;
3685 pEntry->ShrdOwner.fStaticAlloc = false;
3686 }
3687
3688 pEntry->Core.u32Magic = RTLOCKVALRECSHRDOWN_MAGIC;
3689 pEntry->ShrdOwner.cRecursion = 1;
3690 pEntry->ShrdOwner.fReserved = true;
3691 pEntry->ShrdOwner.hThread = pThreadSelf;
3692 pEntry->ShrdOwner.pDown = NULL;
3693 pEntry->ShrdOwner.pSharedRec = pRec;
3694#if HC_ARCH_BITS == 32
3695 pEntry->ShrdOwner.pvReserved = NULL;
3696#endif
3697 if (pSrcPos)
3698 pEntry->ShrdOwner.SrcPos = *pSrcPos;
3699 else
3700 rtLockValidatorSrcPosInit(&pEntry->ShrdOwner.SrcPos);
3701 return pEntry;
3702}
3703
3704
3705/**
3706 * Frees an owner entry allocated by rtLockValidatorRecSharedAllocOwner.
3707 *
3708 * @param pEntry The owner entry.
3709 */
3710DECLINLINE(void) rtLockValidatorRecSharedFreeOwner(PRTLOCKVALRECSHRDOWN pEntry)
3711{
3712 if (pEntry)
3713 {
3714 Assert(pEntry->Core.u32Magic == RTLOCKVALRECSHRDOWN_MAGIC);
3715 ASMAtomicWriteU32(&pEntry->Core.u32Magic, RTLOCKVALRECSHRDOWN_MAGIC_DEAD);
3716
3717 PRTTHREADINT pThread;
3718 ASMAtomicXchgHandle(&pEntry->hThread, NIL_RTTHREAD, &pThread);
3719
3720 Assert(pEntry->fReserved);
3721 pEntry->fReserved = false;
3722
3723 if (pEntry->fStaticAlloc)
3724 {
3725 AssertPtrReturnVoid(pThread);
3726 AssertReturnVoid(pThread->u32Magic == RTTHREADINT_MAGIC);
3727
3728 uintptr_t iEntry = pEntry - &pThread->LockValidator.aShrdOwners[0];
3729 AssertReleaseReturnVoid(iEntry < RT_ELEMENTS(pThread->LockValidator.aShrdOwners));
3730
3731 Assert(!ASMBitTest(&pThread->LockValidator.bmFreeShrdOwners, (int32_t)iEntry));
3732 ASMAtomicBitSet(&pThread->LockValidator.bmFreeShrdOwners, (int32_t)iEntry);
3733
3734 rtThreadRelease(pThread);
3735 }
3736 else
3737 {
3738 rtLockValidatorSerializeDestructEnter();
3739 rtLockValidatorSerializeDestructLeave();
3740
3741 RTMemFree(pEntry);
3742 }
3743 }
3744}
3745
3746
3747/**
3748 * Make more room in the table.
3749 *
3750 * @retval true on success
3751 * @retval false if we're out of memory or running into a bad race condition
3752 * (probably a bug somewhere). No longer holding the lock.
3753 *
3754 * @param pShared The shared lock record.
3755 */
3756static bool rtLockValidatorRecSharedMakeRoom(PRTLOCKVALRECSHRD pShared)
3757{
3758 for (unsigned i = 0; i < 1000; i++)
3759 {
3760 /*
3761 * Switch to the other data access direction.
3762 */
3763 rtLockValidatorSerializeDetectionLeave();
3764 if (i >= 10)
3765 {
3766 Assert(i != 10 && i != 100);
3767 RTThreadSleep(i >= 100);
3768 }
3769 rtLockValidatorSerializeDestructEnter();
3770
3771 /*
3772 * Try grab the privilege to reallocating the table.
3773 */
3774 if ( pShared->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
3775 && ASMAtomicCmpXchgBool(&pShared->fReallocating, true, false))
3776 {
3777 uint32_t cAllocated = pShared->cAllocated;
3778 if (cAllocated < pShared->cEntries)
3779 {
3780 /*
3781 * Ok, still not enough space. Reallocate the table.
3782 */
3783#if 0 /** @todo enable this after making sure growing works flawlessly. */
3784 uint32_t cInc = RT_ALIGN_32(pShared->cEntries - cAllocated, 16);
3785#else
3786 uint32_t cInc = RT_ALIGN_32(pShared->cEntries - cAllocated, 1);
3787#endif
3788 PRTLOCKVALRECSHRDOWN *papOwners;
3789 papOwners = (PRTLOCKVALRECSHRDOWN *)RTMemRealloc((void *)pShared->papOwners,
3790 (cAllocated + cInc) * sizeof(void *));
3791 if (!papOwners)
3792 {
3793 ASMAtomicWriteBool(&pShared->fReallocating, false);
3794 rtLockValidatorSerializeDestructLeave();
3795 /* RTMemRealloc will assert */
3796 return false;
3797 }
3798
3799 while (cInc-- > 0)
3800 {
3801 papOwners[cAllocated] = NULL;
3802 cAllocated++;
3803 }
3804
3805 ASMAtomicWritePtr(&pShared->papOwners, papOwners);
3806 ASMAtomicWriteU32(&pShared->cAllocated, cAllocated);
3807 }
3808 ASMAtomicWriteBool(&pShared->fReallocating, false);
3809 }
3810 rtLockValidatorSerializeDestructLeave();
3811
3812 rtLockValidatorSerializeDetectionEnter();
3813 if (RT_UNLIKELY(pShared->Core.u32Magic != RTLOCKVALRECSHRD_MAGIC))
3814 break;
3815
3816 if (pShared->cAllocated >= pShared->cEntries)
3817 return true;
3818 }
3819
3820 rtLockValidatorSerializeDetectionLeave();
3821 AssertFailed(); /* too many iterations or destroyed while racing. */
3822 return false;
3823}
3824
3825
3826/**
3827 * Adds an owner entry to a shared lock record.
3828 *
3829 * @returns true on success, false on serious race or we're if out of memory.
3830 * @param pShared The shared lock record.
3831 * @param pEntry The owner entry.
3832 */
3833DECLINLINE(bool) rtLockValidatorRecSharedAddOwner(PRTLOCKVALRECSHRD pShared, PRTLOCKVALRECSHRDOWN pEntry)
3834{
3835 rtLockValidatorSerializeDetectionEnter();
3836 if (RT_LIKELY(pShared->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC)) /* paranoia */
3837 {
3838 if ( ASMAtomicIncU32(&pShared->cEntries) > pShared->cAllocated /** @todo add fudge */
3839 && !rtLockValidatorRecSharedMakeRoom(pShared))
3840 return false; /* the worker leave the lock */
3841
3842 PRTLOCKVALRECSHRDOWN volatile *papOwners = pShared->papOwners;
3843 uint32_t const cMax = pShared->cAllocated;
3844 for (unsigned i = 0; i < 100; i++)
3845 {
3846 for (uint32_t iEntry = 0; iEntry < cMax; iEntry++)
3847 {
3848 if (ASMAtomicCmpXchgPtr(&papOwners[iEntry], pEntry, NULL))
3849 {
3850 rtLockValidatorSerializeDetectionLeave();
3851 return true;
3852 }
3853 }
3854 Assert(i != 25);
3855 }
3856 AssertFailed();
3857 }
3858 rtLockValidatorSerializeDetectionLeave();
3859 return false;
3860}
3861
3862
3863/**
3864 * Remove an owner entry from a shared lock record and free it.
3865 *
3866 * @param pShared The shared lock record.
3867 * @param pEntry The owner entry to remove.
3868 * @param iEntry The last known index.
3869 */
3870DECLINLINE(void) rtLockValidatorRecSharedRemoveAndFreeOwner(PRTLOCKVALRECSHRD pShared, PRTLOCKVALRECSHRDOWN pEntry,
3871 uint32_t iEntry)
3872{
3873 /*
3874 * Remove it from the table.
3875 */
3876 rtLockValidatorSerializeDetectionEnter();
3877 AssertReturnVoidStmt(pShared->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, rtLockValidatorSerializeDetectionLeave());
3878 if (RT_UNLIKELY( iEntry >= pShared->cAllocated
3879 || !ASMAtomicCmpXchgPtr(&pShared->papOwners[iEntry], NULL, pEntry)))
3880 {
3881 /* this shouldn't happen yet... */
3882 AssertFailed();
3883 PRTLOCKVALRECSHRDOWN volatile *papOwners = pShared->papOwners;
3884 uint32_t const cMax = pShared->cAllocated;
3885 for (iEntry = 0; iEntry < cMax; iEntry++)
3886 if (ASMAtomicCmpXchgPtr(&papOwners[iEntry], NULL, pEntry))
3887 break;
3888 AssertReturnVoidStmt(iEntry < cMax, rtLockValidatorSerializeDetectionLeave());
3889 }
3890 uint32_t cNow = ASMAtomicDecU32(&pShared->cEntries);
3891 Assert(!(cNow & RT_BIT_32(31))); NOREF(cNow);
3892 rtLockValidatorSerializeDetectionLeave();
3893
3894 /*
3895 * Successfully removed, now free it.
3896 */
3897 rtLockValidatorRecSharedFreeOwner(pEntry);
3898}
3899
3900
/**
 * Resets the owner table of a signaller record, optionally installing a
 * single new owner afterwards.
 *
 * All current owner entries are detached and freed.  If @a hThread is not
 * NIL_RTTHREAD, a fresh owner entry for that thread is then allocated and
 * inserted.  Only valid for records with fSignaller set (asserted).
 *
 * @param   pRec        The shared (signaller) lock record.
 * @param   hThread     The new owner thread, or NIL_RTTHREAD to just clear
 *                      the table.
 * @param   pSrcPos     The source position of the caller (optional).
 */
RTDECL(void) RTLockValidatorRecSharedResetOwner(PRTLOCKVALRECSHRD pRec, RTTHREAD hThread, PCRTLOCKVALSRCPOS pSrcPos)
{
    AssertReturnVoid(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC);
    if (!pRec->fEnabled)
        return;
    AssertReturnVoid(hThread == NIL_RTTHREAD || hThread->u32Magic == RTTHREADINT_MAGIC);
    AssertReturnVoid(pRec->fSignaller);

    /*
     * Free all current owners.
     */
    rtLockValidatorSerializeDetectionEnter();
    while (ASMAtomicUoReadU32(&pRec->cEntries) > 0)
    {
        AssertReturnVoidStmt(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, rtLockValidatorSerializeDetectionLeave());
        uint32_t                       iEntry     = 0;
        uint32_t                       cEntries   = pRec->cAllocated;
        PRTLOCKVALRECSHRDOWN volatile *papEntries = pRec->papOwners;
        while (iEntry < cEntries)
        {
            /* Detach the slot atomically so nobody else can reach the entry. */
            PRTLOCKVALRECSHRDOWN pEntry = ASMAtomicXchgPtrT(&papEntries[iEntry], NULL, PRTLOCKVALRECSHRDOWN);
            if (pEntry)
            {
                ASMAtomicDecU32(&pRec->cEntries);
                /* Drop the detection lock around the free call. */
                rtLockValidatorSerializeDetectionLeave();

                rtLockValidatorRecSharedFreeOwner(pEntry);

                rtLockValidatorSerializeDetectionEnter();
                if (ASMAtomicUoReadU32(&pRec->cEntries) == 0)
                    break;
                /* The table may have changed while we were outside the lock;
                   re-read size and pointer. */
                cEntries   = pRec->cAllocated;
                papEntries = pRec->papOwners;
            }
            iEntry++;
        }
    }
    rtLockValidatorSerializeDetectionLeave();

    if (hThread != NIL_RTTHREAD)
    {
        /*
         * Allocate a new owner entry and insert it into the table.
         */
        PRTLOCKVALRECUNION pEntry = rtLockValidatorRecSharedAllocOwner(pRec, hThread, pSrcPos);
        if (   pEntry
            && !rtLockValidatorRecSharedAddOwner(pRec, &pEntry->ShrdOwner))
            rtLockValidatorRecSharedFreeOwner(&pEntry->ShrdOwner);
    }
}
RT_EXPORT_SYMBOL(RTLockValidatorRecSharedResetOwner);
3952
3953
/**
 * Adds the specified thread as an owner of the shared lock record.
 *
 * If the thread already has an owner entry, its recursion counter is bumped
 * instead of adding a second table entry.
 *
 * @param   pRec        The shared lock record.
 * @param   hThread     The thread to add as owner, NIL_RTTHREAD for the
 *                      calling thread (auto-adopted if necessary).
 * @param   pSrcPos     The source position of the caller (optional).
 */
RTDECL(void) RTLockValidatorRecSharedAddOwner(PRTLOCKVALRECSHRD pRec, RTTHREAD hThread, PCRTLOCKVALSRCPOS pSrcPos)
{
    AssertReturnVoid(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC);
    if (!pRec->fEnabled)
        return;
    if (hThread == NIL_RTTHREAD)
    {
        hThread = RTThreadSelfAutoAdopt();
        AssertReturnVoid(hThread != NIL_RTTHREAD);
    }
    AssertReturnVoid(hThread->u32Magic == RTTHREADINT_MAGIC);

    /*
     * Recursive?
     *
     * Note! This code can be optimized to try avoid scanning the table on
     *       insert. However, that's annoying work that makes the code big,
     *       so it can wait til later sometime.
     */
    PRTLOCKVALRECUNION pEntry = rtLockValidatorRecSharedFindOwner(pRec, hThread, NULL);
    if (pEntry)
    {
        Assert(!pRec->fSignaller); /* recursion is not expected for signaller records */
        pEntry->ShrdOwner.cRecursion++;
        rtLockValidatorStackPushRecursion(hThread, pEntry, pSrcPos);
        return;
    }

    /*
     * Allocate a new owner entry and insert it into the table.
     */
    pEntry = rtLockValidatorRecSharedAllocOwner(pRec, hThread, pSrcPos);
    if (pEntry)
    {
        if (rtLockValidatorRecSharedAddOwner(pRec, &pEntry->ShrdOwner))
        {
            /* Signaller records are not pushed onto the thread's lock stack. */
            if (!pRec->fSignaller)
                rtLockValidatorStackPush(hThread, pEntry);
        }
        else
            rtLockValidatorRecSharedFreeOwner(&pEntry->ShrdOwner);
    }
}
RT_EXPORT_SYMBOL(RTLockValidatorRecSharedAddOwner);
3998
3999
/**
 * Removes one level of ownership from the shared lock record for the given
 * thread, freeing the owner entry when the last recursion level is undone.
 *
 * @param   pRec        The shared lock record.
 * @param   hThread     The owner thread, NIL_RTTHREAD for the calling thread
 *                      (auto-adopted if necessary).
 */
RTDECL(void) RTLockValidatorRecSharedRemoveOwner(PRTLOCKVALRECSHRD pRec, RTTHREAD hThread)
{
    AssertReturnVoid(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC);
    if (!pRec->fEnabled)
        return;
    if (hThread == NIL_RTTHREAD)
    {
        hThread = RTThreadSelfAutoAdopt();
        AssertReturnVoid(hThread != NIL_RTTHREAD);
    }
    AssertReturnVoid(hThread->u32Magic == RTTHREADINT_MAGIC);

    /*
     * Find the owner entry; the thread must be in the table (asserted).
     */
    uint32_t iEntry = UINT32_MAX; /* shuts up gcc */
    PRTLOCKVALRECUNION pEntry = rtLockValidatorRecSharedFindOwner(pRec, hThread, &iEntry);
    AssertReturnVoid(pEntry);
    AssertReturnVoid(pEntry->ShrdOwner.cRecursion > 0);

    /* Unwind one recursion level; drop the entry entirely when it hits zero. */
    uint32_t c = --pEntry->ShrdOwner.cRecursion;
    if (c == 0)
    {
        if (!pRec->fSignaller)
            rtLockValidatorStackPop(hThread, (PRTLOCKVALRECUNION)pEntry);
        rtLockValidatorRecSharedRemoveAndFreeOwner(pRec, &pEntry->ShrdOwner, iEntry);
    }
    else
    {
        Assert(!pRec->fSignaller);
        rtLockValidatorStackPopRecursion(hThread, pEntry);
    }
}
RT_EXPORT_SYMBOL(RTLockValidatorRecSharedRemoveOwner);
4034
4035
4036RTDECL(bool) RTLockValidatorRecSharedIsOwner(PRTLOCKVALRECSHRD pRec, RTTHREAD hThread)
4037{
4038 /* Validate and resolve input. */
4039 AssertReturn(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, false);
4040 if (!pRec->fEnabled)
4041 return false;
4042 if (hThread == NIL_RTTHREAD)
4043 {
4044 hThread = RTThreadSelfAutoAdopt();
4045 AssertReturn(hThread != NIL_RTTHREAD, false);
4046 }
4047 AssertReturn(hThread->u32Magic == RTTHREADINT_MAGIC, false);
4048
4049 /* Do the job. */
4050 PRTLOCKVALRECUNION pEntry = rtLockValidatorRecSharedFindOwner(pRec, hThread, NULL);
4051 return pEntry != NULL;
4052}
4053RT_EXPORT_SYMBOL(RTLockValidatorRecSharedIsOwner);
4054
4055
/**
 * Verifies that the calling thread owns the shared lock record and records
 * the release of one level of ownership.
 *
 * @retval  VINF_SUCCESS on success (or when the validator is disabled for
 *          the record).
 * @retval  VERR_SEM_LV_NOT_OWNER if the thread has no owner entry.
 * @retval  VERR_SEM_LV_INVALID_PARAMETER on bad record or thread magic.
 * @retval  VERR_SEM_LV_INTERNAL_ERROR if self-adoption failed.
 *          Other failures may be propagated from the release order check.
 *
 * @param   pRec            The shared lock record.
 * @param   hThreadSelf     The calling thread, NIL_RTTHREAD to resolve it
 *                          here (auto-adopted if necessary).
 */
RTDECL(int) RTLockValidatorRecSharedCheckAndRelease(PRTLOCKVALRECSHRD pRec, RTTHREAD hThreadSelf)
{
    AssertReturn(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    if (!pRec->fEnabled)
        return VINF_SUCCESS;
    if (hThreadSelf == NIL_RTTHREAD)
    {
        hThreadSelf = RTThreadSelfAutoAdopt();
        AssertReturn(hThreadSelf != NIL_RTTHREAD, VERR_SEM_LV_INTERNAL_ERROR);
    }
    Assert(hThreadSelf == RTThreadSelf());
    AssertReturn(hThreadSelf->u32Magic == RTTHREADINT_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);

    /*
     * Locate the entry for this thread in the table.
     */
    uint32_t iEntry = 0;
    PRTLOCKVALRECUNION pEntry = rtLockValidatorRecSharedFindOwner(pRec, hThreadSelf, &iEntry);
    if (RT_UNLIKELY(!pEntry))
    {
        rtLockValComplainFirst("Not owner (shared)!", NULL, hThreadSelf, (PRTLOCKVALRECUNION)pRec, true);
        rtLockValComplainPanic();
        return VERR_SEM_LV_NOT_OWNER;
    }

    /*
     * Check the release order, if the lock class requests strict ordering.
     */
    if (   pRec->hClass != NIL_RTLOCKVALCLASS
        && pRec->hClass->fStrictReleaseOrder
        && pRec->hClass->cMsMinOrder != RT_INDEFINITE_WAIT
       )
    {
        int rc = rtLockValidatorStackCheckReleaseOrder(hThreadSelf, (PRTLOCKVALRECUNION)pEntry);
        if (RT_FAILURE(rc))
            return rc;
    }

    /*
     * Release the ownership or unwind a level of recursion.
     */
    Assert(pEntry->ShrdOwner.cRecursion > 0);
    uint32_t c = --pEntry->ShrdOwner.cRecursion;
    if (c == 0)
    {
        /* Last recursion level: remove from the lock stack and free the entry. */
        rtLockValidatorStackPop(hThreadSelf, pEntry);
        rtLockValidatorRecSharedRemoveAndFreeOwner(pRec, &pEntry->ShrdOwner, iEntry);
    }
    else
        rtLockValidatorStackPopRecursion(hThreadSelf, pEntry);

    return VINF_SUCCESS;
}
4109
4110
4111RTDECL(int) RTLockValidatorRecSharedCheckSignaller(PRTLOCKVALRECSHRD pRec, RTTHREAD hThreadSelf)
4112{
4113 AssertReturn(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
4114 if (!pRec->fEnabled)
4115 return VINF_SUCCESS;
4116 if (hThreadSelf == NIL_RTTHREAD)
4117 {
4118 hThreadSelf = RTThreadSelfAutoAdopt();
4119 AssertReturn(hThreadSelf != NIL_RTTHREAD, VERR_SEM_LV_INTERNAL_ERROR);
4120 }
4121 Assert(hThreadSelf == RTThreadSelf());
4122 AssertReturn(hThreadSelf->u32Magic == RTTHREADINT_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
4123
4124 /*
4125 * Locate the entry for this thread in the table.
4126 */
4127 uint32_t iEntry = 0;
4128 PRTLOCKVALRECUNION pEntry = rtLockValidatorRecSharedFindOwner(pRec, hThreadSelf, &iEntry);
4129 if (RT_UNLIKELY(!pEntry))
4130 {
4131 rtLockValComplainFirst("Invalid signaller!", NULL, hThreadSelf, (PRTLOCKVALRECUNION)pRec, true);
4132 rtLockValComplainPanic();
4133 return VERR_SEM_LV_NOT_SIGNALLER;
4134 }
4135 return VINF_SUCCESS;
4136}
4137
4138
4139RTDECL(int32_t) RTLockValidatorWriteLockGetCount(RTTHREAD Thread)
4140{
4141 if (Thread == NIL_RTTHREAD)
4142 return 0;
4143
4144 PRTTHREADINT pThread = rtThreadGet(Thread);
4145 if (!pThread)
4146 return VERR_INVALID_HANDLE;
4147 int32_t cWriteLocks = ASMAtomicReadS32(&pThread->LockValidator.cWriteLocks);
4148 rtThreadRelease(pThread);
4149 return cWriteLocks;
4150}
4151RT_EXPORT_SYMBOL(RTLockValidatorWriteLockGetCount);
4152
4153
4154RTDECL(void) RTLockValidatorWriteLockInc(RTTHREAD Thread)
4155{
4156 PRTTHREADINT pThread = rtThreadGet(Thread);
4157 AssertReturnVoid(pThread);
4158 ASMAtomicIncS32(&pThread->LockValidator.cWriteLocks);
4159 rtThreadRelease(pThread);
4160}
4161RT_EXPORT_SYMBOL(RTLockValidatorWriteLockInc);
4162
4163
4164RTDECL(void) RTLockValidatorWriteLockDec(RTTHREAD Thread)
4165{
4166 PRTTHREADINT pThread = rtThreadGet(Thread);
4167 AssertReturnVoid(pThread);
4168 ASMAtomicDecS32(&pThread->LockValidator.cWriteLocks);
4169 rtThreadRelease(pThread);
4170}
4171RT_EXPORT_SYMBOL(RTLockValidatorWriteLockDec);
4172
4173
4174RTDECL(int32_t) RTLockValidatorReadLockGetCount(RTTHREAD Thread)
4175{
4176 if (Thread == NIL_RTTHREAD)
4177 return 0;
4178
4179 PRTTHREADINT pThread = rtThreadGet(Thread);
4180 if (!pThread)
4181 return VERR_INVALID_HANDLE;
4182 int32_t cReadLocks = ASMAtomicReadS32(&pThread->LockValidator.cReadLocks);
4183 rtThreadRelease(pThread);
4184 return cReadLocks;
4185}
4186RT_EXPORT_SYMBOL(RTLockValidatorReadLockGetCount);
4187
4188
4189RTDECL(void) RTLockValidatorReadLockInc(RTTHREAD Thread)
4190{
4191 PRTTHREADINT pThread = rtThreadGet(Thread);
4192 Assert(pThread);
4193 ASMAtomicIncS32(&pThread->LockValidator.cReadLocks);
4194 rtThreadRelease(pThread);
4195}
4196RT_EXPORT_SYMBOL(RTLockValidatorReadLockInc);
4197
4198
4199RTDECL(void) RTLockValidatorReadLockDec(RTTHREAD Thread)
4200{
4201 PRTTHREADINT pThread = rtThreadGet(Thread);
4202 Assert(pThread);
4203 ASMAtomicDecS32(&pThread->LockValidator.cReadLocks);
4204 rtThreadRelease(pThread);
4205}
4206RT_EXPORT_SYMBOL(RTLockValidatorReadLockDec);
4207
4208
/**
 * Queries which lock (if any) the given thread is currently blocking on.
 *
 * @returns The opaque lock handle the thread is sleeping on, or NULL if the
 *          thread is not sleeping, woke up while we looked, or the handle
 *          cannot be resolved.
 * @param   hThread     The thread to query.
 */
RTDECL(void *) RTLockValidatorQueryBlocking(RTTHREAD hThread)
{
    void *pvLock = NULL;
    PRTTHREADINT pThread = rtThreadGet(hThread);
    if (pThread)
    {
        RTTHREADSTATE enmState = rtThreadGetState(pThread);
        if (RTTHREAD_IS_SLEEPING(enmState))
        {
            rtLockValidatorSerializeDetectionEnter();

            /* Re-check the state now that we hold the detection lock. */
            enmState = rtThreadGetState(pThread);
            if (RTTHREAD_IS_SLEEPING(enmState))
            {
                PRTLOCKVALRECUNION pRec = rtLockValidatorReadRecUnionPtr(&pThread->LockValidator.pRec);
                if (pRec)
                {
                    switch (pRec->Core.u32Magic)
                    {
                        case RTLOCKVALRECEXCL_MAGIC:
                            pvLock = pRec->Excl.hLock;
                            break;

                        case RTLOCKVALRECSHRDOWN_MAGIC:
                            /* Resolve the owner entry to its shared record. */
                            pRec = (PRTLOCKVALRECUNION)pRec->ShrdOwner.pSharedRec;
                            if (!pRec || pRec->Core.u32Magic != RTLOCKVALRECSHRD_MAGIC)
                                break;
                            /* fall thru */
                        case RTLOCKVALRECSHRD_MAGIC:
                            pvLock = pRec->Shared.hLock;
                            break;
                    }
                    /* Discard the result if the thread state changed under us. */
                    if (RTThreadGetState(pThread) != enmState)
                        pvLock = NULL;
                }
            }

            rtLockValidatorSerializeDetectionLeave();
        }
        rtThreadRelease(pThread);
    }
    return pvLock;
}
RT_EXPORT_SYMBOL(RTLockValidatorQueryBlocking);
4252
4253
4254RTDECL(bool) RTLockValidatorIsBlockedThreadInValidator(RTTHREAD hThread)
4255{
4256 bool fRet = false;
4257 PRTTHREADINT pThread = rtThreadGet(hThread);
4258 if (pThread)
4259 {
4260 fRet = ASMAtomicReadBool(&pThread->LockValidator.fInValidator);
4261 rtThreadRelease(pThread);
4262 }
4263 return fRet;
4264}
4265RT_EXPORT_SYMBOL(RTLockValidatorIsBlockedThreadInValidator);
4266
4267
4268RTDECL(bool) RTLockValidatorHoldsLocksInClass(RTTHREAD hCurrentThread, RTLOCKVALCLASS hClass)
4269{
4270 bool fRet = false;
4271 if (hCurrentThread == NIL_RTTHREAD)
4272 hCurrentThread = RTThreadSelf();
4273 else
4274 Assert(hCurrentThread == RTThreadSelf());
4275 PRTTHREADINT pThread = rtThreadGet(hCurrentThread);
4276 if (pThread)
4277 {
4278 if (hClass != NIL_RTLOCKVALCLASS)
4279 {
4280 PRTLOCKVALRECUNION pCur = rtLockValidatorReadRecUnionPtr(&pThread->LockValidator.pStackTop);
4281 while (VALID_PTR(pCur) && !fRet)
4282 {
4283 switch (pCur->Core.u32Magic)
4284 {
4285 case RTLOCKVALRECEXCL_MAGIC:
4286 fRet = pCur->Excl.hClass == hClass;
4287 pCur = rtLockValidatorReadRecUnionPtr(&pCur->Excl.pDown);
4288 break;
4289 case RTLOCKVALRECSHRDOWN_MAGIC:
4290 fRet = VALID_PTR(pCur->ShrdOwner.pSharedRec)
4291 && pCur->ShrdOwner.pSharedRec->hClass == hClass;
4292 pCur = rtLockValidatorReadRecUnionPtr(&pCur->ShrdOwner.pDown);
4293 break;
4294 case RTLOCKVALRECNEST_MAGIC:
4295 switch (pCur->Nest.pRec->Core.u32Magic)
4296 {
4297 case RTLOCKVALRECEXCL_MAGIC:
4298 fRet = pCur->Nest.pRec->Excl.hClass == hClass;
4299 break;
4300 case RTLOCKVALRECSHRDOWN_MAGIC:
4301 fRet = VALID_PTR(pCur->ShrdOwner.pSharedRec)
4302 && pCur->Nest.pRec->ShrdOwner.pSharedRec->hClass == hClass;
4303 break;
4304 }
4305 pCur = rtLockValidatorReadRecUnionPtr(&pCur->Nest.pDown);
4306 break;
4307 default:
4308 pCur = NULL;
4309 break;
4310 }
4311 }
4312 }
4313
4314 rtThreadRelease(pThread);
4315 }
4316 return fRet;
4317}
4318RT_EXPORT_SYMBOL(RTLockValidatorHoldsLocksInClass);
4319
4320
4321RTDECL(bool) RTLockValidatorHoldsLocksInSubClass(RTTHREAD hCurrentThread, RTLOCKVALCLASS hClass, uint32_t uSubClass)
4322{
4323 bool fRet = false;
4324 if (hCurrentThread == NIL_RTTHREAD)
4325 hCurrentThread = RTThreadSelf();
4326 else
4327 Assert(hCurrentThread == RTThreadSelf());
4328 PRTTHREADINT pThread = rtThreadGet(hCurrentThread);
4329 if (pThread)
4330 {
4331 if (hClass != NIL_RTLOCKVALCLASS)
4332 {
4333 PRTLOCKVALRECUNION pCur = rtLockValidatorReadRecUnionPtr(&pThread->LockValidator.pStackTop);
4334 while (VALID_PTR(pCur) && !fRet)
4335 {
4336 switch (pCur->Core.u32Magic)
4337 {
4338 case RTLOCKVALRECEXCL_MAGIC:
4339 fRet = pCur->Excl.hClass == hClass
4340 && pCur->Excl.uSubClass == uSubClass;
4341 pCur = rtLockValidatorReadRecUnionPtr(&pCur->Excl.pDown);
4342 break;
4343 case RTLOCKVALRECSHRDOWN_MAGIC:
4344 fRet = VALID_PTR(pCur->ShrdOwner.pSharedRec)
4345 && pCur->ShrdOwner.pSharedRec->hClass == hClass
4346 && pCur->ShrdOwner.pSharedRec->uSubClass == uSubClass;
4347 pCur = rtLockValidatorReadRecUnionPtr(&pCur->ShrdOwner.pDown);
4348 break;
4349 case RTLOCKVALRECNEST_MAGIC:
4350 switch (pCur->Nest.pRec->Core.u32Magic)
4351 {
4352 case RTLOCKVALRECEXCL_MAGIC:
4353 fRet = pCur->Nest.pRec->Excl.hClass == hClass
4354 && pCur->Nest.pRec->Excl.uSubClass == uSubClass;
4355 break;
4356 case RTLOCKVALRECSHRDOWN_MAGIC:
4357 fRet = VALID_PTR(pCur->ShrdOwner.pSharedRec)
4358 && pCur->Nest.pRec->ShrdOwner.pSharedRec->hClass == hClass
4359 && pCur->Nest.pRec->ShrdOwner.pSharedRec->uSubClass == uSubClass;
4360 break;
4361 }
4362 pCur = rtLockValidatorReadRecUnionPtr(&pCur->Nest.pDown);
4363 break;
4364 default:
4365 pCur = NULL;
4366 break;
4367 }
4368 }
4369 }
4370
4371 rtThreadRelease(pThread);
4372 }
4373 return fRet;
4374}
4375RT_EXPORT_SYMBOL(RTLockValidatorHoldsLocksInClass);
4376
4377
4378RTDECL(bool) RTLockValidatorSetEnabled(bool fEnabled)
4379{
4380 return ASMAtomicXchgBool(&g_fLockValidatorEnabled, fEnabled);
4381}
4382RT_EXPORT_SYMBOL(RTLockValidatorSetEnabled);
4383
4384
4385RTDECL(bool) RTLockValidatorIsEnabled(void)
4386{
4387 return ASMAtomicUoReadBool(&g_fLockValidatorEnabled);
4388}
4389RT_EXPORT_SYMBOL(RTLockValidatorIsEnabled);
4390
4391
4392RTDECL(bool) RTLockValidatorSetQuiet(bool fQuiet)
4393{
4394 return ASMAtomicXchgBool(&g_fLockValidatorQuiet, fQuiet);
4395}
4396RT_EXPORT_SYMBOL(RTLockValidatorSetQuiet);
4397
4398
4399RTDECL(bool) RTLockValidatorIsQuiet(void)
4400{
4401 return ASMAtomicUoReadBool(&g_fLockValidatorQuiet);
4402}
4403RT_EXPORT_SYMBOL(RTLockValidatorIsQuiet);
4404
4405
4406RTDECL(bool) RTLockValidatorSetMayPanic(bool fMayPanic)
4407{
4408 return ASMAtomicXchgBool(&g_fLockValidatorMayPanic, fMayPanic);
4409}
4410RT_EXPORT_SYMBOL(RTLockValidatorSetMayPanic);
4411
4412
4413RTDECL(bool) RTLockValidatorMayPanic(void)
4414{
4415 return ASMAtomicUoReadBool(&g_fLockValidatorMayPanic);
4416}
4417RT_EXPORT_SYMBOL(RTLockValidatorMayPanic);
4418
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette