VirtualBox

source: vbox/trunk/src/VBox/Runtime/common/misc/lockvalidator.cpp@ 43172

Last change on this file since 43172 was 42225, checked in by vboxsync, 12 years ago

Runtime/lockvalidator: fixed wrong index found by Parfait

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 157.5 KB
Line 
1/* $Id: lockvalidator.cpp 42225 2012-07-19 11:24:10Z vboxsync $ */
2/** @file
3 * IPRT - Lock Validator.
4 */
5
6/*
7 * Copyright (C) 2009-2010 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 */
26
27/*******************************************************************************
28* Header Files *
29*******************************************************************************/
30#include <iprt/lockvalidator.h>
31#include "internal/iprt.h"
32
33#include <iprt/asm.h>
34#include <iprt/assert.h>
35#include <iprt/env.h>
36#include <iprt/err.h>
37#include <iprt/mem.h>
38#include <iprt/once.h>
39#include <iprt/semaphore.h>
40#include <iprt/string.h>
41#include <iprt/thread.h>
42
43#include "internal/lockvalidator.h"
44#include "internal/magics.h"
45#include "internal/strhash.h"
46#include "internal/thread.h"
47
48
49/*******************************************************************************
50* Defined Constants And Macros *
51*******************************************************************************/
52/** Macro that asserts that a pointer is aligned correctly.
53 * Only used when fighting bugs. */
54#if 1
55# define RTLOCKVAL_ASSERT_PTR_ALIGN(p) \
56 AssertMsg(!((uintptr_t)(p) & (sizeof(uintptr_t) - 1)), ("%p\n", (p)));
57#else
58# define RTLOCKVAL_ASSERT_PTR_ALIGN(p) do { } while (0)
59#endif
60
61/** Hashes the class handle (pointer) into an apPriorLocksHash index. */
62#define RTLOCKVALCLASS_HASH(hClass) \
63 ( ((uintptr_t)(hClass) >> 6 ) \
64 % ( RT_SIZEOFMEMB(RTLOCKVALCLASSINT, apPriorLocksHash) \
65 / sizeof(PRTLOCKVALCLASSREF)) )
66
67/** The max value for RTLOCKVALCLASSINT::cRefs. */
68#define RTLOCKVALCLASS_MAX_REFS UINT32_C(0xffff0000)
69/** The max value for RTLOCKVALCLASSREF::cLookups. */
70#define RTLOCKVALCLASSREF_MAX_LOOKUPS UINT32_C(0xfffe0000)
71/** The absolute max value for RTLOCKVALCLASSREF::cLookups at which it will
72 * be set back to RTLOCKVALCLASSREF_MAX_LOOKUPS. */
73#define RTLOCKVALCLASSREF_MAX_LOOKUPS_FIX UINT32_C(0xffff0000)
74
75
76/** @def RTLOCKVAL_WITH_RECURSION_RECORDS
77 * Enable recursion records. */
78#if defined(IN_RING3) || defined(DOXYGEN_RUNNING)
79# define RTLOCKVAL_WITH_RECURSION_RECORDS 1
80#endif
81
82/** @def RTLOCKVAL_WITH_VERBOSE_DUMPS
83 * Enables some extra verbosity in the lock dumping. */
84#if defined(DOXYGEN_RUNNING)
85# define RTLOCKVAL_WITH_VERBOSE_DUMPS
86#endif
87
88/** @def RTLOCKVAL_WITH_CLASS_HASH_STATS
89 * Enables collection prior class hash lookup statistics, dumping them when
90 * complaining about the class. */
91#if defined(DEBUG) || defined(DOXYGEN_RUNNING)
92# define RTLOCKVAL_WITH_CLASS_HASH_STATS
93#endif
94
95
96/*******************************************************************************
97* Structures and Typedefs *
98*******************************************************************************/
99/**
100 * Deadlock detection stack entry.
101 */
102typedef struct RTLOCKVALDDENTRY
103{
104 /** The current record. */
105 PRTLOCKVALRECUNION pRec;
106 /** The current entry number if pRec is a shared one. */
107 uint32_t iEntry;
108 /** The thread state of the thread we followed to get to pFirstSibling.
109 * This is only used for validating a deadlock stack. */
110 RTTHREADSTATE enmState;
111 /** The thread we followed to get to pFirstSibling.
112 * This is only used for validating a deadlock stack. */
113 PRTTHREADINT pThread;
114 /** What pThread is waiting on, i.e. where we entered the circular list of
115 * siblings. This is used for validating a deadlock stack as well as
116 * terminating the sibling walk. */
117 PRTLOCKVALRECUNION pFirstSibling;
118} RTLOCKVALDDENTRY;
119
120
121/**
122 * Deadlock detection stack.
123 */
124typedef struct RTLOCKVALDDSTACK
125{
126 /** The number stack entries. */
127 uint32_t c;
128 /** The stack entries. */
129 RTLOCKVALDDENTRY a[32];
130} RTLOCKVALDDSTACK;
131/** Pointer to a deadlock detection stack. */
132typedef RTLOCKVALDDSTACK *PRTLOCKVALDDSTACK;
133
134
135/**
136 * Reference to another class.
137 */
138typedef struct RTLOCKVALCLASSREF
139{
140 /** The class. */
141 RTLOCKVALCLASS hClass;
142 /** The number of lookups of this class. */
143 uint32_t volatile cLookups;
144 /** Indicates whether the entry was added automatically during order checking
145 * (true) or manually via the API (false). */
146 bool fAutodidacticism;
147 /** Reserved / explicit alignment padding. */
148 bool afReserved[3];
149} RTLOCKVALCLASSREF;
150/** Pointer to a class reference. */
151typedef RTLOCKVALCLASSREF *PRTLOCKVALCLASSREF;
152
153
154/** Pointer to a chunk of class references. */
155typedef struct RTLOCKVALCLASSREFCHUNK *PRTLOCKVALCLASSREFCHUNK;
156/**
157 * Chunk of class references.
158 */
159typedef struct RTLOCKVALCLASSREFCHUNK
160{
161 /** Array of refs. */
162#if 0 /** @todo for testing allocation of new chunks. */
163 RTLOCKVALCLASSREF aRefs[ARCH_BITS == 32 ? 10 : 8];
164#else
165 RTLOCKVALCLASSREF aRefs[2];
166#endif
167 /** Pointer to the next chunk. */
168 PRTLOCKVALCLASSREFCHUNK volatile pNext;
169} RTLOCKVALCLASSREFCHUNK;
170
171
172/**
173 * Lock class.
174 */
175typedef struct RTLOCKVALCLASSINT
176{
177 /** AVL node core. */
178 AVLLU32NODECORE Core;
179 /** Magic value (RTLOCKVALCLASS_MAGIC). */
180 uint32_t volatile u32Magic;
181 /** Reference counter. See RTLOCKVALCLASS_MAX_REFS. */
182 uint32_t volatile cRefs;
183 /** Whether the class is allowed to teach it self new locking order rules. */
184 bool fAutodidact;
185 /** Whether to allow recursion. */
186 bool fRecursionOk;
187 /** Strict release order. */
188 bool fStrictReleaseOrder;
189 /** Whether this class is in the tree. */
190 bool fInTree;
191 /** Donate a reference to the next retainer. This is a hack to make
192 * RTLockValidatorClassCreateUnique work. */
193 bool volatile fDonateRefToNextRetainer;
194 /** Reserved future use / explicit alignment. */
195 bool afReserved[3];
196 /** The minimum wait interval for which we do deadlock detection
197 * (milliseconds). */
198 RTMSINTERVAL cMsMinDeadlock;
199 /** The minimum wait interval for which we do order checks (milliseconds). */
200 RTMSINTERVAL cMsMinOrder;
201 /** More padding. */
202 uint32_t au32Reserved[ARCH_BITS == 32 ? 5 : 2];
203 /** Classes that may be taken prior to this one.
204 * This is a linked list where each node contains a chunk of locks so that we
205 * reduce the number of allocations as well as localize the data. */
206 RTLOCKVALCLASSREFCHUNK PriorLocks;
207 /** Hash table containing frequently encountered prior locks. */
208 PRTLOCKVALCLASSREF apPriorLocksHash[17];
209 /** Class name. (Allocated after the end of the block as usual.) */
210 char const *pszName;
211 /** Where this class was created.
212 * This is mainly used for finding automatically created lock classes.
213 * @remarks The strings are stored after this structure so we won't crash
214 * if the class lives longer than the module (dll/so/dylib) that
215 * spawned it. */
216 RTLOCKVALSRCPOS CreatePos;
217#ifdef RTLOCKVAL_WITH_CLASS_HASH_STATS
218 /** Hash hits. */
219 uint32_t volatile cHashHits;
220 /** Hash misses. */
221 uint32_t volatile cHashMisses;
222#endif
223} RTLOCKVALCLASSINT;
224AssertCompileSize(AVLLU32NODECORE, ARCH_BITS == 32 ? 20 : 32);
225AssertCompileMemberOffset(RTLOCKVALCLASSINT, PriorLocks, 64);
226
227
228/*******************************************************************************
229* Global Variables *
230*******************************************************************************/
231/** Serializing object destruction and deadlock detection.
232 *
233 * This makes sure that none of the memory examined by the deadlock detection
234 * code will become invalid (reused for other purposes or made not present)
235 * while the detection is in progress.
236 *
237 * NS: RTLOCKVALREC*, RTTHREADINT and RTLOCKVALDRECSHRD::papOwners destruction.
238 * EW: Deadlock detection and some related activities.
239 */
240static RTSEMXROADS g_hLockValidatorXRoads = NIL_RTSEMXROADS;
241/** Serializing class tree insert and lookups. */
242static RTSEMRW g_hLockValClassTreeRWLock= NIL_RTSEMRW;
243/** Class tree. */
244static PAVLLU32NODECORE g_LockValClassTree = NULL;
245/** Critical section serializing the teaching new rules to the classes. */
246static RTCRITSECT g_LockValClassTeachCS;
247
248/** Whether the lock validator is enabled or disabled.
249 * Only applies to new locks. */
250static bool volatile g_fLockValidatorEnabled = true;
251/** Set if the lock validator is quiet. */
252#ifdef RT_STRICT
253static bool volatile g_fLockValidatorQuiet = false;
254#else
255static bool volatile g_fLockValidatorQuiet = true;
256#endif
257/** Set if the lock validator may panic. */
258#ifdef RT_STRICT
259static bool volatile g_fLockValidatorMayPanic = true;
260#else
261static bool volatile g_fLockValidatorMayPanic = false;
262#endif
263/** Whether to return an error status on wrong locking order. */
264static bool volatile g_fLockValSoftWrongOrder = false;
265
266
267/*******************************************************************************
268* Internal Functions *
269*******************************************************************************/
270static void rtLockValidatorClassDestroy(RTLOCKVALCLASSINT *pClass);
271static uint32_t rtLockValidatorStackDepth(PRTTHREADINT pThread);
272
273
274/**
275 * Lazy initialization of the lock validator globals.
276 */
277static void rtLockValidatorLazyInit(void)
278{
279 static uint32_t volatile s_fInitializing = false;
280 if (ASMAtomicCmpXchgU32(&s_fInitializing, true, false))
281 {
282 /*
283 * The locks.
284 */
285 if (!RTCritSectIsInitialized(&g_LockValClassTeachCS))
286 RTCritSectInitEx(&g_LockValClassTeachCS, RTCRITSECT_FLAGS_NO_LOCK_VAL, NIL_RTLOCKVALCLASS,
287 RTLOCKVAL_SUB_CLASS_ANY, "RTLockVal-Teach");
288
289 if (g_hLockValClassTreeRWLock == NIL_RTSEMRW)
290 {
291 RTSEMRW hSemRW;
292 int rc = RTSemRWCreateEx(&hSemRW, RTSEMRW_FLAGS_NO_LOCK_VAL, NIL_RTLOCKVALCLASS, RTLOCKVAL_SUB_CLASS_ANY, "RTLockVal-Tree");
293 if (RT_SUCCESS(rc))
294 ASMAtomicWriteHandle(&g_hLockValClassTreeRWLock, hSemRW);
295 }
296
297 if (g_hLockValidatorXRoads == NIL_RTSEMXROADS)
298 {
299 RTSEMXROADS hXRoads;
300 int rc = RTSemXRoadsCreate(&hXRoads);
301 if (RT_SUCCESS(rc))
302 ASMAtomicWriteHandle(&g_hLockValidatorXRoads, hXRoads);
303 }
304
305#ifdef IN_RING3
306 /*
307 * Check the environment for our config variables.
308 */
309 if (RTEnvExist("IPRT_LOCK_VALIDATOR_ENABLED"))
310 ASMAtomicWriteBool(&g_fLockValidatorEnabled, true);
311 if (RTEnvExist("IPRT_LOCK_VALIDATOR_DISABLED"))
312 ASMAtomicWriteBool(&g_fLockValidatorEnabled, false);
313
314 if (RTEnvExist("IPRT_LOCK_VALIDATOR_MAY_PANIC"))
315 ASMAtomicWriteBool(&g_fLockValidatorMayPanic, true);
316 if (RTEnvExist("IPRT_LOCK_VALIDATOR_MAY_NOT_PANIC"))
317 ASMAtomicWriteBool(&g_fLockValidatorMayPanic, false);
318
319 if (RTEnvExist("IPRT_LOCK_VALIDATOR_NOT_QUIET"))
320 ASMAtomicWriteBool(&g_fLockValidatorQuiet, false);
321 if (RTEnvExist("IPRT_LOCK_VALIDATOR_QUIET"))
322 ASMAtomicWriteBool(&g_fLockValidatorQuiet, true);
323
324 if (RTEnvExist("IPRT_LOCK_VALIDATOR_STRICT_ORDER"))
325 ASMAtomicWriteBool(&g_fLockValSoftWrongOrder, false);
326 if (RTEnvExist("IPRT_LOCK_VALIDATOR_SOFT_ORDER"))
327 ASMAtomicWriteBool(&g_fLockValSoftWrongOrder, true);
328#endif
329
330 /*
331 * Register cleanup
332 */
333 /** @todo register some cleanup callback if we care. */
334
335 ASMAtomicWriteU32(&s_fInitializing, false);
336 }
337}
338
339
340
341/** Wrapper around ASMAtomicReadPtr. */
342DECL_FORCE_INLINE(PRTLOCKVALRECUNION) rtLockValidatorReadRecUnionPtr(PRTLOCKVALRECUNION volatile *ppRec)
343{
344 PRTLOCKVALRECUNION p = ASMAtomicReadPtrT(ppRec, PRTLOCKVALRECUNION);
345 RTLOCKVAL_ASSERT_PTR_ALIGN(p);
346 return p;
347}
348
349
350/** Wrapper around ASMAtomicWritePtr. */
351DECL_FORCE_INLINE(void) rtLockValidatorWriteRecUnionPtr(PRTLOCKVALRECUNION volatile *ppRec, PRTLOCKVALRECUNION pRecNew)
352{
353 RTLOCKVAL_ASSERT_PTR_ALIGN(pRecNew);
354 ASMAtomicWritePtr(ppRec, pRecNew);
355}
356
357
358/** Wrapper around ASMAtomicReadPtr. */
359DECL_FORCE_INLINE(PRTTHREADINT) rtLockValidatorReadThreadHandle(RTTHREAD volatile *phThread)
360{
361 PRTTHREADINT p = ASMAtomicReadPtrT(phThread, PRTTHREADINT);
362 RTLOCKVAL_ASSERT_PTR_ALIGN(p);
363 return p;
364}
365
366
367/** Wrapper around ASMAtomicUoReadPtr. */
368DECL_FORCE_INLINE(PRTLOCKVALRECSHRDOWN) rtLockValidatorUoReadSharedOwner(PRTLOCKVALRECSHRDOWN volatile *ppOwner)
369{
370 PRTLOCKVALRECSHRDOWN p = ASMAtomicUoReadPtrT(ppOwner, PRTLOCKVALRECSHRDOWN);
371 RTLOCKVAL_ASSERT_PTR_ALIGN(p);
372 return p;
373}
374
375
376/**
377 * Reads a volatile thread handle field and returns the thread name.
378 *
379 * @returns Thread name (read only).
380 * @param phThread The thread handle field.
381 */
382static const char *rtLockValidatorNameThreadHandle(RTTHREAD volatile *phThread)
383{
384 PRTTHREADINT pThread = rtLockValidatorReadThreadHandle(phThread);
385 if (!pThread)
386 return "<NIL>";
387 if (!VALID_PTR(pThread))
388 return "<INVALID>";
389 if (pThread->u32Magic != RTTHREADINT_MAGIC)
390 return "<BAD-THREAD-MAGIC>";
391 return pThread->szName;
392}
393
394
395/**
396 * Launch a simple assertion like complaint w/ panic.
397 *
398 * @param pszFile Where from - file.
399 * @param iLine Where from - line.
400 * @param pszFunction Where from - function.
401 * @param pszWhat What we're complaining about.
402 * @param ... Format arguments.
403 */
404static void rtLockValComplain(RT_SRC_POS_DECL, const char *pszWhat, ...)
405{
406 if (!ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
407 {
408 RTAssertMsg1Weak("RTLockValidator", iLine, pszFile, pszFunction);
409 va_list va;
410 va_start(va, pszWhat);
411 RTAssertMsg2WeakV(pszWhat, va);
412 va_end(va);
413 }
414 if (!ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
415 RTAssertPanic();
416}
417
418
419/**
420 * Describes the class.
421 *
422 * @param pszPrefix Message prefix.
423 * @param pClass The class to complain about.
424 * @param uSubClass My sub-class.
425 * @param fVerbose Verbose description including relations to other
426 * classes.
427 */
428static void rtLockValComplainAboutClass(const char *pszPrefix, RTLOCKVALCLASSINT *pClass, uint32_t uSubClass, bool fVerbose)
429{
430 if (ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
431 return;
432
433 /* Stringify the sub-class. */
434 const char *pszSubClass;
435 char szSubClass[32];
436 if (uSubClass < RTLOCKVAL_SUB_CLASS_USER)
437 switch (uSubClass)
438 {
439 case RTLOCKVAL_SUB_CLASS_NONE: pszSubClass = "none"; break;
440 case RTLOCKVAL_SUB_CLASS_ANY: pszSubClass = "any"; break;
441 default:
442 RTStrPrintf(szSubClass, sizeof(szSubClass), "invl-%u", uSubClass);
443 pszSubClass = szSubClass;
444 break;
445 }
446 else
447 {
448 RTStrPrintf(szSubClass, sizeof(szSubClass), "%u", uSubClass);
449 pszSubClass = szSubClass;
450 }
451
452 /* Validate the class pointer. */
453 if (!VALID_PTR(pClass))
454 {
455 RTAssertMsg2AddWeak("%sbad class=%p sub-class=%s\n", pszPrefix, pClass, pszSubClass);
456 return;
457 }
458 if (pClass->u32Magic != RTLOCKVALCLASS_MAGIC)
459 {
460 RTAssertMsg2AddWeak("%sbad class=%p magic=%#x sub-class=%s\n", pszPrefix, pClass, pClass->u32Magic, pszSubClass);
461 return;
462 }
463
464 /* OK, dump the class info. */
465 RTAssertMsg2AddWeak("%sclass=%p %s created={%Rbn(%u) %Rfn %p} sub-class=%s\n", pszPrefix,
466 pClass,
467 pClass->pszName,
468 pClass->CreatePos.pszFile,
469 pClass->CreatePos.uLine,
470 pClass->CreatePos.pszFunction,
471 pClass->CreatePos.uId,
472 pszSubClass);
473 if (fVerbose)
474 {
475 uint32_t i = 0;
476 uint32_t cPrinted = 0;
477 for (PRTLOCKVALCLASSREFCHUNK pChunk = &pClass->PriorLocks; pChunk; pChunk = pChunk->pNext)
478 for (unsigned j = 0; j < RT_ELEMENTS(pChunk->aRefs); j++, i++)
479 {
480 RTLOCKVALCLASSINT *pCurClass = pChunk->aRefs[j].hClass;
481 if (pCurClass != NIL_RTLOCKVALCLASS)
482 {
483 RTAssertMsg2AddWeak("%s%s #%02u: %s, %s, %u lookup%s\n", pszPrefix,
484 cPrinted == 0
485 ? "Prior:"
486 : " ",
487 i,
488 pCurClass->pszName,
489 pChunk->aRefs[j].fAutodidacticism
490 ? "autodidactic"
491 : "manually ",
492 pChunk->aRefs[j].cLookups,
493 pChunk->aRefs[j].cLookups != 1 ? "s" : "");
494 cPrinted++;
495 }
496 }
497 if (!cPrinted)
498 RTAssertMsg2AddWeak("%sPrior: none\n", pszPrefix);
499#ifdef RTLOCKVAL_WITH_CLASS_HASH_STATS
500 RTAssertMsg2AddWeak("%sHash Stats: %u hits, %u misses\n", pszPrefix, pClass->cHashHits, pClass->cHashMisses);
501#endif
502 }
503 else
504 {
505 uint32_t cPrinted = 0;
506 for (PRTLOCKVALCLASSREFCHUNK pChunk = &pClass->PriorLocks; pChunk; pChunk = pChunk->pNext)
507 for (unsigned j = 0; j < RT_ELEMENTS(pChunk->aRefs); j++)
508 {
509 RTLOCKVALCLASSINT *pCurClass = pChunk->aRefs[j].hClass;
510 if (pCurClass != NIL_RTLOCKVALCLASS)
511 {
512 if ((cPrinted % 10) == 0)
513 RTAssertMsg2AddWeak("%sPrior classes: %s%s", pszPrefix, pCurClass->pszName,
514 pChunk->aRefs[j].fAutodidacticism ? "*" : "");
515 else if ((cPrinted % 10) != 9)
516 RTAssertMsg2AddWeak(", %s%s", pCurClass->pszName,
517 pChunk->aRefs[j].fAutodidacticism ? "*" : "");
518 else
519 RTAssertMsg2AddWeak(", %s%s\n", pCurClass->pszName,
520 pChunk->aRefs[j].fAutodidacticism ? "*" : "");
521 cPrinted++;
522 }
523 }
524 if (!cPrinted)
525 RTAssertMsg2AddWeak("%sPrior classes: none\n", pszPrefix);
526 else if ((cPrinted % 10) != 0)
527 RTAssertMsg2AddWeak("\n");
528 }
529}
530
531
532/**
533 * Helper for getting the class name.
534 * @returns Class name string.
535 * @param pClass The class.
536 */
537static const char *rtLockValComplainGetClassName(RTLOCKVALCLASSINT *pClass)
538{
539 if (!pClass)
540 return "<nil-class>";
541 if (!VALID_PTR(pClass))
542 return "<bad-class-ptr>";
543 if (pClass->u32Magic != RTLOCKVALCLASS_MAGIC)
544 return "<bad-class-magic>";
545 if (!pClass->pszName)
546 return "<no-class-name>";
547 return pClass->pszName;
548}
549
550/**
551 * Formats the sub-class.
552 *
553 * @returns Stringified sub-class.
554 * @param uSubClass The name.
555 * @param pszBuf Buffer that is big enough.
556 */
557static const char *rtLockValComplainGetSubClassName(uint32_t uSubClass, char *pszBuf)
558{
559 if (uSubClass < RTLOCKVAL_SUB_CLASS_USER)
560 switch (uSubClass)
561 {
562 case RTLOCKVAL_SUB_CLASS_NONE: return "none";
563 case RTLOCKVAL_SUB_CLASS_ANY: return "any";
564 default:
565 RTStrPrintf(pszBuf, 32, "invl-%u", uSubClass);
566 break;
567 }
568 else
569 RTStrPrintf(pszBuf, 32, "%x", uSubClass);
570 return pszBuf;
571}
572
573
574/**
575 * Helper for rtLockValComplainAboutLock.
576 */
577DECL_FORCE_INLINE(void) rtLockValComplainAboutLockHlp(const char *pszPrefix, PRTLOCKVALRECUNION pRec, const char *pszSuffix,
578 uint32_t u32Magic, PCRTLOCKVALSRCPOS pSrcPos, uint32_t cRecursion,
579 const char *pszFrameType)
580{
581 char szBuf[32];
582 switch (u32Magic)
583 {
584 case RTLOCKVALRECEXCL_MAGIC:
585#ifdef RTLOCKVAL_WITH_VERBOSE_DUMPS
586 RTAssertMsg2AddWeak("%s%p %s xrec=%p own=%s r=%u cls=%s/%s pos={%Rbn(%u) %Rfn %p} [x%s]%s", pszPrefix,
587 pRec->Excl.hLock, pRec->Excl.pszName, pRec,
588 rtLockValidatorNameThreadHandle(&pRec->Excl.hThread), cRecursion,
589 rtLockValComplainGetClassName(pRec->Excl.hClass),
590 rtLockValComplainGetSubClassName(pRec->Excl.uSubClass, szBuf),
591 pSrcPos->pszFile, pSrcPos->uLine, pSrcPos->pszFunction, pSrcPos->uId,
592 pszFrameType, pszSuffix);
593#else
594 RTAssertMsg2AddWeak("%s%p %s own=%s r=%u cls=%s/%s pos={%Rbn(%u) %Rfn %p} [x%s]%s", pszPrefix,
595 pRec->Excl.hLock, pRec->Excl.szName,
596 rtLockValidatorNameThreadHandle(&pRec->Excl.hThread), cRecursion,
597 rtLockValComplainGetClassName(pRec->Excl.hClass),
598 rtLockValComplainGetSubClassName(pRec->Excl.uSubClass, szBuf),
599 pSrcPos->pszFile, pSrcPos->uLine, pSrcPos->pszFunction, pSrcPos->uId,
600 pszFrameType, pszSuffix);
601#endif
602 break;
603
604 case RTLOCKVALRECSHRD_MAGIC:
605 RTAssertMsg2AddWeak("%ss %p %s srec=%p cls=%s/%s [s%s]%s", pszPrefix,
606 pRec->Shared.hLock, pRec->Shared.szName, pRec,
607 rtLockValComplainGetClassName(pRec->Shared.hClass),
608 rtLockValComplainGetSubClassName(pRec->Shared.uSubClass, szBuf),
609 pszFrameType, pszSuffix);
610 break;
611
612 case RTLOCKVALRECSHRDOWN_MAGIC:
613 {
614 PRTLOCKVALRECSHRD pShared = pRec->ShrdOwner.pSharedRec;
615 if ( VALID_PTR(pShared)
616 && pShared->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC)
617#ifdef RTLOCKVAL_WITH_VERBOSE_DUMPS
618 RTAssertMsg2AddWeak("%s%p %s srec=%p trec=%p own=%s r=%u cls=%s/%s pos={%Rbn(%u) %Rfn %p} [o%s]%s", pszPrefix,
619 pShared->hLock, pShared->pszName, pShared,
620 pRec, rtLockValidatorNameThreadHandle(&pRec->ShrdOwner.hThread), cRecursion,
621 rtLockValComplainGetClassName(pShared->hClass),
622 rtLockValComplainGetSubClassName(pShared->uSubClass, szBuf),
623 pSrcPos->pszFile, pSrcPos->uLine, pSrcPos->pszFunction, pSrcPos->uId,
624 pszSuffix2, pszSuffix);
625#else
626 RTAssertMsg2AddWeak("%s%p %s own=%s r=%u cls=%s/%s pos={%Rbn(%u) %Rfn %p} [o%s]%s", pszPrefix,
627 pShared->hLock, pShared->szName,
628 rtLockValidatorNameThreadHandle(&pRec->ShrdOwner.hThread), cRecursion,
629 rtLockValComplainGetClassName(pShared->hClass),
630 rtLockValComplainGetSubClassName(pShared->uSubClass, szBuf),
631 pSrcPos->pszFile, pSrcPos->uLine, pSrcPos->pszFunction, pSrcPos->uId,
632 pszFrameType, pszSuffix);
633#endif
634 else
635 RTAssertMsg2AddWeak("%sbad srec=%p trec=%p own=%s r=%u pos={%Rbn(%u) %Rfn %p} [x%s]%s", pszPrefix,
636 pShared,
637 pRec, rtLockValidatorNameThreadHandle(&pRec->ShrdOwner.hThread), cRecursion,
638 pSrcPos->pszFile, pSrcPos->uLine, pSrcPos->pszFunction, pSrcPos->uId,
639 pszFrameType, pszSuffix);
640 break;
641 }
642
643 default:
644 AssertMsgFailed(("%#x\n", u32Magic));
645 }
646}
647
648
649/**
650 * Describes the lock.
651 *
652 * @param pszPrefix Message prefix.
653 * @param pRec The lock record we're working on.
654 * @param pszSuffix Message suffix.
655 */
656static void rtLockValComplainAboutLock(const char *pszPrefix, PRTLOCKVALRECUNION pRec, const char *pszSuffix)
657{
658#ifdef RTLOCKVAL_WITH_RECURSION_RECORDS
659# define FIX_REC(r) 1
660#else
661# define FIX_REC(r) (r)
662#endif
663 if ( VALID_PTR(pRec)
664 && !ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
665 {
666 switch (pRec->Core.u32Magic)
667 {
668 case RTLOCKVALRECEXCL_MAGIC:
669 rtLockValComplainAboutLockHlp(pszPrefix, pRec, pszSuffix, RTLOCKVALRECEXCL_MAGIC,
670 &pRec->Excl.SrcPos, FIX_REC(pRec->Excl.cRecursion), "");
671 break;
672
673 case RTLOCKVALRECSHRD_MAGIC:
674 rtLockValComplainAboutLockHlp(pszPrefix, pRec, pszSuffix, RTLOCKVALRECSHRD_MAGIC, NULL, 0, "");
675 break;
676
677 case RTLOCKVALRECSHRDOWN_MAGIC:
678 rtLockValComplainAboutLockHlp(pszPrefix, pRec, pszSuffix, RTLOCKVALRECSHRDOWN_MAGIC,
679 &pRec->ShrdOwner.SrcPos, FIX_REC(pRec->ShrdOwner.cRecursion), "");
680 break;
681
682 case RTLOCKVALRECNEST_MAGIC:
683 {
684 PRTLOCKVALRECUNION pRealRec = pRec->Nest.pRec;
685 uint32_t u32Magic;
686 if ( VALID_PTR(pRealRec)
687 && ( (u32Magic = pRealRec->Core.u32Magic) == RTLOCKVALRECEXCL_MAGIC
688 || u32Magic == RTLOCKVALRECSHRD_MAGIC
689 || u32Magic == RTLOCKVALRECSHRDOWN_MAGIC)
690 )
691 rtLockValComplainAboutLockHlp(pszPrefix, pRealRec, pszSuffix, u32Magic,
692 &pRec->Nest.SrcPos, pRec->Nest.cRecursion, "/r");
693 else
694 RTAssertMsg2AddWeak("%sbad rrec=%p nrec=%p r=%u pos={%Rbn(%u) %Rfn %p}%s", pszPrefix,
695 pRealRec, pRec, pRec->Nest.cRecursion,
696 pRec->Nest.SrcPos.pszFile, pRec->Nest.SrcPos.uLine, pRec->Nest.SrcPos.pszFunction, pRec->Nest.SrcPos.uId,
697 pszSuffix);
698 break;
699 }
700
701 default:
702 RTAssertMsg2AddWeak("%spRec=%p u32Magic=%#x (bad)%s", pszPrefix, pRec, pRec->Core.u32Magic, pszSuffix);
703 break;
704 }
705 }
706#undef FIX_REC
707}
708
709
710/**
711 * Dump the lock stack.
712 *
713 * @param pThread The thread which lock stack we're gonna dump.
714 * @param cchIndent The indentation in chars.
715 * @param cMinFrames The minimum number of frames to consider
716 * dumping.
717 * @param pHighightRec Record that should be marked specially in the
718 * dump.
719 */
720static void rtLockValComplainAboutLockStack(PRTTHREADINT pThread, unsigned cchIndent, uint32_t cMinFrames,
721 PRTLOCKVALRECUNION pHighightRec)
722{
723 if ( VALID_PTR(pThread)
724 && !ASMAtomicUoReadBool(&g_fLockValidatorQuiet)
725 && pThread->u32Magic == RTTHREADINT_MAGIC
726 )
727 {
728 uint32_t cEntries = rtLockValidatorStackDepth(pThread);
729 if (cEntries >= cMinFrames)
730 {
731 RTAssertMsg2AddWeak("%*s---- start of lock stack for %p %s - %u entr%s ----\n", cchIndent, "",
732 pThread, pThread->szName, cEntries, cEntries == 1 ? "y" : "ies");
733 PRTLOCKVALRECUNION pCur = rtLockValidatorReadRecUnionPtr(&pThread->LockValidator.pStackTop);
734 for (uint32_t i = 0; VALID_PTR(pCur); i++)
735 {
736 char szPrefix[80];
737 RTStrPrintf(szPrefix, sizeof(szPrefix), "%*s#%02u: ", cchIndent, "", i);
738 rtLockValComplainAboutLock(szPrefix, pCur, pHighightRec != pCur ? "\n" : " (*)\n");
739 switch (pCur->Core.u32Magic)
740 {
741 case RTLOCKVALRECEXCL_MAGIC: pCur = rtLockValidatorReadRecUnionPtr(&pCur->Excl.pDown); break;
742 case RTLOCKVALRECSHRDOWN_MAGIC: pCur = rtLockValidatorReadRecUnionPtr(&pCur->ShrdOwner.pDown); break;
743 case RTLOCKVALRECNEST_MAGIC: pCur = rtLockValidatorReadRecUnionPtr(&pCur->Nest.pDown); break;
744 default:
745 RTAssertMsg2AddWeak("%*s<bad stack frame>\n", cchIndent, "");
746 pCur = NULL;
747 break;
748 }
749 }
750 RTAssertMsg2AddWeak("%*s---- end of lock stack ----\n", cchIndent, "");
751 }
752 }
753}
754
755
756/**
757 * Launch the initial complaint.
758 *
759 * @param pszWhat What we're complaining about.
760 * @param pSrcPos Where we are complaining from, as it were.
761 * @param pThreadSelf The calling thread.
762 * @param pRec The main lock involved. Can be NULL.
763 * @param fDumpStack Whether to dump the lock stack (true) or not
764 * (false).
765 */
766static void rtLockValComplainFirst(const char *pszWhat, PCRTLOCKVALSRCPOS pSrcPos, PRTTHREADINT pThreadSelf,
767 PRTLOCKVALRECUNION pRec, bool fDumpStack)
768{
769 if (!ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
770 {
771 ASMCompilerBarrier(); /* paranoia */
772 RTAssertMsg1Weak("RTLockValidator", pSrcPos ? pSrcPos->uLine : 0, pSrcPos ? pSrcPos->pszFile : NULL, pSrcPos ? pSrcPos->pszFunction : NULL);
773 if (pSrcPos && pSrcPos->uId)
774 RTAssertMsg2Weak("%s [uId=%p thrd=%s]\n", pszWhat, pSrcPos->uId, VALID_PTR(pThreadSelf) ? pThreadSelf->szName : "<NIL>");
775 else
776 RTAssertMsg2Weak("%s [thrd=%s]\n", pszWhat, VALID_PTR(pThreadSelf) ? pThreadSelf->szName : "<NIL>");
777 rtLockValComplainAboutLock("Lock: ", pRec, "\n");
778 if (fDumpStack)
779 rtLockValComplainAboutLockStack(pThreadSelf, 0, 1, pRec);
780 }
781}
782
783
784/**
785 * Continue bitching.
786 *
787 * @param pszFormat Format string.
788 * @param ... Format arguments.
789 */
790static void rtLockValComplainMore(const char *pszFormat, ...)
791{
792 if (!ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
793 {
794 va_list va;
795 va_start(va, pszFormat);
796 RTAssertMsg2AddWeakV(pszFormat, va);
797 va_end(va);
798 }
799}
800
801
802/**
803 * Raise a panic if enabled.
804 */
805static void rtLockValComplainPanic(void)
806{
807 if (ASMAtomicUoReadBool(&g_fLockValidatorMayPanic))
808 RTAssertPanic();
809}
810
811
812/**
813 * Copy a source position record.
814 *
815 * @param pDst The destination.
816 * @param pSrc The source. Can be NULL.
817 */
818DECL_FORCE_INLINE(void) rtLockValidatorSrcPosCopy(PRTLOCKVALSRCPOS pDst, PCRTLOCKVALSRCPOS pSrc)
819{
820 if (pSrc)
821 {
822 ASMAtomicUoWriteU32(&pDst->uLine, pSrc->uLine);
823 ASMAtomicUoWritePtr(&pDst->pszFile, pSrc->pszFile);
824 ASMAtomicUoWritePtr(&pDst->pszFunction, pSrc->pszFunction);
825 ASMAtomicUoWritePtr((void * volatile *)&pDst->uId, (void *)pSrc->uId);
826 }
827 else
828 {
829 ASMAtomicUoWriteU32(&pDst->uLine, 0);
830 ASMAtomicUoWriteNullPtr(&pDst->pszFile);
831 ASMAtomicUoWriteNullPtr(&pDst->pszFunction);
832 ASMAtomicUoWritePtr(&pDst->uId, (RTHCUINTPTR)0);
833 }
834}
835
836
837/**
838 * Init a source position record.
839 *
840 * @param pSrcPos The source position record.
841 */
842DECL_FORCE_INLINE(void) rtLockValidatorSrcPosInit(PRTLOCKVALSRCPOS pSrcPos)
843{
844 pSrcPos->pszFile = NULL;
845 pSrcPos->pszFunction = NULL;
846 pSrcPos->uId = 0;
847 pSrcPos->uLine = 0;
848#if HC_ARCH_BITS == 64
849 pSrcPos->u32Padding = 0;
850#endif
851}
852
853
854/**
855 * Hashes the specified source position.
856 *
857 * @returns Hash.
858 * @param pSrcPos The source position record.
859 */
860static uint32_t rtLockValidatorSrcPosHash(PCRTLOCKVALSRCPOS pSrcPos)
861{
862 uint32_t uHash;
863 if ( ( pSrcPos->pszFile
864 || pSrcPos->pszFunction)
865 && pSrcPos->uLine != 0)
866 {
867 uHash = 0;
868 if (pSrcPos->pszFile)
869 uHash = sdbmInc(pSrcPos->pszFile, uHash);
870 if (pSrcPos->pszFunction)
871 uHash = sdbmInc(pSrcPos->pszFunction, uHash);
872 uHash += pSrcPos->uLine;
873 }
874 else
875 {
876 Assert(pSrcPos->uId);
877 uHash = (uint32_t)pSrcPos->uId;
878 }
879
880 return uHash;
881}
882
883
884/**
885 * Compares two source positions.
886 *
887 * @returns 0 if equal, < 0 if pSrcPos1 is smaller than pSrcPos2, > 0 if
888 * otherwise.
889 * @param pSrcPos1 The first source position.
890 * @param pSrcPos2 The second source position.
891 */
892static int rtLockValidatorSrcPosCompare(PCRTLOCKVALSRCPOS pSrcPos1, PCRTLOCKVALSRCPOS pSrcPos2)
893{
894 if (pSrcPos1->uLine != pSrcPos2->uLine)
895 return pSrcPos1->uLine < pSrcPos2->uLine ? -1 : 1;
896
897 int iDiff = RTStrCmp(pSrcPos1->pszFile, pSrcPos2->pszFile);
898 if (iDiff != 0)
899 return iDiff;
900
901 iDiff = RTStrCmp(pSrcPos1->pszFunction, pSrcPos2->pszFunction);
902 if (iDiff != 0)
903 return iDiff;
904
905 if (pSrcPos1->uId != pSrcPos2->uId)
906 return pSrcPos1->uId < pSrcPos2->uId ? -1 : 1;
907 return 0;
908}
909
910
911
912/**
913 * Serializes destruction of RTLOCKVALREC* and RTTHREADINT structures.
914 */
915DECLHIDDEN(void) rtLockValidatorSerializeDestructEnter(void)
916{
917 RTSEMXROADS hXRoads = g_hLockValidatorXRoads;
918 if (hXRoads != NIL_RTSEMXROADS)
919 RTSemXRoadsNSEnter(hXRoads);
920}
921
922
923/**
924 * Call after rtLockValidatorSerializeDestructEnter.
925 */
926DECLHIDDEN(void) rtLockValidatorSerializeDestructLeave(void)
927{
928 RTSEMXROADS hXRoads = g_hLockValidatorXRoads;
929 if (hXRoads != NIL_RTSEMXROADS)
930 RTSemXRoadsNSLeave(hXRoads);
931}
932
933
934/**
935 * Serializes deadlock detection against destruction of the objects being
936 * inspected.
937 */
938DECLINLINE(void) rtLockValidatorSerializeDetectionEnter(void)
939{
940 RTSEMXROADS hXRoads = g_hLockValidatorXRoads;
941 if (hXRoads != NIL_RTSEMXROADS)
942 RTSemXRoadsEWEnter(hXRoads);
943}
944
945
946/**
947 * Call after rtLockValidatorSerializeDetectionEnter.
948 */
949DECLHIDDEN(void) rtLockValidatorSerializeDetectionLeave(void)
950{
951 RTSEMXROADS hXRoads = g_hLockValidatorXRoads;
952 if (hXRoads != NIL_RTSEMXROADS)
953 RTSemXRoadsEWLeave(hXRoads);
954}
955
956
957/**
958 * Initializes the per thread lock validator data.
959 *
960 * @param pPerThread The data.
961 */
962DECLHIDDEN(void) rtLockValidatorInitPerThread(RTLOCKVALPERTHREAD *pPerThread)
963{
964 pPerThread->bmFreeShrdOwners = UINT32_MAX;
965
966 /* ASSUMES the rest has already been zeroed. */
967 Assert(pPerThread->pRec == NULL);
968 Assert(pPerThread->cWriteLocks == 0);
969 Assert(pPerThread->cReadLocks == 0);
970 Assert(pPerThread->fInValidator == false);
971 Assert(pPerThread->pStackTop == NULL);
972}
973
974
975/**
976 * Delete the per thread lock validator data.
977 *
978 * @param pPerThread The data.
979 */
980DECLHIDDEN(void) rtLockValidatorDeletePerThread(RTLOCKVALPERTHREAD *pPerThread)
981{
982 /*
983 * Check that the thread doesn't own any locks at this time.
984 */
985 if (pPerThread->pStackTop)
986 {
987 rtLockValComplainFirst("Thread terminating owning locks!", NULL,
988 RT_FROM_MEMBER(pPerThread, RTTHREADINT, LockValidator),
989 pPerThread->pStackTop, true);
990 rtLockValComplainPanic();
991 }
992
993 /*
994 * Free the recursion records.
995 */
996 PRTLOCKVALRECNEST pCur = pPerThread->pFreeNestRecs;
997 pPerThread->pFreeNestRecs = NULL;
998 while (pCur)
999 {
1000 PRTLOCKVALRECNEST pNext = pCur->pNextFree;
1001 RTMemFree(pCur);
1002 pCur = pNext;
1003 }
1004}
1005
1006RTDECL(int) RTLockValidatorClassCreateEx(PRTLOCKVALCLASS phClass, PCRTLOCKVALSRCPOS pSrcPos,
1007 bool fAutodidact, bool fRecursionOk, bool fStrictReleaseOrder,
1008 RTMSINTERVAL cMsMinDeadlock, RTMSINTERVAL cMsMinOrder,
1009 const char *pszNameFmt, ...)
1010{
1011 va_list va;
1012 va_start(va, pszNameFmt);
1013 int rc = RTLockValidatorClassCreateExV(phClass, pSrcPos, fAutodidact, fRecursionOk, fStrictReleaseOrder,
1014 cMsMinDeadlock, cMsMinOrder, pszNameFmt, va);
1015 va_end(va);
1016 return rc;
1017}
1018
1019
1020RTDECL(int) RTLockValidatorClassCreateExV(PRTLOCKVALCLASS phClass, PCRTLOCKVALSRCPOS pSrcPos,
1021 bool fAutodidact, bool fRecursionOk, bool fStrictReleaseOrder,
1022 RTMSINTERVAL cMsMinDeadlock, RTMSINTERVAL cMsMinOrder,
1023 const char *pszNameFmt, va_list va)
1024{
1025 Assert(cMsMinDeadlock >= 1);
1026 Assert(cMsMinOrder >= 1);
1027 AssertPtr(pSrcPos);
1028
1029 /*
1030 * Format the name and calc its length.
1031 */
1032 size_t cbName;
1033 char szName[32];
1034 if (pszNameFmt && *pszNameFmt)
1035 cbName = RTStrPrintfV(szName, sizeof(szName), pszNameFmt, va) + 1;
1036 else
1037 {
1038 static uint32_t volatile s_cAnonymous = 0;
1039 uint32_t i = ASMAtomicIncU32(&s_cAnonymous);
1040 cbName = RTStrPrintf(szName, sizeof(szName), "anon-%u", i - 1) + 1;
1041 }
1042
1043 /*
1044 * Figure out the file and function name lengths and allocate memory for
1045 * it all.
1046 */
1047 size_t const cbFile = pSrcPos->pszFile ? strlen(pSrcPos->pszFile) + 1 : 0;
1048 size_t const cbFunction = pSrcPos->pszFile ? strlen(pSrcPos->pszFunction) + 1 : 0;
1049 RTLOCKVALCLASSINT *pThis = (RTLOCKVALCLASSINT *)RTMemAllocVar(sizeof(*pThis) + cbFile + cbFunction + cbName);
1050 if (!pThis)
1051 return VERR_NO_MEMORY;
1052
1053 /*
1054 * Initialize the class data.
1055 */
1056 pThis->Core.Key = rtLockValidatorSrcPosHash(pSrcPos);
1057 pThis->Core.uchHeight = 0;
1058 pThis->Core.pLeft = NULL;
1059 pThis->Core.pRight = NULL;
1060 pThis->Core.pList = NULL;
1061 pThis->u32Magic = RTLOCKVALCLASS_MAGIC;
1062 pThis->cRefs = 1;
1063 pThis->fAutodidact = fAutodidact;
1064 pThis->fRecursionOk = fRecursionOk;
1065 pThis->fStrictReleaseOrder = fStrictReleaseOrder;
1066 pThis->fInTree = false;
1067 pThis->fDonateRefToNextRetainer = false;
1068 pThis->afReserved[0] = false;
1069 pThis->afReserved[1] = false;
1070 pThis->afReserved[2] = false;
1071 pThis->cMsMinDeadlock = cMsMinDeadlock;
1072 pThis->cMsMinOrder = cMsMinOrder;
1073 for (unsigned i = 0; i < RT_ELEMENTS(pThis->au32Reserved); i++)
1074 pThis->au32Reserved[i] = 0;
1075 for (unsigned i = 0; i < RT_ELEMENTS(pThis->PriorLocks.aRefs); i++)
1076 {
1077 pThis->PriorLocks.aRefs[i].hClass = NIL_RTLOCKVALCLASS;
1078 pThis->PriorLocks.aRefs[i].cLookups = 0;
1079 pThis->PriorLocks.aRefs[i].fAutodidacticism = false;
1080 pThis->PriorLocks.aRefs[i].afReserved[0] = false;
1081 pThis->PriorLocks.aRefs[i].afReserved[1] = false;
1082 pThis->PriorLocks.aRefs[i].afReserved[2] = false;
1083 }
1084 pThis->PriorLocks.pNext = NULL;
1085 for (unsigned i = 0; i < RT_ELEMENTS(pThis->apPriorLocksHash); i++)
1086 pThis->apPriorLocksHash[i] = NULL;
1087 char *pszDst = (char *)(pThis + 1);
1088 pThis->pszName = (char *)memcpy(pszDst, szName, cbName);
1089 pszDst += cbName;
1090 rtLockValidatorSrcPosCopy(&pThis->CreatePos, pSrcPos);
1091 pThis->CreatePos.pszFile = pSrcPos->pszFile ? (char *)memcpy(pszDst, pSrcPos->pszFile, cbFile) : NULL;
1092 pszDst += cbFile;
1093 pThis->CreatePos.pszFunction= pSrcPos->pszFunction ? (char *)memcpy(pszDst, pSrcPos->pszFunction, cbFunction) : NULL;
1094 Assert(rtLockValidatorSrcPosHash(&pThis->CreatePos) == pThis->Core.Key);
1095#ifdef RTLOCKVAL_WITH_CLASS_HASH_STATS
1096 pThis->cHashHits = 0;
1097 pThis->cHashMisses = 0;
1098#endif
1099
1100 *phClass = pThis;
1101 return VINF_SUCCESS;
1102}
1103
1104
1105RTDECL(int) RTLockValidatorClassCreate(PRTLOCKVALCLASS phClass, bool fAutodidact, RT_SRC_POS_DECL, const char *pszNameFmt, ...)
1106{
1107 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_POS_NO_ID();
1108 va_list va;
1109 va_start(va, pszNameFmt);
1110 int rc = RTLockValidatorClassCreateExV(phClass, &SrcPos,
1111 fAutodidact, true /*fRecursionOk*/, false /*fStrictReleaseOrder*/,
1112 1 /*cMsMinDeadlock*/, 1 /*cMsMinOrder*/,
1113 pszNameFmt, va);
1114 va_end(va);
1115 return rc;
1116}
1117
1118
1119/**
1120 * Creates a new lock validator class with a reference that is consumed by the
1121 * first call to RTLockValidatorClassRetain.
1122 *
1123 * This is tailored for use in the parameter list of a semaphore constructor.
1124 *
1125 * @returns Class handle with a reference that is automatically consumed by the
1126 * first retainer. NIL_RTLOCKVALCLASS if we run into trouble.
1127 *
1128 * @param pszFile The source position of the call, file.
1129 * @param iLine The source position of the call, line.
1130 * @param pszFunction The source position of the call, function.
1131 * @param pszNameFmt Class name format string, optional (NULL). Max
1132 * length is 32 bytes.
1133 * @param ... Format string arguments.
1134 */
1135RTDECL(RTLOCKVALCLASS) RTLockValidatorClassCreateUnique(RT_SRC_POS_DECL, const char *pszNameFmt, ...)
1136{
1137 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_POS_NO_ID();
1138 RTLOCKVALCLASSINT *pClass;
1139 va_list va;
1140 va_start(va, pszNameFmt);
1141 int rc = RTLockValidatorClassCreateExV(&pClass, &SrcPos,
1142 true /*fAutodidact*/, true /*fRecursionOk*/, false /*fStrictReleaseOrder*/,
1143 1 /*cMsMinDeadlock*/, 1 /*cMsMinOrder*/,
1144 pszNameFmt, va);
1145 va_end(va);
1146 if (RT_FAILURE(rc))
1147 return NIL_RTLOCKVALCLASS;
1148 ASMAtomicWriteBool(&pClass->fDonateRefToNextRetainer, true); /* see rtLockValidatorClassRetain */
1149 return pClass;
1150}
1151
1152
1153/**
1154 * Internal class retainer.
1155 * @returns The new reference count.
1156 * @param pClass The class.
1157 */
1158DECL_FORCE_INLINE(uint32_t) rtLockValidatorClassRetain(RTLOCKVALCLASSINT *pClass)
1159{
1160 uint32_t cRefs = ASMAtomicIncU32(&pClass->cRefs);
1161 if (cRefs > RTLOCKVALCLASS_MAX_REFS)
1162 ASMAtomicWriteU32(&pClass->cRefs, RTLOCKVALCLASS_MAX_REFS);
1163 else if ( cRefs == 2
1164 && ASMAtomicXchgBool(&pClass->fDonateRefToNextRetainer, false))
1165 cRefs = ASMAtomicDecU32(&pClass->cRefs);
1166 return cRefs;
1167}
1168
1169
1170/**
1171 * Validates and retains a lock validator class.
1172 *
1173 * @returns @a hClass on success, NIL_RTLOCKVALCLASS on failure.
1174 * @param hClass The class handle. NIL_RTLOCKVALCLASS is ok.
1175 */
1176DECL_FORCE_INLINE(RTLOCKVALCLASS) rtLockValidatorClassValidateAndRetain(RTLOCKVALCLASS hClass)
1177{
1178 if (hClass == NIL_RTLOCKVALCLASS)
1179 return hClass;
1180 AssertPtrReturn(hClass, NIL_RTLOCKVALCLASS);
1181 AssertReturn(hClass->u32Magic == RTLOCKVALCLASS_MAGIC, NIL_RTLOCKVALCLASS);
1182 rtLockValidatorClassRetain(hClass);
1183 return hClass;
1184}
1185
1186
1187/**
1188 * Internal class releaser.
1189 * @returns The new reference count.
1190 * @param pClass The class.
1191 */
1192DECLINLINE(uint32_t) rtLockValidatorClassRelease(RTLOCKVALCLASSINT *pClass)
1193{
1194 uint32_t cRefs = ASMAtomicDecU32(&pClass->cRefs);
1195 if (cRefs + 1 == RTLOCKVALCLASS_MAX_REFS)
1196 ASMAtomicWriteU32(&pClass->cRefs, RTLOCKVALCLASS_MAX_REFS);
1197 else if (!cRefs)
1198 rtLockValidatorClassDestroy(pClass);
1199 return cRefs;
1200}
1201
1202
1203/**
1204 * Destroys a class once there are not more references to it.
1205 *
1206 * @param Class The class.
1207 */
1208static void rtLockValidatorClassDestroy(RTLOCKVALCLASSINT *pClass)
1209{
1210 AssertReturnVoid(!pClass->fInTree);
1211 ASMAtomicWriteU32(&pClass->u32Magic, RTLOCKVALCLASS_MAGIC_DEAD);
1212
1213 PRTLOCKVALCLASSREFCHUNK pChunk = &pClass->PriorLocks;
1214 while (pChunk)
1215 {
1216 for (uint32_t i = 0; i < RT_ELEMENTS(pChunk->aRefs); i++)
1217 {
1218 RTLOCKVALCLASSINT *pClass2 = pChunk->aRefs[i].hClass;
1219 if (pClass2 != NIL_RTLOCKVALCLASS)
1220 {
1221 pChunk->aRefs[i].hClass = NIL_RTLOCKVALCLASS;
1222 rtLockValidatorClassRelease(pClass2);
1223 }
1224 }
1225
1226 PRTLOCKVALCLASSREFCHUNK pNext = pChunk->pNext;
1227 pChunk->pNext = NULL;
1228 if (pChunk != &pClass->PriorLocks)
1229 RTMemFree(pChunk);
1230 pChunk = pNext;
1231 }
1232
1233 RTMemFree(pClass);
1234}
1235
1236
1237RTDECL(RTLOCKVALCLASS) RTLockValidatorClassFindForSrcPos(PRTLOCKVALSRCPOS pSrcPos)
1238{
1239 if (g_hLockValClassTreeRWLock == NIL_RTSEMRW)
1240 rtLockValidatorLazyInit();
1241 int rcLock = RTSemRWRequestRead(g_hLockValClassTreeRWLock, RT_INDEFINITE_WAIT);
1242
1243 uint32_t uSrcPosHash = rtLockValidatorSrcPosHash(pSrcPos);
1244 RTLOCKVALCLASSINT *pClass = (RTLOCKVALCLASSINT *)RTAvllU32Get(&g_LockValClassTree, uSrcPosHash);
1245 while (pClass)
1246 {
1247 if (rtLockValidatorSrcPosCompare(&pClass->CreatePos, pSrcPos) == 0)
1248 break;
1249 pClass = (RTLOCKVALCLASSINT *)pClass->Core.pList;
1250 }
1251
1252 if (RT_SUCCESS(rcLock))
1253 RTSemRWReleaseRead(g_hLockValClassTreeRWLock);
1254 return pClass;
1255}
1256
1257
1258RTDECL(RTLOCKVALCLASS) RTLockValidatorClassForSrcPos(RT_SRC_POS_DECL, const char *pszNameFmt, ...)
1259{
1260 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_POS_NO_ID();
1261 RTLOCKVALCLASS hClass = RTLockValidatorClassFindForSrcPos(&SrcPos);
1262 if (hClass == NIL_RTLOCKVALCLASS)
1263 {
1264 /*
1265 * Create a new class and insert it into the tree.
1266 */
1267 va_list va;
1268 va_start(va, pszNameFmt);
1269 int rc = RTLockValidatorClassCreateExV(&hClass, &SrcPos,
1270 true /*fAutodidact*/, true /*fRecursionOk*/, false /*fStrictReleaseOrder*/,
1271 1 /*cMsMinDeadlock*/, 1 /*cMsMinOrder*/,
1272 pszNameFmt, va);
1273 va_end(va);
1274 if (RT_SUCCESS(rc))
1275 {
1276 if (g_hLockValClassTreeRWLock == NIL_RTSEMRW)
1277 rtLockValidatorLazyInit();
1278 int rcLock = RTSemRWRequestWrite(g_hLockValClassTreeRWLock, RT_INDEFINITE_WAIT);
1279
1280 Assert(!hClass->fInTree);
1281 hClass->fInTree = RTAvllU32Insert(&g_LockValClassTree, &hClass->Core);
1282 Assert(hClass->fInTree);
1283
1284 if (RT_SUCCESS(rcLock))
1285 RTSemRWReleaseWrite(g_hLockValClassTreeRWLock);
1286 return hClass;
1287 }
1288 }
1289 return hClass;
1290}
1291
1292
1293RTDECL(uint32_t) RTLockValidatorClassRetain(RTLOCKVALCLASS hClass)
1294{
1295 RTLOCKVALCLASSINT *pClass = hClass;
1296 AssertPtrReturn(pClass, UINT32_MAX);
1297 AssertReturn(pClass->u32Magic == RTLOCKVALCLASS_MAGIC, UINT32_MAX);
1298 return rtLockValidatorClassRetain(pClass);
1299}
1300
1301
1302RTDECL(uint32_t) RTLockValidatorClassRelease(RTLOCKVALCLASS hClass)
1303{
1304 RTLOCKVALCLASSINT *pClass = hClass;
1305 if (pClass == NIL_RTLOCKVALCLASS)
1306 return 0;
1307 AssertPtrReturn(pClass, UINT32_MAX);
1308 AssertReturn(pClass->u32Magic == RTLOCKVALCLASS_MAGIC, UINT32_MAX);
1309 return rtLockValidatorClassRelease(pClass);
1310}
1311
1312
1313/**
1314 * Worker for rtLockValidatorClassIsPriorClass that does a linear search thru
1315 * all the chunks for @a pPriorClass.
1316 *
1317 * @returns true / false.
1318 * @param pClass The class to search.
1319 * @param pPriorClass The class to search for.
1320 */
1321static bool rtLockValidatorClassIsPriorClassByLinearSearch(RTLOCKVALCLASSINT *pClass, RTLOCKVALCLASSINT *pPriorClass)
1322{
1323 for (PRTLOCKVALCLASSREFCHUNK pChunk = &pClass->PriorLocks; pChunk; pChunk = pChunk->pNext)
1324 for (uint32_t i = 0; i < RT_ELEMENTS(pChunk->aRefs); i++)
1325 {
1326 if (pChunk->aRefs[i].hClass == pPriorClass)
1327 {
1328 uint32_t cLookups = ASMAtomicIncU32(&pChunk->aRefs[i].cLookups);
1329 if (RT_UNLIKELY(cLookups >= RTLOCKVALCLASSREF_MAX_LOOKUPS_FIX))
1330 {
1331 ASMAtomicWriteU32(&pChunk->aRefs[i].cLookups, RTLOCKVALCLASSREF_MAX_LOOKUPS);
1332 cLookups = RTLOCKVALCLASSREF_MAX_LOOKUPS;
1333 }
1334
1335 /* update the hash table entry. */
1336 PRTLOCKVALCLASSREF *ppHashEntry = &pClass->apPriorLocksHash[RTLOCKVALCLASS_HASH(pPriorClass)];
1337 if ( !(*ppHashEntry)
1338 || (*ppHashEntry)->cLookups + 128 < cLookups)
1339 ASMAtomicWritePtr(ppHashEntry, &pChunk->aRefs[i]);
1340
1341#ifdef RTLOCKVAL_WITH_CLASS_HASH_STATS
1342 ASMAtomicIncU32(&pClass->cHashMisses);
1343#endif
1344 return true;
1345 }
1346 }
1347
1348 return false;
1349}
1350
1351
1352/**
1353 * Checks if @a pPriorClass is a known prior class.
1354 *
1355 * @returns true / false.
1356 * @param pClass The class to search.
1357 * @param pPriorClass The class to search for.
1358 */
1359DECL_FORCE_INLINE(bool) rtLockValidatorClassIsPriorClass(RTLOCKVALCLASSINT *pClass, RTLOCKVALCLASSINT *pPriorClass)
1360{
1361 /*
1362 * Hash lookup here.
1363 */
1364 PRTLOCKVALCLASSREF pRef = pClass->apPriorLocksHash[RTLOCKVALCLASS_HASH(pPriorClass)];
1365 if ( pRef
1366 && pRef->hClass == pPriorClass)
1367 {
1368 uint32_t cLookups = ASMAtomicIncU32(&pRef->cLookups);
1369 if (RT_UNLIKELY(cLookups >= RTLOCKVALCLASSREF_MAX_LOOKUPS_FIX))
1370 ASMAtomicWriteU32(&pRef->cLookups, RTLOCKVALCLASSREF_MAX_LOOKUPS);
1371#ifdef RTLOCKVAL_WITH_CLASS_HASH_STATS
1372 ASMAtomicIncU32(&pClass->cHashHits);
1373#endif
1374 return true;
1375 }
1376
1377 return rtLockValidatorClassIsPriorClassByLinearSearch(pClass, pPriorClass);
1378}
1379
1380
1381/**
1382 * Adds a class to the prior list.
1383 *
1384 * @returns VINF_SUCCESS, VERR_NO_MEMORY or VERR_SEM_LV_WRONG_ORDER.
1385 * @param pClass The class to work on.
1386 * @param pPriorClass The class to add.
1387 * @param fAutodidacticism Whether we're teaching ourselves (true) or
1388 * somebody is teaching us via the API (false).
1389 * @param pSrcPos Where this rule was added (optional).
1390 */
1391static int rtLockValidatorClassAddPriorClass(RTLOCKVALCLASSINT *pClass, RTLOCKVALCLASSINT *pPriorClass,
1392 bool fAutodidacticism, PCRTLOCKVALSRCPOS pSrcPos)
1393{
1394 NOREF(pSrcPos);
1395 if (!RTCritSectIsInitialized(&g_LockValClassTeachCS))
1396 rtLockValidatorLazyInit();
1397 int rcLock = RTCritSectEnter(&g_LockValClassTeachCS);
1398
1399 /*
1400 * Check that there are no conflict (no assert since we might race each other).
1401 */
1402 int rc = VERR_SEM_LV_INTERNAL_ERROR;
1403 if (!rtLockValidatorClassIsPriorClass(pPriorClass, pClass))
1404 {
1405 if (!rtLockValidatorClassIsPriorClass(pClass, pPriorClass))
1406 {
1407 /*
1408 * Scan the table for a free entry, allocating a new chunk if necessary.
1409 */
1410 for (PRTLOCKVALCLASSREFCHUNK pChunk = &pClass->PriorLocks; ; pChunk = pChunk->pNext)
1411 {
1412 bool fDone = false;
1413 for (uint32_t i = 0; i < RT_ELEMENTS(pChunk->aRefs); i++)
1414 {
1415 ASMAtomicCmpXchgHandle(&pChunk->aRefs[i].hClass, pPriorClass, NIL_RTLOCKVALCLASS, fDone);
1416 if (fDone)
1417 {
1418 pChunk->aRefs[i].fAutodidacticism = fAutodidacticism;
1419 rtLockValidatorClassRetain(pPriorClass);
1420 rc = VINF_SUCCESS;
1421 break;
1422 }
1423 }
1424 if (fDone)
1425 break;
1426
1427 /* If no more chunks, allocate a new one and insert the class before linking it. */
1428 if (!pChunk->pNext)
1429 {
1430 PRTLOCKVALCLASSREFCHUNK pNew = (PRTLOCKVALCLASSREFCHUNK)RTMemAlloc(sizeof(*pNew));
1431 if (!pNew)
1432 {
1433 rc = VERR_NO_MEMORY;
1434 break;
1435 }
1436 pNew->pNext = NULL;
1437 for (uint32_t i = 0; i < RT_ELEMENTS(pNew->aRefs); i++)
1438 {
1439 pNew->aRefs[i].hClass = NIL_RTLOCKVALCLASS;
1440 pNew->aRefs[i].cLookups = 0;
1441 pNew->aRefs[i].fAutodidacticism = false;
1442 pNew->aRefs[i].afReserved[0] = false;
1443 pNew->aRefs[i].afReserved[1] = false;
1444 pNew->aRefs[i].afReserved[2] = false;
1445 }
1446
1447 pNew->aRefs[0].hClass = pPriorClass;
1448 pNew->aRefs[0].fAutodidacticism = fAutodidacticism;
1449
1450 ASMAtomicWritePtr(&pChunk->pNext, pNew);
1451 rtLockValidatorClassRetain(pPriorClass);
1452 rc = VINF_SUCCESS;
1453 break;
1454 }
1455 } /* chunk loop */
1456 }
1457 else
1458 rc = VINF_SUCCESS;
1459 }
1460 else
1461 rc = !g_fLockValSoftWrongOrder ? VERR_SEM_LV_WRONG_ORDER : VINF_SUCCESS;
1462
1463 if (RT_SUCCESS(rcLock))
1464 RTCritSectLeave(&g_LockValClassTeachCS);
1465 return rc;
1466}
1467
1468
1469RTDECL(int) RTLockValidatorClassAddPriorClass(RTLOCKVALCLASS hClass, RTLOCKVALCLASS hPriorClass)
1470{
1471 RTLOCKVALCLASSINT *pClass = hClass;
1472 AssertPtrReturn(pClass, VERR_INVALID_HANDLE);
1473 AssertReturn(pClass->u32Magic == RTLOCKVALCLASS_MAGIC, VERR_INVALID_HANDLE);
1474
1475 RTLOCKVALCLASSINT *pPriorClass = hPriorClass;
1476 AssertPtrReturn(pPriorClass, VERR_INVALID_HANDLE);
1477 AssertReturn(pPriorClass->u32Magic == RTLOCKVALCLASS_MAGIC, VERR_INVALID_HANDLE);
1478
1479 return rtLockValidatorClassAddPriorClass(pClass, pPriorClass, false /*fAutodidacticism*/, NULL);
1480}
1481
1482
1483RTDECL(int) RTLockValidatorClassEnforceStrictReleaseOrder(RTLOCKVALCLASS hClass, bool fEnabled)
1484{
1485 RTLOCKVALCLASSINT *pClass = hClass;
1486 AssertPtrReturn(pClass, VERR_INVALID_HANDLE);
1487 AssertReturn(pClass->u32Magic == RTLOCKVALCLASS_MAGIC, VERR_INVALID_HANDLE);
1488
1489 ASMAtomicWriteBool(&pClass->fStrictReleaseOrder, fEnabled);
1490 return VINF_SUCCESS;
1491}
1492
1493
1494/**
1495 * Unlinks all siblings.
1496 *
1497 * This is used during record deletion and assumes no races.
1498 *
1499 * @param pCore One of the siblings.
1500 */
1501static void rtLockValidatorUnlinkAllSiblings(PRTLOCKVALRECCORE pCore)
1502{
1503 /* ASSUMES sibling destruction doesn't involve any races and that all
1504 related records are to be disposed off now. */
1505 PRTLOCKVALRECUNION pSibling = (PRTLOCKVALRECUNION)pCore;
1506 while (pSibling)
1507 {
1508 PRTLOCKVALRECUNION volatile *ppCoreNext;
1509 switch (pSibling->Core.u32Magic)
1510 {
1511 case RTLOCKVALRECEXCL_MAGIC:
1512 case RTLOCKVALRECEXCL_MAGIC_DEAD:
1513 ppCoreNext = &pSibling->Excl.pSibling;
1514 break;
1515
1516 case RTLOCKVALRECSHRD_MAGIC:
1517 case RTLOCKVALRECSHRD_MAGIC_DEAD:
1518 ppCoreNext = &pSibling->Shared.pSibling;
1519 break;
1520
1521 default:
1522 AssertFailed();
1523 ppCoreNext = NULL;
1524 break;
1525 }
1526 if (RT_UNLIKELY(ppCoreNext))
1527 break;
1528 pSibling = ASMAtomicXchgPtrT(ppCoreNext, NULL, PRTLOCKVALRECUNION);
1529 }
1530}
1531
1532
1533RTDECL(int) RTLockValidatorRecMakeSiblings(PRTLOCKVALRECCORE pRec1, PRTLOCKVALRECCORE pRec2)
1534{
1535 /*
1536 * Validate input.
1537 */
1538 PRTLOCKVALRECUNION p1 = (PRTLOCKVALRECUNION)pRec1;
1539 PRTLOCKVALRECUNION p2 = (PRTLOCKVALRECUNION)pRec2;
1540
1541 AssertPtrReturn(p1, VERR_SEM_LV_INVALID_PARAMETER);
1542 AssertReturn( p1->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC
1543 || p1->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
1544 , VERR_SEM_LV_INVALID_PARAMETER);
1545
1546 AssertPtrReturn(p2, VERR_SEM_LV_INVALID_PARAMETER);
1547 AssertReturn( p2->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC
1548 || p2->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
1549 , VERR_SEM_LV_INVALID_PARAMETER);
1550
1551 /*
1552 * Link them (circular list).
1553 */
1554 if ( p1->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC
1555 && p2->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC)
1556 {
1557 p1->Excl.pSibling = p2;
1558 p2->Shared.pSibling = p1;
1559 }
1560 else if ( p1->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
1561 && p2->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC)
1562 {
1563 p1->Shared.pSibling = p2;
1564 p2->Excl.pSibling = p1;
1565 }
1566 else
1567 AssertFailedReturn(VERR_SEM_LV_INVALID_PARAMETER); /* unsupported mix */
1568
1569 return VINF_SUCCESS;
1570}
1571
1572
1573/**
1574 * Gets the lock name for the given record.
1575 *
1576 * @returns Read-only lock name.
1577 * @param pRec The lock record.
1578 */
1579DECL_FORCE_INLINE(const char *) rtLockValidatorRecName(PRTLOCKVALRECUNION pRec)
1580{
1581 switch (pRec->Core.u32Magic)
1582 {
1583 case RTLOCKVALRECEXCL_MAGIC:
1584 return pRec->Excl.szName;
1585 case RTLOCKVALRECSHRD_MAGIC:
1586 return pRec->Shared.szName;
1587 case RTLOCKVALRECSHRDOWN_MAGIC:
1588 return pRec->ShrdOwner.pSharedRec ? pRec->ShrdOwner.pSharedRec->szName : "orphaned";
1589 case RTLOCKVALRECNEST_MAGIC:
1590 pRec = rtLockValidatorReadRecUnionPtr(&pRec->Nest.pRec);
1591 if (VALID_PTR(pRec))
1592 {
1593 switch (pRec->Core.u32Magic)
1594 {
1595 case RTLOCKVALRECEXCL_MAGIC:
1596 return pRec->Excl.szName;
1597 case RTLOCKVALRECSHRD_MAGIC:
1598 return pRec->Shared.szName;
1599 case RTLOCKVALRECSHRDOWN_MAGIC:
1600 return pRec->ShrdOwner.pSharedRec ? pRec->ShrdOwner.pSharedRec->szName : "orphaned";
1601 default:
1602 return "unknown-nested";
1603 }
1604 }
1605 return "orphaned-nested";
1606 default:
1607 return "unknown";
1608 }
1609}
1610
1611
1612/**
1613 * Gets the class for this locking record.
1614 *
1615 * @returns Pointer to the class or NIL_RTLOCKVALCLASS.
1616 * @param pRec The lock validator record.
1617 */
1618DECLINLINE(RTLOCKVALCLASSINT *) rtLockValidatorRecGetClass(PRTLOCKVALRECUNION pRec)
1619{
1620 switch (pRec->Core.u32Magic)
1621 {
1622 case RTLOCKVALRECEXCL_MAGIC:
1623 return pRec->Excl.hClass;
1624
1625 case RTLOCKVALRECSHRD_MAGIC:
1626 return pRec->Shared.hClass;
1627
1628 case RTLOCKVALRECSHRDOWN_MAGIC:
1629 {
1630 PRTLOCKVALRECSHRD pSharedRec = pRec->ShrdOwner.pSharedRec;
1631 if (RT_LIKELY( VALID_PTR(pSharedRec)
1632 && pSharedRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC))
1633 return pSharedRec->hClass;
1634 return NIL_RTLOCKVALCLASS;
1635 }
1636
1637 case RTLOCKVALRECNEST_MAGIC:
1638 {
1639 PRTLOCKVALRECUNION pRealRec = pRec->Nest.pRec;
1640 if (VALID_PTR(pRealRec))
1641 {
1642 switch (pRealRec->Core.u32Magic)
1643 {
1644 case RTLOCKVALRECEXCL_MAGIC:
1645 return pRealRec->Excl.hClass;
1646
1647 case RTLOCKVALRECSHRDOWN_MAGIC:
1648 {
1649 PRTLOCKVALRECSHRD pSharedRec = pRealRec->ShrdOwner.pSharedRec;
1650 if (RT_LIKELY( VALID_PTR(pSharedRec)
1651 && pSharedRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC))
1652 return pSharedRec->hClass;
1653 break;
1654 }
1655
1656 default:
1657 AssertMsgFailed(("%p %p %#x\n", pRec, pRealRec, pRealRec->Core.u32Magic));
1658 break;
1659 }
1660 }
1661 return NIL_RTLOCKVALCLASS;
1662 }
1663
1664 default:
1665 AssertMsgFailed(("%#x\n", pRec->Core.u32Magic));
1666 return NIL_RTLOCKVALCLASS;
1667 }
1668}
1669
1670
1671/**
1672 * Gets the class for this locking record and the pointer to the one below it in
1673 * the stack.
1674 *
1675 * @returns Pointer to the class or NIL_RTLOCKVALCLASS.
1676 * @param pRec The lock validator record.
1677 * @param puSubClass Where to return the sub-class.
1678 * @param ppDown Where to return the pointer to the record below.
1679 */
1680DECL_FORCE_INLINE(RTLOCKVALCLASSINT *)
1681rtLockValidatorRecGetClassesAndDown(PRTLOCKVALRECUNION pRec, uint32_t *puSubClass, PRTLOCKVALRECUNION *ppDown)
1682{
1683 switch (pRec->Core.u32Magic)
1684 {
1685 case RTLOCKVALRECEXCL_MAGIC:
1686 *ppDown = pRec->Excl.pDown;
1687 *puSubClass = pRec->Excl.uSubClass;
1688 return pRec->Excl.hClass;
1689
1690 case RTLOCKVALRECSHRD_MAGIC:
1691 *ppDown = NULL;
1692 *puSubClass = pRec->Shared.uSubClass;
1693 return pRec->Shared.hClass;
1694
1695 case RTLOCKVALRECSHRDOWN_MAGIC:
1696 {
1697 *ppDown = pRec->ShrdOwner.pDown;
1698
1699 PRTLOCKVALRECSHRD pSharedRec = pRec->ShrdOwner.pSharedRec;
1700 if (RT_LIKELY( VALID_PTR(pSharedRec)
1701 && pSharedRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC))
1702 {
1703 *puSubClass = pSharedRec->uSubClass;
1704 return pSharedRec->hClass;
1705 }
1706 *puSubClass = RTLOCKVAL_SUB_CLASS_NONE;
1707 return NIL_RTLOCKVALCLASS;
1708 }
1709
1710 case RTLOCKVALRECNEST_MAGIC:
1711 {
1712 *ppDown = pRec->Nest.pDown;
1713
1714 PRTLOCKVALRECUNION pRealRec = pRec->Nest.pRec;
1715 if (VALID_PTR(pRealRec))
1716 {
1717 switch (pRealRec->Core.u32Magic)
1718 {
1719 case RTLOCKVALRECEXCL_MAGIC:
1720 *puSubClass = pRealRec->Excl.uSubClass;
1721 return pRealRec->Excl.hClass;
1722
1723 case RTLOCKVALRECSHRDOWN_MAGIC:
1724 {
1725 PRTLOCKVALRECSHRD pSharedRec = pRealRec->ShrdOwner.pSharedRec;
1726 if (RT_LIKELY( VALID_PTR(pSharedRec)
1727 && pSharedRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC))
1728 {
1729 *puSubClass = pSharedRec->uSubClass;
1730 return pSharedRec->hClass;
1731 }
1732 break;
1733 }
1734
1735 default:
1736 AssertMsgFailed(("%p %p %#x\n", pRec, pRealRec, pRealRec->Core.u32Magic));
1737 break;
1738 }
1739 }
1740 *puSubClass = RTLOCKVAL_SUB_CLASS_NONE;
1741 return NIL_RTLOCKVALCLASS;
1742 }
1743
1744 default:
1745 AssertMsgFailed(("%#x\n", pRec->Core.u32Magic));
1746 *ppDown = NULL;
1747 *puSubClass = RTLOCKVAL_SUB_CLASS_NONE;
1748 return NIL_RTLOCKVALCLASS;
1749 }
1750}
1751
1752
1753/**
1754 * Gets the sub-class for a lock record.
1755 *
1756 * @returns the sub-class.
1757 * @param pRec The lock validator record.
1758 */
1759DECLINLINE(uint32_t) rtLockValidatorRecGetSubClass(PRTLOCKVALRECUNION pRec)
1760{
1761 switch (pRec->Core.u32Magic)
1762 {
1763 case RTLOCKVALRECEXCL_MAGIC:
1764 return pRec->Excl.uSubClass;
1765
1766 case RTLOCKVALRECSHRD_MAGIC:
1767 return pRec->Shared.uSubClass;
1768
1769 case RTLOCKVALRECSHRDOWN_MAGIC:
1770 {
1771 PRTLOCKVALRECSHRD pSharedRec = pRec->ShrdOwner.pSharedRec;
1772 if (RT_LIKELY( VALID_PTR(pSharedRec)
1773 && pSharedRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC))
1774 return pSharedRec->uSubClass;
1775 return RTLOCKVAL_SUB_CLASS_NONE;
1776 }
1777
1778 case RTLOCKVALRECNEST_MAGIC:
1779 {
1780 PRTLOCKVALRECUNION pRealRec = pRec->Nest.pRec;
1781 if (VALID_PTR(pRealRec))
1782 {
1783 switch (pRealRec->Core.u32Magic)
1784 {
1785 case RTLOCKVALRECEXCL_MAGIC:
1786 return pRec->Excl.uSubClass;
1787
1788 case RTLOCKVALRECSHRDOWN_MAGIC:
1789 {
1790 PRTLOCKVALRECSHRD pSharedRec = pRealRec->ShrdOwner.pSharedRec;
1791 if (RT_LIKELY( VALID_PTR(pSharedRec)
1792 && pSharedRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC))
1793 return pSharedRec->uSubClass;
1794 break;
1795 }
1796
1797 default:
1798 AssertMsgFailed(("%p %p %#x\n", pRec, pRealRec, pRealRec->Core.u32Magic));
1799 break;
1800 }
1801 }
1802 return RTLOCKVAL_SUB_CLASS_NONE;
1803 }
1804
1805 default:
1806 AssertMsgFailed(("%#x\n", pRec->Core.u32Magic));
1807 return RTLOCKVAL_SUB_CLASS_NONE;
1808 }
1809}
1810
1811
1812
1813
1814/**
1815 * Calculates the depth of a lock stack.
1816 *
1817 * @returns Number of stack frames.
1818 * @param pThread The thread.
1819 */
1820static uint32_t rtLockValidatorStackDepth(PRTTHREADINT pThread)
1821{
1822 uint32_t cEntries = 0;
1823 PRTLOCKVALRECUNION pCur = rtLockValidatorReadRecUnionPtr(&pThread->LockValidator.pStackTop);
1824 while (VALID_PTR(pCur))
1825 {
1826 switch (pCur->Core.u32Magic)
1827 {
1828 case RTLOCKVALRECEXCL_MAGIC:
1829 pCur = rtLockValidatorReadRecUnionPtr(&pCur->Excl.pDown);
1830 break;
1831
1832 case RTLOCKVALRECSHRDOWN_MAGIC:
1833 pCur = rtLockValidatorReadRecUnionPtr(&pCur->ShrdOwner.pDown);
1834 break;
1835
1836 case RTLOCKVALRECNEST_MAGIC:
1837 pCur = rtLockValidatorReadRecUnionPtr(&pCur->Nest.pDown);
1838 break;
1839
1840 default:
1841 AssertMsgFailedReturn(("%#x\n", pCur->Core.u32Magic), cEntries);
1842 }
1843 cEntries++;
1844 }
1845 return cEntries;
1846}
1847
1848
1849#ifdef RT_STRICT
1850/**
1851 * Checks if the stack contains @a pRec.
1852 *
1853 * @returns true / false.
1854 * @param pThreadSelf The current thread.
1855 * @param pRec The lock record.
1856 */
1857static bool rtLockValidatorStackContainsRec(PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION pRec)
1858{
1859 PRTLOCKVALRECUNION pCur = pThreadSelf->LockValidator.pStackTop;
1860 while (pCur)
1861 {
1862 AssertPtrReturn(pCur, false);
1863 if (pCur == pRec)
1864 return true;
1865 switch (pCur->Core.u32Magic)
1866 {
1867 case RTLOCKVALRECEXCL_MAGIC:
1868 Assert(pCur->Excl.cRecursion >= 1);
1869 pCur = pCur->Excl.pDown;
1870 break;
1871
1872 case RTLOCKVALRECSHRDOWN_MAGIC:
1873 Assert(pCur->ShrdOwner.cRecursion >= 1);
1874 pCur = pCur->ShrdOwner.pDown;
1875 break;
1876
1877 case RTLOCKVALRECNEST_MAGIC:
1878 Assert(pCur->Nest.cRecursion > 1);
1879 pCur = pCur->Nest.pDown;
1880 break;
1881
1882 default:
1883 AssertMsgFailedReturn(("%#x\n", pCur->Core.u32Magic), false);
1884 }
1885 }
1886 return false;
1887}
1888#endif /* RT_STRICT */
1889
1890
1891/**
1892 * Pushes a lock record onto the stack.
1893 *
1894 * @param pThreadSelf The current thread.
1895 * @param pRec The lock record.
1896 */
1897static void rtLockValidatorStackPush(PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION pRec)
1898{
1899 Assert(pThreadSelf == RTThreadSelf());
1900 Assert(!rtLockValidatorStackContainsRec(pThreadSelf, pRec));
1901
1902 switch (pRec->Core.u32Magic)
1903 {
1904 case RTLOCKVALRECEXCL_MAGIC:
1905 Assert(pRec->Excl.cRecursion == 1);
1906 Assert(pRec->Excl.pDown == NULL);
1907 rtLockValidatorWriteRecUnionPtr(&pRec->Excl.pDown, pThreadSelf->LockValidator.pStackTop);
1908 break;
1909
1910 case RTLOCKVALRECSHRDOWN_MAGIC:
1911 Assert(pRec->ShrdOwner.cRecursion == 1);
1912 Assert(pRec->ShrdOwner.pDown == NULL);
1913 rtLockValidatorWriteRecUnionPtr(&pRec->ShrdOwner.pDown, pThreadSelf->LockValidator.pStackTop);
1914 break;
1915
1916 default:
1917 AssertMsgFailedReturnVoid(("%#x\n", pRec->Core.u32Magic));
1918 }
1919 rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pStackTop, pRec);
1920}
1921
1922
1923/**
1924 * Pops a lock record off the stack.
1925 *
1926 * @param pThreadSelf The current thread.
1927 * @param pRec The lock.
1928 */
1929static void rtLockValidatorStackPop(PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION pRec)
1930{
1931 Assert(pThreadSelf == RTThreadSelf());
1932
1933 PRTLOCKVALRECUNION pDown;
1934 switch (pRec->Core.u32Magic)
1935 {
1936 case RTLOCKVALRECEXCL_MAGIC:
1937 Assert(pRec->Excl.cRecursion == 0);
1938 pDown = pRec->Excl.pDown;
1939 rtLockValidatorWriteRecUnionPtr(&pRec->Excl.pDown, NULL); /* lazy bird */
1940 break;
1941
1942 case RTLOCKVALRECSHRDOWN_MAGIC:
1943 Assert(pRec->ShrdOwner.cRecursion == 0);
1944 pDown = pRec->ShrdOwner.pDown;
1945 rtLockValidatorWriteRecUnionPtr(&pRec->ShrdOwner.pDown, NULL);
1946 break;
1947
1948 default:
1949 AssertMsgFailedReturnVoid(("%#x\n", pRec->Core.u32Magic));
1950 }
1951 if (pThreadSelf->LockValidator.pStackTop == pRec)
1952 rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pStackTop, pDown);
1953 else
1954 {
1955 /* Find the pointer to our record and unlink ourselves. */
1956 PRTLOCKVALRECUNION pCur = pThreadSelf->LockValidator.pStackTop;
1957 while (pCur)
1958 {
1959 PRTLOCKVALRECUNION volatile *ppDown;
1960 switch (pCur->Core.u32Magic)
1961 {
1962 case RTLOCKVALRECEXCL_MAGIC:
1963 Assert(pCur->Excl.cRecursion >= 1);
1964 ppDown = &pCur->Excl.pDown;
1965 break;
1966
1967 case RTLOCKVALRECSHRDOWN_MAGIC:
1968 Assert(pCur->ShrdOwner.cRecursion >= 1);
1969 ppDown = &pCur->ShrdOwner.pDown;
1970 break;
1971
1972 case RTLOCKVALRECNEST_MAGIC:
1973 Assert(pCur->Nest.cRecursion >= 1);
1974 ppDown = &pCur->Nest.pDown;
1975 break;
1976
1977 default:
1978 AssertMsgFailedReturnVoid(("%#x\n", pCur->Core.u32Magic));
1979 }
1980 pCur = *ppDown;
1981 if (pCur == pRec)
1982 {
1983 rtLockValidatorWriteRecUnionPtr(ppDown, pDown);
1984 return;
1985 }
1986 }
1987 AssertMsgFailed(("%p %p\n", pRec, pThreadSelf));
1988 }
1989}
1990
1991
1992/**
1993 * Creates and pushes lock recursion record onto the stack.
1994 *
1995 * @param pThreadSelf The current thread.
1996 * @param pRec The lock record.
1997 * @param pSrcPos Where the recursion occurred.
1998 */
1999static void rtLockValidatorStackPushRecursion(PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION pRec, PCRTLOCKVALSRCPOS pSrcPos)
2000{
2001 Assert(pThreadSelf == RTThreadSelf());
2002 Assert(rtLockValidatorStackContainsRec(pThreadSelf, pRec));
2003
2004#ifdef RTLOCKVAL_WITH_RECURSION_RECORDS
2005 /*
2006 * Allocate a new recursion record
2007 */
2008 PRTLOCKVALRECNEST pRecursionRec = pThreadSelf->LockValidator.pFreeNestRecs;
2009 if (pRecursionRec)
2010 pThreadSelf->LockValidator.pFreeNestRecs = pRecursionRec->pNextFree;
2011 else
2012 {
2013 pRecursionRec = (PRTLOCKVALRECNEST)RTMemAlloc(sizeof(*pRecursionRec));
2014 if (!pRecursionRec)
2015 return;
2016 }
2017
2018 /*
2019 * Initialize it.
2020 */
2021 switch (pRec->Core.u32Magic)
2022 {
2023 case RTLOCKVALRECEXCL_MAGIC:
2024 pRecursionRec->cRecursion = pRec->Excl.cRecursion;
2025 break;
2026
2027 case RTLOCKVALRECSHRDOWN_MAGIC:
2028 pRecursionRec->cRecursion = pRec->ShrdOwner.cRecursion;
2029 break;
2030
2031 default:
2032 AssertMsgFailed(("%#x\n", pRec->Core.u32Magic));
2033 rtLockValidatorSerializeDestructEnter();
2034 rtLockValidatorSerializeDestructLeave();
2035 RTMemFree(pRecursionRec);
2036 return;
2037 }
2038 Assert(pRecursionRec->cRecursion > 1);
2039 pRecursionRec->pRec = pRec;
2040 pRecursionRec->pDown = NULL;
2041 pRecursionRec->pNextFree = NULL;
2042 rtLockValidatorSrcPosCopy(&pRecursionRec->SrcPos, pSrcPos);
2043 pRecursionRec->Core.u32Magic = RTLOCKVALRECNEST_MAGIC;
2044
2045 /*
2046 * Link it.
2047 */
2048 pRecursionRec->pDown = pThreadSelf->LockValidator.pStackTop;
2049 rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pStackTop, (PRTLOCKVALRECUNION)pRecursionRec);
2050#endif /* RTLOCKVAL_WITH_RECURSION_RECORDS */
2051}
2052
2053
2054/**
2055 * Pops a lock recursion record off the stack.
2056 *
2057 * @param pThreadSelf The current thread.
2058 * @param pRec The lock record.
2059 */
2060static void rtLockValidatorStackPopRecursion(PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION pRec)
2061{
2062 Assert(pThreadSelf == RTThreadSelf());
2063 Assert(rtLockValidatorStackContainsRec(pThreadSelf, pRec));
2064
2065 uint32_t cRecursion;
2066 switch (pRec->Core.u32Magic)
2067 {
2068 case RTLOCKVALRECEXCL_MAGIC: cRecursion = pRec->Excl.cRecursion; break;
2069 case RTLOCKVALRECSHRDOWN_MAGIC: cRecursion = pRec->ShrdOwner.cRecursion; break;
2070 default: AssertMsgFailedReturnVoid(("%#x\n", pRec->Core.u32Magic));
2071 }
2072 Assert(cRecursion >= 1);
2073
2074#ifdef RTLOCKVAL_WITH_RECURSION_RECORDS
2075 /*
2076 * Pop the recursion record.
2077 */
2078 PRTLOCKVALRECUNION pNest = pThreadSelf->LockValidator.pStackTop;
2079 if ( pNest != NULL
2080 && pNest->Core.u32Magic == RTLOCKVALRECNEST_MAGIC
2081 && pNest->Nest.pRec == pRec
2082 )
2083 {
2084 Assert(pNest->Nest.cRecursion == cRecursion + 1);
2085 rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pStackTop, pNest->Nest.pDown);
2086 }
2087 else
2088 {
2089 /* Find the record above ours. */
2090 PRTLOCKVALRECUNION volatile *ppDown = NULL;
2091 for (;;)
2092 {
2093 AssertMsgReturnVoid(pNest, ("%p %p\n", pRec, pThreadSelf));
2094 switch (pNest->Core.u32Magic)
2095 {
2096 case RTLOCKVALRECEXCL_MAGIC:
2097 ppDown = &pNest->Excl.pDown;
2098 pNest = *ppDown;
2099 continue;
2100 case RTLOCKVALRECSHRDOWN_MAGIC:
2101 ppDown = &pNest->ShrdOwner.pDown;
2102 pNest = *ppDown;
2103 continue;
2104 case RTLOCKVALRECNEST_MAGIC:
2105 if (pNest->Nest.pRec == pRec)
2106 break;
2107 ppDown = &pNest->Nest.pDown;
2108 pNest = *ppDown;
2109 continue;
2110 default:
2111 AssertMsgFailedReturnVoid(("%#x\n", pNest->Core.u32Magic));
2112 }
2113 break; /* ugly */
2114 }
2115 Assert(pNest->Nest.cRecursion == cRecursion + 1);
2116 rtLockValidatorWriteRecUnionPtr(ppDown, pNest->Nest.pDown);
2117 }
2118
2119 /*
2120 * Invalidate and free the record.
2121 */
2122 ASMAtomicWriteU32(&pNest->Core.u32Magic, RTLOCKVALRECNEST_MAGIC);
2123 rtLockValidatorWriteRecUnionPtr(&pNest->Nest.pDown, NULL);
2124 rtLockValidatorWriteRecUnionPtr(&pNest->Nest.pRec, NULL);
2125 pNest->Nest.cRecursion = 0;
2126 pNest->Nest.pNextFree = pThreadSelf->LockValidator.pFreeNestRecs;
2127 pThreadSelf->LockValidator.pFreeNestRecs = &pNest->Nest;
2128#endif /* RTLOCKVAL_WITH_RECURSION_RECORDS */
2129}
2130
2131
2132/**
2133 * Helper for rtLockValidatorStackCheckLockingOrder that does the bitching and
2134 * returns VERR_SEM_LV_WRONG_ORDER.
2135 */
2136static int rtLockValidatorStackWrongOrder(const char *pszWhat, PCRTLOCKVALSRCPOS pSrcPos, PRTTHREADINT pThreadSelf,
2137 PRTLOCKVALRECUNION pRec1, PRTLOCKVALRECUNION pRec2,
2138 RTLOCKVALCLASSINT *pClass1, RTLOCKVALCLASSINT *pClass2)
2139
2140
2141{
2142 rtLockValComplainFirst(pszWhat, pSrcPos, pThreadSelf, pRec1, false);
2143 rtLockValComplainAboutLock("Other lock: ", pRec2, "\n");
2144 rtLockValComplainAboutClass("My class: ", pClass1, rtLockValidatorRecGetSubClass(pRec1), true /*fVerbose*/);
2145 rtLockValComplainAboutClass("Other class: ", pClass2, rtLockValidatorRecGetSubClass(pRec2), true /*fVerbose*/);
2146 rtLockValComplainAboutLockStack(pThreadSelf, 0, 0, pRec2);
2147 rtLockValComplainPanic();
2148 return !g_fLockValSoftWrongOrder ? VERR_SEM_LV_WRONG_ORDER : VINF_SUCCESS;
2149}
2150
2151
2152/**
2153 * Checks if the sub-class order is ok or not.
2154 *
2155 * Used to deal with two locks from the same class.
2156 *
2157 * @returns true if ok, false if not.
2158 * @param uSubClass1 The sub-class of the lock that is being
2159 * considered.
2160 * @param uSubClass2 The sub-class of the lock that is already being
2161 * held.
2162 */
2163DECL_FORCE_INLINE(bool) rtLockValidatorIsSubClassOrderOk(uint32_t uSubClass1, uint32_t uSubClass2)
2164{
2165 if (uSubClass1 > uSubClass2)
2166 {
2167 /* NONE kills ANY. */
2168 if (uSubClass2 == RTLOCKVAL_SUB_CLASS_NONE)
2169 return false;
2170 return true;
2171 }
2172
2173 /* ANY counters all USER values. (uSubClass1 == NONE only if they are equal) */
2174 AssertCompile(RTLOCKVAL_SUB_CLASS_ANY > RTLOCKVAL_SUB_CLASS_NONE);
2175 if (uSubClass1 == RTLOCKVAL_SUB_CLASS_ANY)
2176 return true;
2177 return false;
2178}
2179
2180
2181/**
2182 * Checks if the class and sub-class lock order is ok.
2183 *
2184 * @returns true if ok, false if not.
2185 * @param pClass1 The class of the lock that is being considered.
2186 * @param uSubClass1 The sub-class that goes with @a pClass1.
2187 * @param pClass2 The class of the lock that is already being
2188 * held.
2189 * @param uSubClass2 The sub-class that goes with @a pClass2.
2190 */
2191DECL_FORCE_INLINE(bool) rtLockValidatorIsClassOrderOk(RTLOCKVALCLASSINT *pClass1, uint32_t uSubClass1,
2192 RTLOCKVALCLASSINT *pClass2, uint32_t uSubClass2)
2193{
2194 if (pClass1 == pClass2)
2195 return rtLockValidatorIsSubClassOrderOk(uSubClass1, uSubClass2);
2196 return rtLockValidatorClassIsPriorClass(pClass1, pClass2);
2197}
2198
2199
2200/**
2201 * Checks the locking order, part two.
2202 *
2203 * @returns VINF_SUCCESS, VERR_SEM_LV_WRONG_ORDER or VERR_SEM_LV_INTERNAL_ERROR.
2204 * @param pClass The lock class.
2205 * @param uSubClass The lock sub-class.
2206 * @param pThreadSelf The current thread.
2207 * @param pRec The lock record.
2208 * @param pSrcPos The source position of the locking operation.
2209 */
2210static int rtLockValidatorStackCheckLockingOrder2(RTLOCKVALCLASSINT * const pClass, uint32_t const uSubClass,
2211 PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION const pRec,
2212 PCRTLOCKVALSRCPOS const pSrcPos,
2213 RTLOCKVALCLASSINT * const pFirstBadClass,
2214 PRTLOCKVALRECUNION const pFirstBadRec,
2215 PRTLOCKVALRECUNION const pFirstBadDown)
2216{
2217 /*
2218 * Something went wrong, pCur is pointing to where.
2219 */
2220 if ( pClass == pFirstBadClass
2221 || rtLockValidatorClassIsPriorClass(pFirstBadClass, pClass))
2222 return rtLockValidatorStackWrongOrder("Wrong locking order!", pSrcPos, pThreadSelf,
2223 pRec, pFirstBadRec, pClass, pFirstBadClass);
2224 if (!pClass->fAutodidact)
2225 return rtLockValidatorStackWrongOrder("Wrong locking order! (unknown)", pSrcPos, pThreadSelf,
2226 pRec, pFirstBadRec, pClass, pFirstBadClass);
2227
2228 /*
2229 * This class is an autodidact, so we have to check out the rest of the stack
2230 * for direct violations.
2231 */
2232 uint32_t cNewRules = 1;
2233 PRTLOCKVALRECUNION pCur = pFirstBadDown;
2234 while (pCur)
2235 {
2236 AssertPtrReturn(pCur, VERR_SEM_LV_INTERNAL_ERROR);
2237
2238 if (pCur->Core.u32Magic == RTLOCKVALRECNEST_MAGIC)
2239 pCur = pCur->Nest.pDown;
2240 else
2241 {
2242 PRTLOCKVALRECUNION pDown;
2243 uint32_t uPriorSubClass;
2244 RTLOCKVALCLASSINT *pPriorClass = rtLockValidatorRecGetClassesAndDown(pCur, &uPriorSubClass, &pDown);
2245 if (pPriorClass != NIL_RTLOCKVALCLASS)
2246 {
2247 AssertPtrReturn(pPriorClass, VERR_SEM_LV_INTERNAL_ERROR);
2248 AssertReturn(pPriorClass->u32Magic == RTLOCKVALCLASS_MAGIC, VERR_SEM_LV_INTERNAL_ERROR);
2249 if (!rtLockValidatorIsClassOrderOk(pClass, uSubClass, pPriorClass, uPriorSubClass))
2250 {
2251 if ( pClass == pPriorClass
2252 || rtLockValidatorClassIsPriorClass(pPriorClass, pClass))
2253 return rtLockValidatorStackWrongOrder("Wrong locking order! (more than one)", pSrcPos, pThreadSelf,
2254 pRec, pCur, pClass, pPriorClass);
2255 cNewRules++;
2256 }
2257 }
2258 pCur = pDown;
2259 }
2260 }
2261
2262 if (cNewRules == 1)
2263 {
2264 /*
2265 * Special case the simple operation, hoping that it will be a
2266 * frequent case.
2267 */
2268 int rc = rtLockValidatorClassAddPriorClass(pClass, pFirstBadClass, true /*fAutodidacticism*/, pSrcPos);
2269 if (rc == VERR_SEM_LV_WRONG_ORDER)
2270 return rtLockValidatorStackWrongOrder("Wrong locking order! (race)", pSrcPos, pThreadSelf,
2271 pRec, pFirstBadRec, pClass, pFirstBadClass);
2272 Assert(RT_SUCCESS(rc) || rc == VERR_NO_MEMORY);
2273 }
2274 else
2275 {
2276 /*
2277 * We may be adding more than one rule, so we have to take the lock
2278 * before starting to add the rules. This means we have to check
2279 * the state after taking it since we might be racing someone adding
2280 * a conflicting rule.
2281 */
2282 if (!RTCritSectIsInitialized(&g_LockValClassTeachCS))
2283 rtLockValidatorLazyInit();
2284 int rcLock = RTCritSectEnter(&g_LockValClassTeachCS);
2285
2286 /* Check */
2287 pCur = pFirstBadRec;
2288 while (pCur)
2289 {
2290 if (pCur->Core.u32Magic == RTLOCKVALRECNEST_MAGIC)
2291 pCur = pCur->Nest.pDown;
2292 else
2293 {
2294 uint32_t uPriorSubClass;
2295 PRTLOCKVALRECUNION pDown;
2296 RTLOCKVALCLASSINT *pPriorClass = rtLockValidatorRecGetClassesAndDown(pCur, &uPriorSubClass, &pDown);
2297 if (pPriorClass != NIL_RTLOCKVALCLASS)
2298 {
2299 if (!rtLockValidatorIsClassOrderOk(pClass, uSubClass, pPriorClass, uPriorSubClass))
2300 {
2301 if ( pClass == pPriorClass
2302 || rtLockValidatorClassIsPriorClass(pPriorClass, pClass))
2303 {
2304 if (RT_SUCCESS(rcLock))
2305 RTCritSectLeave(&g_LockValClassTeachCS);
2306 return rtLockValidatorStackWrongOrder("Wrong locking order! (2nd)", pSrcPos, pThreadSelf,
2307 pRec, pCur, pClass, pPriorClass);
2308 }
2309 }
2310 }
2311 pCur = pDown;
2312 }
2313 }
2314
2315 /* Iterate the stack yet again, adding new rules this time. */
2316 pCur = pFirstBadRec;
2317 while (pCur)
2318 {
2319 if (pCur->Core.u32Magic == RTLOCKVALRECNEST_MAGIC)
2320 pCur = pCur->Nest.pDown;
2321 else
2322 {
2323 uint32_t uPriorSubClass;
2324 PRTLOCKVALRECUNION pDown;
2325 RTLOCKVALCLASSINT *pPriorClass = rtLockValidatorRecGetClassesAndDown(pCur, &uPriorSubClass, &pDown);
2326 if (pPriorClass != NIL_RTLOCKVALCLASS)
2327 {
2328 if (!rtLockValidatorIsClassOrderOk(pClass, uSubClass, pPriorClass, uPriorSubClass))
2329 {
2330 Assert( pClass != pPriorClass
2331 && !rtLockValidatorClassIsPriorClass(pPriorClass, pClass));
2332 int rc = rtLockValidatorClassAddPriorClass(pClass, pPriorClass, true /*fAutodidacticism*/, pSrcPos);
2333 if (RT_FAILURE(rc))
2334 {
2335 Assert(rc == VERR_NO_MEMORY);
2336 break;
2337 }
2338 Assert(rtLockValidatorClassIsPriorClass(pClass, pPriorClass));
2339 }
2340 }
2341 pCur = pDown;
2342 }
2343 }
2344
2345 if (RT_SUCCESS(rcLock))
2346 RTCritSectLeave(&g_LockValClassTeachCS);
2347 }
2348
2349 return VINF_SUCCESS;
2350}
2351
2352
2353
2354/**
2355 * Checks the locking order.
2356 *
2357 * @returns VINF_SUCCESS, VERR_SEM_LV_WRONG_ORDER or VERR_SEM_LV_INTERNAL_ERROR.
2358 * @param pClass The lock class.
2359 * @param uSubClass The lock sub-class.
2360 * @param pThreadSelf The current thread.
2361 * @param pRec The lock record.
2362 * @param pSrcPos The source position of the locking operation.
2363 */
2364static int rtLockValidatorStackCheckLockingOrder(RTLOCKVALCLASSINT * const pClass, uint32_t const uSubClass,
2365 PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION const pRec,
2366 PCRTLOCKVALSRCPOS pSrcPos)
2367{
2368 /*
2369 * Some internal paranoia first.
2370 */
2371 AssertPtr(pClass);
2372 Assert(pClass->u32Magic == RTLOCKVALCLASS_MAGIC);
2373 AssertPtr(pThreadSelf);
2374 Assert(pThreadSelf->u32Magic == RTTHREADINT_MAGIC);
2375 AssertPtr(pRec);
2376 AssertPtrNull(pSrcPos);
2377
2378 /*
2379 * Walk the stack, delegate problems to a worker routine.
2380 */
2381 PRTLOCKVALRECUNION pCur = pThreadSelf->LockValidator.pStackTop;
2382 if (!pCur)
2383 return VINF_SUCCESS;
2384
2385 for (;;)
2386 {
2387 AssertPtrReturn(pCur, VERR_SEM_LV_INTERNAL_ERROR);
2388
2389 if (pCur->Core.u32Magic == RTLOCKVALRECNEST_MAGIC)
2390 pCur = pCur->Nest.pDown;
2391 else
2392 {
2393 uint32_t uPriorSubClass;
2394 PRTLOCKVALRECUNION pDown;
2395 RTLOCKVALCLASSINT *pPriorClass = rtLockValidatorRecGetClassesAndDown(pCur, &uPriorSubClass, &pDown);
2396 if (pPriorClass != NIL_RTLOCKVALCLASS)
2397 {
2398 AssertPtrReturn(pPriorClass, VERR_SEM_LV_INTERNAL_ERROR);
2399 AssertReturn(pPriorClass->u32Magic == RTLOCKVALCLASS_MAGIC, VERR_SEM_LV_INTERNAL_ERROR);
2400 if (RT_UNLIKELY(!rtLockValidatorIsClassOrderOk(pClass, uSubClass, pPriorClass, uPriorSubClass)))
2401 return rtLockValidatorStackCheckLockingOrder2(pClass, uSubClass, pThreadSelf, pRec, pSrcPos,
2402 pPriorClass, pCur, pDown);
2403 }
2404 pCur = pDown;
2405 }
2406 if (!pCur)
2407 return VINF_SUCCESS;
2408 }
2409}
2410
2411
2412/**
2413 * Check that the lock record is the topmost one on the stack, complain and fail
2414 * if it isn't.
2415 *
2416 * @returns VINF_SUCCESS, VERR_SEM_LV_WRONG_RELEASE_ORDER or
2417 * VERR_SEM_LV_INVALID_PARAMETER.
2418 * @param pThreadSelf The current thread.
2419 * @param pRec The record.
2420 */
2421static int rtLockValidatorStackCheckReleaseOrder(PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION pRec)
2422{
2423 AssertReturn(pThreadSelf != NIL_RTTHREAD, VERR_SEM_LV_INVALID_PARAMETER);
2424 Assert(pThreadSelf == RTThreadSelf());
2425
2426 PRTLOCKVALRECUNION pTop = pThreadSelf->LockValidator.pStackTop;
2427 if (RT_LIKELY( pTop == pRec
2428 || ( pTop
2429 && pTop->Core.u32Magic == RTLOCKVALRECNEST_MAGIC
2430 && pTop->Nest.pRec == pRec) ))
2431 return VINF_SUCCESS;
2432
2433#ifdef RTLOCKVAL_WITH_RECURSION_RECORDS
2434 /* Look for a recursion record so the right frame is dumped and marked. */
2435 while (pTop)
2436 {
2437 if (pTop->Core.u32Magic == RTLOCKVALRECNEST_MAGIC)
2438 {
2439 if (pTop->Nest.pRec == pRec)
2440 {
2441 pRec = pTop;
2442 break;
2443 }
2444 pTop = pTop->Nest.pDown;
2445 }
2446 else if (pTop->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC)
2447 pTop = pTop->Excl.pDown;
2448 else if (pTop->Core.u32Magic == RTLOCKVALRECSHRDOWN_MAGIC)
2449 pTop = pTop->ShrdOwner.pDown;
2450 else
2451 break;
2452 }
2453#endif
2454
2455 rtLockValComplainFirst("Wrong release order!", NULL, pThreadSelf, pRec, true);
2456 rtLockValComplainPanic();
2457 return !g_fLockValSoftWrongOrder ? VERR_SEM_LV_WRONG_RELEASE_ORDER : VINF_SUCCESS;
2458}
2459
2460
2461/**
2462 * Checks if all owners are blocked - shared record operated in signaller mode.
2463 *
2464 * @returns true / false accordingly.
2465 * @param pRec The record.
2466 * @param pThreadSelf The current thread.
2467 */
2468DECL_FORCE_INLINE(bool) rtLockValidatorDdAreAllThreadsBlocked(PRTLOCKVALRECSHRD pRec, PRTTHREADINT pThreadSelf)
2469{
2470 PRTLOCKVALRECSHRDOWN volatile *papOwners = pRec->papOwners;
2471 uint32_t cAllocated = pRec->cAllocated;
2472 uint32_t cEntries = ASMAtomicUoReadU32(&pRec->cEntries);
2473 if (cEntries == 0)
2474 return false;
2475
2476 for (uint32_t i = 0; i < cAllocated; i++)
2477 {
2478 PRTLOCKVALRECSHRDOWN pEntry = rtLockValidatorUoReadSharedOwner(&papOwners[i]);
2479 if ( pEntry
2480 && pEntry->Core.u32Magic == RTLOCKVALRECSHRDOWN_MAGIC)
2481 {
2482 PRTTHREADINT pCurThread = rtLockValidatorReadThreadHandle(&pEntry->hThread);
2483 if (!pCurThread)
2484 return false;
2485 if (pCurThread->u32Magic != RTTHREADINT_MAGIC)
2486 return false;
2487 if ( !RTTHREAD_IS_SLEEPING(rtThreadGetState(pCurThread))
2488 && pCurThread != pThreadSelf)
2489 return false;
2490 if (--cEntries == 0)
2491 break;
2492 }
2493 else
2494 Assert(!pEntry || pEntry->Core.u32Magic == RTLOCKVALRECSHRDOWN_MAGIC_DEAD);
2495 }
2496
2497 return true;
2498}
2499
2500
2501/**
2502 * Verifies the deadlock stack before calling it a deadlock.
2503 *
2504 * @retval VERR_SEM_LV_DEADLOCK if it's a deadlock.
2505 * @retval VERR_SEM_LV_ILLEGAL_UPGRADE if it's a deadlock on the same lock.
2506 * @retval VERR_TRY_AGAIN if something changed.
2507 *
2508 * @param pStack The deadlock detection stack.
2509 * @param pThreadSelf The current thread.
2510 */
2511static int rtLockValidatorDdVerifyDeadlock(PRTLOCKVALDDSTACK pStack, PRTTHREADINT pThreadSelf)
2512{
2513 uint32_t const c = pStack->c;
2514 for (uint32_t iPass = 0; iPass < 3; iPass++)
2515 {
2516 for (uint32_t i = 1; i < c; i++)
2517 {
2518 PRTTHREADINT pThread = pStack->a[i].pThread;
2519 if (pThread->u32Magic != RTTHREADINT_MAGIC)
2520 return VERR_TRY_AGAIN;
2521 if (rtThreadGetState(pThread) != pStack->a[i].enmState)
2522 return VERR_TRY_AGAIN;
2523 if (rtLockValidatorReadRecUnionPtr(&pThread->LockValidator.pRec) != pStack->a[i].pFirstSibling)
2524 return VERR_TRY_AGAIN;
2525 /* ASSUMES the signaller records won't have siblings! */
2526 PRTLOCKVALRECUNION pRec = pStack->a[i].pRec;
2527 if ( pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
2528 && pRec->Shared.fSignaller
2529 && !rtLockValidatorDdAreAllThreadsBlocked(&pRec->Shared, pThreadSelf))
2530 return VERR_TRY_AGAIN;
2531 }
2532 RTThreadYield();
2533 }
2534
2535 if (c == 1)
2536 return VERR_SEM_LV_ILLEGAL_UPGRADE;
2537 return VERR_SEM_LV_DEADLOCK;
2538}
2539
2540
2541/**
2542 * Checks for stack cycles caused by another deadlock before returning.
2543 *
2544 * @retval VINF_SUCCESS if the stack is simply too small.
2545 * @retval VERR_SEM_LV_EXISTING_DEADLOCK if a cycle was detected.
2546 *
2547 * @param pStack The deadlock detection stack.
2548 */
2549static int rtLockValidatorDdHandleStackOverflow(PRTLOCKVALDDSTACK pStack)
2550{
2551 for (size_t i = 0; i < RT_ELEMENTS(pStack->a) - 1; i++)
2552 {
2553 PRTTHREADINT pThread = pStack->a[i].pThread;
2554 for (size_t j = i + 1; j < RT_ELEMENTS(pStack->a); j++)
2555 if (pStack->a[j].pThread == pThread)
2556 return VERR_SEM_LV_EXISTING_DEADLOCK;
2557 }
2558 static bool volatile s_fComplained = false;
2559 if (!s_fComplained)
2560 {
2561 s_fComplained = true;
2562 rtLockValComplain(RT_SRC_POS, "lock validator stack is too small! (%zu entries)\n", RT_ELEMENTS(pStack->a));
2563 }
2564 return VINF_SUCCESS;
2565}
2566
2567
2568/**
2569 * Worker for rtLockValidatorDeadlockDetection that does the actual deadlock
2570 * detection.
2571 *
2572 * @retval VINF_SUCCESS
2573 * @retval VERR_SEM_LV_DEADLOCK
2574 * @retval VERR_SEM_LV_EXISTING_DEADLOCK
2575 * @retval VERR_SEM_LV_ILLEGAL_UPGRADE
2576 * @retval VERR_TRY_AGAIN
2577 *
2578 * @param pStack The stack to use.
2579 * @param pOriginalRec The original record.
2580 * @param pThreadSelf The calling thread.
2581 */
2582static int rtLockValidatorDdDoDetection(PRTLOCKVALDDSTACK pStack, PRTLOCKVALRECUNION const pOriginalRec,
2583 PRTTHREADINT const pThreadSelf)
2584{
2585 pStack->c = 0;
2586
2587 /* We could use a single RTLOCKVALDDENTRY variable here, but the
2588 compiler may make a better job of it when using individual variables. */
2589 PRTLOCKVALRECUNION pRec = pOriginalRec;
2590 PRTLOCKVALRECUNION pFirstSibling = pOriginalRec;
2591 uint32_t iEntry = UINT32_MAX;
2592 PRTTHREADINT pThread = NIL_RTTHREAD;
2593 RTTHREADSTATE enmState = RTTHREADSTATE_RUNNING;
2594 for (uint32_t iLoop = 0; ; iLoop++)
2595 {
2596 /*
2597 * Process the current record.
2598 */
2599 RTLOCKVAL_ASSERT_PTR_ALIGN(pRec);
2600
2601 /* Find the next relevant owner thread and record. */
2602 PRTLOCKVALRECUNION pNextRec = NULL;
2603 RTTHREADSTATE enmNextState = RTTHREADSTATE_RUNNING;
2604 PRTTHREADINT pNextThread = NIL_RTTHREAD;
2605 switch (pRec->Core.u32Magic)
2606 {
2607 case RTLOCKVALRECEXCL_MAGIC:
2608 Assert(iEntry == UINT32_MAX);
2609 for (;;)
2610 {
2611 pNextThread = rtLockValidatorReadThreadHandle(&pRec->Excl.hThread);
2612 if ( !pNextThread
2613 || pNextThread->u32Magic != RTTHREADINT_MAGIC)
2614 break;
2615 enmNextState = rtThreadGetState(pNextThread);
2616 if ( !RTTHREAD_IS_SLEEPING(enmNextState)
2617 && pNextThread != pThreadSelf)
2618 break;
2619 pNextRec = rtLockValidatorReadRecUnionPtr(&pNextThread->LockValidator.pRec);
2620 if (RT_LIKELY( !pNextRec
2621 || enmNextState == rtThreadGetState(pNextThread)))
2622 break;
2623 pNextRec = NULL;
2624 }
2625 if (!pNextRec)
2626 {
2627 pRec = pRec->Excl.pSibling;
2628 if ( pRec
2629 && pRec != pFirstSibling)
2630 continue;
2631 pNextThread = NIL_RTTHREAD;
2632 }
2633 break;
2634
2635 case RTLOCKVALRECSHRD_MAGIC:
2636 if (!pRec->Shared.fSignaller)
2637 {
2638 /* Skip to the next sibling if same side. ASSUMES reader priority. */
2639 /** @todo The read side of a read-write lock is problematic if
2640 * the implementation prioritizes writers over readers because
2641 * that means we should could deadlock against current readers
2642 * if a writer showed up. If the RW sem implementation is
2643 * wrapping some native API, it's not so easy to detect when we
2644 * should do this and when we shouldn't. Checking when we
2645 * shouldn't is subject to wakeup scheduling and cannot easily
2646 * be made reliable.
2647 *
2648 * At the moment we circumvent all this mess by declaring that
2649 * readers has priority. This is TRUE on linux, but probably
2650 * isn't on Solaris and FreeBSD. */
2651 if ( pRec == pFirstSibling
2652 && pRec->Shared.pSibling != NULL
2653 && pRec->Shared.pSibling != pFirstSibling)
2654 {
2655 pRec = pRec->Shared.pSibling;
2656 Assert(iEntry == UINT32_MAX);
2657 continue;
2658 }
2659 }
2660
2661 /* Scan the owner table for blocked owners. */
2662 if ( ASMAtomicUoReadU32(&pRec->Shared.cEntries) > 0
2663 && ( !pRec->Shared.fSignaller
2664 || iEntry != UINT32_MAX
2665 || rtLockValidatorDdAreAllThreadsBlocked(&pRec->Shared, pThreadSelf)
2666 )
2667 )
2668 {
2669 uint32_t cAllocated = pRec->Shared.cAllocated;
2670 PRTLOCKVALRECSHRDOWN volatile *papOwners = pRec->Shared.papOwners;
2671 while (++iEntry < cAllocated)
2672 {
2673 PRTLOCKVALRECSHRDOWN pEntry = rtLockValidatorUoReadSharedOwner(&papOwners[iEntry]);
2674 if (pEntry)
2675 {
2676 for (;;)
2677 {
2678 if (pEntry->Core.u32Magic != RTLOCKVALRECSHRDOWN_MAGIC)
2679 break;
2680 pNextThread = rtLockValidatorReadThreadHandle(&pEntry->hThread);
2681 if ( !pNextThread
2682 || pNextThread->u32Magic != RTTHREADINT_MAGIC)
2683 break;
2684 enmNextState = rtThreadGetState(pNextThread);
2685 if ( !RTTHREAD_IS_SLEEPING(enmNextState)
2686 && pNextThread != pThreadSelf)
2687 break;
2688 pNextRec = rtLockValidatorReadRecUnionPtr(&pNextThread->LockValidator.pRec);
2689 if (RT_LIKELY( !pNextRec
2690 || enmNextState == rtThreadGetState(pNextThread)))
2691 break;
2692 pNextRec = NULL;
2693 }
2694 if (pNextRec)
2695 break;
2696 }
2697 else
2698 Assert(!pEntry || pEntry->Core.u32Magic == RTLOCKVALRECSHRDOWN_MAGIC_DEAD);
2699 }
2700 if (pNextRec)
2701 break;
2702 pNextThread = NIL_RTTHREAD;
2703 }
2704
2705 /* Advance to the next sibling, if any. */
2706 pRec = pRec->Shared.pSibling;
2707 if ( pRec != NULL
2708 && pRec != pFirstSibling)
2709 {
2710 iEntry = UINT32_MAX;
2711 continue;
2712 }
2713 break;
2714
2715 case RTLOCKVALRECEXCL_MAGIC_DEAD:
2716 case RTLOCKVALRECSHRD_MAGIC_DEAD:
2717 break;
2718
2719 case RTLOCKVALRECSHRDOWN_MAGIC:
2720 case RTLOCKVALRECSHRDOWN_MAGIC_DEAD:
2721 default:
2722 AssertMsgFailed(("%p: %#x\n", pRec, pRec->Core));
2723 break;
2724 }
2725
2726 if (pNextRec)
2727 {
2728 /*
2729 * Recurse and check for deadlock.
2730 */
2731 uint32_t i = pStack->c;
2732 if (RT_UNLIKELY(i >= RT_ELEMENTS(pStack->a)))
2733 return rtLockValidatorDdHandleStackOverflow(pStack);
2734
2735 pStack->c++;
2736 pStack->a[i].pRec = pRec;
2737 pStack->a[i].iEntry = iEntry;
2738 pStack->a[i].enmState = enmState;
2739 pStack->a[i].pThread = pThread;
2740 pStack->a[i].pFirstSibling = pFirstSibling;
2741
2742 if (RT_UNLIKELY( pNextThread == pThreadSelf
2743 && ( i != 0
2744 || pRec->Core.u32Magic != RTLOCKVALRECSHRD_MAGIC
2745 || !pRec->Shared.fSignaller) /* ASSUMES signaller records have no siblings. */
2746 )
2747 )
2748 return rtLockValidatorDdVerifyDeadlock(pStack, pThreadSelf);
2749
2750 pRec = pNextRec;
2751 pFirstSibling = pNextRec;
2752 iEntry = UINT32_MAX;
2753 enmState = enmNextState;
2754 pThread = pNextThread;
2755 }
2756 else
2757 {
2758 /*
2759 * No deadlock here, unwind the stack and deal with any unfinished
2760 * business there.
2761 */
2762 uint32_t i = pStack->c;
2763 for (;;)
2764 {
2765 /* pop */
2766 if (i == 0)
2767 return VINF_SUCCESS;
2768 i--;
2769 pRec = pStack->a[i].pRec;
2770 iEntry = pStack->a[i].iEntry;
2771
2772 /* Examine it. */
2773 uint32_t u32Magic = pRec->Core.u32Magic;
2774 if (u32Magic == RTLOCKVALRECEXCL_MAGIC)
2775 pRec = pRec->Excl.pSibling;
2776 else if (u32Magic == RTLOCKVALRECSHRD_MAGIC)
2777 {
2778 if (iEntry + 1 < pRec->Shared.cAllocated)
2779 break; /* continue processing this record. */
2780 pRec = pRec->Shared.pSibling;
2781 }
2782 else
2783 {
2784 Assert( u32Magic == RTLOCKVALRECEXCL_MAGIC_DEAD
2785 || u32Magic == RTLOCKVALRECSHRD_MAGIC_DEAD);
2786 continue;
2787 }
2788
2789 /* Any next record to advance to? */
2790 if ( !pRec
2791 || pRec == pStack->a[i].pFirstSibling)
2792 continue;
2793 iEntry = UINT32_MAX;
2794 break;
2795 }
2796
2797 /* Restore the rest of the state and update the stack. */
2798 pFirstSibling = pStack->a[i].pFirstSibling;
2799 enmState = pStack->a[i].enmState;
2800 pThread = pStack->a[i].pThread;
2801 pStack->c = i;
2802 }
2803
2804 Assert(iLoop != 1000000);
2805 }
2806}
2807
2808
2809/**
2810 * Check for the simple no-deadlock case.
2811 *
2812 * @returns true if no deadlock, false if further investigation is required.
2813 *
2814 * @param pOriginalRec The original record.
2815 */
2816DECLINLINE(int) rtLockValidatorIsSimpleNoDeadlockCase(PRTLOCKVALRECUNION pOriginalRec)
2817{
2818 if ( pOriginalRec->Excl.Core.u32Magic == RTLOCKVALRECEXCL_MAGIC
2819 && !pOriginalRec->Excl.pSibling)
2820 {
2821 PRTTHREADINT pThread = rtLockValidatorReadThreadHandle(&pOriginalRec->Excl.hThread);
2822 if ( !pThread
2823 || pThread->u32Magic != RTTHREADINT_MAGIC)
2824 return true;
2825 RTTHREADSTATE enmState = rtThreadGetState(pThread);
2826 if (!RTTHREAD_IS_SLEEPING(enmState))
2827 return true;
2828 }
2829 return false;
2830}
2831
2832
2833/**
2834 * Worker for rtLockValidatorDeadlockDetection that bitches about a deadlock.
2835 *
2836 * @param pStack The chain of locks causing the deadlock.
2837 * @param pRec The record relating to the current thread's lock
2838 * operation.
2839 * @param pThreadSelf This thread.
2840 * @param pSrcPos Where we are going to deadlock.
2841 * @param rc The return code.
2842 */
2843static void rcLockValidatorDoDeadlockComplaining(PRTLOCKVALDDSTACK pStack, PRTLOCKVALRECUNION pRec,
2844 PRTTHREADINT pThreadSelf, PCRTLOCKVALSRCPOS pSrcPos, int rc)
2845{
2846 if (!ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
2847 {
2848 const char *pszWhat;
2849 switch (rc)
2850 {
2851 case VERR_SEM_LV_DEADLOCK: pszWhat = "Detected deadlock!"; break;
2852 case VERR_SEM_LV_EXISTING_DEADLOCK: pszWhat = "Found existing deadlock!"; break;
2853 case VERR_SEM_LV_ILLEGAL_UPGRADE: pszWhat = "Illegal lock upgrade!"; break;
2854 default: AssertFailed(); pszWhat = "!unexpected rc!"; break;
2855 }
2856 rtLockValComplainFirst(pszWhat, pSrcPos, pThreadSelf, pStack->a[0].pRec != pRec ? pRec : NULL, true);
2857 rtLockValComplainMore("---- start of deadlock chain - %u entries ----\n", pStack->c);
2858 for (uint32_t i = 0; i < pStack->c; i++)
2859 {
2860 char szPrefix[24];
2861 RTStrPrintf(szPrefix, sizeof(szPrefix), "#%02u: ", i);
2862 PRTLOCKVALRECUNION pShrdOwner = NULL;
2863 if (pStack->a[i].pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC)
2864 pShrdOwner = (PRTLOCKVALRECUNION)pStack->a[i].pRec->Shared.papOwners[pStack->a[i].iEntry];
2865 if (VALID_PTR(pShrdOwner) && pShrdOwner->Core.u32Magic == RTLOCKVALRECSHRDOWN_MAGIC)
2866 {
2867 rtLockValComplainAboutLock(szPrefix, pShrdOwner, "\n");
2868 rtLockValComplainAboutLockStack(pShrdOwner->ShrdOwner.hThread, 5, 2, pShrdOwner);
2869 }
2870 else
2871 {
2872 rtLockValComplainAboutLock(szPrefix, pStack->a[i].pRec, "\n");
2873 if (pStack->a[i].pRec->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC)
2874 rtLockValComplainAboutLockStack(pStack->a[i].pRec->Excl.hThread, 5, 2, pStack->a[i].pRec);
2875 }
2876 }
2877 rtLockValComplainMore("---- end of deadlock chain ----\n");
2878 }
2879
2880 rtLockValComplainPanic();
2881}
2882
2883
2884/**
2885 * Perform deadlock detection.
2886 *
2887 * @retval VINF_SUCCESS
2888 * @retval VERR_SEM_LV_DEADLOCK
2889 * @retval VERR_SEM_LV_EXISTING_DEADLOCK
2890 * @retval VERR_SEM_LV_ILLEGAL_UPGRADE
2891 *
2892 * @param pRec The record relating to the current thread's lock
2893 * operation.
2894 * @param pThreadSelf The current thread.
2895 * @param pSrcPos The position of the current lock operation.
2896 */
2897static int rtLockValidatorDeadlockDetection(PRTLOCKVALRECUNION pRec, PRTTHREADINT pThreadSelf, PCRTLOCKVALSRCPOS pSrcPos)
2898{
2899 RTLOCKVALDDSTACK Stack;
2900 int rc = rtLockValidatorDdDoDetection(&Stack, pRec, pThreadSelf);
2901 if (RT_SUCCESS(rc))
2902 return VINF_SUCCESS;
2903
2904 if (rc == VERR_TRY_AGAIN)
2905 {
2906 for (uint32_t iLoop = 0; ; iLoop++)
2907 {
2908 rc = rtLockValidatorDdDoDetection(&Stack, pRec, pThreadSelf);
2909 if (RT_SUCCESS_NP(rc))
2910 return VINF_SUCCESS;
2911 if (rc != VERR_TRY_AGAIN)
2912 break;
2913 RTThreadYield();
2914 if (iLoop >= 3)
2915 return VINF_SUCCESS;
2916 }
2917 }
2918
2919 rcLockValidatorDoDeadlockComplaining(&Stack, pRec, pThreadSelf, pSrcPos, rc);
2920 return rc;
2921}
2922
2923
2924RTDECL(void) RTLockValidatorRecExclInitV(PRTLOCKVALRECEXCL pRec, RTLOCKVALCLASS hClass, uint32_t uSubClass,
2925 void *hLock, bool fEnabled, const char *pszNameFmt, va_list va)
2926{
2927 RTLOCKVAL_ASSERT_PTR_ALIGN(pRec);
2928 RTLOCKVAL_ASSERT_PTR_ALIGN(hLock);
2929 Assert( uSubClass >= RTLOCKVAL_SUB_CLASS_USER
2930 || uSubClass == RTLOCKVAL_SUB_CLASS_NONE
2931 || uSubClass == RTLOCKVAL_SUB_CLASS_ANY);
2932
2933 pRec->Core.u32Magic = RTLOCKVALRECEXCL_MAGIC;
2934 pRec->fEnabled = fEnabled && RTLockValidatorIsEnabled();
2935 pRec->afReserved[0] = 0;
2936 pRec->afReserved[1] = 0;
2937 pRec->afReserved[2] = 0;
2938 rtLockValidatorSrcPosInit(&pRec->SrcPos);
2939 pRec->hThread = NIL_RTTHREAD;
2940 pRec->pDown = NULL;
2941 pRec->hClass = rtLockValidatorClassValidateAndRetain(hClass);
2942 pRec->uSubClass = uSubClass;
2943 pRec->cRecursion = 0;
2944 pRec->hLock = hLock;
2945 pRec->pSibling = NULL;
2946 if (pszNameFmt)
2947 RTStrPrintfV(pRec->szName, sizeof(pRec->szName), pszNameFmt, va);
2948 else
2949 {
2950 static uint32_t volatile s_cAnonymous = 0;
2951 uint32_t i = ASMAtomicIncU32(&s_cAnonymous) - 1;
2952 RTStrPrintf(pRec->szName, sizeof(pRec->szName), "anon-excl-%u", i);
2953 }
2954
2955 /* Lazy initialization. */
2956 if (RT_UNLIKELY(g_hLockValidatorXRoads == NIL_RTSEMXROADS))
2957 rtLockValidatorLazyInit();
2958}
2959
2960
2961RTDECL(void) RTLockValidatorRecExclInit(PRTLOCKVALRECEXCL pRec, RTLOCKVALCLASS hClass, uint32_t uSubClass,
2962 void *hLock, bool fEnabled, const char *pszNameFmt, ...)
2963{
2964 va_list va;
2965 va_start(va, pszNameFmt);
2966 RTLockValidatorRecExclInitV(pRec, hClass, uSubClass, hLock, fEnabled, pszNameFmt, va);
2967 va_end(va);
2968}
2969
2970
2971RTDECL(int) RTLockValidatorRecExclCreateV(PRTLOCKVALRECEXCL *ppRec, RTLOCKVALCLASS hClass,
2972 uint32_t uSubClass, void *pvLock, bool fEnabled,
2973 const char *pszNameFmt, va_list va)
2974{
2975 PRTLOCKVALRECEXCL pRec;
2976 *ppRec = pRec = (PRTLOCKVALRECEXCL)RTMemAlloc(sizeof(*pRec));
2977 if (!pRec)
2978 return VERR_NO_MEMORY;
2979 RTLockValidatorRecExclInitV(pRec, hClass, uSubClass, pvLock, fEnabled, pszNameFmt, va);
2980 return VINF_SUCCESS;
2981}
2982
2983
2984RTDECL(int) RTLockValidatorRecExclCreate(PRTLOCKVALRECEXCL *ppRec, RTLOCKVALCLASS hClass,
2985 uint32_t uSubClass, void *pvLock, bool fEnabled,
2986 const char *pszNameFmt, ...)
2987{
2988 va_list va;
2989 va_start(va, pszNameFmt);
2990 int rc = RTLockValidatorRecExclCreateV(ppRec, hClass, uSubClass, pvLock, fEnabled, pszNameFmt, va);
2991 va_end(va);
2992 return rc;
2993}
2994
2995
2996RTDECL(void) RTLockValidatorRecExclDelete(PRTLOCKVALRECEXCL pRec)
2997{
2998 Assert(pRec->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC);
2999
3000 rtLockValidatorSerializeDestructEnter();
3001
3002 ASMAtomicWriteU32(&pRec->Core.u32Magic, RTLOCKVALRECEXCL_MAGIC_DEAD);
3003 ASMAtomicWriteHandle(&pRec->hThread, NIL_RTTHREAD);
3004 RTLOCKVALCLASS hClass;
3005 ASMAtomicXchgHandle(&pRec->hClass, NIL_RTLOCKVALCLASS, &hClass);
3006 if (pRec->pSibling)
3007 rtLockValidatorUnlinkAllSiblings(&pRec->Core);
3008 rtLockValidatorSerializeDestructLeave();
3009 if (hClass != NIL_RTLOCKVALCLASS)
3010 RTLockValidatorClassRelease(hClass);
3011}
3012
3013
3014RTDECL(void) RTLockValidatorRecExclDestroy(PRTLOCKVALRECEXCL *ppRec)
3015{
3016 PRTLOCKVALRECEXCL pRec = *ppRec;
3017 *ppRec = NULL;
3018 if (pRec)
3019 {
3020 RTLockValidatorRecExclDelete(pRec);
3021 RTMemFree(pRec);
3022 }
3023}
3024
3025
3026RTDECL(uint32_t) RTLockValidatorRecExclSetSubClass(PRTLOCKVALRECEXCL pRec, uint32_t uSubClass)
3027{
3028 AssertPtrReturn(pRec, RTLOCKVAL_SUB_CLASS_INVALID);
3029 AssertReturn(pRec->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, RTLOCKVAL_SUB_CLASS_INVALID);
3030 AssertReturn( uSubClass >= RTLOCKVAL_SUB_CLASS_USER
3031 || uSubClass == RTLOCKVAL_SUB_CLASS_NONE
3032 || uSubClass == RTLOCKVAL_SUB_CLASS_ANY,
3033 RTLOCKVAL_SUB_CLASS_INVALID);
3034 return ASMAtomicXchgU32(&pRec->uSubClass, uSubClass);
3035}
3036
3037
3038RTDECL(void) RTLockValidatorRecExclSetOwner(PRTLOCKVALRECEXCL pRec, RTTHREAD hThreadSelf,
3039 PCRTLOCKVALSRCPOS pSrcPos, bool fFirstRecursion)
3040{
3041 PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
3042 if (!pRecU)
3043 return;
3044 AssertReturnVoid(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC);
3045 if (!pRecU->Excl.fEnabled)
3046 return;
3047 if (hThreadSelf == NIL_RTTHREAD)
3048 {
3049 hThreadSelf = RTThreadSelfAutoAdopt();
3050 AssertReturnVoid(hThreadSelf != NIL_RTTHREAD);
3051 }
3052 AssertReturnVoid(hThreadSelf->u32Magic == RTTHREADINT_MAGIC);
3053 Assert(hThreadSelf == RTThreadSelf());
3054
3055 ASMAtomicIncS32(&hThreadSelf->LockValidator.cWriteLocks);
3056
3057 if (pRecU->Excl.hThread == hThreadSelf)
3058 {
3059 Assert(!fFirstRecursion);
3060 pRecU->Excl.cRecursion++;
3061 rtLockValidatorStackPushRecursion(hThreadSelf, pRecU, pSrcPos);
3062 }
3063 else
3064 {
3065 Assert(pRecU->Excl.hThread == NIL_RTTHREAD);
3066
3067 rtLockValidatorSrcPosCopy(&pRecU->Excl.SrcPos, pSrcPos);
3068 ASMAtomicUoWriteU32(&pRecU->Excl.cRecursion, 1);
3069 ASMAtomicWriteHandle(&pRecU->Excl.hThread, hThreadSelf);
3070
3071 rtLockValidatorStackPush(hThreadSelf, pRecU);
3072 }
3073}
3074
3075
3076/**
3077 * Internal worker for RTLockValidatorRecExclReleaseOwner and
3078 * RTLockValidatorRecExclReleaseOwnerUnchecked.
3079 */
3080static void rtLockValidatorRecExclReleaseOwnerUnchecked(PRTLOCKVALRECUNION pRec, bool fFinalRecursion)
3081{
3082 RTTHREADINT *pThread = pRec->Excl.hThread;
3083 AssertReturnVoid(pThread != NIL_RTTHREAD);
3084 Assert(pThread == RTThreadSelf());
3085
3086 ASMAtomicDecS32(&pThread->LockValidator.cWriteLocks);
3087 uint32_t c = ASMAtomicDecU32(&pRec->Excl.cRecursion);
3088 if (c == 0)
3089 {
3090 rtLockValidatorStackPop(pThread, pRec);
3091 ASMAtomicWriteHandle(&pRec->Excl.hThread, NIL_RTTHREAD);
3092 }
3093 else
3094 {
3095 Assert(c < UINT32_C(0xffff0000));
3096 Assert(!fFinalRecursion);
3097 rtLockValidatorStackPopRecursion(pThread, pRec);
3098 }
3099}
3100
3101RTDECL(int) RTLockValidatorRecExclReleaseOwner(PRTLOCKVALRECEXCL pRec, bool fFinalRecursion)
3102{
3103 PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
3104 if (!pRecU)
3105 return VINF_SUCCESS;
3106 AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
3107 if (!pRecU->Excl.fEnabled)
3108 return VINF_SUCCESS;
3109
3110 /*
3111 * Check the release order.
3112 */
3113 if ( pRecU->Excl.hClass != NIL_RTLOCKVALCLASS
3114 && pRecU->Excl.hClass->fStrictReleaseOrder
3115 && pRecU->Excl.hClass->cMsMinOrder != RT_INDEFINITE_WAIT
3116 )
3117 {
3118 int rc = rtLockValidatorStackCheckReleaseOrder(pRecU->Excl.hThread, pRecU);
3119 if (RT_FAILURE(rc))
3120 return rc;
3121 }
3122
3123 /*
3124 * Join paths with RTLockValidatorRecExclReleaseOwnerUnchecked.
3125 */
3126 rtLockValidatorRecExclReleaseOwnerUnchecked(pRecU, fFinalRecursion);
3127 return VINF_SUCCESS;
3128}
3129
3130
3131RTDECL(void) RTLockValidatorRecExclReleaseOwnerUnchecked(PRTLOCKVALRECEXCL pRec)
3132{
3133 PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
3134 AssertReturnVoid(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC);
3135 if (pRecU->Excl.fEnabled)
3136 rtLockValidatorRecExclReleaseOwnerUnchecked(pRecU, false);
3137}
3138
3139
3140RTDECL(int) RTLockValidatorRecExclRecursion(PRTLOCKVALRECEXCL pRec, PCRTLOCKVALSRCPOS pSrcPos)
3141{
3142 PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
3143 if (!pRecU)
3144 return VINF_SUCCESS;
3145 AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
3146 if (!pRecU->Excl.fEnabled)
3147 return VINF_SUCCESS;
3148 AssertReturn(pRecU->Excl.hThread != NIL_RTTHREAD, VERR_SEM_LV_INVALID_PARAMETER);
3149 AssertReturn(pRecU->Excl.cRecursion > 0, VERR_SEM_LV_INVALID_PARAMETER);
3150
3151 if ( pRecU->Excl.hClass != NIL_RTLOCKVALCLASS
3152 && !pRecU->Excl.hClass->fRecursionOk)
3153 {
3154 rtLockValComplainFirst("Recursion not allowed by the class!",
3155 pSrcPos, pRecU->Excl.hThread, (PRTLOCKVALRECUNION)pRec, true);
3156 rtLockValComplainPanic();
3157 return VERR_SEM_LV_NESTED;
3158 }
3159
3160 Assert(pRecU->Excl.cRecursion < _1M);
3161 pRecU->Excl.cRecursion++;
3162 rtLockValidatorStackPushRecursion(pRecU->Excl.hThread, pRecU, pSrcPos);
3163 return VINF_SUCCESS;
3164}
3165
3166
3167RTDECL(int) RTLockValidatorRecExclUnwind(PRTLOCKVALRECEXCL pRec)
3168{
3169 PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
3170 AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
3171 if (!pRecU->Excl.fEnabled)
3172 return VINF_SUCCESS;
3173 AssertReturn(pRecU->Excl.hThread != NIL_RTTHREAD, VERR_SEM_LV_INVALID_PARAMETER);
3174 Assert(pRecU->Excl.hThread == RTThreadSelf());
3175 AssertReturn(pRecU->Excl.cRecursion > 1, VERR_SEM_LV_INVALID_PARAMETER);
3176
3177 /*
3178 * Check the release order.
3179 */
3180 if ( pRecU->Excl.hClass != NIL_RTLOCKVALCLASS
3181 && pRecU->Excl.hClass->fStrictReleaseOrder
3182 && pRecU->Excl.hClass->cMsMinOrder != RT_INDEFINITE_WAIT
3183 )
3184 {
3185 int rc = rtLockValidatorStackCheckReleaseOrder(pRecU->Excl.hThread, pRecU);
3186 if (RT_FAILURE(rc))
3187 return rc;
3188 }
3189
3190 /*
3191 * Perform the unwind.
3192 */
3193 pRecU->Excl.cRecursion--;
3194 rtLockValidatorStackPopRecursion(pRecU->Excl.hThread, pRecU);
3195 return VINF_SUCCESS;
3196}
3197
3198
3199RTDECL(int) RTLockValidatorRecExclRecursionMixed(PRTLOCKVALRECEXCL pRec, PRTLOCKVALRECCORE pRecMixed, PCRTLOCKVALSRCPOS pSrcPos)
3200{
3201 PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
3202 AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
3203 PRTLOCKVALRECUNION pRecMixedU = (PRTLOCKVALRECUNION)pRecMixed;
3204 AssertReturn( pRecMixedU->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
3205 || pRecMixedU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC
3206 , VERR_SEM_LV_INVALID_PARAMETER);
3207 if (!pRecU->Excl.fEnabled)
3208 return VINF_SUCCESS;
3209 Assert(pRecU->Excl.hThread == RTThreadSelf());
3210 AssertReturn(pRecU->Excl.hThread != NIL_RTTHREAD, VERR_SEM_LV_INVALID_PARAMETER);
3211 AssertReturn(pRecU->Excl.cRecursion > 0, VERR_SEM_LV_INVALID_PARAMETER);
3212
3213 if ( pRecU->Excl.hClass != NIL_RTLOCKVALCLASS
3214 && !pRecU->Excl.hClass->fRecursionOk)
3215 {
3216 rtLockValComplainFirst("Mixed recursion not allowed by the class!",
3217 pSrcPos, pRecU->Excl.hThread, (PRTLOCKVALRECUNION)pRec, true);
3218 rtLockValComplainPanic();
3219 return VERR_SEM_LV_NESTED;
3220 }
3221
3222 Assert(pRecU->Excl.cRecursion < _1M);
3223 pRecU->Excl.cRecursion++;
3224 rtLockValidatorStackPushRecursion(pRecU->Excl.hThread, pRecU, pSrcPos);
3225
3226 return VINF_SUCCESS;
3227}
3228
3229
3230RTDECL(int) RTLockValidatorRecExclUnwindMixed(PRTLOCKVALRECEXCL pRec, PRTLOCKVALRECCORE pRecMixed)
3231{
3232 PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
3233 AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
3234 PRTLOCKVALRECUNION pRecMixedU = (PRTLOCKVALRECUNION)pRecMixed;
3235 AssertReturn( pRecMixedU->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
3236 || pRecMixedU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC
3237 , VERR_SEM_LV_INVALID_PARAMETER);
3238 if (!pRecU->Excl.fEnabled)
3239 return VINF_SUCCESS;
3240 Assert(pRecU->Excl.hThread == RTThreadSelf());
3241 AssertReturn(pRecU->Excl.hThread != NIL_RTTHREAD, VERR_SEM_LV_INVALID_PARAMETER);
3242 AssertReturn(pRecU->Excl.cRecursion > 1, VERR_SEM_LV_INVALID_PARAMETER);
3243
3244 /*
3245 * Check the release order.
3246 */
3247 if ( pRecU->Excl.hClass != NIL_RTLOCKVALCLASS
3248 && pRecU->Excl.hClass->fStrictReleaseOrder
3249 && pRecU->Excl.hClass->cMsMinOrder != RT_INDEFINITE_WAIT
3250 )
3251 {
3252 int rc = rtLockValidatorStackCheckReleaseOrder(pRecU->Excl.hThread, pRecU);
3253 if (RT_FAILURE(rc))
3254 return rc;
3255 }
3256
3257 /*
3258 * Perform the unwind.
3259 */
3260 pRecU->Excl.cRecursion--;
3261 rtLockValidatorStackPopRecursion(pRecU->Excl.hThread, pRecU);
3262 return VINF_SUCCESS;
3263}
3264
3265
3266RTDECL(int) RTLockValidatorRecExclCheckOrder(PRTLOCKVALRECEXCL pRec, RTTHREAD hThreadSelf,
3267 PCRTLOCKVALSRCPOS pSrcPos, RTMSINTERVAL cMillies)
3268{
3269 /*
3270 * Validate and adjust input. Quit early if order validation is disabled.
3271 */
3272 PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
3273 if (!pRecU)
3274 return VINF_SUCCESS;
3275 AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
3276 if ( !pRecU->Excl.fEnabled
3277 || pRecU->Excl.hClass == NIL_RTLOCKVALCLASS
3278 || pRecU->Excl.hClass->cMsMinOrder == RT_INDEFINITE_WAIT
3279 || pRecU->Excl.hClass->cMsMinOrder > cMillies)
3280 return VINF_SUCCESS;
3281
3282 if (hThreadSelf == NIL_RTTHREAD)
3283 {
3284 hThreadSelf = RTThreadSelfAutoAdopt();
3285 AssertReturn(hThreadSelf != NIL_RTTHREAD, VERR_SEM_LV_INTERNAL_ERROR);
3286 }
3287 AssertReturn(hThreadSelf->u32Magic == RTTHREADINT_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
3288 Assert(hThreadSelf == RTThreadSelf());
3289
3290 /*
3291 * Detect recursion as it isn't subject to order restrictions.
3292 */
3293 if (pRec->hThread == hThreadSelf)
3294 return VINF_SUCCESS;
3295
3296 return rtLockValidatorStackCheckLockingOrder(pRecU->Excl.hClass, pRecU->Excl.uSubClass, hThreadSelf, pRecU, pSrcPos);
3297}
3298
3299
3300RTDECL(int) RTLockValidatorRecExclCheckBlocking(PRTLOCKVALRECEXCL pRec, RTTHREAD hThreadSelf,
3301 PCRTLOCKVALSRCPOS pSrcPos, bool fRecursiveOk, RTMSINTERVAL cMillies,
3302 RTTHREADSTATE enmSleepState, bool fReallySleeping)
3303{
3304 /*
3305 * Fend off wild life.
3306 */
3307 PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
3308 if (!pRecU)
3309 return VINF_SUCCESS;
3310 AssertPtrReturn(pRecU, VERR_SEM_LV_INVALID_PARAMETER);
3311 AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
3312 if (!pRec->fEnabled)
3313 return VINF_SUCCESS;
3314
3315 PRTTHREADINT pThreadSelf = hThreadSelf;
3316 AssertPtrReturn(pThreadSelf, VERR_SEM_LV_INVALID_PARAMETER);
3317 AssertReturn(pThreadSelf->u32Magic == RTTHREADINT_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
3318 Assert(pThreadSelf == RTThreadSelf());
3319
3320 AssertReturn(RTTHREAD_IS_SLEEPING(enmSleepState), VERR_SEM_LV_INVALID_PARAMETER);
3321
3322 RTTHREADSTATE enmThreadState = rtThreadGetState(pThreadSelf);
3323 if (RT_UNLIKELY(enmThreadState != RTTHREADSTATE_RUNNING))
3324 {
3325 AssertReturn( enmThreadState == RTTHREADSTATE_TERMINATED /* rtThreadRemove uses locks too */
3326 || enmThreadState == RTTHREADSTATE_INITIALIZING /* rtThreadInsert uses locks too */
3327 , VERR_SEM_LV_INVALID_PARAMETER);
3328 enmSleepState = enmThreadState;
3329 }
3330
3331 /*
3332 * Record the location.
3333 */
3334 rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pRec, pRecU);
3335 rtLockValidatorSrcPosCopy(&pThreadSelf->LockValidator.SrcPos, pSrcPos);
3336 ASMAtomicWriteBool(&pThreadSelf->LockValidator.fInValidator, true);
3337 pThreadSelf->LockValidator.enmRecState = enmSleepState;
3338 rtThreadSetState(pThreadSelf, enmSleepState);
3339
3340 /*
3341 * Don't do deadlock detection if we're recursing.
3342 *
3343 * On some hosts we don't do recursion accounting our selves and there
3344 * isn't any other place to check for this.
3345 */
3346 int rc = VINF_SUCCESS;
3347 if (rtLockValidatorReadThreadHandle(&pRecU->Excl.hThread) == pThreadSelf)
3348 {
3349 if ( !fRecursiveOk
3350 || ( pRecU->Excl.hClass != NIL_RTLOCKVALCLASS
3351 && !pRecU->Excl.hClass->fRecursionOk))
3352 {
3353 rtLockValComplainFirst("Recursion not allowed!", pSrcPos, pThreadSelf, pRecU, true);
3354 rtLockValComplainPanic();
3355 rc = VERR_SEM_LV_NESTED;
3356 }
3357 }
3358 /*
3359 * Perform deadlock detection.
3360 */
3361 else if ( pRecU->Excl.hClass != NIL_RTLOCKVALCLASS
3362 && ( pRecU->Excl.hClass->cMsMinDeadlock > cMillies
3363 || pRecU->Excl.hClass->cMsMinDeadlock > RT_INDEFINITE_WAIT))
3364 rc = VINF_SUCCESS;
3365 else if (!rtLockValidatorIsSimpleNoDeadlockCase(pRecU))
3366 rc = rtLockValidatorDeadlockDetection(pRecU, pThreadSelf, pSrcPos);
3367
3368 if (RT_SUCCESS(rc))
3369 ASMAtomicWriteBool(&pThreadSelf->fReallySleeping, fReallySleeping);
3370 else
3371 {
3372 rtThreadSetState(pThreadSelf, enmThreadState);
3373 rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pRec, NULL);
3374 }
3375 ASMAtomicWriteBool(&pThreadSelf->LockValidator.fInValidator, false);
3376 return rc;
3377}
3378RT_EXPORT_SYMBOL(RTLockValidatorRecExclCheckBlocking);
3379
3380
3381RTDECL(int) RTLockValidatorRecExclCheckOrderAndBlocking(PRTLOCKVALRECEXCL pRec, RTTHREAD hThreadSelf,
3382 PCRTLOCKVALSRCPOS pSrcPos, bool fRecursiveOk, RTMSINTERVAL cMillies,
3383 RTTHREADSTATE enmSleepState, bool fReallySleeping)
3384{
3385 int rc = RTLockValidatorRecExclCheckOrder(pRec, hThreadSelf, pSrcPos, cMillies);
3386 if (RT_SUCCESS(rc))
3387 rc = RTLockValidatorRecExclCheckBlocking(pRec, hThreadSelf, pSrcPos, fRecursiveOk, cMillies,
3388 enmSleepState, fReallySleeping);
3389 return rc;
3390}
3391RT_EXPORT_SYMBOL(RTLockValidatorRecExclCheckOrderAndBlocking);
3392
3393
3394RTDECL(void) RTLockValidatorRecSharedInitV(PRTLOCKVALRECSHRD pRec, RTLOCKVALCLASS hClass, uint32_t uSubClass,
3395 void *hLock, bool fSignaller, bool fEnabled, const char *pszNameFmt, va_list va)
3396{
3397 RTLOCKVAL_ASSERT_PTR_ALIGN(pRec);
3398 RTLOCKVAL_ASSERT_PTR_ALIGN(hLock);
3399 Assert( uSubClass >= RTLOCKVAL_SUB_CLASS_USER
3400 || uSubClass == RTLOCKVAL_SUB_CLASS_NONE
3401 || uSubClass == RTLOCKVAL_SUB_CLASS_ANY);
3402
3403 pRec->Core.u32Magic = RTLOCKVALRECSHRD_MAGIC;
3404 pRec->uSubClass = uSubClass;
3405 pRec->hClass = rtLockValidatorClassValidateAndRetain(hClass);
3406 pRec->hLock = hLock;
3407 pRec->fEnabled = fEnabled && RTLockValidatorIsEnabled();
3408 pRec->fSignaller = fSignaller;
3409 pRec->pSibling = NULL;
3410
3411 /* the table */
3412 pRec->cEntries = 0;
3413 pRec->iLastEntry = 0;
3414 pRec->cAllocated = 0;
3415 pRec->fReallocating = false;
3416 pRec->fPadding = false;
3417 pRec->papOwners = NULL;
3418
3419 /* the name */
3420 if (pszNameFmt)
3421 RTStrPrintfV(pRec->szName, sizeof(pRec->szName), pszNameFmt, va);
3422 else
3423 {
3424 static uint32_t volatile s_cAnonymous = 0;
3425 uint32_t i = ASMAtomicIncU32(&s_cAnonymous) - 1;
3426 RTStrPrintf(pRec->szName, sizeof(pRec->szName), "anon-shrd-%u", i);
3427 }
3428}
3429
3430
3431RTDECL(void) RTLockValidatorRecSharedInit(PRTLOCKVALRECSHRD pRec, RTLOCKVALCLASS hClass, uint32_t uSubClass,
3432 void *hLock, bool fSignaller, bool fEnabled, const char *pszNameFmt, ...)
3433{
3434 va_list va;
3435 va_start(va, pszNameFmt);
3436 RTLockValidatorRecSharedInitV(pRec, hClass, uSubClass, hLock, fSignaller, fEnabled, pszNameFmt, va);
3437 va_end(va);
3438}
3439
3440
3441RTDECL(void) RTLockValidatorRecSharedDelete(PRTLOCKVALRECSHRD pRec)
3442{
3443 Assert(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC);
3444
3445 /*
3446 * Flip it into table realloc mode and take the destruction lock.
3447 */
3448 rtLockValidatorSerializeDestructEnter();
3449 while (!ASMAtomicCmpXchgBool(&pRec->fReallocating, true, false))
3450 {
3451 rtLockValidatorSerializeDestructLeave();
3452
3453 rtLockValidatorSerializeDetectionEnter();
3454 rtLockValidatorSerializeDetectionLeave();
3455
3456 rtLockValidatorSerializeDestructEnter();
3457 }
3458
3459 ASMAtomicWriteU32(&pRec->Core.u32Magic, RTLOCKVALRECSHRD_MAGIC_DEAD);
3460 RTLOCKVALCLASS hClass;
3461 ASMAtomicXchgHandle(&pRec->hClass, NIL_RTLOCKVALCLASS, &hClass);
3462 if (pRec->papOwners)
3463 {
3464 PRTLOCKVALRECSHRDOWN volatile *papOwners = pRec->papOwners;
3465 ASMAtomicUoWriteNullPtr(&pRec->papOwners);
3466 ASMAtomicUoWriteU32(&pRec->cAllocated, 0);
3467
3468 RTMemFree((void *)papOwners);
3469 }
3470 if (pRec->pSibling)
3471 rtLockValidatorUnlinkAllSiblings(&pRec->Core);
3472 ASMAtomicWriteBool(&pRec->fReallocating, false);
3473
3474 rtLockValidatorSerializeDestructLeave();
3475
3476 if (hClass != NIL_RTLOCKVALCLASS)
3477 RTLockValidatorClassRelease(hClass);
3478}
3479
3480
3481RTDECL(uint32_t) RTLockValidatorRecSharedSetSubClass(PRTLOCKVALRECSHRD pRec, uint32_t uSubClass)
3482{
3483 AssertPtrReturn(pRec, RTLOCKVAL_SUB_CLASS_INVALID);
3484 AssertReturn(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, RTLOCKVAL_SUB_CLASS_INVALID);
3485 AssertReturn( uSubClass >= RTLOCKVAL_SUB_CLASS_USER
3486 || uSubClass == RTLOCKVAL_SUB_CLASS_NONE
3487 || uSubClass == RTLOCKVAL_SUB_CLASS_ANY,
3488 RTLOCKVAL_SUB_CLASS_INVALID);
3489 return ASMAtomicXchgU32(&pRec->uSubClass, uSubClass);
3490}
3491
3492
3493/**
3494 * Locates an owner (thread) in a shared lock record.
3495 *
3496 * @returns Pointer to the owner entry on success, NULL on failure..
3497 * @param pShared The shared lock record.
3498 * @param hThread The thread (owner) to find.
3499 * @param piEntry Where to optionally return the table in index.
3500 * Optional.
3501 */
3502DECLINLINE(PRTLOCKVALRECUNION)
3503rtLockValidatorRecSharedFindOwner(PRTLOCKVALRECSHRD pShared, RTTHREAD hThread, uint32_t *piEntry)
3504{
3505 rtLockValidatorSerializeDetectionEnter();
3506
3507 PRTLOCKVALRECSHRDOWN volatile *papOwners = pShared->papOwners;
3508 if (papOwners)
3509 {
3510 uint32_t const cMax = pShared->cAllocated;
3511 for (uint32_t iEntry = 0; iEntry < cMax; iEntry++)
3512 {
3513 PRTLOCKVALRECUNION pEntry = (PRTLOCKVALRECUNION)rtLockValidatorUoReadSharedOwner(&papOwners[iEntry]);
3514 if (pEntry && pEntry->ShrdOwner.hThread == hThread)
3515 {
3516 rtLockValidatorSerializeDetectionLeave();
3517 if (piEntry)
3518 *piEntry = iEntry;
3519 return pEntry;
3520 }
3521 }
3522 }
3523
3524 rtLockValidatorSerializeDetectionLeave();
3525 return NULL;
3526}
3527
3528
3529RTDECL(int) RTLockValidatorRecSharedCheckOrder(PRTLOCKVALRECSHRD pRec, RTTHREAD hThreadSelf,
3530 PCRTLOCKVALSRCPOS pSrcPos, RTMSINTERVAL cMillies)
3531{
3532 /*
3533 * Validate and adjust input. Quit early if order validation is disabled.
3534 */
3535 PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
3536 AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
3537 if ( !pRecU->Shared.fEnabled
3538 || pRecU->Shared.hClass == NIL_RTLOCKVALCLASS
3539 || pRecU->Shared.hClass->cMsMinOrder == RT_INDEFINITE_WAIT
3540 || pRecU->Shared.hClass->cMsMinOrder > cMillies
3541 )
3542 return VINF_SUCCESS;
3543
3544 if (hThreadSelf == NIL_RTTHREAD)
3545 {
3546 hThreadSelf = RTThreadSelfAutoAdopt();
3547 AssertReturn(hThreadSelf != NIL_RTTHREAD, VERR_SEM_LV_INTERNAL_ERROR);
3548 }
3549 AssertReturn(hThreadSelf->u32Magic == RTTHREADINT_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
3550 Assert(hThreadSelf == RTThreadSelf());
3551
3552 /*
3553 * Detect recursion as it isn't subject to order restrictions.
3554 */
3555 PRTLOCKVALRECUNION pEntry = rtLockValidatorRecSharedFindOwner(&pRecU->Shared, hThreadSelf, NULL);
3556 if (pEntry)
3557 return VINF_SUCCESS;
3558
3559 return rtLockValidatorStackCheckLockingOrder(pRecU->Shared.hClass, pRecU->Shared.uSubClass, hThreadSelf, pRecU, pSrcPos);
3560}
3561
3562
3563RTDECL(int) RTLockValidatorRecSharedCheckBlocking(PRTLOCKVALRECSHRD pRec, RTTHREAD hThreadSelf,
3564 PCRTLOCKVALSRCPOS pSrcPos, bool fRecursiveOk, RTMSINTERVAL cMillies,
3565 RTTHREADSTATE enmSleepState, bool fReallySleeping)
3566{
3567 /*
3568 * Fend off wild life.
3569 */
3570 PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
3571 AssertPtrReturn(pRecU, VERR_SEM_LV_INVALID_PARAMETER);
3572 AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
3573 if (!pRecU->Shared.fEnabled)
3574 return VINF_SUCCESS;
3575
3576 PRTTHREADINT pThreadSelf = hThreadSelf;
3577 AssertPtrReturn(pThreadSelf, VERR_SEM_LV_INVALID_PARAMETER);
3578 AssertReturn(pThreadSelf->u32Magic == RTTHREADINT_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
3579 Assert(pThreadSelf == RTThreadSelf());
3580
3581 AssertReturn(RTTHREAD_IS_SLEEPING(enmSleepState), VERR_SEM_LV_INVALID_PARAMETER);
3582
3583 RTTHREADSTATE enmThreadState = rtThreadGetState(pThreadSelf);
3584 if (RT_UNLIKELY(enmThreadState != RTTHREADSTATE_RUNNING))
3585 {
3586 AssertReturn( enmThreadState == RTTHREADSTATE_TERMINATED /* rtThreadRemove uses locks too */
3587 || enmThreadState == RTTHREADSTATE_INITIALIZING /* rtThreadInsert uses locks too */
3588 , VERR_SEM_LV_INVALID_PARAMETER);
3589 enmSleepState = enmThreadState;
3590 }
3591
3592 /*
3593 * Record the location.
3594 */
3595 rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pRec, pRecU);
3596 rtLockValidatorSrcPosCopy(&pThreadSelf->LockValidator.SrcPos, pSrcPos);
3597 ASMAtomicWriteBool(&pThreadSelf->LockValidator.fInValidator, true);
3598 pThreadSelf->LockValidator.enmRecState = enmSleepState;
3599 rtThreadSetState(pThreadSelf, enmSleepState);
3600
3601 /*
3602 * Don't do deadlock detection if we're recursing.
3603 */
3604 int rc = VINF_SUCCESS;
3605 PRTLOCKVALRECUNION pEntry = !pRecU->Shared.fSignaller
3606 ? rtLockValidatorRecSharedFindOwner(&pRecU->Shared, pThreadSelf, NULL)
3607 : NULL;
3608 if (pEntry)
3609 {
3610 if ( !fRecursiveOk
3611 || ( pRec->hClass
3612 && !pRec->hClass->fRecursionOk)
3613 )
3614 {
3615 rtLockValComplainFirst("Recursion not allowed!", pSrcPos, pThreadSelf, pRecU, true);
3616 rtLockValComplainPanic();
3617 rc = VERR_SEM_LV_NESTED;
3618 }
3619 }
3620 /*
3621 * Perform deadlock detection.
3622 */
3623 else if ( pRec->hClass
3624 && ( pRec->hClass->cMsMinDeadlock == RT_INDEFINITE_WAIT
3625 || pRec->hClass->cMsMinDeadlock > cMillies))
3626 rc = VINF_SUCCESS;
3627 else if (!rtLockValidatorIsSimpleNoDeadlockCase(pRecU))
3628 rc = rtLockValidatorDeadlockDetection(pRecU, pThreadSelf, pSrcPos);
3629
3630 if (RT_SUCCESS(rc))
3631 ASMAtomicWriteBool(&pThreadSelf->fReallySleeping, fReallySleeping);
3632 else
3633 {
3634 rtThreadSetState(pThreadSelf, enmThreadState);
3635 rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pRec, NULL);
3636 }
3637 ASMAtomicWriteBool(&pThreadSelf->LockValidator.fInValidator, false);
3638 return rc;
3639}
3640RT_EXPORT_SYMBOL(RTLockValidatorRecSharedCheckBlocking);
3641
3642
3643RTDECL(int) RTLockValidatorRecSharedCheckOrderAndBlocking(PRTLOCKVALRECSHRD pRec, RTTHREAD hThreadSelf,
3644 PCRTLOCKVALSRCPOS pSrcPos, bool fRecursiveOk, RTMSINTERVAL cMillies,
3645 RTTHREADSTATE enmSleepState, bool fReallySleeping)
3646{
3647 int rc = RTLockValidatorRecSharedCheckOrder(pRec, hThreadSelf, pSrcPos, cMillies);
3648 if (RT_SUCCESS(rc))
3649 rc = RTLockValidatorRecSharedCheckBlocking(pRec, hThreadSelf, pSrcPos, fRecursiveOk, cMillies,
3650 enmSleepState, fReallySleeping);
3651 return rc;
3652}
3653RT_EXPORT_SYMBOL(RTLockValidatorRecSharedCheckOrderAndBlocking);
3654
3655
3656/**
3657 * Allocates and initializes an owner entry for the shared lock record.
3658 *
3659 * @returns The new owner entry.
3660 * @param pRec The shared lock record.
3661 * @param pThreadSelf The calling thread and owner. Used for record
3662 * initialization and allocation.
3663 * @param pSrcPos The source position.
3664 */
3665DECLINLINE(PRTLOCKVALRECUNION)
3666rtLockValidatorRecSharedAllocOwner(PRTLOCKVALRECSHRD pRec, PRTTHREADINT pThreadSelf, PCRTLOCKVALSRCPOS pSrcPos)
3667{
3668 PRTLOCKVALRECUNION pEntry;
3669
3670 /*
3671 * Check if the thread has any statically allocated records we can easily
3672 * make use of.
3673 */
3674 unsigned iEntry = ASMBitFirstSetU32(ASMAtomicUoReadU32(&pThreadSelf->LockValidator.bmFreeShrdOwners));
3675 if ( iEntry > 0
3676 && ASMAtomicBitTestAndClear(&pThreadSelf->LockValidator.bmFreeShrdOwners, iEntry - 1))
3677 {
3678 pEntry = (PRTLOCKVALRECUNION)&pThreadSelf->LockValidator.aShrdOwners[iEntry - 1];
3679 Assert(!pEntry->ShrdOwner.fReserved);
3680 pEntry->ShrdOwner.fStaticAlloc = true;
3681 rtThreadGet(pThreadSelf);
3682 }
3683 else
3684 {
3685 pEntry = (PRTLOCKVALRECUNION)RTMemAlloc(sizeof(RTLOCKVALRECSHRDOWN));
3686 if (RT_UNLIKELY(!pEntry))
3687 return NULL;
3688 pEntry->ShrdOwner.fStaticAlloc = false;
3689 }
3690
3691 pEntry->Core.u32Magic = RTLOCKVALRECSHRDOWN_MAGIC;
3692 pEntry->ShrdOwner.cRecursion = 1;
3693 pEntry->ShrdOwner.fReserved = true;
3694 pEntry->ShrdOwner.hThread = pThreadSelf;
3695 pEntry->ShrdOwner.pDown = NULL;
3696 pEntry->ShrdOwner.pSharedRec = pRec;
3697#if HC_ARCH_BITS == 32
3698 pEntry->ShrdOwner.pvReserved = NULL;
3699#endif
3700 if (pSrcPos)
3701 pEntry->ShrdOwner.SrcPos = *pSrcPos;
3702 else
3703 rtLockValidatorSrcPosInit(&pEntry->ShrdOwner.SrcPos);
3704 return pEntry;
3705}
3706
3707
3708/**
3709 * Frees an owner entry allocated by rtLockValidatorRecSharedAllocOwner.
3710 *
3711 * @param pEntry The owner entry.
3712 */
3713DECLINLINE(void) rtLockValidatorRecSharedFreeOwner(PRTLOCKVALRECSHRDOWN pEntry)
3714{
3715 if (pEntry)
3716 {
3717 Assert(pEntry->Core.u32Magic == RTLOCKVALRECSHRDOWN_MAGIC);
3718 ASMAtomicWriteU32(&pEntry->Core.u32Magic, RTLOCKVALRECSHRDOWN_MAGIC_DEAD);
3719
3720 PRTTHREADINT pThread;
3721 ASMAtomicXchgHandle(&pEntry->hThread, NIL_RTTHREAD, &pThread);
3722
3723 Assert(pEntry->fReserved);
3724 pEntry->fReserved = false;
3725
3726 if (pEntry->fStaticAlloc)
3727 {
3728 AssertPtrReturnVoid(pThread);
3729 AssertReturnVoid(pThread->u32Magic == RTTHREADINT_MAGIC);
3730
3731 uintptr_t iEntry = pEntry - &pThread->LockValidator.aShrdOwners[0];
3732 AssertReleaseReturnVoid(iEntry < RT_ELEMENTS(pThread->LockValidator.aShrdOwners));
3733
3734 Assert(!ASMBitTest(&pThread->LockValidator.bmFreeShrdOwners, (int32_t)iEntry));
3735 ASMAtomicBitSet(&pThread->LockValidator.bmFreeShrdOwners, (int32_t)iEntry);
3736
3737 rtThreadRelease(pThread);
3738 }
3739 else
3740 {
3741 rtLockValidatorSerializeDestructEnter();
3742 rtLockValidatorSerializeDestructLeave();
3743
3744 RTMemFree(pEntry);
3745 }
3746 }
3747}
3748
3749
3750/**
3751 * Make more room in the table.
3752 *
3753 * @retval true on success
3754 * @retval false if we're out of memory or running into a bad race condition
3755 * (probably a bug somewhere). No longer holding the lock.
3756 *
3757 * @param pShared The shared lock record.
3758 */
3759static bool rtLockValidatorRecSharedMakeRoom(PRTLOCKVALRECSHRD pShared)
3760{
3761 for (unsigned i = 0; i < 1000; i++)
3762 {
3763 /*
3764 * Switch to the other data access direction.
3765 */
3766 rtLockValidatorSerializeDetectionLeave();
3767 if (i >= 10)
3768 {
3769 Assert(i != 10 && i != 100);
3770 RTThreadSleep(i >= 100);
3771 }
3772 rtLockValidatorSerializeDestructEnter();
3773
3774 /*
3775 * Try grab the privilege to reallocating the table.
3776 */
3777 if ( pShared->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
3778 && ASMAtomicCmpXchgBool(&pShared->fReallocating, true, false))
3779 {
3780 uint32_t cAllocated = pShared->cAllocated;
3781 if (cAllocated < pShared->cEntries)
3782 {
3783 /*
3784 * Ok, still not enough space. Reallocate the table.
3785 */
3786#if 0 /** @todo enable this after making sure growing works flawlessly. */
3787 uint32_t cInc = RT_ALIGN_32(pShared->cEntries - cAllocated, 16);
3788#else
3789 uint32_t cInc = RT_ALIGN_32(pShared->cEntries - cAllocated, 1);
3790#endif
3791 PRTLOCKVALRECSHRDOWN *papOwners;
3792 papOwners = (PRTLOCKVALRECSHRDOWN *)RTMemRealloc((void *)pShared->papOwners,
3793 (cAllocated + cInc) * sizeof(void *));
3794 if (!papOwners)
3795 {
3796 ASMAtomicWriteBool(&pShared->fReallocating, false);
3797 rtLockValidatorSerializeDestructLeave();
3798 /* RTMemRealloc will assert */
3799 return false;
3800 }
3801
3802 while (cInc-- > 0)
3803 {
3804 papOwners[cAllocated] = NULL;
3805 cAllocated++;
3806 }
3807
3808 ASMAtomicWritePtr(&pShared->papOwners, papOwners);
3809 ASMAtomicWriteU32(&pShared->cAllocated, cAllocated);
3810 }
3811 ASMAtomicWriteBool(&pShared->fReallocating, false);
3812 }
3813 rtLockValidatorSerializeDestructLeave();
3814
3815 rtLockValidatorSerializeDetectionEnter();
3816 if (RT_UNLIKELY(pShared->Core.u32Magic != RTLOCKVALRECSHRD_MAGIC))
3817 break;
3818
3819 if (pShared->cAllocated >= pShared->cEntries)
3820 return true;
3821 }
3822
3823 rtLockValidatorSerializeDetectionLeave();
3824 AssertFailed(); /* too many iterations or destroyed while racing. */
3825 return false;
3826}
3827
3828
3829/**
3830 * Adds an owner entry to a shared lock record.
3831 *
3832 * @returns true on success, false on serious race or we're if out of memory.
3833 * @param pShared The shared lock record.
3834 * @param pEntry The owner entry.
3835 */
3836DECLINLINE(bool) rtLockValidatorRecSharedAddOwner(PRTLOCKVALRECSHRD pShared, PRTLOCKVALRECSHRDOWN pEntry)
3837{
3838 rtLockValidatorSerializeDetectionEnter();
3839 if (RT_LIKELY(pShared->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC)) /* paranoia */
3840 {
3841 if ( ASMAtomicIncU32(&pShared->cEntries) > pShared->cAllocated /** @todo add fudge */
3842 && !rtLockValidatorRecSharedMakeRoom(pShared))
3843 return false; /* the worker leave the lock */
3844
3845 PRTLOCKVALRECSHRDOWN volatile *papOwners = pShared->papOwners;
3846 uint32_t const cMax = pShared->cAllocated;
3847 for (unsigned i = 0; i < 100; i++)
3848 {
3849 for (uint32_t iEntry = 0; iEntry < cMax; iEntry++)
3850 {
3851 if (ASMAtomicCmpXchgPtr(&papOwners[iEntry], pEntry, NULL))
3852 {
3853 rtLockValidatorSerializeDetectionLeave();
3854 return true;
3855 }
3856 }
3857 Assert(i != 25);
3858 }
3859 AssertFailed();
3860 }
3861 rtLockValidatorSerializeDetectionLeave();
3862 return false;
3863}
3864
3865
3866/**
3867 * Remove an owner entry from a shared lock record and free it.
3868 *
3869 * @param pShared The shared lock record.
3870 * @param pEntry The owner entry to remove.
3871 * @param iEntry The last known index.
3872 */
3873DECLINLINE(void) rtLockValidatorRecSharedRemoveAndFreeOwner(PRTLOCKVALRECSHRD pShared, PRTLOCKVALRECSHRDOWN pEntry,
3874 uint32_t iEntry)
3875{
3876 /*
3877 * Remove it from the table.
3878 */
3879 rtLockValidatorSerializeDetectionEnter();
3880 AssertReturnVoidStmt(pShared->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, rtLockValidatorSerializeDetectionLeave());
3881 if (RT_UNLIKELY( iEntry >= pShared->cAllocated
3882 || !ASMAtomicCmpXchgPtr(&pShared->papOwners[iEntry], NULL, pEntry)))
3883 {
3884 /* this shouldn't happen yet... */
3885 AssertFailed();
3886 PRTLOCKVALRECSHRDOWN volatile *papOwners = pShared->papOwners;
3887 uint32_t const cMax = pShared->cAllocated;
3888 for (iEntry = 0; iEntry < cMax; iEntry++)
3889 if (ASMAtomicCmpXchgPtr(&papOwners[iEntry], NULL, pEntry))
3890 break;
3891 AssertReturnVoidStmt(iEntry < cMax, rtLockValidatorSerializeDetectionLeave());
3892 }
3893 uint32_t cNow = ASMAtomicDecU32(&pShared->cEntries);
3894 Assert(!(cNow & RT_BIT_32(31))); NOREF(cNow);
3895 rtLockValidatorSerializeDetectionLeave();
3896
3897 /*
3898 * Successfully removed, now free it.
3899 */
3900 rtLockValidatorRecSharedFreeOwner(pEntry);
3901}
3902
3903
3904RTDECL(void) RTLockValidatorRecSharedResetOwner(PRTLOCKVALRECSHRD pRec, RTTHREAD hThread, PCRTLOCKVALSRCPOS pSrcPos)
3905{
3906 AssertReturnVoid(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC);
3907 if (!pRec->fEnabled)
3908 return;
3909 AssertReturnVoid(hThread == NIL_RTTHREAD || hThread->u32Magic == RTTHREADINT_MAGIC);
3910 AssertReturnVoid(pRec->fSignaller);
3911
3912 /*
3913 * Free all current owners.
3914 */
3915 rtLockValidatorSerializeDetectionEnter();
3916 while (ASMAtomicUoReadU32(&pRec->cEntries) > 0)
3917 {
3918 AssertReturnVoidStmt(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, rtLockValidatorSerializeDetectionLeave());
3919 uint32_t iEntry = 0;
3920 uint32_t cEntries = pRec->cAllocated;
3921 PRTLOCKVALRECSHRDOWN volatile *papEntries = pRec->papOwners;
3922 while (iEntry < cEntries)
3923 {
3924 PRTLOCKVALRECSHRDOWN pEntry = ASMAtomicXchgPtrT(&papEntries[iEntry], NULL, PRTLOCKVALRECSHRDOWN);
3925 if (pEntry)
3926 {
3927 ASMAtomicDecU32(&pRec->cEntries);
3928 rtLockValidatorSerializeDetectionLeave();
3929
3930 rtLockValidatorRecSharedFreeOwner(pEntry);
3931
3932 rtLockValidatorSerializeDetectionEnter();
3933 if (ASMAtomicUoReadU32(&pRec->cEntries) == 0)
3934 break;
3935 cEntries = pRec->cAllocated;
3936 papEntries = pRec->papOwners;
3937 }
3938 iEntry++;
3939 }
3940 }
3941 rtLockValidatorSerializeDetectionLeave();
3942
3943 if (hThread != NIL_RTTHREAD)
3944 {
3945 /*
3946 * Allocate a new owner entry and insert it into the table.
3947 */
3948 PRTLOCKVALRECUNION pEntry = rtLockValidatorRecSharedAllocOwner(pRec, hThread, pSrcPos);
3949 if ( pEntry
3950 && !rtLockValidatorRecSharedAddOwner(pRec, &pEntry->ShrdOwner))
3951 rtLockValidatorRecSharedFreeOwner(&pEntry->ShrdOwner);
3952 }
3953}
3954RT_EXPORT_SYMBOL(RTLockValidatorRecSharedResetOwner);
3955
3956
3957RTDECL(void) RTLockValidatorRecSharedAddOwner(PRTLOCKVALRECSHRD pRec, RTTHREAD hThread, PCRTLOCKVALSRCPOS pSrcPos)
3958{
3959 AssertReturnVoid(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC);
3960 if (!pRec->fEnabled)
3961 return;
3962 if (hThread == NIL_RTTHREAD)
3963 {
3964 hThread = RTThreadSelfAutoAdopt();
3965 AssertReturnVoid(hThread != NIL_RTTHREAD);
3966 }
3967 AssertReturnVoid(hThread->u32Magic == RTTHREADINT_MAGIC);
3968
3969 /*
3970 * Recursive?
3971 *
3972 * Note! This code can be optimized to try avoid scanning the table on
3973 * insert. However, that's annoying work that makes the code big,
3974 * so it can wait til later sometime.
3975 */
3976 PRTLOCKVALRECUNION pEntry = rtLockValidatorRecSharedFindOwner(pRec, hThread, NULL);
3977 if (pEntry)
3978 {
3979 Assert(!pRec->fSignaller);
3980 pEntry->ShrdOwner.cRecursion++;
3981 rtLockValidatorStackPushRecursion(hThread, pEntry, pSrcPos);
3982 return;
3983 }
3984
3985 /*
3986 * Allocate a new owner entry and insert it into the table.
3987 */
3988 pEntry = rtLockValidatorRecSharedAllocOwner(pRec, hThread, pSrcPos);
3989 if (pEntry)
3990 {
3991 if (rtLockValidatorRecSharedAddOwner(pRec, &pEntry->ShrdOwner))
3992 {
3993 if (!pRec->fSignaller)
3994 rtLockValidatorStackPush(hThread, pEntry);
3995 }
3996 else
3997 rtLockValidatorRecSharedFreeOwner(&pEntry->ShrdOwner);
3998 }
3999}
4000RT_EXPORT_SYMBOL(RTLockValidatorRecSharedAddOwner);
4001
4002
4003RTDECL(void) RTLockValidatorRecSharedRemoveOwner(PRTLOCKVALRECSHRD pRec, RTTHREAD hThread)
4004{
4005 AssertReturnVoid(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC);
4006 if (!pRec->fEnabled)
4007 return;
4008 if (hThread == NIL_RTTHREAD)
4009 {
4010 hThread = RTThreadSelfAutoAdopt();
4011 AssertReturnVoid(hThread != NIL_RTTHREAD);
4012 }
4013 AssertReturnVoid(hThread->u32Magic == RTTHREADINT_MAGIC);
4014
4015 /*
4016 * Find the entry hope it's a recursive one.
4017 */
4018 uint32_t iEntry = UINT32_MAX; /* shuts up gcc */
4019 PRTLOCKVALRECUNION pEntry = rtLockValidatorRecSharedFindOwner(pRec, hThread, &iEntry);
4020 AssertReturnVoid(pEntry);
4021 AssertReturnVoid(pEntry->ShrdOwner.cRecursion > 0);
4022
4023 uint32_t c = --pEntry->ShrdOwner.cRecursion;
4024 if (c == 0)
4025 {
4026 if (!pRec->fSignaller)
4027 rtLockValidatorStackPop(hThread, (PRTLOCKVALRECUNION)pEntry);
4028 rtLockValidatorRecSharedRemoveAndFreeOwner(pRec, &pEntry->ShrdOwner, iEntry);
4029 }
4030 else
4031 {
4032 Assert(!pRec->fSignaller);
4033 rtLockValidatorStackPopRecursion(hThread, pEntry);
4034 }
4035}
4036RT_EXPORT_SYMBOL(RTLockValidatorRecSharedRemoveOwner);
4037
4038
4039RTDECL(bool) RTLockValidatorRecSharedIsOwner(PRTLOCKVALRECSHRD pRec, RTTHREAD hThread)
4040{
4041 /* Validate and resolve input. */
4042 AssertReturn(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, false);
4043 if (!pRec->fEnabled)
4044 return false;
4045 if (hThread == NIL_RTTHREAD)
4046 {
4047 hThread = RTThreadSelfAutoAdopt();
4048 AssertReturn(hThread != NIL_RTTHREAD, false);
4049 }
4050 AssertReturn(hThread->u32Magic == RTTHREADINT_MAGIC, false);
4051
4052 /* Do the job. */
4053 PRTLOCKVALRECUNION pEntry = rtLockValidatorRecSharedFindOwner(pRec, hThread, NULL);
4054 return pEntry != NULL;
4055}
4056RT_EXPORT_SYMBOL(RTLockValidatorRecSharedIsOwner);
4057
4058
4059RTDECL(int) RTLockValidatorRecSharedCheckAndRelease(PRTLOCKVALRECSHRD pRec, RTTHREAD hThreadSelf)
4060{
4061 AssertReturn(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
4062 if (!pRec->fEnabled)
4063 return VINF_SUCCESS;
4064 if (hThreadSelf == NIL_RTTHREAD)
4065 {
4066 hThreadSelf = RTThreadSelfAutoAdopt();
4067 AssertReturn(hThreadSelf != NIL_RTTHREAD, VERR_SEM_LV_INTERNAL_ERROR);
4068 }
4069 Assert(hThreadSelf == RTThreadSelf());
4070 AssertReturn(hThreadSelf->u32Magic == RTTHREADINT_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
4071
4072 /*
4073 * Locate the entry for this thread in the table.
4074 */
4075 uint32_t iEntry = 0;
4076 PRTLOCKVALRECUNION pEntry = rtLockValidatorRecSharedFindOwner(pRec, hThreadSelf, &iEntry);
4077 if (RT_UNLIKELY(!pEntry))
4078 {
4079 rtLockValComplainFirst("Not owner (shared)!", NULL, hThreadSelf, (PRTLOCKVALRECUNION)pRec, true);
4080 rtLockValComplainPanic();
4081 return VERR_SEM_LV_NOT_OWNER;
4082 }
4083
4084 /*
4085 * Check the release order.
4086 */
4087 if ( pRec->hClass != NIL_RTLOCKVALCLASS
4088 && pRec->hClass->fStrictReleaseOrder
4089 && pRec->hClass->cMsMinOrder != RT_INDEFINITE_WAIT
4090 )
4091 {
4092 int rc = rtLockValidatorStackCheckReleaseOrder(hThreadSelf, (PRTLOCKVALRECUNION)pEntry);
4093 if (RT_FAILURE(rc))
4094 return rc;
4095 }
4096
4097 /*
4098 * Release the ownership or unwind a level of recursion.
4099 */
4100 Assert(pEntry->ShrdOwner.cRecursion > 0);
4101 uint32_t c = --pEntry->ShrdOwner.cRecursion;
4102 if (c == 0)
4103 {
4104 rtLockValidatorStackPop(hThreadSelf, pEntry);
4105 rtLockValidatorRecSharedRemoveAndFreeOwner(pRec, &pEntry->ShrdOwner, iEntry);
4106 }
4107 else
4108 rtLockValidatorStackPopRecursion(hThreadSelf, pEntry);
4109
4110 return VINF_SUCCESS;
4111}
4112
4113
4114RTDECL(int) RTLockValidatorRecSharedCheckSignaller(PRTLOCKVALRECSHRD pRec, RTTHREAD hThreadSelf)
4115{
4116 AssertReturn(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
4117 if (!pRec->fEnabled)
4118 return VINF_SUCCESS;
4119 if (hThreadSelf == NIL_RTTHREAD)
4120 {
4121 hThreadSelf = RTThreadSelfAutoAdopt();
4122 AssertReturn(hThreadSelf != NIL_RTTHREAD, VERR_SEM_LV_INTERNAL_ERROR);
4123 }
4124 Assert(hThreadSelf == RTThreadSelf());
4125 AssertReturn(hThreadSelf->u32Magic == RTTHREADINT_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
4126
4127 /*
4128 * Locate the entry for this thread in the table.
4129 */
4130 uint32_t iEntry = 0;
4131 PRTLOCKVALRECUNION pEntry = rtLockValidatorRecSharedFindOwner(pRec, hThreadSelf, &iEntry);
4132 if (RT_UNLIKELY(!pEntry))
4133 {
4134 rtLockValComplainFirst("Invalid signaller!", NULL, hThreadSelf, (PRTLOCKVALRECUNION)pRec, true);
4135 rtLockValComplainPanic();
4136 return VERR_SEM_LV_NOT_SIGNALLER;
4137 }
4138 return VINF_SUCCESS;
4139}
4140
4141
4142RTDECL(int32_t) RTLockValidatorWriteLockGetCount(RTTHREAD Thread)
4143{
4144 if (Thread == NIL_RTTHREAD)
4145 return 0;
4146
4147 PRTTHREADINT pThread = rtThreadGet(Thread);
4148 if (!pThread)
4149 return VERR_INVALID_HANDLE;
4150 int32_t cWriteLocks = ASMAtomicReadS32(&pThread->LockValidator.cWriteLocks);
4151 rtThreadRelease(pThread);
4152 return cWriteLocks;
4153}
4154RT_EXPORT_SYMBOL(RTLockValidatorWriteLockGetCount);
4155
4156
4157RTDECL(void) RTLockValidatorWriteLockInc(RTTHREAD Thread)
4158{
4159 PRTTHREADINT pThread = rtThreadGet(Thread);
4160 AssertReturnVoid(pThread);
4161 ASMAtomicIncS32(&pThread->LockValidator.cWriteLocks);
4162 rtThreadRelease(pThread);
4163}
4164RT_EXPORT_SYMBOL(RTLockValidatorWriteLockInc);
4165
4166
4167RTDECL(void) RTLockValidatorWriteLockDec(RTTHREAD Thread)
4168{
4169 PRTTHREADINT pThread = rtThreadGet(Thread);
4170 AssertReturnVoid(pThread);
4171 ASMAtomicDecS32(&pThread->LockValidator.cWriteLocks);
4172 rtThreadRelease(pThread);
4173}
4174RT_EXPORT_SYMBOL(RTLockValidatorWriteLockDec);
4175
4176
4177RTDECL(int32_t) RTLockValidatorReadLockGetCount(RTTHREAD Thread)
4178{
4179 if (Thread == NIL_RTTHREAD)
4180 return 0;
4181
4182 PRTTHREADINT pThread = rtThreadGet(Thread);
4183 if (!pThread)
4184 return VERR_INVALID_HANDLE;
4185 int32_t cReadLocks = ASMAtomicReadS32(&pThread->LockValidator.cReadLocks);
4186 rtThreadRelease(pThread);
4187 return cReadLocks;
4188}
4189RT_EXPORT_SYMBOL(RTLockValidatorReadLockGetCount);
4190
4191
4192RTDECL(void) RTLockValidatorReadLockInc(RTTHREAD Thread)
4193{
4194 PRTTHREADINT pThread = rtThreadGet(Thread);
4195 Assert(pThread);
4196 ASMAtomicIncS32(&pThread->LockValidator.cReadLocks);
4197 rtThreadRelease(pThread);
4198}
4199RT_EXPORT_SYMBOL(RTLockValidatorReadLockInc);
4200
4201
4202RTDECL(void) RTLockValidatorReadLockDec(RTTHREAD Thread)
4203{
4204 PRTTHREADINT pThread = rtThreadGet(Thread);
4205 Assert(pThread);
4206 ASMAtomicDecS32(&pThread->LockValidator.cReadLocks);
4207 rtThreadRelease(pThread);
4208}
4209RT_EXPORT_SYMBOL(RTLockValidatorReadLockDec);
4210
4211
4212RTDECL(void *) RTLockValidatorQueryBlocking(RTTHREAD hThread)
4213{
4214 void *pvLock = NULL;
4215 PRTTHREADINT pThread = rtThreadGet(hThread);
4216 if (pThread)
4217 {
4218 RTTHREADSTATE enmState = rtThreadGetState(pThread);
4219 if (RTTHREAD_IS_SLEEPING(enmState))
4220 {
4221 rtLockValidatorSerializeDetectionEnter();
4222
4223 enmState = rtThreadGetState(pThread);
4224 if (RTTHREAD_IS_SLEEPING(enmState))
4225 {
4226 PRTLOCKVALRECUNION pRec = rtLockValidatorReadRecUnionPtr(&pThread->LockValidator.pRec);
4227 if (pRec)
4228 {
4229 switch (pRec->Core.u32Magic)
4230 {
4231 case RTLOCKVALRECEXCL_MAGIC:
4232 pvLock = pRec->Excl.hLock;
4233 break;
4234
4235 case RTLOCKVALRECSHRDOWN_MAGIC:
4236 pRec = (PRTLOCKVALRECUNION)pRec->ShrdOwner.pSharedRec;
4237 if (!pRec || pRec->Core.u32Magic != RTLOCKVALRECSHRD_MAGIC)
4238 break;
4239 case RTLOCKVALRECSHRD_MAGIC:
4240 pvLock = pRec->Shared.hLock;
4241 break;
4242 }
4243 if (RTThreadGetState(pThread) != enmState)
4244 pvLock = NULL;
4245 }
4246 }
4247
4248 rtLockValidatorSerializeDetectionLeave();
4249 }
4250 rtThreadRelease(pThread);
4251 }
4252 return pvLock;
4253}
4254RT_EXPORT_SYMBOL(RTLockValidatorQueryBlocking);
4255
4256
4257RTDECL(bool) RTLockValidatorIsBlockedThreadInValidator(RTTHREAD hThread)
4258{
4259 bool fRet = false;
4260 PRTTHREADINT pThread = rtThreadGet(hThread);
4261 if (pThread)
4262 {
4263 fRet = ASMAtomicReadBool(&pThread->LockValidator.fInValidator);
4264 rtThreadRelease(pThread);
4265 }
4266 return fRet;
4267}
4268RT_EXPORT_SYMBOL(RTLockValidatorIsBlockedThreadInValidator);
4269
4270
4271RTDECL(bool) RTLockValidatorHoldsLocksInClass(RTTHREAD hCurrentThread, RTLOCKVALCLASS hClass)
4272{
4273 bool fRet = false;
4274 if (hCurrentThread == NIL_RTTHREAD)
4275 hCurrentThread = RTThreadSelf();
4276 else
4277 Assert(hCurrentThread == RTThreadSelf());
4278 PRTTHREADINT pThread = rtThreadGet(hCurrentThread);
4279 if (pThread)
4280 {
4281 if (hClass != NIL_RTLOCKVALCLASS)
4282 {
4283 PRTLOCKVALRECUNION pCur = rtLockValidatorReadRecUnionPtr(&pThread->LockValidator.pStackTop);
4284 while (VALID_PTR(pCur) && !fRet)
4285 {
4286 switch (pCur->Core.u32Magic)
4287 {
4288 case RTLOCKVALRECEXCL_MAGIC:
4289 fRet = pCur->Excl.hClass == hClass;
4290 pCur = rtLockValidatorReadRecUnionPtr(&pCur->Excl.pDown);
4291 break;
4292 case RTLOCKVALRECSHRDOWN_MAGIC:
4293 fRet = VALID_PTR(pCur->ShrdOwner.pSharedRec)
4294 && pCur->ShrdOwner.pSharedRec->hClass == hClass;
4295 pCur = rtLockValidatorReadRecUnionPtr(&pCur->ShrdOwner.pDown);
4296 break;
4297 case RTLOCKVALRECNEST_MAGIC:
4298 switch (pCur->Nest.pRec->Core.u32Magic)
4299 {
4300 case RTLOCKVALRECEXCL_MAGIC:
4301 fRet = pCur->Nest.pRec->Excl.hClass == hClass;
4302 break;
4303 case RTLOCKVALRECSHRDOWN_MAGIC:
4304 fRet = VALID_PTR(pCur->ShrdOwner.pSharedRec)
4305 && pCur->Nest.pRec->ShrdOwner.pSharedRec->hClass == hClass;
4306 break;
4307 }
4308 pCur = rtLockValidatorReadRecUnionPtr(&pCur->Nest.pDown);
4309 break;
4310 default:
4311 pCur = NULL;
4312 break;
4313 }
4314 }
4315 }
4316
4317 rtThreadRelease(pThread);
4318 }
4319 return fRet;
4320}
4321RT_EXPORT_SYMBOL(RTLockValidatorHoldsLocksInClass);
4322
4323
4324RTDECL(bool) RTLockValidatorHoldsLocksInSubClass(RTTHREAD hCurrentThread, RTLOCKVALCLASS hClass, uint32_t uSubClass)
4325{
4326 bool fRet = false;
4327 if (hCurrentThread == NIL_RTTHREAD)
4328 hCurrentThread = RTThreadSelf();
4329 else
4330 Assert(hCurrentThread == RTThreadSelf());
4331 PRTTHREADINT pThread = rtThreadGet(hCurrentThread);
4332 if (pThread)
4333 {
4334 if (hClass != NIL_RTLOCKVALCLASS)
4335 {
4336 PRTLOCKVALRECUNION pCur = rtLockValidatorReadRecUnionPtr(&pThread->LockValidator.pStackTop);
4337 while (VALID_PTR(pCur) && !fRet)
4338 {
4339 switch (pCur->Core.u32Magic)
4340 {
4341 case RTLOCKVALRECEXCL_MAGIC:
4342 fRet = pCur->Excl.hClass == hClass
4343 && pCur->Excl.uSubClass == uSubClass;
4344 pCur = rtLockValidatorReadRecUnionPtr(&pCur->Excl.pDown);
4345 break;
4346 case RTLOCKVALRECSHRDOWN_MAGIC:
4347 fRet = VALID_PTR(pCur->ShrdOwner.pSharedRec)
4348 && pCur->ShrdOwner.pSharedRec->hClass == hClass
4349 && pCur->ShrdOwner.pSharedRec->uSubClass == uSubClass;
4350 pCur = rtLockValidatorReadRecUnionPtr(&pCur->ShrdOwner.pDown);
4351 break;
4352 case RTLOCKVALRECNEST_MAGIC:
4353 switch (pCur->Nest.pRec->Core.u32Magic)
4354 {
4355 case RTLOCKVALRECEXCL_MAGIC:
4356 fRet = pCur->Nest.pRec->Excl.hClass == hClass
4357 && pCur->Nest.pRec->Excl.uSubClass == uSubClass;
4358 break;
4359 case RTLOCKVALRECSHRDOWN_MAGIC:
4360 fRet = VALID_PTR(pCur->ShrdOwner.pSharedRec)
4361 && pCur->Nest.pRec->ShrdOwner.pSharedRec->hClass == hClass
4362 && pCur->Nest.pRec->ShrdOwner.pSharedRec->uSubClass == uSubClass;
4363 break;
4364 }
4365 pCur = rtLockValidatorReadRecUnionPtr(&pCur->Nest.pDown);
4366 break;
4367 default:
4368 pCur = NULL;
4369 break;
4370 }
4371 }
4372 }
4373
4374 rtThreadRelease(pThread);
4375 }
4376 return fRet;
4377}
4378RT_EXPORT_SYMBOL(RTLockValidatorHoldsLocksInClass);
4379
4380
4381RTDECL(bool) RTLockValidatorSetEnabled(bool fEnabled)
4382{
4383 return ASMAtomicXchgBool(&g_fLockValidatorEnabled, fEnabled);
4384}
4385RT_EXPORT_SYMBOL(RTLockValidatorSetEnabled);
4386
4387
4388RTDECL(bool) RTLockValidatorIsEnabled(void)
4389{
4390 return ASMAtomicUoReadBool(&g_fLockValidatorEnabled);
4391}
4392RT_EXPORT_SYMBOL(RTLockValidatorIsEnabled);
4393
4394
4395RTDECL(bool) RTLockValidatorSetQuiet(bool fQuiet)
4396{
4397 return ASMAtomicXchgBool(&g_fLockValidatorQuiet, fQuiet);
4398}
4399RT_EXPORT_SYMBOL(RTLockValidatorSetQuiet);
4400
4401
4402RTDECL(bool) RTLockValidatorIsQuiet(void)
4403{
4404 return ASMAtomicUoReadBool(&g_fLockValidatorQuiet);
4405}
4406RT_EXPORT_SYMBOL(RTLockValidatorIsQuiet);
4407
4408
4409RTDECL(bool) RTLockValidatorSetMayPanic(bool fMayPanic)
4410{
4411 return ASMAtomicXchgBool(&g_fLockValidatorMayPanic, fMayPanic);
4412}
4413RT_EXPORT_SYMBOL(RTLockValidatorSetMayPanic);
4414
4415
4416RTDECL(bool) RTLockValidatorMayPanic(void)
4417{
4418 return ASMAtomicUoReadBool(&g_fLockValidatorMayPanic);
4419}
4420RT_EXPORT_SYMBOL(RTLockValidatorMayPanic);
4421
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette