VirtualBox

source: vbox/trunk/src/VBox/Runtime/common/misc/lockvalidator.cpp@39071

Last change on this file since 39071 was 39071, checked in by vboxsync, 13 years ago

build fix

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 157.5 KB
/* $Id: lockvalidator.cpp 39071 2011-10-21 10:21:06Z vboxsync $ */
/** @file
 * IPRT - Lock Validator.
 */

/*
 * Copyright (C) 2009-2010 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * The contents of this file may alternatively be used under the terms
 * of the Common Development and Distribution License Version 1.0
 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
 * VirtualBox OSE distribution, in which case the provisions of the
 * CDDL are applicable instead of those of the GPL.
 *
 * You may elect to license modified versions of this file under the
 * terms and conditions of either the GPL or the CDDL or both.
 */

/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#include <iprt/lockvalidator.h>
#include "internal/iprt.h"

#include <iprt/asm.h>
#include <iprt/assert.h>
#include <iprt/env.h>
#include <iprt/err.h>
#include <iprt/mem.h>
#include <iprt/once.h>
#include <iprt/semaphore.h>
#include <iprt/string.h>
#include <iprt/thread.h>

#include "internal/lockvalidator.h"
#include "internal/magics.h"
#include "internal/strhash.h"
#include "internal/thread.h"

/*******************************************************************************
*   Defined Constants And Macros                                               *
*******************************************************************************/
/** Macro that asserts that a pointer is aligned correctly.
 * Only used when fighting bugs. */
#if 1
# define RTLOCKVAL_ASSERT_PTR_ALIGN(p) \
    AssertMsg(!((uintptr_t)(p) & (sizeof(uintptr_t) - 1)), ("%p\n", (p)));
#else
# define RTLOCKVAL_ASSERT_PTR_ALIGN(p) do { } while (0)
#endif

/** Hashes the class handle (pointer) into an apPriorLocksHash index. */
#define RTLOCKVALCLASS_HASH(hClass) \
    (   ((uintptr_t)(hClass) >> 6) \
      % (  RT_SIZEOFMEMB(RTLOCKVALCLASSINT, apPriorLocksHash) \
         / sizeof(PRTLOCKVALCLASSREF)) )

/** The max value for RTLOCKVALCLASSINT::cRefs. */
#define RTLOCKVALCLASS_MAX_REFS             UINT32_C(0xffff0000)
/** The max value for RTLOCKVALCLASSREF::cLookups. */
#define RTLOCKVALCLASSREF_MAX_LOOKUPS       UINT32_C(0xfffe0000)
/** The absolute max value for RTLOCKVALCLASSREF::cLookups at which it will
 * be set back to RTLOCKVALCLASSREF_MAX_LOOKUPS. */
#define RTLOCKVALCLASSREF_MAX_LOOKUPS_FIX   UINT32_C(0xffff0000)


/** @def RTLOCKVAL_WITH_RECURSION_RECORDS
 * Enable recursion records. */
#if defined(IN_RING3) || defined(DOXYGEN_RUNNING)
# define RTLOCKVAL_WITH_RECURSION_RECORDS 1
#endif

/** @def RTLOCKVAL_WITH_VERBOSE_DUMPS
 * Enables some extra verbosity in the lock dumping. */
#if defined(DOXYGEN_RUNNING)
# define RTLOCKVAL_WITH_VERBOSE_DUMPS
#endif

/** @def RTLOCKVAL_WITH_CLASS_HASH_STATS
 * Enables collection of prior-class hash lookup statistics, which get dumped
 * when complaining about the class. */
#if defined(DEBUG) || defined(DOXYGEN_RUNNING)
# define RTLOCKVAL_WITH_CLASS_HASH_STATS
#endif


/*******************************************************************************
*   Structures and Typedefs                                                    *
*******************************************************************************/
/**
 * Deadlock detection stack entry.
 */
typedef struct RTLOCKVALDDENTRY
{
    /** The current record. */
    PRTLOCKVALRECUNION      pRec;
    /** The current entry number if pRec is a shared one. */
    uint32_t                iEntry;
    /** The thread state of the thread we followed to get to pFirstSibling.
     * This is only used for validating a deadlock stack. */
    RTTHREADSTATE           enmState;
    /** The thread we followed to get to pFirstSibling.
     * This is only used for validating a deadlock stack. */
    PRTTHREADINT            pThread;
    /** What pThread is waiting on, i.e. where we entered the circular list of
     * siblings.  This is used for validating a deadlock stack as well as
     * terminating the sibling walk. */
    PRTLOCKVALRECUNION      pFirstSibling;
} RTLOCKVALDDENTRY;


/**
 * Deadlock detection stack.
 */
typedef struct RTLOCKVALDDSTACK
{
    /** The number of stack entries. */
    uint32_t                c;
    /** The stack entries. */
    RTLOCKVALDDENTRY        a[32];
} RTLOCKVALDDSTACK;
/** Pointer to a deadlock detection stack. */
typedef RTLOCKVALDDSTACK *PRTLOCKVALDDSTACK;


/**
 * Reference to another class.
 */
typedef struct RTLOCKVALCLASSREF
{
    /** The class. */
    RTLOCKVALCLASS          hClass;
    /** The number of lookups of this class. */
    uint32_t volatile       cLookups;
    /** Indicates whether the entry was added automatically during order checking
     * (true) or manually via the API (false). */
    bool                    fAutodidacticism;
    /** Reserved / explicit alignment padding. */
    bool                    afReserved[3];
} RTLOCKVALCLASSREF;
/** Pointer to a class reference. */
typedef RTLOCKVALCLASSREF *PRTLOCKVALCLASSREF;


/** Pointer to a chunk of class references. */
typedef struct RTLOCKVALCLASSREFCHUNK *PRTLOCKVALCLASSREFCHUNK;
/**
 * Chunk of class references.
 */
typedef struct RTLOCKVALCLASSREFCHUNK
{
    /** Array of refs. */
#if 0 /** @todo for testing allocation of new chunks. */
    RTLOCKVALCLASSREF       aRefs[ARCH_BITS == 32 ? 10 : 8];
#else
    RTLOCKVALCLASSREF       aRefs[2];
#endif
    /** Pointer to the next chunk. */
    PRTLOCKVALCLASSREFCHUNK volatile pNext;
} RTLOCKVALCLASSREFCHUNK;


/**
 * Lock class.
 */
typedef struct RTLOCKVALCLASSINT
{
    /** AVL node core. */
    AVLLU32NODECORE         Core;
    /** Magic value (RTLOCKVALCLASS_MAGIC). */
    uint32_t volatile       u32Magic;
    /** Reference counter.  See RTLOCKVALCLASS_MAX_REFS. */
    uint32_t volatile       cRefs;
    /** Whether the class is allowed to teach itself new locking order rules. */
    bool                    fAutodidact;
    /** Whether to allow recursion. */
    bool                    fRecursionOk;
    /** Strict release order. */
    bool                    fStrictReleaseOrder;
    /** Whether this class is in the tree. */
    bool                    fInTree;
    /** Donate a reference to the next retainer.  This is a hack to make
     * RTLockValidatorClassCreateUnique work. */
    bool volatile           fDonateRefToNextRetainer;
    /** Reserved future use / explicit alignment. */
    bool                    afReserved[3];
    /** The minimum wait interval for which we do deadlock detection
     * (milliseconds). */
    RTMSINTERVAL            cMsMinDeadlock;
    /** The minimum wait interval for which we do order checks (milliseconds). */
    RTMSINTERVAL            cMsMinOrder;
    /** More padding. */
    uint32_t                au32Reserved[ARCH_BITS == 32 ? 5 : 2];
    /** Classes that may be taken prior to this one.
     * This is a linked list where each node contains a chunk of locks so that we
     * reduce the number of allocations as well as localize the data. */
    RTLOCKVALCLASSREFCHUNK  PriorLocks;
    /** Hash table containing frequently encountered prior locks. */
    PRTLOCKVALCLASSREF      apPriorLocksHash[17];
    /** Class name.  (Allocated after the end of the block as usual.) */
    char const             *pszName;
    /** Where this class was created.
     * This is mainly used for finding automatically created lock classes.
     * @remarks The strings are stored after this structure so we won't crash
     *          if the class lives longer than the module (dll/so/dylib) that
     *          spawned it. */
    RTLOCKVALSRCPOS         CreatePos;
#ifdef RTLOCKVAL_WITH_CLASS_HASH_STATS
    /** Hash hits. */
    uint32_t volatile       cHashHits;
    /** Hash misses. */
    uint32_t volatile       cHashMisses;
#endif
} RTLOCKVALCLASSINT;
AssertCompileSize(AVLLU32NODECORE, ARCH_BITS == 32 ? 20 : 32);
AssertCompileMemberOffset(RTLOCKVALCLASSINT, PriorLocks, 64);


/*******************************************************************************
*   Global Variables                                                           *
*******************************************************************************/
/** Serializing object destruction and deadlock detection.
 *
 * This makes sure that none of the memory examined by the deadlock detection
 * code will become invalid (reused for other purposes or made not present)
 * while the detection is in progress.
 *
 * NS: RTLOCKVALREC*, RTTHREADINT and RTLOCKVALDRECSHRD::papOwners destruction.
 * EW: Deadlock detection and some related activities.
 */
static RTSEMXROADS      g_hLockValidatorXRoads = NIL_RTSEMXROADS;
/** Serializing class tree insert and lookups. */
static RTSEMRW          g_hLockValClassTreeRWLock = NIL_RTSEMRW;
/** Class tree. */
static PAVLLU32NODECORE g_LockValClassTree = NULL;
/** Critical section serializing the teaching new rules to the classes. */
static RTCRITSECT       g_LockValClassTeachCS;

/** Whether the lock validator is enabled or disabled.
 * Only applies to new locks. */
static bool volatile    g_fLockValidatorEnabled = true;
/** Set if the lock validator is quiet. */
#ifdef RT_STRICT
static bool volatile    g_fLockValidatorQuiet = false;
#else
static bool volatile    g_fLockValidatorQuiet = true;
#endif
/** Set if the lock validator may panic. */
#ifdef RT_STRICT
static bool volatile    g_fLockValidatorMayPanic = true;
#else
static bool volatile    g_fLockValidatorMayPanic = false;
#endif
/** Whether to return an error status on wrong locking order. */
static bool volatile    g_fLockValSoftWrongOrder = false;


/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
static void     rtLockValidatorClassDestroy(RTLOCKVALCLASSINT *pClass);
static uint32_t rtLockValidatorStackDepth(PRTTHREADINT pThread);


/**
 * Lazy initialization of the lock validator globals.
 */
static void rtLockValidatorLazyInit(void)
{
    static uint32_t volatile s_fInitializing = false;
    if (ASMAtomicCmpXchgU32(&s_fInitializing, true, false))
    {
        /*
         * The locks.
         */
        if (!RTCritSectIsInitialized(&g_LockValClassTeachCS))
            RTCritSectInitEx(&g_LockValClassTeachCS, RTCRITSECT_FLAGS_NO_LOCK_VAL, NIL_RTLOCKVALCLASS,
                             RTLOCKVAL_SUB_CLASS_ANY, "RTLockVal-Teach");

        if (g_hLockValClassTreeRWLock == NIL_RTSEMRW)
        {
            RTSEMRW hSemRW;
            int rc = RTSemRWCreateEx(&hSemRW, RTSEMRW_FLAGS_NO_LOCK_VAL, NIL_RTLOCKVALCLASS, RTLOCKVAL_SUB_CLASS_ANY, "RTLockVal-Tree");
            if (RT_SUCCESS(rc))
                ASMAtomicWriteHandle(&g_hLockValClassTreeRWLock, hSemRW);
        }

        if (g_hLockValidatorXRoads == NIL_RTSEMXROADS)
        {
            RTSEMXROADS hXRoads;
            int rc = RTSemXRoadsCreate(&hXRoads);
            if (RT_SUCCESS(rc))
                ASMAtomicWriteHandle(&g_hLockValidatorXRoads, hXRoads);
        }

#ifdef IN_RING3
        /*
         * Check the environment for our config variables.
         */
        if (RTEnvExist("IPRT_LOCK_VALIDATOR_ENABLED"))
            ASMAtomicWriteBool(&g_fLockValidatorEnabled, true);
        if (RTEnvExist("IPRT_LOCK_VALIDATOR_DISABLED"))
            ASMAtomicWriteBool(&g_fLockValidatorEnabled, false);

        if (RTEnvExist("IPRT_LOCK_VALIDATOR_MAY_PANIC"))
            ASMAtomicWriteBool(&g_fLockValidatorMayPanic, true);
        if (RTEnvExist("IPRT_LOCK_VALIDATOR_MAY_NOT_PANIC"))
            ASMAtomicWriteBool(&g_fLockValidatorMayPanic, false);

        if (RTEnvExist("IPRT_LOCK_VALIDATOR_NOT_QUIET"))
            ASMAtomicWriteBool(&g_fLockValidatorQuiet, false);
        if (RTEnvExist("IPRT_LOCK_VALIDATOR_QUIET"))
            ASMAtomicWriteBool(&g_fLockValidatorQuiet, true);

        if (RTEnvExist("IPRT_LOCK_VALIDATOR_STRICT_ORDER"))
            ASMAtomicWriteBool(&g_fLockValSoftWrongOrder, false);
        if (RTEnvExist("IPRT_LOCK_VALIDATOR_SOFT_ORDER"))
            ASMAtomicWriteBool(&g_fLockValSoftWrongOrder, true);
#endif

        /*
         * Register cleanup
         */
        /** @todo register some cleanup callback if we care. */

        ASMAtomicWriteU32(&s_fInitializing, false);
    }
}
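
/* Example (assumption, not part of this file): since the checks above are
 * ring-3 only, the validator can be configured from the shell before
 * starting any IPRT-based process, e.g.:
 *
 *     IPRT_LOCK_VALIDATOR_ENABLED=1 IPRT_LOCK_VALIDATOR_MAY_PANIC=1 ./VirtualBox
 */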



/** Wrapper around ASMAtomicReadPtr. */
DECL_FORCE_INLINE(PRTLOCKVALRECUNION) rtLockValidatorReadRecUnionPtr(PRTLOCKVALRECUNION volatile *ppRec)
{
    PRTLOCKVALRECUNION p = ASMAtomicReadPtrT(ppRec, PRTLOCKVALRECUNION);
    RTLOCKVAL_ASSERT_PTR_ALIGN(p);
    return p;
}


/** Wrapper around ASMAtomicWritePtr. */
DECL_FORCE_INLINE(void) rtLockValidatorWriteRecUnionPtr(PRTLOCKVALRECUNION volatile *ppRec, PRTLOCKVALRECUNION pRecNew)
{
    RTLOCKVAL_ASSERT_PTR_ALIGN(pRecNew);
    ASMAtomicWritePtr(ppRec, pRecNew);
}


/** Wrapper around ASMAtomicReadPtr. */
DECL_FORCE_INLINE(PRTTHREADINT) rtLockValidatorReadThreadHandle(RTTHREAD volatile *phThread)
{
    PRTTHREADINT p = ASMAtomicReadPtrT(phThread, PRTTHREADINT);
    RTLOCKVAL_ASSERT_PTR_ALIGN(p);
    return p;
}


/** Wrapper around ASMAtomicUoReadPtr. */
DECL_FORCE_INLINE(PRTLOCKVALRECSHRDOWN) rtLockValidatorUoReadSharedOwner(PRTLOCKVALRECSHRDOWN volatile *ppOwner)
{
    PRTLOCKVALRECSHRDOWN p = ASMAtomicUoReadPtrT(ppOwner, PRTLOCKVALRECSHRDOWN);
    RTLOCKVAL_ASSERT_PTR_ALIGN(p);
    return p;
}


/**
 * Reads a volatile thread handle field and returns the thread name.
 *
 * @returns Thread name (read only).
 * @param   phThread            The thread handle field.
 */
static const char *rtLockValidatorNameThreadHandle(RTTHREAD volatile *phThread)
{
    PRTTHREADINT pThread = rtLockValidatorReadThreadHandle(phThread);
    if (!pThread)
        return "<NIL>";
    if (!VALID_PTR(pThread))
        return "<INVALID>";
    if (pThread->u32Magic != RTTHREADINT_MAGIC)
        return "<BAD-THREAD-MAGIC>";
    return pThread->szName;
}


/**
 * Launch a simple assertion like complaint w/ panic.
 *
 * @param   pszFile             Where from - file.
 * @param   iLine               Where from - line.
 * @param   pszFunction         Where from - function.
 * @param   pszWhat             What we're complaining about.
 * @param   ...                 Format arguments.
 */
static void rtLockValComplain(RT_SRC_POS_DECL, const char *pszWhat, ...)
{
    if (!ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
    {
        RTAssertMsg1Weak("RTLockValidator", iLine, pszFile, pszFunction);
        va_list va;
        va_start(va, pszWhat);
        RTAssertMsg2WeakV(pszWhat, va);
        va_end(va);
    }
    if (!ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
        RTAssertPanic();
}


/**
 * Describes the class.
 *
 * @param   pszPrefix           Message prefix.
 * @param   pClass              The class to complain about.
 * @param   uSubClass           My sub-class.
 * @param   fVerbose            Verbose description including relations to other
 *                              classes.
 */
static void rtLockValComplainAboutClass(const char *pszPrefix, RTLOCKVALCLASSINT *pClass, uint32_t uSubClass, bool fVerbose)
{
    if (ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
        return;

    /* Stringify the sub-class. */
    const char *pszSubClass;
    char        szSubClass[32];
    if (uSubClass < RTLOCKVAL_SUB_CLASS_USER)
        switch (uSubClass)
        {
            case RTLOCKVAL_SUB_CLASS_NONE: pszSubClass = "none"; break;
            case RTLOCKVAL_SUB_CLASS_ANY:  pszSubClass = "any"; break;
            default:
                RTStrPrintf(szSubClass, sizeof(szSubClass), "invl-%u", uSubClass);
                pszSubClass = szSubClass;
                break;
        }
    else
    {
        RTStrPrintf(szSubClass, sizeof(szSubClass), "%u", uSubClass);
        pszSubClass = szSubClass;
    }

    /* Validate the class pointer. */
    if (!VALID_PTR(pClass))
    {
        RTAssertMsg2AddWeak("%sbad class=%p sub-class=%s\n", pszPrefix, pClass, pszSubClass);
        return;
    }
    if (pClass->u32Magic != RTLOCKVALCLASS_MAGIC)
    {
        RTAssertMsg2AddWeak("%sbad class=%p magic=%#x sub-class=%s\n", pszPrefix, pClass, pClass->u32Magic, pszSubClass);
        return;
    }

    /* OK, dump the class info. */
    RTAssertMsg2AddWeak("%sclass=%p %s created={%Rbn(%u) %Rfn %p} sub-class=%s\n", pszPrefix,
                        pClass,
                        pClass->pszName,
                        pClass->CreatePos.pszFile,
                        pClass->CreatePos.uLine,
                        pClass->CreatePos.pszFunction,
                        pClass->CreatePos.uId,
                        pszSubClass);
    if (fVerbose)
    {
        uint32_t i        = 0;
        uint32_t cPrinted = 0;
        for (PRTLOCKVALCLASSREFCHUNK pChunk = &pClass->PriorLocks; pChunk; pChunk = pChunk->pNext)
            for (unsigned j = 0; j < RT_ELEMENTS(pChunk->aRefs); j++, i++)
            {
                RTLOCKVALCLASSINT *pCurClass = pChunk->aRefs[j].hClass;
                if (pCurClass != NIL_RTLOCKVALCLASS)
                {
                    RTAssertMsg2AddWeak("%s%s #%02u: %s, %s, %u lookup%s\n", pszPrefix,
                                        cPrinted == 0
                                        ? "Prior:"
                                        : "      ",
                                        i,
                                        pCurClass->pszName,
                                        pChunk->aRefs[j].fAutodidacticism
                                        ? "autodidactic"
                                        : "manually    ",
                                        pChunk->aRefs[j].cLookups,
                                        pChunk->aRefs[j].cLookups != 1 ? "s" : "");
                    cPrinted++;
                }
            }
        if (!cPrinted)
            RTAssertMsg2AddWeak("%sPrior: none\n", pszPrefix);
#ifdef RTLOCKVAL_WITH_CLASS_HASH_STATS
        RTAssertMsg2AddWeak("%sHash Stats: %u hits, %u misses\n", pszPrefix, pClass->cHashHits, pClass->cHashMisses);
#endif
    }
    else
    {
        uint32_t cPrinted = 0;
        for (PRTLOCKVALCLASSREFCHUNK pChunk = &pClass->PriorLocks; pChunk; pChunk = pChunk->pNext)
            for (unsigned j = 0; j < RT_ELEMENTS(pChunk->aRefs); j++)
            {
                RTLOCKVALCLASSINT *pCurClass = pChunk->aRefs[j].hClass;
                if (pCurClass != NIL_RTLOCKVALCLASS)
                {
                    if ((cPrinted % 10) == 0)
                        RTAssertMsg2AddWeak("%sPrior classes: %s%s", pszPrefix, pCurClass->pszName,
                                            pChunk->aRefs[j].fAutodidacticism ? "*" : "");
                    else if ((cPrinted % 10) != 9)
                        RTAssertMsg2AddWeak(", %s%s", pCurClass->pszName,
                                            pChunk->aRefs[j].fAutodidacticism ? "*" : "");
                    else
                        RTAssertMsg2AddWeak(", %s%s\n", pCurClass->pszName,
                                            pChunk->aRefs[j].fAutodidacticism ? "*" : "");
                    cPrinted++;
                }
            }
        if (!cPrinted)
            RTAssertMsg2AddWeak("%sPrior classes: none\n", pszPrefix);
        else if ((cPrinted % 10) != 0)
            RTAssertMsg2AddWeak("\n");
    }
}


/**
 * Helper for getting the class name.
 * @returns Class name string.
 * @param   pClass              The class.
 */
static const char *rtLockValComplainGetClassName(RTLOCKVALCLASSINT *pClass)
{
    if (!pClass)
        return "<nil-class>";
    if (!VALID_PTR(pClass))
        return "<bad-class-ptr>";
    if (pClass->u32Magic != RTLOCKVALCLASS_MAGIC)
        return "<bad-class-magic>";
    if (!pClass->pszName)
        return "<no-class-name>";
    return pClass->pszName;
}

/**
 * Formats the sub-class.
 *
 * @returns Stringified sub-class.
 * @param   uSubClass           The sub-class number to stringify.
 * @param   pszBuf              Buffer that is big enough (at least 32 bytes).
 */
static const char *rtLockValComplainGetSubClassName(uint32_t uSubClass, char *pszBuf)
{
    if (uSubClass < RTLOCKVAL_SUB_CLASS_USER)
        switch (uSubClass)
        {
            case RTLOCKVAL_SUB_CLASS_NONE: return "none";
            case RTLOCKVAL_SUB_CLASS_ANY:  return "any";
            default:
                RTStrPrintf(pszBuf, 32, "invl-%u", uSubClass);
                break;
        }
    else
        RTStrPrintf(pszBuf, 32, "%x", uSubClass);
    return pszBuf;
}


/**
 * Helper for rtLockValComplainAboutLock.
 */
DECL_FORCE_INLINE(void) rtLockValComplainAboutLockHlp(const char *pszPrefix, PRTLOCKVALRECUNION pRec, const char *pszSuffix,
                                                      uint32_t u32Magic, PCRTLOCKVALSRCPOS pSrcPos, uint32_t cRecursion,
                                                      const char *pszFrameType)
{
    char szBuf[32];
    switch (u32Magic)
    {
        case RTLOCKVALRECEXCL_MAGIC:
#ifdef RTLOCKVAL_WITH_VERBOSE_DUMPS
            RTAssertMsg2AddWeak("%s%p %s xrec=%p own=%s r=%u cls=%s/%s pos={%Rbn(%u) %Rfn %p} [x%s]%s", pszPrefix,
                                pRec->Excl.hLock, pRec->Excl.pszName, pRec,
                                rtLockValidatorNameThreadHandle(&pRec->Excl.hThread), cRecursion,
                                rtLockValComplainGetClassName(pRec->Excl.hClass),
                                rtLockValComplainGetSubClassName(pRec->Excl.uSubClass, szBuf),
                                pSrcPos->pszFile, pSrcPos->uLine, pSrcPos->pszFunction, pSrcPos->uId,
                                pszFrameType, pszSuffix);
#else
            RTAssertMsg2AddWeak("%s%p %s own=%s r=%u cls=%s/%s pos={%Rbn(%u) %Rfn %p} [x%s]%s", pszPrefix,
                                pRec->Excl.hLock, pRec->Excl.szName,
                                rtLockValidatorNameThreadHandle(&pRec->Excl.hThread), cRecursion,
                                rtLockValComplainGetClassName(pRec->Excl.hClass),
                                rtLockValComplainGetSubClassName(pRec->Excl.uSubClass, szBuf),
                                pSrcPos->pszFile, pSrcPos->uLine, pSrcPos->pszFunction, pSrcPos->uId,
                                pszFrameType, pszSuffix);
#endif
            break;

        case RTLOCKVALRECSHRD_MAGIC:
            RTAssertMsg2AddWeak("%ss %p %s srec=%p cls=%s/%s [s%s]%s", pszPrefix,
                                pRec->Shared.hLock, pRec->Shared.szName, pRec,
                                rtLockValComplainGetClassName(pRec->Shared.hClass),
                                rtLockValComplainGetSubClassName(pRec->Shared.uSubClass, szBuf),
                                pszFrameType, pszSuffix);
            break;

        case RTLOCKVALRECSHRDOWN_MAGIC:
        {
            PRTLOCKVALRECSHRD pShared = pRec->ShrdOwner.pSharedRec;
            if (   VALID_PTR(pShared)
                && pShared->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC)
#ifdef RTLOCKVAL_WITH_VERBOSE_DUMPS
                RTAssertMsg2AddWeak("%s%p %s srec=%p trec=%p own=%s r=%u cls=%s/%s pos={%Rbn(%u) %Rfn %p} [o%s]%s", pszPrefix,
                                    pShared->hLock, pShared->pszName, pShared,
                                    pRec, rtLockValidatorNameThreadHandle(&pRec->ShrdOwner.hThread), cRecursion,
                                    rtLockValComplainGetClassName(pShared->hClass),
                                    rtLockValComplainGetSubClassName(pShared->uSubClass, szBuf),
                                    pSrcPos->pszFile, pSrcPos->uLine, pSrcPos->pszFunction, pSrcPos->uId,
                                    pszFrameType, pszSuffix);
#else
                RTAssertMsg2AddWeak("%s%p %s own=%s r=%u cls=%s/%s pos={%Rbn(%u) %Rfn %p} [o%s]%s", pszPrefix,
                                    pShared->hLock, pShared->szName,
                                    rtLockValidatorNameThreadHandle(&pRec->ShrdOwner.hThread), cRecursion,
                                    rtLockValComplainGetClassName(pShared->hClass),
                                    rtLockValComplainGetSubClassName(pShared->uSubClass, szBuf),
                                    pSrcPos->pszFile, pSrcPos->uLine, pSrcPos->pszFunction, pSrcPos->uId,
                                    pszFrameType, pszSuffix);
#endif
            else
                RTAssertMsg2AddWeak("%sbad srec=%p trec=%p own=%s r=%u pos={%Rbn(%u) %Rfn %p} [x%s]%s", pszPrefix,
                                    pShared,
                                    pRec, rtLockValidatorNameThreadHandle(&pRec->ShrdOwner.hThread), cRecursion,
                                    pSrcPos->pszFile, pSrcPos->uLine, pSrcPos->pszFunction, pSrcPos->uId,
                                    pszFrameType, pszSuffix);
            break;
        }

        default:
            AssertMsgFailed(("%#x\n", u32Magic));
    }
}


/**
 * Describes the lock.
 *
 * @param   pszPrefix           Message prefix.
 * @param   pRec                The lock record we're working on.
 * @param   pszSuffix           Message suffix.
 */
static void rtLockValComplainAboutLock(const char *pszPrefix, PRTLOCKVALRECUNION pRec, const char *pszSuffix)
{
#ifdef RTLOCKVAL_WITH_RECURSION_RECORDS
# define FIX_REC(r)     1
#else
# define FIX_REC(r)     (r)
#endif
    if (   VALID_PTR(pRec)
        && !ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
    {
        switch (pRec->Core.u32Magic)
        {
            case RTLOCKVALRECEXCL_MAGIC:
                rtLockValComplainAboutLockHlp(pszPrefix, pRec, pszSuffix, RTLOCKVALRECEXCL_MAGIC,
                                              &pRec->Excl.SrcPos, FIX_REC(pRec->Excl.cRecursion), "");
                break;

            case RTLOCKVALRECSHRD_MAGIC:
                rtLockValComplainAboutLockHlp(pszPrefix, pRec, pszSuffix, RTLOCKVALRECSHRD_MAGIC, NULL, 0, "");
                break;

            case RTLOCKVALRECSHRDOWN_MAGIC:
                rtLockValComplainAboutLockHlp(pszPrefix, pRec, pszSuffix, RTLOCKVALRECSHRDOWN_MAGIC,
                                              &pRec->ShrdOwner.SrcPos, FIX_REC(pRec->ShrdOwner.cRecursion), "");
                break;

            case RTLOCKVALRECNEST_MAGIC:
            {
                PRTLOCKVALRECUNION pRealRec = pRec->Nest.pRec;
                uint32_t u32Magic;
                if (   VALID_PTR(pRealRec)
                    && (   (u32Magic = pRealRec->Core.u32Magic) == RTLOCKVALRECEXCL_MAGIC
                        || u32Magic == RTLOCKVALRECSHRD_MAGIC
                        || u32Magic == RTLOCKVALRECSHRDOWN_MAGIC)
                   )
                    rtLockValComplainAboutLockHlp(pszPrefix, pRealRec, pszSuffix, u32Magic,
                                                  &pRec->Nest.SrcPos, pRec->Nest.cRecursion, "/r");
                else
                    RTAssertMsg2AddWeak("%sbad rrec=%p nrec=%p r=%u pos={%Rbn(%u) %Rfn %p}%s", pszPrefix,
                                        pRealRec, pRec, pRec->Nest.cRecursion,
                                        pRec->Nest.SrcPos.pszFile, pRec->Nest.SrcPos.uLine, pRec->Nest.SrcPos.pszFunction, pRec->Nest.SrcPos.uId,
                                        pszSuffix);
                break;
            }

            default:
                RTAssertMsg2AddWeak("%spRec=%p u32Magic=%#x (bad)%s", pszPrefix, pRec, pRec->Core.u32Magic, pszSuffix);
                break;
        }
    }
#undef FIX_REC
}


/**
 * Dump the lock stack.
 *
 * @param   pThread             The thread whose lock stack we're gonna dump.
 * @param   cchIndent           The indentation in chars.
 * @param   cMinFrames          The minimum number of frames to consider
 *                              dumping.
 * @param   pHighightRec        Record that should be marked specially in the
 *                              dump.
 */
static void rtLockValComplainAboutLockStack(PRTTHREADINT pThread, unsigned cchIndent, uint32_t cMinFrames,
                                            PRTLOCKVALRECUNION pHighightRec)
{
    if (   VALID_PTR(pThread)
        && !ASMAtomicUoReadBool(&g_fLockValidatorQuiet)
        && pThread->u32Magic == RTTHREADINT_MAGIC
       )
    {
        uint32_t cEntries = rtLockValidatorStackDepth(pThread);
        if (cEntries >= cMinFrames)
        {
            RTAssertMsg2AddWeak("%*s---- start of lock stack for %p %s - %u entr%s ----\n", cchIndent, "",
                                pThread, pThread->szName, cEntries, cEntries == 1 ? "y" : "ies");
            PRTLOCKVALRECUNION pCur = rtLockValidatorReadRecUnionPtr(&pThread->LockValidator.pStackTop);
            for (uint32_t i = 0; VALID_PTR(pCur); i++)
            {
                char szPrefix[80];
                RTStrPrintf(szPrefix, sizeof(szPrefix), "%*s#%02u: ", cchIndent, "", i);
                rtLockValComplainAboutLock(szPrefix, pCur, pHighightRec != pCur ? "\n" : " (*)\n");
                switch (pCur->Core.u32Magic)
                {
                    case RTLOCKVALRECEXCL_MAGIC:    pCur = rtLockValidatorReadRecUnionPtr(&pCur->Excl.pDown);      break;
                    case RTLOCKVALRECSHRDOWN_MAGIC: pCur = rtLockValidatorReadRecUnionPtr(&pCur->ShrdOwner.pDown); break;
                    case RTLOCKVALRECNEST_MAGIC:    pCur = rtLockValidatorReadRecUnionPtr(&pCur->Nest.pDown);      break;
                    default:
                        RTAssertMsg2AddWeak("%*s<bad stack frame>\n", cchIndent, "");
                        pCur = NULL;
                        break;
                }
            }
            RTAssertMsg2AddWeak("%*s---- end of lock stack ----\n", cchIndent, "");
        }
    }
}


/**
 * Launch the initial complaint.
 *
 * @param   pszWhat             What we're complaining about.
 * @param   pSrcPos             Where we are complaining from, as it were.
 * @param   pThreadSelf         The calling thread.
 * @param   pRec                The main lock involved.  Can be NULL.
 * @param   fDumpStack          Whether to dump the lock stack (true) or not
 *                              (false).
 */
static void rtLockValComplainFirst(const char *pszWhat, PCRTLOCKVALSRCPOS pSrcPos, PRTTHREADINT pThreadSelf,
                                   PRTLOCKVALRECUNION pRec, bool fDumpStack)
{
    if (!ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
    {
        ASMCompilerBarrier(); /* paranoia */
        RTAssertMsg1Weak("RTLockValidator", pSrcPos ? pSrcPos->uLine : 0, pSrcPos ? pSrcPos->pszFile : NULL, pSrcPos ? pSrcPos->pszFunction : NULL);
        if (pSrcPos && pSrcPos->uId)
            RTAssertMsg2Weak("%s [uId=%p thrd=%s]\n", pszWhat, pSrcPos->uId, VALID_PTR(pThreadSelf) ? pThreadSelf->szName : "<NIL>");
        else
            RTAssertMsg2Weak("%s [thrd=%s]\n", pszWhat, VALID_PTR(pThreadSelf) ? pThreadSelf->szName : "<NIL>");
        rtLockValComplainAboutLock("Lock: ", pRec, "\n");
        if (fDumpStack)
            rtLockValComplainAboutLockStack(pThreadSelf, 0, 1, pRec);
    }
}


/**
 * Continue bitching.
 *
 * @param   pszFormat           Format string.
 * @param   ...                 Format arguments.
 */
static void rtLockValComplainMore(const char *pszFormat, ...)
{
    if (!ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
    {
        va_list va;
        va_start(va, pszFormat);
        RTAssertMsg2AddWeakV(pszFormat, va);
        va_end(va);
    }
}


/**
 * Raise a panic if enabled.
 */
static void rtLockValComplainPanic(void)
{
    if (ASMAtomicUoReadBool(&g_fLockValidatorMayPanic))
        RTAssertPanic();
}


/**
 * Copy a source position record.
 *
 * @param   pDst                The destination.
 * @param   pSrc                The source.  Can be NULL.
 */
DECL_FORCE_INLINE(void) rtLockValidatorSrcPosCopy(PRTLOCKVALSRCPOS pDst, PCRTLOCKVALSRCPOS pSrc)
{
    if (pSrc)
    {
        ASMAtomicUoWriteU32(&pDst->uLine, pSrc->uLine);
        ASMAtomicUoWritePtr(&pDst->pszFile, pSrc->pszFile);
        ASMAtomicUoWritePtr(&pDst->pszFunction, pSrc->pszFunction);
        ASMAtomicUoWritePtr((void * volatile *)&pDst->uId, (void *)pSrc->uId);
    }
    else
    {
        ASMAtomicUoWriteU32(&pDst->uLine, 0);
        ASMAtomicUoWriteNullPtr(&pDst->pszFile);
        ASMAtomicUoWriteNullPtr(&pDst->pszFunction);
        ASMAtomicUoWritePtr(&pDst->uId, (RTHCUINTPTR)0);
    }
}


/**
 * Init a source position record.
 *
 * @param   pSrcPos             The source position record.
 */
DECL_FORCE_INLINE(void) rtLockValidatorSrcPosInit(PRTLOCKVALSRCPOS pSrcPos)
{
    pSrcPos->pszFile     = NULL;
    pSrcPos->pszFunction = NULL;
    pSrcPos->uId         = 0;
    pSrcPos->uLine       = 0;
#if HC_ARCH_BITS == 64
    pSrcPos->u32Padding  = 0;
#endif
}


/**
 * Hashes the specified source position.
 *
 * @returns Hash.
 * @param   pSrcPos             The source position record.
 */
static uint32_t rtLockValidatorSrcPosHash(PCRTLOCKVALSRCPOS pSrcPos)
{
    uint32_t uHash;
    if (   (   pSrcPos->pszFile
            || pSrcPos->pszFunction)
        && pSrcPos->uLine != 0)
    {
        uHash = 0;
        if (pSrcPos->pszFile)
            uHash = sdbmInc(pSrcPos->pszFile, uHash);
        if (pSrcPos->pszFunction)
            uHash = sdbmInc(pSrcPos->pszFunction, uHash);
        uHash += pSrcPos->uLine;
    }
    else
    {
        Assert(pSrcPos->uId);
        uHash = (uint32_t)pSrcPos->uId;
    }

    return uHash;
}
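
/* For reference, a minimal sketch of the incremental sdbm string hash this
 * relies on (assumed shape of sdbmInc from internal/strhash.h; the classic
 * sdbm step is hash = ch + (hash << 6) + (hash << 16) - hash):
 *
 *     DECLINLINE(uint32_t) sdbmInc(const char *psz, uint32_t uHash)
 *     {
 *         unsigned char uch;
 *         while ((uch = (unsigned char)*psz++) != 0)
 *             uHash = uch + (uHash << 6) + (uHash << 16) - uHash;
 *         return uHash;
 *     }
 */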


/**
 * Compares two source positions.
 *
 * @returns 0 if equal, < 0 if pSrcPos1 is smaller than pSrcPos2, > 0
 *          otherwise.
 * @param   pSrcPos1            The first source position.
 * @param   pSrcPos2            The second source position.
 */
static int rtLockValidatorSrcPosCompare(PCRTLOCKVALSRCPOS pSrcPos1, PCRTLOCKVALSRCPOS pSrcPos2)
{
    if (pSrcPos1->uLine != pSrcPos2->uLine)
        return pSrcPos1->uLine < pSrcPos2->uLine ? -1 : 1;

    int iDiff = RTStrCmp(pSrcPos1->pszFile, pSrcPos2->pszFile);
    if (iDiff != 0)
        return iDiff;

    iDiff = RTStrCmp(pSrcPos1->pszFunction, pSrcPos2->pszFunction);
    if (iDiff != 0)
        return iDiff;

    if (pSrcPos1->uId != pSrcPos2->uId)
        return pSrcPos1->uId < pSrcPos2->uId ? -1 : 1;
    return 0;
}



/**
 * Serializes destruction of RTLOCKVALREC* and RTTHREADINT structures.
 */
DECLHIDDEN(void) rtLockValidatorSerializeDestructEnter(void)
{
    RTSEMXROADS hXRoads = g_hLockValidatorXRoads;
    if (hXRoads != NIL_RTSEMXROADS)
        RTSemXRoadsNSEnter(hXRoads);
}


/**
 * Call after rtLockValidatorSerializeDestructEnter.
 */
DECLHIDDEN(void) rtLockValidatorSerializeDestructLeave(void)
{
    RTSEMXROADS hXRoads = g_hLockValidatorXRoads;
    if (hXRoads != NIL_RTSEMXROADS)
        RTSemXRoadsNSLeave(hXRoads);
}


/**
 * Serializes deadlock detection against destruction of the objects being
 * inspected.
 */
DECLINLINE(void) rtLockValidatorSerializeDetectionEnter(void)
{
    RTSEMXROADS hXRoads = g_hLockValidatorXRoads;
    if (hXRoads != NIL_RTSEMXROADS)
        RTSemXRoadsEWEnter(hXRoads);
}


/**
 * Call after rtLockValidatorSerializeDetectionEnter.
 */
DECLHIDDEN(void) rtLockValidatorSerializeDetectionLeave(void)
{
    RTSEMXROADS hXRoads = g_hLockValidatorXRoads;
    if (hXRoads != NIL_RTSEMXROADS)
        RTSemXRoadsEWLeave(hXRoads);
}


/**
 * Initializes the per thread lock validator data.
 *
 * @param   pPerThread          The data.
 */
DECLHIDDEN(void) rtLockValidatorInitPerThread(RTLOCKVALPERTHREAD *pPerThread)
{
    pPerThread->bmFreeShrdOwners = UINT32_MAX;

    /* ASSUMES the rest has already been zeroed. */
    Assert(pPerThread->pRec == NULL);
    Assert(pPerThread->cWriteLocks == 0);
    Assert(pPerThread->cReadLocks == 0);
    Assert(pPerThread->fInValidator == false);
    Assert(pPerThread->pStackTop == NULL);
}


/**
 * Delete the per thread lock validator data.
 *
 * @param   pPerThread          The data.
 */
DECLHIDDEN(void) rtLockValidatorDeletePerThread(RTLOCKVALPERTHREAD *pPerThread)
{
    /*
     * Check that the thread doesn't own any locks at this time.
     */
    if (pPerThread->pStackTop)
    {
        rtLockValComplainFirst("Thread terminating owning locks!", NULL,
                               RT_FROM_MEMBER(pPerThread, RTTHREADINT, LockValidator),
                               pPerThread->pStackTop, true);
        rtLockValComplainPanic();
    }

    /*
     * Free the recursion records.
     */
    PRTLOCKVALRECNEST pCur = pPerThread->pFreeNestRecs;
    pPerThread->pFreeNestRecs = NULL;
    while (pCur)
    {
        PRTLOCKVALRECNEST pNext = pCur->pNextFree;
        RTMemFree(pCur);
        pCur = pNext;
    }
}

RTDECL(int) RTLockValidatorClassCreateEx(PRTLOCKVALCLASS phClass, PCRTLOCKVALSRCPOS pSrcPos,
                                         bool fAutodidact, bool fRecursionOk, bool fStrictReleaseOrder,
                                         RTMSINTERVAL cMsMinDeadlock, RTMSINTERVAL cMsMinOrder,
                                         const char *pszNameFmt, ...)
{
    va_list va;
    va_start(va, pszNameFmt);
    int rc = RTLockValidatorClassCreateExV(phClass, pSrcPos, fAutodidact, fRecursionOk, fStrictReleaseOrder,
                                           cMsMinDeadlock, cMsMinOrder, pszNameFmt, va);
    va_end(va);
    return rc;
}


RTDECL(int) RTLockValidatorClassCreateExV(PRTLOCKVALCLASS phClass, PCRTLOCKVALSRCPOS pSrcPos,
                                          bool fAutodidact, bool fRecursionOk, bool fStrictReleaseOrder,
                                          RTMSINTERVAL cMsMinDeadlock, RTMSINTERVAL cMsMinOrder,
                                          const char *pszNameFmt, va_list va)
{
    Assert(cMsMinDeadlock >= 1);
    Assert(cMsMinOrder    >= 1);
    AssertPtr(pSrcPos);

    /*
     * Format the name and calc its length.
     */
    size_t cbName;
    char   szName[32];
    if (pszNameFmt && *pszNameFmt)
        cbName = RTStrPrintfV(szName, sizeof(szName), pszNameFmt, va) + 1;
    else
    {
        static uint32_t volatile s_cAnonymous = 0;
        uint32_t i = ASMAtomicIncU32(&s_cAnonymous);
        cbName = RTStrPrintf(szName, sizeof(szName), "anon-%u", i - 1) + 1;
    }

    /*
     * Figure out the file and function name lengths and allocate memory for
     * it all.
     */
    size_t const cbFile     = pSrcPos->pszFile     ? strlen(pSrcPos->pszFile) + 1 : 0;
    size_t const cbFunction = pSrcPos->pszFunction ? strlen(pSrcPos->pszFunction) + 1 : 0;
    RTLOCKVALCLASSINT *pThis = (RTLOCKVALCLASSINT *)RTMemAllocVar(sizeof(*pThis) + cbFile + cbFunction + cbName);
    if (!pThis)
        return VERR_NO_MEMORY;

    /*
     * Initialize the class data.
     */
    pThis->Core.Key             = rtLockValidatorSrcPosHash(pSrcPos);
    pThis->Core.uchHeight       = 0;
    pThis->Core.pLeft           = NULL;
    pThis->Core.pRight          = NULL;
    pThis->Core.pList           = NULL;
    pThis->u32Magic             = RTLOCKVALCLASS_MAGIC;
    pThis->cRefs                = 1;
    pThis->fAutodidact          = fAutodidact;
    pThis->fRecursionOk         = fRecursionOk;
    pThis->fStrictReleaseOrder  = fStrictReleaseOrder;
    pThis->fInTree              = false;
    pThis->fDonateRefToNextRetainer = false;
    pThis->afReserved[0]        = false;
    pThis->afReserved[1]        = false;
    pThis->afReserved[2]        = false;
    pThis->cMsMinDeadlock       = cMsMinDeadlock;
    pThis->cMsMinOrder          = cMsMinOrder;
    for (unsigned i = 0; i < RT_ELEMENTS(pThis->au32Reserved); i++)
        pThis->au32Reserved[i] = 0;
    for (unsigned i = 0; i < RT_ELEMENTS(pThis->PriorLocks.aRefs); i++)
    {
        pThis->PriorLocks.aRefs[i].hClass           = NIL_RTLOCKVALCLASS;
        pThis->PriorLocks.aRefs[i].cLookups         = 0;
        pThis->PriorLocks.aRefs[i].fAutodidacticism = false;
        pThis->PriorLocks.aRefs[i].afReserved[0]    = false;
        pThis->PriorLocks.aRefs[i].afReserved[1]    = false;
        pThis->PriorLocks.aRefs[i].afReserved[2]    = false;
    }
    pThis->PriorLocks.pNext     = NULL;
    for (unsigned i = 0; i < RT_ELEMENTS(pThis->apPriorLocksHash); i++)
        pThis->apPriorLocksHash[i] = NULL;
    char *pszDst = (char *)(pThis + 1);
    pThis->pszName = (char *)memcpy(pszDst, szName, cbName);
    pszDst += cbName;
    rtLockValidatorSrcPosCopy(&pThis->CreatePos, pSrcPos);
    pThis->CreatePos.pszFile     = pSrcPos->pszFile     ? (char *)memcpy(pszDst, pSrcPos->pszFile, cbFile) : NULL;
    pszDst += cbFile;
    pThis->CreatePos.pszFunction = pSrcPos->pszFunction ? (char *)memcpy(pszDst, pSrcPos->pszFunction, cbFunction) : NULL;
    Assert(rtLockValidatorSrcPosHash(&pThis->CreatePos) == pThis->Core.Key);
#ifdef RTLOCKVAL_WITH_CLASS_HASH_STATS
    pThis->cHashHits   = 0;
    pThis->cHashMisses = 0;
#endif

    *phClass = pThis;
    return VINF_SUCCESS;
}


RTDECL(int) RTLockValidatorClassCreate(PRTLOCKVALCLASS phClass, bool fAutodidact, RT_SRC_POS_DECL, const char *pszNameFmt, ...)
{
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_POS_NO_ID();
    va_list va;
    va_start(va, pszNameFmt);
    int rc = RTLockValidatorClassCreateExV(phClass, &SrcPos,
                                           fAutodidact, true /*fRecursionOk*/, false /*fStrictReleaseOrder*/,
                                           1 /*cMsMinDeadlock*/, 1 /*cMsMinOrder*/,
                                           pszNameFmt, va);
    va_end(va);
    return rc;
}


/**
 * Creates a new lock validator class with a reference that is consumed by the
 * first call to RTLockValidatorClassRetain.
 *
 * This is tailored for use in the parameter list of a semaphore constructor.
 *
 * @returns Class handle with a reference that is automatically consumed by the
 *          first retainer.  NIL_RTLOCKVALCLASS if we run into trouble.
 *
 * @param   pszFile             The source position of the call, file.
 * @param   iLine               The source position of the call, line.
 * @param   pszFunction         The source position of the call, function.
 * @param   pszNameFmt          Class name format string, optional (NULL).  Max
 *                              length is 32 bytes.
 * @param   ...                 Format string arguments.
 */
RTDECL(RTLOCKVALCLASS) RTLockValidatorClassCreateUnique(RT_SRC_POS_DECL, const char *pszNameFmt, ...)
{
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_POS_NO_ID();
    RTLOCKVALCLASSINT *pClass;
    va_list va;
    va_start(va, pszNameFmt);
    int rc = RTLockValidatorClassCreateExV(&pClass, &SrcPos,
                                           true /*fAutodidact*/, true /*fRecursionOk*/, false /*fStrictReleaseOrder*/,
                                           1 /*cMsMinDeadlock*/, 1 /*cMsMinOrder*/,
                                           pszNameFmt, va);
    va_end(va);
    if (RT_FAILURE(rc))
        return NIL_RTLOCKVALCLASS;
    ASMAtomicWriteBool(&pClass->fDonateRefToNextRetainer, true); /* see rtLockValidatorClassRetain */
    return pClass;
}
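
/* Usage sketch (hypothetical call site, not from this file): the donated
 * reference lets the handle be created directly in a semaphore constructor's
 * argument list without leaking a class reference:
 *
 *     RTSEMMUTEX hMtx;
 *     int rc = RTSemMutexCreateEx(&hMtx, 0,
 *                                 RTLockValidatorClassCreateUnique(RT_SRC_POS, "MyLock"),
 *                                 RTLOCKVAL_SUB_CLASS_NONE, "MyLock");
 */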


/**
 * Internal class retainer.
 * @returns The new reference count.
 * @param   pClass              The class.
 */
DECL_FORCE_INLINE(uint32_t) rtLockValidatorClassRetain(RTLOCKVALCLASSINT *pClass)
{
    uint32_t cRefs = ASMAtomicIncU32(&pClass->cRefs);
    if (cRefs > RTLOCKVALCLASS_MAX_REFS)
        ASMAtomicWriteU32(&pClass->cRefs, RTLOCKVALCLASS_MAX_REFS);
    else if (   cRefs == 2
             && ASMAtomicXchgBool(&pClass->fDonateRefToNextRetainer, false))
        cRefs = ASMAtomicDecU32(&pClass->cRefs);
    return cRefs;
}


/**
 * Validates and retains a lock validator class.
 *
 * @returns @a hClass on success, NIL_RTLOCKVALCLASS on failure.
 * @param   hClass              The class handle.  NIL_RTLOCKVALCLASS is ok.
 */
DECL_FORCE_INLINE(RTLOCKVALCLASS) rtLockValidatorClassValidateAndRetain(RTLOCKVALCLASS hClass)
{
    if (hClass == NIL_RTLOCKVALCLASS)
        return hClass;
    AssertPtrReturn(hClass, NIL_RTLOCKVALCLASS);
    AssertReturn(hClass->u32Magic == RTLOCKVALCLASS_MAGIC, NIL_RTLOCKVALCLASS);
    rtLockValidatorClassRetain(hClass);
    return hClass;
}


/**
 * Internal class releaser.
 * @returns The new reference count.
 * @param   pClass              The class.
 */
DECLINLINE(uint32_t) rtLockValidatorClassRelease(RTLOCKVALCLASSINT *pClass)
{
    uint32_t cRefs = ASMAtomicDecU32(&pClass->cRefs);
    if (cRefs + 1 == RTLOCKVALCLASS_MAX_REFS)
        ASMAtomicWriteU32(&pClass->cRefs, RTLOCKVALCLASS_MAX_REFS);
    else if (!cRefs)
        rtLockValidatorClassDestroy(pClass);
    return cRefs;
}


/**
 * Destroys a class once there are no more references to it.
 *
 * @param   pClass              The class.
 */
static void rtLockValidatorClassDestroy(RTLOCKVALCLASSINT *pClass)
{
    AssertReturnVoid(!pClass->fInTree);
    ASMAtomicWriteU32(&pClass->u32Magic, RTLOCKVALCLASS_MAGIC_DEAD);

    PRTLOCKVALCLASSREFCHUNK pChunk = &pClass->PriorLocks;
    while (pChunk)
    {
        for (uint32_t i = 0; i < RT_ELEMENTS(pChunk->aRefs); i++)
        {
            RTLOCKVALCLASSINT *pClass2 = pChunk->aRefs[i].hClass;
            if (pClass2 != NIL_RTLOCKVALCLASS)
            {
                pChunk->aRefs[i].hClass = NIL_RTLOCKVALCLASS;
                rtLockValidatorClassRelease(pClass2);
            }
        }

        PRTLOCKVALCLASSREFCHUNK pNext = pChunk->pNext;
        pChunk->pNext = NULL;
        if (pChunk != &pClass->PriorLocks)
            RTMemFree(pChunk);
        pChunk = pNext;
    }

    RTMemFree(pClass);
}


RTDECL(RTLOCKVALCLASS) RTLockValidatorClassFindForSrcPos(PRTLOCKVALSRCPOS pSrcPos)
{
    if (g_hLockValClassTreeRWLock == NIL_RTSEMRW)
        rtLockValidatorLazyInit();
    int rcLock = RTSemRWRequestRead(g_hLockValClassTreeRWLock, RT_INDEFINITE_WAIT);

    uint32_t uSrcPosHash = rtLockValidatorSrcPosHash(pSrcPos);
    RTLOCKVALCLASSINT *pClass = (RTLOCKVALCLASSINT *)RTAvllU32Get(&g_LockValClassTree, uSrcPosHash);
    while (pClass)
    {
        if (rtLockValidatorSrcPosCompare(&pClass->CreatePos, pSrcPos) == 0)
            break;
        pClass = (RTLOCKVALCLASSINT *)pClass->Core.pList;
    }

    if (RT_SUCCESS(rcLock))
        RTSemRWReleaseRead(g_hLockValClassTreeRWLock);
    return pClass;
}


RTDECL(RTLOCKVALCLASS) RTLockValidatorClassForSrcPos(RT_SRC_POS_DECL, const char *pszNameFmt, ...)
{
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_POS_NO_ID();
    RTLOCKVALCLASS  hClass = RTLockValidatorClassFindForSrcPos(&SrcPos);
    if (hClass == NIL_RTLOCKVALCLASS)
    {
        /*
         * Create a new class and insert it into the tree.
         */
        va_list va;
        va_start(va, pszNameFmt);
        int rc = RTLockValidatorClassCreateExV(&hClass, &SrcPos,
                                               true /*fAutodidact*/, true /*fRecursionOk*/, false /*fStrictReleaseOrder*/,
                                               1 /*cMsMinDeadlock*/, 1 /*cMsMinOrder*/,
                                               pszNameFmt, va);
        va_end(va);
        if (RT_SUCCESS(rc))
        {
            if (g_hLockValClassTreeRWLock == NIL_RTSEMRW)
                rtLockValidatorLazyInit();
            int rcLock = RTSemRWRequestWrite(g_hLockValClassTreeRWLock, RT_INDEFINITE_WAIT);

            Assert(!hClass->fInTree);
            hClass->fInTree = RTAvllU32Insert(&g_LockValClassTree, &hClass->Core);
            Assert(hClass->fInTree);

            if (RT_SUCCESS(rcLock))
                RTSemRWReleaseWrite(g_hLockValClassTreeRWLock);
            return hClass;
        }
    }
    return hClass;
}
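
/* Usage sketch (hypothetical): every lock created at the same source position
 * shares one autodidactic class, so locking-order rules get learned per call
 * site:
 *
 *     RTLOCKVALCLASS hClass = RTLockValidatorClassForSrcPos(RT_SRC_POS, "pool-worker");
 */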


RTDECL(uint32_t) RTLockValidatorClassRetain(RTLOCKVALCLASS hClass)
{
    RTLOCKVALCLASSINT *pClass = hClass;
    AssertPtrReturn(pClass, UINT32_MAX);
    AssertReturn(pClass->u32Magic == RTLOCKVALCLASS_MAGIC, UINT32_MAX);
    return rtLockValidatorClassRetain(pClass);
}


RTDECL(uint32_t) RTLockValidatorClassRelease(RTLOCKVALCLASS hClass)
{
    RTLOCKVALCLASSINT *pClass = hClass;
    if (pClass == NIL_RTLOCKVALCLASS)
        return 0;
    AssertPtrReturn(pClass, UINT32_MAX);
    AssertReturn(pClass->u32Magic == RTLOCKVALCLASS_MAGIC, UINT32_MAX);
    return rtLockValidatorClassRelease(pClass);
}


/**
 * Worker for rtLockValidatorClassIsPriorClass that does a linear search thru
 * all the chunks for @a pPriorClass.
 *
 * @returns true / false.
 * @param   pClass              The class to search.
 * @param   pPriorClass         The class to search for.
 */
static bool rtLockValidatorClassIsPriorClassByLinearSearch(RTLOCKVALCLASSINT *pClass, RTLOCKVALCLASSINT *pPriorClass)
{
    for (PRTLOCKVALCLASSREFCHUNK pChunk = &pClass->PriorLocks; pChunk; pChunk = pChunk->pNext)
        for (uint32_t i = 0; i < RT_ELEMENTS(pChunk->aRefs); i++)
        {
            if (pChunk->aRefs[i].hClass == pPriorClass)
            {
                uint32_t cLookups = ASMAtomicIncU32(&pChunk->aRefs[i].cLookups);
                if (RT_UNLIKELY(cLookups >= RTLOCKVALCLASSREF_MAX_LOOKUPS_FIX))
                {
                    ASMAtomicWriteU32(&pChunk->aRefs[i].cLookups, RTLOCKVALCLASSREF_MAX_LOOKUPS);
                    cLookups = RTLOCKVALCLASSREF_MAX_LOOKUPS;
                }

                /* update the hash table entry. */
                PRTLOCKVALCLASSREF *ppHashEntry = &pClass->apPriorLocksHash[RTLOCKVALCLASS_HASH(pPriorClass)];
                if (   !(*ppHashEntry)
                    || (*ppHashEntry)->cLookups + 128 < cLookups)
                    ASMAtomicWritePtr(ppHashEntry, &pChunk->aRefs[i]);

#ifdef RTLOCKVAL_WITH_CLASS_HASH_STATS
                ASMAtomicIncU32(&pClass->cHashMisses);
#endif
                return true;
            }
        }

    return false;
}


/**
 * Checks if @a pPriorClass is a known prior class.
 *
 * @returns true / false.
 * @param   pClass              The class to search.
 * @param   pPriorClass         The class to search for.
 */
DECL_FORCE_INLINE(bool) rtLockValidatorClassIsPriorClass(RTLOCKVALCLASSINT *pClass, RTLOCKVALCLASSINT *pPriorClass)
{
    /*
     * Hash lookup here.
     */
    PRTLOCKVALCLASSREF pRef = pClass->apPriorLocksHash[RTLOCKVALCLASS_HASH(pPriorClass)];
    if (   pRef
        && pRef->hClass == pPriorClass)
    {
        uint32_t cLookups = ASMAtomicIncU32(&pRef->cLookups);
        if (RT_UNLIKELY(cLookups >= RTLOCKVALCLASSREF_MAX_LOOKUPS_FIX))
            ASMAtomicWriteU32(&pRef->cLookups, RTLOCKVALCLASSREF_MAX_LOOKUPS);
#ifdef RTLOCKVAL_WITH_CLASS_HASH_STATS
        ASMAtomicIncU32(&pClass->cHashHits);
#endif
        return true;
    }

    return rtLockValidatorClassIsPriorClassByLinearSearch(pClass, pPriorClass);
}


/**
 * Adds a class to the prior list.
 *
 * @returns VINF_SUCCESS, VERR_NO_MEMORY or VERR_SEM_LV_WRONG_ORDER.
 * @param   pClass              The class to work on.
 * @param   pPriorClass         The class to add.
 * @param   fAutodidacticism    Whether we're teaching ourselves (true) or
 *                              somebody is teaching us via the API (false).
 * @param   pSrcPos             Where this rule was added (optional).
 */
static int rtLockValidatorClassAddPriorClass(RTLOCKVALCLASSINT *pClass, RTLOCKVALCLASSINT *pPriorClass,
                                             bool fAutodidacticism, PCRTLOCKVALSRCPOS pSrcPos)
{
    if (!RTCritSectIsInitialized(&g_LockValClassTeachCS))
        rtLockValidatorLazyInit();
    int rcLock = RTCritSectEnter(&g_LockValClassTeachCS);

    /*
     * Check that there are no conflicts (no assert since we might race each other).
     */
    int rc = VERR_SEM_LV_INTERNAL_ERROR;
    if (!rtLockValidatorClassIsPriorClass(pPriorClass, pClass))
    {
        if (!rtLockValidatorClassIsPriorClass(pClass, pPriorClass))
        {
            /*
             * Scan the table for a free entry, allocating a new chunk if necessary.
             */
            for (PRTLOCKVALCLASSREFCHUNK pChunk = &pClass->PriorLocks; ; pChunk = pChunk->pNext)
            {
                bool fDone = false;
                for (uint32_t i = 0; i < RT_ELEMENTS(pChunk->aRefs); i++)
                {
                    ASMAtomicCmpXchgHandle(&pChunk->aRefs[i].hClass, pPriorClass, NIL_RTLOCKVALCLASS, fDone);
                    if (fDone)
                    {
                        pChunk->aRefs[i].fAutodidacticism = fAutodidacticism;
                        rtLockValidatorClassRetain(pPriorClass);
                        rc = VINF_SUCCESS;
                        break;
                    }
                }
                if (fDone)
                    break;

                /* If no more chunks, allocate a new one and insert the class before linking it. */
                if (!pChunk->pNext)
                {
                    PRTLOCKVALCLASSREFCHUNK pNew = (PRTLOCKVALCLASSREFCHUNK)RTMemAlloc(sizeof(*pNew));
                    if (!pNew)
                    {
                        rc = VERR_NO_MEMORY;
                        break;
                    }
                    pNew->pNext = NULL;
                    for (uint32_t i = 0; i < RT_ELEMENTS(pNew->aRefs); i++)
                    {
                        pNew->aRefs[i].hClass           = NIL_RTLOCKVALCLASS;
                        pNew->aRefs[i].cLookups         = 0;
                        pNew->aRefs[i].fAutodidacticism = false;
                        pNew->aRefs[i].afReserved[0]    = false;
                        pNew->aRefs[i].afReserved[1]    = false;
                        pNew->aRefs[i].afReserved[2]    = false;
                    }

                    pNew->aRefs[0].hClass           = pPriorClass;
                    pNew->aRefs[0].fAutodidacticism = fAutodidacticism;

                    ASMAtomicWritePtr(&pChunk->pNext, pNew);
                    rtLockValidatorClassRetain(pPriorClass);
                    rc = VINF_SUCCESS;
                    break;
                }
            } /* chunk loop */
        }
        else
            rc = VINF_SUCCESS;
    }
    else
        rc = !g_fLockValSoftWrongOrder ? VERR_SEM_LV_WRONG_ORDER : VINF_SUCCESS;

    if (RT_SUCCESS(rcLock))
        RTCritSectLeave(&g_LockValClassTeachCS);
    return rc;
}


RTDECL(int) RTLockValidatorClassAddPriorClass(RTLOCKVALCLASS hClass, RTLOCKVALCLASS hPriorClass)
{
    RTLOCKVALCLASSINT *pClass = hClass;
    AssertPtrReturn(pClass, VERR_INVALID_HANDLE);
    AssertReturn(pClass->u32Magic == RTLOCKVALCLASS_MAGIC, VERR_INVALID_HANDLE);

    RTLOCKVALCLASSINT *pPriorClass = hPriorClass;
    AssertPtrReturn(pPriorClass, VERR_INVALID_HANDLE);
    AssertReturn(pPriorClass->u32Magic == RTLOCKVALCLASS_MAGIC, VERR_INVALID_HANDLE);

    return rtLockValidatorClassAddPriorClass(pClass, pPriorClass, false /*fAutodidacticism*/, NULL);
}
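
/* Usage sketch (hypothetical handles): teach the validator that a lock of
 * hClassA may be held while acquiring a lock of hClassB, i.e. A comes before
 * B in the locking order:
 *
 *     int rc = RTLockValidatorClassAddPriorClass(hClassB, hClassA);
 *     AssertRC(rc);
 */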


RTDECL(int) RTLockValidatorClassEnforceStrictReleaseOrder(RTLOCKVALCLASS hClass, bool fEnabled)
{
    RTLOCKVALCLASSINT *pClass = hClass;
    AssertPtrReturn(pClass, VERR_INVALID_HANDLE);
    AssertReturn(pClass->u32Magic == RTLOCKVALCLASS_MAGIC, VERR_INVALID_HANDLE);

    ASMAtomicWriteBool(&pClass->fStrictReleaseOrder, fEnabled);
    return VINF_SUCCESS;
}


/**
 * Unlinks all siblings.
 *
 * This is used during record deletion and assumes no races.
 *
 * @param   pCore               One of the siblings.
 */
static void rtLockValidatorUnlinkAllSiblings(PRTLOCKVALRECCORE pCore)
{
    /* ASSUMES sibling destruction doesn't involve any races and that all
       related records are to be disposed of now. */
    PRTLOCKVALRECUNION pSibling = (PRTLOCKVALRECUNION)pCore;
    while (pSibling)
    {
        PRTLOCKVALRECUNION volatile *ppCoreNext;
        switch (pSibling->Core.u32Magic)
        {
            case RTLOCKVALRECEXCL_MAGIC:
            case RTLOCKVALRECEXCL_MAGIC_DEAD:
                ppCoreNext = &pSibling->Excl.pSibling;
                break;

            case RTLOCKVALRECSHRD_MAGIC:
            case RTLOCKVALRECSHRD_MAGIC_DEAD:
                ppCoreNext = &pSibling->Shared.pSibling;
                break;

            default:
                AssertFailed();
                ppCoreNext = NULL;
                break;
        }
        if (RT_UNLIKELY(!ppCoreNext))
            break;
        pSibling = ASMAtomicXchgPtrT(ppCoreNext, NULL, PRTLOCKVALRECUNION);
    }
}


RTDECL(int) RTLockValidatorRecMakeSiblings(PRTLOCKVALRECCORE pRec1, PRTLOCKVALRECCORE pRec2)
{
    /*
     * Validate input.
     */
    PRTLOCKVALRECUNION p1 = (PRTLOCKVALRECUNION)pRec1;
    PRTLOCKVALRECUNION p2 = (PRTLOCKVALRECUNION)pRec2;

    AssertPtrReturn(p1, VERR_SEM_LV_INVALID_PARAMETER);
    AssertReturn(   p1->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC
                 || p1->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
                 , VERR_SEM_LV_INVALID_PARAMETER);

    AssertPtrReturn(p2, VERR_SEM_LV_INVALID_PARAMETER);
    AssertReturn(   p2->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC
                 || p2->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
                 , VERR_SEM_LV_INVALID_PARAMETER);

    /*
     * Link them (circular list).
     */
    if (   p1->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC
        && p2->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC)
    {
        p1->Excl.pSibling   = p2;
        p2->Shared.pSibling = p1;
    }
    else if (   p1->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
             && p2->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC)
    {
        p1->Shared.pSibling = p2;
        p2->Excl.pSibling   = p1;
    }
    else
        AssertFailedReturn(VERR_SEM_LV_INVALID_PARAMETER); /* unsupported mix */

    return VINF_SUCCESS;
}
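
/* Usage sketch (hypothetical): a read/write semaphore implementation would
 * link its exclusive (write) record and its shared (read) record so the
 * validator treats them as two views of the same lock:
 *
 *     rc = RTLockValidatorRecMakeSiblings(&pThis->ValidatorWrite.Core,
 *                                         &pThis->ValidatorRead.Core);
 */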


/**
 * Gets the lock name for the given record.
 *
 * @returns Read-only lock name.
 * @param   pRec                The lock record.
 */
DECL_FORCE_INLINE(const char *) rtLockValidatorRecName(PRTLOCKVALRECUNION pRec)
{
    switch (pRec->Core.u32Magic)
    {
        case RTLOCKVALRECEXCL_MAGIC:
            return pRec->Excl.szName;
        case RTLOCKVALRECSHRD_MAGIC:
            return pRec->Shared.szName;
        case RTLOCKVALRECSHRDOWN_MAGIC:
            return pRec->ShrdOwner.pSharedRec ? pRec->ShrdOwner.pSharedRec->szName : "orphaned";
        case RTLOCKVALRECNEST_MAGIC:
            pRec = rtLockValidatorReadRecUnionPtr(&pRec->Nest.pRec);
            if (VALID_PTR(pRec))
            {
                switch (pRec->Core.u32Magic)
                {
                    case RTLOCKVALRECEXCL_MAGIC:
                        return pRec->Excl.szName;
                    case RTLOCKVALRECSHRD_MAGIC:
                        return pRec->Shared.szName;
                    case RTLOCKVALRECSHRDOWN_MAGIC:
                        return pRec->ShrdOwner.pSharedRec ? pRec->ShrdOwner.pSharedRec->szName : "orphaned";
                    default:
                        return "unknown-nested";
                }
            }
            return "orphaned-nested";
        default:
            return "unknown";
    }
}


/**
 * Gets the class for this locking record.
 *
 * @returns Pointer to the class or NIL_RTLOCKVALCLASS.
 * @param   pRec                The lock validator record.
 */
DECLINLINE(RTLOCKVALCLASSINT *) rtLockValidatorRecGetClass(PRTLOCKVALRECUNION pRec)
{
    switch (pRec->Core.u32Magic)
    {
        case RTLOCKVALRECEXCL_MAGIC:
            return pRec->Excl.hClass;

        case RTLOCKVALRECSHRD_MAGIC:
            return pRec->Shared.hClass;

        case RTLOCKVALRECSHRDOWN_MAGIC:
        {
            PRTLOCKVALRECSHRD pSharedRec = pRec->ShrdOwner.pSharedRec;
            if (RT_LIKELY(   VALID_PTR(pSharedRec)
                          && pSharedRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC))
                return pSharedRec->hClass;
            return NIL_RTLOCKVALCLASS;
        }

        case RTLOCKVALRECNEST_MAGIC:
        {
            PRTLOCKVALRECUNION pRealRec = pRec->Nest.pRec;
            if (VALID_PTR(pRealRec))
            {
                switch (pRealRec->Core.u32Magic)
                {
                    case RTLOCKVALRECEXCL_MAGIC:
                        return pRealRec->Excl.hClass;

                    case RTLOCKVALRECSHRDOWN_MAGIC:
                    {
                        PRTLOCKVALRECSHRD pSharedRec = pRealRec->ShrdOwner.pSharedRec;
                        if (RT_LIKELY(   VALID_PTR(pSharedRec)
                                      && pSharedRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC))
                            return pSharedRec->hClass;
                        break;
                    }

                    default:
                        AssertMsgFailed(("%p %p %#x\n", pRec, pRealRec, pRealRec->Core.u32Magic));
                        break;
                }
            }
            return NIL_RTLOCKVALCLASS;
        }

        default:
            AssertMsgFailed(("%#x\n", pRec->Core.u32Magic));
            return NIL_RTLOCKVALCLASS;
    }
}


/**
 * Gets the class for this locking record and the pointer to the one below it
 * in the stack.
 *
 * @returns Pointer to the class or NIL_RTLOCKVALCLASS.
 * @param   pRec                The lock validator record.
 * @param   puSubClass          Where to return the sub-class.
 * @param   ppDown              Where to return the pointer to the record below.
 */
DECL_FORCE_INLINE(RTLOCKVALCLASSINT *)
rtLockValidatorRecGetClassesAndDown(PRTLOCKVALRECUNION pRec, uint32_t *puSubClass, PRTLOCKVALRECUNION *ppDown)
{
    switch (pRec->Core.u32Magic)
    {
        case RTLOCKVALRECEXCL_MAGIC:
            *ppDown = pRec->Excl.pDown;
            *puSubClass = pRec->Excl.uSubClass;
            return pRec->Excl.hClass;

        case RTLOCKVALRECSHRD_MAGIC:
            *ppDown = NULL;
            *puSubClass = pRec->Shared.uSubClass;
            return pRec->Shared.hClass;

        case RTLOCKVALRECSHRDOWN_MAGIC:
        {
            *ppDown = pRec->ShrdOwner.pDown;

            PRTLOCKVALRECSHRD pSharedRec = pRec->ShrdOwner.pSharedRec;
            if (RT_LIKELY(   VALID_PTR(pSharedRec)
                          && pSharedRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC))
            {
                *puSubClass = pSharedRec->uSubClass;
                return pSharedRec->hClass;
            }
            *puSubClass = RTLOCKVAL_SUB_CLASS_NONE;
            return NIL_RTLOCKVALCLASS;
        }

        case RTLOCKVALRECNEST_MAGIC:
        {
            *ppDown = pRec->Nest.pDown;

            PRTLOCKVALRECUNION pRealRec = pRec->Nest.pRec;
            if (VALID_PTR(pRealRec))
            {
                switch (pRealRec->Core.u32Magic)
                {
                    case RTLOCKVALRECEXCL_MAGIC:
                        *puSubClass = pRealRec->Excl.uSubClass;
                        return pRealRec->Excl.hClass;

                    case RTLOCKVALRECSHRDOWN_MAGIC:
                    {
                        PRTLOCKVALRECSHRD pSharedRec = pRealRec->ShrdOwner.pSharedRec;
                        if (RT_LIKELY(   VALID_PTR(pSharedRec)
                                      && pSharedRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC))
                        {
                            *puSubClass = pSharedRec->uSubClass;
                            return pSharedRec->hClass;
                        }
                        break;
                    }

                    default:
                        AssertMsgFailed(("%p %p %#x\n", pRec, pRealRec, pRealRec->Core.u32Magic));
                        break;
                }
            }
            *puSubClass = RTLOCKVAL_SUB_CLASS_NONE;
            return NIL_RTLOCKVALCLASS;
        }

        default:
            AssertMsgFailed(("%#x\n", pRec->Core.u32Magic));
            *ppDown = NULL;
            *puSubClass = RTLOCKVAL_SUB_CLASS_NONE;
            return NIL_RTLOCKVALCLASS;
    }
}
1750
1751
1752/**
1753 * Gets the sub-class for a lock record.
1754 *
1755 * @returns the sub-class.
1756 * @param pRec The lock validator record.
1757 */
1758DECLINLINE(uint32_t) rtLockValidatorRecGetSubClass(PRTLOCKVALRECUNION pRec)
1759{
1760 switch (pRec->Core.u32Magic)
1761 {
1762 case RTLOCKVALRECEXCL_MAGIC:
1763 return pRec->Excl.uSubClass;
1764
1765 case RTLOCKVALRECSHRD_MAGIC:
1766 return pRec->Shared.uSubClass;
1767
1768 case RTLOCKVALRECSHRDOWN_MAGIC:
1769 {
1770 PRTLOCKVALRECSHRD pSharedRec = pRec->ShrdOwner.pSharedRec;
1771 if (RT_LIKELY( VALID_PTR(pSharedRec)
1772 && pSharedRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC))
1773 return pSharedRec->uSubClass;
1774 return RTLOCKVAL_SUB_CLASS_NONE;
1775 }
1776
1777 case RTLOCKVALRECNEST_MAGIC:
1778 {
1779 PRTLOCKVALRECUNION pRealRec = pRec->Nest.pRec;
1780 if (VALID_PTR(pRealRec))
1781 {
1782 switch (pRealRec->Core.u32Magic)
1783 {
1784 case RTLOCKVALRECEXCL_MAGIC:
1785                        return pRealRec->Excl.uSubClass;
1786
1787 case RTLOCKVALRECSHRDOWN_MAGIC:
1788 {
1789 PRTLOCKVALRECSHRD pSharedRec = pRealRec->ShrdOwner.pSharedRec;
1790 if (RT_LIKELY( VALID_PTR(pSharedRec)
1791 && pSharedRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC))
1792 return pSharedRec->uSubClass;
1793 break;
1794 }
1795
1796 default:
1797 AssertMsgFailed(("%p %p %#x\n", pRec, pRealRec, pRealRec->Core.u32Magic));
1798 break;
1799 }
1800 }
1801 return RTLOCKVAL_SUB_CLASS_NONE;
1802 }
1803
1804 default:
1805 AssertMsgFailed(("%#x\n", pRec->Core.u32Magic));
1806 return RTLOCKVAL_SUB_CLASS_NONE;
1807 }
1808}
1809
1810
1811
1812
1813/**
1814 * Calculates the depth of a lock stack.
1815 *
1816 * @returns Number of stack frames.
1817 * @param pThread The thread.
1818 */
1819static uint32_t rtLockValidatorStackDepth(PRTTHREADINT pThread)
1820{
1821 uint32_t cEntries = 0;
1822 PRTLOCKVALRECUNION pCur = rtLockValidatorReadRecUnionPtr(&pThread->LockValidator.pStackTop);
1823 while (VALID_PTR(pCur))
1824 {
1825 switch (pCur->Core.u32Magic)
1826 {
1827 case RTLOCKVALRECEXCL_MAGIC:
1828 pCur = rtLockValidatorReadRecUnionPtr(&pCur->Excl.pDown);
1829 break;
1830
1831 case RTLOCKVALRECSHRDOWN_MAGIC:
1832 pCur = rtLockValidatorReadRecUnionPtr(&pCur->ShrdOwner.pDown);
1833 break;
1834
1835 case RTLOCKVALRECNEST_MAGIC:
1836 pCur = rtLockValidatorReadRecUnionPtr(&pCur->Nest.pDown);
1837 break;
1838
1839 default:
1840 AssertMsgFailedReturn(("%#x\n", pCur->Core.u32Magic), cEntries);
1841 }
1842 cEntries++;
1843 }
1844 return cEntries;
1845}
1846
1847
1848#ifdef RT_STRICT
1849/**
1850 * Checks if the stack contains @a pRec.
1851 *
1852 * @returns true / false.
1853 * @param pThreadSelf The current thread.
1854 * @param pRec The lock record.
1855 */
1856static bool rtLockValidatorStackContainsRec(PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION pRec)
1857{
1858 PRTLOCKVALRECUNION pCur = pThreadSelf->LockValidator.pStackTop;
1859 while (pCur)
1860 {
1861 AssertPtrReturn(pCur, false);
1862 if (pCur == pRec)
1863 return true;
1864 switch (pCur->Core.u32Magic)
1865 {
1866 case RTLOCKVALRECEXCL_MAGIC:
1867 Assert(pCur->Excl.cRecursion >= 1);
1868 pCur = pCur->Excl.pDown;
1869 break;
1870
1871 case RTLOCKVALRECSHRDOWN_MAGIC:
1872 Assert(pCur->ShrdOwner.cRecursion >= 1);
1873 pCur = pCur->ShrdOwner.pDown;
1874 break;
1875
1876 case RTLOCKVALRECNEST_MAGIC:
1877 Assert(pCur->Nest.cRecursion > 1);
1878 pCur = pCur->Nest.pDown;
1879 break;
1880
1881 default:
1882 AssertMsgFailedReturn(("%#x\n", pCur->Core.u32Magic), false);
1883 }
1884 }
1885 return false;
1886}
1887#endif /* RT_STRICT */
1888
1889
1890/**
1891 * Pushes a lock record onto the stack.
1892 *
1893 * @param pThreadSelf The current thread.
1894 * @param pRec The lock record.
1895 */
1896static void rtLockValidatorStackPush(PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION pRec)
1897{
1898 Assert(pThreadSelf == RTThreadSelf());
1899 Assert(!rtLockValidatorStackContainsRec(pThreadSelf, pRec));
1900
1901 switch (pRec->Core.u32Magic)
1902 {
1903 case RTLOCKVALRECEXCL_MAGIC:
1904 Assert(pRec->Excl.cRecursion == 1);
1905 Assert(pRec->Excl.pDown == NULL);
1906 rtLockValidatorWriteRecUnionPtr(&pRec->Excl.pDown, pThreadSelf->LockValidator.pStackTop);
1907 break;
1908
1909 case RTLOCKVALRECSHRDOWN_MAGIC:
1910 Assert(pRec->ShrdOwner.cRecursion == 1);
1911 Assert(pRec->ShrdOwner.pDown == NULL);
1912 rtLockValidatorWriteRecUnionPtr(&pRec->ShrdOwner.pDown, pThreadSelf->LockValidator.pStackTop);
1913 break;
1914
1915 default:
1916 AssertMsgFailedReturnVoid(("%#x\n", pRec->Core.u32Magic));
1917 }
1918 rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pStackTop, pRec);
1919}
1920
1921
1922/**
1923 * Pops a lock record off the stack.
1924 *
1925 * @param pThreadSelf The current thread.
1926 * @param pRec The lock.
1927 */
1928static void rtLockValidatorStackPop(PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION pRec)
1929{
1930 Assert(pThreadSelf == RTThreadSelf());
1931
1932 PRTLOCKVALRECUNION pDown;
1933 switch (pRec->Core.u32Magic)
1934 {
1935 case RTLOCKVALRECEXCL_MAGIC:
1936 Assert(pRec->Excl.cRecursion == 0);
1937 pDown = pRec->Excl.pDown;
1938 rtLockValidatorWriteRecUnionPtr(&pRec->Excl.pDown, NULL); /* lazy bird */
1939 break;
1940
1941 case RTLOCKVALRECSHRDOWN_MAGIC:
1942 Assert(pRec->ShrdOwner.cRecursion == 0);
1943 pDown = pRec->ShrdOwner.pDown;
1944 rtLockValidatorWriteRecUnionPtr(&pRec->ShrdOwner.pDown, NULL);
1945 break;
1946
1947 default:
1948 AssertMsgFailedReturnVoid(("%#x\n", pRec->Core.u32Magic));
1949 }
1950 if (pThreadSelf->LockValidator.pStackTop == pRec)
1951 rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pStackTop, pDown);
1952 else
1953 {
1954 /* Find the pointer to our record and unlink ourselves. */
1955 PRTLOCKVALRECUNION pCur = pThreadSelf->LockValidator.pStackTop;
1956 while (pCur)
1957 {
1958 PRTLOCKVALRECUNION volatile *ppDown;
1959 switch (pCur->Core.u32Magic)
1960 {
1961 case RTLOCKVALRECEXCL_MAGIC:
1962 Assert(pCur->Excl.cRecursion >= 1);
1963 ppDown = &pCur->Excl.pDown;
1964 break;
1965
1966 case RTLOCKVALRECSHRDOWN_MAGIC:
1967 Assert(pCur->ShrdOwner.cRecursion >= 1);
1968 ppDown = &pCur->ShrdOwner.pDown;
1969 break;
1970
1971 case RTLOCKVALRECNEST_MAGIC:
1972 Assert(pCur->Nest.cRecursion >= 1);
1973 ppDown = &pCur->Nest.pDown;
1974 break;
1975
1976 default:
1977 AssertMsgFailedReturnVoid(("%#x\n", pCur->Core.u32Magic));
1978 }
1979 pCur = *ppDown;
1980 if (pCur == pRec)
1981 {
1982 rtLockValidatorWriteRecUnionPtr(ppDown, pDown);
1983 return;
1984 }
1985 }
1986 AssertMsgFailed(("%p %p\n", pRec, pThreadSelf));
1987 }
1988}
1989
1990
1991/**
1992 * Creates and pushes a lock recursion record onto the stack.
1993 *
1994 * @param pThreadSelf The current thread.
1995 * @param pRec The lock record.
1996 * @param pSrcPos Where the recursion occurred.
1997 */
1998static void rtLockValidatorStackPushRecursion(PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION pRec, PCRTLOCKVALSRCPOS pSrcPos)
1999{
2000 Assert(pThreadSelf == RTThreadSelf());
2001 Assert(rtLockValidatorStackContainsRec(pThreadSelf, pRec));
2002
2003#ifdef RTLOCKVAL_WITH_RECURSION_RECORDS
2004 /*
2005 * Allocate a new recursion record
2006 */
2007 PRTLOCKVALRECNEST pRecursionRec = pThreadSelf->LockValidator.pFreeNestRecs;
2008 if (pRecursionRec)
2009 pThreadSelf->LockValidator.pFreeNestRecs = pRecursionRec->pNextFree;
2010 else
2011 {
2012 pRecursionRec = (PRTLOCKVALRECNEST)RTMemAlloc(sizeof(*pRecursionRec));
2013 if (!pRecursionRec)
2014 return;
2015 }
2016
2017 /*
2018 * Initialize it.
2019 */
2020 switch (pRec->Core.u32Magic)
2021 {
2022 case RTLOCKVALRECEXCL_MAGIC:
2023 pRecursionRec->cRecursion = pRec->Excl.cRecursion;
2024 break;
2025
2026 case RTLOCKVALRECSHRDOWN_MAGIC:
2027 pRecursionRec->cRecursion = pRec->ShrdOwner.cRecursion;
2028 break;
2029
2030 default:
2031 AssertMsgFailed(("%#x\n", pRec->Core.u32Magic));
2032 rtLockValidatorSerializeDestructEnter();
2033 rtLockValidatorSerializeDestructLeave();
2034 RTMemFree(pRecursionRec);
2035 return;
2036 }
2037 Assert(pRecursionRec->cRecursion > 1);
2038 pRecursionRec->pRec = pRec;
2039 pRecursionRec->pDown = NULL;
2040 pRecursionRec->pNextFree = NULL;
2041 rtLockValidatorSrcPosCopy(&pRecursionRec->SrcPos, pSrcPos);
2042 pRecursionRec->Core.u32Magic = RTLOCKVALRECNEST_MAGIC;
2043
2044 /*
2045 * Link it.
2046 */
2047 pRecursionRec->pDown = pThreadSelf->LockValidator.pStackTop;
2048 rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pStackTop, (PRTLOCKVALRECUNION)pRecursionRec);
2049#endif /* RTLOCKVAL_WITH_RECURSION_RECORDS */
2050}
2051
2052
2053/**
2054 * Pops a lock recursion record off the stack.
2055 *
2056 * @param pThreadSelf The current thread.
2057 * @param pRec The lock record.
2058 */
2059static void rtLockValidatorStackPopRecursion(PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION pRec)
2060{
2061 Assert(pThreadSelf == RTThreadSelf());
2062 Assert(rtLockValidatorStackContainsRec(pThreadSelf, pRec));
2063
2064 uint32_t cRecursion;
2065 switch (pRec->Core.u32Magic)
2066 {
2067 case RTLOCKVALRECEXCL_MAGIC: cRecursion = pRec->Excl.cRecursion; break;
2068 case RTLOCKVALRECSHRDOWN_MAGIC: cRecursion = pRec->ShrdOwner.cRecursion; break;
2069 default: AssertMsgFailedReturnVoid(("%#x\n", pRec->Core.u32Magic));
2070 }
2071 Assert(cRecursion >= 1);
2072
2073#ifdef RTLOCKVAL_WITH_RECURSION_RECORDS
2074 /*
2075 * Pop the recursion record.
2076 */
2077 PRTLOCKVALRECUNION pNest = pThreadSelf->LockValidator.pStackTop;
2078 if ( pNest != NULL
2079 && pNest->Core.u32Magic == RTLOCKVALRECNEST_MAGIC
2080 && pNest->Nest.pRec == pRec
2081 )
2082 {
2083 Assert(pNest->Nest.cRecursion == cRecursion + 1);
2084 rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pStackTop, pNest->Nest.pDown);
2085 }
2086 else
2087 {
2088 /* Find the record above ours. */
2089 PRTLOCKVALRECUNION volatile *ppDown = NULL;
2090 for (;;)
2091 {
2092 AssertMsgReturnVoid(pNest, ("%p %p\n", pRec, pThreadSelf));
2093 switch (pNest->Core.u32Magic)
2094 {
2095 case RTLOCKVALRECEXCL_MAGIC:
2096 ppDown = &pNest->Excl.pDown;
2097 pNest = *ppDown;
2098 continue;
2099 case RTLOCKVALRECSHRDOWN_MAGIC:
2100 ppDown = &pNest->ShrdOwner.pDown;
2101 pNest = *ppDown;
2102 continue;
2103 case RTLOCKVALRECNEST_MAGIC:
2104 if (pNest->Nest.pRec == pRec)
2105 break;
2106 ppDown = &pNest->Nest.pDown;
2107 pNest = *ppDown;
2108 continue;
2109 default:
2110 AssertMsgFailedReturnVoid(("%#x\n", pNest->Core.u32Magic));
2111 }
2112 break; /* ugly */
2113 }
2114 Assert(pNest->Nest.cRecursion == cRecursion + 1);
2115 rtLockValidatorWriteRecUnionPtr(ppDown, pNest->Nest.pDown);
2116 }
2117
2118 /*
2119 * Invalidate and free the record.
2120 */
2121    ASMAtomicWriteU32(&pNest->Core.u32Magic, RTLOCKVALRECNEST_MAGIC_DEAD);
2122 rtLockValidatorWriteRecUnionPtr(&pNest->Nest.pDown, NULL);
2123 rtLockValidatorWriteRecUnionPtr(&pNest->Nest.pRec, NULL);
2124 pNest->Nest.cRecursion = 0;
2125 pNest->Nest.pNextFree = pThreadSelf->LockValidator.pFreeNestRecs;
2126 pThreadSelf->LockValidator.pFreeNestRecs = &pNest->Nest;
2127#endif /* RTLOCKVAL_WITH_RECURSION_RECORDS */
2128}
2129
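/*
 * Note on the structures involved: the per-thread lock stack maintained by
 * the push/pop functions above is a singly linked list running through the
 * pDown members, with LockValidator.pStackTop as the head.  As a sketch
 * (record names are hypothetical), after a thread takes mutex A, takes
 * mutex B and then re-enters B once, the stack looks like this when
 * RTLOCKVAL_WITH_RECURSION_RECORDS is defined:
 *
 *      pStackTop -> [NEST pRec=&B cRecursion=2] -> [EXCL B] -> [EXCL A] -> NULL
 *
 * Without recursion records no extra node is inserted and the re-entry is
 * tracked solely by B's own cRecursion counter.
 */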
2130
2131/**
2132 * Helper for rtLockValidatorStackCheckLockingOrder that does the bitching and
2133 * returns VERR_SEM_LV_WRONG_ORDER.
2134 */
2135static int rtLockValidatorStackWrongOrder(const char *pszWhat, PCRTLOCKVALSRCPOS pSrcPos, PRTTHREADINT pThreadSelf,
2136 PRTLOCKVALRECUNION pRec1, PRTLOCKVALRECUNION pRec2,
2137 RTLOCKVALCLASSINT *pClass1, RTLOCKVALCLASSINT *pClass2)
2140{
2141 rtLockValComplainFirst(pszWhat, pSrcPos, pThreadSelf, pRec1, false);
2142 rtLockValComplainAboutLock("Other lock: ", pRec2, "\n");
2143 rtLockValComplainAboutClass("My class: ", pClass1, rtLockValidatorRecGetSubClass(pRec1), true /*fVerbose*/);
2144 rtLockValComplainAboutClass("Other class: ", pClass2, rtLockValidatorRecGetSubClass(pRec2), true /*fVerbose*/);
2145 rtLockValComplainAboutLockStack(pThreadSelf, 0, 0, pRec2);
2146 rtLockValComplainPanic();
2147 return !g_fLockValSoftWrongOrder ? VERR_SEM_LV_WRONG_ORDER : VINF_SUCCESS;
2148}
2149
2150
2151/**
2152 * Checks if the sub-class order is ok or not.
2153 *
2154 * Used to deal with two locks from the same class.
2155 *
2156 * @returns true if ok, false if not.
2157 * @param uSubClass1 The sub-class of the lock that is being
2158 * considered.
2159 * @param uSubClass2 The sub-class of the lock that is already being
2160 * held.
2161 */
2162DECL_FORCE_INLINE(bool) rtLockValidatorIsSubClassOrderOk(uint32_t uSubClass1, uint32_t uSubClass2)
2163{
2164 if (uSubClass1 > uSubClass2)
2165 {
2166 /* NONE kills ANY. */
2167 if (uSubClass2 == RTLOCKVAL_SUB_CLASS_NONE)
2168 return false;
2169 return true;
2170 }
2171
2172 /* ANY counters all USER values. (uSubClass1 == NONE only if they are equal) */
2173 AssertCompile(RTLOCKVAL_SUB_CLASS_ANY > RTLOCKVAL_SUB_CLASS_NONE);
2174 if (uSubClass1 == RTLOCKVAL_SUB_CLASS_ANY)
2175 return true;
2176 return false;
2177}
2178
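/*
 * Concrete cases of the sub-class rule above, for a thread taking a lock
 * with uSubClass1 while holding one of the same class with uSubClass2
 * (recall that NONE < ANY < USER):
 *
 *      uSubClass1  uSubClass2  verdict
 *      USER + 1    USER        ok   - strictly ascending order
 *      USER        USER + 1    bad  - descending order
 *      USER        USER        bad  - equal values, not a recursion
 *      ANY         USER        ok   - ANY counters USER
 *      USER        NONE        bad  - NONE kills everything
 */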
2179
2180/**
2181 * Checks if the class and sub-class lock order is ok.
2182 *
2183 * @returns true if ok, false if not.
2184 * @param pClass1 The class of the lock that is being considered.
2185 * @param uSubClass1 The sub-class that goes with @a pClass1.
2186 * @param pClass2 The class of the lock that is already being
2187 * held.
2188 * @param uSubClass2 The sub-class that goes with @a pClass2.
2189 */
2190DECL_FORCE_INLINE(bool) rtLockValidatorIsClassOrderOk(RTLOCKVALCLASSINT *pClass1, uint32_t uSubClass1,
2191 RTLOCKVALCLASSINT *pClass2, uint32_t uSubClass2)
2192{
2193 if (pClass1 == pClass2)
2194 return rtLockValidatorIsSubClassOrderOk(uSubClass1, uSubClass2);
2195 return rtLockValidatorClassIsPriorClass(pClass1, pClass2);
2196}
2197
2198
2199/**
2200 * Checks the locking order, part two.
2201 *
2202 * @returns VINF_SUCCESS, VERR_SEM_LV_WRONG_ORDER or VERR_SEM_LV_INTERNAL_ERROR.
2203 * @param pClass The lock class.
2204 * @param uSubClass The lock sub-class.
2205 * @param pThreadSelf The current thread.
2206 * @param pRec The lock record.
2207 * @param pSrcPos The source position of the locking operation.
2208 */
2209static int rtLockValidatorStackCheckLockingOrder2(RTLOCKVALCLASSINT * const pClass, uint32_t const uSubClass,
2210 PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION const pRec,
2211 PCRTLOCKVALSRCPOS const pSrcPos,
2212 RTLOCKVALCLASSINT * const pFirstBadClass,
2213 PRTLOCKVALRECUNION const pFirstBadRec,
2214 PRTLOCKVALRECUNION const pFirstBadDown)
2215{
2216 /*
2217     * Something went wrong; the caller's pCur (passed in as pFirstBadRec) points to where.
2218 */
2219 if ( pClass == pFirstBadClass
2220 || rtLockValidatorClassIsPriorClass(pFirstBadClass, pClass))
2221 return rtLockValidatorStackWrongOrder("Wrong locking order!", pSrcPos, pThreadSelf,
2222 pRec, pFirstBadRec, pClass, pFirstBadClass);
2223 if (!pClass->fAutodidact)
2224 return rtLockValidatorStackWrongOrder("Wrong locking order! (unknown)", pSrcPos, pThreadSelf,
2225 pRec, pFirstBadRec, pClass, pFirstBadClass);
2226
2227 /*
2228 * This class is an autodidact, so we have to check out the rest of the stack
2229 * for direct violations.
2230 */
2231 uint32_t cNewRules = 1;
2232 PRTLOCKVALRECUNION pCur = pFirstBadDown;
2233 while (pCur)
2234 {
2235 AssertPtrReturn(pCur, VERR_SEM_LV_INTERNAL_ERROR);
2236
2237 if (pCur->Core.u32Magic == RTLOCKVALRECNEST_MAGIC)
2238 pCur = pCur->Nest.pDown;
2239 else
2240 {
2241 PRTLOCKVALRECUNION pDown;
2242 uint32_t uPriorSubClass;
2243 RTLOCKVALCLASSINT *pPriorClass = rtLockValidatorRecGetClassesAndDown(pCur, &uPriorSubClass, &pDown);
2244 if (pPriorClass != NIL_RTLOCKVALCLASS)
2245 {
2246 AssertPtrReturn(pPriorClass, VERR_SEM_LV_INTERNAL_ERROR);
2247 AssertReturn(pPriorClass->u32Magic == RTLOCKVALCLASS_MAGIC, VERR_SEM_LV_INTERNAL_ERROR);
2248 if (!rtLockValidatorIsClassOrderOk(pClass, uSubClass, pPriorClass, uPriorSubClass))
2249 {
2250 if ( pClass == pPriorClass
2251 || rtLockValidatorClassIsPriorClass(pPriorClass, pClass))
2252 return rtLockValidatorStackWrongOrder("Wrong locking order! (more than one)", pSrcPos, pThreadSelf,
2253 pRec, pCur, pClass, pPriorClass);
2254 cNewRules++;
2255 }
2256 }
2257 pCur = pDown;
2258 }
2259 }
2260
2261 if (cNewRules == 1)
2262 {
2263 /*
2264 * Special case the simple operation, hoping that it will be a
2265 * frequent case.
2266 */
2267 int rc = rtLockValidatorClassAddPriorClass(pClass, pFirstBadClass, true /*fAutodidacticism*/, pSrcPos);
2268 if (rc == VERR_SEM_LV_WRONG_ORDER)
2269 return rtLockValidatorStackWrongOrder("Wrong locking order! (race)", pSrcPos, pThreadSelf,
2270 pRec, pFirstBadRec, pClass, pFirstBadClass);
2271 Assert(RT_SUCCESS(rc) || rc == VERR_NO_MEMORY);
2272 }
2273 else
2274 {
2275 /*
2276 * We may be adding more than one rule, so we have to take the lock
2277 * before starting to add the rules. This means we have to check
2278 * the state after taking it since we might be racing someone adding
2279 * a conflicting rule.
2280 */
2281 if (!RTCritSectIsInitialized(&g_LockValClassTeachCS))
2282 rtLockValidatorLazyInit();
2283 int rcLock = RTCritSectEnter(&g_LockValClassTeachCS);
2284
2285 /* Check */
2286 pCur = pFirstBadRec;
2287 while (pCur)
2288 {
2289 if (pCur->Core.u32Magic == RTLOCKVALRECNEST_MAGIC)
2290 pCur = pCur->Nest.pDown;
2291 else
2292 {
2293 uint32_t uPriorSubClass;
2294 PRTLOCKVALRECUNION pDown;
2295 RTLOCKVALCLASSINT *pPriorClass = rtLockValidatorRecGetClassesAndDown(pCur, &uPriorSubClass, &pDown);
2296 if (pPriorClass != NIL_RTLOCKVALCLASS)
2297 {
2298 if (!rtLockValidatorIsClassOrderOk(pClass, uSubClass, pPriorClass, uPriorSubClass))
2299 {
2300 if ( pClass == pPriorClass
2301 || rtLockValidatorClassIsPriorClass(pPriorClass, pClass))
2302 {
2303 if (RT_SUCCESS(rcLock))
2304 RTCritSectLeave(&g_LockValClassTeachCS);
2305 return rtLockValidatorStackWrongOrder("Wrong locking order! (2nd)", pSrcPos, pThreadSelf,
2306 pRec, pCur, pClass, pPriorClass);
2307 }
2308 }
2309 }
2310 pCur = pDown;
2311 }
2312 }
2313
2314 /* Iterate the stack yet again, adding new rules this time. */
2315 pCur = pFirstBadRec;
2316 while (pCur)
2317 {
2318 if (pCur->Core.u32Magic == RTLOCKVALRECNEST_MAGIC)
2319 pCur = pCur->Nest.pDown;
2320 else
2321 {
2322 uint32_t uPriorSubClass;
2323 PRTLOCKVALRECUNION pDown;
2324 RTLOCKVALCLASSINT *pPriorClass = rtLockValidatorRecGetClassesAndDown(pCur, &uPriorSubClass, &pDown);
2325 if (pPriorClass != NIL_RTLOCKVALCLASS)
2326 {
2327 if (!rtLockValidatorIsClassOrderOk(pClass, uSubClass, pPriorClass, uPriorSubClass))
2328 {
2329 Assert( pClass != pPriorClass
2330 && !rtLockValidatorClassIsPriorClass(pPriorClass, pClass));
2331 int rc = rtLockValidatorClassAddPriorClass(pClass, pPriorClass, true /*fAutodidacticism*/, pSrcPos);
2332 if (RT_FAILURE(rc))
2333 {
2334 Assert(rc == VERR_NO_MEMORY);
2335 break;
2336 }
2337 Assert(rtLockValidatorClassIsPriorClass(pClass, pPriorClass));
2338 }
2339 }
2340 pCur = pDown;
2341 }
2342 }
2343
2344 if (RT_SUCCESS(rcLock))
2345 RTCritSectLeave(&g_LockValClassTeachCS);
2346 }
2347
2348 return VINF_SUCCESS;
2349}
2350
2351
2352
2353/**
2354 * Checks the locking order.
2355 *
2356 * @returns VINF_SUCCESS, VERR_SEM_LV_WRONG_ORDER or VERR_SEM_LV_INTERNAL_ERROR.
2357 * @param pClass The lock class.
2358 * @param uSubClass The lock sub-class.
2359 * @param pThreadSelf The current thread.
2360 * @param pRec The lock record.
2361 * @param pSrcPos The source position of the locking operation.
2362 */
2363static int rtLockValidatorStackCheckLockingOrder(RTLOCKVALCLASSINT * const pClass, uint32_t const uSubClass,
2364 PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION const pRec,
2365 PCRTLOCKVALSRCPOS pSrcPos)
2366{
2367 /*
2368 * Some internal paranoia first.
2369 */
2370 AssertPtr(pClass);
2371 Assert(pClass->u32Magic == RTLOCKVALCLASS_MAGIC);
2372 AssertPtr(pThreadSelf);
2373 Assert(pThreadSelf->u32Magic == RTTHREADINT_MAGIC);
2374 AssertPtr(pRec);
2375 AssertPtrNull(pSrcPos);
2376
2377 /*
2378 * Walk the stack, delegate problems to a worker routine.
2379 */
2380 PRTLOCKVALRECUNION pCur = pThreadSelf->LockValidator.pStackTop;
2381 if (!pCur)
2382 return VINF_SUCCESS;
2383
2384 for (;;)
2385 {
2386 AssertPtrReturn(pCur, VERR_SEM_LV_INTERNAL_ERROR);
2387
2388 if (pCur->Core.u32Magic == RTLOCKVALRECNEST_MAGIC)
2389 pCur = pCur->Nest.pDown;
2390 else
2391 {
2392 uint32_t uPriorSubClass;
2393 PRTLOCKVALRECUNION pDown;
2394 RTLOCKVALCLASSINT *pPriorClass = rtLockValidatorRecGetClassesAndDown(pCur, &uPriorSubClass, &pDown);
2395 if (pPriorClass != NIL_RTLOCKVALCLASS)
2396 {
2397 AssertPtrReturn(pPriorClass, VERR_SEM_LV_INTERNAL_ERROR);
2398 AssertReturn(pPriorClass->u32Magic == RTLOCKVALCLASS_MAGIC, VERR_SEM_LV_INTERNAL_ERROR);
2399 if (RT_UNLIKELY(!rtLockValidatorIsClassOrderOk(pClass, uSubClass, pPriorClass, uPriorSubClass)))
2400 return rtLockValidatorStackCheckLockingOrder2(pClass, uSubClass, pThreadSelf, pRec, pSrcPos,
2401 pPriorClass, pCur, pDown);
2402 }
2403 pCur = pDown;
2404 }
2405 if (!pCur)
2406 return VINF_SUCCESS;
2407 }
2408}
2409
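#if 0
/* A sketch (not compiled in) of how the order validation above typically
   fires.  It assumes the public RTLockValidatorClassCreate and
   RTLockValidatorClassAddPriorClass APIs from iprt/lockvalidator.h, glosses
   over error checking and cleanup, and assumes the class defaults allow
   order validation at a 30 second timeout (cMsMinOrder <= 30000). */
static void lockOrderIllustration(void)
{
    RTLOCKVALCLASS hClassA, hClassB;
    RTLockValidatorClassCreate(&hClassA, false /*fAutodidact*/, RT_SRC_POS, "example-A");
    RTLockValidatorClassCreate(&hClassB, false /*fAutodidact*/, RT_SRC_POS, "example-B");
    RTLockValidatorClassAddPriorClass(hClassA, hClassB); /* B may be held when taking A. */

    RTLOCKVALRECEXCL RecA, RecB;
    RTLockValidatorRecExclInit(&RecA, hClassA, RTLOCKVAL_SUB_CLASS_NONE, &RecA, true /*fEnabled*/, "MtxA");
    RTLockValidatorRecExclInit(&RecB, hClassB, RTLOCKVAL_SUB_CLASS_NONE, &RecB, true /*fEnabled*/, "MtxB");

    /* Taking B and then A is fine because hClassB is a prior class of hClassA: */
    RTLockValidatorRecExclSetOwner(&RecB, NIL_RTTHREAD, NULL, false /*fFirstRecursion*/);
    int rc = RTLockValidatorRecExclCheckOrder(&RecA, NIL_RTTHREAD, NULL, 30000);
    Assert(rc == VINF_SUCCESS);

    /* The reverse - holding A and then asking for B - walks the stack above
       and trips rtLockValidatorStackCheckLockingOrder2, yielding
       VERR_SEM_LV_WRONG_ORDER (or VINF_SUCCESS in soft mode). */
}
#endif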
2410
2411/**
2412 * Check that the lock record is the topmost one on the stack, complain and fail
2413 * if it isn't.
2414 *
2415 * @returns VINF_SUCCESS, VERR_SEM_LV_WRONG_RELEASE_ORDER or
2416 * VERR_SEM_LV_INVALID_PARAMETER.
2417 * @param pThreadSelf The current thread.
2418 * @param pRec The record.
2419 */
2420static int rtLockValidatorStackCheckReleaseOrder(PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION pRec)
2421{
2422 AssertReturn(pThreadSelf != NIL_RTTHREAD, VERR_SEM_LV_INVALID_PARAMETER);
2423 Assert(pThreadSelf == RTThreadSelf());
2424
2425 PRTLOCKVALRECUNION pTop = pThreadSelf->LockValidator.pStackTop;
2426 if (RT_LIKELY( pTop == pRec
2427 || ( pTop
2428 && pTop->Core.u32Magic == RTLOCKVALRECNEST_MAGIC
2429 && pTop->Nest.pRec == pRec) ))
2430 return VINF_SUCCESS;
2431
2432#ifdef RTLOCKVAL_WITH_RECURSION_RECORDS
2433 /* Look for a recursion record so the right frame is dumped and marked. */
2434 while (pTop)
2435 {
2436 if (pTop->Core.u32Magic == RTLOCKVALRECNEST_MAGIC)
2437 {
2438 if (pTop->Nest.pRec == pRec)
2439 {
2440 pRec = pTop;
2441 break;
2442 }
2443 pTop = pTop->Nest.pDown;
2444 }
2445 else if (pTop->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC)
2446 pTop = pTop->Excl.pDown;
2447 else if (pTop->Core.u32Magic == RTLOCKVALRECSHRDOWN_MAGIC)
2448 pTop = pTop->ShrdOwner.pDown;
2449 else
2450 break;
2451 }
2452#endif
2453
2454 rtLockValComplainFirst("Wrong release order!", NULL, pThreadSelf, pRec, true);
2455 rtLockValComplainPanic();
2456 return !g_fLockValSoftWrongOrder ? VERR_SEM_LV_WRONG_RELEASE_ORDER : VINF_SUCCESS;
2457}
2458
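/*
 * Note: the strict release order enforced above is opt-in per class (via
 * what iprt/lockvalidator.h presumably exposes as
 * RTLockValidatorClassEnforceStrictReleaseOrder).  With it enabled, taking
 * A then B and releasing A while B is still held means A is no longer the
 * stack top, so the release fails with VERR_SEM_LV_WRONG_RELEASE_ORDER
 * (VINF_SUCCESS in soft mode).
 */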
2459
2460/**
2461 * Checks if all owners are blocked - shared record operated in signaller mode.
2462 *
2463 * @returns true / false accordingly.
2464 * @param pRec The record.
2465 * @param pThreadSelf The current thread.
2466 */
2467DECL_FORCE_INLINE(bool) rtLockValidatorDdAreAllThreadsBlocked(PRTLOCKVALRECSHRD pRec, PRTTHREADINT pThreadSelf)
2468{
2469 PRTLOCKVALRECSHRDOWN volatile *papOwners = pRec->papOwners;
2470 uint32_t cAllocated = pRec->cAllocated;
2471 uint32_t cEntries = ASMAtomicUoReadU32(&pRec->cEntries);
2472 if (cEntries == 0)
2473 return false;
2474
2475 for (uint32_t i = 0; i < cAllocated; i++)
2476 {
2477 PRTLOCKVALRECSHRDOWN pEntry = rtLockValidatorUoReadSharedOwner(&papOwners[i]);
2478 if ( pEntry
2479 && pEntry->Core.u32Magic == RTLOCKVALRECSHRDOWN_MAGIC)
2480 {
2481 PRTTHREADINT pCurThread = rtLockValidatorReadThreadHandle(&pEntry->hThread);
2482 if (!pCurThread)
2483 return false;
2484 if (pCurThread->u32Magic != RTTHREADINT_MAGIC)
2485 return false;
2486 if ( !RTTHREAD_IS_SLEEPING(rtThreadGetState(pCurThread))
2487 && pCurThread != pThreadSelf)
2488 return false;
2489 if (--cEntries == 0)
2490 break;
2491 }
2492 else
2493 Assert(!pEntry || pEntry->Core.u32Magic == RTLOCKVALRECSHRDOWN_MAGIC_DEAD);
2494 }
2495
2496 return true;
2497}
2498
2499
2500/**
2501 * Verifies the deadlock stack before calling it a deadlock.
2502 *
2503 * @retval VERR_SEM_LV_DEADLOCK if it's a deadlock.
2504 * @retval VERR_SEM_LV_ILLEGAL_UPGRADE if it's a deadlock on the same lock.
2505 * @retval VERR_TRY_AGAIN if something changed.
2506 *
2507 * @param pStack The deadlock detection stack.
2508 * @param pThreadSelf The current thread.
2509 */
2510static int rtLockValidatorDdVerifyDeadlock(PRTLOCKVALDDSTACK pStack, PRTTHREADINT pThreadSelf)
2511{
2512 uint32_t const c = pStack->c;
2513 for (uint32_t iPass = 0; iPass < 3; iPass++)
2514 {
2515 for (uint32_t i = 1; i < c; i++)
2516 {
2517 PRTTHREADINT pThread = pStack->a[i].pThread;
2518 if (pThread->u32Magic != RTTHREADINT_MAGIC)
2519 return VERR_TRY_AGAIN;
2520 if (rtThreadGetState(pThread) != pStack->a[i].enmState)
2521 return VERR_TRY_AGAIN;
2522 if (rtLockValidatorReadRecUnionPtr(&pThread->LockValidator.pRec) != pStack->a[i].pFirstSibling)
2523 return VERR_TRY_AGAIN;
2524 /* ASSUMES the signaller records won't have siblings! */
2525 PRTLOCKVALRECUNION pRec = pStack->a[i].pRec;
2526 if ( pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
2527 && pRec->Shared.fSignaller
2528 && !rtLockValidatorDdAreAllThreadsBlocked(&pRec->Shared, pThreadSelf))
2529 return VERR_TRY_AGAIN;
2530 }
2531 RTThreadYield();
2532 }
2533
2534 if (c == 1)
2535 return VERR_SEM_LV_ILLEGAL_UPGRADE;
2536 return VERR_SEM_LV_DEADLOCK;
2537}
2538
2539
2540/**
2541 * Checks for stack cycles caused by another deadlock before returning.
2542 *
2543 * @retval VINF_SUCCESS if the stack is simply too small.
2544 * @retval VERR_SEM_LV_EXISTING_DEADLOCK if a cycle was detected.
2545 *
2546 * @param pStack The deadlock detection stack.
2547 */
2548static int rtLockValidatorDdHandleStackOverflow(PRTLOCKVALDDSTACK pStack)
2549{
2550 for (size_t i = 0; i < RT_ELEMENTS(pStack->a) - 1; i++)
2551 {
2552 PRTTHREADINT pThread = pStack->a[i].pThread;
2553 for (size_t j = i + 1; j < RT_ELEMENTS(pStack->a); j++)
2554 if (pStack->a[j].pThread == pThread)
2555 return VERR_SEM_LV_EXISTING_DEADLOCK;
2556 }
2557 static bool volatile s_fComplained = false;
2558 if (!s_fComplained)
2559 {
2560 s_fComplained = true;
2561 rtLockValComplain(RT_SRC_POS, "lock validator stack is too small! (%zu entries)\n", RT_ELEMENTS(pStack->a));
2562 }
2563 return VINF_SUCCESS;
2564}
2565
2566
2567/**
2568 * Worker for rtLockValidatorDeadlockDetection that does the actual deadlock
2569 * detection.
2570 *
2571 * @retval VINF_SUCCESS
2572 * @retval VERR_SEM_LV_DEADLOCK
2573 * @retval VERR_SEM_LV_EXISTING_DEADLOCK
2574 * @retval VERR_SEM_LV_ILLEGAL_UPGRADE
2575 * @retval VERR_TRY_AGAIN
2576 *
2577 * @param pStack The stack to use.
2578 * @param pOriginalRec The original record.
2579 * @param pThreadSelf The calling thread.
2580 */
2581static int rtLockValidatorDdDoDetection(PRTLOCKVALDDSTACK pStack, PRTLOCKVALRECUNION const pOriginalRec,
2582 PRTTHREADINT const pThreadSelf)
2583{
2584 pStack->c = 0;
2585
2586 /* We could use a single RTLOCKVALDDENTRY variable here, but the
2587       compiler may do a better job of it when using individual variables. */
2588 PRTLOCKVALRECUNION pRec = pOriginalRec;
2589 PRTLOCKVALRECUNION pFirstSibling = pOriginalRec;
2590 uint32_t iEntry = UINT32_MAX;
2591 PRTTHREADINT pThread = NIL_RTTHREAD;
2592 RTTHREADSTATE enmState = RTTHREADSTATE_RUNNING;
2593 for (uint32_t iLoop = 0; ; iLoop++)
2594 {
2595 /*
2596 * Process the current record.
2597 */
2598 RTLOCKVAL_ASSERT_PTR_ALIGN(pRec);
2599
2600 /* Find the next relevant owner thread and record. */
2601 PRTLOCKVALRECUNION pNextRec = NULL;
2602 RTTHREADSTATE enmNextState = RTTHREADSTATE_RUNNING;
2603 PRTTHREADINT pNextThread = NIL_RTTHREAD;
2604 switch (pRec->Core.u32Magic)
2605 {
2606 case RTLOCKVALRECEXCL_MAGIC:
2607 Assert(iEntry == UINT32_MAX);
2608 for (;;)
2609 {
2610 pNextThread = rtLockValidatorReadThreadHandle(&pRec->Excl.hThread);
2611 if ( !pNextThread
2612 || pNextThread->u32Magic != RTTHREADINT_MAGIC)
2613 break;
2614 enmNextState = rtThreadGetState(pNextThread);
2615 if ( !RTTHREAD_IS_SLEEPING(enmNextState)
2616 && pNextThread != pThreadSelf)
2617 break;
2618 pNextRec = rtLockValidatorReadRecUnionPtr(&pNextThread->LockValidator.pRec);
2619 if (RT_LIKELY( !pNextRec
2620 || enmNextState == rtThreadGetState(pNextThread)))
2621 break;
2622 pNextRec = NULL;
2623 }
2624 if (!pNextRec)
2625 {
2626 pRec = pRec->Excl.pSibling;
2627 if ( pRec
2628 && pRec != pFirstSibling)
2629 continue;
2630 pNextThread = NIL_RTTHREAD;
2631 }
2632 break;
2633
2634 case RTLOCKVALRECSHRD_MAGIC:
2635 if (!pRec->Shared.fSignaller)
2636 {
2637 /* Skip to the next sibling if same side. ASSUMES reader priority. */
2638 /** @todo The read side of a read-write lock is problematic if
2639 * the implementation prioritizes writers over readers because
2640             * that means we could deadlock against current readers
2641 * if a writer showed up. If the RW sem implementation is
2642 * wrapping some native API, it's not so easy to detect when we
2643 * should do this and when we shouldn't. Checking when we
2644 * shouldn't is subject to wakeup scheduling and cannot easily
2645 * be made reliable.
2646 *
2647 * At the moment we circumvent all this mess by declaring that
2648             * readers have priority.  This is TRUE on Linux, but probably
2649 * isn't on Solaris and FreeBSD. */
2650 if ( pRec == pFirstSibling
2651 && pRec->Shared.pSibling != NULL
2652 && pRec->Shared.pSibling != pFirstSibling)
2653 {
2654 pRec = pRec->Shared.pSibling;
2655 Assert(iEntry == UINT32_MAX);
2656 continue;
2657 }
2658 }
2659
2660 /* Scan the owner table for blocked owners. */
2661 if ( ASMAtomicUoReadU32(&pRec->Shared.cEntries) > 0
2662 && ( !pRec->Shared.fSignaller
2663 || iEntry != UINT32_MAX
2664 || rtLockValidatorDdAreAllThreadsBlocked(&pRec->Shared, pThreadSelf)
2665 )
2666 )
2667 {
2668 uint32_t cAllocated = pRec->Shared.cAllocated;
2669 PRTLOCKVALRECSHRDOWN volatile *papOwners = pRec->Shared.papOwners;
2670 while (++iEntry < cAllocated)
2671 {
2672 PRTLOCKVALRECSHRDOWN pEntry = rtLockValidatorUoReadSharedOwner(&papOwners[iEntry]);
2673 if (pEntry)
2674 {
2675 for (;;)
2676 {
2677 if (pEntry->Core.u32Magic != RTLOCKVALRECSHRDOWN_MAGIC)
2678 break;
2679 pNextThread = rtLockValidatorReadThreadHandle(&pEntry->hThread);
2680 if ( !pNextThread
2681 || pNextThread->u32Magic != RTTHREADINT_MAGIC)
2682 break;
2683 enmNextState = rtThreadGetState(pNextThread);
2684 if ( !RTTHREAD_IS_SLEEPING(enmNextState)
2685 && pNextThread != pThreadSelf)
2686 break;
2687 pNextRec = rtLockValidatorReadRecUnionPtr(&pNextThread->LockValidator.pRec);
2688 if (RT_LIKELY( !pNextRec
2689 || enmNextState == rtThreadGetState(pNextThread)))
2690 break;
2691 pNextRec = NULL;
2692 }
2693 if (pNextRec)
2694 break;
2695 }
2696 else
2697 Assert(!pEntry || pEntry->Core.u32Magic == RTLOCKVALRECSHRDOWN_MAGIC_DEAD);
2698 }
2699 if (pNextRec)
2700 break;
2701 pNextThread = NIL_RTTHREAD;
2702 }
2703
2704 /* Advance to the next sibling, if any. */
2705 pRec = pRec->Shared.pSibling;
2706 if ( pRec != NULL
2707 && pRec != pFirstSibling)
2708 {
2709 iEntry = UINT32_MAX;
2710 continue;
2711 }
2712 break;
2713
2714 case RTLOCKVALRECEXCL_MAGIC_DEAD:
2715 case RTLOCKVALRECSHRD_MAGIC_DEAD:
2716 break;
2717
2718 case RTLOCKVALRECSHRDOWN_MAGIC:
2719 case RTLOCKVALRECSHRDOWN_MAGIC_DEAD:
2720 default:
2721            AssertMsgFailed(("%p: %#x\n", pRec, pRec->Core.u32Magic));
2722 break;
2723 }
2724
2725 if (pNextRec)
2726 {
2727 /*
2728 * Recurse and check for deadlock.
2729 */
2730 uint32_t i = pStack->c;
2731 if (RT_UNLIKELY(i >= RT_ELEMENTS(pStack->a)))
2732 return rtLockValidatorDdHandleStackOverflow(pStack);
2733
2734 pStack->c++;
2735 pStack->a[i].pRec = pRec;
2736 pStack->a[i].iEntry = iEntry;
2737 pStack->a[i].enmState = enmState;
2738 pStack->a[i].pThread = pThread;
2739 pStack->a[i].pFirstSibling = pFirstSibling;
2740
2741 if (RT_UNLIKELY( pNextThread == pThreadSelf
2742 && ( i != 0
2743 || pRec->Core.u32Magic != RTLOCKVALRECSHRD_MAGIC
2744 || !pRec->Shared.fSignaller) /* ASSUMES signaller records have no siblings. */
2745 )
2746 )
2747 return rtLockValidatorDdVerifyDeadlock(pStack, pThreadSelf);
2748
2749 pRec = pNextRec;
2750 pFirstSibling = pNextRec;
2751 iEntry = UINT32_MAX;
2752 enmState = enmNextState;
2753 pThread = pNextThread;
2754 }
2755 else
2756 {
2757 /*
2758 * No deadlock here, unwind the stack and deal with any unfinished
2759 * business there.
2760 */
2761 uint32_t i = pStack->c;
2762 for (;;)
2763 {
2764 /* pop */
2765 if (i == 0)
2766 return VINF_SUCCESS;
2767 i--;
2768 pRec = pStack->a[i].pRec;
2769 iEntry = pStack->a[i].iEntry;
2770
2771 /* Examine it. */
2772 uint32_t u32Magic = pRec->Core.u32Magic;
2773 if (u32Magic == RTLOCKVALRECEXCL_MAGIC)
2774 pRec = pRec->Excl.pSibling;
2775 else if (u32Magic == RTLOCKVALRECSHRD_MAGIC)
2776 {
2777 if (iEntry + 1 < pRec->Shared.cAllocated)
2778 break; /* continue processing this record. */
2779 pRec = pRec->Shared.pSibling;
2780 }
2781 else
2782 {
2783 Assert( u32Magic == RTLOCKVALRECEXCL_MAGIC_DEAD
2784 || u32Magic == RTLOCKVALRECSHRD_MAGIC_DEAD);
2785 continue;
2786 }
2787
2788 /* Any next record to advance to? */
2789 if ( !pRec
2790 || pRec == pStack->a[i].pFirstSibling)
2791 continue;
2792 iEntry = UINT32_MAX;
2793 break;
2794 }
2795
2796 /* Restore the rest of the state and update the stack. */
2797 pFirstSibling = pStack->a[i].pFirstSibling;
2798 enmState = pStack->a[i].enmState;
2799 pThread = pStack->a[i].pThread;
2800 pStack->c = i;
2801 }
2802
2803 Assert(iLoop != 1000000);
2804 }
2805}
2806
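/*
 * A sketch of what the walker above chases in the classic two-mutex case
 * (thread and record names hypothetical).  Thread T1 owns mutex A and calls
 * in here about to block on B, while thread T2 owns B and is already
 * blocked on A:
 *
 *      start: RecB --owner--> T2 --pRec--> RecA --owner--> T1 == pThreadSelf
 *
 * Each hop pushes an entry onto pStack; once the owner turns out to be the
 * calling thread, the chain is handed to rtLockValidatorDdVerifyDeadlock,
 * which re-checks everything (threads may have woken up in the meantime)
 * before declaring VERR_SEM_LV_DEADLOCK.
 */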
2807
2808/**
2809 * Check for the simple no-deadlock case.
2810 *
2811 * @returns true if no deadlock, false if further investigation is required.
2812 *
2813 * @param pOriginalRec The original record.
2814 */
2815DECLINLINE(bool) rtLockValidatorIsSimpleNoDeadlockCase(PRTLOCKVALRECUNION pOriginalRec)
2816{
2817 if ( pOriginalRec->Excl.Core.u32Magic == RTLOCKVALRECEXCL_MAGIC
2818 && !pOriginalRec->Excl.pSibling)
2819 {
2820 PRTTHREADINT pThread = rtLockValidatorReadThreadHandle(&pOriginalRec->Excl.hThread);
2821 if ( !pThread
2822 || pThread->u32Magic != RTTHREADINT_MAGIC)
2823 return true;
2824 RTTHREADSTATE enmState = rtThreadGetState(pThread);
2825 if (!RTTHREAD_IS_SLEEPING(enmState))
2826 return true;
2827 }
2828 return false;
2829}
2830
2831
2832/**
2833 * Worker for rtLockValidatorDeadlockDetection that bitches about a deadlock.
2834 *
2835 * @param pStack The chain of locks causing the deadlock.
2836 * @param pRec The record relating to the current thread's lock
2837 * operation.
2838 * @param pThreadSelf This thread.
2839 * @param pSrcPos Where we are going to deadlock.
2840 * @param rc The return code.
2841 */
2842static void rcLockValidatorDoDeadlockComplaining(PRTLOCKVALDDSTACK pStack, PRTLOCKVALRECUNION pRec,
2843 PRTTHREADINT pThreadSelf, PCRTLOCKVALSRCPOS pSrcPos, int rc)
2844{
2845 if (!ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
2846 {
2847 const char *pszWhat;
2848 switch (rc)
2849 {
2850 case VERR_SEM_LV_DEADLOCK: pszWhat = "Detected deadlock!"; break;
2851 case VERR_SEM_LV_EXISTING_DEADLOCK: pszWhat = "Found existing deadlock!"; break;
2852 case VERR_SEM_LV_ILLEGAL_UPGRADE: pszWhat = "Illegal lock upgrade!"; break;
2853 default: AssertFailed(); pszWhat = "!unexpected rc!"; break;
2854 }
2855 rtLockValComplainFirst(pszWhat, pSrcPos, pThreadSelf, pStack->a[0].pRec != pRec ? pRec : NULL, true);
2856 rtLockValComplainMore("---- start of deadlock chain - %u entries ----\n", pStack->c);
2857 for (uint32_t i = 0; i < pStack->c; i++)
2858 {
2859 char szPrefix[24];
2860 RTStrPrintf(szPrefix, sizeof(szPrefix), "#%02u: ", i);
2861 PRTLOCKVALRECUNION pShrdOwner = NULL;
2862 if (pStack->a[i].pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC)
2863 pShrdOwner = (PRTLOCKVALRECUNION)pStack->a[i].pRec->Shared.papOwners[pStack->a[i].iEntry];
2864 if (VALID_PTR(pShrdOwner) && pShrdOwner->Core.u32Magic == RTLOCKVALRECSHRDOWN_MAGIC)
2865 {
2866 rtLockValComplainAboutLock(szPrefix, pShrdOwner, "\n");
2867 rtLockValComplainAboutLockStack(pShrdOwner->ShrdOwner.hThread, 5, 2, pShrdOwner);
2868 }
2869 else
2870 {
2871 rtLockValComplainAboutLock(szPrefix, pStack->a[i].pRec, "\n");
2872 if (pStack->a[i].pRec->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC)
2873 rtLockValComplainAboutLockStack(pStack->a[i].pRec->Excl.hThread, 5, 2, pStack->a[i].pRec);
2874 }
2875 }
2876 rtLockValComplainMore("---- end of deadlock chain ----\n");
2877 }
2878
2879 rtLockValComplainPanic();
2880}
2881
2882
2883/**
2884 * Perform deadlock detection.
2885 *
2886 * @retval VINF_SUCCESS
2887 * @retval VERR_SEM_LV_DEADLOCK
2888 * @retval VERR_SEM_LV_EXISTING_DEADLOCK
2889 * @retval VERR_SEM_LV_ILLEGAL_UPGRADE
2890 *
2891 * @param pRec The record relating to the current thread's lock
2892 * operation.
2893 * @param pThreadSelf The current thread.
2894 * @param pSrcPos The position of the current lock operation.
2895 */
2896static int rtLockValidatorDeadlockDetection(PRTLOCKVALRECUNION pRec, PRTTHREADINT pThreadSelf, PCRTLOCKVALSRCPOS pSrcPos)
2897{
2898 RTLOCKVALDDSTACK Stack;
2899 int rc = rtLockValidatorDdDoDetection(&Stack, pRec, pThreadSelf);
2900 if (RT_SUCCESS(rc))
2901 return VINF_SUCCESS;
2902
2903 if (rc == VERR_TRY_AGAIN)
2904 {
2905 for (uint32_t iLoop = 0; ; iLoop++)
2906 {
2907 rc = rtLockValidatorDdDoDetection(&Stack, pRec, pThreadSelf);
2908 if (RT_SUCCESS_NP(rc))
2909 return VINF_SUCCESS;
2910 if (rc != VERR_TRY_AGAIN)
2911 break;
2912 RTThreadYield();
2913 if (iLoop >= 3)
2914 return VINF_SUCCESS;
2915 }
2916 }
2917
2918 rcLockValidatorDoDeadlockComplaining(&Stack, pRec, pThreadSelf, pSrcPos, rc);
2919 return rc;
2920}
2921
2922
2923RTDECL(void) RTLockValidatorRecExclInitV(PRTLOCKVALRECEXCL pRec, RTLOCKVALCLASS hClass, uint32_t uSubClass,
2924 void *hLock, bool fEnabled, const char *pszNameFmt, va_list va)
2925{
2926 RTLOCKVAL_ASSERT_PTR_ALIGN(pRec);
2927 RTLOCKVAL_ASSERT_PTR_ALIGN(hLock);
2928 Assert( uSubClass >= RTLOCKVAL_SUB_CLASS_USER
2929 || uSubClass == RTLOCKVAL_SUB_CLASS_NONE
2930 || uSubClass == RTLOCKVAL_SUB_CLASS_ANY);
2931
2932 pRec->Core.u32Magic = RTLOCKVALRECEXCL_MAGIC;
2933 pRec->fEnabled = fEnabled && RTLockValidatorIsEnabled();
2934 pRec->afReserved[0] = 0;
2935 pRec->afReserved[1] = 0;
2936 pRec->afReserved[2] = 0;
2937 rtLockValidatorSrcPosInit(&pRec->SrcPos);
2938 pRec->hThread = NIL_RTTHREAD;
2939 pRec->pDown = NULL;
2940 pRec->hClass = rtLockValidatorClassValidateAndRetain(hClass);
2941 pRec->uSubClass = uSubClass;
2942 pRec->cRecursion = 0;
2943 pRec->hLock = hLock;
2944 pRec->pSibling = NULL;
2945 if (pszNameFmt)
2946 RTStrPrintfV(pRec->szName, sizeof(pRec->szName), pszNameFmt, va);
2947 else
2948 {
2949 static uint32_t volatile s_cAnonymous = 0;
2950 uint32_t i = ASMAtomicIncU32(&s_cAnonymous) - 1;
2951 RTStrPrintf(pRec->szName, sizeof(pRec->szName), "anon-excl-%u", i);
2952 }
2953
2954 /* Lazy initialization. */
2955 if (RT_UNLIKELY(g_hLockValidatorXRoads == NIL_RTSEMXROADS))
2956 rtLockValidatorLazyInit();
2957}
2958
2959
2960RTDECL(void) RTLockValidatorRecExclInit(PRTLOCKVALRECEXCL pRec, RTLOCKVALCLASS hClass, uint32_t uSubClass,
2961 void *hLock, bool fEnabled, const char *pszNameFmt, ...)
2962{
2963 va_list va;
2964 va_start(va, pszNameFmt);
2965 RTLockValidatorRecExclInitV(pRec, hClass, uSubClass, hLock, fEnabled, pszNameFmt, va);
2966 va_end(va);
2967}
2968
2969
2970RTDECL(int) RTLockValidatorRecExclCreateV(PRTLOCKVALRECEXCL *ppRec, RTLOCKVALCLASS hClass,
2971 uint32_t uSubClass, void *pvLock, bool fEnabled,
2972 const char *pszNameFmt, va_list va)
2973{
2974 PRTLOCKVALRECEXCL pRec;
2975 *ppRec = pRec = (PRTLOCKVALRECEXCL)RTMemAlloc(sizeof(*pRec));
2976 if (!pRec)
2977 return VERR_NO_MEMORY;
2978 RTLockValidatorRecExclInitV(pRec, hClass, uSubClass, pvLock, fEnabled, pszNameFmt, va);
2979 return VINF_SUCCESS;
2980}
2981
2982
2983RTDECL(int) RTLockValidatorRecExclCreate(PRTLOCKVALRECEXCL *ppRec, RTLOCKVALCLASS hClass,
2984 uint32_t uSubClass, void *pvLock, bool fEnabled,
2985 const char *pszNameFmt, ...)
2986{
2987 va_list va;
2988 va_start(va, pszNameFmt);
2989 int rc = RTLockValidatorRecExclCreateV(ppRec, hClass, uSubClass, pvLock, fEnabled, pszNameFmt, va);
2990 va_end(va);
2991 return rc;
2992}
2993
2994
2995RTDECL(void) RTLockValidatorRecExclDelete(PRTLOCKVALRECEXCL pRec)
2996{
2997 Assert(pRec->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC);
2998
2999 rtLockValidatorSerializeDestructEnter();
3000
3001 ASMAtomicWriteU32(&pRec->Core.u32Magic, RTLOCKVALRECEXCL_MAGIC_DEAD);
3002 ASMAtomicWriteHandle(&pRec->hThread, NIL_RTTHREAD);
3003 RTLOCKVALCLASS hClass;
3004 ASMAtomicXchgHandle(&pRec->hClass, NIL_RTLOCKVALCLASS, &hClass);
3005 if (pRec->pSibling)
3006 rtLockValidatorUnlinkAllSiblings(&pRec->Core);
3007 rtLockValidatorSerializeDestructLeave();
3008 if (hClass != NIL_RTLOCKVALCLASS)
3009 RTLockValidatorClassRelease(hClass);
3010}
3011
3012
3013RTDECL(void) RTLockValidatorRecExclDestroy(PRTLOCKVALRECEXCL *ppRec)
3014{
3015 PRTLOCKVALRECEXCL pRec = *ppRec;
3016 *ppRec = NULL;
3017 if (pRec)
3018 {
3019 RTLockValidatorRecExclDelete(pRec);
3020 RTMemFree(pRec);
3021 }
3022}
3023
3024
3025RTDECL(uint32_t) RTLockValidatorRecExclSetSubClass(PRTLOCKVALRECEXCL pRec, uint32_t uSubClass)
3026{
3027 AssertPtrReturn(pRec, RTLOCKVAL_SUB_CLASS_INVALID);
3028 AssertReturn(pRec->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, RTLOCKVAL_SUB_CLASS_INVALID);
3029 AssertReturn( uSubClass >= RTLOCKVAL_SUB_CLASS_USER
3030 || uSubClass == RTLOCKVAL_SUB_CLASS_NONE
3031 || uSubClass == RTLOCKVAL_SUB_CLASS_ANY,
3032 RTLOCKVAL_SUB_CLASS_INVALID);
3033 return ASMAtomicXchgU32(&pRec->uSubClass, uSubClass);
3034}
3035
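/* A sketch of how a caller might use this: give two locks of the same class
   distinct user sub-classes so that they must be taken in ascending
   sub-class order (see rtLockValidatorIsSubClassOrderOk above).  pThis and
   its Rec members are hypothetical: */
#if 0
RTLockValidatorRecExclSetSubClass(&pThis->Rec1, RTLOCKVAL_SUB_CLASS_USER);
RTLockValidatorRecExclSetSubClass(&pThis->Rec2, RTLOCKVAL_SUB_CLASS_USER + 1);
/* Rec1 (USER) may be held while taking Rec2 (USER + 1), but not vice versa. */
#endif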
3036
3037RTDECL(void) RTLockValidatorRecExclSetOwner(PRTLOCKVALRECEXCL pRec, RTTHREAD hThreadSelf,
3038 PCRTLOCKVALSRCPOS pSrcPos, bool fFirstRecursion)
3039{
3040 PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
3041 if (!pRecU)
3042 return;
3043 AssertReturnVoid(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC);
3044 if (!pRecU->Excl.fEnabled)
3045 return;
3046 if (hThreadSelf == NIL_RTTHREAD)
3047 {
3048 hThreadSelf = RTThreadSelfAutoAdopt();
3049 AssertReturnVoid(hThreadSelf != NIL_RTTHREAD);
3050 }
3051 AssertReturnVoid(hThreadSelf->u32Magic == RTTHREADINT_MAGIC);
3052 Assert(hThreadSelf == RTThreadSelf());
3053
3054 ASMAtomicIncS32(&hThreadSelf->LockValidator.cWriteLocks);
3055
3056 if (pRecU->Excl.hThread == hThreadSelf)
3057 {
3058 Assert(!fFirstRecursion);
3059 pRecU->Excl.cRecursion++;
3060 rtLockValidatorStackPushRecursion(hThreadSelf, pRecU, pSrcPos);
3061 }
3062 else
3063 {
3064 Assert(pRecU->Excl.hThread == NIL_RTTHREAD);
3065
3066 rtLockValidatorSrcPosCopy(&pRecU->Excl.SrcPos, pSrcPos);
3067 ASMAtomicUoWriteU32(&pRecU->Excl.cRecursion, 1);
3068 ASMAtomicWriteHandle(&pRecU->Excl.hThread, hThreadSelf);
3069
3070 rtLockValidatorStackPush(hThreadSelf, pRecU);
3071 }
3072}
3073
3074
3075/**
3076 * Internal worker for RTLockValidatorRecExclReleaseOwner and
3077 * RTLockValidatorRecExclReleaseOwnerUnchecked.
3078 */
3079static void rtLockValidatorRecExclReleaseOwnerUnchecked(PRTLOCKVALRECUNION pRec, bool fFinalRecursion)
3080{
3081 RTTHREADINT *pThread = pRec->Excl.hThread;
3082 AssertReturnVoid(pThread != NIL_RTTHREAD);
3083 Assert(pThread == RTThreadSelf());
3084
3085 ASMAtomicDecS32(&pThread->LockValidator.cWriteLocks);
3086 uint32_t c = ASMAtomicDecU32(&pRec->Excl.cRecursion);
3087 if (c == 0)
3088 {
3089 rtLockValidatorStackPop(pThread, pRec);
3090 ASMAtomicWriteHandle(&pRec->Excl.hThread, NIL_RTTHREAD);
3091 }
3092 else
3093 {
3094 Assert(c < UINT32_C(0xffff0000));
3095 Assert(!fFinalRecursion);
3096 rtLockValidatorStackPopRecursion(pThread, pRec);
3097 }
3098}
3099
3100RTDECL(int) RTLockValidatorRecExclReleaseOwner(PRTLOCKVALRECEXCL pRec, bool fFinalRecursion)
3101{
3102 PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
3103 if (!pRecU)
3104 return VINF_SUCCESS;
3105 AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
3106 if (!pRecU->Excl.fEnabled)
3107 return VINF_SUCCESS;
3108
3109 /*
3110 * Check the release order.
3111 */
3112 if ( pRecU->Excl.hClass != NIL_RTLOCKVALCLASS
3113 && pRecU->Excl.hClass->fStrictReleaseOrder
3114 && pRecU->Excl.hClass->cMsMinOrder != RT_INDEFINITE_WAIT
3115 )
3116 {
3117 int rc = rtLockValidatorStackCheckReleaseOrder(pRecU->Excl.hThread, pRecU);
3118 if (RT_FAILURE(rc))
3119 return rc;
3120 }
3121
3122 /*
3123 * Join paths with RTLockValidatorRecExclReleaseOwnerUnchecked.
3124 */
3125 rtLockValidatorRecExclReleaseOwnerUnchecked(pRecU, fFinalRecursion);
3126 return VINF_SUCCESS;
3127}
3128
3129
3130RTDECL(void) RTLockValidatorRecExclReleaseOwnerUnchecked(PRTLOCKVALRECEXCL pRec)
3131{
3132 PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
3133 AssertReturnVoid(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC);
3134 if (pRecU->Excl.fEnabled)
3135 rtLockValidatorRecExclReleaseOwnerUnchecked(pRecU, false);
3136}
3137
3138
3139RTDECL(int) RTLockValidatorRecExclRecursion(PRTLOCKVALRECEXCL pRec, PCRTLOCKVALSRCPOS pSrcPos)
3140{
3141 PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
3142 if (!pRecU)
3143 return VINF_SUCCESS;
3144 AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
3145 if (!pRecU->Excl.fEnabled)
3146 return VINF_SUCCESS;
3147 AssertReturn(pRecU->Excl.hThread != NIL_RTTHREAD, VERR_SEM_LV_INVALID_PARAMETER);
3148 AssertReturn(pRecU->Excl.cRecursion > 0, VERR_SEM_LV_INVALID_PARAMETER);
3149
3150 if ( pRecU->Excl.hClass != NIL_RTLOCKVALCLASS
3151 && !pRecU->Excl.hClass->fRecursionOk)
3152 {
3153 rtLockValComplainFirst("Recursion not allowed by the class!",
3154 pSrcPos, pRecU->Excl.hThread, (PRTLOCKVALRECUNION)pRec, true);
3155 rtLockValComplainPanic();
3156 return VERR_SEM_LV_NESTED;
3157 }
3158
3159 Assert(pRecU->Excl.cRecursion < _1M);
3160 pRecU->Excl.cRecursion++;
3161 rtLockValidatorStackPushRecursion(pRecU->Excl.hThread, pRecU, pSrcPos);
3162 return VINF_SUCCESS;
3163}
3164
3165
3166RTDECL(int) RTLockValidatorRecExclUnwind(PRTLOCKVALRECEXCL pRec)
3167{
3168 PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
3169 AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
3170 if (!pRecU->Excl.fEnabled)
3171 return VINF_SUCCESS;
3172 AssertReturn(pRecU->Excl.hThread != NIL_RTTHREAD, VERR_SEM_LV_INVALID_PARAMETER);
3173 Assert(pRecU->Excl.hThread == RTThreadSelf());
3174 AssertReturn(pRecU->Excl.cRecursion > 1, VERR_SEM_LV_INVALID_PARAMETER);
3175
3176 /*
3177 * Check the release order.
3178 */
3179 if ( pRecU->Excl.hClass != NIL_RTLOCKVALCLASS
3180 && pRecU->Excl.hClass->fStrictReleaseOrder
3181 && pRecU->Excl.hClass->cMsMinOrder != RT_INDEFINITE_WAIT
3182 )
3183 {
3184 int rc = rtLockValidatorStackCheckReleaseOrder(pRecU->Excl.hThread, pRecU);
3185 if (RT_FAILURE(rc))
3186 return rc;
3187 }
3188
3189 /*
3190 * Perform the unwind.
3191 */
3192 pRecU->Excl.cRecursion--;
3193 rtLockValidatorStackPopRecursion(pRecU->Excl.hThread, pRecU);
3194 return VINF_SUCCESS;
3195}
3196
3197
3198RTDECL(int) RTLockValidatorRecExclRecursionMixed(PRTLOCKVALRECEXCL pRec, PRTLOCKVALRECCORE pRecMixed, PCRTLOCKVALSRCPOS pSrcPos)
3199{
3200 PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
3201 AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
3202 PRTLOCKVALRECUNION pRecMixedU = (PRTLOCKVALRECUNION)pRecMixed;
3203 AssertReturn( pRecMixedU->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
3204 || pRecMixedU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC
3205 , VERR_SEM_LV_INVALID_PARAMETER);
3206 if (!pRecU->Excl.fEnabled)
3207 return VINF_SUCCESS;
3208 Assert(pRecU->Excl.hThread == RTThreadSelf());
3209 AssertReturn(pRecU->Excl.hThread != NIL_RTTHREAD, VERR_SEM_LV_INVALID_PARAMETER);
3210 AssertReturn(pRecU->Excl.cRecursion > 0, VERR_SEM_LV_INVALID_PARAMETER);
3211
3212 if ( pRecU->Excl.hClass != NIL_RTLOCKVALCLASS
3213 && !pRecU->Excl.hClass->fRecursionOk)
3214 {
3215 rtLockValComplainFirst("Mixed recursion not allowed by the class!",
3216 pSrcPos, pRecU->Excl.hThread, (PRTLOCKVALRECUNION)pRec, true);
3217 rtLockValComplainPanic();
3218 return VERR_SEM_LV_NESTED;
3219 }
3220
3221 Assert(pRecU->Excl.cRecursion < _1M);
3222 pRecU->Excl.cRecursion++;
3223 rtLockValidatorStackPushRecursion(pRecU->Excl.hThread, pRecU, pSrcPos);
3224
3225 return VINF_SUCCESS;
3226}
3227
3228
3229RTDECL(int) RTLockValidatorRecExclUnwindMixed(PRTLOCKVALRECEXCL pRec, PRTLOCKVALRECCORE pRecMixed)
3230{
3231 PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
3232 AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
3233 PRTLOCKVALRECUNION pRecMixedU = (PRTLOCKVALRECUNION)pRecMixed;
3234 AssertReturn( pRecMixedU->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
3235 || pRecMixedU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC
3236 , VERR_SEM_LV_INVALID_PARAMETER);
3237 if (!pRecU->Excl.fEnabled)
3238 return VINF_SUCCESS;
3239 Assert(pRecU->Excl.hThread == RTThreadSelf());
3240 AssertReturn(pRecU->Excl.hThread != NIL_RTTHREAD, VERR_SEM_LV_INVALID_PARAMETER);
3241 AssertReturn(pRecU->Excl.cRecursion > 1, VERR_SEM_LV_INVALID_PARAMETER);
3242
3243 /*
3244 * Check the release order.
3245 */
3246 if ( pRecU->Excl.hClass != NIL_RTLOCKVALCLASS
3247 && pRecU->Excl.hClass->fStrictReleaseOrder
3248 && pRecU->Excl.hClass->cMsMinOrder != RT_INDEFINITE_WAIT
3249 )
3250 {
3251 int rc = rtLockValidatorStackCheckReleaseOrder(pRecU->Excl.hThread, pRecU);
3252 if (RT_FAILURE(rc))
3253 return rc;
3254 }
3255
3256 /*
3257 * Perform the unwind.
3258 */
3259 pRecU->Excl.cRecursion--;
3260 rtLockValidatorStackPopRecursion(pRecU->Excl.hThread, pRecU);
3261 return VINF_SUCCESS;
3262}
3263
3264
3265RTDECL(int) RTLockValidatorRecExclCheckOrder(PRTLOCKVALRECEXCL pRec, RTTHREAD hThreadSelf,
3266 PCRTLOCKVALSRCPOS pSrcPos, RTMSINTERVAL cMillies)
3267{
3268 /*
3269 * Validate and adjust input. Quit early if order validation is disabled.
3270 */
3271 PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
3272 if (!pRecU)
3273 return VINF_SUCCESS;
3274 AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
3275 if ( !pRecU->Excl.fEnabled
3276 || pRecU->Excl.hClass == NIL_RTLOCKVALCLASS
3277 || pRecU->Excl.hClass->cMsMinOrder == RT_INDEFINITE_WAIT
3278 || pRecU->Excl.hClass->cMsMinOrder > cMillies)
3279 return VINF_SUCCESS;
3280
3281 if (hThreadSelf == NIL_RTTHREAD)
3282 {
3283 hThreadSelf = RTThreadSelfAutoAdopt();
3284 AssertReturn(hThreadSelf != NIL_RTTHREAD, VERR_SEM_LV_INTERNAL_ERROR);
3285 }
3286 AssertReturn(hThreadSelf->u32Magic == RTTHREADINT_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
3287 Assert(hThreadSelf == RTThreadSelf());
3288
3289 /*
3290 * Detect recursion as it isn't subject to order restrictions.
3291 */
3292 if (pRec->hThread == hThreadSelf)
3293 return VINF_SUCCESS;
3294
3295 return rtLockValidatorStackCheckLockingOrder(pRecU->Excl.hClass, pRecU->Excl.uSubClass, hThreadSelf, pRecU, pSrcPos);
3296}
3297
3298
3299RTDECL(int) RTLockValidatorRecExclCheckBlocking(PRTLOCKVALRECEXCL pRec, RTTHREAD hThreadSelf,
3300 PCRTLOCKVALSRCPOS pSrcPos, bool fRecursiveOk, RTMSINTERVAL cMillies,
3301 RTTHREADSTATE enmSleepState, bool fReallySleeping)
3302{
3303 /*
3304 * Fend off wild life.
3305 */
3306 PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
3307 if (!pRecU)
3308 return VINF_SUCCESS;
3309 AssertPtrReturn(pRecU, VERR_SEM_LV_INVALID_PARAMETER);
3310 AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
3311 if (!pRec->fEnabled)
3312 return VINF_SUCCESS;
3313
3314 PRTTHREADINT pThreadSelf = hThreadSelf;
3315 AssertPtrReturn(pThreadSelf, VERR_SEM_LV_INVALID_PARAMETER);
3316 AssertReturn(pThreadSelf->u32Magic == RTTHREADINT_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
3317 Assert(pThreadSelf == RTThreadSelf());
3318
3319 AssertReturn(RTTHREAD_IS_SLEEPING(enmSleepState), VERR_SEM_LV_INVALID_PARAMETER);
3320
3321 RTTHREADSTATE enmThreadState = rtThreadGetState(pThreadSelf);
3322 if (RT_UNLIKELY(enmThreadState != RTTHREADSTATE_RUNNING))
3323 {
3324 AssertReturn( enmThreadState == RTTHREADSTATE_TERMINATED /* rtThreadRemove uses locks too */
3325 || enmThreadState == RTTHREADSTATE_INITIALIZING /* rtThreadInsert uses locks too */
3326 , VERR_SEM_LV_INVALID_PARAMETER);
3327 enmSleepState = enmThreadState;
3328 }
3329
3330 /*
3331 * Record the location.
3332 */
3333 rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pRec, pRecU);
3334 rtLockValidatorSrcPosCopy(&pThreadSelf->LockValidator.SrcPos, pSrcPos);
3335 ASMAtomicWriteBool(&pThreadSelf->LockValidator.fInValidator, true);
3336 pThreadSelf->LockValidator.enmRecState = enmSleepState;
3337 rtThreadSetState(pThreadSelf, enmSleepState);
3338
3339 /*
3340 * Don't do deadlock detection if we're recursing.
3341 *
3342     * On some hosts we don't do recursion accounting ourselves and there
3343 * isn't any other place to check for this.
3344 */
3345 int rc = VINF_SUCCESS;
3346 if (rtLockValidatorReadThreadHandle(&pRecU->Excl.hThread) == pThreadSelf)
3347 {
3348 if ( !fRecursiveOk
3349 || ( pRecU->Excl.hClass != NIL_RTLOCKVALCLASS
3350 && !pRecU->Excl.hClass->fRecursionOk))
3351 {
3352 rtLockValComplainFirst("Recursion not allowed!", pSrcPos, pThreadSelf, pRecU, true);
3353 rtLockValComplainPanic();
3354 rc = VERR_SEM_LV_NESTED;
3355 }
3356 }
3357 /*
3358 * Perform deadlock detection.
3359 */
3360 else if ( pRecU->Excl.hClass != NIL_RTLOCKVALCLASS
3361 && ( pRecU->Excl.hClass->cMsMinDeadlock > cMillies
3362                 || pRecU->Excl.hClass->cMsMinDeadlock == RT_INDEFINITE_WAIT))
3363 rc = VINF_SUCCESS;
3364 else if (!rtLockValidatorIsSimpleNoDeadlockCase(pRecU))
3365 rc = rtLockValidatorDeadlockDetection(pRecU, pThreadSelf, pSrcPos);
3366
3367 if (RT_SUCCESS(rc))
3368 ASMAtomicWriteBool(&pThreadSelf->fReallySleeping, fReallySleeping);
3369 else
3370 {
3371 rtThreadSetState(pThreadSelf, enmThreadState);
3372 rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pRec, NULL);
3373 }
3374 ASMAtomicWriteBool(&pThreadSelf->LockValidator.fInValidator, false);
3375 return rc;
3376}
3377RT_EXPORT_SYMBOL(RTLockValidatorRecExclCheckBlocking);
3378
3379
3380RTDECL(int) RTLockValidatorRecExclCheckOrderAndBlocking(PRTLOCKVALRECEXCL pRec, RTTHREAD hThreadSelf,
3381 PCRTLOCKVALSRCPOS pSrcPos, bool fRecursiveOk, RTMSINTERVAL cMillies,
3382 RTTHREADSTATE enmSleepState, bool fReallySleeping)
3383{
3384 int rc = RTLockValidatorRecExclCheckOrder(pRec, hThreadSelf, pSrcPos, cMillies);
3385 if (RT_SUCCESS(rc))
3386 rc = RTLockValidatorRecExclCheckBlocking(pRec, hThreadSelf, pSrcPos, fRecursiveOk, cMillies,
3387 enmSleepState, fReallySleeping);
3388 return rc;
3389}
3390RT_EXPORT_SYMBOL(RTLockValidatorRecExclCheckOrderAndBlocking);
3391
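#if 0
/* A sketch (not compiled in) of the call sequence a semaphore implementation
   is expected to follow around the two APIs above.  EXAMPLEMUTEX and the
   exampleNative*() functions are hypothetical stand-ins for the native
   mutex bits. */
typedef struct EXAMPLEMUTEX
{
    RTLOCKVALRECEXCL    ValidatorRec;
    /* ... native mutex state ... */
} EXAMPLEMUTEX;

static int exampleNativeWait(EXAMPLEMUTEX *pThis, RTMSINTERVAL cMillies);
static int exampleNativeRelease(EXAMPLEMUTEX *pThis);

static int exampleMutexRequest(EXAMPLEMUTEX *pThis, RTMSINTERVAL cMillies)
{
    RTTHREAD hThreadSelf = RTThreadSelfAutoAdopt();

    /* Validate the locking order and declare the impending wait; this is
       where the deadlock detection runs: */
    int rc = RTLockValidatorRecExclCheckOrderAndBlocking(&pThis->ValidatorRec, hThreadSelf,
                                                         NULL /*pSrcPos*/, true /*fRecursiveOk*/, cMillies,
                                                         RTTHREADSTATE_MUTEX, true /*fReallySleeping*/);
    if (RT_FAILURE(rc))
        return rc;                              /* deadlock, wrong order, ... */

    rc = exampleNativeWait(pThis, cMillies);    /* the actual blocking wait */
    RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_MUTEX);
    if (RT_SUCCESS(rc))
        RTLockValidatorRecExclSetOwner(&pThis->ValidatorRec, hThreadSelf, NULL /*pSrcPos*/,
                                       false /*fFirstRecursion*/);
    return rc;
}

static int exampleMutexRelease(EXAMPLEMUTEX *pThis)
{
    /* Checks the release order (if the class demands it) and clears the
       ownership before the native release: */
    int rc = RTLockValidatorRecExclReleaseOwner(&pThis->ValidatorRec, false /*fFinalRecursion*/);
    if (RT_FAILURE(rc))
        return rc;
    return exampleNativeRelease(pThis);
}
#endif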
3392
3393RTDECL(void) RTLockValidatorRecSharedInitV(PRTLOCKVALRECSHRD pRec, RTLOCKVALCLASS hClass, uint32_t uSubClass,
3394 void *hLock, bool fSignaller, bool fEnabled, const char *pszNameFmt, va_list va)
3395{
3396 RTLOCKVAL_ASSERT_PTR_ALIGN(pRec);
3397 RTLOCKVAL_ASSERT_PTR_ALIGN(hLock);
3398 Assert( uSubClass >= RTLOCKVAL_SUB_CLASS_USER
3399 || uSubClass == RTLOCKVAL_SUB_CLASS_NONE
3400 || uSubClass == RTLOCKVAL_SUB_CLASS_ANY);
3401
3402 pRec->Core.u32Magic = RTLOCKVALRECSHRD_MAGIC;
3403 pRec->uSubClass = uSubClass;
3404 pRec->hClass = rtLockValidatorClassValidateAndRetain(hClass);
3405 pRec->hLock = hLock;
3406 pRec->fEnabled = fEnabled && RTLockValidatorIsEnabled();
3407 pRec->fSignaller = fSignaller;
3408 pRec->pSibling = NULL;
3409
3410 /* the table */
3411 pRec->cEntries = 0;
3412 pRec->iLastEntry = 0;
3413 pRec->cAllocated = 0;
3414 pRec->fReallocating = false;
3415 pRec->fPadding = false;
3416 pRec->papOwners = NULL;
3417
3418 /* the name */
3419 if (pszNameFmt)
3420 RTStrPrintfV(pRec->szName, sizeof(pRec->szName), pszNameFmt, va);
3421 else
3422 {
3423 static uint32_t volatile s_cAnonymous = 0;
3424 uint32_t i = ASMAtomicIncU32(&s_cAnonymous) - 1;
3425 RTStrPrintf(pRec->szName, sizeof(pRec->szName), "anon-shrd-%u", i);
3426 }
3427}
3428
3429
3430RTDECL(void) RTLockValidatorRecSharedInit(PRTLOCKVALRECSHRD pRec, RTLOCKVALCLASS hClass, uint32_t uSubClass,
3431 void *hLock, bool fSignaller, bool fEnabled, const char *pszNameFmt, ...)
3432{
3433 va_list va;
3434 va_start(va, pszNameFmt);
3435 RTLockValidatorRecSharedInitV(pRec, hClass, uSubClass, hLock, fSignaller, fEnabled, pszNameFmt, va);
3436 va_end(va);
3437}
3438
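/* In signaller mode (fSignaller = true) the "owners" recorded in the table
   are the threads expected to signal the object rather than threads holding
   it shared; this is what rtLockValidatorDdAreAllThreadsBlocked() keys off
   during deadlock detection.  A sketch of how an event semaphore might set
   up such a record (pThis is hypothetical): */
#if 0
RTLockValidatorRecSharedInit(&pThis->SignallerRec, NIL_RTLOCKVALCLASS, RTLOCKVAL_SUB_CLASS_NONE,
                             pThis /*hLock*/, true /*fSignaller*/, true /*fEnabled*/,
                             "event-%p", pThis);
#endif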
3439
3440RTDECL(void) RTLockValidatorRecSharedDelete(PRTLOCKVALRECSHRD pRec)
3441{
3442 Assert(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC);
3443
3444 /*
3445 * Flip it into table realloc mode and take the destruction lock.
3446 */
3447 rtLockValidatorSerializeDestructEnter();
3448 while (!ASMAtomicCmpXchgBool(&pRec->fReallocating, true, false))
3449 {
3450 rtLockValidatorSerializeDestructLeave();
3451
3452 rtLockValidatorSerializeDetectionEnter();
3453 rtLockValidatorSerializeDetectionLeave();
3454
3455 rtLockValidatorSerializeDestructEnter();
3456 }
3457
3458 ASMAtomicWriteU32(&pRec->Core.u32Magic, RTLOCKVALRECSHRD_MAGIC_DEAD);
3459 RTLOCKVALCLASS hClass;
3460 ASMAtomicXchgHandle(&pRec->hClass, NIL_RTLOCKVALCLASS, &hClass);
3461 if (pRec->papOwners)
3462 {
3463 PRTLOCKVALRECSHRDOWN volatile *papOwners = pRec->papOwners;
3464 ASMAtomicUoWriteNullPtr(&pRec->papOwners);
3465 ASMAtomicUoWriteU32(&pRec->cAllocated, 0);
3466
3467 RTMemFree((void *)papOwners);
3468 }
3469 if (pRec->pSibling)
3470 rtLockValidatorUnlinkAllSiblings(&pRec->Core);
3471 ASMAtomicWriteBool(&pRec->fReallocating, false);
3472
3473 rtLockValidatorSerializeDestructLeave();
3474
3475 if (hClass != NIL_RTLOCKVALCLASS)
3476 RTLockValidatorClassRelease(hClass);
3477}
3478
3479
3480RTDECL(uint32_t) RTLockValidatorRecSharedSetSubClass(PRTLOCKVALRECSHRD pRec, uint32_t uSubClass)
3481{
3482 AssertPtrReturn(pRec, RTLOCKVAL_SUB_CLASS_INVALID);
3483 AssertReturn(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, RTLOCKVAL_SUB_CLASS_INVALID);
3484 AssertReturn( uSubClass >= RTLOCKVAL_SUB_CLASS_USER
3485 || uSubClass == RTLOCKVAL_SUB_CLASS_NONE
3486 || uSubClass == RTLOCKVAL_SUB_CLASS_ANY,
3487 RTLOCKVAL_SUB_CLASS_INVALID);
3488 return ASMAtomicXchgU32(&pRec->uSubClass, uSubClass);
3489}
3490
3491
3492/**
3493 * Locates an owner (thread) in a shared lock record.
3494 *
 3495 * @returns Pointer to the owner entry on success, NULL on failure.
3496 * @param pShared The shared lock record.
3497 * @param hThread The thread (owner) to find.
 3498 * @param piEntry Where to return the table index.
3499 * Optional.
3500 */
3501DECLINLINE(PRTLOCKVALRECUNION)
3502rtLockValidatorRecSharedFindOwner(PRTLOCKVALRECSHRD pShared, RTTHREAD hThread, uint32_t *piEntry)
3503{
3504 rtLockValidatorSerializeDetectionEnter();
3505
3506 PRTLOCKVALRECSHRDOWN volatile *papOwners = pShared->papOwners;
3507 if (papOwners)
3508 {
3509 uint32_t const cMax = pShared->cAllocated;
3510 for (uint32_t iEntry = 0; iEntry < cMax; iEntry++)
3511 {
3512 PRTLOCKVALRECUNION pEntry = (PRTLOCKVALRECUNION)rtLockValidatorUoReadSharedOwner(&papOwners[iEntry]);
3513 if (pEntry && pEntry->ShrdOwner.hThread == hThread)
3514 {
3515 rtLockValidatorSerializeDetectionLeave();
3516 if (piEntry)
3517 *piEntry = iEntry;
3518 return pEntry;
3519 }
3520 }
3521 }
3522
3523 rtLockValidatorSerializeDetectionLeave();
3524 return NULL;
3525}
3526
3527
3528RTDECL(int) RTLockValidatorRecSharedCheckOrder(PRTLOCKVALRECSHRD pRec, RTTHREAD hThreadSelf,
3529 PCRTLOCKVALSRCPOS pSrcPos, RTMSINTERVAL cMillies)
3530{
3531 /*
3532 * Validate and adjust input. Quit early if order validation is disabled.
3533 */
3534 PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
3535 AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
3536 if ( !pRecU->Shared.fEnabled
3537 || pRecU->Shared.hClass == NIL_RTLOCKVALCLASS
3538 || pRecU->Shared.hClass->cMsMinOrder == RT_INDEFINITE_WAIT
3539 || pRecU->Shared.hClass->cMsMinOrder > cMillies
3540 )
3541 return VINF_SUCCESS;
3542
3543 if (hThreadSelf == NIL_RTTHREAD)
3544 {
3545 hThreadSelf = RTThreadSelfAutoAdopt();
3546 AssertReturn(hThreadSelf != NIL_RTTHREAD, VERR_SEM_LV_INTERNAL_ERROR);
3547 }
3548 AssertReturn(hThreadSelf->u32Magic == RTTHREADINT_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
3549 Assert(hThreadSelf == RTThreadSelf());
3550
3551 /*
3552 * Detect recursion as it isn't subject to order restrictions.
3553 */
3554 PRTLOCKVALRECUNION pEntry = rtLockValidatorRecSharedFindOwner(&pRecU->Shared, hThreadSelf, NULL);
3555 if (pEntry)
3556 return VINF_SUCCESS;
3557
3558 return rtLockValidatorStackCheckLockingOrder(pRecU->Shared.hClass, pRecU->Shared.uSubClass, hThreadSelf, pRecU, pSrcPos);
3559}
3560
3561
3562RTDECL(int) RTLockValidatorRecSharedCheckBlocking(PRTLOCKVALRECSHRD pRec, RTTHREAD hThreadSelf,
3563 PCRTLOCKVALSRCPOS pSrcPos, bool fRecursiveOk, RTMSINTERVAL cMillies,
3564 RTTHREADSTATE enmSleepState, bool fReallySleeping)
3565{
3566 /*
3567 * Fend off wild life.
3568 */
3569 PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
3570 AssertPtrReturn(pRecU, VERR_SEM_LV_INVALID_PARAMETER);
3571 AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
3572 if (!pRecU->Shared.fEnabled)
3573 return VINF_SUCCESS;
3574
3575 PRTTHREADINT pThreadSelf = hThreadSelf;
3576 AssertPtrReturn(pThreadSelf, VERR_SEM_LV_INVALID_PARAMETER);
3577 AssertReturn(pThreadSelf->u32Magic == RTTHREADINT_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
3578 Assert(pThreadSelf == RTThreadSelf());
3579
3580 AssertReturn(RTTHREAD_IS_SLEEPING(enmSleepState), VERR_SEM_LV_INVALID_PARAMETER);
3581
3582 RTTHREADSTATE enmThreadState = rtThreadGetState(pThreadSelf);
3583 if (RT_UNLIKELY(enmThreadState != RTTHREADSTATE_RUNNING))
3584 {
3585 AssertReturn( enmThreadState == RTTHREADSTATE_TERMINATED /* rtThreadRemove uses locks too */
3586 || enmThreadState == RTTHREADSTATE_INITIALIZING /* rtThreadInsert uses locks too */
3587 , VERR_SEM_LV_INVALID_PARAMETER);
3588 enmSleepState = enmThreadState;
3589 }
3590
3591 /*
3592 * Record the location.
3593 */
3594 rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pRec, pRecU);
3595 rtLockValidatorSrcPosCopy(&pThreadSelf->LockValidator.SrcPos, pSrcPos);
3596 ASMAtomicWriteBool(&pThreadSelf->LockValidator.fInValidator, true);
3597 pThreadSelf->LockValidator.enmRecState = enmSleepState;
3598 rtThreadSetState(pThreadSelf, enmSleepState);
3599
3600 /*
3601 * Don't do deadlock detection if we're recursing.
3602 */
3603 int rc = VINF_SUCCESS;
3604 PRTLOCKVALRECUNION pEntry = !pRecU->Shared.fSignaller
3605 ? rtLockValidatorRecSharedFindOwner(&pRecU->Shared, pThreadSelf, NULL)
3606 : NULL;
3607 if (pEntry)
3608 {
3609 if ( !fRecursiveOk
3610 || ( pRec->hClass
3611 && !pRec->hClass->fRecursionOk)
3612 )
3613 {
3614 rtLockValComplainFirst("Recursion not allowed!", pSrcPos, pThreadSelf, pRecU, true);
3615 rtLockValComplainPanic();
3616 rc = VERR_SEM_LV_NESTED;
3617 }
3618 }
3619 /*
3620 * Perform deadlock detection.
3621 */
3622 else if ( pRec->hClass
3623 && ( pRec->hClass->cMsMinDeadlock == RT_INDEFINITE_WAIT
3624 || pRec->hClass->cMsMinDeadlock > cMillies))
3625 rc = VINF_SUCCESS;
3626 else if (!rtLockValidatorIsSimpleNoDeadlockCase(pRecU))
3627 rc = rtLockValidatorDeadlockDetection(pRecU, pThreadSelf, pSrcPos);
3628
3629 if (RT_SUCCESS(rc))
3630 ASMAtomicWriteBool(&pThreadSelf->fReallySleeping, fReallySleeping);
3631 else
3632 {
3633 rtThreadSetState(pThreadSelf, enmThreadState);
3634 rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pRec, NULL);
3635 }
3636 ASMAtomicWriteBool(&pThreadSelf->LockValidator.fInValidator, false);
3637 return rc;
3638}
3639RT_EXPORT_SYMBOL(RTLockValidatorRecSharedCheckBlocking);
3640
3641
3642RTDECL(int) RTLockValidatorRecSharedCheckOrderAndBlocking(PRTLOCKVALRECSHRD pRec, RTTHREAD hThreadSelf,
3643 PCRTLOCKVALSRCPOS pSrcPos, bool fRecursiveOk, RTMSINTERVAL cMillies,
3644 RTTHREADSTATE enmSleepState, bool fReallySleeping)
3645{
3646 int rc = RTLockValidatorRecSharedCheckOrder(pRec, hThreadSelf, pSrcPos, cMillies);
3647 if (RT_SUCCESS(rc))
3648 rc = RTLockValidatorRecSharedCheckBlocking(pRec, hThreadSelf, pSrcPos, fRecursiveOk, cMillies,
3649 enmSleepState, fReallySleeping);
3650 return rc;
3651}
3652RT_EXPORT_SYMBOL(RTLockValidatorRecSharedCheckOrderAndBlocking);
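
/*
 * A sketch of a read-lock enter path using the combined check above; the
 * MYRWSEM structure and myRWSemNativeWaitRead() are assumed names, not
 * IPRT interfaces.
 *
 * @code
 *  int myRWSemEnterRead(MYRWSEM *pThis, RTMSINTERVAL cMillies, PCRTLOCKVALSRCPOS pSrcPos)
 *  {
 *      RTTHREAD hThreadSelf = RTThreadSelfAutoAdopt();
 *      int rc = RTLockValidatorRecSharedCheckOrderAndBlocking(&pThis->ValidatorRead, hThreadSelf, pSrcPos,
 *                                                             true, cMillies, RTTHREADSTATE_RW_READ, true);
 *      if (RT_FAILURE(rc))
 *          return rc;
 *      rc = myRWSemNativeWaitRead(pThis, cMillies);            // assumed native wait
 *      RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_READ);
 *      if (RT_SUCCESS(rc))
 *          RTLockValidatorRecSharedAddOwner(&pThis->ValidatorRead, hThreadSelf, pSrcPos);
 *      return rc;
 *  }
 * @endcode
 */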
3653
3654
3655/**
3656 * Allocates and initializes an owner entry for the shared lock record.
3657 *
3658 * @returns The new owner entry.
3659 * @param pRec The shared lock record.
3660 * @param pThreadSelf The calling thread and owner. Used for record
3661 * initialization and allocation.
3662 * @param pSrcPos The source position.
3663 */
3664DECLINLINE(PRTLOCKVALRECUNION)
3665rtLockValidatorRecSharedAllocOwner(PRTLOCKVALRECSHRD pRec, PRTTHREADINT pThreadSelf, PCRTLOCKVALSRCPOS pSrcPos)
3666{
3667 PRTLOCKVALRECUNION pEntry;
3668
3669 /*
3670 * Check if the thread has any statically allocated records we can easily
3671 * make use of.
3672 */
3673 unsigned iEntry = ASMBitFirstSetU32(ASMAtomicUoReadU32(&pThreadSelf->LockValidator.bmFreeShrdOwners));
3674 if ( iEntry > 0
3675 && ASMAtomicBitTestAndClear(&pThreadSelf->LockValidator.bmFreeShrdOwners, iEntry - 1))
3676 {
3677 pEntry = (PRTLOCKVALRECUNION)&pThreadSelf->LockValidator.aShrdOwners[iEntry - 1];
3678 Assert(!pEntry->ShrdOwner.fReserved);
3679 pEntry->ShrdOwner.fStaticAlloc = true;
3680 rtThreadGet(pThreadSelf);
3681 }
3682 else
3683 {
3684 pEntry = (PRTLOCKVALRECUNION)RTMemAlloc(sizeof(RTLOCKVALRECSHRDOWN));
3685 if (RT_UNLIKELY(!pEntry))
3686 return NULL;
3687 pEntry->ShrdOwner.fStaticAlloc = false;
3688 }
3689
3690 pEntry->Core.u32Magic = RTLOCKVALRECSHRDOWN_MAGIC;
3691 pEntry->ShrdOwner.cRecursion = 1;
3692 pEntry->ShrdOwner.fReserved = true;
3693 pEntry->ShrdOwner.hThread = pThreadSelf;
3694 pEntry->ShrdOwner.pDown = NULL;
3695 pEntry->ShrdOwner.pSharedRec = pRec;
3696#if HC_ARCH_BITS == 32
3697 pEntry->ShrdOwner.pvReserved = NULL;
3698#endif
3699 if (pSrcPos)
3700 pEntry->ShrdOwner.SrcPos = *pSrcPos;
3701 else
3702 rtLockValidatorSrcPosInit(&pEntry->ShrdOwner.SrcPos);
3703 return pEntry;
3704}
3705
3706
3707/**
3708 * Frees an owner entry allocated by rtLockValidatorRecSharedAllocOwner.
3709 *
3710 * @param pEntry The owner entry.
3711 */
3712DECLINLINE(void) rtLockValidatorRecSharedFreeOwner(PRTLOCKVALRECSHRDOWN pEntry)
3713{
3714 if (pEntry)
3715 {
3716 Assert(pEntry->Core.u32Magic == RTLOCKVALRECSHRDOWN_MAGIC);
3717 ASMAtomicWriteU32(&pEntry->Core.u32Magic, RTLOCKVALRECSHRDOWN_MAGIC_DEAD);
3718
3719 PRTTHREADINT pThread;
3720 ASMAtomicXchgHandle(&pEntry->hThread, NIL_RTTHREAD, &pThread);
3721
3722 Assert(pEntry->fReserved);
3723 pEntry->fReserved = false;
3724
3725 if (pEntry->fStaticAlloc)
3726 {
3727 AssertPtrReturnVoid(pThread);
3728 AssertReturnVoid(pThread->u32Magic == RTTHREADINT_MAGIC);
3729
3730 uintptr_t iEntry = pEntry - &pThread->LockValidator.aShrdOwners[0];
3731 AssertReleaseReturnVoid(iEntry < RT_ELEMENTS(pThread->LockValidator.aShrdOwners));
3732
3733 Assert(!ASMBitTest(&pThread->LockValidator.bmFreeShrdOwners, (int32_t)iEntry));
3734 ASMAtomicBitSet(&pThread->LockValidator.bmFreeShrdOwners, (int32_t)iEntry);
3735
3736 rtThreadRelease(pThread);
3737 }
3738 else
3739 {
3740 rtLockValidatorSerializeDestructEnter();
3741 rtLockValidatorSerializeDestructLeave();
3742
3743 RTMemFree(pEntry);
3744 }
3745 }
3746}
3747
3748
3749/**
3750 * Make more room in the table.
3751 *
3752 * @retval true on success
 3753 * @retval false if we're out of memory or ran into a bad race condition
 3754 * (probably a bug somewhere); the detection lock is no longer held.
3755 *
3756 * @param pShared The shared lock record.
3757 */
3758static bool rtLockValidatorRecSharedMakeRoom(PRTLOCKVALRECSHRD pShared)
3759{
3760 for (unsigned i = 0; i < 1000; i++)
3761 {
3762 /*
3763 * Switch to the other data access direction.
3764 */
3765 rtLockValidatorSerializeDetectionLeave();
3766 if (i >= 10)
3767 {
 3768 Assert(i != 10 && i != 100); /* debug canary: flag suspiciously long spinning */
 3769 RTThreadSleep(i >= 100); /* back off: 0 = just yield, 1 = sleep a millisecond */
3770 }
3771 rtLockValidatorSerializeDestructEnter();
3772
3773 /*
 3774 * Try to grab the privilege of reallocating the table.
3775 */
3776 if ( pShared->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
3777 && ASMAtomicCmpXchgBool(&pShared->fReallocating, true, false))
3778 {
3779 uint32_t cAllocated = pShared->cAllocated;
3780 if (cAllocated < pShared->cEntries)
3781 {
3782 /*
3783 * Ok, still not enough space. Reallocate the table.
3784 */
3785#if 0 /** @todo enable this after making sure growing works flawlessly. */
3786 uint32_t cInc = RT_ALIGN_32(pShared->cEntries - cAllocated, 16);
3787#else
3788 uint32_t cInc = RT_ALIGN_32(pShared->cEntries - cAllocated, 1);
3789#endif
3790 PRTLOCKVALRECSHRDOWN *papOwners;
3791 papOwners = (PRTLOCKVALRECSHRDOWN *)RTMemRealloc((void *)pShared->papOwners,
3792 (cAllocated + cInc) * sizeof(void *));
3793 if (!papOwners)
3794 {
3795 ASMAtomicWriteBool(&pShared->fReallocating, false);
3796 rtLockValidatorSerializeDestructLeave();
3797 /* RTMemRealloc will assert */
3798 return false;
3799 }
3800
3801 while (cInc-- > 0)
3802 {
3803 papOwners[cAllocated] = NULL;
3804 cAllocated++;
3805 }
3806
3807 ASMAtomicWritePtr(&pShared->papOwners, papOwners);
3808 ASMAtomicWriteU32(&pShared->cAllocated, cAllocated);
3809 }
3810 ASMAtomicWriteBool(&pShared->fReallocating, false);
3811 }
3812 rtLockValidatorSerializeDestructLeave();
3813
3814 rtLockValidatorSerializeDetectionEnter();
3815 if (RT_UNLIKELY(pShared->Core.u32Magic != RTLOCKVALRECSHRD_MAGIC))
3816 break;
3817
3818 if (pShared->cAllocated >= pShared->cEntries)
3819 return true;
3820 }
3821
3822 rtLockValidatorSerializeDetectionLeave();
3823 AssertFailed(); /* too many iterations or destroyed while racing. */
3824 return false;
3825}
3826
3827
3828/**
3829 * Adds an owner entry to a shared lock record.
3830 *
 3831 * @returns true on success, false on a serious race or if we're out of memory.
3832 * @param pShared The shared lock record.
3833 * @param pEntry The owner entry.
3834 */
3835DECLINLINE(bool) rtLockValidatorRecSharedAddOwner(PRTLOCKVALRECSHRD pShared, PRTLOCKVALRECSHRDOWN pEntry)
3836{
3837 rtLockValidatorSerializeDetectionEnter();
3838 if (RT_LIKELY(pShared->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC)) /* paranoia */
3839 {
3840 if ( ASMAtomicIncU32(&pShared->cEntries) > pShared->cAllocated /** @todo add fudge */
3841 && !rtLockValidatorRecSharedMakeRoom(pShared))
 3842 return false; /* the worker has left the lock */
3843
3844 PRTLOCKVALRECSHRDOWN volatile *papOwners = pShared->papOwners;
3845 uint32_t const cMax = pShared->cAllocated;
3846 for (unsigned i = 0; i < 100; i++)
3847 {
3848 for (uint32_t iEntry = 0; iEntry < cMax; iEntry++)
3849 {
3850 if (ASMAtomicCmpXchgPtr(&papOwners[iEntry], pEntry, NULL))
3851 {
3852 rtLockValidatorSerializeDetectionLeave();
3853 return true;
3854 }
3855 }
3856 Assert(i != 25);
3857 }
3858 AssertFailed();
3859 }
3860 rtLockValidatorSerializeDetectionLeave();
3861 return false;
3862}
3863
3864
3865/**
3866 * Remove an owner entry from a shared lock record and free it.
3867 *
3868 * @param pShared The shared lock record.
3869 * @param pEntry The owner entry to remove.
3870 * @param iEntry The last known index.
3871 */
3872DECLINLINE(void) rtLockValidatorRecSharedRemoveAndFreeOwner(PRTLOCKVALRECSHRD pShared, PRTLOCKVALRECSHRDOWN pEntry,
3873 uint32_t iEntry)
3874{
3875 /*
3876 * Remove it from the table.
3877 */
3878 rtLockValidatorSerializeDetectionEnter();
3879 AssertReturnVoidStmt(pShared->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, rtLockValidatorSerializeDetectionLeave());
3880 if (RT_UNLIKELY( iEntry >= pShared->cAllocated
3881 || !ASMAtomicCmpXchgPtr(&pShared->papOwners[iEntry], NULL, pEntry)))
3882 {
3883 /* this shouldn't happen yet... */
3884 AssertFailed();
3885 PRTLOCKVALRECSHRDOWN volatile *papOwners = pShared->papOwners;
3886 uint32_t const cMax = pShared->cAllocated;
3887 for (iEntry = 0; iEntry < cMax; iEntry++)
3888 if (ASMAtomicCmpXchgPtr(&papOwners[iEntry], NULL, pEntry))
3889 break;
3890 AssertReturnVoidStmt(iEntry < cMax, rtLockValidatorSerializeDetectionLeave());
3891 }
3892 uint32_t cNow = ASMAtomicDecU32(&pShared->cEntries);
3893 Assert(!(cNow & RT_BIT_32(31))); NOREF(cNow);
3894 rtLockValidatorSerializeDetectionLeave();
3895
3896 /*
3897 * Successfully removed, now free it.
3898 */
3899 rtLockValidatorRecSharedFreeOwner(pEntry);
3900}
3901
3902
3903RTDECL(void) RTLockValidatorRecSharedResetOwner(PRTLOCKVALRECSHRD pRec, RTTHREAD hThread, PCRTLOCKVALSRCPOS pSrcPos)
3904{
3905 AssertReturnVoid(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC);
3906 if (!pRec->fEnabled)
3907 return;
3908 AssertReturnVoid(hThread == NIL_RTTHREAD || hThread->u32Magic == RTTHREADINT_MAGIC);
3909 AssertReturnVoid(pRec->fSignaller);
3910
3911 /*
3912 * Free all current owners.
3913 */
3914 rtLockValidatorSerializeDetectionEnter();
3915 while (ASMAtomicUoReadU32(&pRec->cEntries) > 0)
3916 {
3917 AssertReturnVoidStmt(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, rtLockValidatorSerializeDetectionLeave());
3918 uint32_t iEntry = 0;
3919 uint32_t cEntries = pRec->cAllocated;
3920 PRTLOCKVALRECSHRDOWN volatile *papEntries = pRec->papOwners;
3921 while (iEntry < cEntries)
3922 {
3923 PRTLOCKVALRECSHRDOWN pEntry = ASMAtomicXchgPtrT(&papEntries[iEntry], NULL, PRTLOCKVALRECSHRDOWN);
3924 if (pEntry)
3925 {
3926 ASMAtomicDecU32(&pRec->cEntries);
3927 rtLockValidatorSerializeDetectionLeave();
3928
3929 rtLockValidatorRecSharedFreeOwner(pEntry);
3930
3931 rtLockValidatorSerializeDetectionEnter();
3932 if (ASMAtomicUoReadU32(&pRec->cEntries) == 0)
3933 break;
3934 cEntries = pRec->cAllocated;
3935 papEntries = pRec->papOwners;
3936 }
3937 iEntry++;
3938 }
3939 }
3940 rtLockValidatorSerializeDetectionLeave();
3941
3942 if (hThread != NIL_RTTHREAD)
3943 {
3944 /*
3945 * Allocate a new owner entry and insert it into the table.
3946 */
3947 PRTLOCKVALRECUNION pEntry = rtLockValidatorRecSharedAllocOwner(pRec, hThread, pSrcPos);
3948 if ( pEntry
3949 && !rtLockValidatorRecSharedAddOwner(pRec, &pEntry->ShrdOwner))
3950 rtLockValidatorRecSharedFreeOwner(&pEntry->ShrdOwner);
3951 }
3952}
3953RT_EXPORT_SYMBOL(RTLockValidatorRecSharedResetOwner);
3954
3955
3956RTDECL(void) RTLockValidatorRecSharedAddOwner(PRTLOCKVALRECSHRD pRec, RTTHREAD hThread, PCRTLOCKVALSRCPOS pSrcPos)
3957{
3958 AssertReturnVoid(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC);
3959 if (!pRec->fEnabled)
3960 return;
3961 if (hThread == NIL_RTTHREAD)
3962 {
3963 hThread = RTThreadSelfAutoAdopt();
3964 AssertReturnVoid(hThread != NIL_RTTHREAD);
3965 }
3966 AssertReturnVoid(hThread->u32Magic == RTTHREADINT_MAGIC);
3967
3968 /*
3969 * Recursive?
3970 *
 3971 * Note! This code could be optimized to avoid scanning the table on
 3972 * insert. However, that's annoying work that makes the code big,
 3973 * so it can wait till later.
3974 */
3975 PRTLOCKVALRECUNION pEntry = rtLockValidatorRecSharedFindOwner(pRec, hThread, NULL);
3976 if (pEntry)
3977 {
3978 Assert(!pRec->fSignaller);
3979 pEntry->ShrdOwner.cRecursion++;
3980 rtLockValidatorStackPushRecursion(hThread, pEntry, pSrcPos);
3981 return;
3982 }
3983
3984 /*
3985 * Allocate a new owner entry and insert it into the table.
3986 */
3987 pEntry = rtLockValidatorRecSharedAllocOwner(pRec, hThread, pSrcPos);
3988 if (pEntry)
3989 {
3990 if (rtLockValidatorRecSharedAddOwner(pRec, &pEntry->ShrdOwner))
3991 {
3992 if (!pRec->fSignaller)
3993 rtLockValidatorStackPush(hThread, pEntry);
3994 }
3995 else
3996 rtLockValidatorRecSharedFreeOwner(&pEntry->ShrdOwner);
3997 }
3998}
3999RT_EXPORT_SYMBOL(RTLockValidatorRecSharedAddOwner);
4000
4001
4002RTDECL(void) RTLockValidatorRecSharedRemoveOwner(PRTLOCKVALRECSHRD pRec, RTTHREAD hThread)
4003{
4004 AssertReturnVoid(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC);
4005 if (!pRec->fEnabled)
4006 return;
4007 if (hThread == NIL_RTTHREAD)
4008 {
4009 hThread = RTThreadSelfAutoAdopt();
4010 AssertReturnVoid(hThread != NIL_RTTHREAD);
4011 }
4012 AssertReturnVoid(hThread->u32Magic == RTTHREADINT_MAGIC);
4013
4014 /*
 4015 * Find the entry and hope it's a recursive one.
4016 */
4017 uint32_t iEntry = UINT32_MAX; /* shuts up gcc */
4018 PRTLOCKVALRECUNION pEntry = rtLockValidatorRecSharedFindOwner(pRec, hThread, &iEntry);
4019 AssertReturnVoid(pEntry);
4020 AssertReturnVoid(pEntry->ShrdOwner.cRecursion > 0);
4021
4022 uint32_t c = --pEntry->ShrdOwner.cRecursion;
4023 if (c == 0)
4024 {
4025 if (!pRec->fSignaller)
4026 rtLockValidatorStackPop(hThread, (PRTLOCKVALRECUNION)pEntry);
4027 rtLockValidatorRecSharedRemoveAndFreeOwner(pRec, &pEntry->ShrdOwner, iEntry);
4028 }
4029 else
4030 {
4031 Assert(!pRec->fSignaller);
4032 rtLockValidatorStackPopRecursion(hThread, pEntry);
4033 }
4034}
4035RT_EXPORT_SYMBOL(RTLockValidatorRecSharedRemoveOwner);
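
/*
 * AddOwner and RemoveOwner are meant to be used as a pair by lock code that
 * performs its own ownership checking; a short sketch (pThis is the assumed
 * MYRWSEM structure from the earlier sketches):
 *
 * @code
 *  // After a read request was granted without any need to block:
 *  RTLockValidatorRecSharedAddOwner(&pThis->ValidatorRead, NIL_RTTHREAD, pSrcPos);
 *
 *  // On a release path where ownership was already verified by the caller:
 *  RTLockValidatorRecSharedRemoveOwner(&pThis->ValidatorRead, NIL_RTTHREAD);
 * @endcode
 *
 * As the code above shows, passing NIL_RTTHREAD makes both functions resolve
 * (and if necessary adopt) the calling thread.
 */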
4036
4037
4038RTDECL(bool) RTLockValidatorRecSharedIsOwner(PRTLOCKVALRECSHRD pRec, RTTHREAD hThread)
4039{
4040 /* Validate and resolve input. */
4041 AssertReturn(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, false);
4042 if (!pRec->fEnabled)
4043 return false;
4044 if (hThread == NIL_RTTHREAD)
4045 {
4046 hThread = RTThreadSelfAutoAdopt();
4047 AssertReturn(hThread != NIL_RTTHREAD, false);
4048 }
4049 AssertReturn(hThread->u32Magic == RTTHREADINT_MAGIC, false);
4050
4051 /* Do the job. */
4052 PRTLOCKVALRECUNION pEntry = rtLockValidatorRecSharedFindOwner(pRec, hThread, NULL);
4053 return pEntry != NULL;
4054}
4055RT_EXPORT_SYMBOL(RTLockValidatorRecSharedIsOwner);
4056
4057
4058RTDECL(int) RTLockValidatorRecSharedCheckAndRelease(PRTLOCKVALRECSHRD pRec, RTTHREAD hThreadSelf)
4059{
4060 AssertReturn(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
4061 if (!pRec->fEnabled)
4062 return VINF_SUCCESS;
4063 if (hThreadSelf == NIL_RTTHREAD)
4064 {
4065 hThreadSelf = RTThreadSelfAutoAdopt();
4066 AssertReturn(hThreadSelf != NIL_RTTHREAD, VERR_SEM_LV_INTERNAL_ERROR);
4067 }
4068 Assert(hThreadSelf == RTThreadSelf());
4069 AssertReturn(hThreadSelf->u32Magic == RTTHREADINT_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
4070
4071 /*
4072 * Locate the entry for this thread in the table.
4073 */
4074 uint32_t iEntry = 0;
4075 PRTLOCKVALRECUNION pEntry = rtLockValidatorRecSharedFindOwner(pRec, hThreadSelf, &iEntry);
4076 if (RT_UNLIKELY(!pEntry))
4077 {
4078 rtLockValComplainFirst("Not owner (shared)!", NULL, hThreadSelf, (PRTLOCKVALRECUNION)pRec, true);
4079 rtLockValComplainPanic();
4080 return VERR_SEM_LV_NOT_OWNER;
4081 }
4082
4083 /*
4084 * Check the release order.
4085 */
4086 if ( pRec->hClass != NIL_RTLOCKVALCLASS
4087 && pRec->hClass->fStrictReleaseOrder
4088 && pRec->hClass->cMsMinOrder != RT_INDEFINITE_WAIT
4089 )
4090 {
4091 int rc = rtLockValidatorStackCheckReleaseOrder(hThreadSelf, (PRTLOCKVALRECUNION)pEntry);
4092 if (RT_FAILURE(rc))
4093 return rc;
4094 }
4095
4096 /*
4097 * Release the ownership or unwind a level of recursion.
4098 */
4099 Assert(pEntry->ShrdOwner.cRecursion > 0);
4100 uint32_t c = --pEntry->ShrdOwner.cRecursion;
4101 if (c == 0)
4102 {
4103 rtLockValidatorStackPop(hThreadSelf, pEntry);
4104 rtLockValidatorRecSharedRemoveAndFreeOwner(pRec, &pEntry->ShrdOwner, iEntry);
4105 }
4106 else
4107 rtLockValidatorStackPopRecursion(hThreadSelf, pEntry);
4108
4109 return VINF_SUCCESS;
 4110}
RT_EXPORT_SYMBOL(RTLockValidatorRecSharedCheckAndRelease);
4111
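
/*
 * A sketch of the matching read-unlock path, again assuming the MYRWSEM
 * structure and an assumed native release routine:
 *
 * @code
 *  int myRWSemLeaveRead(MYRWSEM *pThis)
 *  {
 *      // Verifies ownership and, for strict classes, the release order.
 *      int rc = RTLockValidatorRecSharedCheckAndRelease(&pThis->ValidatorRead, NIL_RTTHREAD);
 *      if (RT_FAILURE(rc))
 *          return rc;                          // e.g. VERR_SEM_LV_NOT_OWNER
 *      return myRWSemNativeReleaseRead(pThis); // assumed native release
 *  }
 * @endcode
 */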
4112
4113RTDECL(int) RTLockValidatorRecSharedCheckSignaller(PRTLOCKVALRECSHRD pRec, RTTHREAD hThreadSelf)
4114{
4115 AssertReturn(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
4116 if (!pRec->fEnabled)
4117 return VINF_SUCCESS;
4118 if (hThreadSelf == NIL_RTTHREAD)
4119 {
4120 hThreadSelf = RTThreadSelfAutoAdopt();
4121 AssertReturn(hThreadSelf != NIL_RTTHREAD, VERR_SEM_LV_INTERNAL_ERROR);
4122 }
4123 Assert(hThreadSelf == RTThreadSelf());
4124 AssertReturn(hThreadSelf->u32Magic == RTTHREADINT_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
4125
4126 /*
4127 * Locate the entry for this thread in the table.
4128 */
4129 uint32_t iEntry = 0;
4130 PRTLOCKVALRECUNION pEntry = rtLockValidatorRecSharedFindOwner(pRec, hThreadSelf, &iEntry);
4131 if (RT_UNLIKELY(!pEntry))
4132 {
4133 rtLockValComplainFirst("Invalid signaller!", NULL, hThreadSelf, (PRTLOCKVALRECUNION)pRec, true);
4134 rtLockValComplainPanic();
4135 return VERR_SEM_LV_NOT_SIGNALLER;
4136 }
4137 return VINF_SUCCESS;
 4138}
RT_EXPORT_SYMBOL(RTLockValidatorRecSharedCheckSignaller);
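
/*
 * For signaller records (fSignaller = true) the owner table tracks which
 * threads may signal; a sketch of the intended pattern for an event
 * semaphore (the pThis structure and call sites are assumed):
 *
 * @code
 *  // Restrict signalling to one thread; NIL_RTTHREAD would just clear all owners.
 *  RTLockValidatorRecSharedResetOwner(&pThis->ValidatorSignal, hSignallerThread, NULL);
 *
 *  // In the signal path, fail unless the caller is a registered signaller.
 *  int rc = RTLockValidatorRecSharedCheckSignaller(&pThis->ValidatorSignal, NIL_RTTHREAD);
 *  if (RT_FAILURE(rc))
 *      return rc;                              // VERR_SEM_LV_NOT_SIGNALLER
 * @endcode
 */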
4139
4140
4141RTDECL(int32_t) RTLockValidatorWriteLockGetCount(RTTHREAD Thread)
4142{
4143 if (Thread == NIL_RTTHREAD)
4144 return 0;
4145
4146 PRTTHREADINT pThread = rtThreadGet(Thread);
4147 if (!pThread)
4148 return VERR_INVALID_HANDLE;
4149 int32_t cWriteLocks = ASMAtomicReadS32(&pThread->LockValidator.cWriteLocks);
4150 rtThreadRelease(pThread);
4151 return cWriteLocks;
4152}
4153RT_EXPORT_SYMBOL(RTLockValidatorWriteLockGetCount);
4154
4155
4156RTDECL(void) RTLockValidatorWriteLockInc(RTTHREAD Thread)
4157{
4158 PRTTHREADINT pThread = rtThreadGet(Thread);
4159 AssertReturnVoid(pThread);
4160 ASMAtomicIncS32(&pThread->LockValidator.cWriteLocks);
4161 rtThreadRelease(pThread);
4162}
4163RT_EXPORT_SYMBOL(RTLockValidatorWriteLockInc);
4164
4165
4166RTDECL(void) RTLockValidatorWriteLockDec(RTTHREAD Thread)
4167{
4168 PRTTHREADINT pThread = rtThreadGet(Thread);
4169 AssertReturnVoid(pThread);
4170 ASMAtomicDecS32(&pThread->LockValidator.cWriteLocks);
4171 rtThreadRelease(pThread);
4172}
4173RT_EXPORT_SYMBOL(RTLockValidatorWriteLockDec);
4174
4175
4176RTDECL(int32_t) RTLockValidatorReadLockGetCount(RTTHREAD Thread)
4177{
4178 if (Thread == NIL_RTTHREAD)
4179 return 0;
4180
4181 PRTTHREADINT pThread = rtThreadGet(Thread);
4182 if (!pThread)
4183 return VERR_INVALID_HANDLE;
4184 int32_t cReadLocks = ASMAtomicReadS32(&pThread->LockValidator.cReadLocks);
4185 rtThreadRelease(pThread);
4186 return cReadLocks;
4187}
4188RT_EXPORT_SYMBOL(RTLockValidatorReadLockGetCount);
4189
4190
4191RTDECL(void) RTLockValidatorReadLockInc(RTTHREAD Thread)
4192{
4193 PRTTHREADINT pThread = rtThreadGet(Thread);
4194 Assert(pThread);
4195 ASMAtomicIncS32(&pThread->LockValidator.cReadLocks);
4196 rtThreadRelease(pThread);
4197}
4198RT_EXPORT_SYMBOL(RTLockValidatorReadLockInc);
4199
4200
4201RTDECL(void) RTLockValidatorReadLockDec(RTTHREAD Thread)
4202{
4203 PRTTHREADINT pThread = rtThreadGet(Thread);
4204 Assert(pThread);
4205 ASMAtomicDecS32(&pThread->LockValidator.cReadLocks);
4206 rtThreadRelease(pThread);
4207}
4208RT_EXPORT_SYMBOL(RTLockValidatorReadLockDec);
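
/*
 * The four counters above are per-thread bookkeeping that lock
 * implementations bracket around ownership; the assertion context below is
 * illustrative only:
 *
 * @code
 *  // A lock implementation pairs the counters around ownership:
 *  RTLockValidatorWriteLockInc(hOwnerThread);  // on acquisition
 *  RTLockValidatorWriteLockDec(hOwnerThread);  // on release
 *
 *  // A caller can assert it holds no read locks before a blocking wait:
 *  Assert(RTLockValidatorReadLockGetCount(RTThreadSelf()) == 0);
 * @endcode
 */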
4209
4210
4211RTDECL(void *) RTLockValidatorQueryBlocking(RTTHREAD hThread)
4212{
4213 void *pvLock = NULL;
4214 PRTTHREADINT pThread = rtThreadGet(hThread);
4215 if (pThread)
4216 {
4217 RTTHREADSTATE enmState = rtThreadGetState(pThread);
4218 if (RTTHREAD_IS_SLEEPING(enmState))
4219 {
4220 rtLockValidatorSerializeDetectionEnter();
4221
4222 enmState = rtThreadGetState(pThread);
4223 if (RTTHREAD_IS_SLEEPING(enmState))
4224 {
4225 PRTLOCKVALRECUNION pRec = rtLockValidatorReadRecUnionPtr(&pThread->LockValidator.pRec);
4226 if (pRec)
4227 {
4228 switch (pRec->Core.u32Magic)
4229 {
4230 case RTLOCKVALRECEXCL_MAGIC:
4231 pvLock = pRec->Excl.hLock;
4232 break;
4233
4234 case RTLOCKVALRECSHRDOWN_MAGIC:
4235 pRec = (PRTLOCKVALRECUNION)pRec->ShrdOwner.pSharedRec;
4236 if (!pRec || pRec->Core.u32Magic != RTLOCKVALRECSHRD_MAGIC)
4237 break;
 4238 case RTLOCKVALRECSHRD_MAGIC: /* (also reached by fall-through from the case above) */
4239 pvLock = pRec->Shared.hLock;
4240 break;
4241 }
4242 if (RTThreadGetState(pThread) != enmState)
4243 pvLock = NULL;
4244 }
4245 }
4246
4247 rtLockValidatorSerializeDetectionLeave();
4248 }
4249 rtThreadRelease(pThread);
4250 }
4251 return pvLock;
4252}
4253RT_EXPORT_SYMBOL(RTLockValidatorQueryBlocking);
4254
4255
4256RTDECL(bool) RTLockValidatorIsBlockedThreadInValidator(RTTHREAD hThread)
4257{
4258 bool fRet = false;
4259 PRTTHREADINT pThread = rtThreadGet(hThread);
4260 if (pThread)
4261 {
4262 fRet = ASMAtomicReadBool(&pThread->LockValidator.fInValidator);
4263 rtThreadRelease(pThread);
4264 }
4265 return fRet;
4266}
4267RT_EXPORT_SYMBOL(RTLockValidatorIsBlockedThreadInValidator);
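
/*
 * A watchdog-style sketch combining the two queries above; the logging and
 * the hStuckThread handle are illustrative assumptions:
 *
 * @code
 *  void *pvLock = RTLockValidatorQueryBlocking(hStuckThread);
 *  if (pvLock)
 *  {
 *      // Distinguish "busy inside deadlock detection" from a real wait.
 *      bool fInValidator = RTLockValidatorIsBlockedThreadInValidator(hStuckThread);
 *      RTAssertMsg2("thread %p blocked on lock %p (in validator: %RTbool)\n",
 *                   hStuckThread, pvLock, fInValidator);
 *  }
 * @endcode
 */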
4268
4269
4270RTDECL(bool) RTLockValidatorHoldsLocksInClass(RTTHREAD hCurrentThread, RTLOCKVALCLASS hClass)
4271{
4272 bool fRet = false;
4273 if (hCurrentThread == NIL_RTTHREAD)
4274 hCurrentThread = RTThreadSelf();
4275 else
4276 Assert(hCurrentThread == RTThreadSelf());
4277 PRTTHREADINT pThread = rtThreadGet(hCurrentThread);
4278 if (pThread)
4279 {
4280 if (hClass != NIL_RTLOCKVALCLASS)
4281 {
4282 PRTLOCKVALRECUNION pCur = rtLockValidatorReadRecUnionPtr(&pThread->LockValidator.pStackTop);
4283 while (VALID_PTR(pCur) && !fRet)
4284 {
4285 switch (pCur->Core.u32Magic)
4286 {
4287 case RTLOCKVALRECEXCL_MAGIC:
4288 fRet = pCur->Excl.hClass == hClass;
4289 pCur = rtLockValidatorReadRecUnionPtr(&pCur->Excl.pDown);
4290 break;
4291 case RTLOCKVALRECSHRDOWN_MAGIC:
4292 fRet = VALID_PTR(pCur->ShrdOwner.pSharedRec)
4293 && pCur->ShrdOwner.pSharedRec->hClass == hClass;
4294 pCur = rtLockValidatorReadRecUnionPtr(&pCur->ShrdOwner.pDown);
4295 break;
4296 case RTLOCKVALRECNEST_MAGIC:
4297 switch (pCur->Nest.pRec->Core.u32Magic)
4298 {
4299 case RTLOCKVALRECEXCL_MAGIC:
4300 fRet = pCur->Nest.pRec->Excl.hClass == hClass;
4301 break;
4302 case RTLOCKVALRECSHRDOWN_MAGIC:
 4303 fRet = VALID_PTR(pCur->Nest.pRec->ShrdOwner.pSharedRec)
4304 && pCur->Nest.pRec->ShrdOwner.pSharedRec->hClass == hClass;
4305 break;
4306 }
4307 pCur = rtLockValidatorReadRecUnionPtr(&pCur->Nest.pDown);
4308 break;
4309 default:
4310 pCur = NULL;
4311 break;
4312 }
4313 }
4314 }
4315
4316 rtThreadRelease(pThread);
4317 }
4318 return fRet;
4319}
4320RT_EXPORT_SYMBOL(RTLockValidatorHoldsLocksInClass);
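
/*
 * A sketch of the intended assertion-style use, with hClassDb standing in
 * for a class handle created elsewhere (e.g. by RTLockValidatorClassCreate):
 *
 * @code
 *  // Refuse to enter a long wait while holding any lock of the given class.
 *  Assert(!RTLockValidatorHoldsLocksInClass(NIL_RTTHREAD, hClassDb));
 * @endcode
 */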
4321
4322
4323RTDECL(bool) RTLockValidatorHoldsLocksInSubClass(RTTHREAD hCurrentThread, RTLOCKVALCLASS hClass, uint32_t uSubClass)
4324{
4325 bool fRet = false;
4326 if (hCurrentThread == NIL_RTTHREAD)
4327 hCurrentThread = RTThreadSelf();
4328 else
4329 Assert(hCurrentThread == RTThreadSelf());
4330 PRTTHREADINT pThread = rtThreadGet(hCurrentThread);
4331 if (pThread)
4332 {
4333 if (hClass != NIL_RTLOCKVALCLASS)
4334 {
4335 PRTLOCKVALRECUNION pCur = rtLockValidatorReadRecUnionPtr(&pThread->LockValidator.pStackTop);
4336 while (VALID_PTR(pCur) && !fRet)
4337 {
4338 switch (pCur->Core.u32Magic)
4339 {
4340 case RTLOCKVALRECEXCL_MAGIC:
4341 fRet = pCur->Excl.hClass == hClass
4342 && pCur->Excl.uSubClass == uSubClass;
4343 pCur = rtLockValidatorReadRecUnionPtr(&pCur->Excl.pDown);
4344 break;
4345 case RTLOCKVALRECSHRDOWN_MAGIC:
4346 fRet = VALID_PTR(pCur->ShrdOwner.pSharedRec)
4347 && pCur->ShrdOwner.pSharedRec->hClass == hClass
4348 && pCur->ShrdOwner.pSharedRec->uSubClass == uSubClass;
4349 pCur = rtLockValidatorReadRecUnionPtr(&pCur->ShrdOwner.pDown);
4350 break;
4351 case RTLOCKVALRECNEST_MAGIC:
4352 switch (pCur->Nest.pRec->Core.u32Magic)
4353 {
4354 case RTLOCKVALRECEXCL_MAGIC:
4355 fRet = pCur->Nest.pRec->Excl.hClass == hClass
4356 && pCur->Nest.pRec->Excl.uSubClass == uSubClass;
4357 break;
4358 case RTLOCKVALRECSHRDOWN_MAGIC:
 4359 fRet = VALID_PTR(pCur->Nest.pRec->ShrdOwner.pSharedRec)
4360 && pCur->Nest.pRec->ShrdOwner.pSharedRec->hClass == hClass
4361 && pCur->Nest.pRec->ShrdOwner.pSharedRec->uSubClass == uSubClass;
4362 break;
4363 }
4364 pCur = rtLockValidatorReadRecUnionPtr(&pCur->Nest.pDown);
4365 break;
4366 default:
4367 pCur = NULL;
4368 break;
4369 }
4370 }
4371 }
4372
4373 rtThreadRelease(pThread);
4374 }
4375 return fRet;
4376}
4377RT_EXPORT_SYMBOL(RTLockValidatorHoldsLocksInSubClass);
4378
4379
4380RTDECL(bool) RTLockValidatorSetEnabled(bool fEnabled)
4381{
4382 return ASMAtomicXchgBool(&g_fLockValidatorEnabled, fEnabled);
4383}
4384RT_EXPORT_SYMBOL(RTLockValidatorSetEnabled);
4385
4386
4387RTDECL(bool) RTLockValidatorIsEnabled(void)
4388{
4389 return ASMAtomicUoReadBool(&g_fLockValidatorEnabled);
4390}
4391RT_EXPORT_SYMBOL(RTLockValidatorIsEnabled);
4392
4393
4394RTDECL(bool) RTLockValidatorSetQuiet(bool fQuiet)
4395{
4396 return ASMAtomicXchgBool(&g_fLockValidatorQuiet, fQuiet);
4397}
4398RT_EXPORT_SYMBOL(RTLockValidatorSetQuiet);
4399
4400
4401RTDECL(bool) RTLockValidatorIsQuiet(void)
4402{
4403 return ASMAtomicUoReadBool(&g_fLockValidatorQuiet);
4404}
4405RT_EXPORT_SYMBOL(RTLockValidatorIsQuiet);
4406
4407
4408RTDECL(bool) RTLockValidatorSetMayPanic(bool fMayPanic)
4409{
4410 return ASMAtomicXchgBool(&g_fLockValidatorMayPanic, fMayPanic);
4411}
4412RT_EXPORT_SYMBOL(RTLockValidatorSetMayPanic);
4413
4414
4415RTDECL(bool) RTLockValidatorMayPanic(void)
4416{
4417 return ASMAtomicUoReadBool(&g_fLockValidatorMayPanic);
4418}
4419RT_EXPORT_SYMBOL(RTLockValidatorMayPanic);
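
/*
 * The four toggles above are process global; a typical strict debugging
 * setup (the placement at process init is illustrative):
 *
 * @code
 *  RTLockValidatorSetEnabled(true);    // turn validation on
 *  RTLockValidatorSetQuiet(false);     // complain verbosely
 *  RTLockValidatorSetMayPanic(true);   // assert on violations
 * @endcode
 *
 * Each setter returns the previous value, so callers can save and restore
 * the configuration.
 */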
4420