VirtualBox

source: vbox/trunk/src/VBox/Runtime/common/misc/lockvalidator.cpp@45221

Last change on this file since 45221 was 45110, checked in by vboxsync on 2013-03-20:

Raw conversion of semrw-lockless-generic.cpp into RTCritSectEx.

1/* $Id: lockvalidator.cpp 45110 2013-03-20 18:17:29Z vboxsync $ */
2/** @file
3 * IPRT - Lock Validator.
4 */
5
6/*
7 * Copyright (C) 2009-2012 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 */
26
27/*******************************************************************************
28* Header Files *
29*******************************************************************************/
30#include <iprt/lockvalidator.h>
31#include "internal/iprt.h"
32
33#include <iprt/asm.h>
34#include <iprt/assert.h>
35#include <iprt/env.h>
36#include <iprt/err.h>
37#include <iprt/mem.h>
38#include <iprt/once.h>
39#include <iprt/semaphore.h>
40#include <iprt/string.h>
41#include <iprt/thread.h>
42
43#include "internal/lockvalidator.h"
44#include "internal/magics.h"
45#include "internal/strhash.h"
46#include "internal/thread.h"
47
48
49/*******************************************************************************
50* Defined Constants And Macros *
51*******************************************************************************/
52/** Macro that asserts that a pointer is aligned correctly.
53 * Only used when fighting bugs. */
54#if 1
55# define RTLOCKVAL_ASSERT_PTR_ALIGN(p) \
56 AssertMsg(!((uintptr_t)(p) & (sizeof(uintptr_t) - 1)), ("%p\n", (p)));
57#else
58# define RTLOCKVAL_ASSERT_PTR_ALIGN(p) do { } while (0)
59#endif
60
61/** Hashes the class handle (pointer) into an apPriorLocksHash index. */
62#define RTLOCKVALCLASS_HASH(hClass) \
63 ( ((uintptr_t)(hClass) >> 6 ) \
64 % ( RT_SIZEOFMEMB(RTLOCKVALCLASSINT, apPriorLocksHash) \
65 / sizeof(PRTLOCKVALCLASSREF)) )
66
67/** The max value for RTLOCKVALCLASSINT::cRefs. */
68#define RTLOCKVALCLASS_MAX_REFS UINT32_C(0xffff0000)
69/** The max value for RTLOCKVALCLASSREF::cLookups. */
70#define RTLOCKVALCLASSREF_MAX_LOOKUPS UINT32_C(0xfffe0000)
71/** The absolute max value for RTLOCKVALCLASSREF::cLookups at which it will
72 * be set back to RTLOCKVALCLASSREF_MAX_LOOKUPS. */
73#define RTLOCKVALCLASSREF_MAX_LOOKUPS_FIX UINT32_C(0xffff0000)
74
75
76/** @def RTLOCKVAL_WITH_RECURSION_RECORDS
77 * Enable recursion records. */
78#if defined(IN_RING3) || defined(DOXYGEN_RUNNING)
79# define RTLOCKVAL_WITH_RECURSION_RECORDS 1
80#endif
81
82/** @def RTLOCKVAL_WITH_VERBOSE_DUMPS
83 * Enables some extra verbosity in the lock dumping. */
84#if defined(DOXYGEN_RUNNING)
85# define RTLOCKVAL_WITH_VERBOSE_DUMPS
86#endif
87
88/** @def RTLOCKVAL_WITH_CLASS_HASH_STATS
89 * Enables collection of prior-class hash lookup statistics, dumping them when
90 * complaining about the class. */
91#if defined(DEBUG) || defined(DOXYGEN_RUNNING)
92# define RTLOCKVAL_WITH_CLASS_HASH_STATS
93#endif
94
95
96/*******************************************************************************
97* Structures and Typedefs *
98*******************************************************************************/
99/**
100 * Deadlock detection stack entry.
101 */
102typedef struct RTLOCKVALDDENTRY
103{
104 /** The current record. */
105 PRTLOCKVALRECUNION pRec;
106 /** The current entry number if pRec is a shared one. */
107 uint32_t iEntry;
108 /** The thread state of the thread we followed to get to pFirstSibling.
109 * This is only used for validating a deadlock stack. */
110 RTTHREADSTATE enmState;
111 /** The thread we followed to get to pFirstSibling.
112 * This is only used for validating a deadlock stack. */
113 PRTTHREADINT pThread;
114 /** What pThread is waiting on, i.e. where we entered the circular list of
115 * siblings. This is used for validating a deadlock stack as well as
116 * terminating the sibling walk. */
117 PRTLOCKVALRECUNION pFirstSibling;
118} RTLOCKVALDDENTRY;
119
120
121/**
122 * Deadlock detection stack.
123 */
124typedef struct RTLOCKVALDDSTACK
125{
126 /** The number of stack entries. */
127 uint32_t c;
128 /** The stack entries. */
129 RTLOCKVALDDENTRY a[32];
130} RTLOCKVALDDSTACK;
131/** Pointer to a deadlock detection stack. */
132typedef RTLOCKVALDDSTACK *PRTLOCKVALDDSTACK;
133
134
135/**
136 * Reference to another class.
137 */
138typedef struct RTLOCKVALCLASSREF
139{
140 /** The class. */
141 RTLOCKVALCLASS hClass;
142 /** The number of lookups of this class. */
143 uint32_t volatile cLookups;
144 /** Indicates whether the entry was added automatically during order checking
145 * (true) or manually via the API (false). */
146 bool fAutodidacticism;
147 /** Reserved / explicit alignment padding. */
148 bool afReserved[3];
149} RTLOCKVALCLASSREF;
150/** Pointer to a class reference. */
151typedef RTLOCKVALCLASSREF *PRTLOCKVALCLASSREF;
152
153
154/** Pointer to a chunk of class references. */
155typedef struct RTLOCKVALCLASSREFCHUNK *PRTLOCKVALCLASSREFCHUNK;
156/**
157 * Chunk of class references.
158 */
159typedef struct RTLOCKVALCLASSREFCHUNK
160{
161 /** Array of refs. */
162#if 0 /** @todo for testing allocation of new chunks. */
163 RTLOCKVALCLASSREF aRefs[ARCH_BITS == 32 ? 10 : 8];
164#else
165 RTLOCKVALCLASSREF aRefs[2];
166#endif
167 /** Pointer to the next chunk. */
168 PRTLOCKVALCLASSREFCHUNK volatile pNext;
169} RTLOCKVALCLASSREFCHUNK;
170
171
172/**
173 * Lock class.
174 */
175typedef struct RTLOCKVALCLASSINT
176{
177 /** AVL node core. */
178 AVLLU32NODECORE Core;
179 /** Magic value (RTLOCKVALCLASS_MAGIC). */
180 uint32_t volatile u32Magic;
181 /** Reference counter. See RTLOCKVALCLASS_MAX_REFS. */
182 uint32_t volatile cRefs;
184 /** Whether the class is allowed to teach itself new locking order rules. */
184 bool fAutodidact;
185 /** Whether to allow recursion. */
186 bool fRecursionOk;
187 /** Strict release order. */
188 bool fStrictReleaseOrder;
189 /** Whether this class is in the tree. */
190 bool fInTree;
191 /** Donate a reference to the next retainer. This is a hack to make
192 * RTLockValidatorClassCreateUnique work. */
193 bool volatile fDonateRefToNextRetainer;
194 /** Reserved future use / explicit alignment. */
195 bool afReserved[3];
196 /** The minimum wait interval for which we do deadlock detection
197 * (milliseconds). */
198 RTMSINTERVAL cMsMinDeadlock;
199 /** The minimum wait interval for which we do order checks (milliseconds). */
200 RTMSINTERVAL cMsMinOrder;
201 /** More padding. */
202 uint32_t au32Reserved[ARCH_BITS == 32 ? 5 : 2];
203 /** Classes that may be taken prior to this one.
204 * This is a linked list where each node contains a chunk of class references
205 * so that we reduce the number of allocations and keep the data localized. */
206 RTLOCKVALCLASSREFCHUNK PriorLocks;
207 /** Hash table containing frequently encountered prior locks. */
208 PRTLOCKVALCLASSREF apPriorLocksHash[17];
209 /** Class name. (Allocated after the end of the block as usual.) */
210 char const *pszName;
211 /** Where this class was created.
212 * This is mainly used for finding automatically created lock classes.
213 * @remarks The strings are stored after this structure so we won't crash
214 * if the class lives longer than the module (dll/so/dylib) that
215 * spawned it. */
216 RTLOCKVALSRCPOS CreatePos;
217#ifdef RTLOCKVAL_WITH_CLASS_HASH_STATS
218 /** Hash hits. */
219 uint32_t volatile cHashHits;
220 /** Hash misses. */
221 uint32_t volatile cHashMisses;
222#endif
223} RTLOCKVALCLASSINT;
224AssertCompileSize(AVLLU32NODECORE, ARCH_BITS == 32 ? 20 : 32);
225AssertCompileMemberOffset(RTLOCKVALCLASSINT, PriorLocks, 64);
226
227
228/*******************************************************************************
229* Global Variables *
230*******************************************************************************/
231/** Serializing object destruction and deadlock detection.
232 *
233 * This makes sure that none of the memory examined by the deadlock detection
234 * code will become invalid (reused for other purposes or made not present)
235 * while the detection is in progress.
236 *
237 * NS: RTLOCKVALREC*, RTTHREADINT and RTLOCKVALRECSHRD::papOwners destruction.
238 * EW: Deadlock detection and some related activities.
239 */
240static RTSEMXROADS g_hLockValidatorXRoads = NIL_RTSEMXROADS;
241/** Serializing class tree insert and lookups. */
242static RTSEMRW g_hLockValClassTreeRWLock = NIL_RTSEMRW;
243/** Class tree. */
244static PAVLLU32NODECORE g_LockValClassTree = NULL;
245/** Critical section serializing the teaching of new rules to the classes. */
246static RTCRITSECT g_LockValClassTeachCS;
247
248/** Whether the lock validator is enabled or disabled.
249 * Only applies to new locks. */
250static bool volatile g_fLockValidatorEnabled = true;
251/** Set if the lock validator is quiet. */
252#ifdef RT_STRICT
253static bool volatile g_fLockValidatorQuiet = false;
254#else
255static bool volatile g_fLockValidatorQuiet = true;
256#endif
257/** Set if the lock validator may panic. */
258#ifdef RT_STRICT
259static bool volatile g_fLockValidatorMayPanic = true;
260#else
261static bool volatile g_fLockValidatorMayPanic = false;
262#endif
263/** Whether to return an error status on wrong locking order. */
264static bool volatile g_fLockValSoftWrongOrder = false;
265
266
267/*******************************************************************************
268* Internal Functions *
269*******************************************************************************/
270static void rtLockValidatorClassDestroy(RTLOCKVALCLASSINT *pClass);
271static uint32_t rtLockValidatorStackDepth(PRTTHREADINT pThread);
272
273
274/**
275 * Lazy initialization of the lock validator globals.
276 */
277static void rtLockValidatorLazyInit(void)
278{
279 static uint32_t volatile s_fInitializing = false;
280 if (ASMAtomicCmpXchgU32(&s_fInitializing, true, false))
281 {
282 /*
283 * The locks.
284 */
285 if (!RTCritSectIsInitialized(&g_LockValClassTeachCS))
286 RTCritSectInitEx(&g_LockValClassTeachCS, RTCRITSECT_FLAGS_NO_LOCK_VAL, NIL_RTLOCKVALCLASS,
287 RTLOCKVAL_SUB_CLASS_ANY, "RTLockVal-Teach");
288
289 if (g_hLockValClassTreeRWLock == NIL_RTSEMRW)
290 {
291 RTSEMRW hSemRW;
292 int rc = RTSemRWCreateEx(&hSemRW, RTSEMRW_FLAGS_NO_LOCK_VAL, NIL_RTLOCKVALCLASS, RTLOCKVAL_SUB_CLASS_ANY, "RTLockVal-Tree");
293 if (RT_SUCCESS(rc))
294 ASMAtomicWriteHandle(&g_hLockValClassTreeRWLock, hSemRW);
295 }
296
297 if (g_hLockValidatorXRoads == NIL_RTSEMXROADS)
298 {
299 RTSEMXROADS hXRoads;
300 int rc = RTSemXRoadsCreate(&hXRoads);
301 if (RT_SUCCESS(rc))
302 ASMAtomicWriteHandle(&g_hLockValidatorXRoads, hXRoads);
303 }
304
305#ifdef IN_RING3
306 /*
307 * Check the environment for our config variables.
308 */
309 if (RTEnvExist("IPRT_LOCK_VALIDATOR_ENABLED"))
310 ASMAtomicWriteBool(&g_fLockValidatorEnabled, true);
311 if (RTEnvExist("IPRT_LOCK_VALIDATOR_DISABLED"))
312 ASMAtomicWriteBool(&g_fLockValidatorEnabled, false);
313
314 if (RTEnvExist("IPRT_LOCK_VALIDATOR_MAY_PANIC"))
315 ASMAtomicWriteBool(&g_fLockValidatorMayPanic, true);
316 if (RTEnvExist("IPRT_LOCK_VALIDATOR_MAY_NOT_PANIC"))
317 ASMAtomicWriteBool(&g_fLockValidatorMayPanic, false);
318
319 if (RTEnvExist("IPRT_LOCK_VALIDATOR_NOT_QUIET"))
320 ASMAtomicWriteBool(&g_fLockValidatorQuiet, false);
321 if (RTEnvExist("IPRT_LOCK_VALIDATOR_QUIET"))
322 ASMAtomicWriteBool(&g_fLockValidatorQuiet, true);
323
324 if (RTEnvExist("IPRT_LOCK_VALIDATOR_STRICT_ORDER"))
325 ASMAtomicWriteBool(&g_fLockValSoftWrongOrder, false);
326 if (RTEnvExist("IPRT_LOCK_VALIDATOR_SOFT_ORDER"))
327 ASMAtomicWriteBool(&g_fLockValSoftWrongOrder, true);
328#endif
329
330 /*
331 * Register cleanup
332 */
333 /** @todo register some cleanup callback if we care. */
334
335 ASMAtomicWriteU32(&s_fInitializing, false);
336 }
337}
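/*
 * Editor's sketch (not part of the original file): how the ring-3 environment
 * variables read above are typically exercised.  RTEnvSet() is a real IPRT
 * API; the wrapper function itself is hypothetical.  Note that only the
 * existence of each variable is checked (RTEnvExist), not its value.
 */
#if 0
static void exampleConfigureLockValidator(void)
{
    /* Must run before lazy init, i.e. before the first validated lock is
       created, since the flags only apply to locks created afterwards. */
    RTEnvSet("IPRT_LOCK_VALIDATOR_ENABLED",       "1"); /* turn validation on */
    RTEnvSet("IPRT_LOCK_VALIDATOR_NOT_QUIET",     "1"); /* print complaints */
    RTEnvSet("IPRT_LOCK_VALIDATOR_MAY_NOT_PANIC", "1"); /* complain, don't assert */
    RTEnvSet("IPRT_LOCK_VALIDATOR_SOFT_ORDER",    "1"); /* warn on bad lock order */
}
#endif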
338
339
340
341/** Wrapper around ASMAtomicReadPtr. */
342DECL_FORCE_INLINE(PRTLOCKVALRECUNION) rtLockValidatorReadRecUnionPtr(PRTLOCKVALRECUNION volatile *ppRec)
343{
344 PRTLOCKVALRECUNION p = ASMAtomicReadPtrT(ppRec, PRTLOCKVALRECUNION);
345 RTLOCKVAL_ASSERT_PTR_ALIGN(p);
346 return p;
347}
348
349
350/** Wrapper around ASMAtomicWritePtr. */
351DECL_FORCE_INLINE(void) rtLockValidatorWriteRecUnionPtr(PRTLOCKVALRECUNION volatile *ppRec, PRTLOCKVALRECUNION pRecNew)
352{
353 RTLOCKVAL_ASSERT_PTR_ALIGN(pRecNew);
354 ASMAtomicWritePtr(ppRec, pRecNew);
355}
356
357
358/** Wrapper around ASMAtomicReadPtr. */
359DECL_FORCE_INLINE(PRTTHREADINT) rtLockValidatorReadThreadHandle(RTTHREAD volatile *phThread)
360{
361 PRTTHREADINT p = ASMAtomicReadPtrT(phThread, PRTTHREADINT);
362 RTLOCKVAL_ASSERT_PTR_ALIGN(p);
363 return p;
364}
365
366
367/** Wrapper around ASMAtomicUoReadPtr. */
368DECL_FORCE_INLINE(PRTLOCKVALRECSHRDOWN) rtLockValidatorUoReadSharedOwner(PRTLOCKVALRECSHRDOWN volatile *ppOwner)
369{
370 PRTLOCKVALRECSHRDOWN p = ASMAtomicUoReadPtrT(ppOwner, PRTLOCKVALRECSHRDOWN);
371 RTLOCKVAL_ASSERT_PTR_ALIGN(p);
372 return p;
373}
374
375
376/**
377 * Reads a volatile thread handle field and returns the thread name.
378 *
379 * @returns Thread name (read only).
380 * @param phThread The thread handle field.
381 */
382static const char *rtLockValidatorNameThreadHandle(RTTHREAD volatile *phThread)
383{
384 PRTTHREADINT pThread = rtLockValidatorReadThreadHandle(phThread);
385 if (!pThread)
386 return "<NIL>";
387 if (!VALID_PTR(pThread))
388 return "<INVALID>";
389 if (pThread->u32Magic != RTTHREADINT_MAGIC)
390 return "<BAD-THREAD-MAGIC>";
391 return pThread->szName;
392}
393
394
395/**
396 * Launch a simple assertion-like complaint with a panic.
397 *
398 * @param pszFile Where from - file.
399 * @param iLine Where from - line.
400 * @param pszFunction Where from - function.
401 * @param pszWhat What we're complaining about.
402 * @param ... Format arguments.
403 */
404static void rtLockValComplain(RT_SRC_POS_DECL, const char *pszWhat, ...)
405{
406 if (!ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
407 {
408 RTAssertMsg1Weak("RTLockValidator", iLine, pszFile, pszFunction);
409 va_list va;
410 va_start(va, pszWhat);
411 RTAssertMsg2WeakV(pszWhat, va);
412 va_end(va);
413 }
414 if (!ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
415 RTAssertPanic();
416}
417
418
419/**
420 * Describes the class.
421 *
422 * @param pszPrefix Message prefix.
423 * @param pClass The class to complain about.
424 * @param uSubClass My sub-class.
425 * @param fVerbose Verbose description including relations to other
426 * classes.
427 */
428static void rtLockValComplainAboutClass(const char *pszPrefix, RTLOCKVALCLASSINT *pClass, uint32_t uSubClass, bool fVerbose)
429{
430 if (ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
431 return;
432
433 /* Stringify the sub-class. */
434 const char *pszSubClass;
435 char szSubClass[32];
436 if (uSubClass < RTLOCKVAL_SUB_CLASS_USER)
437 switch (uSubClass)
438 {
439 case RTLOCKVAL_SUB_CLASS_NONE: pszSubClass = "none"; break;
440 case RTLOCKVAL_SUB_CLASS_ANY: pszSubClass = "any"; break;
441 default:
442 RTStrPrintf(szSubClass, sizeof(szSubClass), "invl-%u", uSubClass);
443 pszSubClass = szSubClass;
444 break;
445 }
446 else
447 {
448 RTStrPrintf(szSubClass, sizeof(szSubClass), "%u", uSubClass);
449 pszSubClass = szSubClass;
450 }
451
452 /* Validate the class pointer. */
453 if (!VALID_PTR(pClass))
454 {
455 RTAssertMsg2AddWeak("%sbad class=%p sub-class=%s\n", pszPrefix, pClass, pszSubClass);
456 return;
457 }
458 if (pClass->u32Magic != RTLOCKVALCLASS_MAGIC)
459 {
460 RTAssertMsg2AddWeak("%sbad class=%p magic=%#x sub-class=%s\n", pszPrefix, pClass, pClass->u32Magic, pszSubClass);
461 return;
462 }
463
464 /* OK, dump the class info. */
465 RTAssertMsg2AddWeak("%sclass=%p %s created={%Rbn(%u) %Rfn %p} sub-class=%s\n", pszPrefix,
466 pClass,
467 pClass->pszName,
468 pClass->CreatePos.pszFile,
469 pClass->CreatePos.uLine,
470 pClass->CreatePos.pszFunction,
471 pClass->CreatePos.uId,
472 pszSubClass);
473 if (fVerbose)
474 {
475 uint32_t i = 0;
476 uint32_t cPrinted = 0;
477 for (PRTLOCKVALCLASSREFCHUNK pChunk = &pClass->PriorLocks; pChunk; pChunk = pChunk->pNext)
478 for (unsigned j = 0; j < RT_ELEMENTS(pChunk->aRefs); j++, i++)
479 {
480 RTLOCKVALCLASSINT *pCurClass = pChunk->aRefs[j].hClass;
481 if (pCurClass != NIL_RTLOCKVALCLASS)
482 {
483 RTAssertMsg2AddWeak("%s%s #%02u: %s, %s, %u lookup%s\n", pszPrefix,
484 cPrinted == 0
485 ? "Prior:"
486 : " ",
487 i,
488 pCurClass->pszName,
489 pChunk->aRefs[j].fAutodidacticism
490 ? "autodidactic"
491 : "manually ",
492 pChunk->aRefs[j].cLookups,
493 pChunk->aRefs[j].cLookups != 1 ? "s" : "");
494 cPrinted++;
495 }
496 }
497 if (!cPrinted)
498 RTAssertMsg2AddWeak("%sPrior: none\n", pszPrefix);
499#ifdef RTLOCKVAL_WITH_CLASS_HASH_STATS
500 RTAssertMsg2AddWeak("%sHash Stats: %u hits, %u misses\n", pszPrefix, pClass->cHashHits, pClass->cHashMisses);
501#endif
502 }
503 else
504 {
505 uint32_t cPrinted = 0;
506 for (PRTLOCKVALCLASSREFCHUNK pChunk = &pClass->PriorLocks; pChunk; pChunk = pChunk->pNext)
507 for (unsigned j = 0; j < RT_ELEMENTS(pChunk->aRefs); j++)
508 {
509 RTLOCKVALCLASSINT *pCurClass = pChunk->aRefs[j].hClass;
510 if (pCurClass != NIL_RTLOCKVALCLASS)
511 {
512 if ((cPrinted % 10) == 0)
513 RTAssertMsg2AddWeak("%sPrior classes: %s%s", pszPrefix, pCurClass->pszName,
514 pChunk->aRefs[j].fAutodidacticism ? "*" : "");
515 else if ((cPrinted % 10) != 9)
516 RTAssertMsg2AddWeak(", %s%s", pCurClass->pszName,
517 pChunk->aRefs[j].fAutodidacticism ? "*" : "");
518 else
519 RTAssertMsg2AddWeak(", %s%s\n", pCurClass->pszName,
520 pChunk->aRefs[j].fAutodidacticism ? "*" : "");
521 cPrinted++;
522 }
523 }
524 if (!cPrinted)
525 RTAssertMsg2AddWeak("%sPrior classes: none\n", pszPrefix);
526 else if ((cPrinted % 10) != 0)
527 RTAssertMsg2AddWeak("\n");
528 }
529}
530
531
532/**
533 * Helper for getting the class name.
534 * @returns Class name string.
535 * @param pClass The class.
536 */
537static const char *rtLockValComplainGetClassName(RTLOCKVALCLASSINT *pClass)
538{
539 if (!pClass)
540 return "<nil-class>";
541 if (!VALID_PTR(pClass))
542 return "<bad-class-ptr>";
543 if (pClass->u32Magic != RTLOCKVALCLASS_MAGIC)
544 return "<bad-class-magic>";
545 if (!pClass->pszName)
546 return "<no-class-name>";
547 return pClass->pszName;
548}
549
550/**
551 * Formats the sub-class.
552 *
553 * @returns Stringified sub-class.
554 * @param uSubClass The sub-class.
555 * @param pszBuf Buffer of at least 32 bytes.
556 */
557static const char *rtLockValComplainGetSubClassName(uint32_t uSubClass, char *pszBuf)
558{
559 if (uSubClass < RTLOCKVAL_SUB_CLASS_USER)
560 switch (uSubClass)
561 {
562 case RTLOCKVAL_SUB_CLASS_NONE: return "none";
563 case RTLOCKVAL_SUB_CLASS_ANY: return "any";
564 default:
565 RTStrPrintf(pszBuf, 32, "invl-%u", uSubClass);
566 break;
567 }
568 else
569 RTStrPrintf(pszBuf, 32, "%x", uSubClass);
570 return pszBuf;
571}
572
573
574/**
575 * Helper for rtLockValComplainAboutLock.
576 */
577DECL_FORCE_INLINE(void) rtLockValComplainAboutLockHlp(const char *pszPrefix, PRTLOCKVALRECUNION pRec, const char *pszSuffix,
578 uint32_t u32Magic, PCRTLOCKVALSRCPOS pSrcPos, uint32_t cRecursion,
579 const char *pszFrameType)
580{
581 char szBuf[32];
582 switch (u32Magic)
583 {
584 case RTLOCKVALRECEXCL_MAGIC:
585#ifdef RTLOCKVAL_WITH_VERBOSE_DUMPS
586 RTAssertMsg2AddWeak("%s%p %s xrec=%p own=%s r=%u cls=%s/%s pos={%Rbn(%u) %Rfn %p} [x%s]%s", pszPrefix,
587 pRec->Excl.hLock, pRec->Excl.szName, pRec,
588 rtLockValidatorNameThreadHandle(&pRec->Excl.hThread), cRecursion,
589 rtLockValComplainGetClassName(pRec->Excl.hClass),
590 rtLockValComplainGetSubClassName(pRec->Excl.uSubClass, szBuf),
591 pSrcPos->pszFile, pSrcPos->uLine, pSrcPos->pszFunction, pSrcPos->uId,
592 pszFrameType, pszSuffix);
593#else
594 RTAssertMsg2AddWeak("%s%p %s own=%s r=%u cls=%s/%s pos={%Rbn(%u) %Rfn %p} [x%s]%s", pszPrefix,
595 pRec->Excl.hLock, pRec->Excl.szName,
596 rtLockValidatorNameThreadHandle(&pRec->Excl.hThread), cRecursion,
597 rtLockValComplainGetClassName(pRec->Excl.hClass),
598 rtLockValComplainGetSubClassName(pRec->Excl.uSubClass, szBuf),
599 pSrcPos->pszFile, pSrcPos->uLine, pSrcPos->pszFunction, pSrcPos->uId,
600 pszFrameType, pszSuffix);
601#endif
602 break;
603
604 case RTLOCKVALRECSHRD_MAGIC:
605 RTAssertMsg2AddWeak("%ss %p %s srec=%p cls=%s/%s [s%s]%s", pszPrefix,
606 pRec->Shared.hLock, pRec->Shared.szName, pRec,
607 rtLockValComplainGetClassName(pRec->Shared.hClass),
608 rtLockValComplainGetSubClassName(pRec->Shared.uSubClass, szBuf),
609 pszFrameType, pszSuffix);
610 break;
611
612 case RTLOCKVALRECSHRDOWN_MAGIC:
613 {
614 PRTLOCKVALRECSHRD pShared = pRec->ShrdOwner.pSharedRec;
615 if ( VALID_PTR(pShared)
616 && pShared->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC)
617#ifdef RTLOCKVAL_WITH_VERBOSE_DUMPS
618 RTAssertMsg2AddWeak("%s%p %s srec=%p trec=%p own=%s r=%u cls=%s/%s pos={%Rbn(%u) %Rfn %p} [o%s]%s", pszPrefix,
619 pShared->hLock, pShared->szName, pShared,
620 pRec, rtLockValidatorNameThreadHandle(&pRec->ShrdOwner.hThread), cRecursion,
621 rtLockValComplainGetClassName(pShared->hClass),
622 rtLockValComplainGetSubClassName(pShared->uSubClass, szBuf),
623 pSrcPos->pszFile, pSrcPos->uLine, pSrcPos->pszFunction, pSrcPos->uId,
624 pszFrameType, pszSuffix);
625#else
626 RTAssertMsg2AddWeak("%s%p %s own=%s r=%u cls=%s/%s pos={%Rbn(%u) %Rfn %p} [o%s]%s", pszPrefix,
627 pShared->hLock, pShared->szName,
628 rtLockValidatorNameThreadHandle(&pRec->ShrdOwner.hThread), cRecursion,
629 rtLockValComplainGetClassName(pShared->hClass),
630 rtLockValComplainGetSubClassName(pShared->uSubClass, szBuf),
631 pSrcPos->pszFile, pSrcPos->uLine, pSrcPos->pszFunction, pSrcPos->uId,
632 pszFrameType, pszSuffix);
633#endif
634 else
635 RTAssertMsg2AddWeak("%sbad srec=%p trec=%p own=%s r=%u pos={%Rbn(%u) %Rfn %p} [x%s]%s", pszPrefix,
636 pShared,
637 pRec, rtLockValidatorNameThreadHandle(&pRec->ShrdOwner.hThread), cRecursion,
638 pSrcPos->pszFile, pSrcPos->uLine, pSrcPos->pszFunction, pSrcPos->uId,
639 pszFrameType, pszSuffix);
640 break;
641 }
642
643 default:
644 AssertMsgFailed(("%#x\n", u32Magic));
645 }
646}
647
648
649/**
650 * Describes the lock.
651 *
652 * @param pszPrefix Message prefix.
653 * @param pRec The lock record we're working on.
654 * @param pszSuffix Message suffix.
655 */
656static void rtLockValComplainAboutLock(const char *pszPrefix, PRTLOCKVALRECUNION pRec, const char *pszSuffix)
657{
658#ifdef RTLOCKVAL_WITH_RECURSION_RECORDS
659# define FIX_REC(r) 1
660#else
661# define FIX_REC(r) (r)
662#endif
663 if ( VALID_PTR(pRec)
664 && !ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
665 {
666 switch (pRec->Core.u32Magic)
667 {
668 case RTLOCKVALRECEXCL_MAGIC:
669 rtLockValComplainAboutLockHlp(pszPrefix, pRec, pszSuffix, RTLOCKVALRECEXCL_MAGIC,
670 &pRec->Excl.SrcPos, FIX_REC(pRec->Excl.cRecursion), "");
671 break;
672
673 case RTLOCKVALRECSHRD_MAGIC:
674 rtLockValComplainAboutLockHlp(pszPrefix, pRec, pszSuffix, RTLOCKVALRECSHRD_MAGIC, NULL, 0, "");
675 break;
676
677 case RTLOCKVALRECSHRDOWN_MAGIC:
678 rtLockValComplainAboutLockHlp(pszPrefix, pRec, pszSuffix, RTLOCKVALRECSHRDOWN_MAGIC,
679 &pRec->ShrdOwner.SrcPos, FIX_REC(pRec->ShrdOwner.cRecursion), "");
680 break;
681
682 case RTLOCKVALRECNEST_MAGIC:
683 {
684 PRTLOCKVALRECUNION pRealRec = pRec->Nest.pRec;
685 uint32_t u32Magic;
686 if ( VALID_PTR(pRealRec)
687 && ( (u32Magic = pRealRec->Core.u32Magic) == RTLOCKVALRECEXCL_MAGIC
688 || u32Magic == RTLOCKVALRECSHRD_MAGIC
689 || u32Magic == RTLOCKVALRECSHRDOWN_MAGIC)
690 )
691 rtLockValComplainAboutLockHlp(pszPrefix, pRealRec, pszSuffix, u32Magic,
692 &pRec->Nest.SrcPos, pRec->Nest.cRecursion, "/r");
693 else
694 RTAssertMsg2AddWeak("%sbad rrec=%p nrec=%p r=%u pos={%Rbn(%u) %Rfn %p}%s", pszPrefix,
695 pRealRec, pRec, pRec->Nest.cRecursion,
696 pRec->Nest.SrcPos.pszFile, pRec->Nest.SrcPos.uLine, pRec->Nest.SrcPos.pszFunction, pRec->Nest.SrcPos.uId,
697 pszSuffix);
698 break;
699 }
700
701 default:
702 RTAssertMsg2AddWeak("%spRec=%p u32Magic=%#x (bad)%s", pszPrefix, pRec, pRec->Core.u32Magic, pszSuffix);
703 break;
704 }
705 }
706#undef FIX_REC
707}
708
709
710/**
711 * Dump the lock stack.
712 *
713 * @param pThread The thread whose lock stack we're going to dump.
714 * @param cchIndent The indentation in chars.
715 * @param cMinFrames The minimum number of frames to consider
716 * dumping.
717 * @param pHighlightRec Record that should be marked specially in the
718 * dump.
719 */
720static void rtLockValComplainAboutLockStack(PRTTHREADINT pThread, unsigned cchIndent, uint32_t cMinFrames,
721 PRTLOCKVALRECUNION pHighlightRec)
722{
723 if ( VALID_PTR(pThread)
724 && !ASMAtomicUoReadBool(&g_fLockValidatorQuiet)
725 && pThread->u32Magic == RTTHREADINT_MAGIC
726 )
727 {
728 uint32_t cEntries = rtLockValidatorStackDepth(pThread);
729 if (cEntries >= cMinFrames)
730 {
731 RTAssertMsg2AddWeak("%*s---- start of lock stack for %p %s - %u entr%s ----\n", cchIndent, "",
732 pThread, pThread->szName, cEntries, cEntries == 1 ? "y" : "ies");
733 PRTLOCKVALRECUNION pCur = rtLockValidatorReadRecUnionPtr(&pThread->LockValidator.pStackTop);
734 for (uint32_t i = 0; VALID_PTR(pCur); i++)
735 {
736 char szPrefix[80];
737 RTStrPrintf(szPrefix, sizeof(szPrefix), "%*s#%02u: ", cchIndent, "", i);
738 rtLockValComplainAboutLock(szPrefix, pCur, pHighlightRec != pCur ? "\n" : " (*)\n");
739 switch (pCur->Core.u32Magic)
740 {
741 case RTLOCKVALRECEXCL_MAGIC: pCur = rtLockValidatorReadRecUnionPtr(&pCur->Excl.pDown); break;
742 case RTLOCKVALRECSHRDOWN_MAGIC: pCur = rtLockValidatorReadRecUnionPtr(&pCur->ShrdOwner.pDown); break;
743 case RTLOCKVALRECNEST_MAGIC: pCur = rtLockValidatorReadRecUnionPtr(&pCur->Nest.pDown); break;
744 default:
745 RTAssertMsg2AddWeak("%*s<bad stack frame>\n", cchIndent, "");
746 pCur = NULL;
747 break;
748 }
749 }
750 RTAssertMsg2AddWeak("%*s---- end of lock stack ----\n", cchIndent, "");
751 }
752 }
753}
754
755
756/**
757 * Launch the initial complaint.
758 *
759 * @param pszWhat What we're complaining about.
760 * @param pSrcPos Where we are complaining from, as it were.
761 * @param pThreadSelf The calling thread.
762 * @param pRec The main lock involved. Can be NULL.
763 * @param fDumpStack Whether to dump the lock stack (true) or not
764 * (false).
765 */
766static void rtLockValComplainFirst(const char *pszWhat, PCRTLOCKVALSRCPOS pSrcPos, PRTTHREADINT pThreadSelf,
767 PRTLOCKVALRECUNION pRec, bool fDumpStack)
768{
769 if (!ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
770 {
771 ASMCompilerBarrier(); /* paranoia */
772 RTAssertMsg1Weak("RTLockValidator", pSrcPos ? pSrcPos->uLine : 0, pSrcPos ? pSrcPos->pszFile : NULL, pSrcPos ? pSrcPos->pszFunction : NULL);
773 if (pSrcPos && pSrcPos->uId)
774 RTAssertMsg2Weak("%s [uId=%p thrd=%s]\n", pszWhat, pSrcPos->uId, VALID_PTR(pThreadSelf) ? pThreadSelf->szName : "<NIL>");
775 else
776 RTAssertMsg2Weak("%s [thrd=%s]\n", pszWhat, VALID_PTR(pThreadSelf) ? pThreadSelf->szName : "<NIL>");
777 rtLockValComplainAboutLock("Lock: ", pRec, "\n");
778 if (fDumpStack)
779 rtLockValComplainAboutLockStack(pThreadSelf, 0, 1, pRec);
780 }
781}
782
783
784/**
785 * Continue the complaint with additional details.
786 *
787 * @param pszFormat Format string.
788 * @param ... Format arguments.
789 */
790static void rtLockValComplainMore(const char *pszFormat, ...)
791{
792 if (!ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
793 {
794 va_list va;
795 va_start(va, pszFormat);
796 RTAssertMsg2AddWeakV(pszFormat, va);
797 va_end(va);
798 }
799}
800
801
802/**
803 * Raise a panic if enabled.
804 */
805static void rtLockValComplainPanic(void)
806{
807 if (ASMAtomicUoReadBool(&g_fLockValidatorMayPanic))
808 RTAssertPanic();
809}
810
811
812/**
813 * Copy a source position record.
814 *
815 * @param pDst The destination.
816 * @param pSrc The source. Can be NULL.
817 */
818DECL_FORCE_INLINE(void) rtLockValidatorSrcPosCopy(PRTLOCKVALSRCPOS pDst, PCRTLOCKVALSRCPOS pSrc)
819{
820 if (pSrc)
821 {
822 ASMAtomicUoWriteU32(&pDst->uLine, pSrc->uLine);
823 ASMAtomicUoWritePtr(&pDst->pszFile, pSrc->pszFile);
824 ASMAtomicUoWritePtr(&pDst->pszFunction, pSrc->pszFunction);
825 ASMAtomicUoWritePtr((void * volatile *)&pDst->uId, (void *)pSrc->uId);
826 }
827 else
828 {
829 ASMAtomicUoWriteU32(&pDst->uLine, 0);
830 ASMAtomicUoWriteNullPtr(&pDst->pszFile);
831 ASMAtomicUoWriteNullPtr(&pDst->pszFunction);
832 ASMAtomicUoWritePtr(&pDst->uId, (RTHCUINTPTR)0);
833 }
834}
835
836
837/**
838 * Init a source position record.
839 *
840 * @param pSrcPos The source position record.
841 */
842DECL_FORCE_INLINE(void) rtLockValidatorSrcPosInit(PRTLOCKVALSRCPOS pSrcPos)
843{
844 pSrcPos->pszFile = NULL;
845 pSrcPos->pszFunction = NULL;
846 pSrcPos->uId = 0;
847 pSrcPos->uLine = 0;
848#if HC_ARCH_BITS == 64
849 pSrcPos->u32Padding = 0;
850#endif
851}
852
853
854/**
855 * Hashes the specified source position.
856 *
857 * @returns Hash.
858 * @param pSrcPos The source position record.
859 */
860static uint32_t rtLockValidatorSrcPosHash(PCRTLOCKVALSRCPOS pSrcPos)
861{
862 uint32_t uHash;
863 if ( ( pSrcPos->pszFile
864 || pSrcPos->pszFunction)
865 && pSrcPos->uLine != 0)
866 {
867 uHash = 0;
868 if (pSrcPos->pszFile)
869 uHash = sdbmInc(pSrcPos->pszFile, uHash);
870 if (pSrcPos->pszFunction)
871 uHash = sdbmInc(pSrcPos->pszFunction, uHash);
872 uHash += pSrcPos->uLine;
873 }
874 else
875 {
876 Assert(pSrcPos->uId);
877 uHash = (uint32_t)pSrcPos->uId;
878 }
879
880 return uHash;
881}
882
883
884/**
885 * Compares two source positions.
886 *
887 * @returns 0 if equal, < 0 if pSrcPos1 is smaller than pSrcPos2, > 0
888 * otherwise.
889 * @param pSrcPos1 The first source position.
890 * @param pSrcPos2 The second source position.
891 */
892static int rtLockValidatorSrcPosCompare(PCRTLOCKVALSRCPOS pSrcPos1, PCRTLOCKVALSRCPOS pSrcPos2)
893{
894 if (pSrcPos1->uLine != pSrcPos2->uLine)
895 return pSrcPos1->uLine < pSrcPos2->uLine ? -1 : 1;
896
897 int iDiff = RTStrCmp(pSrcPos1->pszFile, pSrcPos2->pszFile);
898 if (iDiff != 0)
899 return iDiff;
900
901 iDiff = RTStrCmp(pSrcPos1->pszFunction, pSrcPos2->pszFunction);
902 if (iDiff != 0)
903 return iDiff;
904
905 if (pSrcPos1->uId != pSrcPos2->uId)
906 return pSrcPos1->uId < pSrcPos2->uId ? -1 : 1;
907 return 0;
908}
909
910
911
912/**
913 * Serializes destruction of RTLOCKVALREC* and RTTHREADINT structures.
914 */
915DECLHIDDEN(void) rtLockValidatorSerializeDestructEnter(void)
916{
917 RTSEMXROADS hXRoads = g_hLockValidatorXRoads;
918 if (hXRoads != NIL_RTSEMXROADS)
919 RTSemXRoadsNSEnter(hXRoads);
920}
921
922
923/**
924 * Call after rtLockValidatorSerializeDestructEnter.
925 */
926DECLHIDDEN(void) rtLockValidatorSerializeDestructLeave(void)
927{
928 RTSEMXROADS hXRoads = g_hLockValidatorXRoads;
929 if (hXRoads != NIL_RTSEMXROADS)
930 RTSemXRoadsNSLeave(hXRoads);
931}
932
933
934/**
935 * Serializes deadlock detection against destruction of the objects being
936 * inspected.
937 */
938DECLINLINE(void) rtLockValidatorSerializeDetectionEnter(void)
939{
940 RTSEMXROADS hXRoads = g_hLockValidatorXRoads;
941 if (hXRoads != NIL_RTSEMXROADS)
942 RTSemXRoadsEWEnter(hXRoads);
943}
944
945
946/**
947 * Call after rtLockValidatorSerializeDetectionEnter.
948 */
949DECLHIDDEN(void) rtLockValidatorSerializeDetectionLeave(void)
950{
951 RTSEMXROADS hXRoads = g_hLockValidatorXRoads;
952 if (hXRoads != NIL_RTSEMXROADS)
953 RTSemXRoadsEWLeave(hXRoads);
954}
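/*
 * Editor's sketch (not part of the original file): the pairing discipline for
 * the crossroads serialization above.  Record destruction takes the
 * north/south direction while deadlock detection takes east/west; each
 * direction may run concurrently with itself but not with the other.  The
 * destructor below is hypothetical.
 */
#if 0
static void exampleRecordDestroy(PRTLOCKVALRECEXCL pRec)
{
    rtLockValidatorSerializeDestructEnter();
    /* ... invalidate the magic, unlink siblings, free the memory ... */
    rtLockValidatorSerializeDestructLeave();
}
#endif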
955
956
957/**
958 * Initializes the per thread lock validator data.
959 *
960 * @param pPerThread The data.
961 */
962DECLHIDDEN(void) rtLockValidatorInitPerThread(RTLOCKVALPERTHREAD *pPerThread)
963{
964 pPerThread->bmFreeShrdOwners = UINT32_MAX;
965
966 /* ASSUMES the rest has already been zeroed. */
967 Assert(pPerThread->pRec == NULL);
968 Assert(pPerThread->cWriteLocks == 0);
969 Assert(pPerThread->cReadLocks == 0);
970 Assert(pPerThread->fInValidator == false);
971 Assert(pPerThread->pStackTop == NULL);
972}
973
974
975/**
976 * Delete the per thread lock validator data.
977 *
978 * @param pPerThread The data.
979 */
980DECLHIDDEN(void) rtLockValidatorDeletePerThread(RTLOCKVALPERTHREAD *pPerThread)
981{
982 /*
983 * Check that the thread doesn't own any locks at this time.
984 */
985 if (pPerThread->pStackTop)
986 {
987 rtLockValComplainFirst("Thread terminating owning locks!", NULL,
988 RT_FROM_MEMBER(pPerThread, RTTHREADINT, LockValidator),
989 pPerThread->pStackTop, true);
990 rtLockValComplainPanic();
991 }
992
993 /*
994 * Free the recursion records.
995 */
996 PRTLOCKVALRECNEST pCur = pPerThread->pFreeNestRecs;
997 pPerThread->pFreeNestRecs = NULL;
998 while (pCur)
999 {
1000 PRTLOCKVALRECNEST pNext = pCur->pNextFree;
1001 RTMemFree(pCur);
1002 pCur = pNext;
1003 }
1004}
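/*
 * Editor's sketch (not part of the original file): the situation the check
 * above complains about -- a thread exiting while still holding a validated
 * lock.  The thread function and critical section are hypothetical;
 * FNRTTHREAD is the real IPRT thread procedure type.
 */
#if 0
static DECLCALLBACK(int) exampleLeakyThread(RTTHREAD hThreadSelf, void *pvUser)
{
    PRTCRITSECT pCritSect = (PRTCRITSECT)pvUser;
    RTCritSectEnter(pCritSect);
    return VINF_SUCCESS; /* no RTCritSectLeave -> "Thread terminating owning locks!" */
}
#endif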
1005
1006RTDECL(int) RTLockValidatorClassCreateEx(PRTLOCKVALCLASS phClass, PCRTLOCKVALSRCPOS pSrcPos,
1007 bool fAutodidact, bool fRecursionOk, bool fStrictReleaseOrder,
1008 RTMSINTERVAL cMsMinDeadlock, RTMSINTERVAL cMsMinOrder,
1009 const char *pszNameFmt, ...)
1010{
1011 va_list va;
1012 va_start(va, pszNameFmt);
1013 int rc = RTLockValidatorClassCreateExV(phClass, pSrcPos, fAutodidact, fRecursionOk, fStrictReleaseOrder,
1014 cMsMinDeadlock, cMsMinOrder, pszNameFmt, va);
1015 va_end(va);
1016 return rc;
1017}
1018
1019
1020RTDECL(int) RTLockValidatorClassCreateExV(PRTLOCKVALCLASS phClass, PCRTLOCKVALSRCPOS pSrcPos,
1021 bool fAutodidact, bool fRecursionOk, bool fStrictReleaseOrder,
1022 RTMSINTERVAL cMsMinDeadlock, RTMSINTERVAL cMsMinOrder,
1023 const char *pszNameFmt, va_list va)
1024{
1025 Assert(cMsMinDeadlock >= 1);
1026 Assert(cMsMinOrder >= 1);
1027 AssertPtr(pSrcPos);
1028
1029 /*
1030 * Format the name and calc its length.
1031 */
1032 size_t cbName;
1033 char szName[32];
1034 if (pszNameFmt && *pszNameFmt)
1035 cbName = RTStrPrintfV(szName, sizeof(szName), pszNameFmt, va) + 1;
1036 else
1037 {
1038 static uint32_t volatile s_cAnonymous = 0;
1039 uint32_t i = ASMAtomicIncU32(&s_cAnonymous);
1040 cbName = RTStrPrintf(szName, sizeof(szName), "anon-%u", i - 1) + 1;
1041 }
1042
1043 /*
1044 * Figure out the file and function name lengths and allocate memory for
1045 * it all.
1046 */
1047 size_t const cbFile = pSrcPos->pszFile ? strlen(pSrcPos->pszFile) + 1 : 0;
1048 size_t const cbFunction = pSrcPos->pszFunction ? strlen(pSrcPos->pszFunction) + 1 : 0;
1049 RTLOCKVALCLASSINT *pThis = (RTLOCKVALCLASSINT *)RTMemAllocVar(sizeof(*pThis) + cbFile + cbFunction + cbName);
1050 if (!pThis)
1051 return VERR_NO_MEMORY;
1052
1053 /*
1054 * Initialize the class data.
1055 */
1056 pThis->Core.Key = rtLockValidatorSrcPosHash(pSrcPos);
1057 pThis->Core.uchHeight = 0;
1058 pThis->Core.pLeft = NULL;
1059 pThis->Core.pRight = NULL;
1060 pThis->Core.pList = NULL;
1061 pThis->u32Magic = RTLOCKVALCLASS_MAGIC;
1062 pThis->cRefs = 1;
1063 pThis->fAutodidact = fAutodidact;
1064 pThis->fRecursionOk = fRecursionOk;
1065 pThis->fStrictReleaseOrder = fStrictReleaseOrder;
1066 pThis->fInTree = false;
1067 pThis->fDonateRefToNextRetainer = false;
1068 pThis->afReserved[0] = false;
1069 pThis->afReserved[1] = false;
1070 pThis->afReserved[2] = false;
1071 pThis->cMsMinDeadlock = cMsMinDeadlock;
1072 pThis->cMsMinOrder = cMsMinOrder;
1073 for (unsigned i = 0; i < RT_ELEMENTS(pThis->au32Reserved); i++)
1074 pThis->au32Reserved[i] = 0;
1075 for (unsigned i = 0; i < RT_ELEMENTS(pThis->PriorLocks.aRefs); i++)
1076 {
1077 pThis->PriorLocks.aRefs[i].hClass = NIL_RTLOCKVALCLASS;
1078 pThis->PriorLocks.aRefs[i].cLookups = 0;
1079 pThis->PriorLocks.aRefs[i].fAutodidacticism = false;
1080 pThis->PriorLocks.aRefs[i].afReserved[0] = false;
1081 pThis->PriorLocks.aRefs[i].afReserved[1] = false;
1082 pThis->PriorLocks.aRefs[i].afReserved[2] = false;
1083 }
1084 pThis->PriorLocks.pNext = NULL;
1085 for (unsigned i = 0; i < RT_ELEMENTS(pThis->apPriorLocksHash); i++)
1086 pThis->apPriorLocksHash[i] = NULL;
1087 char *pszDst = (char *)(pThis + 1);
1088 pThis->pszName = (char *)memcpy(pszDst, szName, cbName);
1089 pszDst += cbName;
1090 rtLockValidatorSrcPosCopy(&pThis->CreatePos, pSrcPos);
1091 pThis->CreatePos.pszFile = pSrcPos->pszFile ? (char *)memcpy(pszDst, pSrcPos->pszFile, cbFile) : NULL;
1092 pszDst += cbFile;
1093 pThis->CreatePos.pszFunction = pSrcPos->pszFunction ? (char *)memcpy(pszDst, pSrcPos->pszFunction, cbFunction) : NULL;
1094 Assert(rtLockValidatorSrcPosHash(&pThis->CreatePos) == pThis->Core.Key);
1095#ifdef RTLOCKVAL_WITH_CLASS_HASH_STATS
1096 pThis->cHashHits = 0;
1097 pThis->cHashMisses = 0;
1098#endif
1099
1100 *phClass = pThis;
1101 return VINF_SUCCESS;
1102}
1103
1104
1105RTDECL(int) RTLockValidatorClassCreate(PRTLOCKVALCLASS phClass, bool fAutodidact, RT_SRC_POS_DECL, const char *pszNameFmt, ...)
1106{
1107 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_POS_NO_ID();
1108 va_list va;
1109 va_start(va, pszNameFmt);
1110 int rc = RTLockValidatorClassCreateExV(phClass, &SrcPos,
1111 fAutodidact, true /*fRecursionOk*/, false /*fStrictReleaseOrder*/,
1112 1 /*cMsMinDeadlock*/, 1 /*cMsMinOrder*/,
1113 pszNameFmt, va);
1114 va_end(va);
1115 return rc;
1116}
1117
1118
1119/**
1120 * Creates a new lock validator class with a reference that is consumed by the
1121 * first call to RTLockValidatorClassRetain.
1122 *
1123 * This is tailored for use in the parameter list of a semaphore constructor.
1124 *
1125 * @returns Class handle with a reference that is automatically consumed by the
1126 * first retainer. NIL_RTLOCKVALCLASS if we run into trouble.
1127 *
1128 * @param pszFile The source position of the call, file.
1129 * @param iLine The source position of the call, line.
1130 * @param pszFunction The source position of the call, function.
1131 * @param pszNameFmt Class name format string, optional (NULL). Max
1132 * length is 32 bytes.
1133 * @param ... Format string arguments.
1134 */
1135RTDECL(RTLOCKVALCLASS) RTLockValidatorClassCreateUnique(RT_SRC_POS_DECL, const char *pszNameFmt, ...)
1136{
1137 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_POS_NO_ID();
1138 RTLOCKVALCLASSINT *pClass;
1139 va_list va;
1140 va_start(va, pszNameFmt);
1141 int rc = RTLockValidatorClassCreateExV(&pClass, &SrcPos,
1142 true /*fAutodidact*/, true /*fRecursionOk*/, false /*fStrictReleaseOrder*/,
1143 1 /*cMsMinDeadlock*/, 1 /*cMsMinOrder*/,
1144 pszNameFmt, va);
1145 va_end(va);
1146 if (RT_FAILURE(rc))
1147 return NIL_RTLOCKVALCLASS;
1148 ASMAtomicWriteBool(&pClass->fDonateRefToNextRetainer, true); /* see rtLockValidatorClassRetain */
1149 return pClass;
1150}
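/*
 * Editor's sketch (not part of the original file): using the donated
 * reference in a constructor's parameter list, as intended above.
 * RTSemMutexCreateEx is a real IPRT API; the lock name is made up.
 */
#if 0
RTSEMMUTEX hMtx;
int rc = RTSemMutexCreateEx(&hMtx, 0 /*fFlags*/,
                            RTLockValidatorClassCreateUnique(RT_SRC_POS, "MyDevice"),
                            RTLOCKVAL_SUB_CLASS_NONE, "MyDevice");
/* The semaphore's first retain consumes the donated reference, so no
   explicit RTLockValidatorClassRelease is needed for the class here. */
#endif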
1151
1152
1153/**
1154 * Internal class retainer.
1155 * @returns The new reference count.
1156 * @param pClass The class.
1157 */
1158DECL_FORCE_INLINE(uint32_t) rtLockValidatorClassRetain(RTLOCKVALCLASSINT *pClass)
1159{
1160 uint32_t cRefs = ASMAtomicIncU32(&pClass->cRefs);
1161 if (cRefs > RTLOCKVALCLASS_MAX_REFS)
1162 ASMAtomicWriteU32(&pClass->cRefs, RTLOCKVALCLASS_MAX_REFS);
1163 else if ( cRefs == 2
1164 && ASMAtomicXchgBool(&pClass->fDonateRefToNextRetainer, false))
1165 cRefs = ASMAtomicDecU32(&pClass->cRefs);
1166 return cRefs;
1167}
1168
1169
1170/**
1171 * Validates and retains a lock validator class.
1172 *
1173 * @returns @a hClass on success, NIL_RTLOCKVALCLASS on failure.
1174 * @param hClass The class handle. NIL_RTLOCKVALCLASS is ok.
1175 */
1176DECL_FORCE_INLINE(RTLOCKVALCLASS) rtLockValidatorClassValidateAndRetain(RTLOCKVALCLASS hClass)
1177{
1178 if (hClass == NIL_RTLOCKVALCLASS)
1179 return hClass;
1180 AssertPtrReturn(hClass, NIL_RTLOCKVALCLASS);
1181 AssertReturn(hClass->u32Magic == RTLOCKVALCLASS_MAGIC, NIL_RTLOCKVALCLASS);
1182 rtLockValidatorClassRetain(hClass);
1183 return hClass;
1184}
1185
1186
1187/**
1188 * Internal class releaser.
1189 * @returns The new reference count.
1190 * @param pClass The class.
1191 */
1192DECLINLINE(uint32_t) rtLockValidatorClassRelease(RTLOCKVALCLASSINT *pClass)
1193{
1194 uint32_t cRefs = ASMAtomicDecU32(&pClass->cRefs);
1195 if (cRefs + 1 == RTLOCKVALCLASS_MAX_REFS)
1196 ASMAtomicWriteU32(&pClass->cRefs, RTLOCKVALCLASS_MAX_REFS);
1197 else if (!cRefs)
1198 rtLockValidatorClassDestroy(pClass);
1199 return cRefs;
1200}
1201
1202
1203/**
1204 * Destroys a class once there are no more references to it.
1205 *
1206 * @param pClass The class.
1207 */
1208static void rtLockValidatorClassDestroy(RTLOCKVALCLASSINT *pClass)
1209{
1210 AssertReturnVoid(!pClass->fInTree);
1211 ASMAtomicWriteU32(&pClass->u32Magic, RTLOCKVALCLASS_MAGIC_DEAD);
1212
1213 PRTLOCKVALCLASSREFCHUNK pChunk = &pClass->PriorLocks;
1214 while (pChunk)
1215 {
1216 for (uint32_t i = 0; i < RT_ELEMENTS(pChunk->aRefs); i++)
1217 {
1218 RTLOCKVALCLASSINT *pClass2 = pChunk->aRefs[i].hClass;
1219 if (pClass2 != NIL_RTLOCKVALCLASS)
1220 {
1221 pChunk->aRefs[i].hClass = NIL_RTLOCKVALCLASS;
1222 rtLockValidatorClassRelease(pClass2);
1223 }
1224 }
1225
1226 PRTLOCKVALCLASSREFCHUNK pNext = pChunk->pNext;
1227 pChunk->pNext = NULL;
1228 if (pChunk != &pClass->PriorLocks)
1229 RTMemFree(pChunk);
1230 pChunk = pNext;
1231 }
1232
1233 RTMemFree(pClass);
1234}
1235
1236
1237RTDECL(RTLOCKVALCLASS) RTLockValidatorClassFindForSrcPos(PRTLOCKVALSRCPOS pSrcPos)
1238{
1239 if (g_hLockValClassTreeRWLock == NIL_RTSEMRW)
1240 rtLockValidatorLazyInit();
1241 int rcLock = RTSemRWRequestRead(g_hLockValClassTreeRWLock, RT_INDEFINITE_WAIT);
1242
1243 uint32_t uSrcPosHash = rtLockValidatorSrcPosHash(pSrcPos);
1244 RTLOCKVALCLASSINT *pClass = (RTLOCKVALCLASSINT *)RTAvllU32Get(&g_LockValClassTree, uSrcPosHash);
1245 while (pClass)
1246 {
1247 if (rtLockValidatorSrcPosCompare(&pClass->CreatePos, pSrcPos) == 0)
1248 break;
1249 pClass = (RTLOCKVALCLASSINT *)pClass->Core.pList;
1250 }
1251
1252 if (RT_SUCCESS(rcLock))
1253 RTSemRWReleaseRead(g_hLockValClassTreeRWLock);
1254 return pClass;
1255}
1256
1257
1258RTDECL(RTLOCKVALCLASS) RTLockValidatorClassForSrcPos(RT_SRC_POS_DECL, const char *pszNameFmt, ...)
1259{
1260 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_POS_NO_ID();
1261 RTLOCKVALCLASS hClass = RTLockValidatorClassFindForSrcPos(&SrcPos);
1262 if (hClass == NIL_RTLOCKVALCLASS)
1263 {
1264 /*
1265 * Create a new class and insert it into the tree.
1266 */
1267 va_list va;
1268 va_start(va, pszNameFmt);
1269 int rc = RTLockValidatorClassCreateExV(&hClass, &SrcPos,
1270 true /*fAutodidact*/, true /*fRecursionOk*/, false /*fStrictReleaseOrder*/,
1271 1 /*cMsMinDeadlock*/, 1 /*cMsMinOrder*/,
1272 pszNameFmt, va);
1273 va_end(va);
1274 if (RT_SUCCESS(rc))
1275 {
1276 if (g_hLockValClassTreeRWLock == NIL_RTSEMRW)
1277 rtLockValidatorLazyInit();
1278 int rcLock = RTSemRWRequestWrite(g_hLockValClassTreeRWLock, RT_INDEFINITE_WAIT);
1279
1280 Assert(!hClass->fInTree);
1281 hClass->fInTree = RTAvllU32Insert(&g_LockValClassTree, &hClass->Core);
1282 Assert(hClass->fInTree);
1283
1284 if (RT_SUCCESS(rcLock))
1285 RTSemRWReleaseWrite(g_hLockValClassTreeRWLock);
1286 return hClass;
1287 }
1288 }
1289 return hClass;
1290}
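/*
 * Editor's sketch (not part of the original file): classes keyed on source
 * position.  Every lock created by this (hypothetical) factory shares one
 * autodidactic class, so order rules learned for one instance apply to all
 * of them.
 */
#if 0
static int exampleCreateWorkerLock(PRTSEMMUTEX phMtx)
{
    return RTSemMutexCreateEx(phMtx, 0 /*fFlags*/,
                              RTLockValidatorClassForSrcPos(RT_SRC_POS, "worker"),
                              RTLOCKVAL_SUB_CLASS_NONE, "worker");
}
#endif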
1291
1292
1293RTDECL(uint32_t) RTLockValidatorClassRetain(RTLOCKVALCLASS hClass)
1294{
1295 RTLOCKVALCLASSINT *pClass = hClass;
1296 AssertPtrReturn(pClass, UINT32_MAX);
1297 AssertReturn(pClass->u32Magic == RTLOCKVALCLASS_MAGIC, UINT32_MAX);
1298 return rtLockValidatorClassRetain(pClass);
1299}
1300
1301
1302RTDECL(uint32_t) RTLockValidatorClassRelease(RTLOCKVALCLASS hClass)
1303{
1304 RTLOCKVALCLASSINT *pClass = hClass;
1305 if (pClass == NIL_RTLOCKVALCLASS)
1306 return 0;
1307 AssertPtrReturn(pClass, UINT32_MAX);
1308 AssertReturn(pClass->u32Magic == RTLOCKVALCLASS_MAGIC, UINT32_MAX);
1309 return rtLockValidatorClassRelease(pClass);
1310}
1311
1312
1313/**
1314 * Worker for rtLockValidatorClassIsPriorClass that does a linear search thru
1315 * all the chunks for @a pPriorClass.
1316 *
1317 * @returns true / false.
1318 * @param pClass The class to search.
1319 * @param pPriorClass The class to search for.
1320 */
1321static bool rtLockValidatorClassIsPriorClassByLinearSearch(RTLOCKVALCLASSINT *pClass, RTLOCKVALCLASSINT *pPriorClass)
1322{
1323 for (PRTLOCKVALCLASSREFCHUNK pChunk = &pClass->PriorLocks; pChunk; pChunk = pChunk->pNext)
1324 for (uint32_t i = 0; i < RT_ELEMENTS(pChunk->aRefs); i++)
1325 {
1326 if (pChunk->aRefs[i].hClass == pPriorClass)
1327 {
1328 uint32_t cLookups = ASMAtomicIncU32(&pChunk->aRefs[i].cLookups);
1329 if (RT_UNLIKELY(cLookups >= RTLOCKVALCLASSREF_MAX_LOOKUPS_FIX))
1330 {
1331 ASMAtomicWriteU32(&pChunk->aRefs[i].cLookups, RTLOCKVALCLASSREF_MAX_LOOKUPS);
1332 cLookups = RTLOCKVALCLASSREF_MAX_LOOKUPS;
1333 }
1334
1335 /* update the hash table entry. */
1336 PRTLOCKVALCLASSREF *ppHashEntry = &pClass->apPriorLocksHash[RTLOCKVALCLASS_HASH(pPriorClass)];
1337 if ( !(*ppHashEntry)
1338 || (*ppHashEntry)->cLookups + 128 < cLookups)
1339 ASMAtomicWritePtr(ppHashEntry, &pChunk->aRefs[i]);
1340
1341#ifdef RTLOCKVAL_WITH_CLASS_HASH_STATS
1342 ASMAtomicIncU32(&pClass->cHashMisses);
1343#endif
1344 return true;
1345 }
1346 }
1347
1348 return false;
1349}
1350
1351
1352/**
1353 * Checks if @a pPriorClass is a known prior class.
1354 *
1355 * @returns true / false.
1356 * @param pClass The class to search.
1357 * @param pPriorClass The class to search for.
1358 */
1359DECL_FORCE_INLINE(bool) rtLockValidatorClassIsPriorClass(RTLOCKVALCLASSINT *pClass, RTLOCKVALCLASSINT *pPriorClass)
1360{
1361 /*
1362 * Hash lookup here.
1363 */
1364 PRTLOCKVALCLASSREF pRef = pClass->apPriorLocksHash[RTLOCKVALCLASS_HASH(pPriorClass)];
1365 if ( pRef
1366 && pRef->hClass == pPriorClass)
1367 {
1368 uint32_t cLookups = ASMAtomicIncU32(&pRef->cLookups);
1369 if (RT_UNLIKELY(cLookups >= RTLOCKVALCLASSREF_MAX_LOOKUPS_FIX))
1370 ASMAtomicWriteU32(&pRef->cLookups, RTLOCKVALCLASSREF_MAX_LOOKUPS);
1371#ifdef RTLOCKVAL_WITH_CLASS_HASH_STATS
1372 ASMAtomicIncU32(&pClass->cHashHits);
1373#endif
1374 return true;
1375 }
1376
1377 return rtLockValidatorClassIsPriorClassByLinearSearch(pClass, pPriorClass);
1378}
1379
1380
1381/**
1382 * Adds a class to the prior list.
1383 *
1384 * @returns VINF_SUCCESS, VERR_NO_MEMORY or VERR_SEM_LV_WRONG_ORDER.
1385 * @param pClass The class to work on.
1386 * @param pPriorClass The class to add.
1387 * @param fAutodidacticism Whether we're teaching ourselves (true) or
1388 * somebody is teaching us via the API (false).
1389 * @param pSrcPos Where this rule was added (optional).
1390 */
1391static int rtLockValidatorClassAddPriorClass(RTLOCKVALCLASSINT *pClass, RTLOCKVALCLASSINT *pPriorClass,
1392 bool fAutodidacticism, PCRTLOCKVALSRCPOS pSrcPos)
1393{
1394 NOREF(pSrcPos);
1395 if (!RTCritSectIsInitialized(&g_LockValClassTeachCS))
1396 rtLockValidatorLazyInit();
1397 int rcLock = RTCritSectEnter(&g_LockValClassTeachCS);
1398
1399 /*
1400 * Check that there are no conflicts (no assert since we might race each other).
1401 */
1402 int rc = VERR_SEM_LV_INTERNAL_ERROR;
1403 if (!rtLockValidatorClassIsPriorClass(pPriorClass, pClass))
1404 {
1405 if (!rtLockValidatorClassIsPriorClass(pClass, pPriorClass))
1406 {
1407 /*
1408 * Scan the table for a free entry, allocating a new chunk if necessary.
1409 */
1410 for (PRTLOCKVALCLASSREFCHUNK pChunk = &pClass->PriorLocks; ; pChunk = pChunk->pNext)
1411 {
1412 bool fDone = false;
1413 for (uint32_t i = 0; i < RT_ELEMENTS(pChunk->aRefs); i++)
1414 {
1415 ASMAtomicCmpXchgHandle(&pChunk->aRefs[i].hClass, pPriorClass, NIL_RTLOCKVALCLASS, fDone);
1416 if (fDone)
1417 {
1418 pChunk->aRefs[i].fAutodidacticism = fAutodidacticism;
1419 rtLockValidatorClassRetain(pPriorClass);
1420 rc = VINF_SUCCESS;
1421 break;
1422 }
1423 }
1424 if (fDone)
1425 break;
1426
1427 /* If no more chunks, allocate a new one and insert the class before linking it. */
1428 if (!pChunk->pNext)
1429 {
1430 PRTLOCKVALCLASSREFCHUNK pNew = (PRTLOCKVALCLASSREFCHUNK)RTMemAlloc(sizeof(*pNew));
1431 if (!pNew)
1432 {
1433 rc = VERR_NO_MEMORY;
1434 break;
1435 }
1436 pNew->pNext = NULL;
1437 for (uint32_t i = 0; i < RT_ELEMENTS(pNew->aRefs); i++)
1438 {
1439 pNew->aRefs[i].hClass = NIL_RTLOCKVALCLASS;
1440 pNew->aRefs[i].cLookups = 0;
1441 pNew->aRefs[i].fAutodidacticism = false;
1442 pNew->aRefs[i].afReserved[0] = false;
1443 pNew->aRefs[i].afReserved[1] = false;
1444 pNew->aRefs[i].afReserved[2] = false;
1445 }
1446
1447 pNew->aRefs[0].hClass = pPriorClass;
1448 pNew->aRefs[0].fAutodidacticism = fAutodidacticism;
1449
1450 ASMAtomicWritePtr(&pChunk->pNext, pNew);
1451 rtLockValidatorClassRetain(pPriorClass);
1452 rc = VINF_SUCCESS;
1453 break;
1454 }
1455 } /* chunk loop */
1456 }
1457 else
1458 rc = VINF_SUCCESS;
1459 }
1460 else
1461 rc = !g_fLockValSoftWrongOrder ? VERR_SEM_LV_WRONG_ORDER : VINF_SUCCESS;
1462
1463 if (RT_SUCCESS(rcLock))
1464 RTCritSectLeave(&g_LockValClassTeachCS);
1465 return rc;
1466}
1467
1468
1469RTDECL(int) RTLockValidatorClassAddPriorClass(RTLOCKVALCLASS hClass, RTLOCKVALCLASS hPriorClass)
1470{
1471 RTLOCKVALCLASSINT *pClass = hClass;
1472 AssertPtrReturn(pClass, VERR_INVALID_HANDLE);
1473 AssertReturn(pClass->u32Magic == RTLOCKVALCLASS_MAGIC, VERR_INVALID_HANDLE);
1474
1475 RTLOCKVALCLASSINT *pPriorClass = hPriorClass;
1476 AssertPtrReturn(pPriorClass, VERR_INVALID_HANDLE);
1477 AssertReturn(pPriorClass->u32Magic == RTLOCKVALCLASS_MAGIC, VERR_INVALID_HANDLE);
1478
1479 return rtLockValidatorClassAddPriorClass(pClass, pPriorClass, false /*fAutodidacticism*/, NULL);
1480}
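/*
 * Editor's sketch (not part of the original file): manually teaching the
 * validator a lock hierarchy.  The two classes are hypothetical; after the
 * call, taking "list" before "entry" is fine, while the reverse order
 * triggers a wrong-order complaint (VERR_SEM_LV_WRONG_ORDER, softened to a
 * warning when IPRT_LOCK_VALIDATOR_SOFT_ORDER is set).
 */
#if 0
RTLOCKVALCLASS hClassList, hClassEntry;
RTLockValidatorClassCreate(&hClassList,  false /*fAutodidact*/, RT_SRC_POS, "list");
RTLockValidatorClassCreate(&hClassEntry, false /*fAutodidact*/, RT_SRC_POS, "entry");
int rc = RTLockValidatorClassAddPriorClass(hClassEntry, hClassList /*may be held first*/);
#endif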
1481
1482
1483RTDECL(int) RTLockValidatorClassEnforceStrictReleaseOrder(RTLOCKVALCLASS hClass, bool fEnabled)
1484{
1485 RTLOCKVALCLASSINT *pClass = hClass;
1486 AssertPtrReturn(pClass, VERR_INVALID_HANDLE);
1487 AssertReturn(pClass->u32Magic == RTLOCKVALCLASS_MAGIC, VERR_INVALID_HANDLE);
1488
1489 ASMAtomicWriteBool(&pClass->fStrictReleaseOrder, fEnabled);
1490 return VINF_SUCCESS;
1491}
1492
1493
1494/**
1495 * Unlinks all siblings.
1496 *
1497 * This is used during record deletion and assumes no races.
1498 *
1499 * @param pCore One of the siblings.
1500 */
1501static void rtLockValidatorUnlinkAllSiblings(PRTLOCKVALRECCORE pCore)
1502{
1503 /* ASSUMES sibling destruction doesn't involve any races and that all
1504 related records are to be disposed of now. */
1505 PRTLOCKVALRECUNION pSibling = (PRTLOCKVALRECUNION)pCore;
1506 while (pSibling)
1507 {
1508 PRTLOCKVALRECUNION volatile *ppCoreNext;
1509 switch (pSibling->Core.u32Magic)
1510 {
1511 case RTLOCKVALRECEXCL_MAGIC:
1512 case RTLOCKVALRECEXCL_MAGIC_DEAD:
1513 ppCoreNext = &pSibling->Excl.pSibling;
1514 break;
1515
1516 case RTLOCKVALRECSHRD_MAGIC:
1517 case RTLOCKVALRECSHRD_MAGIC_DEAD:
1518 ppCoreNext = &pSibling->Shared.pSibling;
1519 break;
1520
1521 default:
1522 AssertFailed();
1523 ppCoreNext = NULL;
1524 break;
1525 }
1526 if (RT_UNLIKELY(!ppCoreNext))
1527 break;
1528 pSibling = ASMAtomicXchgPtrT(ppCoreNext, NULL, PRTLOCKVALRECUNION);
1529 }
1530}
1531
1532
1533RTDECL(int) RTLockValidatorRecMakeSiblings(PRTLOCKVALRECCORE pRec1, PRTLOCKVALRECCORE pRec2)
1534{
1535 /*
1536 * Validate input.
1537 */
1538 PRTLOCKVALRECUNION p1 = (PRTLOCKVALRECUNION)pRec1;
1539 PRTLOCKVALRECUNION p2 = (PRTLOCKVALRECUNION)pRec2;
1540
1541 AssertPtrReturn(p1, VERR_SEM_LV_INVALID_PARAMETER);
1542 AssertReturn( p1->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC
1543 || p1->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
1544 , VERR_SEM_LV_INVALID_PARAMETER);
1545
1546 AssertPtrReturn(p2, VERR_SEM_LV_INVALID_PARAMETER);
1547 AssertReturn( p2->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC
1548 || p2->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
1549 , VERR_SEM_LV_INVALID_PARAMETER);
1550
1551 /*
1552 * Link them (circular list).
1553 */
1554 if ( p1->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC
1555 && p2->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC)
1556 {
1557 p1->Excl.pSibling = p2;
1558 p2->Shared.pSibling = p1;
1559 }
1560 else if ( p1->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
1561 && p2->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC)
1562 {
1563 p1->Shared.pSibling = p2;
1564 p2->Excl.pSibling = p1;
1565 }
1566 else
1567 AssertFailedReturn(VERR_SEM_LV_INVALID_PARAMETER); /* unsupported mix */
1568
1569 return VINF_SUCCESS;
1570}
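/*
 * Editor's sketch (not part of the original file): pairing the exclusive
 * (writer) and shared (reader) records of a hypothetical read/write lock so
 * the validator treats them as one lock.  The Rec*Init signatures are
 * assumed from iprt/lockvalidator.h; pvLock stands in for the real lock.
 */
#if 0
void *pvLock = NULL; /* placeholder for the lock this record pair describes */
RTLOCKVALRECEXCL WriteRec;
RTLOCKVALRECSHRD ReadRec;
RTLockValidatorRecExclInit(&WriteRec, NIL_RTLOCKVALCLASS, RTLOCKVAL_SUB_CLASS_NONE,
                           pvLock, true /*fEnabled*/, "example-rw");
RTLockValidatorRecSharedInit(&ReadRec, NIL_RTLOCKVALCLASS, RTLOCKVAL_SUB_CLASS_NONE,
                             pvLock, false /*fSignaller*/, true /*fEnabled*/, "example-rw");
int rc = RTLockValidatorRecMakeSiblings(&WriteRec.Core, &ReadRec.Core);
#endif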
1571
1572
1573/**
1574 * Gets the lock name for the given record.
1575 *
1576 * @returns Read-only lock name.
1577 * @param pRec The lock record.
1578 */
1579DECL_FORCE_INLINE(const char *) rtLockValidatorRecName(PRTLOCKVALRECUNION pRec)
1580{
1581 switch (pRec->Core.u32Magic)
1582 {
1583 case RTLOCKVALRECEXCL_MAGIC:
1584 return pRec->Excl.szName;
1585 case RTLOCKVALRECSHRD_MAGIC:
1586 return pRec->Shared.szName;
1587 case RTLOCKVALRECSHRDOWN_MAGIC:
1588 return pRec->ShrdOwner.pSharedRec ? pRec->ShrdOwner.pSharedRec->szName : "orphaned";
1589 case RTLOCKVALRECNEST_MAGIC:
1590 pRec = rtLockValidatorReadRecUnionPtr(&pRec->Nest.pRec);
1591 if (VALID_PTR(pRec))
1592 {
1593 switch (pRec->Core.u32Magic)
1594 {
1595 case RTLOCKVALRECEXCL_MAGIC:
1596 return pRec->Excl.szName;
1597 case RTLOCKVALRECSHRD_MAGIC:
1598 return pRec->Shared.szName;
1599 case RTLOCKVALRECSHRDOWN_MAGIC:
1600 return pRec->ShrdOwner.pSharedRec ? pRec->ShrdOwner.pSharedRec->szName : "orphaned";
1601 default:
1602 return "unknown-nested";
1603 }
1604 }
1605 return "orphaned-nested";
1606 default:
1607 return "unknown";
1608 }
1609}
1610
1611
1612/**
1613 * Gets the class for this locking record.
1614 *
1615 * @returns Pointer to the class or NIL_RTLOCKVALCLASS.
1616 * @param pRec The lock validator record.
1617 */
1618DECLINLINE(RTLOCKVALCLASSINT *) rtLockValidatorRecGetClass(PRTLOCKVALRECUNION pRec)
1619{
1620 switch (pRec->Core.u32Magic)
1621 {
1622 case RTLOCKVALRECEXCL_MAGIC:
1623 return pRec->Excl.hClass;
1624
1625 case RTLOCKVALRECSHRD_MAGIC:
1626 return pRec->Shared.hClass;
1627
1628 case RTLOCKVALRECSHRDOWN_MAGIC:
1629 {
1630 PRTLOCKVALRECSHRD pSharedRec = pRec->ShrdOwner.pSharedRec;
1631 if (RT_LIKELY( VALID_PTR(pSharedRec)
1632 && pSharedRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC))
1633 return pSharedRec->hClass;
1634 return NIL_RTLOCKVALCLASS;
1635 }
1636
1637 case RTLOCKVALRECNEST_MAGIC:
1638 {
1639 PRTLOCKVALRECUNION pRealRec = pRec->Nest.pRec;
1640 if (VALID_PTR(pRealRec))
1641 {
1642 switch (pRealRec->Core.u32Magic)
1643 {
1644 case RTLOCKVALRECEXCL_MAGIC:
1645 return pRealRec->Excl.hClass;
1646
1647 case RTLOCKVALRECSHRDOWN_MAGIC:
1648 {
1649 PRTLOCKVALRECSHRD pSharedRec = pRealRec->ShrdOwner.pSharedRec;
1650 if (RT_LIKELY( VALID_PTR(pSharedRec)
1651 && pSharedRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC))
1652 return pSharedRec->hClass;
1653 break;
1654 }
1655
1656 default:
1657 AssertMsgFailed(("%p %p %#x\n", pRec, pRealRec, pRealRec->Core.u32Magic));
1658 break;
1659 }
1660 }
1661 return NIL_RTLOCKVALCLASS;
1662 }
1663
1664 default:
1665 AssertMsgFailed(("%#x\n", pRec->Core.u32Magic));
1666 return NIL_RTLOCKVALCLASS;
1667 }
1668}
1669
1670
1671/**
1672 * Gets the class for this locking record and the pointer to the one below it in
1673 * the stack.
1674 *
1675 * @returns Pointer to the class or NIL_RTLOCKVALCLASS.
1676 * @param pRec The lock validator record.
1677 * @param puSubClass Where to return the sub-class.
1678 * @param ppDown Where to return the pointer to the record below.
1679 */
1680DECL_FORCE_INLINE(RTLOCKVALCLASSINT *)
1681rtLockValidatorRecGetClassesAndDown(PRTLOCKVALRECUNION pRec, uint32_t *puSubClass, PRTLOCKVALRECUNION *ppDown)
1682{
1683 switch (pRec->Core.u32Magic)
1684 {
1685 case RTLOCKVALRECEXCL_MAGIC:
1686 *ppDown = pRec->Excl.pDown;
1687 *puSubClass = pRec->Excl.uSubClass;
1688 return pRec->Excl.hClass;
1689
1690 case RTLOCKVALRECSHRD_MAGIC:
1691 *ppDown = NULL;
1692 *puSubClass = pRec->Shared.uSubClass;
1693 return pRec->Shared.hClass;
1694
1695 case RTLOCKVALRECSHRDOWN_MAGIC:
1696 {
1697 *ppDown = pRec->ShrdOwner.pDown;
1698
1699 PRTLOCKVALRECSHRD pSharedRec = pRec->ShrdOwner.pSharedRec;
1700 if (RT_LIKELY( VALID_PTR(pSharedRec)
1701 && pSharedRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC))
1702 {
1703 *puSubClass = pSharedRec->uSubClass;
1704 return pSharedRec->hClass;
1705 }
1706 *puSubClass = RTLOCKVAL_SUB_CLASS_NONE;
1707 return NIL_RTLOCKVALCLASS;
1708 }
1709
1710 case RTLOCKVALRECNEST_MAGIC:
1711 {
1712 *ppDown = pRec->Nest.pDown;
1713
1714 PRTLOCKVALRECUNION pRealRec = pRec->Nest.pRec;
1715 if (VALID_PTR(pRealRec))
1716 {
1717 switch (pRealRec->Core.u32Magic)
1718 {
1719 case RTLOCKVALRECEXCL_MAGIC:
1720 *puSubClass = pRealRec->Excl.uSubClass;
1721 return pRealRec->Excl.hClass;
1722
1723 case RTLOCKVALRECSHRDOWN_MAGIC:
1724 {
1725 PRTLOCKVALRECSHRD pSharedRec = pRealRec->ShrdOwner.pSharedRec;
1726 if (RT_LIKELY( VALID_PTR(pSharedRec)
1727 && pSharedRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC))
1728 {
1729 *puSubClass = pSharedRec->uSubClass;
1730 return pSharedRec->hClass;
1731 }
1732 break;
1733 }
1734
1735 default:
1736 AssertMsgFailed(("%p %p %#x\n", pRec, pRealRec, pRealRec->Core.u32Magic));
1737 break;
1738 }
1739 }
1740 *puSubClass = RTLOCKVAL_SUB_CLASS_NONE;
1741 return NIL_RTLOCKVALCLASS;
1742 }
1743
1744 default:
1745 AssertMsgFailed(("%#x\n", pRec->Core.u32Magic));
1746 *ppDown = NULL;
1747 *puSubClass = RTLOCKVAL_SUB_CLASS_NONE;
1748 return NIL_RTLOCKVALCLASS;
1749 }
1750}
1751
1752
1753/**
1754 * Gets the sub-class for a lock record.
1755 *
1756 * @returns the sub-class.
1757 * @param pRec The lock validator record.
1758 */
1759DECLINLINE(uint32_t) rtLockValidatorRecGetSubClass(PRTLOCKVALRECUNION pRec)
1760{
1761 switch (pRec->Core.u32Magic)
1762 {
1763 case RTLOCKVALRECEXCL_MAGIC:
1764 return pRec->Excl.uSubClass;
1765
1766 case RTLOCKVALRECSHRD_MAGIC:
1767 return pRec->Shared.uSubClass;
1768
1769 case RTLOCKVALRECSHRDOWN_MAGIC:
1770 {
1771 PRTLOCKVALRECSHRD pSharedRec = pRec->ShrdOwner.pSharedRec;
1772 if (RT_LIKELY( VALID_PTR(pSharedRec)
1773 && pSharedRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC))
1774 return pSharedRec->uSubClass;
1775 return RTLOCKVAL_SUB_CLASS_NONE;
1776 }
1777
1778 case RTLOCKVALRECNEST_MAGIC:
1779 {
1780 PRTLOCKVALRECUNION pRealRec = pRec->Nest.pRec;
1781 if (VALID_PTR(pRealRec))
1782 {
1783 switch (pRealRec->Core.u32Magic)
1784 {
1785 case RTLOCKVALRECEXCL_MAGIC:
1786 return pRealRec->Excl.uSubClass;
1787
1788 case RTLOCKVALRECSHRDOWN_MAGIC:
1789 {
1790 PRTLOCKVALRECSHRD pSharedRec = pRealRec->ShrdOwner.pSharedRec;
1791 if (RT_LIKELY( VALID_PTR(pSharedRec)
1792 && pSharedRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC))
1793 return pSharedRec->uSubClass;
1794 break;
1795 }
1796
1797 default:
1798 AssertMsgFailed(("%p %p %#x\n", pRec, pRealRec, pRealRec->Core.u32Magic));
1799 break;
1800 }
1801 }
1802 return RTLOCKVAL_SUB_CLASS_NONE;
1803 }
1804
1805 default:
1806 AssertMsgFailed(("%#x\n", pRec->Core.u32Magic));
1807 return RTLOCKVAL_SUB_CLASS_NONE;
1808 }
1809}
1810
1811
1812
1813
1814/**
1815 * Calculates the depth of a lock stack.
1816 *
1817 * @returns Number of stack frames.
1818 * @param pThread The thread.
1819 */
1820static uint32_t rtLockValidatorStackDepth(PRTTHREADINT pThread)
1821{
1822 uint32_t cEntries = 0;
1823 PRTLOCKVALRECUNION pCur = rtLockValidatorReadRecUnionPtr(&pThread->LockValidator.pStackTop);
1824 while (VALID_PTR(pCur))
1825 {
1826 switch (pCur->Core.u32Magic)
1827 {
1828 case RTLOCKVALRECEXCL_MAGIC:
1829 pCur = rtLockValidatorReadRecUnionPtr(&pCur->Excl.pDown);
1830 break;
1831
1832 case RTLOCKVALRECSHRDOWN_MAGIC:
1833 pCur = rtLockValidatorReadRecUnionPtr(&pCur->ShrdOwner.pDown);
1834 break;
1835
1836 case RTLOCKVALRECNEST_MAGIC:
1837 pCur = rtLockValidatorReadRecUnionPtr(&pCur->Nest.pDown);
1838 break;
1839
1840 default:
1841 AssertMsgFailedReturn(("%#x\n", pCur->Core.u32Magic), cEntries);
1842 }
1843 cEntries++;
1844 }
1845 return cEntries;
1846}
1847
1848
1849#ifdef RT_STRICT
1850/**
1851 * Checks if the stack contains @a pRec.
1852 *
1853 * @returns true / false.
1854 * @param pThreadSelf The current thread.
1855 * @param pRec The lock record.
1856 */
1857static bool rtLockValidatorStackContainsRec(PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION pRec)
1858{
1859 PRTLOCKVALRECUNION pCur = pThreadSelf->LockValidator.pStackTop;
1860 while (pCur)
1861 {
1862 AssertPtrReturn(pCur, false);
1863 if (pCur == pRec)
1864 return true;
1865 switch (pCur->Core.u32Magic)
1866 {
1867 case RTLOCKVALRECEXCL_MAGIC:
1868 Assert(pCur->Excl.cRecursion >= 1);
1869 pCur = pCur->Excl.pDown;
1870 break;
1871
1872 case RTLOCKVALRECSHRDOWN_MAGIC:
1873 Assert(pCur->ShrdOwner.cRecursion >= 1);
1874 pCur = pCur->ShrdOwner.pDown;
1875 break;
1876
1877 case RTLOCKVALRECNEST_MAGIC:
1878 Assert(pCur->Nest.cRecursion > 1);
1879 pCur = pCur->Nest.pDown;
1880 break;
1881
1882 default:
1883 AssertMsgFailedReturn(("%#x\n", pCur->Core.u32Magic), false);
1884 }
1885 }
1886 return false;
1887}
1888#endif /* RT_STRICT */
1889
1890
1891/**
1892 * Pushes a lock record onto the stack.
1893 *
1894 * @param pThreadSelf The current thread.
1895 * @param pRec The lock record.
1896 */
1897static void rtLockValidatorStackPush(PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION pRec)
1898{
1899 Assert(pThreadSelf == RTThreadSelf());
1900 Assert(!rtLockValidatorStackContainsRec(pThreadSelf, pRec));
1901
1902 switch (pRec->Core.u32Magic)
1903 {
1904 case RTLOCKVALRECEXCL_MAGIC:
1905 Assert(pRec->Excl.cRecursion == 1);
1906 Assert(pRec->Excl.pDown == NULL);
1907 rtLockValidatorWriteRecUnionPtr(&pRec->Excl.pDown, pThreadSelf->LockValidator.pStackTop);
1908 break;
1909
1910 case RTLOCKVALRECSHRDOWN_MAGIC:
1911 Assert(pRec->ShrdOwner.cRecursion == 1);
1912 Assert(pRec->ShrdOwner.pDown == NULL);
1913 rtLockValidatorWriteRecUnionPtr(&pRec->ShrdOwner.pDown, pThreadSelf->LockValidator.pStackTop);
1914 break;
1915
1916 default:
1917 AssertMsgFailedReturnVoid(("%#x\n", pRec->Core.u32Magic));
1918 }
1919 rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pStackTop, pRec);
1920}
1921
1922
1923/**
1924 * Pops a lock record off the stack.
1925 *
1926 * @param pThreadSelf The current thread.
1927 * @param pRec The lock record.
1928 */
1929static void rtLockValidatorStackPop(PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION pRec)
1930{
1931 Assert(pThreadSelf == RTThreadSelf());
1932
1933 PRTLOCKVALRECUNION pDown;
1934 switch (pRec->Core.u32Magic)
1935 {
1936 case RTLOCKVALRECEXCL_MAGIC:
1937 Assert(pRec->Excl.cRecursion == 0);
1938 pDown = pRec->Excl.pDown;
1939 rtLockValidatorWriteRecUnionPtr(&pRec->Excl.pDown, NULL); /* lazy bird */
1940 break;
1941
1942 case RTLOCKVALRECSHRDOWN_MAGIC:
1943 Assert(pRec->ShrdOwner.cRecursion == 0);
1944 pDown = pRec->ShrdOwner.pDown;
1945 rtLockValidatorWriteRecUnionPtr(&pRec->ShrdOwner.pDown, NULL);
1946 break;
1947
1948 default:
1949 AssertMsgFailedReturnVoid(("%#x\n", pRec->Core.u32Magic));
1950 }
1951 if (pThreadSelf->LockValidator.pStackTop == pRec)
1952 rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pStackTop, pDown);
1953 else
1954 {
1955 /* Find the pointer to our record and unlink ourselves. */
1956 PRTLOCKVALRECUNION pCur = pThreadSelf->LockValidator.pStackTop;
1957 while (pCur)
1958 {
1959 PRTLOCKVALRECUNION volatile *ppDown;
1960 switch (pCur->Core.u32Magic)
1961 {
1962 case RTLOCKVALRECEXCL_MAGIC:
1963 Assert(pCur->Excl.cRecursion >= 1);
1964 ppDown = &pCur->Excl.pDown;
1965 break;
1966
1967 case RTLOCKVALRECSHRDOWN_MAGIC:
1968 Assert(pCur->ShrdOwner.cRecursion >= 1);
1969 ppDown = &pCur->ShrdOwner.pDown;
1970 break;
1971
1972 case RTLOCKVALRECNEST_MAGIC:
1973 Assert(pCur->Nest.cRecursion >= 1);
1974 ppDown = &pCur->Nest.pDown;
1975 break;
1976
1977 default:
1978 AssertMsgFailedReturnVoid(("%#x\n", pCur->Core.u32Magic));
1979 }
1980 pCur = *ppDown;
1981 if (pCur == pRec)
1982 {
1983 rtLockValidatorWriteRecUnionPtr(ppDown, pDown);
1984 return;
1985 }
1986 }
1987 AssertMsgFailed(("%p %p\n", pRec, pThreadSelf));
1988 }
1989}
1990
1991
1992/**
1993 * Creates and pushes a lock recursion record onto the stack.
1994 *
1995 * @param pThreadSelf The current thread.
1996 * @param pRec The lock record.
1997 * @param pSrcPos Where the recursion occurred.
1998 */
1999static void rtLockValidatorStackPushRecursion(PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION pRec, PCRTLOCKVALSRCPOS pSrcPos)
2000{
2001 Assert(pThreadSelf == RTThreadSelf());
2002 Assert(rtLockValidatorStackContainsRec(pThreadSelf, pRec));
2003
2004#ifdef RTLOCKVAL_WITH_RECURSION_RECORDS
2005 /*
2006 * Allocate a new recursion record
2007 */
2008 PRTLOCKVALRECNEST pRecursionRec = pThreadSelf->LockValidator.pFreeNestRecs;
2009 if (pRecursionRec)
2010 pThreadSelf->LockValidator.pFreeNestRecs = pRecursionRec->pNextFree;
2011 else
2012 {
2013 pRecursionRec = (PRTLOCKVALRECNEST)RTMemAlloc(sizeof(*pRecursionRec));
2014 if (!pRecursionRec)
2015 return;
2016 }
2017
2018 /*
2019 * Initialize it.
2020 */
2021 switch (pRec->Core.u32Magic)
2022 {
2023 case RTLOCKVALRECEXCL_MAGIC:
2024 pRecursionRec->cRecursion = pRec->Excl.cRecursion;
2025 break;
2026
2027 case RTLOCKVALRECSHRDOWN_MAGIC:
2028 pRecursionRec->cRecursion = pRec->ShrdOwner.cRecursion;
2029 break;
2030
2031 default:
2032 AssertMsgFailed(("%#x\n", pRec->Core.u32Magic));
2033 rtLockValidatorSerializeDestructEnter();
2034 rtLockValidatorSerializeDestructLeave();
2035 RTMemFree(pRecursionRec);
2036 return;
2037 }
2038 Assert(pRecursionRec->cRecursion > 1);
2039 pRecursionRec->pRec = pRec;
2040 pRecursionRec->pDown = NULL;
2041 pRecursionRec->pNextFree = NULL;
2042 rtLockValidatorSrcPosCopy(&pRecursionRec->SrcPos, pSrcPos);
2043 pRecursionRec->Core.u32Magic = RTLOCKVALRECNEST_MAGIC;
2044
2045 /*
2046 * Link it.
2047 */
2048 pRecursionRec->pDown = pThreadSelf->LockValidator.pStackTop;
2049 rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pStackTop, (PRTLOCKVALRECUNION)pRecursionRec);
2050#endif /* RTLOCKVAL_WITH_RECURSION_RECORDS */
2051}
2052
2053
2054/**
2055 * Pops a lock recursion record off the stack.
2056 *
2057 * @param pThreadSelf The current thread.
2058 * @param pRec The lock record.
2059 */
2060static void rtLockValidatorStackPopRecursion(PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION pRec)
2061{
2062 Assert(pThreadSelf == RTThreadSelf());
2063 Assert(rtLockValidatorStackContainsRec(pThreadSelf, pRec));
2064
2065 uint32_t cRecursion;
2066 switch (pRec->Core.u32Magic)
2067 {
2068 case RTLOCKVALRECEXCL_MAGIC: cRecursion = pRec->Excl.cRecursion; break;
2069 case RTLOCKVALRECSHRDOWN_MAGIC: cRecursion = pRec->ShrdOwner.cRecursion; break;
2070 default: AssertMsgFailedReturnVoid(("%#x\n", pRec->Core.u32Magic));
2071 }
2072 Assert(cRecursion >= 1);
2073
2074#ifdef RTLOCKVAL_WITH_RECURSION_RECORDS
2075 /*
2076 * Pop the recursion record.
2077 */
2078 PRTLOCKVALRECUNION pNest = pThreadSelf->LockValidator.pStackTop;
2079 if ( pNest != NULL
2080 && pNest->Core.u32Magic == RTLOCKVALRECNEST_MAGIC
2081 && pNest->Nest.pRec == pRec
2082 )
2083 {
2084 Assert(pNest->Nest.cRecursion == cRecursion + 1);
2085 rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pStackTop, pNest->Nest.pDown);
2086 }
2087 else
2088 {
2089 /* Find the record above ours. */
2090 PRTLOCKVALRECUNION volatile *ppDown = NULL;
2091 for (;;)
2092 {
2093 AssertMsgReturnVoid(pNest, ("%p %p\n", pRec, pThreadSelf));
2094 switch (pNest->Core.u32Magic)
2095 {
2096 case RTLOCKVALRECEXCL_MAGIC:
2097 ppDown = &pNest->Excl.pDown;
2098 pNest = *ppDown;
2099 continue;
2100 case RTLOCKVALRECSHRDOWN_MAGIC:
2101 ppDown = &pNest->ShrdOwner.pDown;
2102 pNest = *ppDown;
2103 continue;
2104 case RTLOCKVALRECNEST_MAGIC:
2105 if (pNest->Nest.pRec == pRec)
2106 break;
2107 ppDown = &pNest->Nest.pDown;
2108 pNest = *ppDown;
2109 continue;
2110 default:
2111 AssertMsgFailedReturnVoid(("%#x\n", pNest->Core.u32Magic));
2112 }
2113 break; /* ugly */
2114 }
2115 Assert(pNest->Nest.cRecursion == cRecursion + 1);
2116 rtLockValidatorWriteRecUnionPtr(ppDown, pNest->Nest.pDown);
2117 }
2118
2119 /*
2120 * Invalidate and free the record.
2121 */
2122 ASMAtomicWriteU32(&pNest->Core.u32Magic, RTLOCKVALRECNEST_MAGIC_DEAD);
2123 rtLockValidatorWriteRecUnionPtr(&pNest->Nest.pDown, NULL);
2124 rtLockValidatorWriteRecUnionPtr(&pNest->Nest.pRec, NULL);
2125 pNest->Nest.cRecursion = 0;
2126 pNest->Nest.pNextFree = pThreadSelf->LockValidator.pFreeNestRecs;
2127 pThreadSelf->LockValidator.pFreeNestRecs = &pNest->Nest;
2128#endif /* RTLOCKVAL_WITH_RECURSION_RECORDS */
2129}
2130
2131
2132/**
2133 * Helper for rtLockValidatorStackCheckLockingOrder that does the bitching and
2134 * returns VERR_SEM_LV_WRONG_ORDER (or VINF_SUCCESS in soft mode).
2135 */
2136static int rtLockValidatorStackWrongOrder(const char *pszWhat, PCRTLOCKVALSRCPOS pSrcPos, PRTTHREADINT pThreadSelf,
2137 PRTLOCKVALRECUNION pRec1, PRTLOCKVALRECUNION pRec2,
2138 RTLOCKVALCLASSINT *pClass1, RTLOCKVALCLASSINT *pClass2)
2139
2140
2141{
2142 rtLockValComplainFirst(pszWhat, pSrcPos, pThreadSelf, pRec1, false);
2143 rtLockValComplainAboutLock("Other lock: ", pRec2, "\n");
2144 rtLockValComplainAboutClass("My class: ", pClass1, rtLockValidatorRecGetSubClass(pRec1), true /*fVerbose*/);
2145 rtLockValComplainAboutClass("Other class: ", pClass2, rtLockValidatorRecGetSubClass(pRec2), true /*fVerbose*/);
2146 rtLockValComplainAboutLockStack(pThreadSelf, 0, 0, pRec2);
2147 rtLockValComplainPanic();
2148 return !g_fLockValSoftWrongOrder ? VERR_SEM_LV_WRONG_ORDER : VINF_SUCCESS;
2149}
2150
2151
2152/**
2153 * Checks if the sub-class order is ok or not.
2154 *
2155 * Used to deal with two locks from the same class.
2156 *
2157 * @returns true if ok, false if not.
2158 * @param uSubClass1 The sub-class of the lock that is being
2159 * considered.
2160 * @param uSubClass2 The sub-class of the lock that is already being
2161 * held.
2162 */
2163DECL_FORCE_INLINE(bool) rtLockValidatorIsSubClassOrderOk(uint32_t uSubClass1, uint32_t uSubClass2)
2164{
2165 if (uSubClass1 > uSubClass2)
2166 {
2167 /* NONE kills ANY. */
2168 if (uSubClass2 == RTLOCKVAL_SUB_CLASS_NONE)
2169 return false;
2170 return true;
2171 }
2172
2173 /* ANY counters all USER values. (uSubClass1 == NONE only if they are equal) */
2174 AssertCompile(RTLOCKVAL_SUB_CLASS_ANY > RTLOCKVAL_SUB_CLASS_NONE);
2175 if (uSubClass1 == RTLOCKVAL_SUB_CLASS_ANY)
2176 return true;
2177 return false;
2178}
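/* The NONE/ANY/USER rules above are easier to see with a few concrete probes.
 * A minimal sanity sketch (hypothetical, not part of this file); the first
 * argument is the sub-class being acquired, the second the one already held:
 * @code
 *  Assert( rtLockValidatorIsSubClassOrderOk(RTLOCKVAL_SUB_CLASS_USER + 1, RTLOCKVAL_SUB_CLASS_USER));     // ascending user order: ok
 *  Assert(!rtLockValidatorIsSubClassOrderOk(RTLOCKVAL_SUB_CLASS_USER,     RTLOCKVAL_SUB_CLASS_USER + 1)); // descending: refused
 *  Assert(!rtLockValidatorIsSubClassOrderOk(RTLOCKVAL_SUB_CLASS_USER,     RTLOCKVAL_SUB_CLASS_USER));     // same sub-class twice: refused
 *  Assert(!rtLockValidatorIsSubClassOrderOk(RTLOCKVAL_SUB_CLASS_USER,     RTLOCKVAL_SUB_CLASS_NONE));     // anything on top of NONE: refused
 *  Assert( rtLockValidatorIsSubClassOrderOk(RTLOCKVAL_SUB_CLASS_ANY,      RTLOCKVAL_SUB_CLASS_USER));     // ANY mixes with USER values
 * @endcode
 */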
2179
2180
2181/**
2182 * Checks if the class and sub-class lock order is ok.
2183 *
2184 * @returns true if ok, false if not.
2185 * @param pClass1 The class of the lock that is being considered.
2186 * @param uSubClass1 The sub-class that goes with @a pClass1.
2187 * @param pClass2 The class of the lock that is already being
2188 * held.
2189 * @param uSubClass2 The sub-class that goes with @a pClass2.
2190 */
2191DECL_FORCE_INLINE(bool) rtLockValidatorIsClassOrderOk(RTLOCKVALCLASSINT *pClass1, uint32_t uSubClass1,
2192 RTLOCKVALCLASSINT *pClass2, uint32_t uSubClass2)
2193{
2194 if (pClass1 == pClass2)
2195 return rtLockValidatorIsSubClassOrderOk(uSubClass1, uSubClass2);
2196 return rtLockValidatorClassIsPriorClass(pClass1, pClass2);
2197}
2198
2199
2200/**
2201 * Checks the locking order, part two.
2202 *
2203 * @returns VINF_SUCCESS, VERR_SEM_LV_WRONG_ORDER or VERR_SEM_LV_INTERNAL_ERROR.
2204 * @param pClass The lock class.
2205 * @param uSubClass The lock sub-class.
2206 * @param pThreadSelf The current thread.
2207 * @param pRec The lock record.
2208 * @param pSrcPos The source position of the locking operation.
2209 */
2210static int rtLockValidatorStackCheckLockingOrder2(RTLOCKVALCLASSINT * const pClass, uint32_t const uSubClass,
2211 PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION const pRec,
2212 PCRTLOCKVALSRCPOS const pSrcPos,
2213 RTLOCKVALCLASSINT * const pFirstBadClass,
2214 PRTLOCKVALRECUNION const pFirstBadRec,
2215 PRTLOCKVALRECUNION const pFirstBadDown)
2216{
2217 /*
2218 * Something went wrong; pFirstBadRec points to where.
2219 */
2220 if ( pClass == pFirstBadClass
2221 || rtLockValidatorClassIsPriorClass(pFirstBadClass, pClass))
2222 return rtLockValidatorStackWrongOrder("Wrong locking order!", pSrcPos, pThreadSelf,
2223 pRec, pFirstBadRec, pClass, pFirstBadClass);
2224 if (!pClass->fAutodidact)
2225 return rtLockValidatorStackWrongOrder("Wrong locking order! (unknown)", pSrcPos, pThreadSelf,
2226 pRec, pFirstBadRec, pClass, pFirstBadClass);
2227
2228 /*
2229 * This class is an autodidact, so we have to check out the rest of the stack
2230 * for direct violations.
2231 */
2232 uint32_t cNewRules = 1;
2233 PRTLOCKVALRECUNION pCur = pFirstBadDown;
2234 while (pCur)
2235 {
2236 AssertPtrReturn(pCur, VERR_SEM_LV_INTERNAL_ERROR);
2237
2238 if (pCur->Core.u32Magic == RTLOCKVALRECNEST_MAGIC)
2239 pCur = pCur->Nest.pDown;
2240 else
2241 {
2242 PRTLOCKVALRECUNION pDown;
2243 uint32_t uPriorSubClass;
2244 RTLOCKVALCLASSINT *pPriorClass = rtLockValidatorRecGetClassesAndDown(pCur, &uPriorSubClass, &pDown);
2245 if (pPriorClass != NIL_RTLOCKVALCLASS)
2246 {
2247 AssertPtrReturn(pPriorClass, VERR_SEM_LV_INTERNAL_ERROR);
2248 AssertReturn(pPriorClass->u32Magic == RTLOCKVALCLASS_MAGIC, VERR_SEM_LV_INTERNAL_ERROR);
2249 if (!rtLockValidatorIsClassOrderOk(pClass, uSubClass, pPriorClass, uPriorSubClass))
2250 {
2251 if ( pClass == pPriorClass
2252 || rtLockValidatorClassIsPriorClass(pPriorClass, pClass))
2253 return rtLockValidatorStackWrongOrder("Wrong locking order! (more than one)", pSrcPos, pThreadSelf,
2254 pRec, pCur, pClass, pPriorClass);
2255 cNewRules++;
2256 }
2257 }
2258 pCur = pDown;
2259 }
2260 }
2261
2262 if (cNewRules == 1)
2263 {
2264 /*
2265 * Special case the simple operation, hoping that it will be a
2266 * frequent case.
2267 */
2268 int rc = rtLockValidatorClassAddPriorClass(pClass, pFirstBadClass, true /*fAutodidacticism*/, pSrcPos);
2269 if (rc == VERR_SEM_LV_WRONG_ORDER)
2270 return rtLockValidatorStackWrongOrder("Wrong locking order! (race)", pSrcPos, pThreadSelf,
2271 pRec, pFirstBadRec, pClass, pFirstBadClass);
2272 Assert(RT_SUCCESS(rc) || rc == VERR_NO_MEMORY);
2273 }
2274 else
2275 {
2276 /*
2277 * We may be adding more than one rule, so we have to take the lock
2278 * before starting to add the rules. This means we have to check
2279 * the state after taking it since we might be racing someone adding
2280 * a conflicting rule.
2281 */
2282 if (!RTCritSectIsInitialized(&g_LockValClassTeachCS))
2283 rtLockValidatorLazyInit();
2284 int rcLock = RTCritSectEnter(&g_LockValClassTeachCS);
2285
2286 /* Check */
2287 pCur = pFirstBadRec;
2288 while (pCur)
2289 {
2290 if (pCur->Core.u32Magic == RTLOCKVALRECNEST_MAGIC)
2291 pCur = pCur->Nest.pDown;
2292 else
2293 {
2294 uint32_t uPriorSubClass;
2295 PRTLOCKVALRECUNION pDown;
2296 RTLOCKVALCLASSINT *pPriorClass = rtLockValidatorRecGetClassesAndDown(pCur, &uPriorSubClass, &pDown);
2297 if (pPriorClass != NIL_RTLOCKVALCLASS)
2298 {
2299 if (!rtLockValidatorIsClassOrderOk(pClass, uSubClass, pPriorClass, uPriorSubClass))
2300 {
2301 if ( pClass == pPriorClass
2302 || rtLockValidatorClassIsPriorClass(pPriorClass, pClass))
2303 {
2304 if (RT_SUCCESS(rcLock))
2305 RTCritSectLeave(&g_LockValClassTeachCS);
2306 return rtLockValidatorStackWrongOrder("Wrong locking order! (2nd)", pSrcPos, pThreadSelf,
2307 pRec, pCur, pClass, pPriorClass);
2308 }
2309 }
2310 }
2311 pCur = pDown;
2312 }
2313 }
2314
2315 /* Iterate the stack yet again, adding new rules this time. */
2316 pCur = pFirstBadRec;
2317 while (pCur)
2318 {
2319 if (pCur->Core.u32Magic == RTLOCKVALRECNEST_MAGIC)
2320 pCur = pCur->Nest.pDown;
2321 else
2322 {
2323 uint32_t uPriorSubClass;
2324 PRTLOCKVALRECUNION pDown;
2325 RTLOCKVALCLASSINT *pPriorClass = rtLockValidatorRecGetClassesAndDown(pCur, &uPriorSubClass, &pDown);
2326 if (pPriorClass != NIL_RTLOCKVALCLASS)
2327 {
2328 if (!rtLockValidatorIsClassOrderOk(pClass, uSubClass, pPriorClass, uPriorSubClass))
2329 {
2330 Assert( pClass != pPriorClass
2331 && !rtLockValidatorClassIsPriorClass(pPriorClass, pClass));
2332 int rc = rtLockValidatorClassAddPriorClass(pClass, pPriorClass, true /*fAutodidacticism*/, pSrcPos);
2333 if (RT_FAILURE(rc))
2334 {
2335 Assert(rc == VERR_NO_MEMORY);
2336 break;
2337 }
2338 Assert(rtLockValidatorClassIsPriorClass(pClass, pPriorClass));
2339 }
2340 }
2341 pCur = pDown;
2342 }
2343 }
2344
2345 if (RT_SUCCESS(rcLock))
2346 RTCritSectLeave(&g_LockValClassTeachCS);
2347 }
2348
2349 return VINF_SUCCESS;
2350}
2351
2352
2353
2354/**
2355 * Checks the locking order.
2356 *
2357 * @returns VINF_SUCCESS, VERR_SEM_LV_WRONG_ORDER or VERR_SEM_LV_INTERNAL_ERROR.
2358 * @param pClass The lock class.
2359 * @param uSubClass The lock sub-class.
2360 * @param pThreadSelf The current thread.
2361 * @param pRec The lock record.
2362 * @param pSrcPos The source position of the locking operation.
2363 */
2364static int rtLockValidatorStackCheckLockingOrder(RTLOCKVALCLASSINT * const pClass, uint32_t const uSubClass,
2365 PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION const pRec,
2366 PCRTLOCKVALSRCPOS pSrcPos)
2367{
2368 /*
2369 * Some internal paranoia first.
2370 */
2371 AssertPtr(pClass);
2372 Assert(pClass->u32Magic == RTLOCKVALCLASS_MAGIC);
2373 AssertPtr(pThreadSelf);
2374 Assert(pThreadSelf->u32Magic == RTTHREADINT_MAGIC);
2375 AssertPtr(pRec);
2376 AssertPtrNull(pSrcPos);
2377
2378 /*
2379 * Walk the stack, delegate problems to a worker routine.
2380 */
2381 PRTLOCKVALRECUNION pCur = pThreadSelf->LockValidator.pStackTop;
2382 if (!pCur)
2383 return VINF_SUCCESS;
2384
2385 for (;;)
2386 {
2387 AssertPtrReturn(pCur, VERR_SEM_LV_INTERNAL_ERROR);
2388
2389 if (pCur->Core.u32Magic == RTLOCKVALRECNEST_MAGIC)
2390 pCur = pCur->Nest.pDown;
2391 else
2392 {
2393 uint32_t uPriorSubClass;
2394 PRTLOCKVALRECUNION pDown;
2395 RTLOCKVALCLASSINT *pPriorClass = rtLockValidatorRecGetClassesAndDown(pCur, &uPriorSubClass, &pDown);
2396 if (pPriorClass != NIL_RTLOCKVALCLASS)
2397 {
2398 AssertPtrReturn(pPriorClass, VERR_SEM_LV_INTERNAL_ERROR);
2399 AssertReturn(pPriorClass->u32Magic == RTLOCKVALCLASS_MAGIC, VERR_SEM_LV_INTERNAL_ERROR);
2400 if (RT_UNLIKELY(!rtLockValidatorIsClassOrderOk(pClass, uSubClass, pPriorClass, uPriorSubClass)))
2401 return rtLockValidatorStackCheckLockingOrder2(pClass, uSubClass, pThreadSelf, pRec, pSrcPos,
2402 pPriorClass, pCur, pDown);
2403 }
2404 pCur = pDown;
2405 }
2406 if (!pCur)
2407 return VINF_SUCCESS;
2408 }
2409}
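/* A minimal ring-3 sketch of how the order checking above surfaces to users
 * (names hypothetical). Two non-autodidact classes with an explicit "A before
 * B" rule; entering in the reverse order lands in
 * rtLockValidatorStackCheckLockingOrder2 and yields VERR_SEM_LV_WRONG_ORDER:
 * @code
 *  #include <iprt/critsect.h>
 *  #include <iprt/lockvalidator.h>
 *
 *  RTLOCKVALCLASS hClassA, hClassB;
 *  RTLockValidatorClassCreate(&hClassA, false, RT_SRC_POS, "class-A");  // fAutodidact=false
 *  RTLockValidatorClassCreate(&hClassB, false, RT_SRC_POS, "class-B");
 *  RTLockValidatorClassAddPriorClass(hClassB, hClassA);  // holding A while taking B is fine
 *
 *  RTCRITSECT CritSectA, CritSectB;
 *  RTCritSectInitEx(&CritSectA, 0, hClassA, RTLOCKVAL_SUB_CLASS_NONE, "csA");
 *  RTCritSectInitEx(&CritSectB, 0, hClassB, RTLOCKVAL_SUB_CLASS_NONE, "csB");
 *
 *  RTCritSectEnter(&CritSectA);
 *  RTCritSectEnter(&CritSectB);  // ok: A is a prior class of B
 *  // Entering B first and then A instead would be flagged as a wrong order.
 * @endcode
 */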
2410
2411
2412/**
2413 * Check that the lock record is the topmost one on the stack, complain and fail
2414 * if it isn't.
2415 *
2416 * @returns VINF_SUCCESS, VERR_SEM_LV_WRONG_RELEASE_ORDER or
2417 * VERR_SEM_LV_INVALID_PARAMETER.
2418 * @param pThreadSelf The current thread.
2419 * @param pRec The record.
2420 */
2421static int rtLockValidatorStackCheckReleaseOrder(PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION pRec)
2422{
2423 AssertReturn(pThreadSelf != NIL_RTTHREAD, VERR_SEM_LV_INVALID_PARAMETER);
2424 Assert(pThreadSelf == RTThreadSelf());
2425
2426 PRTLOCKVALRECUNION pTop = pThreadSelf->LockValidator.pStackTop;
2427 if (RT_LIKELY( pTop == pRec
2428 || ( pTop
2429 && pTop->Core.u32Magic == RTLOCKVALRECNEST_MAGIC
2430 && pTop->Nest.pRec == pRec) ))
2431 return VINF_SUCCESS;
2432
2433#ifdef RTLOCKVAL_WITH_RECURSION_RECORDS
2434 /* Look for a recursion record so the right frame is dumped and marked. */
2435 while (pTop)
2436 {
2437 if (pTop->Core.u32Magic == RTLOCKVALRECNEST_MAGIC)
2438 {
2439 if (pTop->Nest.pRec == pRec)
2440 {
2441 pRec = pTop;
2442 break;
2443 }
2444 pTop = pTop->Nest.pDown;
2445 }
2446 else if (pTop->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC)
2447 pTop = pTop->Excl.pDown;
2448 else if (pTop->Core.u32Magic == RTLOCKVALRECSHRDOWN_MAGIC)
2449 pTop = pTop->ShrdOwner.pDown;
2450 else
2451 break;
2452 }
2453#endif
2454
2455 rtLockValComplainFirst("Wrong release order!", NULL, pThreadSelf, pRec, true);
2456 rtLockValComplainPanic();
2457 return !g_fLockValSoftWrongOrder ? VERR_SEM_LV_WRONG_RELEASE_ORDER : VINF_SUCCESS;
2458}
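/* The release order check above is opt-in per class; a sketch of arming it
 * (hypothetical handle name). Note that the callers additionally require the
 * class to have a finite cMsMinOrder before consulting this check:
 * @code
 *  RTLOCKVALCLASS hClass;
 *  RTLockValidatorClassCreate(&hClass, true, RT_SRC_POS, "lifo-locks");  // fAutodidact=true
 *  RTLockValidatorClassEnforceStrictReleaseOrder(hClass, true);
 *  // With two members of this class held, releasing the one acquired first
 *  // (i.e. not the stack top) now fails with VERR_SEM_LV_WRONG_RELEASE_ORDER.
 * @endcode
 */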
2459
2460
2461/**
2462 * Checks if all owners are blocked - for shared records operating in signaller mode.
2463 *
2464 * @returns true / false accordingly.
2465 * @param pRec The record.
2466 * @param pThreadSelf The current thread.
2467 */
2468DECL_FORCE_INLINE(bool) rtLockValidatorDdAreAllThreadsBlocked(PRTLOCKVALRECSHRD pRec, PRTTHREADINT pThreadSelf)
2469{
2470 PRTLOCKVALRECSHRDOWN volatile *papOwners = pRec->papOwners;
2471 uint32_t cAllocated = pRec->cAllocated;
2472 uint32_t cEntries = ASMAtomicUoReadU32(&pRec->cEntries);
2473 if (cEntries == 0)
2474 return false;
2475
2476 for (uint32_t i = 0; i < cAllocated; i++)
2477 {
2478 PRTLOCKVALRECSHRDOWN pEntry = rtLockValidatorUoReadSharedOwner(&papOwners[i]);
2479 if ( pEntry
2480 && pEntry->Core.u32Magic == RTLOCKVALRECSHRDOWN_MAGIC)
2481 {
2482 PRTTHREADINT pCurThread = rtLockValidatorReadThreadHandle(&pEntry->hThread);
2483 if (!pCurThread)
2484 return false;
2485 if (pCurThread->u32Magic != RTTHREADINT_MAGIC)
2486 return false;
2487 if ( !RTTHREAD_IS_SLEEPING(rtThreadGetState(pCurThread))
2488 && pCurThread != pThreadSelf)
2489 return false;
2490 if (--cEntries == 0)
2491 break;
2492 }
2493 else
2494 Assert(!pEntry || pEntry->Core.u32Magic == RTLOCKVALRECSHRDOWN_MAGIC_DEAD);
2495 }
2496
2497 return true;
2498}
2499
2500
2501/**
2502 * Verifies the deadlock stack before calling it a deadlock.
2503 *
2504 * @retval VERR_SEM_LV_DEADLOCK if it's a deadlock.
2505 * @retval VERR_SEM_LV_ILLEGAL_UPGRADE if it's a deadlock on the same lock.
2506 * @retval VERR_TRY_AGAIN if something changed.
2507 *
2508 * @param pStack The deadlock detection stack.
2509 * @param pThreadSelf The current thread.
2510 */
2511static int rtLockValidatorDdVerifyDeadlock(PRTLOCKVALDDSTACK pStack, PRTTHREADINT pThreadSelf)
2512{
2513 uint32_t const c = pStack->c;
2514 for (uint32_t iPass = 0; iPass < 3; iPass++)
2515 {
2516 for (uint32_t i = 1; i < c; i++)
2517 {
2518 PRTTHREADINT pThread = pStack->a[i].pThread;
2519 if (pThread->u32Magic != RTTHREADINT_MAGIC)
2520 return VERR_TRY_AGAIN;
2521 if (rtThreadGetState(pThread) != pStack->a[i].enmState)
2522 return VERR_TRY_AGAIN;
2523 if (rtLockValidatorReadRecUnionPtr(&pThread->LockValidator.pRec) != pStack->a[i].pFirstSibling)
2524 return VERR_TRY_AGAIN;
2525 /* ASSUMES the signaller records won't have siblings! */
2526 PRTLOCKVALRECUNION pRec = pStack->a[i].pRec;
2527 if ( pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
2528 && pRec->Shared.fSignaller
2529 && !rtLockValidatorDdAreAllThreadsBlocked(&pRec->Shared, pThreadSelf))
2530 return VERR_TRY_AGAIN;
2531 }
2532 RTThreadYield();
2533 }
2534
2535 if (c == 1)
2536 return VERR_SEM_LV_ILLEGAL_UPGRADE;
2537 return VERR_SEM_LV_DEADLOCK;
2538}
2539
2540
2541/**
2542 * Checks for stack cycles caused by another deadlock before returning.
2543 *
2544 * @retval VINF_SUCCESS if the stack is simply too small.
2545 * @retval VERR_SEM_LV_EXISTING_DEADLOCK if a cycle was detected.
2546 *
2547 * @param pStack The deadlock detection stack.
2548 */
2549static int rtLockValidatorDdHandleStackOverflow(PRTLOCKVALDDSTACK pStack)
2550{
2551 for (size_t i = 0; i < RT_ELEMENTS(pStack->a) - 1; i++)
2552 {
2553 PRTTHREADINT pThread = pStack->a[i].pThread;
2554 for (size_t j = i + 1; j < RT_ELEMENTS(pStack->a); j++)
2555 if (pStack->a[j].pThread == pThread)
2556 return VERR_SEM_LV_EXISTING_DEADLOCK;
2557 }
2558 static bool volatile s_fComplained = false;
2559 if (!s_fComplained)
2560 {
2561 s_fComplained = true;
2562 rtLockValComplain(RT_SRC_POS, "lock validator stack is too small! (%zu entries)\n", RT_ELEMENTS(pStack->a));
2563 }
2564 return VINF_SUCCESS;
2565}
2566
2567
2568/**
2569 * Worker for rtLockValidatorDeadlockDetection that does the actual deadlock
2570 * detection.
2571 *
2572 * @retval VINF_SUCCESS
2573 * @retval VERR_SEM_LV_DEADLOCK
2574 * @retval VERR_SEM_LV_EXISTING_DEADLOCK
2575 * @retval VERR_SEM_LV_ILLEGAL_UPGRADE
2576 * @retval VERR_TRY_AGAIN
2577 *
2578 * @param pStack The stack to use.
2579 * @param pOriginalRec The original record.
2580 * @param pThreadSelf The calling thread.
2581 */
2582static int rtLockValidatorDdDoDetection(PRTLOCKVALDDSTACK pStack, PRTLOCKVALRECUNION const pOriginalRec,
2583 PRTTHREADINT const pThreadSelf)
2584{
2585 pStack->c = 0;
2586
2587 /* We could use a single RTLOCKVALDDENTRY variable here, but the
2588 compiler may do a better job of it when using individual variables. */
2589 PRTLOCKVALRECUNION pRec = pOriginalRec;
2590 PRTLOCKVALRECUNION pFirstSibling = pOriginalRec;
2591 uint32_t iEntry = UINT32_MAX;
2592 PRTTHREADINT pThread = NIL_RTTHREAD;
2593 RTTHREADSTATE enmState = RTTHREADSTATE_RUNNING;
2594 for (uint32_t iLoop = 0; ; iLoop++)
2595 {
2596 /*
2597 * Process the current record.
2598 */
2599 RTLOCKVAL_ASSERT_PTR_ALIGN(pRec);
2600
2601 /* Find the next relevant owner thread and record. */
2602 PRTLOCKVALRECUNION pNextRec = NULL;
2603 RTTHREADSTATE enmNextState = RTTHREADSTATE_RUNNING;
2604 PRTTHREADINT pNextThread = NIL_RTTHREAD;
2605 switch (pRec->Core.u32Magic)
2606 {
2607 case RTLOCKVALRECEXCL_MAGIC:
2608 Assert(iEntry == UINT32_MAX);
2609 for (;;)
2610 {
2611 pNextThread = rtLockValidatorReadThreadHandle(&pRec->Excl.hThread);
2612 if ( !pNextThread
2613 || pNextThread->u32Magic != RTTHREADINT_MAGIC)
2614 break;
2615 enmNextState = rtThreadGetState(pNextThread);
2616 if ( !RTTHREAD_IS_SLEEPING(enmNextState)
2617 && pNextThread != pThreadSelf)
2618 break;
2619 pNextRec = rtLockValidatorReadRecUnionPtr(&pNextThread->LockValidator.pRec);
2620 if (RT_LIKELY( !pNextRec
2621 || enmNextState == rtThreadGetState(pNextThread)))
2622 break;
2623 pNextRec = NULL;
2624 }
2625 if (!pNextRec)
2626 {
2627 pRec = pRec->Excl.pSibling;
2628 if ( pRec
2629 && pRec != pFirstSibling)
2630 continue;
2631 pNextThread = NIL_RTTHREAD;
2632 }
2633 break;
2634
2635 case RTLOCKVALRECSHRD_MAGIC:
2636 if (!pRec->Shared.fSignaller)
2637 {
2638 /* Skip to the next sibling if same side. ASSUMES reader priority. */
2639 /** @todo The read side of a read-write lock is problematic if
2640 * the implementation prioritizes writers over readers because
2641 * that means we could deadlock against current readers
2642 * if a writer showed up. If the RW sem implementation is
2643 * wrapping some native API, it's not so easy to detect when we
2644 * should do this and when we shouldn't. Checking when we
2645 * shouldn't is subject to wakeup scheduling and cannot easily
2646 * be made reliable.
2647 *
2648 * At the moment we circumvent all this mess by declaring that
2649 * readers have priority. This is TRUE on Linux, but probably
2650 * isn't on Solaris and FreeBSD. */
2651 if ( pRec == pFirstSibling
2652 && pRec->Shared.pSibling != NULL
2653 && pRec->Shared.pSibling != pFirstSibling)
2654 {
2655 pRec = pRec->Shared.pSibling;
2656 Assert(iEntry == UINT32_MAX);
2657 continue;
2658 }
2659 }
2660
2661 /* Scan the owner table for blocked owners. */
2662 if ( ASMAtomicUoReadU32(&pRec->Shared.cEntries) > 0
2663 && ( !pRec->Shared.fSignaller
2664 || iEntry != UINT32_MAX
2665 || rtLockValidatorDdAreAllThreadsBlocked(&pRec->Shared, pThreadSelf)
2666 )
2667 )
2668 {
2669 uint32_t cAllocated = pRec->Shared.cAllocated;
2670 PRTLOCKVALRECSHRDOWN volatile *papOwners = pRec->Shared.papOwners;
2671 while (++iEntry < cAllocated)
2672 {
2673 PRTLOCKVALRECSHRDOWN pEntry = rtLockValidatorUoReadSharedOwner(&papOwners[iEntry]);
2674 if (pEntry)
2675 {
2676 for (;;)
2677 {
2678 if (pEntry->Core.u32Magic != RTLOCKVALRECSHRDOWN_MAGIC)
2679 break;
2680 pNextThread = rtLockValidatorReadThreadHandle(&pEntry->hThread);
2681 if ( !pNextThread
2682 || pNextThread->u32Magic != RTTHREADINT_MAGIC)
2683 break;
2684 enmNextState = rtThreadGetState(pNextThread);
2685 if ( !RTTHREAD_IS_SLEEPING(enmNextState)
2686 && pNextThread != pThreadSelf)
2687 break;
2688 pNextRec = rtLockValidatorReadRecUnionPtr(&pNextThread->LockValidator.pRec);
2689 if (RT_LIKELY( !pNextRec
2690 || enmNextState == rtThreadGetState(pNextThread)))
2691 break;
2692 pNextRec = NULL;
2693 }
2694 if (pNextRec)
2695 break;
2696 }
2697 else
2698 Assert(!pEntry || pEntry->Core.u32Magic == RTLOCKVALRECSHRDOWN_MAGIC_DEAD);
2699 }
2700 if (pNextRec)
2701 break;
2702 pNextThread = NIL_RTTHREAD;
2703 }
2704
2705 /* Advance to the next sibling, if any. */
2706 pRec = pRec->Shared.pSibling;
2707 if ( pRec != NULL
2708 && pRec != pFirstSibling)
2709 {
2710 iEntry = UINT32_MAX;
2711 continue;
2712 }
2713 break;
2714
2715 case RTLOCKVALRECEXCL_MAGIC_DEAD:
2716 case RTLOCKVALRECSHRD_MAGIC_DEAD:
2717 break;
2718
2719 case RTLOCKVALRECSHRDOWN_MAGIC:
2720 case RTLOCKVALRECSHRDOWN_MAGIC_DEAD:
2721 default:
2722 AssertMsgFailed(("%p: %#x\n", pRec, pRec->Core.u32Magic));
2723 break;
2724 }
2725
2726 if (pNextRec)
2727 {
2728 /*
2729 * Recurse and check for deadlock.
2730 */
2731 uint32_t i = pStack->c;
2732 if (RT_UNLIKELY(i >= RT_ELEMENTS(pStack->a)))
2733 return rtLockValidatorDdHandleStackOverflow(pStack);
2734
2735 pStack->c++;
2736 pStack->a[i].pRec = pRec;
2737 pStack->a[i].iEntry = iEntry;
2738 pStack->a[i].enmState = enmState;
2739 pStack->a[i].pThread = pThread;
2740 pStack->a[i].pFirstSibling = pFirstSibling;
2741
2742 if (RT_UNLIKELY( pNextThread == pThreadSelf
2743 && ( i != 0
2744 || pRec->Core.u32Magic != RTLOCKVALRECSHRD_MAGIC
2745 || !pRec->Shared.fSignaller) /* ASSUMES signaller records have no siblings. */
2746 )
2747 )
2748 return rtLockValidatorDdVerifyDeadlock(pStack, pThreadSelf);
2749
2750 pRec = pNextRec;
2751 pFirstSibling = pNextRec;
2752 iEntry = UINT32_MAX;
2753 enmState = enmNextState;
2754 pThread = pNextThread;
2755 }
2756 else
2757 {
2758 /*
2759 * No deadlock here, unwind the stack and deal with any unfinished
2760 * business there.
2761 */
2762 uint32_t i = pStack->c;
2763 for (;;)
2764 {
2765 /* pop */
2766 if (i == 0)
2767 return VINF_SUCCESS;
2768 i--;
2769 pRec = pStack->a[i].pRec;
2770 iEntry = pStack->a[i].iEntry;
2771
2772 /* Examine it. */
2773 uint32_t u32Magic = pRec->Core.u32Magic;
2774 if (u32Magic == RTLOCKVALRECEXCL_MAGIC)
2775 pRec = pRec->Excl.pSibling;
2776 else if (u32Magic == RTLOCKVALRECSHRD_MAGIC)
2777 {
2778 if (iEntry + 1 < pRec->Shared.cAllocated)
2779 break; /* continue processing this record. */
2780 pRec = pRec->Shared.pSibling;
2781 }
2782 else
2783 {
2784 Assert( u32Magic == RTLOCKVALRECEXCL_MAGIC_DEAD
2785 || u32Magic == RTLOCKVALRECSHRD_MAGIC_DEAD);
2786 continue;
2787 }
2788
2789 /* Any next record to advance to? */
2790 if ( !pRec
2791 || pRec == pStack->a[i].pFirstSibling)
2792 continue;
2793 iEntry = UINT32_MAX;
2794 break;
2795 }
2796
2797 /* Restore the rest of the state and update the stack. */
2798 pFirstSibling = pStack->a[i].pFirstSibling;
2799 enmState = pStack->a[i].enmState;
2800 pThread = pStack->a[i].pThread;
2801 pStack->c = i;
2802 }
2803
2804 Assert(iLoop != 1000000);
2805 }
2806}
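/* The walk above is easiest to picture with the classic ABBA case
 * (hypothetical sketch). Thread 1 holds A and waits for B while thread 2,
 * the caller, holds B and is about to wait for A; starting from A's record
 * the walk goes owner (thread 1) -> its pRec (B) -> owner (thread 2, which is
 * pThreadSelf), and rtLockValidatorDdVerifyDeadlock then confirms the cycle:
 * @code
 *  // Thread 1:                       // Thread 2:
 *  RTCritSectEnter(&CritSectA);       RTCritSectEnter(&CritSectB);
 *  RTCritSectEnter(&CritSectB);       RTCritSectEnter(&CritSectA);  // -> VERR_SEM_LV_DEADLOCK
 * @endcode
 */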
2807
2808
2809/**
2810 * Check for the simple no-deadlock case.
2811 *
2812 * @returns true if no deadlock, false if further investigation is required.
2813 *
2814 * @param pOriginalRec The original record.
2815 */
2816DECLINLINE(int) rtLockValidatorIsSimpleNoDeadlockCase(PRTLOCKVALRECUNION pOriginalRec)
2817{
2818 if ( pOriginalRec->Excl.Core.u32Magic == RTLOCKVALRECEXCL_MAGIC
2819 && !pOriginalRec->Excl.pSibling)
2820 {
2821 PRTTHREADINT pThread = rtLockValidatorReadThreadHandle(&pOriginalRec->Excl.hThread);
2822 if ( !pThread
2823 || pThread->u32Magic != RTTHREADINT_MAGIC)
2824 return true;
2825 RTTHREADSTATE enmState = rtThreadGetState(pThread);
2826 if (!RTTHREAD_IS_SLEEPING(enmState))
2827 return true;
2828 }
2829 return false;
2830}
2831
2832
2833/**
2834 * Worker for rtLockValidatorDeadlockDetection that bitches about a deadlock.
2835 *
2836 * @param pStack The chain of locks causing the deadlock.
2837 * @param pRec The record relating to the current thread's lock
2838 * operation.
2839 * @param pThreadSelf This thread.
2840 * @param pSrcPos Where we are going to deadlock.
2841 * @param rc The return code.
2842 */
2843static void rcLockValidatorDoDeadlockComplaining(PRTLOCKVALDDSTACK pStack, PRTLOCKVALRECUNION pRec,
2844 PRTTHREADINT pThreadSelf, PCRTLOCKVALSRCPOS pSrcPos, int rc)
2845{
2846 if (!ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
2847 {
2848 const char *pszWhat;
2849 switch (rc)
2850 {
2851 case VERR_SEM_LV_DEADLOCK: pszWhat = "Detected deadlock!"; break;
2852 case VERR_SEM_LV_EXISTING_DEADLOCK: pszWhat = "Found existing deadlock!"; break;
2853 case VERR_SEM_LV_ILLEGAL_UPGRADE: pszWhat = "Illegal lock upgrade!"; break;
2854 default: AssertFailed(); pszWhat = "!unexpected rc!"; break;
2855 }
2856 rtLockValComplainFirst(pszWhat, pSrcPos, pThreadSelf, pStack->a[0].pRec != pRec ? pRec : NULL, true);
2857 rtLockValComplainMore("---- start of deadlock chain - %u entries ----\n", pStack->c);
2858 for (uint32_t i = 0; i < pStack->c; i++)
2859 {
2860 char szPrefix[24];
2861 RTStrPrintf(szPrefix, sizeof(szPrefix), "#%02u: ", i);
2862 PRTLOCKVALRECUNION pShrdOwner = NULL;
2863 if (pStack->a[i].pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC)
2864 pShrdOwner = (PRTLOCKVALRECUNION)pStack->a[i].pRec->Shared.papOwners[pStack->a[i].iEntry];
2865 if (VALID_PTR(pShrdOwner) && pShrdOwner->Core.u32Magic == RTLOCKVALRECSHRDOWN_MAGIC)
2866 {
2867 rtLockValComplainAboutLock(szPrefix, pShrdOwner, "\n");
2868 rtLockValComplainAboutLockStack(pShrdOwner->ShrdOwner.hThread, 5, 2, pShrdOwner);
2869 }
2870 else
2871 {
2872 rtLockValComplainAboutLock(szPrefix, pStack->a[i].pRec, "\n");
2873 if (pStack->a[i].pRec->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC)
2874 rtLockValComplainAboutLockStack(pStack->a[i].pRec->Excl.hThread, 5, 2, pStack->a[i].pRec);
2875 }
2876 }
2877 rtLockValComplainMore("---- end of deadlock chain ----\n");
2878 }
2879
2880 rtLockValComplainPanic();
2881}
2882
2883
2884/**
2885 * Perform deadlock detection.
2886 *
2887 * @retval VINF_SUCCESS
2888 * @retval VERR_SEM_LV_DEADLOCK
2889 * @retval VERR_SEM_LV_EXISTING_DEADLOCK
2890 * @retval VERR_SEM_LV_ILLEGAL_UPGRADE
2891 *
2892 * @param pRec The record relating to the current thread's lock
2893 * operation.
2894 * @param pThreadSelf The current thread.
2895 * @param pSrcPos The position of the current lock operation.
2896 */
2897static int rtLockValidatorDeadlockDetection(PRTLOCKVALRECUNION pRec, PRTTHREADINT pThreadSelf, PCRTLOCKVALSRCPOS pSrcPos)
2898{
2899 RTLOCKVALDDSTACK Stack;
2900 int rc = rtLockValidatorDdDoDetection(&Stack, pRec, pThreadSelf);
2901 if (RT_SUCCESS(rc))
2902 return VINF_SUCCESS;
2903
2904 if (rc == VERR_TRY_AGAIN)
2905 {
2906 for (uint32_t iLoop = 0; ; iLoop++)
2907 {
2908 rc = rtLockValidatorDdDoDetection(&Stack, pRec, pThreadSelf);
2909 if (RT_SUCCESS_NP(rc))
2910 return VINF_SUCCESS;
2911 if (rc != VERR_TRY_AGAIN)
2912 break;
2913 RTThreadYield();
2914 if (iLoop >= 3)
2915 return VINF_SUCCESS;
2916 }
2917 }
2918
2919 rcLockValidatorDoDeadlockComplaining(&Stack, pRec, pThreadSelf, pSrcPos, rc);
2920 return rc;
2921}
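/* What a positive detection does to the process is governed by the global
 * validator switches; a sketch of the usual ring-3 knobs (behaviour as
 * understood from iprt/lockvalidator.h, see there for the authoritative docs):
 * @code
 *  RTLockValidatorSetEnabled(true);    // master enable for new lock records
 *  RTLockValidatorSetQuiet(false);     // do emit the complaint dumps
 *  RTLockValidatorSetMayPanic(false);  // return VERR_SEM_LV_* instead of asserting
 * @endcode
 */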
2922
2923
2924RTDECL(void) RTLockValidatorRecExclInitV(PRTLOCKVALRECEXCL pRec, RTLOCKVALCLASS hClass, uint32_t uSubClass,
2925 void *hLock, bool fEnabled, const char *pszNameFmt, va_list va)
2926{
2927 RTLOCKVAL_ASSERT_PTR_ALIGN(pRec);
2928 RTLOCKVAL_ASSERT_PTR_ALIGN(hLock);
2929 Assert( uSubClass >= RTLOCKVAL_SUB_CLASS_USER
2930 || uSubClass == RTLOCKVAL_SUB_CLASS_NONE
2931 || uSubClass == RTLOCKVAL_SUB_CLASS_ANY);
2932
2933 pRec->Core.u32Magic = RTLOCKVALRECEXCL_MAGIC;
2934 pRec->fEnabled = fEnabled && RTLockValidatorIsEnabled();
2935 pRec->afReserved[0] = 0;
2936 pRec->afReserved[1] = 0;
2937 pRec->afReserved[2] = 0;
2938 rtLockValidatorSrcPosInit(&pRec->SrcPos);
2939 pRec->hThread = NIL_RTTHREAD;
2940 pRec->pDown = NULL;
2941 pRec->hClass = rtLockValidatorClassValidateAndRetain(hClass);
2942 pRec->uSubClass = uSubClass;
2943 pRec->cRecursion = 0;
2944 pRec->hLock = hLock;
2945 pRec->pSibling = NULL;
2946 if (pszNameFmt)
2947 RTStrPrintfV(pRec->szName, sizeof(pRec->szName), pszNameFmt, va);
2948 else
2949 {
2950 static uint32_t volatile s_cAnonymous = 0;
2951 uint32_t i = ASMAtomicIncU32(&s_cAnonymous) - 1;
2952 RTStrPrintf(pRec->szName, sizeof(pRec->szName), "anon-excl-%u", i);
2953 }
2954
2955 /* Lazy initialization. */
2956 if (RT_UNLIKELY(g_hLockValidatorXRoads == NIL_RTSEMXROADS))
2957 rtLockValidatorLazyInit();
2958}
2959
2960
2961RTDECL(void) RTLockValidatorRecExclInit(PRTLOCKVALRECEXCL pRec, RTLOCKVALCLASS hClass, uint32_t uSubClass,
2962 void *hLock, bool fEnabled, const char *pszNameFmt, ...)
2963{
2964 va_list va;
2965 va_start(va, pszNameFmt);
2966 RTLockValidatorRecExclInitV(pRec, hClass, uSubClass, hLock, fEnabled, pszNameFmt, va);
2967 va_end(va);
2968}
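/* A sketch of embedding an exclusive record in a custom lock type, roughly
 * mirroring what the IPRT lock implementations do internally (MYMUTEX and its
 * functions are hypothetical, not part of IPRT):
 * @code
 *  typedef struct MYMUTEX
 *  {
 *      RTSEMMUTEX          hMtx;
 *      RTLOCKVALRECEXCL    ValidatorRec;
 *  } MYMUTEX;
 *
 *  int MyMutexInit(MYMUTEX *pThis, const char *pszName)
 *  {
 *      int rc = RTSemMutexCreate(&pThis->hMtx);
 *      if (RT_SUCCESS(rc))
 *          RTLockValidatorRecExclInit(&pThis->ValidatorRec, NIL_RTLOCKVALCLASS,
 *                                     RTLOCKVAL_SUB_CLASS_NONE, pThis,  // pThis doubles as hLock
 *                                     true, "%s", pszName);             // fEnabled=true
 *      return rc;
 *  }
 * @endcode
 */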
2969
2970
2971RTDECL(int) RTLockValidatorRecExclCreateV(PRTLOCKVALRECEXCL *ppRec, RTLOCKVALCLASS hClass,
2972 uint32_t uSubClass, void *pvLock, bool fEnabled,
2973 const char *pszNameFmt, va_list va)
2974{
2975 PRTLOCKVALRECEXCL pRec;
2976 *ppRec = pRec = (PRTLOCKVALRECEXCL)RTMemAlloc(sizeof(*pRec));
2977 if (!pRec)
2978 return VERR_NO_MEMORY;
2979 RTLockValidatorRecExclInitV(pRec, hClass, uSubClass, pvLock, fEnabled, pszNameFmt, va);
2980 return VINF_SUCCESS;
2981}
2982
2983
2984RTDECL(int) RTLockValidatorRecExclCreate(PRTLOCKVALRECEXCL *ppRec, RTLOCKVALCLASS hClass,
2985 uint32_t uSubClass, void *pvLock, bool fEnabled,
2986 const char *pszNameFmt, ...)
2987{
2988 va_list va;
2989 va_start(va, pszNameFmt);
2990 int rc = RTLockValidatorRecExclCreateV(ppRec, hClass, uSubClass, pvLock, fEnabled, pszNameFmt, va);
2991 va_end(va);
2992 return rc;
2993}
2994
2995
2996RTDECL(void) RTLockValidatorRecExclDelete(PRTLOCKVALRECEXCL pRec)
2997{
2998 Assert(pRec->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC);
2999
3000 rtLockValidatorSerializeDestructEnter();
3001
3002 ASMAtomicWriteU32(&pRec->Core.u32Magic, RTLOCKVALRECEXCL_MAGIC_DEAD);
3003 ASMAtomicWriteHandle(&pRec->hThread, NIL_RTTHREAD);
3004 RTLOCKVALCLASS hClass;
3005 ASMAtomicXchgHandle(&pRec->hClass, NIL_RTLOCKVALCLASS, &hClass);
3006 if (pRec->pSibling)
3007 rtLockValidatorUnlinkAllSiblings(&pRec->Core);
3008 rtLockValidatorSerializeDestructLeave();
3009 if (hClass != NIL_RTLOCKVALCLASS)
3010 RTLockValidatorClassRelease(hClass);
3011}
3012
3013
3014RTDECL(void) RTLockValidatorRecExclDestroy(PRTLOCKVALRECEXCL *ppRec)
3015{
3016 PRTLOCKVALRECEXCL pRec = *ppRec;
3017 *ppRec = NULL;
3018 if (pRec)
3019 {
3020 RTLockValidatorRecExclDelete(pRec);
3021 RTMemFree(pRec);
3022 }
3023}
3024
3025
3026RTDECL(uint32_t) RTLockValidatorRecExclSetSubClass(PRTLOCKVALRECEXCL pRec, uint32_t uSubClass)
3027{
3028 AssertPtrReturn(pRec, RTLOCKVAL_SUB_CLASS_INVALID);
3029 AssertReturn(pRec->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, RTLOCKVAL_SUB_CLASS_INVALID);
3030 AssertReturn( uSubClass >= RTLOCKVAL_SUB_CLASS_USER
3031 || uSubClass == RTLOCKVAL_SUB_CLASS_NONE
3032 || uSubClass == RTLOCKVAL_SUB_CLASS_ANY,
3033 RTLOCKVAL_SUB_CLASS_INVALID);
3034 return ASMAtomicXchgU32(&pRec->uSubClass, uSubClass);
3035}
3036
3037
3038RTDECL(void) RTLockValidatorRecExclSetOwner(PRTLOCKVALRECEXCL pRec, RTTHREAD hThreadSelf,
3039 PCRTLOCKVALSRCPOS pSrcPos, bool fFirstRecursion)
3040{
3041 PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
3042 if (!pRecU)
3043 return;
3044 AssertReturnVoid(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC);
3045 if (!pRecU->Excl.fEnabled)
3046 return;
3047 if (hThreadSelf == NIL_RTTHREAD)
3048 {
3049 hThreadSelf = RTThreadSelfAutoAdopt();
3050 AssertReturnVoid(hThreadSelf != NIL_RTTHREAD);
3051 }
3052 AssertReturnVoid(hThreadSelf->u32Magic == RTTHREADINT_MAGIC);
3053 Assert(hThreadSelf == RTThreadSelf());
3054
3055 ASMAtomicIncS32(&hThreadSelf->LockValidator.cWriteLocks);
3056
3057 if (pRecU->Excl.hThread == hThreadSelf)
3058 {
3059 Assert(!fFirstRecursion);
3060 pRecU->Excl.cRecursion++;
3061 rtLockValidatorStackPushRecursion(hThreadSelf, pRecU, pSrcPos);
3062 }
3063 else
3064 {
3065 Assert(pRecU->Excl.hThread == NIL_RTTHREAD);
3066
3067 rtLockValidatorSrcPosCopy(&pRecU->Excl.SrcPos, pSrcPos);
3068 ASMAtomicUoWriteU32(&pRecU->Excl.cRecursion, 1);
3069 ASMAtomicWriteHandle(&pRecU->Excl.hThread, hThreadSelf);
3070
3071 rtLockValidatorStackPush(hThreadSelf, pRecU);
3072 }
3073}
3074
3075
3076/**
3077 * Internal worker for RTLockValidatorRecExclReleaseOwner and
3078 * RTLockValidatorRecExclReleaseOwnerUnchecked.
3079 */
3080static void rtLockValidatorRecExclReleaseOwnerUnchecked(PRTLOCKVALRECUNION pRec, bool fFinalRecursion)
3081{
3082 RTTHREADINT *pThread = pRec->Excl.hThread;
3083 AssertReturnVoid(pThread != NIL_RTTHREAD);
3084 Assert(pThread == RTThreadSelf());
3085
3086 ASMAtomicDecS32(&pThread->LockValidator.cWriteLocks);
3087 uint32_t c = ASMAtomicDecU32(&pRec->Excl.cRecursion);
3088 if (c == 0)
3089 {
3090 rtLockValidatorStackPop(pThread, pRec);
3091 ASMAtomicWriteHandle(&pRec->Excl.hThread, NIL_RTTHREAD);
3092 }
3093 else
3094 {
3095 Assert(c < UINT32_C(0xffff0000));
3096 Assert(!fFinalRecursion);
3097 rtLockValidatorStackPopRecursion(pThread, pRec);
3098 }
3099}
3100
3101RTDECL(int) RTLockValidatorRecExclReleaseOwner(PRTLOCKVALRECEXCL pRec, bool fFinalRecursion)
3102{
3103 PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
3104 if (!pRecU)
3105 return VINF_SUCCESS;
3106 AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
3107 if (!pRecU->Excl.fEnabled)
3108 return VINF_SUCCESS;
3109
3110 /*
3111 * Check the release order.
3112 */
3113 if ( pRecU->Excl.hClass != NIL_RTLOCKVALCLASS
3114 && pRecU->Excl.hClass->fStrictReleaseOrder
3115 && pRecU->Excl.hClass->cMsMinOrder != RT_INDEFINITE_WAIT
3116 )
3117 {
3118 int rc = rtLockValidatorStackCheckReleaseOrder(pRecU->Excl.hThread, pRecU);
3119 if (RT_FAILURE(rc))
3120 return rc;
3121 }
3122
3123 /*
3124 * Join paths with RTLockValidatorRecExclReleaseOwnerUnchecked.
3125 */
3126 rtLockValidatorRecExclReleaseOwnerUnchecked(pRecU, fFinalRecursion);
3127 return VINF_SUCCESS;
3128}
3129
3130
3131RTDECL(void) RTLockValidatorRecExclReleaseOwnerUnchecked(PRTLOCKVALRECEXCL pRec)
3132{
3133 PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
3134 AssertReturnVoid(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC);
3135 if (pRecU->Excl.fEnabled)
3136 rtLockValidatorRecExclReleaseOwnerUnchecked(pRecU, false);
3137}
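/* Continuing the hypothetical MYMUTEX sketch from above, the owner hand-off
 * brackets the real semaphore operations: SetOwner only after the lock is
 * really held, ReleaseOwner before letting it go:
 * @code
 *  int MyMutexEnter(MYMUTEX *pThis)
 *  {
 *      int rc = RTSemMutexRequest(pThis->hMtx, RT_INDEFINITE_WAIT);
 *      if (RT_SUCCESS(rc))
 *          RTLockValidatorRecExclSetOwner(&pThis->ValidatorRec, NIL_RTTHREAD,
 *                                         NULL, true);  // fFirstRecursion
 *      return rc;
 *  }
 *
 *  int MyMutexLeave(MYMUTEX *pThis)
 *  {
 *      int rc = RTLockValidatorRecExclReleaseOwner(&pThis->ValidatorRec, true);
 *      if (RT_SUCCESS(rc))  // a strict release order complaint stops us here
 *          rc = RTSemMutexRelease(pThis->hMtx);
 *      return rc;
 *  }
 * @endcode
 */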
3138
3139
3140RTDECL(int) RTLockValidatorRecExclRecursion(PRTLOCKVALRECEXCL pRec, PCRTLOCKVALSRCPOS pSrcPos)
3141{
3142 PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
3143 if (!pRecU)
3144 return VINF_SUCCESS;
3145 AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
3146 if (!pRecU->Excl.fEnabled)
3147 return VINF_SUCCESS;
3148 AssertReturn(pRecU->Excl.hThread != NIL_RTTHREAD, VERR_SEM_LV_INVALID_PARAMETER);
3149 AssertReturn(pRecU->Excl.cRecursion > 0, VERR_SEM_LV_INVALID_PARAMETER);
3150
3151 if ( pRecU->Excl.hClass != NIL_RTLOCKVALCLASS
3152 && !pRecU->Excl.hClass->fRecursionOk)
3153 {
3154 rtLockValComplainFirst("Recursion not allowed by the class!",
3155 pSrcPos, pRecU->Excl.hThread, (PRTLOCKVALRECUNION)pRec, true);
3156 rtLockValComplainPanic();
3157 return VERR_SEM_LV_NESTED;
3158 }
3159
3160 Assert(pRecU->Excl.cRecursion < _1M);
3161 pRecU->Excl.cRecursion++;
3162 rtLockValidatorStackPushRecursion(pRecU->Excl.hThread, pRecU, pSrcPos);
3163 return VINF_SUCCESS;
3164}
3165
3166
3167RTDECL(int) RTLockValidatorRecExclUnwind(PRTLOCKVALRECEXCL pRec)
3168{
3169 PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
3170 AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
3171 if (!pRecU->Excl.fEnabled)
3172 return VINF_SUCCESS;
3173 AssertReturn(pRecU->Excl.hThread != NIL_RTTHREAD, VERR_SEM_LV_INVALID_PARAMETER);
3174 Assert(pRecU->Excl.hThread == RTThreadSelf());
3175 AssertReturn(pRecU->Excl.cRecursion > 1, VERR_SEM_LV_INVALID_PARAMETER);
3176
3177 /*
3178 * Check the release order.
3179 */
3180 if ( pRecU->Excl.hClass != NIL_RTLOCKVALCLASS
3181 && pRecU->Excl.hClass->fStrictReleaseOrder
3182 && pRecU->Excl.hClass->cMsMinOrder != RT_INDEFINITE_WAIT
3183 )
3184 {
3185 int rc = rtLockValidatorStackCheckReleaseOrder(pRecU->Excl.hThread, pRecU);
3186 if (RT_FAILURE(rc))
3187 return rc;
3188 }
3189
3190 /*
3191 * Perform the unwind.
3192 */
3193 pRecU->Excl.cRecursion--;
3194 rtLockValidatorStackPopRecursion(pRecU->Excl.hThread, pRecU);
3195 return VINF_SUCCESS;
3196}
3197
3198
3199RTDECL(int) RTLockValidatorRecExclRecursionMixed(PRTLOCKVALRECEXCL pRec, PRTLOCKVALRECCORE pRecMixed, PCRTLOCKVALSRCPOS pSrcPos)
3200{
3201 PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
3202 AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
3203 PRTLOCKVALRECUNION pRecMixedU = (PRTLOCKVALRECUNION)pRecMixed;
3204 AssertReturn( pRecMixedU->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
3205 || pRecMixedU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC
3206 , VERR_SEM_LV_INVALID_PARAMETER);
3207 if (!pRecU->Excl.fEnabled)
3208 return VINF_SUCCESS;
3209 Assert(pRecU->Excl.hThread == RTThreadSelf());
3210 AssertReturn(pRecU->Excl.hThread != NIL_RTTHREAD, VERR_SEM_LV_INVALID_PARAMETER);
3211 AssertReturn(pRecU->Excl.cRecursion > 0, VERR_SEM_LV_INVALID_PARAMETER);
3212
3213 if ( pRecU->Excl.hClass != NIL_RTLOCKVALCLASS
3214 && !pRecU->Excl.hClass->fRecursionOk)
3215 {
3216 rtLockValComplainFirst("Mixed recursion not allowed by the class!",
3217 pSrcPos, pRecU->Excl.hThread, (PRTLOCKVALRECUNION)pRec, true);
3218 rtLockValComplainPanic();
3219 return VERR_SEM_LV_NESTED;
3220 }
3221
3222 Assert(pRecU->Excl.cRecursion < _1M);
3223 pRecU->Excl.cRecursion++;
3224 rtLockValidatorStackPushRecursion(pRecU->Excl.hThread, pRecU, pSrcPos);
3225
3226 return VINF_SUCCESS;
3227}
3228
3229
3230RTDECL(int) RTLockValidatorRecExclUnwindMixed(PRTLOCKVALRECEXCL pRec, PRTLOCKVALRECCORE pRecMixed)
3231{
3232 PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
3233 AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
3234 PRTLOCKVALRECUNION pRecMixedU = (PRTLOCKVALRECUNION)pRecMixed;
3235 AssertReturn( pRecMixedU->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
3236 || pRecMixedU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC
3237 , VERR_SEM_LV_INVALID_PARAMETER);
3238 if (!pRecU->Excl.fEnabled)
3239 return VINF_SUCCESS;
3240 Assert(pRecU->Excl.hThread == RTThreadSelf());
3241 AssertReturn(pRecU->Excl.hThread != NIL_RTTHREAD, VERR_SEM_LV_INVALID_PARAMETER);
3242 AssertReturn(pRecU->Excl.cRecursion > 1, VERR_SEM_LV_INVALID_PARAMETER);
3243
3244 /*
3245 * Check the release order.
3246 */
3247 if ( pRecU->Excl.hClass != NIL_RTLOCKVALCLASS
3248 && pRecU->Excl.hClass->fStrictReleaseOrder
3249 && pRecU->Excl.hClass->cMsMinOrder != RT_INDEFINITE_WAIT
3250 )
3251 {
3252 int rc = rtLockValidatorStackCheckReleaseOrder(pRecU->Excl.hThread, pRecU);
3253 if (RT_FAILURE(rc))
3254 return rc;
3255 }
3256
3257 /*
3258 * Perform the unwind.
3259 */
3260 pRecU->Excl.cRecursion--;
3261 rtLockValidatorStackPopRecursion(pRecU->Excl.hThread, pRecU);
3262 return VINF_SUCCESS;
3263}
3264
3265
3266RTDECL(int) RTLockValidatorRecExclCheckOrder(PRTLOCKVALRECEXCL pRec, RTTHREAD hThreadSelf,
3267 PCRTLOCKVALSRCPOS pSrcPos, RTMSINTERVAL cMillies)
3268{
3269 /*
3270 * Validate and adjust input. Quit early if order validation is disabled.
3271 */
3272 PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
3273 if (!pRecU)
3274 return VINF_SUCCESS;
3275 AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
3276 if ( !pRecU->Excl.fEnabled
3277 || pRecU->Excl.hClass == NIL_RTLOCKVALCLASS
3278 || pRecU->Excl.hClass->cMsMinOrder == RT_INDEFINITE_WAIT
3279 || pRecU->Excl.hClass->cMsMinOrder > cMillies)
3280 return VINF_SUCCESS;
3281
3282 if (hThreadSelf == NIL_RTTHREAD)
3283 {
3284 hThreadSelf = RTThreadSelfAutoAdopt();
3285 AssertReturn(hThreadSelf != NIL_RTTHREAD, VERR_SEM_LV_INTERNAL_ERROR);
3286 }
3287 AssertReturn(hThreadSelf->u32Magic == RTTHREADINT_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
3288 Assert(hThreadSelf == RTThreadSelf());
3289
3290 /*
3291 * Detect recursion as it isn't subject to order restrictions.
3292 */
3293 if (pRec->hThread == hThreadSelf)
3294 return VINF_SUCCESS;
3295
3296 return rtLockValidatorStackCheckLockingOrder(pRecU->Excl.hClass, pRecU->Excl.uSubClass, hThreadSelf, pRecU, pSrcPos);
3297}
3298
3299
3300RTDECL(int) RTLockValidatorRecExclCheckBlocking(PRTLOCKVALRECEXCL pRec, RTTHREAD hThreadSelf,
3301 PCRTLOCKVALSRCPOS pSrcPos, bool fRecursiveOk, RTMSINTERVAL cMillies,
3302 RTTHREADSTATE enmSleepState, bool fReallySleeping)
3303{
3304 /*
3305 * Fend off wild life.
3306 */
3307 PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
3308 if (!pRecU)
3309 return VINF_SUCCESS;
3310 AssertPtrReturn(pRecU, VERR_SEM_LV_INVALID_PARAMETER);
3311 AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
3312 if (!pRec->fEnabled)
3313 return VINF_SUCCESS;
3314
3315 PRTTHREADINT pThreadSelf = hThreadSelf;
3316 AssertPtrReturn(pThreadSelf, VERR_SEM_LV_INVALID_PARAMETER);
3317 AssertReturn(pThreadSelf->u32Magic == RTTHREADINT_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
3318 Assert(pThreadSelf == RTThreadSelf());
3319
3320 AssertReturn(RTTHREAD_IS_SLEEPING(enmSleepState), VERR_SEM_LV_INVALID_PARAMETER);
3321
3322 RTTHREADSTATE enmThreadState = rtThreadGetState(pThreadSelf);
3323 if (RT_UNLIKELY(enmThreadState != RTTHREADSTATE_RUNNING))
3324 {
3325 AssertReturn( enmThreadState == RTTHREADSTATE_TERMINATED /* rtThreadRemove uses locks too */
3326 || enmThreadState == RTTHREADSTATE_INITIALIZING /* rtThreadInsert uses locks too */
3327 , VERR_SEM_LV_INVALID_PARAMETER);
3328 enmSleepState = enmThreadState;
3329 }
3330
3331 /*
3332 * Record the location.
3333 */
3334 rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pRec, pRecU);
3335 rtLockValidatorSrcPosCopy(&pThreadSelf->LockValidator.SrcPos, pSrcPos);
3336 ASMAtomicWriteBool(&pThreadSelf->LockValidator.fInValidator, true);
3337 pThreadSelf->LockValidator.enmRecState = enmSleepState;
3338 rtThreadSetState(pThreadSelf, enmSleepState);
3339
3340 /*
3341 * Don't do deadlock detection if we're recursing.
3342 *
3343 * On some hosts we don't do recursion accounting ourselves and there
3344 * isn't any other place to check for this.
3345 */
3346 int rc = VINF_SUCCESS;
3347 if (rtLockValidatorReadThreadHandle(&pRecU->Excl.hThread) == pThreadSelf)
3348 {
3349 if ( !fRecursiveOk
3350 || ( pRecU->Excl.hClass != NIL_RTLOCKVALCLASS
3351 && !pRecU->Excl.hClass->fRecursionOk))
3352 {
3353 rtLockValComplainFirst("Recursion not allowed!", pSrcPos, pThreadSelf, pRecU, true);
3354 rtLockValComplainPanic();
3355 rc = VERR_SEM_LV_NESTED;
3356 }
3357 }
3358 /*
3359 * Perform deadlock detection.
3360 */
3361 else if ( pRecU->Excl.hClass != NIL_RTLOCKVALCLASS
3362 && ( pRecU->Excl.hClass->cMsMinDeadlock > cMillies
3363 || pRecU->Excl.hClass->cMsMinDeadlock > RT_INDEFINITE_WAIT))
3364 rc = VINF_SUCCESS;
3365 else if (!rtLockValidatorIsSimpleNoDeadlockCase(pRecU))
3366 rc = rtLockValidatorDeadlockDetection(pRecU, pThreadSelf, pSrcPos);
3367
3368 if (RT_SUCCESS(rc))
3369 ASMAtomicWriteBool(&pThreadSelf->fReallySleeping, fReallySleeping);
3370 else
3371 {
3372 rtThreadSetState(pThreadSelf, enmThreadState);
3373 rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pRec, NULL);
3374 }
3375 ASMAtomicWriteBool(&pThreadSelf->LockValidator.fInValidator, false);
3376 return rc;
3377}
3378RT_EXPORT_SYMBOL(RTLockValidatorRecExclCheckBlocking);
3379
3380
3381RTDECL(int) RTLockValidatorRecExclCheckOrderAndBlocking(PRTLOCKVALRECEXCL pRec, RTTHREAD hThreadSelf,
3382 PCRTLOCKVALSRCPOS pSrcPos, bool fRecursiveOk, RTMSINTERVAL cMillies,
3383 RTTHREADSTATE enmSleepState, bool fReallySleeping)
3384{
3385 int rc = RTLockValidatorRecExclCheckOrder(pRec, hThreadSelf, pSrcPos, cMillies);
3386 if (RT_SUCCESS(rc))
3387 rc = RTLockValidatorRecExclCheckBlocking(pRec, hThreadSelf, pSrcPos, fRecursiveOk, cMillies,
3388 enmSleepState, fReallySleeping);
3389 return rc;
3390}
3391RT_EXPORT_SYMBOL(RTLockValidatorRecExclCheckOrderAndBlocking);
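/* A sketch of the waiter side for the hypothetical MYMUTEX example. The
 * order/blocking check must run before the real wait, and the
 * RTThreadUnblocked call restores the thread state the check set for the
 * deadlock detector:
 * @code
 *  int MyMutexEnterDebug(MYMUTEX *pThis, RTHCUINTPTR uId, RT_SRC_POS_DECL)
 *  {
 *      RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
 *      RTTHREAD hThreadSelf = RTThreadSelfAutoAdopt();
 *      int rc = RTLockValidatorRecExclCheckOrderAndBlocking(&pThis->ValidatorRec, hThreadSelf,
 *                                                           &SrcPos, true,  // fRecursiveOk
 *                                                           RT_INDEFINITE_WAIT,
 *                                                           RTTHREADSTATE_MUTEX, true);
 *      if (RT_SUCCESS(rc))
 *      {
 *          rc = RTSemMutexRequest(pThis->hMtx, RT_INDEFINITE_WAIT);
 *          RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_MUTEX);
 *          if (RT_SUCCESS(rc))
 *              RTLockValidatorRecExclSetOwner(&pThis->ValidatorRec, hThreadSelf, &SrcPos, true);
 *      }
 *      return rc;
 *  }
 * @endcode
 */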
3392
3393
3394RTDECL(void) RTLockValidatorRecSharedInitV(PRTLOCKVALRECSHRD pRec, RTLOCKVALCLASS hClass, uint32_t uSubClass,
3395 void *hLock, bool fSignaller, bool fEnabled, const char *pszNameFmt, va_list va)
3396{
3397 RTLOCKVAL_ASSERT_PTR_ALIGN(pRec);
3398 RTLOCKVAL_ASSERT_PTR_ALIGN(hLock);
3399 Assert( uSubClass >= RTLOCKVAL_SUB_CLASS_USER
3400 || uSubClass == RTLOCKVAL_SUB_CLASS_NONE
3401 || uSubClass == RTLOCKVAL_SUB_CLASS_ANY);
3402
3403 pRec->Core.u32Magic = RTLOCKVALRECSHRD_MAGIC;
3404 pRec->uSubClass = uSubClass;
3405 pRec->hClass = rtLockValidatorClassValidateAndRetain(hClass);
3406 pRec->hLock = hLock;
3407 pRec->fEnabled = fEnabled && RTLockValidatorIsEnabled();
3408 pRec->fSignaller = fSignaller;
3409 pRec->pSibling = NULL;
3410
3411 /* the table */
3412 pRec->cEntries = 0;
3413 pRec->iLastEntry = 0;
3414 pRec->cAllocated = 0;
3415 pRec->fReallocating = false;
3416 pRec->fPadding = false;
3417 pRec->papOwners = NULL;
3418
3419 /* the name */
3420 if (pszNameFmt)
3421 RTStrPrintfV(pRec->szName, sizeof(pRec->szName), pszNameFmt, va);
3422 else
3423 {
3424 static uint32_t volatile s_cAnonymous = 0;
3425 uint32_t i = ASMAtomicIncU32(&s_cAnonymous) - 1;
3426 RTStrPrintf(pRec->szName, sizeof(pRec->szName), "anon-shrd-%u", i);
3427 }
3428}
3429
3430
3431RTDECL(void) RTLockValidatorRecSharedInit(PRTLOCKVALRECSHRD pRec, RTLOCKVALCLASS hClass, uint32_t uSubClass,
3432 void *hLock, bool fSignaller, bool fEnabled, const char *pszNameFmt, ...)
3433{
3434 va_list va;
3435 va_start(va, pszNameFmt);
3436 RTLockValidatorRecSharedInitV(pRec, hClass, uSubClass, hLock, fSignaller, fEnabled, pszNameFmt, va);
3437 va_end(va);
3438}
3439
3440
3441RTDECL(int) RTLockValidatorRecSharedCreateV(PRTLOCKVALRECSHRD *ppRec, RTLOCKVALCLASS hClass,
3442 uint32_t uSubClass, void *pvLock, bool fSignaller, bool fEnabled,
3443 const char *pszNameFmt, va_list va)
3444{
3445 PRTLOCKVALRECSHRD pRec;
3446 *ppRec = pRec = (PRTLOCKVALRECSHRD)RTMemAlloc(sizeof(*pRec));
3447 if (!pRec)
3448 return VERR_NO_MEMORY;
3449 RTLockValidatorRecSharedInitV(pRec, hClass, uSubClass, pvLock, fSignaller, fEnabled, pszNameFmt, va);
3450 return VINF_SUCCESS;
3451}
3452
3453
3454RTDECL(int) RTLockValidatorRecSharedCreate(PRTLOCKVALRECSHRD *ppRec, RTLOCKVALCLASS hClass,
3455 uint32_t uSubClass, void *pvLock, bool fSignaller, bool fEnabled,
3456 const char *pszNameFmt, ...)
3457{
3458 va_list va;
3459 va_start(va, pszNameFmt);
3460 int rc = RTLockValidatorRecSharedCreateV(ppRec, hClass, uSubClass, pvLock, fSignaller, fEnabled, pszNameFmt, va);
3461 va_end(va);
3462 return rc;
3463}
3464
3465
3466RTDECL(void) RTLockValidatorRecSharedDelete(PRTLOCKVALRECSHRD pRec)
3467{
3468 Assert(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC);
3469
3470 /*
3471 * Flip it into table realloc mode and take the destruction lock.
3472 */
3473 rtLockValidatorSerializeDestructEnter();
3474 while (!ASMAtomicCmpXchgBool(&pRec->fReallocating, true, false))
3475 {
3476 rtLockValidatorSerializeDestructLeave();
3477
3478 rtLockValidatorSerializeDetectionEnter();
3479 rtLockValidatorSerializeDetectionLeave();
3480
3481 rtLockValidatorSerializeDestructEnter();
3482 }
3483
3484 ASMAtomicWriteU32(&pRec->Core.u32Magic, RTLOCKVALRECSHRD_MAGIC_DEAD);
3485 RTLOCKVALCLASS hClass;
3486 ASMAtomicXchgHandle(&pRec->hClass, NIL_RTLOCKVALCLASS, &hClass);
3487 if (pRec->papOwners)
3488 {
3489 PRTLOCKVALRECSHRDOWN volatile *papOwners = pRec->papOwners;
3490 ASMAtomicUoWriteNullPtr(&pRec->papOwners);
3491 ASMAtomicUoWriteU32(&pRec->cAllocated, 0);
3492
3493 RTMemFree((void *)papOwners);
3494 }
3495 if (pRec->pSibling)
3496 rtLockValidatorUnlinkAllSiblings(&pRec->Core);
3497 ASMAtomicWriteBool(&pRec->fReallocating, false);
3498
3499 rtLockValidatorSerializeDestructLeave();
3500
3501 if (hClass != NIL_RTLOCKVALCLASS)
3502 RTLockValidatorClassRelease(hClass);
3503}
3504
3505
3506RTDECL(void) RTLockValidatorRecSharedDestroy(PRTLOCKVALRECSHRD *ppRec)
3507{
3508 PRTLOCKVALRECSHRD pRec = *ppRec;
3509 *ppRec = NULL;
3510 if (pRec)
3511 {
3512 RTLockValidatorRecSharedDelete(pRec);
3513 RTMemFree(pRec);
3514 }
3515}
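/*
 * Editor's note (illustration, not part of the original file): the Delete
 * API above is for records embedded in the owning lock structure and
 * initialized with RTLockValidatorRecSharedInit, while Destroy pairs with
 * RTLockValidatorRecSharedCreate and also frees the record itself.  Reusing
 * the made-up MYRWLOCK from the earlier sketch:
 */
#if 0 /* illustration only */
static void myRWLockTermValidator(MYRWLOCK *pThis)
{
    RTLockValidatorRecSharedDelete(&pThis->ValidatorRead); /* embedded record: no RTMemFree here */
}
#endif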
3516
3517
3518RTDECL(uint32_t) RTLockValidatorRecSharedSetSubClass(PRTLOCKVALRECSHRD pRec, uint32_t uSubClass)
3519{
3520 AssertPtrReturn(pRec, RTLOCKVAL_SUB_CLASS_INVALID);
3521 AssertReturn(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, RTLOCKVAL_SUB_CLASS_INVALID);
3522 AssertReturn( uSubClass >= RTLOCKVAL_SUB_CLASS_USER
3523 || uSubClass == RTLOCKVAL_SUB_CLASS_NONE
3524 || uSubClass == RTLOCKVAL_SUB_CLASS_ANY,
3525 RTLOCKVAL_SUB_CLASS_INVALID);
3526 return ASMAtomicXchgU32(&pRec->uSubClass, uSubClass);
3527}
3528
3529
3530/**
3531 * Locates an owner (thread) in a shared lock record.
3532 *
3533 * @returns Pointer to the owner entry on success, NULL on failure.
3534 * @param pShared The shared lock record.
3535 * @param hThread The thread (owner) to find.
3536 * @param piEntry Where to return the table index of the entry.
3537 * Optional.
3538 */
3539DECLINLINE(PRTLOCKVALRECUNION)
3540rtLockValidatorRecSharedFindOwner(PRTLOCKVALRECSHRD pShared, RTTHREAD hThread, uint32_t *piEntry)
3541{
3542 rtLockValidatorSerializeDetectionEnter();
3543
3544 PRTLOCKVALRECSHRDOWN volatile *papOwners = pShared->papOwners;
3545 if (papOwners)
3546 {
3547 uint32_t const cMax = pShared->cAllocated;
3548 for (uint32_t iEntry = 0; iEntry < cMax; iEntry++)
3549 {
3550 PRTLOCKVALRECUNION pEntry = (PRTLOCKVALRECUNION)rtLockValidatorUoReadSharedOwner(&papOwners[iEntry]);
3551 if (pEntry && pEntry->ShrdOwner.hThread == hThread)
3552 {
3553 rtLockValidatorSerializeDetectionLeave();
3554 if (piEntry)
3555 *piEntry = iEntry;
3556 return pEntry;
3557 }
3558 }
3559 }
3560
3561 rtLockValidatorSerializeDetectionLeave();
3562 return NULL;
3563}
3564
3565
3566RTDECL(int) RTLockValidatorRecSharedCheckOrder(PRTLOCKVALRECSHRD pRec, RTTHREAD hThreadSelf,
3567 PCRTLOCKVALSRCPOS pSrcPos, RTMSINTERVAL cMillies)
3568{
3569 /*
3570 * Validate and adjust input. Quit early if order validation is disabled.
3571 */
3572 PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
3573 AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
3574 if ( !pRecU->Shared.fEnabled
3575 || pRecU->Shared.hClass == NIL_RTLOCKVALCLASS
3576 || pRecU->Shared.hClass->cMsMinOrder == RT_INDEFINITE_WAIT
3577 || pRecU->Shared.hClass->cMsMinOrder > cMillies
3578 )
3579 return VINF_SUCCESS;
3580
3581 if (hThreadSelf == NIL_RTTHREAD)
3582 {
3583 hThreadSelf = RTThreadSelfAutoAdopt();
3584 AssertReturn(hThreadSelf != NIL_RTTHREAD, VERR_SEM_LV_INTERNAL_ERROR);
3585 }
3586 AssertReturn(hThreadSelf->u32Magic == RTTHREADINT_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
3587 Assert(hThreadSelf == RTThreadSelf());
3588
3589 /*
3590 * Detect recursion as it isn't subject to order restrictions.
3591 */
3592 PRTLOCKVALRECUNION pEntry = rtLockValidatorRecSharedFindOwner(&pRecU->Shared, hThreadSelf, NULL);
3593 if (pEntry)
3594 return VINF_SUCCESS;
3595
3596 return rtLockValidatorStackCheckLockingOrder(pRecU->Shared.hClass, pRecU->Shared.uSubClass, hThreadSelf, pRecU, pSrcPos);
3597}
3598
3599
3600RTDECL(int) RTLockValidatorRecSharedCheckBlocking(PRTLOCKVALRECSHRD pRec, RTTHREAD hThreadSelf,
3601 PCRTLOCKVALSRCPOS pSrcPos, bool fRecursiveOk, RTMSINTERVAL cMillies,
3602 RTTHREADSTATE enmSleepState, bool fReallySleeping)
3603{
3604 /*
3605 * Fend off wild life.
3606 */
3607 PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
3608 AssertPtrReturn(pRecU, VERR_SEM_LV_INVALID_PARAMETER);
3609 AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
3610 if (!pRecU->Shared.fEnabled)
3611 return VINF_SUCCESS;
3612
3613 PRTTHREADINT pThreadSelf = hThreadSelf;
3614 AssertPtrReturn(pThreadSelf, VERR_SEM_LV_INVALID_PARAMETER);
3615 AssertReturn(pThreadSelf->u32Magic == RTTHREADINT_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
3616 Assert(pThreadSelf == RTThreadSelf());
3617
3618 AssertReturn(RTTHREAD_IS_SLEEPING(enmSleepState), VERR_SEM_LV_INVALID_PARAMETER);
3619
3620 RTTHREADSTATE enmThreadState = rtThreadGetState(pThreadSelf);
3621 if (RT_UNLIKELY(enmThreadState != RTTHREADSTATE_RUNNING))
3622 {
3623 AssertReturn( enmThreadState == RTTHREADSTATE_TERMINATED /* rtThreadRemove uses locks too */
3624 || enmThreadState == RTTHREADSTATE_INITIALIZING /* rtThreadInsert uses locks too */
3625 , VERR_SEM_LV_INVALID_PARAMETER);
3626 enmSleepState = enmThreadState;
3627 }
3628
3629 /*
3630 * Record the location.
3631 */
3632 rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pRec, pRecU);
3633 rtLockValidatorSrcPosCopy(&pThreadSelf->LockValidator.SrcPos, pSrcPos);
3634 ASMAtomicWriteBool(&pThreadSelf->LockValidator.fInValidator, true);
3635 pThreadSelf->LockValidator.enmRecState = enmSleepState;
3636 rtThreadSetState(pThreadSelf, enmSleepState);
3637
3638 /*
3639 * Don't do deadlock detection if we're recursing.
3640 */
3641 int rc = VINF_SUCCESS;
3642 PRTLOCKVALRECUNION pEntry = !pRecU->Shared.fSignaller
3643 ? rtLockValidatorRecSharedFindOwner(&pRecU->Shared, pThreadSelf, NULL)
3644 : NULL;
3645 if (pEntry)
3646 {
3647 if ( !fRecursiveOk
3648 || ( pRec->hClass
3649 && !pRec->hClass->fRecursionOk)
3650 )
3651 {
3652 rtLockValComplainFirst("Recursion not allowed!", pSrcPos, pThreadSelf, pRecU, true);
3653 rtLockValComplainPanic();
3654 rc = VERR_SEM_LV_NESTED;
3655 }
3656 }
3657 /*
3658 * Perform deadlock detection.
3659 */
3660 else if ( pRec->hClass
3661 && ( pRec->hClass->cMsMinDeadlock == RT_INDEFINITE_WAIT
3662 || pRec->hClass->cMsMinDeadlock > cMillies))
3663 rc = VINF_SUCCESS;
3664 else if (!rtLockValidatorIsSimpleNoDeadlockCase(pRecU))
3665 rc = rtLockValidatorDeadlockDetection(pRecU, pThreadSelf, pSrcPos);
3666
3667 if (RT_SUCCESS(rc))
3668 ASMAtomicWriteBool(&pThreadSelf->fReallySleeping, fReallySleeping);
3669 else
3670 {
3671 rtThreadSetState(pThreadSelf, enmThreadState);
3672 rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pRec, NULL);
3673 }
3674 ASMAtomicWriteBool(&pThreadSelf->LockValidator.fInValidator, false);
3675 return rc;
3676}
3677RT_EXPORT_SYMBOL(RTLockValidatorRecSharedCheckBlocking);
3678
3679
3680RTDECL(int) RTLockValidatorRecSharedCheckOrderAndBlocking(PRTLOCKVALRECSHRD pRec, RTTHREAD hThreadSelf,
3681 PCRTLOCKVALSRCPOS pSrcPos, bool fRecursiveOk, RTMSINTERVAL cMillies,
3682 RTTHREADSTATE enmSleepState, bool fReallySleeping)
3683{
3684 int rc = RTLockValidatorRecSharedCheckOrder(pRec, hThreadSelf, pSrcPos, cMillies);
3685 if (RT_SUCCESS(rc))
3686 rc = RTLockValidatorRecSharedCheckBlocking(pRec, hThreadSelf, pSrcPos, fRecursiveOk, cMillies,
3687 enmSleepState, fReallySleeping);
3688 return rc;
3689}
3690RT_EXPORT_SYMBOL(RTLockValidatorRecSharedCheckOrderAndBlocking);
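/*
 * Editor's usage sketch (not part of the original file): the read-lock slow
 * path of the hypothetical MYRWLOCK from the earlier sketch would run these
 * checks before blocking, mirroring the exclusive variant above.
 * RTSemRWRequestRead and RTThreadUnblocked are assumed from iprt.
 */
#if 0 /* illustration only */
static int myRWLockReadSlowPath(MYRWLOCK *pThis, RTMSINTERVAL cMillies)
{
    RTTHREAD hThreadSelf = RTThreadSelfAutoAdopt();
    int rc = RTLockValidatorRecSharedCheckOrderAndBlocking(&pThis->ValidatorRead, hThreadSelf,
                                                           NULL /* pSrcPos */, true /* fRecursiveOk */,
                                                           cMillies, RTTHREADSTATE_RW_READ,
                                                           true /* fReallySleeping */);
    if (RT_FAILURE(rc))
        return rc;
    rc = RTSemRWRequestRead(pThis->hSemRW, cMillies);
    RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_READ);
    if (RT_SUCCESS(rc))
        RTLockValidatorRecSharedAddOwner(&pThis->ValidatorRead, hThreadSelf, NULL /* pSrcPos */);
    return rc;
}
#endif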
3691
3692
3693/**
3694 * Allocates and initializes an owner entry for the shared lock record.
3695 *
3696 * @returns The new owner entry.
3697 * @param pRec The shared lock record.
3698 * @param pThreadSelf The calling thread and owner. Used for record
3699 * initialization and allocation.
3700 * @param pSrcPos The source position.
3701 */
3702DECLINLINE(PRTLOCKVALRECUNION)
3703rtLockValidatorRecSharedAllocOwner(PRTLOCKVALRECSHRD pRec, PRTTHREADINT pThreadSelf, PCRTLOCKVALSRCPOS pSrcPos)
3704{
3705 PRTLOCKVALRECUNION pEntry;
3706
3707 /*
3708 * Check if the thread has any statically allocated records we can easily
3709 * make use of.
3710 */
3711 unsigned iEntry = ASMBitFirstSetU32(ASMAtomicUoReadU32(&pThreadSelf->LockValidator.bmFreeShrdOwners));
3712 if ( iEntry > 0
3713 && ASMAtomicBitTestAndClear(&pThreadSelf->LockValidator.bmFreeShrdOwners, iEntry - 1))
3714 {
3715 pEntry = (PRTLOCKVALRECUNION)&pThreadSelf->LockValidator.aShrdOwners[iEntry - 1];
3716 Assert(!pEntry->ShrdOwner.fReserved);
3717 pEntry->ShrdOwner.fStaticAlloc = true;
3718 rtThreadGet(pThreadSelf);
3719 }
3720 else
3721 {
3722 pEntry = (PRTLOCKVALRECUNION)RTMemAlloc(sizeof(RTLOCKVALRECSHRDOWN));
3723 if (RT_UNLIKELY(!pEntry))
3724 return NULL;
3725 pEntry->ShrdOwner.fStaticAlloc = false;
3726 }
3727
3728 pEntry->Core.u32Magic = RTLOCKVALRECSHRDOWN_MAGIC;
3729 pEntry->ShrdOwner.cRecursion = 1;
3730 pEntry->ShrdOwner.fReserved = true;
3731 pEntry->ShrdOwner.hThread = pThreadSelf;
3732 pEntry->ShrdOwner.pDown = NULL;
3733 pEntry->ShrdOwner.pSharedRec = pRec;
3734#if HC_ARCH_BITS == 32
3735 pEntry->ShrdOwner.pvReserved = NULL;
3736#endif
3737 if (pSrcPos)
3738 pEntry->ShrdOwner.SrcPos = *pSrcPos;
3739 else
3740 rtLockValidatorSrcPosInit(&pEntry->ShrdOwner.SrcPos);
3741 return pEntry;
3742}
3743
3744
3745/**
3746 * Frees an owner entry allocated by rtLockValidatorRecSharedAllocOwner.
3747 *
3748 * @param pEntry The owner entry.
3749 */
3750DECLINLINE(void) rtLockValidatorRecSharedFreeOwner(PRTLOCKVALRECSHRDOWN pEntry)
3751{
3752 if (pEntry)
3753 {
3754 Assert(pEntry->Core.u32Magic == RTLOCKVALRECSHRDOWN_MAGIC);
3755 ASMAtomicWriteU32(&pEntry->Core.u32Magic, RTLOCKVALRECSHRDOWN_MAGIC_DEAD);
3756
3757 PRTTHREADINT pThread;
3758 ASMAtomicXchgHandle(&pEntry->hThread, NIL_RTTHREAD, &pThread);
3759
3760 Assert(pEntry->fReserved);
3761 pEntry->fReserved = false;
3762
3763 if (pEntry->fStaticAlloc)
3764 {
3765 AssertPtrReturnVoid(pThread);
3766 AssertReturnVoid(pThread->u32Magic == RTTHREADINT_MAGIC);
3767
3768 uintptr_t iEntry = pEntry - &pThread->LockValidator.aShrdOwners[0];
3769 AssertReleaseReturnVoid(iEntry < RT_ELEMENTS(pThread->LockValidator.aShrdOwners));
3770
3771 Assert(!ASMBitTest(&pThread->LockValidator.bmFreeShrdOwners, (int32_t)iEntry));
3772 ASMAtomicBitSet(&pThread->LockValidator.bmFreeShrdOwners, (int32_t)iEntry);
3773
3774 rtThreadRelease(pThread);
3775 }
3776 else
3777 {
3778 rtLockValidatorSerializeDestructEnter();
3779 rtLockValidatorSerializeDestructLeave();
3780
3781 RTMemFree(pEntry);
3782 }
3783 }
3784}
3785
3786
3787/**
3788 * Make more room in the table.
3789 *
3790 * @retval true on success
3791 * @retval false if we're out of memory or running into a bad race condition
3792 * (probably a bug somewhere); the lock is no longer held in this case.
3793 *
3794 * @param pShared The shared lock record.
3795 */
3796static bool rtLockValidatorRecSharedMakeRoom(PRTLOCKVALRECSHRD pShared)
3797{
3798 for (unsigned i = 0; i < 1000; i++)
3799 {
3800 /*
3801 * Switch to the other data access direction.
3802 */
3803 rtLockValidatorSerializeDetectionLeave();
3804 if (i >= 10)
3805 {
3806 Assert(i != 10 && i != 100); /* fires at iteration 10 and 100 so excessive spinning gets noticed */
3807 RTThreadSleep(i >= 100); /* just yield (0 ms) at first, back off 1 ms once we've spun 100 times */
3808 }
3809 rtLockValidatorSerializeDestructEnter();
3810
3811 /*
3812 * Try grab the privilege to reallocating the table.
3813 */
3814 if ( pShared->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
3815 && ASMAtomicCmpXchgBool(&pShared->fReallocating, true, false))
3816 {
3817 uint32_t cAllocated = pShared->cAllocated;
3818 if (cAllocated < pShared->cEntries)
3819 {
3820 /*
3821 * Ok, still not enough space. Reallocate the table.
3822 */
3823#if 0 /** @todo enable this after making sure growing works flawlessly. */
3824 uint32_t cInc = RT_ALIGN_32(pShared->cEntries - cAllocated, 16);
3825#else
3826 uint32_t cInc = RT_ALIGN_32(pShared->cEntries - cAllocated, 1);
3827#endif
3828 PRTLOCKVALRECSHRDOWN *papOwners;
3829 papOwners = (PRTLOCKVALRECSHRDOWN *)RTMemRealloc((void *)pShared->papOwners,
3830 (cAllocated + cInc) * sizeof(void *));
3831 if (!papOwners)
3832 {
3833 ASMAtomicWriteBool(&pShared->fReallocating, false);
3834 rtLockValidatorSerializeDestructLeave();
3835 /* RTMemRealloc will assert */
3836 return false;
3837 }
3838
3839 while (cInc-- > 0)
3840 {
3841 papOwners[cAllocated] = NULL;
3842 cAllocated++;
3843 }
3844
3845 ASMAtomicWritePtr(&pShared->papOwners, papOwners);
3846 ASMAtomicWriteU32(&pShared->cAllocated, cAllocated);
3847 }
3848 ASMAtomicWriteBool(&pShared->fReallocating, false);
3849 }
3850 rtLockValidatorSerializeDestructLeave();
3851
3852 rtLockValidatorSerializeDetectionEnter();
3853 if (RT_UNLIKELY(pShared->Core.u32Magic != RTLOCKVALRECSHRD_MAGIC))
3854 break;
3855
3856 if (pShared->cAllocated >= pShared->cEntries)
3857 return true;
3858 }
3859
3860 rtLockValidatorSerializeDetectionLeave();
3861 AssertFailed(); /* too many iterations or destroyed while racing. */
3862 return false;
3863}
3864
3865
3866/**
3867 * Adds an owner entry to a shared lock record.
3868 *
3869 * @returns true on success, false on a serious race or if we're out of memory.
3870 * @param pShared The shared lock record.
3871 * @param pEntry The owner entry.
3872 */
3873DECLINLINE(bool) rtLockValidatorRecSharedAddOwner(PRTLOCKVALRECSHRD pShared, PRTLOCKVALRECSHRDOWN pEntry)
3874{
3875 rtLockValidatorSerializeDetectionEnter();
3876 if (RT_LIKELY(pShared->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC)) /* paranoia */
3877 {
3878 if ( ASMAtomicIncU32(&pShared->cEntries) > pShared->cAllocated /** @todo add fudge */
3879 && !rtLockValidatorRecSharedMakeRoom(pShared))
3880 return false; /* the worker has left the lock */
3881
3882 PRTLOCKVALRECSHRDOWN volatile *papOwners = pShared->papOwners;
3883 uint32_t const cMax = pShared->cAllocated;
3884 for (unsigned i = 0; i < 100; i++)
3885 {
3886 for (uint32_t iEntry = 0; iEntry < cMax; iEntry++)
3887 {
3888 if (ASMAtomicCmpXchgPtr(&papOwners[iEntry], pEntry, NULL))
3889 {
3890 rtLockValidatorSerializeDetectionLeave();
3891 return true;
3892 }
3893 }
3894 Assert(i != 25);
3895 }
3896 AssertFailed();
3897 }
3898 rtLockValidatorSerializeDetectionLeave();
3899 return false;
3900}
3901
3902
3903/**
3904 * Remove an owner entry from a shared lock record and free it.
3905 *
3906 * @param pShared The shared lock record.
3907 * @param pEntry The owner entry to remove.
3908 * @param iEntry The last known index.
3909 */
3910DECLINLINE(void) rtLockValidatorRecSharedRemoveAndFreeOwner(PRTLOCKVALRECSHRD pShared, PRTLOCKVALRECSHRDOWN pEntry,
3911 uint32_t iEntry)
3912{
3913 /*
3914 * Remove it from the table.
3915 */
3916 rtLockValidatorSerializeDetectionEnter();
3917 AssertReturnVoidStmt(pShared->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, rtLockValidatorSerializeDetectionLeave());
3918 if (RT_UNLIKELY( iEntry >= pShared->cAllocated
3919 || !ASMAtomicCmpXchgPtr(&pShared->papOwners[iEntry], NULL, pEntry)))
3920 {
3921 /* this shouldn't happen yet... */
3922 AssertFailed();
3923 PRTLOCKVALRECSHRDOWN volatile *papOwners = pShared->papOwners;
3924 uint32_t const cMax = pShared->cAllocated;
3925 for (iEntry = 0; iEntry < cMax; iEntry++)
3926 if (ASMAtomicCmpXchgPtr(&papOwners[iEntry], NULL, pEntry))
3927 break;
3928 AssertReturnVoidStmt(iEntry < cMax, rtLockValidatorSerializeDetectionLeave());
3929 }
3930 uint32_t cNow = ASMAtomicDecU32(&pShared->cEntries);
3931 Assert(!(cNow & RT_BIT_32(31))); NOREF(cNow);
3932 rtLockValidatorSerializeDetectionLeave();
3933
3934 /*
3935 * Successfully removed, now free it.
3936 */
3937 rtLockValidatorRecSharedFreeOwner(pEntry);
3938}
3939
3940
3941RTDECL(void) RTLockValidatorRecSharedResetOwner(PRTLOCKVALRECSHRD pRec, RTTHREAD hThread, PCRTLOCKVALSRCPOS pSrcPos)
3942{
3943 AssertReturnVoid(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC);
3944 if (!pRec->fEnabled)
3945 return;
3946 AssertReturnVoid(hThread == NIL_RTTHREAD || hThread->u32Magic == RTTHREADINT_MAGIC);
3947 AssertReturnVoid(pRec->fSignaller);
3948
3949 /*
3950 * Free all current owners.
3951 */
3952 rtLockValidatorSerializeDetectionEnter();
3953 while (ASMAtomicUoReadU32(&pRec->cEntries) > 0)
3954 {
3955 AssertReturnVoidStmt(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, rtLockValidatorSerializeDetectionLeave());
3956 uint32_t iEntry = 0;
3957 uint32_t cEntries = pRec->cAllocated;
3958 PRTLOCKVALRECSHRDOWN volatile *papEntries = pRec->papOwners;
3959 while (iEntry < cEntries)
3960 {
3961 PRTLOCKVALRECSHRDOWN pEntry = ASMAtomicXchgPtrT(&papEntries[iEntry], NULL, PRTLOCKVALRECSHRDOWN);
3962 if (pEntry)
3963 {
3964 ASMAtomicDecU32(&pRec->cEntries);
3965 rtLockValidatorSerializeDetectionLeave();
3966
3967 rtLockValidatorRecSharedFreeOwner(pEntry);
3968
3969 rtLockValidatorSerializeDetectionEnter();
3970 if (ASMAtomicUoReadU32(&pRec->cEntries) == 0)
3971 break;
3972 cEntries = pRec->cAllocated;
3973 papEntries = pRec->papOwners;
3974 }
3975 iEntry++;
3976 }
3977 }
3978 rtLockValidatorSerializeDetectionLeave();
3979
3980 if (hThread != NIL_RTTHREAD)
3981 {
3982 /*
3983 * Allocate a new owner entry and insert it into the table.
3984 */
3985 PRTLOCKVALRECUNION pEntry = rtLockValidatorRecSharedAllocOwner(pRec, hThread, pSrcPos);
3986 if ( pEntry
3987 && !rtLockValidatorRecSharedAddOwner(pRec, &pEntry->ShrdOwner))
3988 rtLockValidatorRecSharedFreeOwner(&pEntry->ShrdOwner);
3989 }
3990}
3991RT_EXPORT_SYMBOL(RTLockValidatorRecSharedResetOwner);
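/*
 * Editor's usage sketch (not part of the original file): for a signaller
 * record (fSignaller=true), the "owners" are the threads expected to signal
 * an event.  An RTSemEvent-style implementation could replace the registered
 * signallers like this; MYEVENT is made up for illustration.
 */
#if 0 /* illustration only */
typedef struct MYEVENT
{
    RTSEMEVENT          hEvt;
    RTLOCKVALRECSHRD    ValidatorSignallers; /* initialized with fSignaller=true */
} MYEVENT;

static void myEventSetSignaller(MYEVENT *pThis, RTTHREAD hThread)
{
    /* Drop all currently registered signallers and register hThread (if not NIL). */
    RTLockValidatorRecSharedResetOwner(&pThis->ValidatorSignallers, hThread, NULL /* pSrcPos */);
}
#endif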
3992
3993
3994RTDECL(void) RTLockValidatorRecSharedAddOwner(PRTLOCKVALRECSHRD pRec, RTTHREAD hThread, PCRTLOCKVALSRCPOS pSrcPos)
3995{
3996 AssertReturnVoid(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC);
3997 if (!pRec->fEnabled)
3998 return;
3999 if (hThread == NIL_RTTHREAD)
4000 {
4001 hThread = RTThreadSelfAutoAdopt();
4002 AssertReturnVoid(hThread != NIL_RTTHREAD);
4003 }
4004 AssertReturnVoid(hThread->u32Magic == RTTHREADINT_MAGIC);
4005
4006 /*
4007 * Recursive?
4008 *
4009 * Note! This code could be optimized to avoid scanning the table on
4010 * insert. However, that's annoying work that makes the code bigger,
4011 * so it can wait until later.
4012 */
4013 PRTLOCKVALRECUNION pEntry = rtLockValidatorRecSharedFindOwner(pRec, hThread, NULL);
4014 if (pEntry)
4015 {
4016 Assert(!pRec->fSignaller);
4017 pEntry->ShrdOwner.cRecursion++;
4018 rtLockValidatorStackPushRecursion(hThread, pEntry, pSrcPos);
4019 return;
4020 }
4021
4022 /*
4023 * Allocate a new owner entry and insert it into the table.
4024 */
4025 pEntry = rtLockValidatorRecSharedAllocOwner(pRec, hThread, pSrcPos);
4026 if (pEntry)
4027 {
4028 if (rtLockValidatorRecSharedAddOwner(pRec, &pEntry->ShrdOwner))
4029 {
4030 if (!pRec->fSignaller)
4031 rtLockValidatorStackPush(hThread, pEntry);
4032 }
4033 else
4034 rtLockValidatorRecSharedFreeOwner(&pEntry->ShrdOwner);
4035 }
4036}
4037RT_EXPORT_SYMBOL(RTLockValidatorRecSharedAddOwner);
4038
4039
4040RTDECL(void) RTLockValidatorRecSharedRemoveOwner(PRTLOCKVALRECSHRD pRec, RTTHREAD hThread)
4041{
4042 AssertReturnVoid(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC);
4043 if (!pRec->fEnabled)
4044 return;
4045 if (hThread == NIL_RTTHREAD)
4046 {
4047 hThread = RTThreadSelfAutoAdopt();
4048 AssertReturnVoid(hThread != NIL_RTTHREAD);
4049 }
4050 AssertReturnVoid(hThread->u32Magic == RTTHREADINT_MAGIC);
4051
4052 /*
4053 * Find the entry and hope it's a recursive one.
4054 */
4055 uint32_t iEntry = UINT32_MAX; /* shuts up gcc */
4056 PRTLOCKVALRECUNION pEntry = rtLockValidatorRecSharedFindOwner(pRec, hThread, &iEntry);
4057 AssertReturnVoid(pEntry);
4058 AssertReturnVoid(pEntry->ShrdOwner.cRecursion > 0);
4059
4060 uint32_t c = --pEntry->ShrdOwner.cRecursion;
4061 if (c == 0)
4062 {
4063 if (!pRec->fSignaller)
4064 rtLockValidatorStackPop(hThread, (PRTLOCKVALRECUNION)pEntry);
4065 rtLockValidatorRecSharedRemoveAndFreeOwner(pRec, &pEntry->ShrdOwner, iEntry);
4066 }
4067 else
4068 {
4069 Assert(!pRec->fSignaller);
4070 rtLockValidatorStackPopRecursion(hThread, pEntry);
4071 }
4072}
4073RT_EXPORT_SYMBOL(RTLockValidatorRecSharedRemoveOwner);
4074
4075
4076RTDECL(bool) RTLockValidatorRecSharedIsOwner(PRTLOCKVALRECSHRD pRec, RTTHREAD hThread)
4077{
4078 /* Validate and resolve input. */
4079 AssertReturn(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, false);
4080 if (!pRec->fEnabled)
4081 return false;
4082 if (hThread == NIL_RTTHREAD)
4083 {
4084 hThread = RTThreadSelfAutoAdopt();
4085 AssertReturn(hThread != NIL_RTTHREAD, false);
4086 }
4087 AssertReturn(hThread->u32Magic == RTTHREADINT_MAGIC, false);
4088
4089 /* Do the job. */
4090 PRTLOCKVALRECUNION pEntry = rtLockValidatorRecSharedFindOwner(pRec, hThread, NULL);
4091 return pEntry != NULL;
4092}
4093RT_EXPORT_SYMBOL(RTLockValidatorRecSharedIsOwner);
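/*
 * Editor's usage sketch (not part of the original file): the owner table
 * makes cheap sanity asserts possible, e.g. before attempting an unlock or
 * a read-to-write upgrade.  NIL_RTTHREAD means "the calling thread" here.
 */
#if 0 /* illustration only */
static void myRWLockAssertIsReadOwner(MYRWLOCK *pThis)
{
    Assert(RTLockValidatorRecSharedIsOwner(&pThis->ValidatorRead, NIL_RTTHREAD));
}
#endif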
4094
4095
4096RTDECL(int) RTLockValidatorRecSharedCheckAndRelease(PRTLOCKVALRECSHRD pRec, RTTHREAD hThreadSelf)
4097{
4098 AssertReturn(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
4099 if (!pRec->fEnabled)
4100 return VINF_SUCCESS;
4101 if (hThreadSelf == NIL_RTTHREAD)
4102 {
4103 hThreadSelf = RTThreadSelfAutoAdopt();
4104 AssertReturn(hThreadSelf != NIL_RTTHREAD, VERR_SEM_LV_INTERNAL_ERROR);
4105 }
4106 Assert(hThreadSelf == RTThreadSelf());
4107 AssertReturn(hThreadSelf->u32Magic == RTTHREADINT_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
4108
4109 /*
4110 * Locate the entry for this thread in the table.
4111 */
4112 uint32_t iEntry = 0;
4113 PRTLOCKVALRECUNION pEntry = rtLockValidatorRecSharedFindOwner(pRec, hThreadSelf, &iEntry);
4114 if (RT_UNLIKELY(!pEntry))
4115 {
4116 rtLockValComplainFirst("Not owner (shared)!", NULL, hThreadSelf, (PRTLOCKVALRECUNION)pRec, true);
4117 rtLockValComplainPanic();
4118 return VERR_SEM_LV_NOT_OWNER;
4119 }
4120
4121 /*
4122 * Check the release order.
4123 */
4124 if ( pRec->hClass != NIL_RTLOCKVALCLASS
4125 && pRec->hClass->fStrictReleaseOrder
4126 && pRec->hClass->cMsMinOrder != RT_INDEFINITE_WAIT
4127 )
4128 {
4129 int rc = rtLockValidatorStackCheckReleaseOrder(hThreadSelf, (PRTLOCKVALRECUNION)pEntry);
4130 if (RT_FAILURE(rc))
4131 return rc;
4132 }
4133
4134 /*
4135 * Release the ownership or unwind a level of recursion.
4136 */
4137 Assert(pEntry->ShrdOwner.cRecursion > 0);
4138 uint32_t c = --pEntry->ShrdOwner.cRecursion;
4139 if (c == 0)
4140 {
4141 rtLockValidatorStackPop(hThreadSelf, pEntry);
4142 rtLockValidatorRecSharedRemoveAndFreeOwner(pRec, &pEntry->ShrdOwner, iEntry);
4143 }
4144 else
4145 rtLockValidatorStackPopRecursion(hThreadSelf, pEntry);
4146
4147 return VINF_SUCCESS;
4148}
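/*
 * Editor's usage sketch (not part of the original file): the read-unlock
 * path of the hypothetical MYRWLOCK, validating ownership and release order
 * before actually dropping the lock.  RTSemRWReleaseRead is assumed from
 * iprt/semaphore.h.
 */
#if 0 /* illustration only */
static int myRWLockReadRelease(MYRWLOCK *pThis)
{
    int rc = RTLockValidatorRecSharedCheckAndRelease(&pThis->ValidatorRead, NIL_RTTHREAD);
    if (RT_FAILURE(rc))
        return rc; /* VERR_SEM_LV_NOT_OWNER or a release order complaint */
    return RTSemRWReleaseRead(pThis->hSemRW);
}
#endif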
4149
4150
4151RTDECL(int) RTLockValidatorRecSharedCheckSignaller(PRTLOCKVALRECSHRD pRec, RTTHREAD hThreadSelf)
4152{
4153 AssertReturn(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
4154 if (!pRec->fEnabled)
4155 return VINF_SUCCESS;
4156 if (hThreadSelf == NIL_RTTHREAD)
4157 {
4158 hThreadSelf = RTThreadSelfAutoAdopt();
4159 AssertReturn(hThreadSelf != NIL_RTTHREAD, VERR_SEM_LV_INTERNAL_ERROR);
4160 }
4161 Assert(hThreadSelf == RTThreadSelf());
4162 AssertReturn(hThreadSelf->u32Magic == RTTHREADINT_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
4163
4164 /*
4165 * Locate the entry for this thread in the table.
4166 */
4167 uint32_t iEntry = 0;
4168 PRTLOCKVALRECUNION pEntry = rtLockValidatorRecSharedFindOwner(pRec, hThreadSelf, &iEntry);
4169 if (RT_UNLIKELY(!pEntry))
4170 {
4171 rtLockValComplainFirst("Invalid signaller!", NULL, hThreadSelf, (PRTLOCKVALRECUNION)pRec, true);
4172 rtLockValComplainPanic();
4173 return VERR_SEM_LV_NOT_SIGNALLER;
4174 }
4175 return VINF_SUCCESS;
4176}
4177
4178
4179RTDECL(int32_t) RTLockValidatorWriteLockGetCount(RTTHREAD Thread)
4180{
4181 if (Thread == NIL_RTTHREAD)
4182 return 0;
4183
4184 PRTTHREADINT pThread = rtThreadGet(Thread);
4185 if (!pThread)
4186 return VERR_INVALID_HANDLE;
4187 int32_t cWriteLocks = ASMAtomicReadS32(&pThread->LockValidator.cWriteLocks);
4188 rtThreadRelease(pThread);
4189 return cWriteLocks;
4190}
4191RT_EXPORT_SYMBOL(RTLockValidatorWriteLockGetCount);
4192
4193
4194RTDECL(void) RTLockValidatorWriteLockInc(RTTHREAD Thread)
4195{
4196 PRTTHREADINT pThread = rtThreadGet(Thread);
4197 AssertReturnVoid(pThread);
4198 ASMAtomicIncS32(&pThread->LockValidator.cWriteLocks);
4199 rtThreadRelease(pThread);
4200}
4201RT_EXPORT_SYMBOL(RTLockValidatorWriteLockInc);
4202
4203
4204RTDECL(void) RTLockValidatorWriteLockDec(RTTHREAD Thread)
4205{
4206 PRTTHREADINT pThread = rtThreadGet(Thread);
4207 AssertReturnVoid(pThread);
4208 ASMAtomicDecS32(&pThread->LockValidator.cWriteLocks);
4209 rtThreadRelease(pThread);
4210}
4211RT_EXPORT_SYMBOL(RTLockValidatorWriteLockDec);
4212
4213
4214RTDECL(int32_t) RTLockValidatorReadLockGetCount(RTTHREAD Thread)
4215{
4216 if (Thread == NIL_RTTHREAD)
4217 return 0;
4218
4219 PRTTHREADINT pThread = rtThreadGet(Thread);
4220 if (!pThread)
4221 return VERR_INVALID_HANDLE;
4222 int32_t cReadLocks = ASMAtomicReadS32(&pThread->LockValidator.cReadLocks);
4223 rtThreadRelease(pThread);
4224 return cReadLocks;
4225}
4226RT_EXPORT_SYMBOL(RTLockValidatorReadLockGetCount);
4227
4228
4229RTDECL(void) RTLockValidatorReadLockInc(RTTHREAD Thread)
4230{
4231 PRTTHREADINT pThread = rtThreadGet(Thread);
4232 AssertReturnVoid(pThread);
4233 ASMAtomicIncS32(&pThread->LockValidator.cReadLocks);
4234 rtThreadRelease(pThread);
4235}
4236RT_EXPORT_SYMBOL(RTLockValidatorReadLockInc);
4237
4238
4239RTDECL(void) RTLockValidatorReadLockDec(RTTHREAD Thread)
4240{
4241 PRTTHREADINT pThread = rtThreadGet(Thread);
4242 AssertReturnVoid(pThread);
4243 ASMAtomicDecS32(&pThread->LockValidator.cReadLocks);
4244 rtThreadRelease(pThread);
4245}
4246RT_EXPORT_SYMBOL(RTLockValidatorReadLockDec);
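/*
 * Editor's usage sketch (not part of the original file): the four counter
 * APIs above are meant to be paired around acquire/release so that
 * RTLockValidator{Read,Write}LockGetCount reflects what the thread holds.
 * RTSemRWRequestWrite/RTSemRWReleaseWrite are assumed from iprt.
 */
#if 0 /* illustration only */
static void myLockCountersExample(MYRWLOCK *pThis)
{
    int rc = RTSemRWRequestWrite(pThis->hSemRW, RT_INDEFINITE_WAIT);
    AssertRC(rc);
    RTLockValidatorWriteLockInc(RTThreadSelf());    /* we now hold one more write lock */

    Assert(RTLockValidatorWriteLockGetCount(RTThreadSelf()) > 0);

    RTLockValidatorWriteLockDec(RTThreadSelf());    /* pair the increment before releasing */
    RTSemRWReleaseWrite(pThis->hSemRW);
}
#endif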
4247
4248
4249RTDECL(void *) RTLockValidatorQueryBlocking(RTTHREAD hThread)
4250{
4251 void *pvLock = NULL;
4252 PRTTHREADINT pThread = rtThreadGet(hThread);
4253 if (pThread)
4254 {
4255 RTTHREADSTATE enmState = rtThreadGetState(pThread);
4256 if (RTTHREAD_IS_SLEEPING(enmState))
4257 {
4258 rtLockValidatorSerializeDetectionEnter();
4259
4260 enmState = rtThreadGetState(pThread);
4261 if (RTTHREAD_IS_SLEEPING(enmState))
4262 {
4263 PRTLOCKVALRECUNION pRec = rtLockValidatorReadRecUnionPtr(&pThread->LockValidator.pRec);
4264 if (pRec)
4265 {
4266 switch (pRec->Core.u32Magic)
4267 {
4268 case RTLOCKVALRECEXCL_MAGIC:
4269 pvLock = pRec->Excl.hLock;
4270 break;
4271
4272 case RTLOCKVALRECSHRDOWN_MAGIC:
4273 pRec = (PRTLOCKVALRECUNION)pRec->ShrdOwner.pSharedRec;
4274 if (!pRec || pRec->Core.u32Magic != RTLOCKVALRECSHRD_MAGIC)
4275 break;
4276 case RTLOCKVALRECSHRD_MAGIC: /* (fall thru from the owner case above) */
4277 pvLock = pRec->Shared.hLock;
4278 break;
4279 }
4280 if (RTThreadGetState(pThread) != enmState)
4281 pvLock = NULL;
4282 }
4283 }
4284
4285 rtLockValidatorSerializeDetectionLeave();
4286 }
4287 rtThreadRelease(pThread);
4288 }
4289 return pvLock;
4290}
4291RT_EXPORT_SYMBOL(RTLockValidatorQueryBlocking);
4292
4293
4294RTDECL(bool) RTLockValidatorIsBlockedThreadInValidator(RTTHREAD hThread)
4295{
4296 bool fRet = false;
4297 PRTTHREADINT pThread = rtThreadGet(hThread);
4298 if (pThread)
4299 {
4300 fRet = ASMAtomicReadBool(&pThread->LockValidator.fInValidator);
4301 rtThreadRelease(pThread);
4302 }
4303 return fRet;
4304}
4305RT_EXPORT_SYMBOL(RTLockValidatorIsBlockedThreadInValidator);
4306
4307
4308RTDECL(bool) RTLockValidatorHoldsLocksInClass(RTTHREAD hCurrentThread, RTLOCKVALCLASS hClass)
4309{
4310 bool fRet = false;
4311 if (hCurrentThread == NIL_RTTHREAD)
4312 hCurrentThread = RTThreadSelf();
4313 else
4314 Assert(hCurrentThread == RTThreadSelf());
4315 PRTTHREADINT pThread = rtThreadGet(hCurrentThread);
4316 if (pThread)
4317 {
4318 if (hClass != NIL_RTLOCKVALCLASS)
4319 {
4320 PRTLOCKVALRECUNION pCur = rtLockValidatorReadRecUnionPtr(&pThread->LockValidator.pStackTop);
4321 while (VALID_PTR(pCur) && !fRet)
4322 {
4323 switch (pCur->Core.u32Magic)
4324 {
4325 case RTLOCKVALRECEXCL_MAGIC:
4326 fRet = pCur->Excl.hClass == hClass;
4327 pCur = rtLockValidatorReadRecUnionPtr(&pCur->Excl.pDown);
4328 break;
4329 case RTLOCKVALRECSHRDOWN_MAGIC:
4330 fRet = VALID_PTR(pCur->ShrdOwner.pSharedRec)
4331 && pCur->ShrdOwner.pSharedRec->hClass == hClass;
4332 pCur = rtLockValidatorReadRecUnionPtr(&pCur->ShrdOwner.pDown);
4333 break;
4334 case RTLOCKVALRECNEST_MAGIC:
4335 switch (pCur->Nest.pRec->Core.u32Magic)
4336 {
4337 case RTLOCKVALRECEXCL_MAGIC:
4338 fRet = pCur->Nest.pRec->Excl.hClass == hClass;
4339 break;
4340 case RTLOCKVALRECSHRDOWN_MAGIC:
4341 fRet = VALID_PTR(pCur->Nest.pRec->ShrdOwner.pSharedRec)
4342 && pCur->Nest.pRec->ShrdOwner.pSharedRec->hClass == hClass;
4343 break;
4344 }
4345 pCur = rtLockValidatorReadRecUnionPtr(&pCur->Nest.pDown);
4346 break;
4347 default:
4348 pCur = NULL;
4349 break;
4350 }
4351 }
4352 }
4353
4354 rtThreadRelease(pThread);
4355 }
4356 return fRet;
4357}
4358RT_EXPORT_SYMBOL(RTLockValidatorHoldsLocksInClass);
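/*
 * Editor's usage sketch (not part of the original file): asserting that the
 * calling thread holds no locks of a given class before doing something that
 * must not be done with such locks held.  The hClassDatabase parameter is
 * hypothetical.
 */
#if 0 /* illustration only */
static void myWaitForEventSafely(RTLOCKVALCLASS hClassDatabase)
{
    Assert(!RTLockValidatorHoldsLocksInClass(NIL_RTTHREAD, hClassDatabase));
    /* ... now safe to block for a long time ... */
}
#endif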
4359
4360
4361RTDECL(bool) RTLockValidatorHoldsLocksInSubClass(RTTHREAD hCurrentThread, RTLOCKVALCLASS hClass, uint32_t uSubClass)
4362{
4363 bool fRet = false;
4364 if (hCurrentThread == NIL_RTTHREAD)
4365 hCurrentThread = RTThreadSelf();
4366 else
4367 Assert(hCurrentThread == RTThreadSelf());
4368 PRTTHREADINT pThread = rtThreadGet(hCurrentThread);
4369 if (pThread)
4370 {
4371 if (hClass != NIL_RTLOCKVALCLASS)
4372 {
4373 PRTLOCKVALRECUNION pCur = rtLockValidatorReadRecUnionPtr(&pThread->LockValidator.pStackTop);
4374 while (VALID_PTR(pCur) && !fRet)
4375 {
4376 switch (pCur->Core.u32Magic)
4377 {
4378 case RTLOCKVALRECEXCL_MAGIC:
4379 fRet = pCur->Excl.hClass == hClass
4380 && pCur->Excl.uSubClass == uSubClass;
4381 pCur = rtLockValidatorReadRecUnionPtr(&pCur->Excl.pDown);
4382 break;
4383 case RTLOCKVALRECSHRDOWN_MAGIC:
4384 fRet = VALID_PTR(pCur->ShrdOwner.pSharedRec)
4385 && pCur->ShrdOwner.pSharedRec->hClass == hClass
4386 && pCur->ShrdOwner.pSharedRec->uSubClass == uSubClass;
4387 pCur = rtLockValidatorReadRecUnionPtr(&pCur->ShrdOwner.pDown);
4388 break;
4389 case RTLOCKVALRECNEST_MAGIC:
4390 switch (pCur->Nest.pRec->Core.u32Magic)
4391 {
4392 case RTLOCKVALRECEXCL_MAGIC:
4393 fRet = pCur->Nest.pRec->Excl.hClass == hClass
4394 && pCur->Nest.pRec->Excl.uSubClass == uSubClass;
4395 break;
4396 case RTLOCKVALRECSHRDOWN_MAGIC:
4397 fRet = VALID_PTR(pCur->Nest.pRec->ShrdOwner.pSharedRec)
4398 && pCur->Nest.pRec->ShrdOwner.pSharedRec->hClass == hClass
4399 && pCur->Nest.pRec->ShrdOwner.pSharedRec->uSubClass == uSubClass;
4400 break;
4401 }
4402 pCur = rtLockValidatorReadRecUnionPtr(&pCur->Nest.pDown);
4403 break;
4404 default:
4405 pCur = NULL;
4406 break;
4407 }
4408 }
4409 }
4410
4411 rtThreadRelease(pThread);
4412 }
4413 return fRet;
4414}
4415RT_EXPORT_SYMBOL(RTLockValidatorHoldsLocksInSubClass);
4416
4417
4418RTDECL(bool) RTLockValidatorSetEnabled(bool fEnabled)
4419{
4420 return ASMAtomicXchgBool(&g_fLockValidatorEnabled, fEnabled);
4421}
4422RT_EXPORT_SYMBOL(RTLockValidatorSetEnabled);
4423
4424
4425RTDECL(bool) RTLockValidatorIsEnabled(void)
4426{
4427 return ASMAtomicUoReadBool(&g_fLockValidatorEnabled);
4428}
4429RT_EXPORT_SYMBOL(RTLockValidatorIsEnabled);
4430
4431
4432RTDECL(bool) RTLockValidatorSetQuiet(bool fQuiet)
4433{
4434 return ASMAtomicXchgBool(&g_fLockValidatorQuiet, fQuiet);
4435}
4436RT_EXPORT_SYMBOL(RTLockValidatorSetQuiet);
4437
4438
4439RTDECL(bool) RTLockValidatorIsQuiet(void)
4440{
4441 return ASMAtomicUoReadBool(&g_fLockValidatorQuiet);
4442}
4443RT_EXPORT_SYMBOL(RTLockValidatorIsQuiet);
4444
4445
4446RTDECL(bool) RTLockValidatorSetMayPanic(bool fMayPanic)
4447{
4448 return ASMAtomicXchgBool(&g_fLockValidatorMayPanic, fMayPanic);
4449}
4450RT_EXPORT_SYMBOL(RTLockValidatorSetMayPanic);
4451
4452
4453RTDECL(bool) RTLockValidatorMayPanic(void)
4454{
4455 return ASMAtomicUoReadBool(&g_fLockValidatorMayPanic);
4456}
4457RT_EXPORT_SYMBOL(RTLockValidatorMayPanic);
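/*
 * Editor's usage sketch (not part of the original file): a test harness
 * might configure the validator globals like this; all three setters return
 * the previous value.
 */
#if 0 /* illustration only */
static void myTestInitLockValidator(void)
{
    bool fOldEnabled = RTLockValidatorSetEnabled(true);   /* turn checking on */
    bool fOldQuiet   = RTLockValidatorSetQuiet(false);    /* complain loudly */
    bool fOldPanic   = RTLockValidatorSetMayPanic(true);  /* assert on violations */
    NOREF(fOldEnabled); NOREF(fOldQuiet); NOREF(fOldPanic);
}
#endif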
4458