VirtualBox

source: vbox/trunk/src/VBox/Runtime/common/misc/lockvalidator.cpp@ 99246

Last change on this file since 99246 was 98103, checked in by vboxsync, 2 years ago

Copyright year updates by scm.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 160.6 KB
Line 
1/* $Id: lockvalidator.cpp 98103 2023-01-17 14:15:46Z vboxsync $ */
2/** @file
3 * IPRT - Lock Validator.
4 */
5
6/*
7 * Copyright (C) 2009-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * The contents of this file may alternatively be used under the terms
26 * of the Common Development and Distribution License Version 1.0
27 * (CDDL), a copy of it is provided in the "COPYING.CDDL" file included
28 * in the VirtualBox distribution, in which case the provisions of the
29 * CDDL are applicable instead of those of the GPL.
30 *
31 * You may elect to license modified versions of this file under the
32 * terms and conditions of either the GPL or the CDDL or both.
33 *
34 * SPDX-License-Identifier: GPL-3.0-only OR CDDL-1.0
35 */
36
37
38/*********************************************************************************************************************************
39* Header Files *
40*********************************************************************************************************************************/
41#include <iprt/lockvalidator.h>
42#include "internal/iprt.h"
43
44#include <iprt/asm.h>
45#include <iprt/assert.h>
46#include <iprt/env.h>
47#include <iprt/err.h>
48#include <iprt/mem.h>
49#include <iprt/once.h>
50#include <iprt/semaphore.h>
51#include <iprt/string.h>
52#include <iprt/thread.h>
53
54#include "internal/lockvalidator.h"
55#include "internal/magics.h"
56#include "internal/strhash.h"
57#include "internal/thread.h"
58
59
60/*********************************************************************************************************************************
61* Defined Constants And Macros *
62*********************************************************************************************************************************/
63/** Macro that asserts that a pointer is aligned correctly.
64 * Only used when fighting bugs. */
65#if 1
66# define RTLOCKVAL_ASSERT_PTR_ALIGN(p) \
67 AssertMsg(!((uintptr_t)(p) & (sizeof(uintptr_t) - 1)), ("%p\n", (p)));
68#else
69# define RTLOCKVAL_ASSERT_PTR_ALIGN(p) do { } while (0)
70#endif
71
72/** Hashes the class handle (pointer) into an apPriorLocksHash index. */
73#define RTLOCKVALCLASS_HASH(hClass) \
74 ( ((uintptr_t)(hClass) >> 6 ) \
75 % ( RT_SIZEOFMEMB(RTLOCKVALCLASSINT, apPriorLocksHash) \
76 / sizeof(PRTLOCKVALCLASSREF)) )
77
78/** The max value for RTLOCKVALCLASSINT::cRefs. */
79#define RTLOCKVALCLASS_MAX_REFS UINT32_C(0xffff0000)
80/** The max value for RTLOCKVALCLASSREF::cLookups. */
81#define RTLOCKVALCLASSREF_MAX_LOOKUPS UINT32_C(0xfffe0000)
82/** The absolute max value for RTLOCKVALCLASSREF::cLookups at which it will
83 * be set back to RTLOCKVALCLASSREF_MAX_LOOKUPS. */
84#define RTLOCKVALCLASSREF_MAX_LOOKUPS_FIX UINT32_C(0xffff0000)
85
86
87/** @def RTLOCKVAL_WITH_RECURSION_RECORDS
88 * Enable recursion records. */
89#if defined(IN_RING3) || defined(DOXYGEN_RUNNING)
90# define RTLOCKVAL_WITH_RECURSION_RECORDS 1
91#endif
92
93/** @def RTLOCKVAL_WITH_VERBOSE_DUMPS
94 * Enables some extra verbosity in the lock dumping. */
95#if defined(DOXYGEN_RUNNING)
96# define RTLOCKVAL_WITH_VERBOSE_DUMPS
97#endif
98
99/** @def RTLOCKVAL_WITH_CLASS_HASH_STATS
100 * Enables collection prior class hash lookup statistics, dumping them when
101 * complaining about the class. */
102#if defined(DEBUG) || defined(DOXYGEN_RUNNING)
103# define RTLOCKVAL_WITH_CLASS_HASH_STATS
104#endif
105
106
107/*********************************************************************************************************************************
108* Structures and Typedefs *
109*********************************************************************************************************************************/
110/**
111 * Deadlock detection stack entry.
112 */
/**
 * Deadlock detection stack entry.
 *
 * One entry per lock record visited while walking the wait graph; the pThread
 * and enmState members snapshot how we got here so the stack can be
 * re-validated after the (lock-free) walk.
 */
typedef struct RTLOCKVALDDENTRY
{
    /** The current record. */
    PRTLOCKVALRECUNION      pRec;
    /** The current entry number if pRec is a shared one. */
    uint32_t                iEntry;
    /** The thread state of the thread we followed to get to pFirstSibling.
     * This is only used for validating a deadlock stack. */
    RTTHREADSTATE           enmState;
    /** The thread we followed to get to pFirstSibling.
     * This is only used for validating a deadlock stack. */
    PRTTHREADINT            pThread;
    /** What pThread is waiting on, i.e. where we entered the circular list of
     * siblings.  This is used for validating a deadlock stack as well as
     * terminating the sibling walk. */
    PRTLOCKVALRECUNION      pFirstSibling;
} RTLOCKVALDDENTRY;
130
131
132/**
133 * Deadlock detection stack.
134 */
/**
 * Deadlock detection stack.
 *
 * NOTE(review): fixed capacity of 32 entries -- presumably wait chains deeper
 * than this are abandoned by the detection code rather than grown; confirm in
 * the stack push logic.
 */
typedef struct RTLOCKVALDDSTACK
{
    /** The number stack entries. */
    uint32_t            c;
    /** The stack entries. */
    RTLOCKVALDDENTRY    a[32];
} RTLOCKVALDDSTACK;
/** Pointer to a deadlock detection stack. */
typedef RTLOCKVALDDSTACK *PRTLOCKVALDDSTACK;
144
145
146/**
147 * Reference to another class.
148 */
/**
 * Reference to another class.
 *
 * Used in the prior-lock lists/hashes of RTLOCKVALCLASSINT to record which
 * classes may legally be held when acquiring a lock of the owning class.
 */
typedef struct RTLOCKVALCLASSREF
{
    /** The class. */
    RTLOCKVALCLASS      hClass;
    /** The number of lookups of this class.  See
     *  RTLOCKVALCLASSREF_MAX_LOOKUPS / RTLOCKVALCLASSREF_MAX_LOOKUPS_FIX. */
    uint32_t volatile   cLookups;
    /** Indicates whether the entry was added automatically during order checking
     * (true) or manually via the API (false). */
    bool                fAutodidacticism;
    /** Reserved / explicit alignment padding. */
    bool                afReserved[3];
} RTLOCKVALCLASSREF;
/** Pointer to a class reference. */
typedef RTLOCKVALCLASSREF *PRTLOCKVALCLASSREF;
163
164
165/** Pointer to a chunk of class references. */
/** Pointer to a chunk of class references. */
typedef struct RTLOCKVALCLASSREFCHUNK *PRTLOCKVALCLASSREFCHUNK;
/**
 * Chunk of class references.
 *
 * Chunks form a singly linked list (pNext) so the prior-lock set can grow
 * without reallocating existing entries.
 */
typedef struct RTLOCKVALCLASSREFCHUNK
{
    /** Array of refs. */
#if 0 /** @todo for testing allocation of new chunks. */
    RTLOCKVALCLASSREF   aRefs[ARCH_BITS == 32 ? 10 : 8];
#else
    RTLOCKVALCLASSREF   aRefs[2];
#endif
    /** Pointer to the next chunk. */
    PRTLOCKVALCLASSREFCHUNK volatile pNext;
} RTLOCKVALCLASSREFCHUNK;
181
182
183/**
184 * Lock class.
185 */
/**
 * Lock class.
 *
 * Internal representation behind the RTLOCKVALCLASS handle.  Instances live in
 * the g_LockValClassTree AVL tree (keyed via Core) and are reference counted.
 */
typedef struct RTLOCKVALCLASSINT
{
    /** AVL node core. */
    AVLLU32NODECORE         Core;
    /** Magic value (RTLOCKVALCLASS_MAGIC). */
    uint32_t volatile       u32Magic;
    /** Reference counter.  See RTLOCKVALCLASS_MAX_REFS. */
    uint32_t volatile       cRefs;
    /** Whether the class is allowed to teach it self new locking order rules. */
    bool                    fAutodidact;
    /** Whether to allow recursion. */
    bool                    fRecursionOk;
    /** Strict release order. */
    bool                    fStrictReleaseOrder;
    /** Whether this class is in the tree. */
    bool                    fInTree;
    /** Donate a reference to the next retainer.  This is a hack to make
     * RTLockValidatorClassCreateUnique work. */
    bool volatile           fDonateRefToNextRetainer;
    /** Reserved future use / explicit alignment. */
    bool                    afReserved[3];
    /** The minimum wait interval for which we do deadlock detection
     * (milliseconds). */
    RTMSINTERVAL            cMsMinDeadlock;
    /** The minimum wait interval for which we do order checks (milliseconds). */
    RTMSINTERVAL            cMsMinOrder;
    /** More padding (keeps PriorLocks at offset 64, see AssertCompile below). */
    uint32_t                au32Reserved[ARCH_BITS == 32 ? 5 : 2];
    /** Classes that may be taken prior to this one.
     * This is a linked list where each node contains a chunk of locks so that we
     * reduce the number of allocations as well as localize the data. */
    RTLOCKVALCLASSREFCHUNK  PriorLocks;
    /** Hash table containing frequently encountered prior locks. */
    PRTLOCKVALCLASSREF      apPriorLocksHash[17];
    /** Class name. (Allocated after the end of the block as usual.) */
    char const             *pszName;
    /** Where this class was created.
     *  This is mainly used for finding automatically created lock classes.
     *  @remarks The strings are stored after this structure so we won't crash
     *           if the class lives longer than the module (dll/so/dylib) that
     *           spawned it. */
    RTLOCKVALSRCPOS         CreatePos;
#ifdef RTLOCKVAL_WITH_CLASS_HASH_STATS
    /** Hash hits. */
    uint32_t volatile       cHashHits;
    /** Hash misses. */
    uint32_t volatile       cHashMisses;
#endif
} RTLOCKVALCLASSINT;
AssertCompileSize(AVLLU32NODECORE, ARCH_BITS == 32 ? 20 : 32);
AssertCompileMemberOffset(RTLOCKVALCLASSINT, PriorLocks, 64);
237
238
239/*********************************************************************************************************************************
240* Global Variables *
241*********************************************************************************************************************************/
/** Serializing object destruction and deadlock detection.
 *
 * This makes sure that none of the memory examined by the deadlock detection
 * code will become invalid (reused for other purposes or made not present)
 * while the detection is in progress.
 *
 * NS: RTLOCKVALREC*, RTTHREADINT and RTLOCKVALDRECSHRD::papOwners destruction.
 * EW: Deadlock detection and some related activities.
 */
static RTSEMXROADS      g_hLockValidatorXRoads    = NIL_RTSEMXROADS;
/** Serializing class tree insert and lookups. */
static RTSEMRW          g_hLockValClassTreeRWLock = NIL_RTSEMRW;
/** Class tree. */
static PAVLLU32NODECORE g_LockValClassTree        = NULL;
/** Critical section serializing the teaching new rules to the classes. */
static RTCRITSECT       g_LockValClassTeachCS;

/** Whether the lock validator is enabled or disabled.
 * Only applies to new locks.  */
static bool volatile    g_fLockValidatorEnabled  = true;
/** Set if the lock validator is quiet (no assertion messages). */
#ifdef RT_STRICT
static bool volatile    g_fLockValidatorQuiet    = false;
#else
static bool volatile    g_fLockValidatorQuiet    = true;
#endif
/** Set if the lock validator may panic (RTAssertPanic). */
#ifdef RT_STRICT
static bool volatile    g_fLockValidatorMayPanic = true;
#else
static bool volatile    g_fLockValidatorMayPanic = false;
#endif
/** Whether to return an error status on wrong locking order. */
static bool volatile    g_fLockValSoftWrongOrder = false;


/*********************************************************************************************************************************
*   Internal Functions                                                                                                           *
*********************************************************************************************************************************/
static void     rtLockValidatorClassDestroy(RTLOCKVALCLASSINT *pClass);
static uint32_t rtLockValidatorStackDepth(PRTTHREADINT pThread);
283
284
285/**
286 * Lazy initialization of the lock validator globals.
287 */
/**
 * Lazy initialization of the lock validator globals.
 *
 * Creates the class-teaching critical section, the class tree RW semaphore and
 * the destruction/detection crossroads semaphore, then (ring-3 only) applies
 * configuration overrides from the IPRT_LOCK_VALIDATOR_* environment
 * variables.
 *
 * NOTE(review): the s_fInitializing compare-and-swap only prevents two threads
 * from running the body concurrently; a thread that loses the race returns
 * immediately and may observe still-NIL handles.  Callers appear to tolerate
 * NIL handles (see the g_hLockValidatorXRoads users) -- confirm.
 */
static void rtLockValidatorLazyInit(void)
{
    static uint32_t volatile s_fInitializing = false;
    if (ASMAtomicCmpXchgU32(&s_fInitializing, true, false))
    {
        /*
         * The locks.  Each handle is created into a local and only published
         * atomically on success, so partially constructed state never leaks.
         */
        if (!RTCritSectIsInitialized(&g_LockValClassTeachCS))
            RTCritSectInitEx(&g_LockValClassTeachCS, RTCRITSECT_FLAGS_NO_LOCK_VAL, NIL_RTLOCKVALCLASS,
                             RTLOCKVAL_SUB_CLASS_ANY, "RTLockVal-Teach");

        if (g_hLockValClassTreeRWLock == NIL_RTSEMRW)
        {
            RTSEMRW hSemRW;
            int rc = RTSemRWCreateEx(&hSemRW, RTSEMRW_FLAGS_NO_LOCK_VAL, NIL_RTLOCKVALCLASS, RTLOCKVAL_SUB_CLASS_ANY, "RTLockVal-Tree");
            if (RT_SUCCESS(rc))
                ASMAtomicWriteHandle(&g_hLockValClassTreeRWLock, hSemRW);
        }

        if (g_hLockValidatorXRoads == NIL_RTSEMXROADS)
        {
            RTSEMXROADS hXRoads;
            int rc = RTSemXRoadsCreate(&hXRoads);
            if (RT_SUCCESS(rc))
                ASMAtomicWriteHandle(&g_hLockValidatorXRoads, hXRoads);
        }

#ifdef IN_RING3
        /*
         * Check the environment for our config variables.
         * For each setting the "positive" and "negative" variable are both
         * checked, the negative one last so it wins if both are present.
         */
        if (RTEnvExist("IPRT_LOCK_VALIDATOR_ENABLED"))
            ASMAtomicWriteBool(&g_fLockValidatorEnabled, true);
        if (RTEnvExist("IPRT_LOCK_VALIDATOR_DISABLED"))
            ASMAtomicWriteBool(&g_fLockValidatorEnabled, false);

        if (RTEnvExist("IPRT_LOCK_VALIDATOR_MAY_PANIC"))
            ASMAtomicWriteBool(&g_fLockValidatorMayPanic, true);
        if (RTEnvExist("IPRT_LOCK_VALIDATOR_MAY_NOT_PANIC"))
            ASMAtomicWriteBool(&g_fLockValidatorMayPanic, false);

        if (RTEnvExist("IPRT_LOCK_VALIDATOR_NOT_QUIET"))
            ASMAtomicWriteBool(&g_fLockValidatorQuiet, false);
        if (RTEnvExist("IPRT_LOCK_VALIDATOR_QUIET"))
            ASMAtomicWriteBool(&g_fLockValidatorQuiet, true);

        if (RTEnvExist("IPRT_LOCK_VALIDATOR_STRICT_ORDER"))
            ASMAtomicWriteBool(&g_fLockValSoftWrongOrder, false);
        if (RTEnvExist("IPRT_LOCK_VALIDATOR_SOFT_ORDER"))
            ASMAtomicWriteBool(&g_fLockValSoftWrongOrder, true);
#endif

        /*
         * Register cleanup
         */
        /** @todo register some cleanup callback if we care. */

        ASMAtomicWriteU32(&s_fInitializing, false);
    }
}
349
350
351
352/** Wrapper around ASMAtomicReadPtr. */
/** Wrapper around ASMAtomicReadPtr that additionally asserts that the value
 *  read is pointer aligned (misalignment would indicate corruption). */
DECL_FORCE_INLINE(PRTLOCKVALRECUNION) rtLockValidatorReadRecUnionPtr(PRTLOCKVALRECUNION volatile *ppRec)
{
    PRTLOCKVALRECUNION p = ASMAtomicReadPtrT(ppRec, PRTLOCKVALRECUNION);
    RTLOCKVAL_ASSERT_PTR_ALIGN(p);
    return p;
}
359
360
361/** Wrapper around ASMAtomicWritePtr. */
/** Wrapper around ASMAtomicWritePtr that additionally asserts that the new
 *  value is pointer aligned before publishing it. */
DECL_FORCE_INLINE(void) rtLockValidatorWriteRecUnionPtr(PRTLOCKVALRECUNION volatile *ppRec, PRTLOCKVALRECUNION pRecNew)
{
    RTLOCKVAL_ASSERT_PTR_ALIGN(pRecNew);
    ASMAtomicWritePtr(ppRec, pRecNew);
}
367
368
369/** Wrapper around ASMAtomicReadPtr. */
/** Wrapper around ASMAtomicReadPtr for reading a thread handle field,
 *  asserting pointer alignment of the value read. */
DECL_FORCE_INLINE(PRTTHREADINT) rtLockValidatorReadThreadHandle(RTTHREAD volatile *phThread)
{
    PRTTHREADINT p = ASMAtomicReadPtrT(phThread, PRTTHREADINT);
    RTLOCKVAL_ASSERT_PTR_ALIGN(p);
    return p;
}
376
377
378/** Wrapper around ASMAtomicUoReadPtr. */
/** Wrapper around ASMAtomicUoReadPtr (unordered read; caller supplies any
 *  required ordering), asserting pointer alignment of the value read. */
DECL_FORCE_INLINE(PRTLOCKVALRECSHRDOWN) rtLockValidatorUoReadSharedOwner(PRTLOCKVALRECSHRDOWN volatile *ppOwner)
{
    PRTLOCKVALRECSHRDOWN p = ASMAtomicUoReadPtrT(ppOwner, PRTLOCKVALRECSHRDOWN);
    RTLOCKVAL_ASSERT_PTR_ALIGN(p);
    return p;
}
385
386
387/**
388 * Reads a volatile thread handle field and returns the thread name.
389 *
390 * @returns Thread name (read only).
391 * @param phThread The thread handle field.
392 */
393static const char *rtLockValidatorNameThreadHandle(RTTHREAD volatile *phThread)
394{
395 PRTTHREADINT pThread = rtLockValidatorReadThreadHandle(phThread);
396 if (!pThread)
397 return "<NIL>";
398 if (!RT_VALID_PTR(pThread))
399 return "<INVALID>";
400 if (pThread->u32Magic != RTTHREADINT_MAGIC)
401 return "<BAD-THREAD-MAGIC>";
402 return pThread->szName;
403}
404
405
406/**
407 * Launch a simple assertion like complaint w/ panic.
408 *
409 * @param SRC_POS The source position where call is being made from.
410 * @param pszWhat What we're complaining about.
411 * @param ... Format arguments.
412 */
413static void rtLockValComplain(RT_SRC_POS_DECL, const char *pszWhat, ...)
414{
415 if (!ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
416 {
417 RTAssertMsg1Weak("RTLockValidator", iLine, pszFile, pszFunction);
418 va_list va;
419 va_start(va, pszWhat);
420 RTAssertMsg2WeakV(pszWhat, va);
421 va_end(va);
422 }
423 if (!ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
424 RTAssertPanic();
425}
426
427
428/**
429 * Describes the class.
430 *
431 * @param pszPrefix Message prefix.
432 * @param pClass The class to complain about.
433 * @param uSubClass My sub-class.
434 * @param fVerbose Verbose description including relations to other
435 * classes.
436 */
/**
 * Describes the class.
 *
 * Appends a description of @a pClass to the current assertion message.  The
 * class pointer is validated (pointer + magic) before being dereferenced so
 * this is safe to call on garbage input.
 *
 * @param   pszPrefix       Message prefix.
 * @param   pClass          The class to complain about.
 * @param   uSubClass       My sub-class.
 * @param   fVerbose        Verbose description including relations to other
 *                          classes (one line per prior class) instead of the
 *                          compact ten-per-line listing.
 */
static void rtLockValComplainAboutClass(const char *pszPrefix, RTLOCKVALCLASSINT *pClass, uint32_t uSubClass, bool fVerbose)
{
    if (ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
        return;

    /* Stringify the sub-class. */
    const char *pszSubClass;
    char        szSubClass[32];
    if (uSubClass < RTLOCKVAL_SUB_CLASS_USER)
        switch (uSubClass)
        {
            case RTLOCKVAL_SUB_CLASS_NONE: pszSubClass = "none"; break;
            case RTLOCKVAL_SUB_CLASS_ANY:  pszSubClass = "any"; break;
            default:
                RTStrPrintf(szSubClass, sizeof(szSubClass), "invl-%u", uSubClass);
                pszSubClass = szSubClass;
                break;
        }
    else
    {
        RTStrPrintf(szSubClass, sizeof(szSubClass), "%u", uSubClass);
        pszSubClass = szSubClass;
    }

    /* Validate the class pointer. */
    if (!RT_VALID_PTR(pClass))
    {
        RTAssertMsg2AddWeak("%sbad class=%p sub-class=%s\n", pszPrefix, pClass, pszSubClass);
        return;
    }
    if (pClass->u32Magic != RTLOCKVALCLASS_MAGIC)
    {
        RTAssertMsg2AddWeak("%sbad class=%p magic=%#x sub-class=%s\n", pszPrefix, pClass, pClass->u32Magic, pszSubClass);
        return;
    }

    /* OK, dump the class info. */
    RTAssertMsg2AddWeak("%sclass=%p %s created={%Rbn(%u) %Rfn %p} sub-class=%s\n", pszPrefix,
                        pClass,
                        pClass->pszName,
                        pClass->CreatePos.pszFile,
                        pClass->CreatePos.uLine,
                        pClass->CreatePos.pszFunction,
                        pClass->CreatePos.uId,
                        pszSubClass);
    if (fVerbose)
    {
        /* One line per prior class: name, how it was learned, lookup count. */
        uint32_t i        = 0;
        uint32_t cPrinted = 0;
        for (PRTLOCKVALCLASSREFCHUNK pChunk = &pClass->PriorLocks; pChunk; pChunk = pChunk->pNext)
            for (unsigned j = 0; j < RT_ELEMENTS(pChunk->aRefs); j++, i++)
            {
                RTLOCKVALCLASSINT *pCurClass = pChunk->aRefs[j].hClass;
                if (pCurClass != NIL_RTLOCKVALCLASS)
                {
                    RTAssertMsg2AddWeak("%s%s #%02u: %s, %s, %u lookup%s\n", pszPrefix,
                                        cPrinted == 0
                                        ? "Prior:"
                                        : "      ",
                                        i,
                                        pCurClass->pszName,
                                        pChunk->aRefs[j].fAutodidacticism
                                        ? "autodidactic"
                                        : "manually    ",
                                        pChunk->aRefs[j].cLookups,
                                        pChunk->aRefs[j].cLookups != 1 ? "s" : "");
                    cPrinted++;
                }
            }
        if (!cPrinted)
            RTAssertMsg2AddWeak("%sPrior: none\n", pszPrefix);
#ifdef RTLOCKVAL_WITH_CLASS_HASH_STATS
        RTAssertMsg2AddWeak("%sHash Stats: %u hits, %u misses\n", pszPrefix, pClass->cHashHits, pClass->cHashMisses);
#endif
    }
    else
    {
        /* Compact listing, ten class names per line; '*' marks autodidactic
           (automatically learned) entries. */
        uint32_t cPrinted = 0;
        for (PRTLOCKVALCLASSREFCHUNK pChunk = &pClass->PriorLocks; pChunk; pChunk = pChunk->pNext)
            for (unsigned j = 0; j < RT_ELEMENTS(pChunk->aRefs); j++)
            {
                RTLOCKVALCLASSINT *pCurClass = pChunk->aRefs[j].hClass;
                if (pCurClass != NIL_RTLOCKVALCLASS)
                {
                    if ((cPrinted % 10) == 0)
                        RTAssertMsg2AddWeak("%sPrior classes: %s%s", pszPrefix, pCurClass->pszName,
                                            pChunk->aRefs[j].fAutodidacticism ? "*" : "");
                    else if ((cPrinted % 10) != 9)
                        RTAssertMsg2AddWeak(", %s%s", pCurClass->pszName,
                                            pChunk->aRefs[j].fAutodidacticism ? "*" : "");
                    else
                        RTAssertMsg2AddWeak(", %s%s\n", pCurClass->pszName,
                                            pChunk->aRefs[j].fAutodidacticism ? "*" : "");
                    cPrinted++;
                }
            }
        if (!cPrinted)
            RTAssertMsg2AddWeak("%sPrior classes: none\n", pszPrefix);
        else if ((cPrinted % 10) != 0)
            RTAssertMsg2AddWeak("\n");
    }
}
539
540
541/**
542 * Helper for getting the class name.
543 * @returns Class name string.
544 * @param pClass The class.
545 */
546static const char *rtLockValComplainGetClassName(RTLOCKVALCLASSINT *pClass)
547{
548 if (!pClass)
549 return "<nil-class>";
550 if (!RT_VALID_PTR(pClass))
551 return "<bad-class-ptr>";
552 if (pClass->u32Magic != RTLOCKVALCLASS_MAGIC)
553 return "<bad-class-magic>";
554 if (!pClass->pszName)
555 return "<no-class-name>";
556 return pClass->pszName;
557}
558
559/**
560 * Formats the sub-class.
561 *
562 * @returns Stringified sub-class.
563 * @param uSubClass The name.
564 * @param pszBuf Buffer that is big enough.
565 */
566static const char *rtLockValComplainGetSubClassName(uint32_t uSubClass, char *pszBuf)
567{
568 if (uSubClass < RTLOCKVAL_SUB_CLASS_USER)
569 switch (uSubClass)
570 {
571 case RTLOCKVAL_SUB_CLASS_NONE: return "none";
572 case RTLOCKVAL_SUB_CLASS_ANY: return "any";
573 default:
574 RTStrPrintf(pszBuf, 32, "invl-%u", uSubClass);
575 break;
576 }
577 else
578 RTStrPrintf(pszBuf, 32, "%x", uSubClass);
579 return pszBuf;
580}
581
582
583/**
584 * Helper for rtLockValComplainAboutLock.
585 */
586DECL_FORCE_INLINE(void) rtLockValComplainAboutLockHlp(const char *pszPrefix, PRTLOCKVALRECUNION pRec, const char *pszSuffix,
587 uint32_t u32Magic, PCRTLOCKVALSRCPOS pSrcPos, uint32_t cRecursion,
588 const char *pszFrameType)
589{
590 char szBuf[32];
591 switch (u32Magic)
592 {
593 case RTLOCKVALRECEXCL_MAGIC:
594#ifdef RTLOCKVAL_WITH_VERBOSE_DUMPS
595 RTAssertMsg2AddWeak("%s%p %s xrec=%p own=%s r=%u cls=%s/%s pos={%Rbn(%u) %Rfn %p} [x%s]%s", pszPrefix,
596 pRec->Excl.hLock, pRec->Excl.szName, pRec,
597 rtLockValidatorNameThreadHandle(&pRec->Excl.hThread), cRecursion,
598 rtLockValComplainGetClassName(pRec->Excl.hClass),
599 rtLockValComplainGetSubClassName(pRec->Excl.uSubClass, szBuf),
600 pSrcPos->pszFile, pSrcPos->uLine, pSrcPos->pszFunction, pSrcPos->uId,
601 pszFrameType, pszSuffix);
602#else
603 RTAssertMsg2AddWeak("%s%p %s own=%s r=%u cls=%s/%s pos={%Rbn(%u) %Rfn %p} [x%s]%s", pszPrefix,
604 pRec->Excl.hLock, pRec->Excl.szName,
605 rtLockValidatorNameThreadHandle(&pRec->Excl.hThread), cRecursion,
606 rtLockValComplainGetClassName(pRec->Excl.hClass),
607 rtLockValComplainGetSubClassName(pRec->Excl.uSubClass, szBuf),
608 pSrcPos->pszFile, pSrcPos->uLine, pSrcPos->pszFunction, pSrcPos->uId,
609 pszFrameType, pszSuffix);
610#endif
611 break;
612
613 case RTLOCKVALRECSHRD_MAGIC:
614 RTAssertMsg2AddWeak("%ss %p %s srec=%p cls=%s/%s [s%s]%s", pszPrefix,
615 pRec->Shared.hLock, pRec->Shared.szName, pRec,
616 rtLockValComplainGetClassName(pRec->Shared.hClass),
617 rtLockValComplainGetSubClassName(pRec->Shared.uSubClass, szBuf),
618 pszFrameType, pszSuffix);
619 break;
620
621 case RTLOCKVALRECSHRDOWN_MAGIC:
622 {
623 PRTLOCKVALRECSHRD pShared = pRec->ShrdOwner.pSharedRec;
624 if ( RT_VALID_PTR(pShared)
625 && pShared->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC)
626#ifdef RTLOCKVAL_WITH_VERBOSE_DUMPS
627 RTAssertMsg2AddWeak("%s%p %s srec=%p trec=%p own=%s r=%u cls=%s/%s pos={%Rbn(%u) %Rfn %p} [o%s]%s", pszPrefix,
628 pShared->hLock, pShared->szName, pShared,
629 pRec, rtLockValidatorNameThreadHandle(&pRec->ShrdOwner.hThread), cRecursion,
630 rtLockValComplainGetClassName(pShared->hClass),
631 rtLockValComplainGetSubClassName(pShared->uSubClass, szBuf),
632 pSrcPos->pszFile, pSrcPos->uLine, pSrcPos->pszFunction, pSrcPos->uId,
633 pszSuffix, pszSuffix);
634#else
635 RTAssertMsg2AddWeak("%s%p %s own=%s r=%u cls=%s/%s pos={%Rbn(%u) %Rfn %p} [o%s]%s", pszPrefix,
636 pShared->hLock, pShared->szName,
637 rtLockValidatorNameThreadHandle(&pRec->ShrdOwner.hThread), cRecursion,
638 rtLockValComplainGetClassName(pShared->hClass),
639 rtLockValComplainGetSubClassName(pShared->uSubClass, szBuf),
640 pSrcPos->pszFile, pSrcPos->uLine, pSrcPos->pszFunction, pSrcPos->uId,
641 pszFrameType, pszSuffix);
642#endif
643 else
644 RTAssertMsg2AddWeak("%sbad srec=%p trec=%p own=%s r=%u pos={%Rbn(%u) %Rfn %p} [x%s]%s", pszPrefix,
645 pShared,
646 pRec, rtLockValidatorNameThreadHandle(&pRec->ShrdOwner.hThread), cRecursion,
647 pSrcPos->pszFile, pSrcPos->uLine, pSrcPos->pszFunction, pSrcPos->uId,
648 pszFrameType, pszSuffix);
649 break;
650 }
651
652 default:
653 AssertMsgFailed(("%#x\n", u32Magic));
654 }
655}
656
657
658/**
659 * Describes the lock.
660 *
661 * @param pszPrefix Message prefix.
662 * @param pRec The lock record we're working on.
663 * @param pszSuffix Message suffix.
664 */
/**
 * Describes the lock.
 *
 * Validates the record pointer and dispatches on its magic; nested records
 * (RTLOCKVALRECNEST) are unwrapped and described via their inner record with
 * the "/r" frame-type marker.  Silently does nothing if the validator is
 * quiet or the record pointer is invalid.
 *
 * @param   pszPrefix       Message prefix.
 * @param   pRec            The lock record we're working on.
 * @param   pszSuffix       Message suffix.
 */
static void rtLockValComplainAboutLock(const char *pszPrefix, PRTLOCKVALRECUNION pRec, const char *pszSuffix)
{
    /* When recursion records are enabled, each recursion has its own nest
       record, so plain records always report recursion depth 1. */
#ifdef RTLOCKVAL_WITH_RECURSION_RECORDS
# define FIX_REC(r)     1
#else
# define FIX_REC(r)     (r)
#endif
    if (   RT_VALID_PTR(pRec)
        && !ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
    {
        switch (pRec->Core.u32Magic)
        {
            case RTLOCKVALRECEXCL_MAGIC:
                rtLockValComplainAboutLockHlp(pszPrefix, pRec, pszSuffix, RTLOCKVALRECEXCL_MAGIC,
                                              &pRec->Excl.SrcPos, FIX_REC(pRec->Excl.cRecursion), "");
                break;

            case RTLOCKVALRECSHRD_MAGIC:
                rtLockValComplainAboutLockHlp(pszPrefix, pRec, pszSuffix, RTLOCKVALRECSHRD_MAGIC, NULL, 0, "");
                break;

            case RTLOCKVALRECSHRDOWN_MAGIC:
                rtLockValComplainAboutLockHlp(pszPrefix, pRec, pszSuffix, RTLOCKVALRECSHRDOWN_MAGIC,
                                              &pRec->ShrdOwner.SrcPos, FIX_REC(pRec->ShrdOwner.cRecursion), "");
                break;

            case RTLOCKVALRECNEST_MAGIC:
            {
                /* Unwrap the nested record; only describe the inner record if
                   it is valid and of a known type. */
                PRTLOCKVALRECUNION pRealRec = pRec->Nest.pRec;
                uint32_t u32Magic;
                if (   RT_VALID_PTR(pRealRec)
                    && (   (u32Magic = pRealRec->Core.u32Magic) == RTLOCKVALRECEXCL_MAGIC
                        || u32Magic == RTLOCKVALRECSHRD_MAGIC
                        || u32Magic == RTLOCKVALRECSHRDOWN_MAGIC)
                   )
                    rtLockValComplainAboutLockHlp(pszPrefix, pRealRec, pszSuffix, u32Magic,
                                                  &pRec->Nest.SrcPos, pRec->Nest.cRecursion, "/r");
                else
                    RTAssertMsg2AddWeak("%sbad rrec=%p nrec=%p r=%u pos={%Rbn(%u) %Rfn %p}%s", pszPrefix,
                                        pRealRec, pRec, pRec->Nest.cRecursion,
                                        pRec->Nest.SrcPos.pszFile, pRec->Nest.SrcPos.uLine, pRec->Nest.SrcPos.pszFunction, pRec->Nest.SrcPos.uId,
                                        pszSuffix);
                break;
            }

            default:
                RTAssertMsg2AddWeak("%spRec=%p u32Magic=%#x (bad)%s", pszPrefix, pRec, pRec->Core.u32Magic, pszSuffix);
                break;
        }
    }
#undef FIX_REC
}
717
718
719/**
720 * Dump the lock stack.
721 *
722 * @param pThread The thread which lock stack we're gonna dump.
723 * @param cchIndent The indentation in chars.
724 * @param cMinFrames The minimum number of frames to consider
725 * dumping.
726 * @param pHighightRec Record that should be marked specially in the
727 * dump.
728 */
/**
 * Dump the lock stack.
 *
 * Walks the thread's lock stack from the top, printing one line per record
 * via rtLockValComplainAboutLock and marking @a pHighightRec with "(*)".
 * Does nothing if the validator is quiet, the thread pointer is invalid, or
 * the stack has fewer than @a cMinFrames entries.
 *
 * @param   pThread         The thread which lock stack we're gonna dump.
 * @param   cchIndent       The indentation in chars.
 * @param   cMinFrames      The minimum number of frames to consider
 *                          dumping.
 * @param   pHighightRec    Record that should be marked specially in the
 *                          dump.  (Name sic -- "highlight".)
 */
static void rtLockValComplainAboutLockStack(PRTTHREADINT pThread, unsigned cchIndent, uint32_t cMinFrames,
                                            PRTLOCKVALRECUNION pHighightRec)
{
    if (   RT_VALID_PTR(pThread)
        && !ASMAtomicUoReadBool(&g_fLockValidatorQuiet)
        && pThread->u32Magic == RTTHREADINT_MAGIC
       )
    {
        uint32_t cEntries = rtLockValidatorStackDepth(pThread);
        if (cEntries >= cMinFrames)
        {
            RTAssertMsg2AddWeak("%*s---- start of lock stack for %p %s - %u entr%s ----\n", cchIndent, "",
                                pThread, pThread->szName, cEntries, cEntries == 1 ? "y" : "ies");
            PRTLOCKVALRECUNION pCur = rtLockValidatorReadRecUnionPtr(&pThread->LockValidator.pStackTop);
            for (uint32_t i = 0; RT_VALID_PTR(pCur); i++)
            {
                char szPrefix[80];
                RTStrPrintf(szPrefix, sizeof(szPrefix), "%*s#%02u: ", cchIndent, "", i);
                rtLockValComplainAboutLock(szPrefix, pCur, pHighightRec != pCur ? "\n" : " (*)\n");
                /* Follow the appropriate down-pointer for the record type;
                   an unknown magic terminates the walk (corruption guard). */
                switch (pCur->Core.u32Magic)
                {
                    case RTLOCKVALRECEXCL_MAGIC:    pCur = rtLockValidatorReadRecUnionPtr(&pCur->Excl.pDown); break;
                    case RTLOCKVALRECSHRDOWN_MAGIC: pCur = rtLockValidatorReadRecUnionPtr(&pCur->ShrdOwner.pDown); break;
                    case RTLOCKVALRECNEST_MAGIC:    pCur = rtLockValidatorReadRecUnionPtr(&pCur->Nest.pDown); break;
                    default:
                        RTAssertMsg2AddWeak("%*s<bad stack frame>\n", cchIndent, "");
                        pCur = NULL;
                        break;
                }
            }
            RTAssertMsg2AddWeak("%*s---- end of lock stack ----\n", cchIndent, "");
        }
    }
}
763
764
765/**
766 * Launch the initial complaint.
767 *
768 * @param pszWhat What we're complaining about.
769 * @param pSrcPos Where we are complaining from, as it were.
770 * @param pThreadSelf The calling thread.
771 * @param pRec The main lock involved. Can be NULL.
772 * @param fDumpStack Whether to dump the lock stack (true) or not
773 * (false).
774 */
775static void rtLockValComplainFirst(const char *pszWhat, PCRTLOCKVALSRCPOS pSrcPos, PRTTHREADINT pThreadSelf,
776 PRTLOCKVALRECUNION pRec, bool fDumpStack)
777{
778 if (!ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
779 {
780 ASMCompilerBarrier(); /* paranoia */
781 RTAssertMsg1Weak("RTLockValidator", pSrcPos ? pSrcPos->uLine : 0, pSrcPos ? pSrcPos->pszFile : NULL, pSrcPos ? pSrcPos->pszFunction : NULL);
782 if (pSrcPos && pSrcPos->uId)
783 RTAssertMsg2Weak("%s [uId=%p thrd=%s]\n", pszWhat, pSrcPos->uId, RT_VALID_PTR(pThreadSelf) ? pThreadSelf->szName : "<NIL>");
784 else
785 RTAssertMsg2Weak("%s [thrd=%s]\n", pszWhat, RT_VALID_PTR(pThreadSelf) ? pThreadSelf->szName : "<NIL>");
786 rtLockValComplainAboutLock("Lock: ", pRec, "\n");
787 if (fDumpStack)
788 rtLockValComplainAboutLockStack(pThreadSelf, 0, 1, pRec);
789 }
790}
791
792
793/**
794 * Continue bitching.
795 *
796 * @param pszFormat Format string.
797 * @param ... Format arguments.
798 */
799static void rtLockValComplainMore(const char *pszFormat, ...)
800{
801 if (!ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
802 {
803 va_list va;
804 va_start(va, pszFormat);
805 RTAssertMsg2AddWeakV(pszFormat, va);
806 va_end(va);
807 }
808}
809
810
811/**
812 * Raise a panic if enabled.
813 */
814static void rtLockValComplainPanic(void)
815{
816 if (ASMAtomicUoReadBool(&g_fLockValidatorMayPanic))
817 RTAssertPanic();
818}
819
820
821/**
822 * Copy a source position record.
823 *
824 * @param pDst The destination.
825 * @param pSrc The source. Can be NULL.
826 */
/**
 * Copy a source position record.
 *
 * Each field is written with an unordered atomic store; readers may therefore
 * observe a mix of old and new fields, but never a torn individual pointer or
 * integer.  A NULL source clears the destination.
 *
 * @param   pDst                The destination.
 * @param   pSrc                The source.  Can be NULL.
 */
DECL_FORCE_INLINE(void) rtLockValidatorSrcPosCopy(PRTLOCKVALSRCPOS pDst, PCRTLOCKVALSRCPOS pSrc)
{
    if (pSrc)
    {
        ASMAtomicUoWriteU32(&pDst->uLine, pSrc->uLine);
        ASMAtomicUoWritePtr(&pDst->pszFile, pSrc->pszFile);
        ASMAtomicUoWritePtr(&pDst->pszFunction, pSrc->pszFunction);
        /* uId is integer-typed; cast to a pointer lvalue for the atomic write. */
        ASMAtomicUoWritePtr((void * volatile *)&pDst->uId, (void *)pSrc->uId);
    }
    else
    {
        ASMAtomicUoWriteU32(&pDst->uLine, 0);
        ASMAtomicUoWriteNullPtr(&pDst->pszFile);
        ASMAtomicUoWriteNullPtr(&pDst->pszFunction);
        ASMAtomicUoWritePtr(&pDst->uId, (RTHCUINTPTR)0);
    }
}
844
845
846/**
847 * Init a source position record.
848 *
849 * @param pSrcPos The source position record.
850 */
851DECL_FORCE_INLINE(void) rtLockValidatorSrcPosInit(PRTLOCKVALSRCPOS pSrcPos)
852{
853 pSrcPos->pszFile = NULL;
854 pSrcPos->pszFunction = NULL;
855 pSrcPos->uId = 0;
856 pSrcPos->uLine = 0;
857#if HC_ARCH_BITS == 64
858 pSrcPos->u32Padding = 0;
859#endif
860}
861
862
863/**
864 * Hashes the specified source position.
865 *
866 * @returns Hash.
867 * @param pSrcPos The source position record.
868 */
869static uint32_t rtLockValidatorSrcPosHash(PCRTLOCKVALSRCPOS pSrcPos)
870{
871 uint32_t uHash;
872 if ( ( pSrcPos->pszFile
873 || pSrcPos->pszFunction)
874 && pSrcPos->uLine != 0)
875 {
876 uHash = 0;
877 if (pSrcPos->pszFile)
878 uHash = sdbmInc(pSrcPos->pszFile, uHash);
879 if (pSrcPos->pszFunction)
880 uHash = sdbmInc(pSrcPos->pszFunction, uHash);
881 uHash += pSrcPos->uLine;
882 }
883 else
884 {
885 Assert(pSrcPos->uId);
886 uHash = (uint32_t)pSrcPos->uId;
887 }
888
889 return uHash;
890}
891
892
893/**
894 * Compares two source positions.
895 *
896 * @returns 0 if equal, < 0 if pSrcPos1 is smaller than pSrcPos2, > 0 if
897 * otherwise.
898 * @param pSrcPos1 The first source position.
899 * @param pSrcPos2 The second source position.
900 */
901static int rtLockValidatorSrcPosCompare(PCRTLOCKVALSRCPOS pSrcPos1, PCRTLOCKVALSRCPOS pSrcPos2)
902{
903 if (pSrcPos1->uLine != pSrcPos2->uLine)
904 return pSrcPos1->uLine < pSrcPos2->uLine ? -1 : 1;
905
906 int iDiff = RTStrCmp(pSrcPos1->pszFile, pSrcPos2->pszFile);
907 if (iDiff != 0)
908 return iDiff;
909
910 iDiff = RTStrCmp(pSrcPos1->pszFunction, pSrcPos2->pszFunction);
911 if (iDiff != 0)
912 return iDiff;
913
914 if (pSrcPos1->uId != pSrcPos2->uId)
915 return pSrcPos1->uId < pSrcPos2->uId ? -1 : 1;
916 return 0;
917}
918
919
920
921/**
922 * Serializes destruction of RTLOCKVALREC* and RTTHREADINT structures.
923 */
/**
 * Serializes destruction of RTLOCKVALREC* and RTTHREADINT structures.
 *
 * Enters the NS direction of the crossroads semaphore; a no-op before lazy
 * initialization has created the semaphore.  Pair with
 * rtLockValidatorSerializeDestructLeave().
 */
DECLHIDDEN(void) rtLockValidatorSerializeDestructEnter(void)
{
    RTSEMXROADS hXRoads = g_hLockValidatorXRoads;
    if (hXRoads != NIL_RTSEMXROADS)
        RTSemXRoadsNSEnter(hXRoads);
}
930
931
932/**
933 * Call after rtLockValidatorSerializeDestructEnter.
934 */
935DECLHIDDEN(void) rtLockValidatorSerializeDestructLeave(void)
936{
937 RTSEMXROADS hXRoads = g_hLockValidatorXRoads;
938 if (hXRoads != NIL_RTSEMXROADS)
939 RTSemXRoadsNSLeave(hXRoads);
940}
941
942
943/**
944 * Serializes deadlock detection against destruction of the objects being
945 * inspected.
946 */
947DECLINLINE(void) rtLockValidatorSerializeDetectionEnter(void)
948{
949 RTSEMXROADS hXRoads = g_hLockValidatorXRoads;
950 if (hXRoads != NIL_RTSEMXROADS)
951 RTSemXRoadsEWEnter(hXRoads);
952}
953
954
955/**
956 * Call after rtLockValidatorSerializeDetectionEnter.
957 */
958DECLHIDDEN(void) rtLockValidatorSerializeDetectionLeave(void)
959{
960 RTSEMXROADS hXRoads = g_hLockValidatorXRoads;
961 if (hXRoads != NIL_RTSEMXROADS)
962 RTSemXRoadsEWLeave(hXRoads);
963}
964
965
966/**
967 * Initializes the per thread lock validator data.
968 *
969 * @param pPerThread The data.
970 */
971DECLHIDDEN(void) rtLockValidatorInitPerThread(RTLOCKVALPERTHREAD *pPerThread)
972{
973 pPerThread->bmFreeShrdOwners = UINT32_MAX;
974
975 /* ASSUMES the rest has already been zeroed. */
976 Assert(pPerThread->pRec == NULL);
977 Assert(pPerThread->cWriteLocks == 0);
978 Assert(pPerThread->cReadLocks == 0);
979 Assert(pPerThread->fInValidator == false);
980 Assert(pPerThread->pStackTop == NULL);
981}
982
983
984/**
985 * Delete the per thread lock validator data.
986 *
987 * @param pPerThread The data.
988 */
989DECLHIDDEN(void) rtLockValidatorDeletePerThread(RTLOCKVALPERTHREAD *pPerThread)
990{
991 /*
992 * Check that the thread doesn't own any locks at this time.
993 */
994 if (pPerThread->pStackTop)
995 {
996 rtLockValComplainFirst("Thread terminating owning locks!", NULL,
997 RT_FROM_MEMBER(pPerThread, RTTHREADINT, LockValidator),
998 pPerThread->pStackTop, true);
999 rtLockValComplainPanic();
1000 }
1001
1002 /*
1003 * Free the recursion records.
1004 */
1005 PRTLOCKVALRECNEST pCur = pPerThread->pFreeNestRecs;
1006 pPerThread->pFreeNestRecs = NULL;
1007 while (pCur)
1008 {
1009 PRTLOCKVALRECNEST pNext = pCur->pNextFree;
1010 RTMemFree(pCur);
1011 pCur = pNext;
1012 }
1013}
1014
1015RTDECL(int) RTLockValidatorClassCreateEx(PRTLOCKVALCLASS phClass, PCRTLOCKVALSRCPOS pSrcPos,
1016 bool fAutodidact, bool fRecursionOk, bool fStrictReleaseOrder,
1017 RTMSINTERVAL cMsMinDeadlock, RTMSINTERVAL cMsMinOrder,
1018 const char *pszNameFmt, ...)
1019{
1020 va_list va;
1021 va_start(va, pszNameFmt);
1022 int rc = RTLockValidatorClassCreateExV(phClass, pSrcPos, fAutodidact, fRecursionOk, fStrictReleaseOrder,
1023 cMsMinDeadlock, cMsMinOrder, pszNameFmt, va);
1024 va_end(va);
1025 return rc;
1026}
1027
1028
RTDECL(int) RTLockValidatorClassCreateExV(PRTLOCKVALCLASS phClass, PCRTLOCKVALSRCPOS pSrcPos,
                                          bool fAutodidact, bool fRecursionOk, bool fStrictReleaseOrder,
                                          RTMSINTERVAL cMsMinDeadlock, RTMSINTERVAL cMsMinOrder,
                                          const char *pszNameFmt, va_list va)
{
    Assert(cMsMinDeadlock >= 1);
    Assert(cMsMinOrder >= 1);
    AssertPtr(pSrcPos);

    /*
     * Format the name and calc its length.
     */
    size_t cbName;
    char szName[32];
    if (pszNameFmt && *pszNameFmt)
        cbName = RTStrPrintfV(szName, sizeof(szName), pszNameFmt, va) + 1;
    else
    {
        /* No name given - generate a unique "anon-N" name from a global counter. */
        static uint32_t volatile s_cAnonymous = 0;
        uint32_t i = ASMAtomicIncU32(&s_cAnonymous);
        cbName = RTStrPrintf(szName, sizeof(szName), "anon-%u", i - 1) + 1;
    }

    /*
     * Figure out the file and function name lengths and allocate memory for
     * it all.
     */
    /* The name, file and function strings are packed into the same allocation,
       immediately after the class structure itself. */
    size_t const cbFile = pSrcPos->pszFile ? strlen(pSrcPos->pszFile) + 1 : 0;
    size_t const cbFunction = pSrcPos->pszFunction ? strlen(pSrcPos->pszFunction) + 1 : 0;
    RTLOCKVALCLASSINT *pThis = (RTLOCKVALCLASSINT *)RTMemAllocVarTag(sizeof(*pThis) + cbFile + cbFunction + cbName,
                                                                     "may-leak:RTLockValidatorClassCreateExV");
    if (!pThis)
        return VERR_NO_MEMORY;
    RTMEM_MAY_LEAK(pThis);

    /*
     * Initialize the class data.
     */
    /* The AVL key is the hash of the creation source position. */
    pThis->Core.Key = rtLockValidatorSrcPosHash(pSrcPos);
    pThis->Core.uchHeight = 0;
    pThis->Core.pLeft = NULL;
    pThis->Core.pRight = NULL;
    pThis->Core.pList = NULL;
    pThis->u32Magic = RTLOCKVALCLASS_MAGIC;
    pThis->cRefs = 1; /* The caller's reference. */
    pThis->fAutodidact = fAutodidact;
    pThis->fRecursionOk = fRecursionOk;
    pThis->fStrictReleaseOrder = fStrictReleaseOrder;
    pThis->fInTree = false;
    pThis->fDonateRefToNextRetainer = false;
    pThis->afReserved[0] = false;
    pThis->afReserved[1] = false;
    pThis->afReserved[2] = false;
    pThis->cMsMinDeadlock = cMsMinDeadlock;
    pThis->cMsMinOrder = cMsMinOrder;
    for (unsigned i = 0; i < RT_ELEMENTS(pThis->au32Reserved); i++)
        pThis->au32Reserved[i] = 0;
    /* Mark all prior-lock reference slots in the embedded chunk as free. */
    for (unsigned i = 0; i < RT_ELEMENTS(pThis->PriorLocks.aRefs); i++)
    {
        pThis->PriorLocks.aRefs[i].hClass = NIL_RTLOCKVALCLASS;
        pThis->PriorLocks.aRefs[i].cLookups = 0;
        pThis->PriorLocks.aRefs[i].fAutodidacticism = false;
        pThis->PriorLocks.aRefs[i].afReserved[0] = false;
        pThis->PriorLocks.aRefs[i].afReserved[1] = false;
        pThis->PriorLocks.aRefs[i].afReserved[2] = false;
    }
    pThis->PriorLocks.pNext = NULL;
    for (unsigned i = 0; i < RT_ELEMENTS(pThis->apPriorLocksHash); i++)
        pThis->apPriorLocksHash[i] = NULL;
    /* Copy the strings into the tail of the allocation and point the class at them. */
    char *pszDst = (char *)(pThis + 1);
    pThis->pszName = (char *)memcpy(pszDst, szName, cbName);
    pszDst += cbName;
    rtLockValidatorSrcPosCopy(&pThis->CreatePos, pSrcPos);
    pThis->CreatePos.pszFile = pSrcPos->pszFile ? (char *)memcpy(pszDst, pSrcPos->pszFile, cbFile) : NULL;
    pszDst += cbFile;
    pThis->CreatePos.pszFunction= pSrcPos->pszFunction ? (char *)memcpy(pszDst, pSrcPos->pszFunction, cbFunction) : NULL;
    /* The copied position must hash to the same key we stored above. */
    Assert(rtLockValidatorSrcPosHash(&pThis->CreatePos) == pThis->Core.Key);
#ifdef RTLOCKVAL_WITH_CLASS_HASH_STATS
    pThis->cHashHits = 0;
    pThis->cHashMisses = 0;
#endif

    *phClass = pThis;
    return VINF_SUCCESS;
}
1114
1115
1116RTDECL(int) RTLockValidatorClassCreate(PRTLOCKVALCLASS phClass, bool fAutodidact, RT_SRC_POS_DECL, const char *pszNameFmt, ...)
1117{
1118 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_POS_NO_ID();
1119 va_list va;
1120 va_start(va, pszNameFmt);
1121 int rc = RTLockValidatorClassCreateExV(phClass, &SrcPos,
1122 fAutodidact, true /*fRecursionOk*/, false /*fStrictReleaseOrder*/,
1123 1 /*cMsMinDeadlock*/, 1 /*cMsMinOrder*/,
1124 pszNameFmt, va);
1125 va_end(va);
1126 return rc;
1127}
1128
1129
1130/**
1131 * Creates a new lock validator class with a reference that is consumed by the
1132 * first call to RTLockValidatorClassRetain.
1133 *
1134 * This is tailored for use in the parameter list of a semaphore constructor.
1135 *
1136 * @returns Class handle with a reference that is automatically consumed by the
1137 * first retainer. NIL_RTLOCKVALCLASS if we run into trouble.
1138 *
1139 * @param SRC_POS The source position where call is being made from.
1140 * Use RT_SRC_POS when possible. Optional.
1141 * @param pszNameFmt Class name format string, optional (NULL). Max
1142 * length is 32 bytes.
1143 * @param ... Format string arguments.
1144 */
1145RTDECL(RTLOCKVALCLASS) RTLockValidatorClassCreateUnique(RT_SRC_POS_DECL, const char *pszNameFmt, ...)
1146{
1147 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_POS_NO_ID();
1148 RTLOCKVALCLASSINT *pClass;
1149 va_list va;
1150 va_start(va, pszNameFmt);
1151 int rc = RTLockValidatorClassCreateExV(&pClass, &SrcPos,
1152 true /*fAutodidact*/, true /*fRecursionOk*/, false /*fStrictReleaseOrder*/,
1153 1 /*cMsMinDeadlock*/, 1 /*cMsMinOrder*/,
1154 pszNameFmt, va);
1155 va_end(va);
1156 if (RT_FAILURE(rc))
1157 return NIL_RTLOCKVALCLASS;
1158 ASMAtomicWriteBool(&pClass->fDonateRefToNextRetainer, true); /* see rtLockValidatorClassRetain */
1159 return pClass;
1160}
1161
1162
/**
 * Internal class retainer.
 *
 * Increments the reference counter, saturating it at RTLOCKVALCLASS_MAX_REFS,
 * and consumes the reference donated by RTLockValidatorClassCreateUnique on
 * the first retain after creation (i.e. when cRefs goes 1 -> 2).
 *
 * @returns The new reference count.
 * @param   pClass      The class.
 */
DECL_FORCE_INLINE(uint32_t) rtLockValidatorClassRetain(RTLOCKVALCLASSINT *pClass)
{
    uint32_t cRefs = ASMAtomicIncU32(&pClass->cRefs);
    if (cRefs > RTLOCKVALCLASS_MAX_REFS)
        /* Saturate the counter so it can never wrap around. */
        ASMAtomicWriteU32(&pClass->cRefs, RTLOCKVALCLASS_MAX_REFS);
    else if (   cRefs == 2
             && ASMAtomicXchgBool(&pClass->fDonateRefToNextRetainer, false))
        /* Consume the donated creation reference (RTLockValidatorClassCreateUnique). */
        cRefs = ASMAtomicDecU32(&pClass->cRefs);
    return cRefs;
}
1178
1179
1180/**
1181 * Validates and retains a lock validator class.
1182 *
1183 * @returns @a hClass on success, NIL_RTLOCKVALCLASS on failure.
1184 * @param hClass The class handle. NIL_RTLOCKVALCLASS is ok.
1185 */
1186DECL_FORCE_INLINE(RTLOCKVALCLASS) rtLockValidatorClassValidateAndRetain(RTLOCKVALCLASS hClass)
1187{
1188 if (hClass == NIL_RTLOCKVALCLASS)
1189 return hClass;
1190 AssertPtrReturn(hClass, NIL_RTLOCKVALCLASS);
1191 AssertReturn(hClass->u32Magic == RTLOCKVALCLASS_MAGIC, NIL_RTLOCKVALCLASS);
1192 rtLockValidatorClassRetain(hClass);
1193 return hClass;
1194}
1195
1196
/**
 * Internal class releaser.
 *
 * Decrements the reference counter, keeping it pegged at the saturation value
 * if it ever reached RTLOCKVALCLASS_MAX_REFS, and destroys the class when the
 * count reaches zero.
 *
 * @returns The new reference count.
 * @param   pClass      The class.
 */
DECLINLINE(uint32_t) rtLockValidatorClassRelease(RTLOCKVALCLASSINT *pClass)
{
    uint32_t cRefs = ASMAtomicDecU32(&pClass->cRefs);
    if (cRefs + 1 == RTLOCKVALCLASS_MAX_REFS)
        /* The counter was saturated; undo the decrement and keep it pegged. */
        ASMAtomicWriteU32(&pClass->cRefs, RTLOCKVALCLASS_MAX_REFS);
    else if (!cRefs)
        /* Last reference is gone - destroy the class. */
        rtLockValidatorClassDestroy(pClass);
    return cRefs;
}
1211
1212
1213/**
1214 * Destroys a class once there are not more references to it.
1215 *
1216 * @param pClass The class.
1217 */
1218static void rtLockValidatorClassDestroy(RTLOCKVALCLASSINT *pClass)
1219{
1220 AssertReturnVoid(!pClass->fInTree);
1221 ASMAtomicWriteU32(&pClass->u32Magic, RTLOCKVALCLASS_MAGIC_DEAD);
1222
1223 PRTLOCKVALCLASSREFCHUNK pChunk = &pClass->PriorLocks;
1224 while (pChunk)
1225 {
1226 for (uint32_t i = 0; i < RT_ELEMENTS(pChunk->aRefs); i++)
1227 {
1228 RTLOCKVALCLASSINT *pClass2 = pChunk->aRefs[i].hClass;
1229 if (pClass2 != NIL_RTLOCKVALCLASS)
1230 {
1231 pChunk->aRefs[i].hClass = NIL_RTLOCKVALCLASS;
1232 rtLockValidatorClassRelease(pClass2);
1233 }
1234 }
1235
1236 PRTLOCKVALCLASSREFCHUNK pNext = pChunk->pNext;
1237 pChunk->pNext = NULL;
1238 if (pChunk != &pClass->PriorLocks)
1239 RTMemFree(pChunk);
1240 pChunk = pNext;
1241 }
1242
1243 RTMemFree(pClass);
1244}
1245
1246
1247RTDECL(RTLOCKVALCLASS) RTLockValidatorClassFindForSrcPos(PRTLOCKVALSRCPOS pSrcPos)
1248{
1249 if (g_hLockValClassTreeRWLock == NIL_RTSEMRW)
1250 rtLockValidatorLazyInit();
1251 int rcLock = RTSemRWRequestRead(g_hLockValClassTreeRWLock, RT_INDEFINITE_WAIT);
1252
1253 uint32_t uSrcPosHash = rtLockValidatorSrcPosHash(pSrcPos);
1254 RTLOCKVALCLASSINT *pClass = (RTLOCKVALCLASSINT *)RTAvllU32Get(&g_LockValClassTree, uSrcPosHash);
1255 while (pClass)
1256 {
1257 if (rtLockValidatorSrcPosCompare(&pClass->CreatePos, pSrcPos) == 0)
1258 break;
1259 pClass = (RTLOCKVALCLASSINT *)pClass->Core.pList;
1260 }
1261
1262 if (RT_SUCCESS(rcLock))
1263 RTSemRWReleaseRead(g_hLockValClassTreeRWLock);
1264 return pClass;
1265}
1266
1267
1268RTDECL(RTLOCKVALCLASS) RTLockValidatorClassForSrcPos(RT_SRC_POS_DECL, const char *pszNameFmt, ...)
1269{
1270 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_POS_NO_ID();
1271 RTLOCKVALCLASS hClass = RTLockValidatorClassFindForSrcPos(&SrcPos);
1272 if (hClass == NIL_RTLOCKVALCLASS)
1273 {
1274 /*
1275 * Create a new class and insert it into the tree.
1276 */
1277 va_list va;
1278 va_start(va, pszNameFmt);
1279 int rc = RTLockValidatorClassCreateExV(&hClass, &SrcPos,
1280 true /*fAutodidact*/, true /*fRecursionOk*/, false /*fStrictReleaseOrder*/,
1281 1 /*cMsMinDeadlock*/, 1 /*cMsMinOrder*/,
1282 pszNameFmt, va);
1283 va_end(va);
1284 if (RT_SUCCESS(rc))
1285 {
1286 if (g_hLockValClassTreeRWLock == NIL_RTSEMRW)
1287 rtLockValidatorLazyInit();
1288 int rcLock = RTSemRWRequestWrite(g_hLockValClassTreeRWLock, RT_INDEFINITE_WAIT);
1289
1290 Assert(!hClass->fInTree);
1291 hClass->fInTree = RTAvllU32Insert(&g_LockValClassTree, &hClass->Core);
1292 Assert(hClass->fInTree);
1293
1294 if (RT_SUCCESS(rcLock))
1295 RTSemRWReleaseWrite(g_hLockValClassTreeRWLock);
1296 return hClass;
1297 }
1298 }
1299 return hClass;
1300}
1301
1302
1303RTDECL(uint32_t) RTLockValidatorClassRetain(RTLOCKVALCLASS hClass)
1304{
1305 RTLOCKVALCLASSINT *pClass = hClass;
1306 AssertPtrReturn(pClass, UINT32_MAX);
1307 AssertReturn(pClass->u32Magic == RTLOCKVALCLASS_MAGIC, UINT32_MAX);
1308 return rtLockValidatorClassRetain(pClass);
1309}
1310
1311
1312RTDECL(uint32_t) RTLockValidatorClassRelease(RTLOCKVALCLASS hClass)
1313{
1314 RTLOCKVALCLASSINT *pClass = hClass;
1315 if (pClass == NIL_RTLOCKVALCLASS)
1316 return 0;
1317 AssertPtrReturn(pClass, UINT32_MAX);
1318 AssertReturn(pClass->u32Magic == RTLOCKVALCLASS_MAGIC, UINT32_MAX);
1319 return rtLockValidatorClassRelease(pClass);
1320}
1321
1322
/**
 * Worker for rtLockValidatorClassIsPriorClass that does a linear search thru
 * all the chunks for @a pPriorClass.
 *
 * @returns true / false.
 * @param   pClass      The class to search.
 * @param   pPriorClass The class to search for.
 */
static bool rtLockValidatorClassIsPriorClassByLinearSearch(RTLOCKVALCLASSINT *pClass, RTLOCKVALCLASSINT *pPriorClass)
{
    for (PRTLOCKVALCLASSREFCHUNK pChunk = &pClass->PriorLocks; pChunk; pChunk = pChunk->pNext)
        for (uint32_t i = 0; i < RT_ELEMENTS(pChunk->aRefs); i++)
        {
            if (pChunk->aRefs[i].hClass == pPriorClass)
            {
                /* Bump the lookup counter, saturating it at the maximum value. */
                uint32_t cLookups = ASMAtomicIncU32(&pChunk->aRefs[i].cLookups);
                if (RT_UNLIKELY(cLookups >= RTLOCKVALCLASSREF_MAX_LOOKUPS_FIX))
                {
                    ASMAtomicWriteU32(&pChunk->aRefs[i].cLookups, RTLOCKVALCLASSREF_MAX_LOOKUPS);
                    cLookups = RTLOCKVALCLASSREF_MAX_LOOKUPS;
                }

                /* update the hash table entry. */
                /* Cache this entry in the hash table when the slot is empty or
                   this entry is looked up clearly more often (128+ margin)
                   than the current occupant. */
                PRTLOCKVALCLASSREF *ppHashEntry = &pClass->apPriorLocksHash[RTLOCKVALCLASS_HASH(pPriorClass)];
                if (   !(*ppHashEntry)
                    || (*ppHashEntry)->cLookups + 128 < cLookups)
                    ASMAtomicWritePtr(ppHashEntry, &pChunk->aRefs[i]);

#ifdef RTLOCKVAL_WITH_CLASS_HASH_STATS
                ASMAtomicIncU32(&pClass->cHashMisses);
#endif
                return true;
            }
        }

    return false;
}
1360
1361
/**
 * Checks if @a pPriorClass is a known prior class.
 *
 * Tries the single-entry hash cache first and falls back on a linear search
 * of the chunk list on a miss.
 *
 * @returns true / false.
 * @param   pClass      The class to search.
 * @param   pPriorClass The class to search for.
 */
DECL_FORCE_INLINE(bool) rtLockValidatorClassIsPriorClass(RTLOCKVALCLASSINT *pClass, RTLOCKVALCLASSINT *pPriorClass)
{
    /*
     * Hash lookup here.
     */
    PRTLOCKVALCLASSREF pRef = pClass->apPriorLocksHash[RTLOCKVALCLASS_HASH(pPriorClass)];
    if (   pRef
        && pRef->hClass == pPriorClass)
    {
        /* Cache hit: bump the lookup counter (saturated at the maximum). */
        uint32_t cLookups = ASMAtomicIncU32(&pRef->cLookups);
        if (RT_UNLIKELY(cLookups >= RTLOCKVALCLASSREF_MAX_LOOKUPS_FIX))
            ASMAtomicWriteU32(&pRef->cLookups, RTLOCKVALCLASSREF_MAX_LOOKUPS);
#ifdef RTLOCKVAL_WITH_CLASS_HASH_STATS
        ASMAtomicIncU32(&pClass->cHashHits);
#endif
        return true;
    }

    /* Cache miss: scan the chunk list (which also updates the cache). */
    return rtLockValidatorClassIsPriorClassByLinearSearch(pClass, pPriorClass);
}
1389
1390
/**
 * Adds a class to the prior list.
 *
 * @returns VINF_SUCCESS, VERR_NO_MEMORY or VERR_SEM_LV_WRONG_ORDER.
 * @param   pClass              The class to work on.
 * @param   pPriorClass         The class to add.
 * @param   fAutodidacticism    Whether we're teaching ourselves (true) or
 *                              somebody is teaching us via the API (false).
 * @param   pSrcPos             Where this rule was added (optional).
 */
static int rtLockValidatorClassAddPriorClass(RTLOCKVALCLASSINT *pClass, RTLOCKVALCLASSINT *pPriorClass,
                                             bool fAutodidacticism, PCRTLOCKVALSRCPOS pSrcPos)
{
    NOREF(pSrcPos);
    /* Serialize all teaching through the global teach critical section. */
    if (!RTCritSectIsInitialized(&g_LockValClassTeachCS))
        rtLockValidatorLazyInit();
    int rcLock = RTCritSectEnter(&g_LockValClassTeachCS);

    /*
     * Check that there are no conflict (no assert since we might race each other).
     */
    int rc = VERR_SEM_LV_INTERNAL_ERROR;
    if (!rtLockValidatorClassIsPriorClass(pPriorClass, pClass))
    {
        if (!rtLockValidatorClassIsPriorClass(pClass, pPriorClass))
        {
            /*
             * Scan the table for a free entry, allocating a new chunk if necessary.
             */
            for (PRTLOCKVALCLASSREFCHUNK pChunk = &pClass->PriorLocks; ; pChunk = pChunk->pNext)
            {
                bool fDone = false;
                for (uint32_t i = 0; i < RT_ELEMENTS(pChunk->aRefs); i++)
                {
                    /* Claim the slot atomically; fDone is set on success. */
                    ASMAtomicCmpXchgHandle(&pChunk->aRefs[i].hClass, pPriorClass, NIL_RTLOCKVALCLASS, fDone);
                    if (fDone)
                    {
                        pChunk->aRefs[i].fAutodidacticism = fAutodidacticism;
                        rtLockValidatorClassRetain(pPriorClass);
                        rc = VINF_SUCCESS;
                        break;
                    }
                }
                if (fDone)
                    break;

                /* If no more chunks, allocate a new one and insert the class before linking it. */
                if (!pChunk->pNext)
                {
                    PRTLOCKVALCLASSREFCHUNK pNew = (PRTLOCKVALCLASSREFCHUNK)RTMemAlloc(sizeof(*pNew));
                    if (!pNew)
                    {
                        rc = VERR_NO_MEMORY;
                        break;
                    }
                    RTMEM_MAY_LEAK(pNew);
                    pNew->pNext = NULL;
                    /* Initialize all slots as free before publishing the chunk. */
                    for (uint32_t i = 0; i < RT_ELEMENTS(pNew->aRefs); i++)
                    {
                        pNew->aRefs[i].hClass = NIL_RTLOCKVALCLASS;
                        pNew->aRefs[i].cLookups = 0;
                        pNew->aRefs[i].fAutodidacticism = false;
                        pNew->aRefs[i].afReserved[0] = false;
                        pNew->aRefs[i].afReserved[1] = false;
                        pNew->aRefs[i].afReserved[2] = false;
                    }

                    pNew->aRefs[0].hClass = pPriorClass;
                    pNew->aRefs[0].fAutodidacticism = fAutodidacticism;

                    /* Publish only after the chunk is fully initialized. */
                    ASMAtomicWritePtr(&pChunk->pNext, pNew);
                    rtLockValidatorClassRetain(pPriorClass);
                    rc = VINF_SUCCESS;
                    break;
                }
            } /* chunk loop */
        }
        else
            /* Already a known prior class - nothing to do. */
            rc = VINF_SUCCESS;
    }
    else
        /* The reverse rule exists, so adding this one would create a cycle. */
        rc = !g_fLockValSoftWrongOrder ? VERR_SEM_LV_WRONG_ORDER : VINF_SUCCESS;

    if (RT_SUCCESS(rcLock))
        RTCritSectLeave(&g_LockValClassTeachCS);
    return rc;
}
1478
1479
1480RTDECL(int) RTLockValidatorClassAddPriorClass(RTLOCKVALCLASS hClass, RTLOCKVALCLASS hPriorClass)
1481{
1482 RTLOCKVALCLASSINT *pClass = hClass;
1483 AssertPtrReturn(pClass, VERR_INVALID_HANDLE);
1484 AssertReturn(pClass->u32Magic == RTLOCKVALCLASS_MAGIC, VERR_INVALID_HANDLE);
1485
1486 RTLOCKVALCLASSINT *pPriorClass = hPriorClass;
1487 AssertPtrReturn(pPriorClass, VERR_INVALID_HANDLE);
1488 AssertReturn(pPriorClass->u32Magic == RTLOCKVALCLASS_MAGIC, VERR_INVALID_HANDLE);
1489
1490 return rtLockValidatorClassAddPriorClass(pClass, pPriorClass, false /*fAutodidacticism*/, NULL);
1491}
1492
1493
1494RTDECL(int) RTLockValidatorClassEnforceStrictReleaseOrder(RTLOCKVALCLASS hClass, bool fEnabled)
1495{
1496 RTLOCKVALCLASSINT *pClass = hClass;
1497 AssertPtrReturn(pClass, VERR_INVALID_HANDLE);
1498 AssertReturn(pClass->u32Magic == RTLOCKVALCLASS_MAGIC, VERR_INVALID_HANDLE);
1499
1500 ASMAtomicWriteBool(&pClass->fStrictReleaseOrder, fEnabled);
1501 return VINF_SUCCESS;
1502}
1503
1504
1505/**
1506 * Unlinks all siblings.
1507 *
1508 * This is used during record deletion and assumes no races.
1509 *
1510 * @param pCore One of the siblings.
1511 */
1512static void rtLockValidatorUnlinkAllSiblings(PRTLOCKVALRECCORE pCore)
1513{
1514 /* ASSUMES sibling destruction doesn't involve any races and that all
1515 related records are to be disposed off now. */
1516 PRTLOCKVALRECUNION pSibling = (PRTLOCKVALRECUNION)pCore;
1517 while (pSibling)
1518 {
1519 PRTLOCKVALRECUNION volatile *ppCoreNext;
1520 switch (pSibling->Core.u32Magic)
1521 {
1522 case RTLOCKVALRECEXCL_MAGIC:
1523 case RTLOCKVALRECEXCL_MAGIC_DEAD:
1524 ppCoreNext = &pSibling->Excl.pSibling;
1525 break;
1526
1527 case RTLOCKVALRECSHRD_MAGIC:
1528 case RTLOCKVALRECSHRD_MAGIC_DEAD:
1529 ppCoreNext = &pSibling->Shared.pSibling;
1530 break;
1531
1532 default:
1533 AssertFailed();
1534 ppCoreNext = NULL;
1535 break;
1536 }
1537 if (RT_UNLIKELY(ppCoreNext))
1538 break;
1539 pSibling = ASMAtomicXchgPtrT(ppCoreNext, NULL, PRTLOCKVALRECUNION);
1540 }
1541}
1542
1543
1544RTDECL(int) RTLockValidatorRecMakeSiblings(PRTLOCKVALRECCORE pRec1, PRTLOCKVALRECCORE pRec2)
1545{
1546 /*
1547 * Validate input.
1548 */
1549 PRTLOCKVALRECUNION p1 = (PRTLOCKVALRECUNION)pRec1;
1550 PRTLOCKVALRECUNION p2 = (PRTLOCKVALRECUNION)pRec2;
1551
1552 AssertPtrReturn(p1, VERR_SEM_LV_INVALID_PARAMETER);
1553 AssertReturn( p1->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC
1554 || p1->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
1555 , VERR_SEM_LV_INVALID_PARAMETER);
1556
1557 AssertPtrReturn(p2, VERR_SEM_LV_INVALID_PARAMETER);
1558 AssertReturn( p2->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC
1559 || p2->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
1560 , VERR_SEM_LV_INVALID_PARAMETER);
1561
1562 /*
1563 * Link them (circular list).
1564 */
1565 if ( p1->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC
1566 && p2->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC)
1567 {
1568 p1->Excl.pSibling = p2;
1569 p2->Shared.pSibling = p1;
1570 }
1571 else if ( p1->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
1572 && p2->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC)
1573 {
1574 p1->Shared.pSibling = p2;
1575 p2->Excl.pSibling = p1;
1576 }
1577 else
1578 AssertFailedReturn(VERR_SEM_LV_INVALID_PARAMETER); /* unsupported mix */
1579
1580 return VINF_SUCCESS;
1581}
1582
1583
#if 0 /* unused */
/**
 * Gets the lock name for the given record.
 *
 * @returns Read-only lock name.
 * @param   pRec        The lock record.
 */
DECL_FORCE_INLINE(const char *) rtLockValidatorRecName(PRTLOCKVALRECUNION pRec)
{
    switch (pRec->Core.u32Magic)
    {
        case RTLOCKVALRECEXCL_MAGIC:
            return pRec->Excl.szName;
        case RTLOCKVALRECSHRD_MAGIC:
            return pRec->Shared.szName;
        case RTLOCKVALRECSHRDOWN_MAGIC:
            /* Owner records borrow the name of the shared record they belong to. */
            return pRec->ShrdOwner.pSharedRec ? pRec->ShrdOwner.pSharedRec->szName : "orphaned";
        case RTLOCKVALRECNEST_MAGIC:
            /* Recursion records defer to the record they wrap, which may be gone. */
            pRec = rtLockValidatorReadRecUnionPtr(&pRec->Nest.pRec);
            if (RT_VALID_PTR(pRec))
            {
                switch (pRec->Core.u32Magic)
                {
                    case RTLOCKVALRECEXCL_MAGIC:
                        return pRec->Excl.szName;
                    case RTLOCKVALRECSHRD_MAGIC:
                        return pRec->Shared.szName;
                    case RTLOCKVALRECSHRDOWN_MAGIC:
                        return pRec->ShrdOwner.pSharedRec ? pRec->ShrdOwner.pSharedRec->szName : "orphaned";
                    default:
                        return "unknown-nested";
                }
            }
            return "orphaned-nested";
        default:
            return "unknown";
    }
}
#endif /* unused */
1623
1624
#if 0 /* unused */
/**
 * Gets the class for this locking record.
 *
 * @returns Pointer to the class or NIL_RTLOCKVALCLASS.
 * @param   pRec        The lock validator record.
 */
DECLINLINE(RTLOCKVALCLASSINT *) rtLockValidatorRecGetClass(PRTLOCKVALRECUNION pRec)
{
    switch (pRec->Core.u32Magic)
    {
        case RTLOCKVALRECEXCL_MAGIC:
            return pRec->Excl.hClass;

        case RTLOCKVALRECSHRD_MAGIC:
            return pRec->Shared.hClass;

        case RTLOCKVALRECSHRDOWN_MAGIC:
        {
            /* Owner records get the class from the shared record they belong
               to, if that record is still alive. */
            PRTLOCKVALRECSHRD pSharedRec = pRec->ShrdOwner.pSharedRec;
            if (RT_LIKELY(   RT_VALID_PTR(pSharedRec)
                          && pSharedRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC))
                return pSharedRec->hClass;
            return NIL_RTLOCKVALCLASS;
        }

        case RTLOCKVALRECNEST_MAGIC:
        {
            /* Recursion records defer to the record they wrap. */
            PRTLOCKVALRECUNION pRealRec = pRec->Nest.pRec;
            if (RT_VALID_PTR(pRealRec))
            {
                switch (pRealRec->Core.u32Magic)
                {
                    case RTLOCKVALRECEXCL_MAGIC:
                        return pRealRec->Excl.hClass;

                    case RTLOCKVALRECSHRDOWN_MAGIC:
                    {
                        PRTLOCKVALRECSHRD pSharedRec = pRealRec->ShrdOwner.pSharedRec;
                        if (RT_LIKELY(   RT_VALID_PTR(pSharedRec)
                                      && pSharedRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC))
                            return pSharedRec->hClass;
                        break;
                    }

                    default:
                        AssertMsgFailed(("%p %p %#x\n", pRec, pRealRec, pRealRec->Core.u32Magic));
                        break;
                }
            }
            return NIL_RTLOCKVALCLASS;
        }

        default:
            AssertMsgFailed(("%#x\n", pRec->Core.u32Magic));
            return NIL_RTLOCKVALCLASS;
    }
}
#endif /* unused */
1684
/**
 * Gets the class for this locking record and the pointer to the one below it in
 * the stack.
 *
 * @returns Pointer to the class or NIL_RTLOCKVALCLASS.
 * @param   pRec            The lock validator record.
 * @param   puSubClass      Where to return the sub-class.
 * @param   ppDown          Where to return the pointer to the record below.
 */
DECL_FORCE_INLINE(RTLOCKVALCLASSINT *)
rtLockValidatorRecGetClassesAndDown(PRTLOCKVALRECUNION pRec, uint32_t *puSubClass, PRTLOCKVALRECUNION *ppDown)
{
    switch (pRec->Core.u32Magic)
    {
        case RTLOCKVALRECEXCL_MAGIC:
            *ppDown = pRec->Excl.pDown;
            *puSubClass = pRec->Excl.uSubClass;
            return pRec->Excl.hClass;

        case RTLOCKVALRECSHRD_MAGIC:
            /* Shared records are not linked on the stack themselves. */
            *ppDown = NULL;
            *puSubClass = pRec->Shared.uSubClass;
            return pRec->Shared.hClass;

        case RTLOCKVALRECSHRDOWN_MAGIC:
        {
            *ppDown = pRec->ShrdOwner.pDown;

            /* Class and sub-class come from the shared record the owner
               record belongs to, if it is still alive. */
            PRTLOCKVALRECSHRD pSharedRec = pRec->ShrdOwner.pSharedRec;
            if (RT_LIKELY(   RT_VALID_PTR(pSharedRec)
                          && pSharedRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC))
            {
                *puSubClass = pSharedRec->uSubClass;
                return pSharedRec->hClass;
            }
            *puSubClass = RTLOCKVAL_SUB_CLASS_NONE;
            return NIL_RTLOCKVALCLASS;
        }

        case RTLOCKVALRECNEST_MAGIC:
        {
            *ppDown = pRec->Nest.pDown;

            /* Recursion records defer to the record they wrap. */
            PRTLOCKVALRECUNION pRealRec = pRec->Nest.pRec;
            if (RT_VALID_PTR(pRealRec))
            {
                switch (pRealRec->Core.u32Magic)
                {
                    case RTLOCKVALRECEXCL_MAGIC:
                        *puSubClass = pRealRec->Excl.uSubClass;
                        return pRealRec->Excl.hClass;

                    case RTLOCKVALRECSHRDOWN_MAGIC:
                    {
                        PRTLOCKVALRECSHRD pSharedRec = pRealRec->ShrdOwner.pSharedRec;
                        if (RT_LIKELY(   RT_VALID_PTR(pSharedRec)
                                      && pSharedRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC))
                        {
                            *puSubClass = pSharedRec->uSubClass;
                            return pSharedRec->hClass;
                        }
                        break;
                    }

                    default:
                        AssertMsgFailed(("%p %p %#x\n", pRec, pRealRec, pRealRec->Core.u32Magic));
                        break;
                }
            }
            *puSubClass = RTLOCKVAL_SUB_CLASS_NONE;
            return NIL_RTLOCKVALCLASS;
        }

        default:
            AssertMsgFailed(("%#x\n", pRec->Core.u32Magic));
            *ppDown = NULL;
            *puSubClass = RTLOCKVAL_SUB_CLASS_NONE;
            return NIL_RTLOCKVALCLASS;
    }
}
1765
1766
1767/**
1768 * Gets the sub-class for a lock record.
1769 *
1770 * @returns the sub-class.
1771 * @param pRec The lock validator record.
1772 */
1773DECLINLINE(uint32_t) rtLockValidatorRecGetSubClass(PRTLOCKVALRECUNION pRec)
1774{
1775 switch (pRec->Core.u32Magic)
1776 {
1777 case RTLOCKVALRECEXCL_MAGIC:
1778 return pRec->Excl.uSubClass;
1779
1780 case RTLOCKVALRECSHRD_MAGIC:
1781 return pRec->Shared.uSubClass;
1782
1783 case RTLOCKVALRECSHRDOWN_MAGIC:
1784 {
1785 PRTLOCKVALRECSHRD pSharedRec = pRec->ShrdOwner.pSharedRec;
1786 if (RT_LIKELY( RT_VALID_PTR(pSharedRec)
1787 && pSharedRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC))
1788 return pSharedRec->uSubClass;
1789 return RTLOCKVAL_SUB_CLASS_NONE;
1790 }
1791
1792 case RTLOCKVALRECNEST_MAGIC:
1793 {
1794 PRTLOCKVALRECUNION pRealRec = pRec->Nest.pRec;
1795 if (RT_VALID_PTR(pRealRec))
1796 {
1797 switch (pRealRec->Core.u32Magic)
1798 {
1799 case RTLOCKVALRECEXCL_MAGIC:
1800 return pRec->Excl.uSubClass;
1801
1802 case RTLOCKVALRECSHRDOWN_MAGIC:
1803 {
1804 PRTLOCKVALRECSHRD pSharedRec = pRealRec->ShrdOwner.pSharedRec;
1805 if (RT_LIKELY( RT_VALID_PTR(pSharedRec)
1806 && pSharedRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC))
1807 return pSharedRec->uSubClass;
1808 break;
1809 }
1810
1811 default:
1812 AssertMsgFailed(("%p %p %#x\n", pRec, pRealRec, pRealRec->Core.u32Magic));
1813 break;
1814 }
1815 }
1816 return RTLOCKVAL_SUB_CLASS_NONE;
1817 }
1818
1819 default:
1820 AssertMsgFailed(("%#x\n", pRec->Core.u32Magic));
1821 return RTLOCKVAL_SUB_CLASS_NONE;
1822 }
1823}
1824
1825
1826
1827
1828/**
1829 * Calculates the depth of a lock stack.
1830 *
1831 * @returns Number of stack frames.
1832 * @param pThread The thread.
1833 */
1834static uint32_t rtLockValidatorStackDepth(PRTTHREADINT pThread)
1835{
1836 uint32_t cEntries = 0;
1837 PRTLOCKVALRECUNION pCur = rtLockValidatorReadRecUnionPtr(&pThread->LockValidator.pStackTop);
1838 while (RT_VALID_PTR(pCur))
1839 {
1840 switch (pCur->Core.u32Magic)
1841 {
1842 case RTLOCKVALRECEXCL_MAGIC:
1843 pCur = rtLockValidatorReadRecUnionPtr(&pCur->Excl.pDown);
1844 break;
1845
1846 case RTLOCKVALRECSHRDOWN_MAGIC:
1847 pCur = rtLockValidatorReadRecUnionPtr(&pCur->ShrdOwner.pDown);
1848 break;
1849
1850 case RTLOCKVALRECNEST_MAGIC:
1851 pCur = rtLockValidatorReadRecUnionPtr(&pCur->Nest.pDown);
1852 break;
1853
1854 default:
1855 AssertMsgFailedReturn(("%#x\n", pCur->Core.u32Magic), cEntries);
1856 }
1857 cEntries++;
1858 }
1859 return cEntries;
1860}
1861
1862
#ifdef RT_STRICT
/**
 * Checks if the stack contains @a pRec.
 *
 * Only used by assertions in the stack push code; strict builds only.
 *
 * @returns true / false.
 * @param   pThreadSelf     The current thread.
 * @param   pRec            The lock record.
 */
static bool rtLockValidatorStackContainsRec(PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION pRec)
{
    /* Walk the pDown chain from the top of the lock stack. */
    PRTLOCKVALRECUNION pCur = pThreadSelf->LockValidator.pStackTop;
    while (pCur)
    {
        AssertPtrReturn(pCur, false);
        if (pCur == pRec)
            return true;
        switch (pCur->Core.u32Magic)
        {
            case RTLOCKVALRECEXCL_MAGIC:
                Assert(pCur->Excl.cRecursion >= 1);
                pCur = pCur->Excl.pDown;
                break;

            case RTLOCKVALRECSHRDOWN_MAGIC:
                Assert(pCur->ShrdOwner.cRecursion >= 1);
                pCur = pCur->ShrdOwner.pDown;
                break;

            case RTLOCKVALRECNEST_MAGIC:
                /* Nest records only exist for actual recursion, hence > 1. */
                Assert(pCur->Nest.cRecursion > 1);
                pCur = pCur->Nest.pDown;
                break;

            default:
                AssertMsgFailedReturn(("%#x\n", pCur->Core.u32Magic), false);
        }
    }
    return false;
}
#endif /* RT_STRICT */
1903
1904
/**
 * Pushes a lock record onto the stack.
 *
 * @param   pThreadSelf     The current thread.
 * @param   pRec            The lock record.
 */
static void rtLockValidatorStackPush(PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION pRec)
{
    Assert(pThreadSelf == RTThreadSelf());
    Assert(!rtLockValidatorStackContainsRec(pThreadSelf, pRec));

    /* Link the record's pDown pointer to the current stack top first, then
       publish it as the new top.  Only first-acquisition records (recursion
       count 1) go directly on the stack; recursion uses nest records. */
    switch (pRec->Core.u32Magic)
    {
        case RTLOCKVALRECEXCL_MAGIC:
            Assert(pRec->Excl.cRecursion == 1);
            Assert(pRec->Excl.pDown == NULL);
            rtLockValidatorWriteRecUnionPtr(&pRec->Excl.pDown, pThreadSelf->LockValidator.pStackTop);
            break;

        case RTLOCKVALRECSHRDOWN_MAGIC:
            Assert(pRec->ShrdOwner.cRecursion == 1);
            Assert(pRec->ShrdOwner.pDown == NULL);
            rtLockValidatorWriteRecUnionPtr(&pRec->ShrdOwner.pDown, pThreadSelf->LockValidator.pStackTop);
            break;

        default:
            AssertMsgFailedReturnVoid(("%#x\n", pRec->Core.u32Magic));
    }
    rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pStackTop, pRec);
}
1935
1936
/**
 * Pops a lock record off the stack.
 *
 * The record is normally the top of the stack, but out-of-order releases are
 * supported by unlinking it from the middle of the pDown chain.
 *
 * @param   pThreadSelf     The current thread.
 * @param   pRec            The lock.
 */
static void rtLockValidatorStackPop(PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION pRec)
{
    Assert(pThreadSelf == RTThreadSelf());

    /* Detach the record, remembering what was below it. */
    PRTLOCKVALRECUNION pDown;
    switch (pRec->Core.u32Magic)
    {
        case RTLOCKVALRECEXCL_MAGIC:
            Assert(pRec->Excl.cRecursion == 0);
            pDown = pRec->Excl.pDown;
            rtLockValidatorWriteRecUnionPtr(&pRec->Excl.pDown, NULL); /* lazy bird */
            break;

        case RTLOCKVALRECSHRDOWN_MAGIC:
            Assert(pRec->ShrdOwner.cRecursion == 0);
            pDown = pRec->ShrdOwner.pDown;
            rtLockValidatorWriteRecUnionPtr(&pRec->ShrdOwner.pDown, NULL);
            break;

        default:
            AssertMsgFailedReturnVoid(("%#x\n", pRec->Core.u32Magic));
    }
    /* Fast path: the record is the stack top. */
    if (pThreadSelf->LockValidator.pStackTop == pRec)
        rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pStackTop, pDown);
    else
    {
        /* Find the pointer to our record and unlink ourselves. */
        PRTLOCKVALRECUNION pCur = pThreadSelf->LockValidator.pStackTop;
        while (pCur)
        {
            PRTLOCKVALRECUNION volatile *ppDown;
            switch (pCur->Core.u32Magic)
            {
                case RTLOCKVALRECEXCL_MAGIC:
                    Assert(pCur->Excl.cRecursion >= 1);
                    ppDown = &pCur->Excl.pDown;
                    break;

                case RTLOCKVALRECSHRDOWN_MAGIC:
                    Assert(pCur->ShrdOwner.cRecursion >= 1);
                    ppDown = &pCur->ShrdOwner.pDown;
                    break;

                case RTLOCKVALRECNEST_MAGIC:
                    Assert(pCur->Nest.cRecursion >= 1);
                    ppDown = &pCur->Nest.pDown;
                    break;

                default:
                    AssertMsgFailedReturnVoid(("%#x\n", pCur->Core.u32Magic));
            }
            pCur = *ppDown;
            if (pCur == pRec)
            {
                /* Bypass the record in the chain. */
                rtLockValidatorWriteRecUnionPtr(ppDown, pDown);
                return;
            }
        }
        /* Reaching here means the record was not on the stack at all. */
        AssertMsgFailed(("%p %p\n", pRec, pThreadSelf));
    }
}
2004
2005
/**
 * Creates and pushes lock recursion record onto the stack.
 *
 * The recursion record snapshots the current recursion count and the source
 * position of the re-entry, so rtLockValidatorStackPopRecursion can match and
 * remove it later.  Allocation failure is silently tolerated - the validator
 * is best effort.
 *
 * @param   pThreadSelf     The current thread.
 * @param   pRec            The lock record (exclusive or shared-owner).
 * @param   pSrcPos         Where the recursion occurred.
 */
static void rtLockValidatorStackPushRecursion(PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION pRec, PCRTLOCKVALSRCPOS pSrcPos)
{
    Assert(pThreadSelf == RTThreadSelf());
    Assert(rtLockValidatorStackContainsRec(pThreadSelf, pRec));

#ifdef RTLOCKVAL_WITH_RECURSION_RECORDS
    /*
     * Allocate a new recursion record, preferring to recycle one from the
     * thread's free list.
     */
    PRTLOCKVALRECNEST pRecursionRec = pThreadSelf->LockValidator.pFreeNestRecs;
    if (pRecursionRec)
        pThreadSelf->LockValidator.pFreeNestRecs = pRecursionRec->pNextFree;
    else
    {
        pRecursionRec = (PRTLOCKVALRECNEST)RTMemAlloc(sizeof(*pRecursionRec));
        if (!pRecursionRec)
            return; /* best effort - skip the recursion record */
    }

    /*
     * Initialize it.
     */
    switch (pRec->Core.u32Magic)
    {
        case RTLOCKVALRECEXCL_MAGIC:
            pRecursionRec->cRecursion = pRec->Excl.cRecursion;
            break;

        case RTLOCKVALRECSHRDOWN_MAGIC:
            pRecursionRec->cRecursion = pRec->ShrdOwner.cRecursion;
            break;

        default:
            AssertMsgFailed(("%#x\n", pRec->Core.u32Magic));
            /* NOTE(review): the enter/leave pair presumably lets concurrent
               destruct-serialized walkers drain before we free - confirm. */
            rtLockValidatorSerializeDestructEnter();
            rtLockValidatorSerializeDestructLeave();
            RTMemFree(pRecursionRec);
            return;
    }
    Assert(pRecursionRec->cRecursion > 1);
    pRecursionRec->pRec      = pRec;
    pRecursionRec->pDown     = NULL;
    pRecursionRec->pNextFree = NULL;
    rtLockValidatorSrcPosCopy(&pRecursionRec->SrcPos, pSrcPos);
    /* Magic is written last so the record never looks valid half-initialized. */
    pRecursionRec->Core.u32Magic = RTLOCKVALRECNEST_MAGIC;

    /*
     * Link it.
     */
    pRecursionRec->pDown = pThreadSelf->LockValidator.pStackTop;
    rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pStackTop, (PRTLOCKVALRECUNION)pRecursionRec);
#endif /* RTLOCKVAL_WITH_RECURSION_RECORDS */
}
2066
2067
2068/**
2069 * Pops a lock recursion record off the stack.
2070 *
2071 * @param pThreadSelf The current thread.
2072 * @param pRec The lock record.
2073 */
2074static void rtLockValidatorStackPopRecursion(PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION pRec)
2075{
2076 Assert(pThreadSelf == RTThreadSelf());
2077 Assert(rtLockValidatorStackContainsRec(pThreadSelf, pRec));
2078
2079 uint32_t cRecursion;
2080 switch (pRec->Core.u32Magic)
2081 {
2082 case RTLOCKVALRECEXCL_MAGIC: cRecursion = pRec->Excl.cRecursion; break;
2083 case RTLOCKVALRECSHRDOWN_MAGIC: cRecursion = pRec->ShrdOwner.cRecursion; break;
2084 default: AssertMsgFailedReturnVoid(("%#x\n", pRec->Core.u32Magic));
2085 }
2086 Assert(cRecursion >= 1);
2087
2088#ifdef RTLOCKVAL_WITH_RECURSION_RECORDS
2089 /*
2090 * Pop the recursion record.
2091 */
2092 PRTLOCKVALRECUNION pNest = pThreadSelf->LockValidator.pStackTop;
2093 if ( pNest != NULL
2094 && pNest->Core.u32Magic == RTLOCKVALRECNEST_MAGIC
2095 && pNest->Nest.pRec == pRec
2096 )
2097 {
2098 Assert(pNest->Nest.cRecursion == cRecursion + 1);
2099 rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pStackTop, pNest->Nest.pDown);
2100 }
2101 else
2102 {
2103 /* Find the record above ours. */
2104 PRTLOCKVALRECUNION volatile *ppDown = NULL;
2105 for (;;)
2106 {
2107 AssertMsgReturnVoid(pNest, ("%p %p\n", pRec, pThreadSelf));
2108 switch (pNest->Core.u32Magic)
2109 {
2110 case RTLOCKVALRECEXCL_MAGIC:
2111 ppDown = &pNest->Excl.pDown;
2112 pNest = *ppDown;
2113 continue;
2114 case RTLOCKVALRECSHRDOWN_MAGIC:
2115 ppDown = &pNest->ShrdOwner.pDown;
2116 pNest = *ppDown;
2117 continue;
2118 case RTLOCKVALRECNEST_MAGIC:
2119 if (pNest->Nest.pRec == pRec)
2120 break;
2121 ppDown = &pNest->Nest.pDown;
2122 pNest = *ppDown;
2123 continue;
2124 default:
2125 AssertMsgFailedReturnVoid(("%#x\n", pNest->Core.u32Magic));
2126 }
2127 break; /* ugly */
2128 }
2129 Assert(pNest->Nest.cRecursion == cRecursion + 1);
2130 rtLockValidatorWriteRecUnionPtr(ppDown, pNest->Nest.pDown);
2131 }
2132
2133 /*
2134 * Invalidate and free the record.
2135 */
2136 ASMAtomicWriteU32(&pNest->Core.u32Magic, RTLOCKVALRECNEST_MAGIC);
2137 rtLockValidatorWriteRecUnionPtr(&pNest->Nest.pDown, NULL);
2138 rtLockValidatorWriteRecUnionPtr(&pNest->Nest.pRec, NULL);
2139 pNest->Nest.cRecursion = 0;
2140 pNest->Nest.pNextFree = pThreadSelf->LockValidator.pFreeNestRecs;
2141 pThreadSelf->LockValidator.pFreeNestRecs = &pNest->Nest;
2142#endif /* RTLOCKVAL_WITH_RECURSION_RECORDS */
2143}
2144
2145
2146/**
2147 * Helper for rtLockValidatorStackCheckLockingOrder that does the bitching and
2148 * returns VERR_SEM_LV_WRONG_ORDER.
2149 */
2150static int rtLockValidatorStackWrongOrder(const char *pszWhat, PCRTLOCKVALSRCPOS pSrcPos, PRTTHREADINT pThreadSelf,
2151 PRTLOCKVALRECUNION pRec1, PRTLOCKVALRECUNION pRec2,
2152 RTLOCKVALCLASSINT *pClass1, RTLOCKVALCLASSINT *pClass2)
2153
2154
2155{
2156 rtLockValComplainFirst(pszWhat, pSrcPos, pThreadSelf, pRec1, false);
2157 rtLockValComplainAboutLock("Other lock: ", pRec2, "\n");
2158 rtLockValComplainAboutClass("My class: ", pClass1, rtLockValidatorRecGetSubClass(pRec1), true /*fVerbose*/);
2159 rtLockValComplainAboutClass("Other class: ", pClass2, rtLockValidatorRecGetSubClass(pRec2), true /*fVerbose*/);
2160 rtLockValComplainAboutLockStack(pThreadSelf, 0, 0, pRec2);
2161 rtLockValComplainPanic();
2162 return !g_fLockValSoftWrongOrder ? VERR_SEM_LV_WRONG_ORDER : VINF_SUCCESS;
2163}
2164
2165
2166/**
2167 * Checks if the sub-class order is ok or not.
2168 *
2169 * Used to deal with two locks from the same class.
2170 *
2171 * @returns true if ok, false if not.
2172 * @param uSubClass1 The sub-class of the lock that is being
2173 * considered.
2174 * @param uSubClass2 The sub-class of the lock that is already being
2175 * held.
2176 */
2177DECL_FORCE_INLINE(bool) rtLockValidatorIsSubClassOrderOk(uint32_t uSubClass1, uint32_t uSubClass2)
2178{
2179 if (uSubClass1 > uSubClass2)
2180 {
2181 /* NONE kills ANY. */
2182 if (uSubClass2 == RTLOCKVAL_SUB_CLASS_NONE)
2183 return false;
2184 return true;
2185 }
2186
2187 /* ANY counters all USER values. (uSubClass1 == NONE only if they are equal) */
2188 AssertCompile(RTLOCKVAL_SUB_CLASS_ANY > RTLOCKVAL_SUB_CLASS_NONE);
2189 if (uSubClass1 == RTLOCKVAL_SUB_CLASS_ANY)
2190 return true;
2191 return false;
2192}
2193
2194
2195/**
2196 * Checks if the class and sub-class lock order is ok.
2197 *
2198 * @returns true if ok, false if not.
2199 * @param pClass1 The class of the lock that is being considered.
2200 * @param uSubClass1 The sub-class that goes with @a pClass1.
2201 * @param pClass2 The class of the lock that is already being
2202 * held.
2203 * @param uSubClass2 The sub-class that goes with @a pClass2.
2204 */
2205DECL_FORCE_INLINE(bool) rtLockValidatorIsClassOrderOk(RTLOCKVALCLASSINT *pClass1, uint32_t uSubClass1,
2206 RTLOCKVALCLASSINT *pClass2, uint32_t uSubClass2)
2207{
2208 if (pClass1 == pClass2)
2209 return rtLockValidatorIsSubClassOrderOk(uSubClass1, uSubClass2);
2210 return rtLockValidatorClassIsPriorClass(pClass1, pClass2);
2211}
2212
2213
/**
 * Checks the locking order, part two.
 *
 * Called by rtLockValidatorStackCheckLockingOrder once a class order
 * violation has been spotted on the lock stack.  Either raises the complaint,
 * or - for autodidactic classes - teaches the class the new ordering rule(s),
 * provided no direct contradiction exists anywhere on the stack.
 *
 * @returns VINF_SUCCESS, VERR_SEM_LV_WRONG_ORDER or VERR_SEM_LV_INTERNAL_ERROR.
 * @param   pClass          The class of the lock being acquired.
 * @param   uSubClass       The lock sub-class.
 * @param   pThreadSelf     The current thread.
 * @param   pRec            The lock record being acquired.
 * @param   pSrcPos         The source position of the locking operation.
 * @param   pFirstBadClass  The class of the first conflicting lock found.
 * @param   pFirstBadRec    The first bad lock record.
 * @param   pFirstBadDown   The next record on the lock stack.
 */
static int rtLockValidatorStackCheckLockingOrder2(RTLOCKVALCLASSINT * const pClass, uint32_t const uSubClass,
                                                  PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION const pRec,
                                                  PCRTLOCKVALSRCPOS const pSrcPos,
                                                  RTLOCKVALCLASSINT * const pFirstBadClass,
                                                  PRTLOCKVALRECUNION const pFirstBadRec,
                                                  PRTLOCKVALRECUNION const pFirstBadDown)
{
    /*
     * Something went wrong, pCur is pointing to where.
     *
     * A hard violation (equal classes or an explicit reverse ordering rule)
     * cannot be taught away; neither can anything be taught to a class that
     * is not autodidactic.
     */
    if (   pClass == pFirstBadClass
        || rtLockValidatorClassIsPriorClass(pFirstBadClass, pClass))
        return rtLockValidatorStackWrongOrder("Wrong locking order!", pSrcPos, pThreadSelf,
                                              pRec, pFirstBadRec, pClass, pFirstBadClass);
    if (!pClass->fAutodidact)
        return rtLockValidatorStackWrongOrder("Wrong locking order! (unknown)", pSrcPos, pThreadSelf,
                                              pRec, pFirstBadRec, pClass, pFirstBadClass);

    /*
     * This class is an autodidact, so we have to check out the rest of the stack
     * for direct violations.  Count how many new rules we would need to add.
     */
    uint32_t cNewRules = 1;
    PRTLOCKVALRECUNION pCur = pFirstBadDown;
    while (pCur)
    {
        AssertPtrReturn(pCur, VERR_SEM_LV_INTERNAL_ERROR);

        if (pCur->Core.u32Magic == RTLOCKVALRECNEST_MAGIC)
            pCur = pCur->Nest.pDown;    /* recursion records carry no class info */
        else
        {
            PRTLOCKVALRECUNION pDown;
            uint32_t uPriorSubClass;
            RTLOCKVALCLASSINT *pPriorClass = rtLockValidatorRecGetClassesAndDown(pCur, &uPriorSubClass, &pDown);
            if (pPriorClass != NIL_RTLOCKVALCLASS)
            {
                AssertPtrReturn(pPriorClass, VERR_SEM_LV_INTERNAL_ERROR);
                AssertReturn(pPriorClass->u32Magic == RTLOCKVALCLASS_MAGIC, VERR_SEM_LV_INTERNAL_ERROR);
                if (!rtLockValidatorIsClassOrderOk(pClass, uSubClass, pPriorClass, uPriorSubClass))
                {
                    /* Direct contradiction => genuine violation, give up. */
                    if (   pClass == pPriorClass
                        || rtLockValidatorClassIsPriorClass(pPriorClass, pClass))
                        return rtLockValidatorStackWrongOrder("Wrong locking order! (more than one)", pSrcPos, pThreadSelf,
                                                              pRec, pCur, pClass, pPriorClass);
                    cNewRules++;
                }
            }
            pCur = pDown;
        }
    }

    if (cNewRules == 1)
    {
        /*
         * Special case the simple operation, hoping that it will be a
         * frequent case.
         */
        int rc = rtLockValidatorClassAddPriorClass(pClass, pFirstBadClass, true /*fAutodidacticism*/, pSrcPos);
        if (rc == VERR_SEM_LV_WRONG_ORDER)
            return rtLockValidatorStackWrongOrder("Wrong locking order! (race)", pSrcPos, pThreadSelf,
                                                  pRec, pFirstBadRec, pClass, pFirstBadClass);
        Assert(RT_SUCCESS(rc) || rc == VERR_NO_MEMORY);
    }
    else
    {
        /*
         * We may be adding more than one rule, so we have to take the lock
         * before starting to add the rules.  This means we have to check
         * the state after taking it since we might be racing someone adding
         * a conflicting rule.
         */
        if (!RTCritSectIsInitialized(&g_LockValClassTeachCS))
            rtLockValidatorLazyInit();
        /* Enter failure is tolerated; rcLock gates the matching leave below. */
        int rcLock = RTCritSectEnter(&g_LockValClassTeachCS);

        /* Check */
        pCur = pFirstBadRec;
        while (pCur)
        {
            if (pCur->Core.u32Magic == RTLOCKVALRECNEST_MAGIC)
                pCur = pCur->Nest.pDown;
            else
            {
                uint32_t uPriorSubClass;
                PRTLOCKVALRECUNION pDown;
                RTLOCKVALCLASSINT *pPriorClass = rtLockValidatorRecGetClassesAndDown(pCur, &uPriorSubClass, &pDown);
                if (pPriorClass != NIL_RTLOCKVALCLASS)
                {
                    if (!rtLockValidatorIsClassOrderOk(pClass, uSubClass, pPriorClass, uPriorSubClass))
                    {
                        if (   pClass == pPriorClass
                            || rtLockValidatorClassIsPriorClass(pPriorClass, pClass))
                        {
                            if (RT_SUCCESS(rcLock))
                                RTCritSectLeave(&g_LockValClassTeachCS);
                            return rtLockValidatorStackWrongOrder("Wrong locking order! (2nd)", pSrcPos, pThreadSelf,
                                                                  pRec, pCur, pClass, pPriorClass);
                        }
                    }
                }
                pCur = pDown;
            }
        }

        /* Iterate the stack yet again, adding new rules this time. */
        pCur = pFirstBadRec;
        while (pCur)
        {
            if (pCur->Core.u32Magic == RTLOCKVALRECNEST_MAGIC)
                pCur = pCur->Nest.pDown;
            else
            {
                uint32_t uPriorSubClass;
                PRTLOCKVALRECUNION pDown;
                RTLOCKVALCLASSINT *pPriorClass = rtLockValidatorRecGetClassesAndDown(pCur, &uPriorSubClass, &pDown);
                if (pPriorClass != NIL_RTLOCKVALCLASS)
                {
                    if (!rtLockValidatorIsClassOrderOk(pClass, uSubClass, pPriorClass, uPriorSubClass))
                    {
                        Assert(   pClass != pPriorClass
                               && !rtLockValidatorClassIsPriorClass(pPriorClass, pClass));
                        int rc = rtLockValidatorClassAddPriorClass(pClass, pPriorClass, true /*fAutodidacticism*/, pSrcPos);
                        if (RT_FAILURE(rc))
                        {
                            /* Out of memory: stop teaching, but don't fail the caller. */
                            Assert(rc == VERR_NO_MEMORY);
                            break;
                        }
                        Assert(rtLockValidatorClassIsPriorClass(pClass, pPriorClass));
                    }
                }
                pCur = pDown;
            }
        }

        if (RT_SUCCESS(rcLock))
            RTCritSectLeave(&g_LockValClassTeachCS);
    }

    return VINF_SUCCESS;
}
2368
2369
2370
2371/**
2372 * Checks the locking order.
2373 *
2374 * @returns VINF_SUCCESS, VERR_SEM_LV_WRONG_ORDER or VERR_SEM_LV_INTERNAL_ERROR.
2375 * @param pClass The lock class.
2376 * @param uSubClass The lock sub-class.
2377 * @param pThreadSelf The current thread.
2378 * @param pRec The lock record.
2379 * @param pSrcPos The source position of the locking operation.
2380 */
2381static int rtLockValidatorStackCheckLockingOrder(RTLOCKVALCLASSINT * const pClass, uint32_t const uSubClass,
2382 PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION const pRec,
2383 PCRTLOCKVALSRCPOS pSrcPos)
2384{
2385 /*
2386 * Some internal paranoia first.
2387 */
2388 AssertPtr(pClass);
2389 Assert(pClass->u32Magic == RTLOCKVALCLASS_MAGIC);
2390 AssertPtr(pThreadSelf);
2391 Assert(pThreadSelf->u32Magic == RTTHREADINT_MAGIC);
2392 AssertPtr(pRec);
2393 AssertPtrNull(pSrcPos);
2394
2395 /*
2396 * Walk the stack, delegate problems to a worker routine.
2397 */
2398 PRTLOCKVALRECUNION pCur = pThreadSelf->LockValidator.pStackTop;
2399 if (!pCur)
2400 return VINF_SUCCESS;
2401
2402 for (;;)
2403 {
2404 AssertPtrReturn(pCur, VERR_SEM_LV_INTERNAL_ERROR);
2405
2406 if (pCur->Core.u32Magic == RTLOCKVALRECNEST_MAGIC)
2407 pCur = pCur->Nest.pDown;
2408 else
2409 {
2410 uint32_t uPriorSubClass;
2411 PRTLOCKVALRECUNION pDown;
2412 RTLOCKVALCLASSINT *pPriorClass = rtLockValidatorRecGetClassesAndDown(pCur, &uPriorSubClass, &pDown);
2413 if (pPriorClass != NIL_RTLOCKVALCLASS)
2414 {
2415 AssertPtrReturn(pPriorClass, VERR_SEM_LV_INTERNAL_ERROR);
2416 AssertReturn(pPriorClass->u32Magic == RTLOCKVALCLASS_MAGIC, VERR_SEM_LV_INTERNAL_ERROR);
2417 if (RT_UNLIKELY(!rtLockValidatorIsClassOrderOk(pClass, uSubClass, pPriorClass, uPriorSubClass)))
2418 return rtLockValidatorStackCheckLockingOrder2(pClass, uSubClass, pThreadSelf, pRec, pSrcPos,
2419 pPriorClass, pCur, pDown);
2420 }
2421 pCur = pDown;
2422 }
2423 if (!pCur)
2424 return VINF_SUCCESS;
2425 }
2426}
2427
2428
2429/**
2430 * Check that the lock record is the topmost one on the stack, complain and fail
2431 * if it isn't.
2432 *
2433 * @returns VINF_SUCCESS, VERR_SEM_LV_WRONG_RELEASE_ORDER or
2434 * VERR_SEM_LV_INVALID_PARAMETER.
2435 * @param pThreadSelf The current thread.
2436 * @param pRec The record.
2437 */
2438static int rtLockValidatorStackCheckReleaseOrder(PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION pRec)
2439{
2440 AssertReturn(pThreadSelf != NIL_RTTHREAD, VERR_SEM_LV_INVALID_PARAMETER);
2441 Assert(pThreadSelf == RTThreadSelf());
2442
2443 PRTLOCKVALRECUNION pTop = pThreadSelf->LockValidator.pStackTop;
2444 if (RT_LIKELY( pTop == pRec
2445 || ( pTop
2446 && pTop->Core.u32Magic == RTLOCKVALRECNEST_MAGIC
2447 && pTop->Nest.pRec == pRec) ))
2448 return VINF_SUCCESS;
2449
2450#ifdef RTLOCKVAL_WITH_RECURSION_RECORDS
2451 /* Look for a recursion record so the right frame is dumped and marked. */
2452 while (pTop)
2453 {
2454 if (pTop->Core.u32Magic == RTLOCKVALRECNEST_MAGIC)
2455 {
2456 if (pTop->Nest.pRec == pRec)
2457 {
2458 pRec = pTop;
2459 break;
2460 }
2461 pTop = pTop->Nest.pDown;
2462 }
2463 else if (pTop->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC)
2464 pTop = pTop->Excl.pDown;
2465 else if (pTop->Core.u32Magic == RTLOCKVALRECSHRDOWN_MAGIC)
2466 pTop = pTop->ShrdOwner.pDown;
2467 else
2468 break;
2469 }
2470#endif
2471
2472 rtLockValComplainFirst("Wrong release order!", NULL, pThreadSelf, pRec, true);
2473 rtLockValComplainPanic();
2474 return !g_fLockValSoftWrongOrder ? VERR_SEM_LV_WRONG_RELEASE_ORDER : VINF_SUCCESS;
2475}
2476
2477
2478/**
2479 * Checks if all owners are blocked - shared record operated in signaller mode.
2480 *
2481 * @returns true / false accordingly.
2482 * @param pRec The record.
2483 * @param pThreadSelf The current thread.
2484 */
2485DECL_FORCE_INLINE(bool) rtLockValidatorDdAreAllThreadsBlocked(PRTLOCKVALRECSHRD pRec, PRTTHREADINT pThreadSelf)
2486{
2487 PRTLOCKVALRECSHRDOWN volatile *papOwners = pRec->papOwners;
2488 uint32_t cAllocated = pRec->cAllocated;
2489 uint32_t cEntries = ASMAtomicUoReadU32(&pRec->cEntries);
2490 if (cEntries == 0)
2491 return false;
2492
2493 for (uint32_t i = 0; i < cAllocated; i++)
2494 {
2495 PRTLOCKVALRECSHRDOWN pEntry = rtLockValidatorUoReadSharedOwner(&papOwners[i]);
2496 if ( pEntry
2497 && pEntry->Core.u32Magic == RTLOCKVALRECSHRDOWN_MAGIC)
2498 {
2499 PRTTHREADINT pCurThread = rtLockValidatorReadThreadHandle(&pEntry->hThread);
2500 if (!pCurThread)
2501 return false;
2502 if (pCurThread->u32Magic != RTTHREADINT_MAGIC)
2503 return false;
2504 if ( !RTTHREAD_IS_SLEEPING(rtThreadGetState(pCurThread))
2505 && pCurThread != pThreadSelf)
2506 return false;
2507 if (--cEntries == 0)
2508 break;
2509 }
2510 else
2511 Assert(!pEntry || pEntry->Core.u32Magic == RTLOCKVALRECSHRDOWN_MAGIC_DEAD);
2512 }
2513
2514 return true;
2515}
2516
2517
2518/**
2519 * Verifies the deadlock stack before calling it a deadlock.
2520 *
2521 * @retval VERR_SEM_LV_DEADLOCK if it's a deadlock.
2522 * @retval VERR_SEM_LV_ILLEGAL_UPGRADE if it's a deadlock on the same lock.
2523 * @retval VERR_TRY_AGAIN if something changed.
2524 *
2525 * @param pStack The deadlock detection stack.
2526 * @param pThreadSelf The current thread.
2527 */
2528static int rtLockValidatorDdVerifyDeadlock(PRTLOCKVALDDSTACK pStack, PRTTHREADINT pThreadSelf)
2529{
2530 uint32_t const c = pStack->c;
2531 for (uint32_t iPass = 0; iPass < 3; iPass++)
2532 {
2533 for (uint32_t i = 1; i < c; i++)
2534 {
2535 PRTTHREADINT pThread = pStack->a[i].pThread;
2536 if (pThread->u32Magic != RTTHREADINT_MAGIC)
2537 return VERR_TRY_AGAIN;
2538 if (rtThreadGetState(pThread) != pStack->a[i].enmState)
2539 return VERR_TRY_AGAIN;
2540 if (rtLockValidatorReadRecUnionPtr(&pThread->LockValidator.pRec) != pStack->a[i].pFirstSibling)
2541 return VERR_TRY_AGAIN;
2542 /* ASSUMES the signaller records won't have siblings! */
2543 PRTLOCKVALRECUNION pRec = pStack->a[i].pRec;
2544 if ( pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
2545 && pRec->Shared.fSignaller
2546 && !rtLockValidatorDdAreAllThreadsBlocked(&pRec->Shared, pThreadSelf))
2547 return VERR_TRY_AGAIN;
2548 }
2549 RTThreadYield();
2550 }
2551
2552 if (c == 1)
2553 return VERR_SEM_LV_ILLEGAL_UPGRADE;
2554 return VERR_SEM_LV_DEADLOCK;
2555}
2556
2557
2558/**
2559 * Checks for stack cycles caused by another deadlock before returning.
2560 *
2561 * @retval VINF_SUCCESS if the stack is simply too small.
2562 * @retval VERR_SEM_LV_EXISTING_DEADLOCK if a cycle was detected.
2563 *
2564 * @param pStack The deadlock detection stack.
2565 */
2566static int rtLockValidatorDdHandleStackOverflow(PRTLOCKVALDDSTACK pStack)
2567{
2568 for (size_t i = 0; i < RT_ELEMENTS(pStack->a) - 1; i++)
2569 {
2570 PRTTHREADINT pThread = pStack->a[i].pThread;
2571 for (size_t j = i + 1; j < RT_ELEMENTS(pStack->a); j++)
2572 if (pStack->a[j].pThread == pThread)
2573 return VERR_SEM_LV_EXISTING_DEADLOCK;
2574 }
2575 static bool volatile s_fComplained = false;
2576 if (!s_fComplained)
2577 {
2578 s_fComplained = true;
2579 rtLockValComplain(RT_SRC_POS, "lock validator stack is too small! (%zu entries)\n", RT_ELEMENTS(pStack->a));
2580 }
2581 return VINF_SUCCESS;
2582}
2583
2584
/**
 * Worker for rtLockValidatorDeadlockDetection that does the actual deadlock
 * detection.
 *
 * Performs a depth-first walk of the wait-for graph starting at
 * @a pOriginalRec, using @a pStack as an explicit DFS stack.  A path that
 * leads back to @a pThreadSelf is a deadlock candidate and is double-checked
 * by rtLockValidatorDdVerifyDeadlock.
 *
 * @retval  VINF_SUCCESS
 * @retval  VERR_SEM_LV_DEADLOCK
 * @retval  VERR_SEM_LV_EXISTING_DEADLOCK
 * @retval  VERR_SEM_LV_ILLEGAL_UPGRADE
 * @retval  VERR_TRY_AGAIN
 *
 * @param   pStack          The stack to use.
 * @param   pOriginalRec    The original record.
 * @param   pThreadSelf     The calling thread.
 */
static int rtLockValidatorDdDoDetection(PRTLOCKVALDDSTACK pStack, PRTLOCKVALRECUNION const pOriginalRec,
                                        PRTTHREADINT const pThreadSelf)
{
    pStack->c = 0;

    /* We could use a single RTLOCKVALDDENTRY variable here, but the
       compiler may make a better job of it when using individual variables. */
    PRTLOCKVALRECUNION  pRec            = pOriginalRec;
    PRTLOCKVALRECUNION  pFirstSibling   = pOriginalRec;
    uint32_t            iEntry          = UINT32_MAX;   /* shared-owner table cursor */
    PRTTHREADINT        pThread         = NIL_RTTHREAD;
    RTTHREADSTATE       enmState        = RTTHREADSTATE_RUNNING;
    for (uint32_t iLoop = 0; ; iLoop++)
    {
        /*
         * Process the current record.
         */
        RTLOCKVAL_ASSERT_PTR_ALIGN(pRec);

        /* Find the next relevant owner thread and record. */
        PRTLOCKVALRECUNION  pNextRec     = NULL;
        RTTHREADSTATE       enmNextState = RTTHREADSTATE_RUNNING;
        PRTTHREADINT        pNextThread  = NIL_RTTHREAD;
        switch (pRec->Core.u32Magic)
        {
            case RTLOCKVALRECEXCL_MAGIC:
                Assert(iEntry == UINT32_MAX);
                /* Retry loop: the owner's state and blocking record must be
                   read consistently (the state is re-checked after reading
                   the record to detect concurrent changes). */
                for (;;)
                {
                    pNextThread = rtLockValidatorReadThreadHandle(&pRec->Excl.hThread);
                    if (   !pNextThread
                        || pNextThread->u32Magic != RTTHREADINT_MAGIC)
                        break;
                    enmNextState = rtThreadGetState(pNextThread);
                    if (   !RTTHREAD_IS_SLEEPING(enmNextState)
                        && pNextThread != pThreadSelf)
                        break;
                    pNextRec = rtLockValidatorReadRecUnionPtr(&pNextThread->LockValidator.pRec);
                    if (RT_LIKELY(   !pNextRec
                                  || enmNextState == rtThreadGetState(pNextThread)))
                        break;
                    pNextRec = NULL;
                }
                if (!pNextRec)
                {
                    /* Owner not blocked - try the next sibling record, if any. */
                    pRec = pRec->Excl.pSibling;
                    if (   pRec
                        && pRec != pFirstSibling)
                        continue;
                    pNextThread = NIL_RTTHREAD;
                }
                break;

            case RTLOCKVALRECSHRD_MAGIC:
                if (!pRec->Shared.fSignaller)
                {
                    /* Skip to the next sibling if same side. ASSUMES reader priority. */
                    /** @todo The read side of a read-write lock is problematic if
                     * the implementation prioritizes writers over readers because
                     * that means we should could deadlock against current readers
                     * if a writer showed up.  If the RW sem implementation is
                     * wrapping some native API, it's not so easy to detect when we
                     * should do this and when we shouldn't.  Checking when we
                     * shouldn't is subject to wakeup scheduling and cannot easily
                     * be made reliable.
                     *
                     * At the moment we circumvent all this mess by declaring that
                     * readers has priority.  This is TRUE on linux, but probably
                     * isn't on Solaris and FreeBSD. */
                    if (   pRec == pFirstSibling
                        && pRec->Shared.pSibling != NULL
                        && pRec->Shared.pSibling != pFirstSibling)
                    {
                        pRec = pRec->Shared.pSibling;
                        Assert(iEntry == UINT32_MAX);
                        continue;
                    }
                }

                /* Scan the owner table for blocked owners. */
                if (    ASMAtomicUoReadU32(&pRec->Shared.cEntries) > 0
                    &&  (   !pRec->Shared.fSignaller
                         || iEntry != UINT32_MAX
                         || rtLockValidatorDdAreAllThreadsBlocked(&pRec->Shared, pThreadSelf)
                        )
                    )
                {
                    uint32_t                        cAllocated = pRec->Shared.cAllocated;
                    PRTLOCKVALRECSHRDOWN volatile  *papOwners  = pRec->Shared.papOwners;
                    while (++iEntry < cAllocated)
                    {
                        PRTLOCKVALRECSHRDOWN pEntry = rtLockValidatorUoReadSharedOwner(&papOwners[iEntry]);
                        if (pEntry)
                        {
                            /* Same consistent-read retry dance as the exclusive case. */
                            for (;;)
                            {
                                if (pEntry->Core.u32Magic != RTLOCKVALRECSHRDOWN_MAGIC)
                                    break;
                                pNextThread = rtLockValidatorReadThreadHandle(&pEntry->hThread);
                                if (   !pNextThread
                                    || pNextThread->u32Magic != RTTHREADINT_MAGIC)
                                    break;
                                enmNextState = rtThreadGetState(pNextThread);
                                if (   !RTTHREAD_IS_SLEEPING(enmNextState)
                                    && pNextThread != pThreadSelf)
                                    break;
                                pNextRec = rtLockValidatorReadRecUnionPtr(&pNextThread->LockValidator.pRec);
                                if (RT_LIKELY(   !pNextRec
                                              || enmNextState == rtThreadGetState(pNextThread)))
                                    break;
                                pNextRec = NULL;
                            }
                            if (pNextRec)
                                break;
                        }
                        else
                            Assert(!pEntry || pEntry->Core.u32Magic == RTLOCKVALRECSHRDOWN_MAGIC_DEAD);
                    }
                    if (pNextRec)
                        break;
                    pNextThread = NIL_RTTHREAD;
                }

                /* Advance to the next sibling, if any. */
                pRec = pRec->Shared.pSibling;
                if (   pRec != NULL
                    && pRec != pFirstSibling)
                {
                    iEntry = UINT32_MAX;
                    continue;
                }
                break;

            case RTLOCKVALRECEXCL_MAGIC_DEAD:
            case RTLOCKVALRECSHRD_MAGIC_DEAD:
                /* Record died while we were looking - nothing to follow. */
                break;

            case RTLOCKVALRECSHRDOWN_MAGIC:
            case RTLOCKVALRECSHRDOWN_MAGIC_DEAD:
            default:
                AssertMsgFailed(("%p: %#x\n", pRec, pRec->Core.u32Magic));
                break;
        }

        if (pNextRec)
        {
            /*
             * Recurse and check for deadlock.
             */
            uint32_t i = pStack->c;
            if (RT_UNLIKELY(i >= RT_ELEMENTS(pStack->a)))
                return rtLockValidatorDdHandleStackOverflow(pStack);

            pStack->c++;
            pStack->a[i].pRec           = pRec;
            pStack->a[i].iEntry         = iEntry;
            pStack->a[i].enmState       = enmState;
            pStack->a[i].pThread        = pThread;
            pStack->a[i].pFirstSibling  = pFirstSibling;

            if (RT_UNLIKELY(   pNextThread == pThreadSelf
                            && (   i != 0
                                || pRec->Core.u32Magic != RTLOCKVALRECSHRD_MAGIC
                                || !pRec->Shared.fSignaller) /* ASSUMES signaller records have no siblings. */
                            )
                )
                return rtLockValidatorDdVerifyDeadlock(pStack, pThreadSelf);

            pRec            = pNextRec;
            pFirstSibling   = pNextRec;
            iEntry          = UINT32_MAX;
            enmState        = enmNextState;
            pThread         = pNextThread;
        }
        else
        {
            /*
             * No deadlock here, unwind the stack and deal with any unfinished
             * business there.
             */
            uint32_t i = pStack->c;
            for (;;)
            {
                /* pop */
                if (i == 0)
                    return VINF_SUCCESS;
                i--;
                pRec    = pStack->a[i].pRec;
                iEntry  = pStack->a[i].iEntry;

                /* Examine it. */
                uint32_t u32Magic = pRec->Core.u32Magic;
                if (u32Magic == RTLOCKVALRECEXCL_MAGIC)
                    pRec = pRec->Excl.pSibling;
                else if (u32Magic == RTLOCKVALRECSHRD_MAGIC)
                {
                    if (iEntry + 1 < pRec->Shared.cAllocated)
                        break;  /* continue processing this record. */
                    pRec = pRec->Shared.pSibling;
                }
                else
                {
                    Assert(   u32Magic == RTLOCKVALRECEXCL_MAGIC_DEAD
                           || u32Magic == RTLOCKVALRECSHRD_MAGIC_DEAD);
                    continue;
                }

                /* Any next record to advance to? */
                if (   !pRec
                    || pRec == pStack->a[i].pFirstSibling)
                    continue;
                iEntry = UINT32_MAX;
                break;
            }

            /* Restore the rest of the state and update the stack. */
            pFirstSibling   = pStack->a[i].pFirstSibling;
            enmState        = pStack->a[i].enmState;
            pThread         = pStack->a[i].pThread;
            pStack->c       = i;
        }

        Assert(iLoop != 1000000); /* sanity: graph walk should terminate long before this */
    }
}
2824
2825
2826/**
2827 * Check for the simple no-deadlock case.
2828 *
2829 * @returns true if no deadlock, false if further investigation is required.
2830 *
2831 * @param pOriginalRec The original record.
2832 */
2833DECLINLINE(int) rtLockValidatorIsSimpleNoDeadlockCase(PRTLOCKVALRECUNION pOriginalRec)
2834{
2835 if ( pOriginalRec->Excl.Core.u32Magic == RTLOCKVALRECEXCL_MAGIC
2836 && !pOriginalRec->Excl.pSibling)
2837 {
2838 PRTTHREADINT pThread = rtLockValidatorReadThreadHandle(&pOriginalRec->Excl.hThread);
2839 if ( !pThread
2840 || pThread->u32Magic != RTTHREADINT_MAGIC)
2841 return true;
2842 RTTHREADSTATE enmState = rtThreadGetState(pThread);
2843 if (!RTTHREAD_IS_SLEEPING(enmState))
2844 return true;
2845 }
2846 return false;
2847}
2848
2849
/**
 * Worker for rtLockValidatorDeadlockDetection that bitches about a deadlock.
 *
 * Dumps the whole deadlock chain (unless the validator is in quiet mode) and
 * then panics according to the configured panic method.
 *
 * @param   pStack          The chain of locks causing the deadlock.
 * @param   pRec            The record relating to the current thread's lock
 *                          operation.
 * @param   pThreadSelf     This thread.
 * @param   pSrcPos         Where we are going to deadlock.
 * @param   rc              The return code (selects the complaint headline).
 */
static void rcLockValidatorDoDeadlockComplaining(PRTLOCKVALDDSTACK pStack, PRTLOCKVALRECUNION pRec,
                                                 PRTTHREADINT pThreadSelf, PCRTLOCKVALSRCPOS pSrcPos, int rc)
{
    if (!ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
    {
        /* Pick a headline matching the failure mode. */
        const char *pszWhat;
        switch (rc)
        {
            case VERR_SEM_LV_DEADLOCK:          pszWhat = "Detected deadlock!"; break;
            case VERR_SEM_LV_EXISTING_DEADLOCK: pszWhat = "Found existing deadlock!"; break;
            case VERR_SEM_LV_ILLEGAL_UPGRADE:   pszWhat = "Illegal lock upgrade!"; break;
            default:                            AssertFailed(); pszWhat = "!unexpected rc!"; break;
        }
        /* Only pass pRec separately if it isn't already the chain's first entry. */
        rtLockValComplainFirst(pszWhat, pSrcPos, pThreadSelf, pStack->a[0].pRec != pRec ? pRec : NULL, true);
        rtLockValComplainMore("---- start of deadlock chain - %u entries ----\n", pStack->c);
        for (uint32_t i = 0; i < pStack->c; i++)
        {
            char szPrefix[24];
            RTStrPrintf(szPrefix, sizeof(szPrefix), "#%02u: ", i);
            /* For shared records, complain about the specific owner entry that
               the detection walk followed; fall back to the record itself. */
            PRTLOCKVALRECUNION pShrdOwner = NULL;
            if (pStack->a[i].pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC)
                pShrdOwner = (PRTLOCKVALRECUNION)pStack->a[i].pRec->Shared.papOwners[pStack->a[i].iEntry];
            if (RT_VALID_PTR(pShrdOwner) && pShrdOwner->Core.u32Magic == RTLOCKVALRECSHRDOWN_MAGIC)
            {
                rtLockValComplainAboutLock(szPrefix, pShrdOwner, "\n");
                rtLockValComplainAboutLockStack(pShrdOwner->ShrdOwner.hThread, 5, 2, pShrdOwner);
            }
            else
            {
                rtLockValComplainAboutLock(szPrefix, pStack->a[i].pRec, "\n");
                if (pStack->a[i].pRec->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC)
                    rtLockValComplainAboutLockStack(pStack->a[i].pRec->Excl.hThread, 5, 2, pStack->a[i].pRec);
            }
        }
        rtLockValComplainMore("---- end of deadlock chain ----\n");
    }

    rtLockValComplainPanic();
}
2899
2900
/**
 * Perform deadlock detection.
 *
 * Runs the graph walk under the detection serialization lock, retrying a few
 * times (with yields) when the graph changed mid-walk; if the situation never
 * stabilizes the thread gets the benefit of the doubt and VINF_SUCCESS is
 * returned.
 *
 * @retval  VINF_SUCCESS
 * @retval  VERR_SEM_LV_DEADLOCK
 * @retval  VERR_SEM_LV_EXISTING_DEADLOCK
 * @retval  VERR_SEM_LV_ILLEGAL_UPGRADE
 *
 * @param   pRec            The record relating to the current thread's lock
 *                          operation.
 * @param   pThreadSelf     The current thread.
 * @param   pSrcPos         The position of the current lock operation.
 */
static int rtLockValidatorDeadlockDetection(PRTLOCKVALRECUNION pRec, PRTTHREADINT pThreadSelf, PCRTLOCKVALSRCPOS pSrcPos)
{
    RTLOCKVALDDSTACK Stack;
    rtLockValidatorSerializeDetectionEnter();
    int rc = rtLockValidatorDdDoDetection(&Stack, pRec, pThreadSelf);
    rtLockValidatorSerializeDetectionLeave();
    if (RT_SUCCESS(rc))
        return VINF_SUCCESS;

    if (rc == VERR_TRY_AGAIN)
    {
        /* The graph changed while we were walking it - retry a few times. */
        for (uint32_t iLoop = 0; ; iLoop++)
        {
            rtLockValidatorSerializeDetectionEnter();
            rc = rtLockValidatorDdDoDetection(&Stack, pRec, pThreadSelf);
            rtLockValidatorSerializeDetectionLeave();
            if (RT_SUCCESS_NP(rc))
                return VINF_SUCCESS;
            if (rc != VERR_TRY_AGAIN)
                break;
            RTThreadYield();
            if (iLoop >= 3)
                return VINF_SUCCESS; /* never stabilized - benefit of the doubt */
        }
    }

    rcLockValidatorDoDeadlockComplaining(&Stack, pRec, pThreadSelf, pSrcPos, rc);
    return rc;
}
2943
2944
2945RTDECL(void) RTLockValidatorRecExclInitV(PRTLOCKVALRECEXCL pRec, RTLOCKVALCLASS hClass, uint32_t uSubClass,
2946 void *hLock, bool fEnabled, const char *pszNameFmt, va_list va)
2947{
2948 RTLOCKVAL_ASSERT_PTR_ALIGN(pRec);
2949 RTLOCKVAL_ASSERT_PTR_ALIGN(hLock);
2950 Assert( uSubClass >= RTLOCKVAL_SUB_CLASS_USER
2951 || uSubClass == RTLOCKVAL_SUB_CLASS_NONE
2952 || uSubClass == RTLOCKVAL_SUB_CLASS_ANY);
2953
2954 pRec->Core.u32Magic = RTLOCKVALRECEXCL_MAGIC;
2955 pRec->fEnabled = fEnabled && RTLockValidatorIsEnabled();
2956 pRec->afReserved[0] = 0;
2957 pRec->afReserved[1] = 0;
2958 pRec->afReserved[2] = 0;
2959 rtLockValidatorSrcPosInit(&pRec->SrcPos);
2960 pRec->hThread = NIL_RTTHREAD;
2961 pRec->pDown = NULL;
2962 pRec->hClass = rtLockValidatorClassValidateAndRetain(hClass);
2963 pRec->uSubClass = uSubClass;
2964 pRec->cRecursion = 0;
2965 pRec->hLock = hLock;
2966 pRec->pSibling = NULL;
2967 if (pszNameFmt)
2968 RTStrPrintfV(pRec->szName, sizeof(pRec->szName), pszNameFmt, va);
2969 else
2970 {
2971 static uint32_t volatile s_cAnonymous = 0;
2972 uint32_t i = ASMAtomicIncU32(&s_cAnonymous) - 1;
2973 RTStrPrintf(pRec->szName, sizeof(pRec->szName), "anon-excl-%u", i);
2974 }
2975
2976 /* Lazy initialization. */
2977 if (RT_UNLIKELY(g_hLockValidatorXRoads == NIL_RTSEMXROADS))
2978 rtLockValidatorLazyInit();
2979}
2980
2981
2982RTDECL(void) RTLockValidatorRecExclInit(PRTLOCKVALRECEXCL pRec, RTLOCKVALCLASS hClass, uint32_t uSubClass,
2983 void *hLock, bool fEnabled, const char *pszNameFmt, ...)
2984{
2985 va_list va;
2986 va_start(va, pszNameFmt);
2987 RTLockValidatorRecExclInitV(pRec, hClass, uSubClass, hLock, fEnabled, pszNameFmt, va);
2988 va_end(va);
2989}
2990
2991
2992RTDECL(int) RTLockValidatorRecExclCreateV(PRTLOCKVALRECEXCL *ppRec, RTLOCKVALCLASS hClass,
2993 uint32_t uSubClass, void *pvLock, bool fEnabled,
2994 const char *pszNameFmt, va_list va)
2995{
2996 PRTLOCKVALRECEXCL pRec;
2997 *ppRec = pRec = (PRTLOCKVALRECEXCL)RTMemAlloc(sizeof(*pRec));
2998 if (!pRec)
2999 return VERR_NO_MEMORY;
3000 RTLockValidatorRecExclInitV(pRec, hClass, uSubClass, pvLock, fEnabled, pszNameFmt, va);
3001 return VINF_SUCCESS;
3002}
3003
3004
3005RTDECL(int) RTLockValidatorRecExclCreate(PRTLOCKVALRECEXCL *ppRec, RTLOCKVALCLASS hClass,
3006 uint32_t uSubClass, void *pvLock, bool fEnabled,
3007 const char *pszNameFmt, ...)
3008{
3009 va_list va;
3010 va_start(va, pszNameFmt);
3011 int rc = RTLockValidatorRecExclCreateV(ppRec, hClass, uSubClass, pvLock, fEnabled, pszNameFmt, va);
3012 va_end(va);
3013 return rc;
3014}
3015
3016
RTDECL(void) RTLockValidatorRecExclDelete(PRTLOCKVALRECEXCL pRec)
{
    /* Counterpart to RTLockValidatorRecExclInitV: invalidates and unlinks the
       record but does not free its memory (see RTLockValidatorRecExclDestroy). */
    Assert(pRec->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC);

    /* Keep deadlock detection and other destructors out while we take the
       record apart. */
    rtLockValidatorSerializeDestructEnter();

    /** @todo Check that it's not on our stack first. Need to make it
     * configurable whether deleting a owned lock is acceptable? */

    /* Kill the magic first so concurrent lookups reject the record, then
       clear the owner. */
    ASMAtomicWriteU32(&pRec->Core.u32Magic, RTLOCKVALRECEXCL_MAGIC_DEAD);
    ASMAtomicWriteHandle(&pRec->hThread, NIL_RTTHREAD);
    /* Detach the class reference; it is released only after dropping the
       destruction lock to keep the critical section short. */
    RTLOCKVALCLASS hClass;
    ASMAtomicXchgHandle(&pRec->hClass, NIL_RTLOCKVALCLASS, &hClass);
    if (pRec->pSibling)
        rtLockValidatorUnlinkAllSiblings(&pRec->Core);
    rtLockValidatorSerializeDestructLeave();
    if (hClass != NIL_RTLOCKVALCLASS)
        RTLockValidatorClassRelease(hClass);
}
3036
3037
3038RTDECL(void) RTLockValidatorRecExclDestroy(PRTLOCKVALRECEXCL *ppRec)
3039{
3040 PRTLOCKVALRECEXCL pRec = *ppRec;
3041 *ppRec = NULL;
3042 if (pRec)
3043 {
3044 RTLockValidatorRecExclDelete(pRec);
3045 RTMemFree(pRec);
3046 }
3047}
3048
3049
3050RTDECL(uint32_t) RTLockValidatorRecExclSetSubClass(PRTLOCKVALRECEXCL pRec, uint32_t uSubClass)
3051{
3052 AssertPtrReturn(pRec, RTLOCKVAL_SUB_CLASS_INVALID);
3053 AssertReturn(pRec->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, RTLOCKVAL_SUB_CLASS_INVALID);
3054 AssertReturn( uSubClass >= RTLOCKVAL_SUB_CLASS_USER
3055 || uSubClass == RTLOCKVAL_SUB_CLASS_NONE
3056 || uSubClass == RTLOCKVAL_SUB_CLASS_ANY,
3057 RTLOCKVAL_SUB_CLASS_INVALID);
3058 return ASMAtomicXchgU32(&pRec->uSubClass, uSubClass);
3059}
3060
3061
RTDECL(void) RTLockValidatorRecExclSetOwner(PRTLOCKVALRECEXCL pRec, RTTHREAD hThreadSelf,
                                            PCRTLOCKVALSRCPOS pSrcPos, bool fFirstRecursion)
{
    /* Records hThreadSelf as the owner of the exclusive lock, either as a
       fresh acquisition or as a recursion.  NULL / disabled records are
       ignored quietly so callers don't have to check first. */
    PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
    if (!pRecU)
        return;
    AssertReturnVoid(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC);
    if (!pRecU->Excl.fEnabled)
        return;
    /* Adopt threads not created via IPRT on the fly. */
    if (hThreadSelf == NIL_RTTHREAD)
    {
        hThreadSelf = RTThreadSelfAutoAdopt();
        AssertReturnVoid(hThreadSelf != NIL_RTTHREAD);
    }
    AssertReturnVoid(hThreadSelf->u32Magic == RTTHREADINT_MAGIC);
    Assert(hThreadSelf == RTThreadSelf());

    /* Per-thread statistics counter for held write locks. */
    ASMAtomicIncS32(&hThreadSelf->LockValidator.cWriteLocks);

    if (pRecU->Excl.hThread == hThreadSelf)
    {
        /* Recursive entry: just bump the depth and note it on the stack. */
        Assert(!fFirstRecursion); RT_NOREF_PV(fFirstRecursion);
        pRecU->Excl.cRecursion++;
        rtLockValidatorStackPushRecursion(hThreadSelf, pRecU, pSrcPos);
    }
    else
    {
        /* Fresh acquisition.  The source position and recursion count are
           written before the owner handle so concurrent readers that see the
           new owner also see consistent data. */
        Assert(pRecU->Excl.hThread == NIL_RTTHREAD);

        rtLockValidatorSrcPosCopy(&pRecU->Excl.SrcPos, pSrcPos);
        ASMAtomicUoWriteU32(&pRecU->Excl.cRecursion, 1);
        ASMAtomicWriteHandle(&pRecU->Excl.hThread, hThreadSelf);

        rtLockValidatorStackPush(hThreadSelf, pRecU);
    }
}
3098
3099
3100/**
3101 * Internal worker for RTLockValidatorRecExclReleaseOwner and
3102 * RTLockValidatorRecExclReleaseOwnerUnchecked.
3103 */
3104static void rtLockValidatorRecExclReleaseOwnerUnchecked(PRTLOCKVALRECUNION pRec, bool fFinalRecursion)
3105{
3106 RTTHREADINT *pThread = pRec->Excl.hThread;
3107 AssertReturnVoid(pThread != NIL_RTTHREAD);
3108 Assert(pThread == RTThreadSelf());
3109
3110 ASMAtomicDecS32(&pThread->LockValidator.cWriteLocks);
3111 uint32_t c = ASMAtomicDecU32(&pRec->Excl.cRecursion);
3112 if (c == 0)
3113 {
3114 rtLockValidatorStackPop(pThread, pRec);
3115 ASMAtomicWriteHandle(&pRec->Excl.hThread, NIL_RTTHREAD);
3116 }
3117 else
3118 {
3119 Assert(c < UINT32_C(0xffff0000));
3120 Assert(!fFinalRecursion); RT_NOREF_PV(fFinalRecursion);
3121 rtLockValidatorStackPopRecursion(pThread, pRec);
3122 }
3123}
3124
3125RTDECL(int) RTLockValidatorRecExclReleaseOwner(PRTLOCKVALRECEXCL pRec, bool fFinalRecursion)
3126{
3127 PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
3128 if (!pRecU)
3129 return VINF_SUCCESS;
3130 AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
3131 if (!pRecU->Excl.fEnabled)
3132 return VINF_SUCCESS;
3133
3134 /*
3135 * Check the release order.
3136 */
3137 if ( pRecU->Excl.hClass != NIL_RTLOCKVALCLASS
3138 && pRecU->Excl.hClass->fStrictReleaseOrder
3139 && pRecU->Excl.hClass->cMsMinOrder != RT_INDEFINITE_WAIT
3140 )
3141 {
3142 int rc = rtLockValidatorStackCheckReleaseOrder(pRecU->Excl.hThread, pRecU);
3143 if (RT_FAILURE(rc))
3144 return rc;
3145 }
3146
3147 /*
3148 * Join paths with RTLockValidatorRecExclReleaseOwnerUnchecked.
3149 */
3150 rtLockValidatorRecExclReleaseOwnerUnchecked(pRecU, fFinalRecursion);
3151 return VINF_SUCCESS;
3152}
3153
3154
3155RTDECL(void) RTLockValidatorRecExclReleaseOwnerUnchecked(PRTLOCKVALRECEXCL pRec)
3156{
3157 PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
3158 AssertReturnVoid(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC);
3159 if (pRecU->Excl.fEnabled)
3160 rtLockValidatorRecExclReleaseOwnerUnchecked(pRecU, false);
3161}
3162
3163
3164RTDECL(int) RTLockValidatorRecExclRecursion(PRTLOCKVALRECEXCL pRec, PCRTLOCKVALSRCPOS pSrcPos)
3165{
3166 PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
3167 if (!pRecU)
3168 return VINF_SUCCESS;
3169 AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
3170 if (!pRecU->Excl.fEnabled)
3171 return VINF_SUCCESS;
3172 AssertReturn(pRecU->Excl.hThread != NIL_RTTHREAD, VERR_SEM_LV_INVALID_PARAMETER);
3173 AssertReturn(pRecU->Excl.cRecursion > 0, VERR_SEM_LV_INVALID_PARAMETER);
3174
3175 if ( pRecU->Excl.hClass != NIL_RTLOCKVALCLASS
3176 && !pRecU->Excl.hClass->fRecursionOk)
3177 {
3178 rtLockValComplainFirst("Recursion not allowed by the class!",
3179 pSrcPos, pRecU->Excl.hThread, (PRTLOCKVALRECUNION)pRec, true);
3180 rtLockValComplainPanic();
3181 return VERR_SEM_LV_NESTED;
3182 }
3183
3184 Assert(pRecU->Excl.cRecursion < _1M);
3185 pRecU->Excl.cRecursion++;
3186 rtLockValidatorStackPushRecursion(pRecU->Excl.hThread, pRecU, pSrcPos);
3187 return VINF_SUCCESS;
3188}
3189
3190
3191RTDECL(int) RTLockValidatorRecExclUnwind(PRTLOCKVALRECEXCL pRec)
3192{
3193 PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
3194 AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
3195 if (!pRecU->Excl.fEnabled)
3196 return VINF_SUCCESS;
3197 AssertReturn(pRecU->Excl.hThread != NIL_RTTHREAD, VERR_SEM_LV_INVALID_PARAMETER);
3198 Assert(pRecU->Excl.hThread == RTThreadSelf());
3199 AssertReturn(pRecU->Excl.cRecursion > 1, VERR_SEM_LV_INVALID_PARAMETER);
3200
3201 /*
3202 * Check the release order.
3203 */
3204 if ( pRecU->Excl.hClass != NIL_RTLOCKVALCLASS
3205 && pRecU->Excl.hClass->fStrictReleaseOrder
3206 && pRecU->Excl.hClass->cMsMinOrder != RT_INDEFINITE_WAIT
3207 )
3208 {
3209 int rc = rtLockValidatorStackCheckReleaseOrder(pRecU->Excl.hThread, pRecU);
3210 if (RT_FAILURE(rc))
3211 return rc;
3212 }
3213
3214 /*
3215 * Perform the unwind.
3216 */
3217 pRecU->Excl.cRecursion--;
3218 rtLockValidatorStackPopRecursion(pRecU->Excl.hThread, pRecU);
3219 return VINF_SUCCESS;
3220}
3221
3222
3223RTDECL(int) RTLockValidatorRecExclRecursionMixed(PRTLOCKVALRECEXCL pRec, PRTLOCKVALRECCORE pRecMixed, PCRTLOCKVALSRCPOS pSrcPos)
3224{
3225 PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
3226 AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
3227 PRTLOCKVALRECUNION pRecMixedU = (PRTLOCKVALRECUNION)pRecMixed;
3228 AssertReturn( pRecMixedU->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
3229 || pRecMixedU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC
3230 , VERR_SEM_LV_INVALID_PARAMETER);
3231 if (!pRecU->Excl.fEnabled)
3232 return VINF_SUCCESS;
3233 Assert(pRecU->Excl.hThread == RTThreadSelf());
3234 AssertReturn(pRecU->Excl.hThread != NIL_RTTHREAD, VERR_SEM_LV_INVALID_PARAMETER);
3235 AssertReturn(pRecU->Excl.cRecursion > 0, VERR_SEM_LV_INVALID_PARAMETER);
3236
3237 if ( pRecU->Excl.hClass != NIL_RTLOCKVALCLASS
3238 && !pRecU->Excl.hClass->fRecursionOk)
3239 {
3240 rtLockValComplainFirst("Mixed recursion not allowed by the class!",
3241 pSrcPos, pRecU->Excl.hThread, (PRTLOCKVALRECUNION)pRec, true);
3242 rtLockValComplainPanic();
3243 return VERR_SEM_LV_NESTED;
3244 }
3245
3246 Assert(pRecU->Excl.cRecursion < _1M);
3247 pRecU->Excl.cRecursion++;
3248 rtLockValidatorStackPushRecursion(pRecU->Excl.hThread, pRecU, pSrcPos);
3249
3250 return VINF_SUCCESS;
3251}
3252
3253
3254RTDECL(int) RTLockValidatorRecExclUnwindMixed(PRTLOCKVALRECEXCL pRec, PRTLOCKVALRECCORE pRecMixed)
3255{
3256 PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
3257 AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
3258 PRTLOCKVALRECUNION pRecMixedU = (PRTLOCKVALRECUNION)pRecMixed;
3259 AssertReturn( pRecMixedU->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
3260 || pRecMixedU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC
3261 , VERR_SEM_LV_INVALID_PARAMETER);
3262 if (!pRecU->Excl.fEnabled)
3263 return VINF_SUCCESS;
3264 Assert(pRecU->Excl.hThread == RTThreadSelf());
3265 AssertReturn(pRecU->Excl.hThread != NIL_RTTHREAD, VERR_SEM_LV_INVALID_PARAMETER);
3266 AssertReturn(pRecU->Excl.cRecursion > 1, VERR_SEM_LV_INVALID_PARAMETER);
3267
3268 /*
3269 * Check the release order.
3270 */
3271 if ( pRecU->Excl.hClass != NIL_RTLOCKVALCLASS
3272 && pRecU->Excl.hClass->fStrictReleaseOrder
3273 && pRecU->Excl.hClass->cMsMinOrder != RT_INDEFINITE_WAIT
3274 )
3275 {
3276 int rc = rtLockValidatorStackCheckReleaseOrder(pRecU->Excl.hThread, pRecU);
3277 if (RT_FAILURE(rc))
3278 return rc;
3279 }
3280
3281 /*
3282 * Perform the unwind.
3283 */
3284 pRecU->Excl.cRecursion--;
3285 rtLockValidatorStackPopRecursion(pRecU->Excl.hThread, pRecU);
3286 return VINF_SUCCESS;
3287}
3288
3289
3290RTDECL(int) RTLockValidatorRecExclCheckOrder(PRTLOCKVALRECEXCL pRec, RTTHREAD hThreadSelf,
3291 PCRTLOCKVALSRCPOS pSrcPos, RTMSINTERVAL cMillies)
3292{
3293 /*
3294 * Validate and adjust input. Quit early if order validation is disabled.
3295 */
3296 PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
3297 if (!pRecU)
3298 return VINF_SUCCESS;
3299 AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
3300 if ( !pRecU->Excl.fEnabled
3301 || pRecU->Excl.hClass == NIL_RTLOCKVALCLASS
3302 || pRecU->Excl.hClass->cMsMinOrder == RT_INDEFINITE_WAIT
3303 || pRecU->Excl.hClass->cMsMinOrder > cMillies)
3304 return VINF_SUCCESS;
3305
3306 if (hThreadSelf == NIL_RTTHREAD)
3307 {
3308 hThreadSelf = RTThreadSelfAutoAdopt();
3309 AssertReturn(hThreadSelf != NIL_RTTHREAD, VERR_SEM_LV_INTERNAL_ERROR);
3310 }
3311 AssertReturn(hThreadSelf->u32Magic == RTTHREADINT_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
3312 Assert(hThreadSelf == RTThreadSelf());
3313
3314 /*
3315 * Detect recursion as it isn't subject to order restrictions.
3316 */
3317 if (pRec->hThread == hThreadSelf)
3318 return VINF_SUCCESS;
3319
3320 return rtLockValidatorStackCheckLockingOrder(pRecU->Excl.hClass, pRecU->Excl.uSubClass, hThreadSelf, pRecU, pSrcPos);
3321}
3322
3323
3324RTDECL(int) RTLockValidatorRecExclCheckBlocking(PRTLOCKVALRECEXCL pRec, RTTHREAD hThreadSelf,
3325 PCRTLOCKVALSRCPOS pSrcPos, bool fRecursiveOk, RTMSINTERVAL cMillies,
3326 RTTHREADSTATE enmSleepState, bool fReallySleeping)
3327{
3328 /*
3329 * Fend off wild life.
3330 */
3331 PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
3332 if (!pRecU)
3333 return VINF_SUCCESS;
3334 AssertPtrReturn(pRecU, VERR_SEM_LV_INVALID_PARAMETER);
3335 AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
3336 if (!pRec->fEnabled)
3337 return VINF_SUCCESS;
3338
3339 PRTTHREADINT pThreadSelf = hThreadSelf;
3340 AssertPtrReturn(pThreadSelf, VERR_SEM_LV_INVALID_PARAMETER);
3341 AssertReturn(pThreadSelf->u32Magic == RTTHREADINT_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
3342 Assert(pThreadSelf == RTThreadSelf());
3343
3344 AssertReturn(RTTHREAD_IS_SLEEPING(enmSleepState), VERR_SEM_LV_INVALID_PARAMETER);
3345
3346 RTTHREADSTATE enmThreadState = rtThreadGetState(pThreadSelf);
3347 if (RT_UNLIKELY(enmThreadState != RTTHREADSTATE_RUNNING))
3348 {
3349 AssertReturn( enmThreadState == RTTHREADSTATE_TERMINATED /* rtThreadRemove uses locks too */
3350 || enmThreadState == RTTHREADSTATE_INITIALIZING /* rtThreadInsert uses locks too */
3351 , VERR_SEM_LV_INVALID_PARAMETER);
3352 enmSleepState = enmThreadState;
3353 }
3354
3355 /*
3356 * Record the location.
3357 */
3358 rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pRec, pRecU);
3359 rtLockValidatorSrcPosCopy(&pThreadSelf->LockValidator.SrcPos, pSrcPos);
3360 ASMAtomicWriteBool(&pThreadSelf->LockValidator.fInValidator, true);
3361 pThreadSelf->LockValidator.enmRecState = enmSleepState;
3362 rtThreadSetState(pThreadSelf, enmSleepState);
3363
3364 /*
3365 * Don't do deadlock detection if we're recursing.
3366 *
3367 * On some hosts we don't do recursion accounting our selves and there
3368 * isn't any other place to check for this.
3369 */
3370 int rc = VINF_SUCCESS;
3371 if (rtLockValidatorReadThreadHandle(&pRecU->Excl.hThread) == pThreadSelf)
3372 {
3373 if ( !fRecursiveOk
3374 || ( pRecU->Excl.hClass != NIL_RTLOCKVALCLASS
3375 && !pRecU->Excl.hClass->fRecursionOk))
3376 {
3377 rtLockValComplainFirst("Recursion not allowed!", pSrcPos, pThreadSelf, pRecU, true);
3378 rtLockValComplainPanic();
3379 rc = VERR_SEM_LV_NESTED;
3380 }
3381 }
3382 /*
3383 * Perform deadlock detection.
3384 */
3385 else if ( pRecU->Excl.hClass != NIL_RTLOCKVALCLASS
3386 && ( pRecU->Excl.hClass->cMsMinDeadlock > cMillies
3387 || pRecU->Excl.hClass->cMsMinDeadlock > RT_INDEFINITE_WAIT))
3388 rc = VINF_SUCCESS;
3389 else if (!rtLockValidatorIsSimpleNoDeadlockCase(pRecU))
3390 rc = rtLockValidatorDeadlockDetection(pRecU, pThreadSelf, pSrcPos);
3391
3392 if (RT_SUCCESS(rc))
3393 ASMAtomicWriteBool(&pThreadSelf->fReallySleeping, fReallySleeping);
3394 else
3395 {
3396 rtThreadSetState(pThreadSelf, enmThreadState);
3397 rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pRec, NULL);
3398 }
3399 ASMAtomicWriteBool(&pThreadSelf->LockValidator.fInValidator, false);
3400 return rc;
3401}
3402RT_EXPORT_SYMBOL(RTLockValidatorRecExclCheckBlocking);
3403
3404
3405RTDECL(int) RTLockValidatorRecExclCheckOrderAndBlocking(PRTLOCKVALRECEXCL pRec, RTTHREAD hThreadSelf,
3406 PCRTLOCKVALSRCPOS pSrcPos, bool fRecursiveOk, RTMSINTERVAL cMillies,
3407 RTTHREADSTATE enmSleepState, bool fReallySleeping)
3408{
3409 int rc = RTLockValidatorRecExclCheckOrder(pRec, hThreadSelf, pSrcPos, cMillies);
3410 if (RT_SUCCESS(rc))
3411 rc = RTLockValidatorRecExclCheckBlocking(pRec, hThreadSelf, pSrcPos, fRecursiveOk, cMillies,
3412 enmSleepState, fReallySleeping);
3413 return rc;
3414}
3415RT_EXPORT_SYMBOL(RTLockValidatorRecExclCheckOrderAndBlocking);
3416
3417
3418RTDECL(void) RTLockValidatorRecSharedInitV(PRTLOCKVALRECSHRD pRec, RTLOCKVALCLASS hClass, uint32_t uSubClass,
3419 void *hLock, bool fSignaller, bool fEnabled, const char *pszNameFmt, va_list va)
3420{
3421 RTLOCKVAL_ASSERT_PTR_ALIGN(pRec);
3422 RTLOCKVAL_ASSERT_PTR_ALIGN(hLock);
3423 Assert( uSubClass >= RTLOCKVAL_SUB_CLASS_USER
3424 || uSubClass == RTLOCKVAL_SUB_CLASS_NONE
3425 || uSubClass == RTLOCKVAL_SUB_CLASS_ANY);
3426
3427 pRec->Core.u32Magic = RTLOCKVALRECSHRD_MAGIC;
3428 pRec->uSubClass = uSubClass;
3429 pRec->hClass = rtLockValidatorClassValidateAndRetain(hClass);
3430 pRec->hLock = hLock;
3431 pRec->fEnabled = fEnabled && RTLockValidatorIsEnabled();
3432 pRec->fSignaller = fSignaller;
3433 pRec->pSibling = NULL;
3434
3435 /* the table */
3436 pRec->cEntries = 0;
3437 pRec->iLastEntry = 0;
3438 pRec->cAllocated = 0;
3439 pRec->fReallocating = false;
3440 pRec->fPadding = false;
3441 pRec->papOwners = NULL;
3442
3443 /* the name */
3444 if (pszNameFmt)
3445 RTStrPrintfV(pRec->szName, sizeof(pRec->szName), pszNameFmt, va);
3446 else
3447 {
3448 static uint32_t volatile s_cAnonymous = 0;
3449 uint32_t i = ASMAtomicIncU32(&s_cAnonymous) - 1;
3450 RTStrPrintf(pRec->szName, sizeof(pRec->szName), "anon-shrd-%u", i);
3451 }
3452}
3453
3454
3455RTDECL(void) RTLockValidatorRecSharedInit(PRTLOCKVALRECSHRD pRec, RTLOCKVALCLASS hClass, uint32_t uSubClass,
3456 void *hLock, bool fSignaller, bool fEnabled, const char *pszNameFmt, ...)
3457{
3458 va_list va;
3459 va_start(va, pszNameFmt);
3460 RTLockValidatorRecSharedInitV(pRec, hClass, uSubClass, hLock, fSignaller, fEnabled, pszNameFmt, va);
3461 va_end(va);
3462}
3463
3464
3465RTDECL(int) RTLockValidatorRecSharedCreateV(PRTLOCKVALRECSHRD *ppRec, RTLOCKVALCLASS hClass,
3466 uint32_t uSubClass, void *pvLock, bool fSignaller, bool fEnabled,
3467 const char *pszNameFmt, va_list va)
3468{
3469 PRTLOCKVALRECSHRD pRec;
3470 *ppRec = pRec = (PRTLOCKVALRECSHRD)RTMemAlloc(sizeof(*pRec));
3471 if (!pRec)
3472 return VERR_NO_MEMORY;
3473 RTLockValidatorRecSharedInitV(pRec, hClass, uSubClass, pvLock, fSignaller, fEnabled, pszNameFmt, va);
3474 return VINF_SUCCESS;
3475}
3476
3477
3478RTDECL(int) RTLockValidatorRecSharedCreate(PRTLOCKVALRECSHRD *ppRec, RTLOCKVALCLASS hClass,
3479 uint32_t uSubClass, void *pvLock, bool fSignaller, bool fEnabled,
3480 const char *pszNameFmt, ...)
3481{
3482 va_list va;
3483 va_start(va, pszNameFmt);
3484 int rc = RTLockValidatorRecSharedCreateV(ppRec, hClass, uSubClass, pvLock, fSignaller, fEnabled, pszNameFmt, va);
3485 va_end(va);
3486 return rc;
3487}
3488
3489
RTDECL(void) RTLockValidatorRecSharedDelete(PRTLOCKVALRECSHRD pRec)
{
    /* Counterpart to RTLockValidatorRecSharedInitV: invalidates the record and
       frees the owner table, but not the record itself (see
       RTLockValidatorRecSharedDestroy). */
    Assert(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC);

    /** @todo Check that it's not on our stack first. Need to make it
     * configurable whether deleting a owned lock is acceptable? */

    /*
     * Flip it into table realloc mode and take the destruction lock.
     * If someone else is already reallocating, briefly cycle the detection
     * lock (which the reallocator also cycles) to let them finish, then retry.
     */
    rtLockValidatorSerializeDestructEnter();
    while (!ASMAtomicCmpXchgBool(&pRec->fReallocating, true, false))
    {
        rtLockValidatorSerializeDestructLeave();

        rtLockValidatorSerializeDetectionEnter();
        rtLockValidatorSerializeDetectionLeave();

        rtLockValidatorSerializeDestructEnter();
    }

    /* Kill the magic first so concurrent readers reject the record. */
    ASMAtomicWriteU32(&pRec->Core.u32Magic, RTLOCKVALRECSHRD_MAGIC_DEAD);
    /* Detach the class reference; released after leaving the lock. */
    RTLOCKVALCLASS hClass;
    ASMAtomicXchgHandle(&pRec->hClass, NIL_RTLOCKVALCLASS, &hClass);
    if (pRec->papOwners)
    {
        /* Detach the owner table before freeing it so nobody can pick up a
           stale pointer. */
        PRTLOCKVALRECSHRDOWN volatile *papOwners = pRec->papOwners;
        ASMAtomicUoWriteNullPtr(&pRec->papOwners);
        ASMAtomicUoWriteU32(&pRec->cAllocated, 0);

        RTMemFree((void *)papOwners);
    }
    if (pRec->pSibling)
        rtLockValidatorUnlinkAllSiblings(&pRec->Core);
    ASMAtomicWriteBool(&pRec->fReallocating, false);

    rtLockValidatorSerializeDestructLeave();

    if (hClass != NIL_RTLOCKVALCLASS)
        RTLockValidatorClassRelease(hClass);
}
3531
3532
3533RTDECL(void) RTLockValidatorRecSharedDestroy(PRTLOCKVALRECSHRD *ppRec)
3534{
3535 PRTLOCKVALRECSHRD pRec = *ppRec;
3536 *ppRec = NULL;
3537 if (pRec)
3538 {
3539 RTLockValidatorRecSharedDelete(pRec);
3540 RTMemFree(pRec);
3541 }
3542}
3543
3544
3545RTDECL(uint32_t) RTLockValidatorRecSharedSetSubClass(PRTLOCKVALRECSHRD pRec, uint32_t uSubClass)
3546{
3547 AssertPtrReturn(pRec, RTLOCKVAL_SUB_CLASS_INVALID);
3548 AssertReturn(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, RTLOCKVAL_SUB_CLASS_INVALID);
3549 AssertReturn( uSubClass >= RTLOCKVAL_SUB_CLASS_USER
3550 || uSubClass == RTLOCKVAL_SUB_CLASS_NONE
3551 || uSubClass == RTLOCKVAL_SUB_CLASS_ANY,
3552 RTLOCKVAL_SUB_CLASS_INVALID);
3553 return ASMAtomicXchgU32(&pRec->uSubClass, uSubClass);
3554}
3555
3556
3557/**
3558 * Locates an owner (thread) in a shared lock record.
3559 *
3560 * @returns Pointer to the owner entry on success, NULL on failure..
3561 * @param pShared The shared lock record.
3562 * @param hThread The thread (owner) to find.
3563 * @param piEntry Where to optionally return the table in index.
3564 * Optional.
3565 */
3566DECLINLINE(PRTLOCKVALRECUNION)
3567rtLockValidatorRecSharedFindOwner(PRTLOCKVALRECSHRD pShared, RTTHREAD hThread, uint32_t *piEntry)
3568{
3569 rtLockValidatorSerializeDetectionEnter();
3570
3571 PRTLOCKVALRECSHRDOWN volatile *papOwners = pShared->papOwners;
3572 if (papOwners)
3573 {
3574 uint32_t const cMax = pShared->cAllocated;
3575 for (uint32_t iEntry = 0; iEntry < cMax; iEntry++)
3576 {
3577 PRTLOCKVALRECUNION pEntry = (PRTLOCKVALRECUNION)rtLockValidatorUoReadSharedOwner(&papOwners[iEntry]);
3578 if (pEntry && pEntry->ShrdOwner.hThread == hThread)
3579 {
3580 rtLockValidatorSerializeDetectionLeave();
3581 if (piEntry)
3582 *piEntry = iEntry;
3583 return pEntry;
3584 }
3585 }
3586 }
3587
3588 rtLockValidatorSerializeDetectionLeave();
3589 return NULL;
3590}
3591
3592
3593RTDECL(int) RTLockValidatorRecSharedCheckOrder(PRTLOCKVALRECSHRD pRec, RTTHREAD hThreadSelf,
3594 PCRTLOCKVALSRCPOS pSrcPos, RTMSINTERVAL cMillies)
3595{
3596 /*
3597 * Validate and adjust input. Quit early if order validation is disabled.
3598 */
3599 PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
3600 AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
3601 if ( !pRecU->Shared.fEnabled
3602 || pRecU->Shared.hClass == NIL_RTLOCKVALCLASS
3603 || pRecU->Shared.hClass->cMsMinOrder == RT_INDEFINITE_WAIT
3604 || pRecU->Shared.hClass->cMsMinOrder > cMillies
3605 )
3606 return VINF_SUCCESS;
3607
3608 if (hThreadSelf == NIL_RTTHREAD)
3609 {
3610 hThreadSelf = RTThreadSelfAutoAdopt();
3611 AssertReturn(hThreadSelf != NIL_RTTHREAD, VERR_SEM_LV_INTERNAL_ERROR);
3612 }
3613 AssertReturn(hThreadSelf->u32Magic == RTTHREADINT_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
3614 Assert(hThreadSelf == RTThreadSelf());
3615
3616 /*
3617 * Detect recursion as it isn't subject to order restrictions.
3618 */
3619 PRTLOCKVALRECUNION pEntry = rtLockValidatorRecSharedFindOwner(&pRecU->Shared, hThreadSelf, NULL);
3620 if (pEntry)
3621 return VINF_SUCCESS;
3622
3623 return rtLockValidatorStackCheckLockingOrder(pRecU->Shared.hClass, pRecU->Shared.uSubClass, hThreadSelf, pRecU, pSrcPos);
3624}
3625
3626
RTDECL(int) RTLockValidatorRecSharedCheckBlocking(PRTLOCKVALRECSHRD pRec, RTTHREAD hThreadSelf,
                                                  PCRTLOCKVALSRCPOS pSrcPos, bool fRecursiveOk, RTMSINTERVAL cMillies,
                                                  RTTHREADSTATE enmSleepState, bool fReallySleeping)
{
    /*
     * Fend off wild life.
     */
    PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
    AssertPtrReturn(pRecU, VERR_SEM_LV_INVALID_PARAMETER);
    AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    if (!pRecU->Shared.fEnabled)
        return VINF_SUCCESS;

    PRTTHREADINT pThreadSelf = hThreadSelf;
    AssertPtrReturn(pThreadSelf, VERR_SEM_LV_INVALID_PARAMETER);
    AssertReturn(pThreadSelf->u32Magic == RTTHREADINT_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    Assert(pThreadSelf == RTThreadSelf());

    AssertReturn(RTTHREAD_IS_SLEEPING(enmSleepState), VERR_SEM_LV_INVALID_PARAMETER);

    RTTHREADSTATE enmThreadState = rtThreadGetState(pThreadSelf);
    if (RT_UNLIKELY(enmThreadState != RTTHREADSTATE_RUNNING))
    {
        AssertReturn(   enmThreadState == RTTHREADSTATE_TERMINATED   /* rtThreadRemove uses locks too */
                     || enmThreadState == RTTHREADSTATE_INITIALIZING /* rtThreadInsert uses locks too */
                     , VERR_SEM_LV_INVALID_PARAMETER);
        enmSleepState = enmThreadState;
    }

    /*
     * Record the location.
     */
    rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pRec, pRecU);
    rtLockValidatorSrcPosCopy(&pThreadSelf->LockValidator.SrcPos, pSrcPos);
    ASMAtomicWriteBool(&pThreadSelf->LockValidator.fInValidator, true);
    pThreadSelf->LockValidator.enmRecState = enmSleepState;
    rtThreadSetState(pThreadSelf, enmSleepState);

    /*
     * Don't do deadlock detection if we're recursing.
     */
    int rc = VINF_SUCCESS;
    /* Signaller records (e.g. event semaphores - presumably; confirm against
       callers) never track recursion, so no owner lookup for them. */
    PRTLOCKVALRECUNION pEntry = !pRecU->Shared.fSignaller
                              ? rtLockValidatorRecSharedFindOwner(&pRecU->Shared, pThreadSelf, NULL)
                              : NULL;
    if (pEntry)
    {
        if (   !fRecursiveOk
            || (   pRec->hClass
                && !pRec->hClass->fRecursionOk)
            )
        {
            rtLockValComplainFirst("Recursion not allowed!", pSrcPos, pThreadSelf, pRecU, true);
            rtLockValComplainPanic();
            rc =  VERR_SEM_LV_NESTED;
        }
    }
    /*
     * Perform deadlock detection, unless the class disables it outright
     * (cMsMinDeadlock == RT_INDEFINITE_WAIT) or the wait is too short.
     */
    else if (   pRec->hClass
             && (   pRec->hClass->cMsMinDeadlock == RT_INDEFINITE_WAIT
                 || pRec->hClass->cMsMinDeadlock > cMillies))
        rc = VINF_SUCCESS;
    else if (!rtLockValidatorIsSimpleNoDeadlockCase(pRecU))
        rc = rtLockValidatorDeadlockDetection(pRecU, pThreadSelf, pSrcPos);

    if (RT_SUCCESS(rc))
        ASMAtomicWriteBool(&pThreadSelf->fReallySleeping, fReallySleeping);
    else
    {
        /* Roll back the sleep state on failure so the thread isn't left
           looking blocked on this lock. */
        rtThreadSetState(pThreadSelf, enmThreadState);
        rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pRec, NULL);
    }
    ASMAtomicWriteBool(&pThreadSelf->LockValidator.fInValidator, false);
    return rc;
}
3704RT_EXPORT_SYMBOL(RTLockValidatorRecSharedCheckBlocking);
3705
3706
3707RTDECL(int) RTLockValidatorRecSharedCheckOrderAndBlocking(PRTLOCKVALRECSHRD pRec, RTTHREAD hThreadSelf,
3708 PCRTLOCKVALSRCPOS pSrcPos, bool fRecursiveOk, RTMSINTERVAL cMillies,
3709 RTTHREADSTATE enmSleepState, bool fReallySleeping)
3710{
3711 int rc = RTLockValidatorRecSharedCheckOrder(pRec, hThreadSelf, pSrcPos, cMillies);
3712 if (RT_SUCCESS(rc))
3713 rc = RTLockValidatorRecSharedCheckBlocking(pRec, hThreadSelf, pSrcPos, fRecursiveOk, cMillies,
3714 enmSleepState, fReallySleeping);
3715 return rc;
3716}
3717RT_EXPORT_SYMBOL(RTLockValidatorRecSharedCheckOrderAndBlocking);
3718
3719
3720/**
3721 * Allocates and initializes an owner entry for the shared lock record.
3722 *
3723 * @returns The new owner entry.
3724 * @param pRec The shared lock record.
3725 * @param pThreadSelf The calling thread and owner. Used for record
3726 * initialization and allocation.
3727 * @param pSrcPos The source position.
3728 */
3729DECLINLINE(PRTLOCKVALRECUNION)
3730rtLockValidatorRecSharedAllocOwner(PRTLOCKVALRECSHRD pRec, PRTTHREADINT pThreadSelf, PCRTLOCKVALSRCPOS pSrcPos)
3731{
3732 PRTLOCKVALRECUNION pEntry;
3733
3734 /*
3735 * Check if the thread has any statically allocated records we can easily
3736 * make use of.
3737 */
3738 unsigned iEntry = ASMBitFirstSetU32(ASMAtomicUoReadU32(&pThreadSelf->LockValidator.bmFreeShrdOwners));
3739 if ( iEntry > 0
3740 && ASMAtomicBitTestAndClear(&pThreadSelf->LockValidator.bmFreeShrdOwners, iEntry - 1))
3741 {
3742 pEntry = (PRTLOCKVALRECUNION)&pThreadSelf->LockValidator.aShrdOwners[iEntry - 1];
3743 Assert(!pEntry->ShrdOwner.fReserved);
3744 pEntry->ShrdOwner.fStaticAlloc = true;
3745 rtThreadGet(pThreadSelf);
3746 }
3747 else
3748 {
3749 pEntry = (PRTLOCKVALRECUNION)RTMemAlloc(sizeof(RTLOCKVALRECSHRDOWN));
3750 if (RT_UNLIKELY(!pEntry))
3751 return NULL;
3752 pEntry->ShrdOwner.fStaticAlloc = false;
3753 }
3754
3755 pEntry->Core.u32Magic = RTLOCKVALRECSHRDOWN_MAGIC;
3756 pEntry->ShrdOwner.cRecursion = 1;
3757 pEntry->ShrdOwner.fReserved = true;
3758 pEntry->ShrdOwner.hThread = pThreadSelf;
3759 pEntry->ShrdOwner.pDown = NULL;
3760 pEntry->ShrdOwner.pSharedRec = pRec;
3761#if HC_ARCH_BITS == 32
3762 pEntry->ShrdOwner.pvReserved = NULL;
3763#endif
3764 if (pSrcPos)
3765 pEntry->ShrdOwner.SrcPos = *pSrcPos;
3766 else
3767 rtLockValidatorSrcPosInit(&pEntry->ShrdOwner.SrcPos);
3768 return pEntry;
3769}
3770
3771
3772/**
3773 * Frees an owner entry allocated by rtLockValidatorRecSharedAllocOwner.
3774 *
3775 * @param pEntry The owner entry.
3776 */
3777DECLINLINE(void) rtLockValidatorRecSharedFreeOwner(PRTLOCKVALRECSHRDOWN pEntry)
3778{
3779 if (pEntry)
3780 {
3781 Assert(pEntry->Core.u32Magic == RTLOCKVALRECSHRDOWN_MAGIC);
3782 ASMAtomicWriteU32(&pEntry->Core.u32Magic, RTLOCKVALRECSHRDOWN_MAGIC_DEAD);
3783
3784 PRTTHREADINT pThread;
3785 ASMAtomicXchgHandle(&pEntry->hThread, NIL_RTTHREAD, &pThread);
3786
3787 Assert(pEntry->fReserved);
3788 pEntry->fReserved = false;
3789
3790 if (pEntry->fStaticAlloc)
3791 {
3792 AssertPtrReturnVoid(pThread);
3793 AssertReturnVoid(pThread->u32Magic == RTTHREADINT_MAGIC);
3794
3795 uintptr_t iEntry = pEntry - &pThread->LockValidator.aShrdOwners[0];
3796 AssertReleaseReturnVoid(iEntry < RT_ELEMENTS(pThread->LockValidator.aShrdOwners));
3797
3798 Assert(!ASMBitTest(&pThread->LockValidator.bmFreeShrdOwners, (int32_t)iEntry));
3799 ASMAtomicBitSet(&pThread->LockValidator.bmFreeShrdOwners, (int32_t)iEntry);
3800
3801 rtThreadRelease(pThread);
3802 }
3803 else
3804 {
3805 rtLockValidatorSerializeDestructEnter();
3806 rtLockValidatorSerializeDestructLeave();
3807
3808 RTMemFree(pEntry);
3809 }
3810 }
3811}
3812
3813
3814/**
3815 * Make more room in the table.
3816 *
3817 * @retval true on success
3818 * @retval false if we're out of memory or running into a bad race condition
3819 * (probably a bug somewhere). No longer holding the lock.
3820 *
3821 * @param pShared The shared lock record.
3822 */
3823static bool rtLockValidatorRecSharedMakeRoom(PRTLOCKVALRECSHRD pShared)
3824{
3825 for (unsigned i = 0; i < 1000; i++)
3826 {
3827 /*
3828 * Switch to the other data access direction.
3829 */
3830 rtLockValidatorSerializeDetectionLeave();
3831 if (i >= 10)
3832 {
3833 Assert(i != 10 && i != 100);
3834 RTThreadSleep(i >= 100);
3835 }
3836 rtLockValidatorSerializeDestructEnter();
3837
3838 /*
3839 * Try grab the privilege to reallocating the table.
3840 */
3841 if ( pShared->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
3842 && ASMAtomicCmpXchgBool(&pShared->fReallocating, true, false))
3843 {
3844 uint32_t cAllocated = pShared->cAllocated;
3845 if (cAllocated < pShared->cEntries)
3846 {
3847 /*
3848 * Ok, still not enough space. Reallocate the table.
3849 */
3850 uint32_t cInc = RT_ALIGN_32(pShared->cEntries - cAllocated, 16);
3851 PRTLOCKVALRECSHRDOWN *papOwners;
3852 papOwners = (PRTLOCKVALRECSHRDOWN *)RTMemRealloc((void *)pShared->papOwners,
3853 (cAllocated + cInc) * sizeof(void *));
3854 if (!papOwners)
3855 {
3856 ASMAtomicWriteBool(&pShared->fReallocating, false);
3857 rtLockValidatorSerializeDestructLeave();
3858 /* RTMemRealloc will assert */
3859 return false;
3860 }
3861
3862 while (cInc-- > 0)
3863 {
3864 papOwners[cAllocated] = NULL;
3865 cAllocated++;
3866 }
3867
3868 ASMAtomicWritePtr(&pShared->papOwners, papOwners);
3869 ASMAtomicWriteU32(&pShared->cAllocated, cAllocated);
3870 }
3871 ASMAtomicWriteBool(&pShared->fReallocating, false);
3872 }
3873 rtLockValidatorSerializeDestructLeave();
3874
3875 rtLockValidatorSerializeDetectionEnter();
3876 if (RT_UNLIKELY(pShared->Core.u32Magic != RTLOCKVALRECSHRD_MAGIC))
3877 break;
3878
3879 if (pShared->cAllocated >= pShared->cEntries)
3880 return true;
3881 }
3882
3883 rtLockValidatorSerializeDetectionLeave();
3884 AssertFailed(); /* too many iterations or destroyed while racing. */
3885 return false;
3886}
3887
3888
/**
 * Adds an owner entry to a shared lock record.
 *
 * @returns true on success, false on serious race or if we're out of memory.
 * @param   pShared             The shared lock record.
 * @param   pEntry              The owner entry.
 */
DECLINLINE(bool) rtLockValidatorRecSharedAddOwner(PRTLOCKVALRECSHRD pShared, PRTLOCKVALRECSHRDOWN pEntry)
{
    rtLockValidatorSerializeDetectionEnter();
    if (RT_LIKELY(pShared->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC)) /* paranoia */
    {
        /* Bump the entry count first; grow the owner table if it no longer fits. */
        if (   ASMAtomicIncU32(&pShared->cEntries) > pShared->cAllocated /** @todo add fudge */
            && !rtLockValidatorRecSharedMakeRoom(pShared))
            return false; /* the worker left the lock */

        /* Claim the first free (NULL) slot in the owner table via CAS. */
        PRTLOCKVALRECSHRDOWN volatile *papOwners = pShared->papOwners;
        uint32_t const                 cMax      = pShared->cAllocated;
        for (unsigned i = 0; i < 100; i++)
        {
            for (uint32_t iEntry = 0; iEntry < cMax; iEntry++)
            {
                if (ASMAtomicCmpXchgPtr(&papOwners[iEntry], pEntry, NULL))
                {
                    rtLockValidatorSerializeDetectionLeave();
                    return true;
                }
            }
            Assert(i != 25); /* a free slot should turn up long before this many scans */
        }
        AssertFailed();
    }
    rtLockValidatorSerializeDetectionLeave();
    return false;
}
3924
3925
/**
 * Remove an owner entry from a shared lock record and free it.
 *
 * @param   pShared             The shared lock record.
 * @param   pEntry              The owner entry to remove.
 * @param   iEntry              The last known index.
 */
DECLINLINE(void) rtLockValidatorRecSharedRemoveAndFreeOwner(PRTLOCKVALRECSHRD pShared, PRTLOCKVALRECSHRDOWN pEntry,
                                                            uint32_t iEntry)
{
    /*
     * Remove it from the table.
     */
    rtLockValidatorSerializeDetectionEnter();
    AssertReturnVoidStmt(pShared->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, rtLockValidatorSerializeDetectionLeave());
    if (RT_UNLIKELY(   iEntry >= pShared->cAllocated
                    || !ASMAtomicCmpXchgPtr(&pShared->papOwners[iEntry], NULL, pEntry)))
    {
        /* this shouldn't happen yet... */
        AssertFailed();
        /* The hinted index was stale (table reallocated?); scan for the entry. */
        PRTLOCKVALRECSHRDOWN volatile *papOwners = pShared->papOwners;
        uint32_t const                 cMax      = pShared->cAllocated;
        for (iEntry = 0; iEntry < cMax; iEntry++)
            if (ASMAtomicCmpXchgPtr(&papOwners[iEntry], NULL, pEntry))
                break;
        AssertReturnVoidStmt(iEntry < cMax, rtLockValidatorSerializeDetectionLeave());
    }
    uint32_t cNow = ASMAtomicDecU32(&pShared->cEntries);
    Assert(!(cNow & RT_BIT_32(31))); NOREF(cNow); /* catch underflow (bit 31 set after decrementing 0) */
    rtLockValidatorSerializeDetectionLeave();

    /*
     * Successfully removed, now free it.
     */
    rtLockValidatorRecSharedFreeOwner(pEntry);
}
3962
3963
/**
 * Resets the owner table of a shared (signaller) lock record.
 *
 * Frees every current owner entry and, when @a hThread is not NIL, installs
 * that thread as the sole owner.  Only valid for signaller records
 * (pRec->fSignaller is asserted).
 *
 * @param   pRec                The shared lock record.
 * @param   hThread             The new owner thread, or NIL_RTTHREAD for none.
 * @param   pSrcPos             The source position of the caller.  Optional.
 */
RTDECL(void) RTLockValidatorRecSharedResetOwner(PRTLOCKVALRECSHRD pRec, RTTHREAD hThread, PCRTLOCKVALSRCPOS pSrcPos)
{
    AssertReturnVoid(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC);
    if (!pRec->fEnabled)
        return;
    AssertReturnVoid(hThread == NIL_RTTHREAD || hThread->u32Magic == RTTHREADINT_MAGIC);
    AssertReturnVoid(pRec->fSignaller);

    /*
     * Free all current owners.
     */
    rtLockValidatorSerializeDetectionEnter();
    while (ASMAtomicUoReadU32(&pRec->cEntries) > 0)
    {
        AssertReturnVoidStmt(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, rtLockValidatorSerializeDetectionLeave());
        uint32_t                       iEntry     = 0;
        uint32_t                       cEntries   = pRec->cAllocated;
        PRTLOCKVALRECSHRDOWN volatile *papEntries = pRec->papOwners;
        while (iEntry < cEntries)
        {
            /* Detach the entry atomically, then drop the detection lock while
               freeing it (the free path takes other locks). */
            PRTLOCKVALRECSHRDOWN pEntry = ASMAtomicXchgPtrT(&papEntries[iEntry], NULL, PRTLOCKVALRECSHRDOWN);
            if (pEntry)
            {
                ASMAtomicDecU32(&pRec->cEntries);
                rtLockValidatorSerializeDetectionLeave();

                rtLockValidatorRecSharedFreeOwner(pEntry);

                /* Re-take the lock and reload table pointer/size -- it may
                   have been reallocated while we were away. */
                rtLockValidatorSerializeDetectionEnter();
                if (ASMAtomicUoReadU32(&pRec->cEntries) == 0)
                    break;
                cEntries   = pRec->cAllocated;
                papEntries = pRec->papOwners;
            }
            iEntry++;
        }
    }
    rtLockValidatorSerializeDetectionLeave();

    if (hThread != NIL_RTTHREAD)
    {
        /*
         * Allocate a new owner entry and insert it into the table.
         */
        PRTLOCKVALRECUNION pEntry = rtLockValidatorRecSharedAllocOwner(pRec, hThread, pSrcPos);
        if (   pEntry
            && !rtLockValidatorRecSharedAddOwner(pRec, &pEntry->ShrdOwner))
            rtLockValidatorRecSharedFreeOwner(&pEntry->ShrdOwner);
    }
}
RT_EXPORT_SYMBOL(RTLockValidatorRecSharedResetOwner);
4015
4016
/**
 * Adds an owner to a shared locking record.
 *
 * If the thread already has an entry in the owner table, its recursion count
 * is bumped instead of adding a second entry.
 *
 * @param   pRec                The shared lock record.
 * @param   hThread             The thread to add; NIL_RTTHREAD means the
 *                              calling thread (auto-adopted if necessary).
 * @param   pSrcPos             The source position of the caller.  Optional.
 */
RTDECL(void) RTLockValidatorRecSharedAddOwner(PRTLOCKVALRECSHRD pRec, RTTHREAD hThread, PCRTLOCKVALSRCPOS pSrcPos)
{
    AssertReturnVoid(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC);
    if (!pRec->fEnabled)
        return;
    if (hThread == NIL_RTTHREAD)
    {
        hThread = RTThreadSelfAutoAdopt();
        AssertReturnVoid(hThread != NIL_RTTHREAD);
    }
    AssertReturnVoid(hThread->u32Magic == RTTHREADINT_MAGIC);

    /*
     * Recursive?
     *
     * Note! This code can be optimized to try avoid scanning the table on
     *       insert. However, that's annoying work that makes the code big,
     *       so it can wait til later sometime.
     */
    PRTLOCKVALRECUNION pEntry = rtLockValidatorRecSharedFindOwner(pRec, hThread, NULL);
    if (pEntry)
    {
        Assert(!pRec->fSignaller); /* recursion isn't expected on signaller records */
        pEntry->ShrdOwner.cRecursion++;
        rtLockValidatorStackPushRecursion(hThread, pEntry, pSrcPos);
        return;
    }

    /*
     * Allocate a new owner entry and insert it into the table.
     */
    pEntry = rtLockValidatorRecSharedAllocOwner(pRec, hThread, pSrcPos);
    if (pEntry)
    {
        if (rtLockValidatorRecSharedAddOwner(pRec, &pEntry->ShrdOwner))
        {
            /* Only non-signaller records participate in the lock stack. */
            if (!pRec->fSignaller)
                rtLockValidatorStackPush(hThread, pEntry);
        }
        else
            rtLockValidatorRecSharedFreeOwner(&pEntry->ShrdOwner);
    }
}
RT_EXPORT_SYMBOL(RTLockValidatorRecSharedAddOwner);
4061
4062
/**
 * Removes an owner from a shared locking record.
 *
 * Unwinds one level of recursion; the owner entry is removed from the table
 * and freed when its recursion count reaches zero.
 *
 * @param   pRec                The shared lock record.
 * @param   hThread             The thread to remove; NIL_RTTHREAD means the
 *                              calling thread (auto-adopted if necessary).
 */
RTDECL(void) RTLockValidatorRecSharedRemoveOwner(PRTLOCKVALRECSHRD pRec, RTTHREAD hThread)
{
    AssertReturnVoid(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC);
    if (!pRec->fEnabled)
        return;
    if (hThread == NIL_RTTHREAD)
    {
        hThread = RTThreadSelfAutoAdopt();
        AssertReturnVoid(hThread != NIL_RTTHREAD);
    }
    AssertReturnVoid(hThread->u32Magic == RTTHREADINT_MAGIC);

    /*
     * Find the entry hope it's a recursive one.
     */
    uint32_t iEntry = UINT32_MAX; /* shuts up gcc */
    PRTLOCKVALRECUNION pEntry = rtLockValidatorRecSharedFindOwner(pRec, hThread, &iEntry);
    AssertReturnVoid(pEntry);
    AssertReturnVoid(pEntry->ShrdOwner.cRecursion > 0);

    uint32_t c = --pEntry->ShrdOwner.cRecursion;
    if (c == 0)
    {
        /* Last reference: pop it off the lock stack and free the entry. */
        if (!pRec->fSignaller)
            rtLockValidatorStackPop(hThread, (PRTLOCKVALRECUNION)pEntry);
        rtLockValidatorRecSharedRemoveAndFreeOwner(pRec, &pEntry->ShrdOwner, iEntry);
    }
    else
    {
        /* Just unwind one level of recursion. */
        Assert(!pRec->fSignaller);
        rtLockValidatorStackPopRecursion(hThread, pEntry);
    }
}
RT_EXPORT_SYMBOL(RTLockValidatorRecSharedRemoveOwner);
4097
4098
4099RTDECL(bool) RTLockValidatorRecSharedIsOwner(PRTLOCKVALRECSHRD pRec, RTTHREAD hThread)
4100{
4101 /* Validate and resolve input. */
4102 AssertReturn(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, false);
4103 if (!pRec->fEnabled)
4104 return false;
4105 if (hThread == NIL_RTTHREAD)
4106 {
4107 hThread = RTThreadSelfAutoAdopt();
4108 AssertReturn(hThread != NIL_RTTHREAD, false);
4109 }
4110 AssertReturn(hThread->u32Magic == RTTHREADINT_MAGIC, false);
4111
4112 /* Do the job. */
4113 PRTLOCKVALRECUNION pEntry = rtLockValidatorRecSharedFindOwner(pRec, hThread, NULL);
4114 return pEntry != NULL;
4115}
4116RT_EXPORT_SYMBOL(RTLockValidatorRecSharedIsOwner);
4117
4118
/**
 * Checks that the calling thread owns the shared lock and records the release
 * (or unwinds one level of recursion).
 *
 * @returns VINF_SUCCESS on success.
 * @retval  VERR_SEM_LV_INVALID_PARAMETER on bad record or thread magic.
 * @retval  VERR_SEM_LV_INTERNAL_ERROR if self-adoption fails.
 * @retval  VERR_SEM_LV_NOT_OWNER if the thread isn't in the owner table.
 *          May also propagate a status from the release order check.
 *
 * @param   pRec                The shared lock record.
 * @param   hThreadSelf         The calling thread; NIL_RTTHREAD is resolved
 *                              via RTThreadSelfAutoAdopt().
 */
RTDECL(int) RTLockValidatorRecSharedCheckAndRelease(PRTLOCKVALRECSHRD pRec, RTTHREAD hThreadSelf)
{
    AssertReturn(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    if (!pRec->fEnabled)
        return VINF_SUCCESS;
    if (hThreadSelf == NIL_RTTHREAD)
    {
        hThreadSelf = RTThreadSelfAutoAdopt();
        AssertReturn(hThreadSelf != NIL_RTTHREAD, VERR_SEM_LV_INTERNAL_ERROR);
    }
    Assert(hThreadSelf == RTThreadSelf());
    AssertReturn(hThreadSelf->u32Magic == RTTHREADINT_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);

    /*
     * Locate the entry for this thread in the table.
     */
    uint32_t iEntry = 0;
    PRTLOCKVALRECUNION pEntry = rtLockValidatorRecSharedFindOwner(pRec, hThreadSelf, &iEntry);
    if (RT_UNLIKELY(!pEntry))
    {
        rtLockValComplainFirst("Not owner (shared)!", NULL, hThreadSelf, (PRTLOCKVALRECUNION)pRec, true);
        rtLockValComplainPanic();
        return VERR_SEM_LV_NOT_OWNER;
    }

    /*
     * Check the release order.
     */
    if (   pRec->hClass != NIL_RTLOCKVALCLASS
        && pRec->hClass->fStrictReleaseOrder
        && pRec->hClass->cMsMinOrder != RT_INDEFINITE_WAIT
       )
    {
        int rc = rtLockValidatorStackCheckReleaseOrder(hThreadSelf, (PRTLOCKVALRECUNION)pEntry);
        if (RT_FAILURE(rc))
            return rc;
    }

    /*
     * Release the ownership or unwind a level of recursion.
     */
    Assert(pEntry->ShrdOwner.cRecursion > 0);
    uint32_t c = --pEntry->ShrdOwner.cRecursion;
    if (c == 0)
    {
        rtLockValidatorStackPop(hThreadSelf, pEntry);
        rtLockValidatorRecSharedRemoveAndFreeOwner(pRec, &pEntry->ShrdOwner, iEntry);
    }
    else
        rtLockValidatorStackPopRecursion(hThreadSelf, pEntry);

    return VINF_SUCCESS;
}
4172
4173
4174RTDECL(int) RTLockValidatorRecSharedCheckSignaller(PRTLOCKVALRECSHRD pRec, RTTHREAD hThreadSelf)
4175{
4176 AssertReturn(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
4177 if (!pRec->fEnabled)
4178 return VINF_SUCCESS;
4179 if (hThreadSelf == NIL_RTTHREAD)
4180 {
4181 hThreadSelf = RTThreadSelfAutoAdopt();
4182 AssertReturn(hThreadSelf != NIL_RTTHREAD, VERR_SEM_LV_INTERNAL_ERROR);
4183 }
4184 Assert(hThreadSelf == RTThreadSelf());
4185 AssertReturn(hThreadSelf->u32Magic == RTTHREADINT_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
4186
4187 /*
4188 * Locate the entry for this thread in the table.
4189 */
4190 uint32_t iEntry = 0;
4191 PRTLOCKVALRECUNION pEntry = rtLockValidatorRecSharedFindOwner(pRec, hThreadSelf, &iEntry);
4192 if (RT_UNLIKELY(!pEntry))
4193 {
4194 rtLockValComplainFirst("Invalid signaller!", NULL, hThreadSelf, (PRTLOCKVALRECUNION)pRec, true);
4195 rtLockValComplainPanic();
4196 return VERR_SEM_LV_NOT_SIGNALLER;
4197 }
4198 return VINF_SUCCESS;
4199}
4200
4201
4202RTDECL(int32_t) RTLockValidatorWriteLockGetCount(RTTHREAD Thread)
4203{
4204 if (Thread == NIL_RTTHREAD)
4205 return 0;
4206
4207 PRTTHREADINT pThread = rtThreadGet(Thread);
4208 if (!pThread)
4209 return VERR_INVALID_HANDLE;
4210 int32_t cWriteLocks = ASMAtomicReadS32(&pThread->LockValidator.cWriteLocks);
4211 rtThreadRelease(pThread);
4212 return cWriteLocks;
4213}
4214RT_EXPORT_SYMBOL(RTLockValidatorWriteLockGetCount);
4215
4216
4217RTDECL(void) RTLockValidatorWriteLockInc(RTTHREAD Thread)
4218{
4219 PRTTHREADINT pThread = rtThreadGet(Thread);
4220 AssertReturnVoid(pThread);
4221 ASMAtomicIncS32(&pThread->LockValidator.cWriteLocks);
4222 rtThreadRelease(pThread);
4223}
4224RT_EXPORT_SYMBOL(RTLockValidatorWriteLockInc);
4225
4226
4227RTDECL(void) RTLockValidatorWriteLockDec(RTTHREAD Thread)
4228{
4229 PRTTHREADINT pThread = rtThreadGet(Thread);
4230 AssertReturnVoid(pThread);
4231 ASMAtomicDecS32(&pThread->LockValidator.cWriteLocks);
4232 rtThreadRelease(pThread);
4233}
4234RT_EXPORT_SYMBOL(RTLockValidatorWriteLockDec);
4235
4236
4237RTDECL(int32_t) RTLockValidatorReadLockGetCount(RTTHREAD Thread)
4238{
4239 if (Thread == NIL_RTTHREAD)
4240 return 0;
4241
4242 PRTTHREADINT pThread = rtThreadGet(Thread);
4243 if (!pThread)
4244 return VERR_INVALID_HANDLE;
4245 int32_t cReadLocks = ASMAtomicReadS32(&pThread->LockValidator.cReadLocks);
4246 rtThreadRelease(pThread);
4247 return cReadLocks;
4248}
4249RT_EXPORT_SYMBOL(RTLockValidatorReadLockGetCount);
4250
4251
4252RTDECL(void) RTLockValidatorReadLockInc(RTTHREAD Thread)
4253{
4254 PRTTHREADINT pThread = rtThreadGet(Thread);
4255 Assert(pThread);
4256 ASMAtomicIncS32(&pThread->LockValidator.cReadLocks);
4257 rtThreadRelease(pThread);
4258}
4259RT_EXPORT_SYMBOL(RTLockValidatorReadLockInc);
4260
4261
4262RTDECL(void) RTLockValidatorReadLockDec(RTTHREAD Thread)
4263{
4264 PRTTHREADINT pThread = rtThreadGet(Thread);
4265 Assert(pThread);
4266 ASMAtomicDecS32(&pThread->LockValidator.cReadLocks);
4267 rtThreadRelease(pThread);
4268}
4269RT_EXPORT_SYMBOL(RTLockValidatorReadLockDec);
4270
4271
/**
 * Queries which lock (if any) the given thread is currently blocking on.
 *
 * @returns The hLock value from the lock record the thread is sleeping on, or
 *          NULL if it isn't sleeping on a known record (or its state changed
 *          while we were looking).
 * @param   hThread             The thread in question.
 */
RTDECL(void *) RTLockValidatorQueryBlocking(RTTHREAD hThread)
{
    void        *pvLock  = NULL;
    PRTTHREADINT pThread = rtThreadGet(hThread);
    if (pThread)
    {
        RTTHREADSTATE enmState = rtThreadGetState(pThread);
        if (RTTHREAD_IS_SLEEPING(enmState))
        {
            rtLockValidatorSerializeDetectionEnter();

            /* Re-check the state now that we hold the detection lock. */
            enmState = rtThreadGetState(pThread);
            if (RTTHREAD_IS_SLEEPING(enmState))
            {
                PRTLOCKVALRECUNION pRec = rtLockValidatorReadRecUnionPtr(&pThread->LockValidator.pRec);
                if (pRec)
                {
                    switch (pRec->Core.u32Magic)
                    {
                        case RTLOCKVALRECEXCL_MAGIC:
                            pvLock = pRec->Excl.hLock;
                            break;

                        case RTLOCKVALRECSHRDOWN_MAGIC:
                            /* Owner entry: chase the pointer to the shared record. */
                            pRec = (PRTLOCKVALRECUNION)pRec->ShrdOwner.pSharedRec;
                            if (!pRec || pRec->Core.u32Magic != RTLOCKVALRECSHRD_MAGIC)
                                break;
                            RT_FALL_THRU();
                        case RTLOCKVALRECSHRD_MAGIC:
                            pvLock = pRec->Shared.hLock;
                            break;
                    }
                    /* Discard the result if the thread woke up meanwhile. */
                    if (RTThreadGetState(pThread) != enmState)
                        pvLock = NULL;
                }
            }

            rtLockValidatorSerializeDetectionLeave();
        }
        rtThreadRelease(pThread);
    }
    return pvLock;
}
RT_EXPORT_SYMBOL(RTLockValidatorQueryBlocking);
4316
4317
4318RTDECL(bool) RTLockValidatorIsBlockedThreadInValidator(RTTHREAD hThread)
4319{
4320 bool fRet = false;
4321 PRTTHREADINT pThread = rtThreadGet(hThread);
4322 if (pThread)
4323 {
4324 fRet = ASMAtomicReadBool(&pThread->LockValidator.fInValidator);
4325 rtThreadRelease(pThread);
4326 }
4327 return fRet;
4328}
4329RT_EXPORT_SYMBOL(RTLockValidatorIsBlockedThreadInValidator);
4330
4331
4332RTDECL(bool) RTLockValidatorHoldsLocksInClass(RTTHREAD hCurrentThread, RTLOCKVALCLASS hClass)
4333{
4334 bool fRet = false;
4335 if (hCurrentThread == NIL_RTTHREAD)
4336 hCurrentThread = RTThreadSelf();
4337 else
4338 Assert(hCurrentThread == RTThreadSelf());
4339 PRTTHREADINT pThread = rtThreadGet(hCurrentThread);
4340 if (pThread)
4341 {
4342 if (hClass != NIL_RTLOCKVALCLASS)
4343 {
4344 PRTLOCKVALRECUNION pCur = rtLockValidatorReadRecUnionPtr(&pThread->LockValidator.pStackTop);
4345 while (RT_VALID_PTR(pCur) && !fRet)
4346 {
4347 switch (pCur->Core.u32Magic)
4348 {
4349 case RTLOCKVALRECEXCL_MAGIC:
4350 fRet = pCur->Excl.hClass == hClass;
4351 pCur = rtLockValidatorReadRecUnionPtr(&pCur->Excl.pDown);
4352 break;
4353 case RTLOCKVALRECSHRDOWN_MAGIC:
4354 fRet = RT_VALID_PTR(pCur->ShrdOwner.pSharedRec)
4355 && pCur->ShrdOwner.pSharedRec->hClass == hClass;
4356 pCur = rtLockValidatorReadRecUnionPtr(&pCur->ShrdOwner.pDown);
4357 break;
4358 case RTLOCKVALRECNEST_MAGIC:
4359 switch (pCur->Nest.pRec->Core.u32Magic)
4360 {
4361 case RTLOCKVALRECEXCL_MAGIC:
4362 fRet = pCur->Nest.pRec->Excl.hClass == hClass;
4363 break;
4364 case RTLOCKVALRECSHRDOWN_MAGIC:
4365 fRet = RT_VALID_PTR(pCur->ShrdOwner.pSharedRec)
4366 && pCur->Nest.pRec->ShrdOwner.pSharedRec->hClass == hClass;
4367 break;
4368 }
4369 pCur = rtLockValidatorReadRecUnionPtr(&pCur->Nest.pDown);
4370 break;
4371 default:
4372 pCur = NULL;
4373 break;
4374 }
4375 }
4376 }
4377
4378 rtThreadRelease(pThread);
4379 }
4380 return fRet;
4381}
4382RT_EXPORT_SYMBOL(RTLockValidatorHoldsLocksInClass);
4383
4384
4385RTDECL(bool) RTLockValidatorHoldsLocksInSubClass(RTTHREAD hCurrentThread, RTLOCKVALCLASS hClass, uint32_t uSubClass)
4386{
4387 bool fRet = false;
4388 if (hCurrentThread == NIL_RTTHREAD)
4389 hCurrentThread = RTThreadSelf();
4390 else
4391 Assert(hCurrentThread == RTThreadSelf());
4392 PRTTHREADINT pThread = rtThreadGet(hCurrentThread);
4393 if (pThread)
4394 {
4395 if (hClass != NIL_RTLOCKVALCLASS)
4396 {
4397 PRTLOCKVALRECUNION pCur = rtLockValidatorReadRecUnionPtr(&pThread->LockValidator.pStackTop);
4398 while (RT_VALID_PTR(pCur) && !fRet)
4399 {
4400 switch (pCur->Core.u32Magic)
4401 {
4402 case RTLOCKVALRECEXCL_MAGIC:
4403 fRet = pCur->Excl.hClass == hClass
4404 && pCur->Excl.uSubClass == uSubClass;
4405 pCur = rtLockValidatorReadRecUnionPtr(&pCur->Excl.pDown);
4406 break;
4407 case RTLOCKVALRECSHRDOWN_MAGIC:
4408 fRet = RT_VALID_PTR(pCur->ShrdOwner.pSharedRec)
4409 && pCur->ShrdOwner.pSharedRec->hClass == hClass
4410 && pCur->ShrdOwner.pSharedRec->uSubClass == uSubClass;
4411 pCur = rtLockValidatorReadRecUnionPtr(&pCur->ShrdOwner.pDown);
4412 break;
4413 case RTLOCKVALRECNEST_MAGIC:
4414 switch (pCur->Nest.pRec->Core.u32Magic)
4415 {
4416 case RTLOCKVALRECEXCL_MAGIC:
4417 fRet = pCur->Nest.pRec->Excl.hClass == hClass
4418 && pCur->Nest.pRec->Excl.uSubClass == uSubClass;
4419 break;
4420 case RTLOCKVALRECSHRDOWN_MAGIC:
4421 fRet = RT_VALID_PTR(pCur->ShrdOwner.pSharedRec)
4422 && pCur->Nest.pRec->ShrdOwner.pSharedRec->hClass == hClass
4423 && pCur->Nest.pRec->ShrdOwner.pSharedRec->uSubClass == uSubClass;
4424 break;
4425 }
4426 pCur = rtLockValidatorReadRecUnionPtr(&pCur->Nest.pDown);
4427 break;
4428 default:
4429 pCur = NULL;
4430 break;
4431 }
4432 }
4433 }
4434
4435 rtThreadRelease(pThread);
4436 }
4437 return fRet;
4438}
4439RT_EXPORT_SYMBOL(RTLockValidatorHoldsLocksInClass);
4440
4441
4442RTDECL(bool) RTLockValidatorSetEnabled(bool fEnabled)
4443{
4444 return ASMAtomicXchgBool(&g_fLockValidatorEnabled, fEnabled);
4445}
4446RT_EXPORT_SYMBOL(RTLockValidatorSetEnabled);
4447
4448
4449RTDECL(bool) RTLockValidatorIsEnabled(void)
4450{
4451 return ASMAtomicUoReadBool(&g_fLockValidatorEnabled);
4452}
4453RT_EXPORT_SYMBOL(RTLockValidatorIsEnabled);
4454
4455
4456RTDECL(bool) RTLockValidatorSetQuiet(bool fQuiet)
4457{
4458 return ASMAtomicXchgBool(&g_fLockValidatorQuiet, fQuiet);
4459}
4460RT_EXPORT_SYMBOL(RTLockValidatorSetQuiet);
4461
4462
4463RTDECL(bool) RTLockValidatorIsQuiet(void)
4464{
4465 return ASMAtomicUoReadBool(&g_fLockValidatorQuiet);
4466}
4467RT_EXPORT_SYMBOL(RTLockValidatorIsQuiet);
4468
4469
4470RTDECL(bool) RTLockValidatorSetMayPanic(bool fMayPanic)
4471{
4472 return ASMAtomicXchgBool(&g_fLockValidatorMayPanic, fMayPanic);
4473}
4474RT_EXPORT_SYMBOL(RTLockValidatorSetMayPanic);
4475
4476
4477RTDECL(bool) RTLockValidatorMayPanic(void)
4478{
4479 return ASMAtomicUoReadBool(&g_fLockValidatorMayPanic);
4480}
4481RT_EXPORT_SYMBOL(RTLockValidatorMayPanic);
4482
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette