VirtualBox

source: vbox/trunk/src/VBox/Runtime/common/misc/lockvalidator.cpp@ 25664

Last change on this file since 25664 was 25662, checked in by vboxsync, 15 years ago

lockvalidator.cpp: Fixed a bug in rtLockValidatorDdDoDetection where we would end up spinning forever.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 71.5 KB
Line 
1/* $Id: lockvalidator.cpp 25662 2010-01-06 02:33:18Z vboxsync $ */
2/** @file
3 * IPRT - Lock Validator.
4 */
5
6/*
7 * Copyright (C) 2009 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 *
26 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
27 * Clara, CA 95054 USA or visit http://www.sun.com if you need
28 * additional information or have any questions.
29 */
30
31/*******************************************************************************
32* Header Files *
33*******************************************************************************/
34#include <iprt/lockvalidator.h>
35#include "internal/iprt.h"
36
37#include <iprt/asm.h>
38#include <iprt/assert.h>
39#include <iprt/err.h>
40#include <iprt/mem.h>
41#include <iprt/once.h>
42#include <iprt/semaphore.h>
43#include <iprt/string.h>
44#include <iprt/thread.h>
45
46#include "internal/lockvalidator.h"
47#include "internal/magics.h"
48#include "internal/thread.h"
49
50
51/*******************************************************************************
52* Structures and Typedefs *
53*******************************************************************************/
/**
 * Deadlock detection stack entry.
 *
 * One entry is pushed for each lock record the detection walk descends
 * through; the fields capture enough state to both resume the walk when
 * unwinding and to re-validate the chain in rtLockValidatorDdVerifyDeadlock.
 */
typedef struct RTLOCKVALDDENTRY
{
    /** The current record. */
    PRTLOCKVALRECUNION pRec;
    /** The current entry number if pRec is a shared one.
     * UINT32_MAX when pRec is not a shared record. */
    uint32_t iEntry;
    /** The thread state of the thread we followed to get to pFirstSibling.
     * This is only used for validating a deadlock stack. */
    RTTHREADSTATE enmState;
    /** The thread we followed to get to pFirstSibling.
     * This is only used for validating a deadlock stack. */
    PRTTHREADINT pThread;
    /** What pThread is waiting on, i.e. where we entered the circular list of
     * siblings.  This is used for validating a deadlock stack as well as
     * terminating the sibling walk. */
    PRTLOCKVALRECUNION pFirstSibling;
} RTLOCKVALDDENTRY;
74
75
/**
 * Deadlock detection stack.
 *
 * Fixed-size; rtLockValidatorDdHandleStackOverflow deals with walks deeper
 * than 32 levels.
 */
typedef struct RTLOCKVALDDSTACK
{
    /** The number stack entries. */
    uint32_t c;
    /** The stack entries. */
    RTLOCKVALDDENTRY a[32];
} RTLOCKVALDDSTACK;
/** Pointer to a deadlock detection stack. */
typedef RTLOCKVALDDSTACK *PRTLOCKVALDDSTACK;
88
89
90/*******************************************************************************
91* Defined Constants And Macros *
92*******************************************************************************/
/** Macro that asserts that a pointer is aligned correctly.
 * Only used when fighting bugs.
 *
 * Both arms expand to a single do/while(0) statement so the macro is safe in
 * un-braced if/else bodies; the old enabled arm ended in a stray semicolon,
 * which made `RTLOCKVAL_ASSERT_PTR_ALIGN(p);` expand to two statements. */
#if 1
# define RTLOCKVAL_ASSERT_PTR_ALIGN(p) \
    do { AssertMsg(!((uintptr_t)(p) & (sizeof(uintptr_t) - 1)), ("%p\n", (p))); } while (0)
#else
# define RTLOCKVAL_ASSERT_PTR_ALIGN(p) do { } while (0)
#endif
101
102
103/*******************************************************************************
104* Global Variables *
105*******************************************************************************/
/** Serializing object destruction and deadlock detection.
 *
 * This makes sure that none of the memory examined by the deadlock detection
 * code will become invalid (reused for other purposes or made not present)
 * while the detection is in progress.
 *
 * NS: RTLOCKVALREC*, RTTHREADINT and RTLOCKVALDRECSHRD::papOwners destruction.
 * EW: Deadlock detection and some related activities.
 *
 * Lazily created by RTLockValidatorRecExclInit; NIL until then, and all the
 * enter/leave wrappers below tolerate that. */
static RTSEMXROADS g_hLockValidatorXRoads = NIL_RTSEMXROADS;
/** Whether the lock validator is enabled or disabled.
 * Only applies to new locks.  */
static bool volatile g_fLockValidatorEnabled = true;
/** Set if the lock validator is quiet.
 * Defaults to noisy in strict (debug) builds, quiet otherwise. */
#ifdef RT_STRICT
static bool volatile g_fLockValidatorQuiet = false;
#else
static bool volatile g_fLockValidatorQuiet = true;
#endif
/** Set if the lock validator may panic.
 * Defaults to panicking in strict (debug) builds only. */
#ifdef RT_STRICT
static bool volatile g_fLockValidatorMayPanic = true;
#else
static bool volatile g_fLockValidatorMayPanic = false;
#endif
131
132
/** Wrapper around ASMAtomicReadPtr.
 *
 * Atomically reads a record-union pointer field and asserts that the value is
 * pointer-aligned (debugging aid).
 *
 * @returns The current value of *ppRec.
 * @param   ppRec   The volatile pointer field to read.
 */
DECL_FORCE_INLINE(PRTLOCKVALRECUNION) rtLockValidatorReadRecUnionPtr(PRTLOCKVALRECUNION volatile *ppRec)
{
    PRTLOCKVALRECUNION p = (PRTLOCKVALRECUNION)ASMAtomicReadPtr((void * volatile *)ppRec);
    RTLOCKVAL_ASSERT_PTR_ALIGN(p);
    return p;
}
140
141
/** Wrapper around ASMAtomicWritePtr.
 *
 * Atomically writes a record-union pointer field, asserting alignment of the
 * new value first (debugging aid).
 *
 * @param   ppRec    The volatile pointer field to update.
 * @param   pRecNew  The new value.
 */
DECL_FORCE_INLINE(void) rtLockValidatorWriteRecUnionPtr(PRTLOCKVALRECUNION volatile *ppRec, PRTLOCKVALRECUNION pRecNew)
{
    RTLOCKVAL_ASSERT_PTR_ALIGN(pRecNew);
    ASMAtomicWritePtr((void * volatile *)ppRec, pRecNew);
}
148
149
/** Wrapper around ASMAtomicReadPtr.
 *
 * Atomically reads a thread handle field as the internal thread structure
 * pointer, asserting alignment (debugging aid).
 *
 * @returns The thread structure pointer currently in *phThread.
 * @param   phThread    The volatile thread handle field to read.
 */
DECL_FORCE_INLINE(PRTTHREADINT) rtLockValidatorReadThreadHandle(RTTHREAD volatile *phThread)
{
    PRTTHREADINT p = (PRTTHREADINT)ASMAtomicReadPtr((void * volatile *)phThread);
    RTLOCKVAL_ASSERT_PTR_ALIGN(p);
    return p;
}
157
158
/** Wrapper around ASMAtomicUoReadPtr.
 *
 * Unordered atomic read of a shared-owner slot pointer; used when scanning
 * owner tables where strict ordering is not required.
 *
 * @returns The owner record pointer currently in *ppOwner (may be NULL).
 * @param   ppOwner     The volatile owner slot to read.
 */
DECL_FORCE_INLINE(PRTLOCKVALRECSHRDOWN) rtLockValidatorUoReadSharedOwner(PRTLOCKVALRECSHRDOWN volatile *ppOwner)
{
    PRTLOCKVALRECSHRDOWN p = (PRTLOCKVALRECSHRDOWN)ASMAtomicUoReadPtr((void * volatile *)ppOwner);
    RTLOCKVAL_ASSERT_PTR_ALIGN(p);
    return p;
}
166
167
168/**
169 * Reads a volatile thread handle field and returns the thread name.
170 *
171 * @returns Thread name (read only).
172 * @param phThread The thread handle field.
173 */
174static const char *rtLockValidatorNameThreadHandle(RTTHREAD volatile *phThread)
175{
176 PRTTHREADINT pThread = rtLockValidatorReadThreadHandle(phThread);
177 if (!pThread)
178 return "<NIL>";
179 if (!VALID_PTR(pThread))
180 return "<INVALID>";
181 if (pThread->u32Magic != RTTHREADINT_MAGIC)
182 return "<BAD-THREAD-MAGIC>";
183 return pThread->szName;
184}
185
186
/**
 * Launch a simple assertion like complaint w/ panic.
 *
 * @param   pszFile         Where from - file.
 * @param   iLine           Where from - line.
 * @param   pszFunction     Where from - function.
 * @param   pszWhat         What we're complaining about.
 * @param   ...             Format arguments.
 */
static void rtLockValidatorComplain(RT_SRC_POS_DECL, const char *pszWhat, ...)
{
    if (!ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
    {
        RTAssertMsg1Weak("RTLockValidator", iLine, pszFile, pszFunction);
        va_list va;
        va_start(va, pszWhat);
        RTAssertMsg2WeakV(pszWhat, va);
        va_end(va);
    }
    /* The quiet flag is (deliberately?) re-read here in case it changed while
       formatting the message above.
       NOTE(review): the panic is gated on the quiet flag rather than
       g_fLockValidatorMayPanic like rtLockValidatorComplainPanic() is -
       confirm this asymmetry is intentional. */
    if (!ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
        RTAssertPanic();
}
209
210
/**
 * Describes the lock.
 *
 * Emits one line describing pRec via RTAssertMsg2AddWeak, with the output
 * format chosen by the record's magic value.  Does nothing when pRec is not a
 * valid pointer or the validator is in quiet mode.
 *
 * @param   pszPrefix   Message prefix.
 * @param   pRec        The lock record we're working on.
 * @param   pszSuffix   Message suffix.
 */
static void rtLockValidatorComplainAboutLock(const char *pszPrefix, PRTLOCKVALRECUNION pRec, const char *pszSuffix)
{
    if (    VALID_PTR(pRec)
        &&  !ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
    {
        switch (pRec->Core.u32Magic)
        {
            /* Exclusive record: show owner thread, recursion depth and the
               source position of the last acquisition. */
            case RTLOCKVALRECEXCL_MAGIC:
                RTAssertMsg2AddWeak("%s%p %s xrec=%p own=%s nest=%u pos={%Rbn(%u) %Rfn %p}%s", pszPrefix,
                                    pRec->Excl.hLock, pRec->Excl.pszName, pRec,
                                    rtLockValidatorNameThreadHandle(&pRec->Excl.hThread), pRec->Excl.cRecursion,
                                    pRec->Excl.SrcPos.pszFile, pRec->Excl.SrcPos.uLine, pRec->Excl.SrcPos.pszFunction, pRec->Excl.SrcPos.uId,
                                    pszSuffix);
                break;

            /* Shared record: per-owner details live in the owner records. */
            case RTLOCKVALRECSHRD_MAGIC:
                RTAssertMsg2AddWeak("%s%p %s srec=%p%s", pszPrefix,
                                    pRec->Shared.hLock, pRec->Shared.pszName, pRec,
                                    pszSuffix);
                break;

            /* Shared-owner record: dereference back to the parent shared
               record, guarding against it having been destroyed already. */
            case RTLOCKVALRECSHRDOWN_MAGIC:
            {
                PRTLOCKVALRECSHRD pShared = pRec->ShrdOwner.pSharedRec;
                if (    VALID_PTR(pShared)
                    &&  pShared->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC)
                    RTAssertMsg2AddWeak("%s%p %s srec=%p trec=%p thr=%s nest=%u pos={%Rbn(%u) %Rfn %p}%s", pszPrefix,
                                        pShared->hLock, pShared->pszName, pShared,
                                        pRec, rtLockValidatorNameThreadHandle(&pRec->ShrdOwner.hThread), pRec->ShrdOwner.cRecursion,
                                        pRec->ShrdOwner.SrcPos.pszFile, pRec->ShrdOwner.SrcPos.uLine, pRec->ShrdOwner.SrcPos.pszFunction, pRec->ShrdOwner.SrcPos.uId,
                                        pszSuffix);
                else
                    RTAssertMsg2AddWeak("%sbad srec=%p trec=%p thr=%s nest=%u pos={%Rbn(%u) %Rfn %p}%s", pszPrefix,
                                        pShared,
                                        pRec, rtLockValidatorNameThreadHandle(&pRec->ShrdOwner.hThread), pRec->ShrdOwner.cRecursion,
                                        pRec->ShrdOwner.SrcPos.pszFile, pRec->ShrdOwner.SrcPos.uLine, pRec->ShrdOwner.SrcPos.pszFunction, pRec->ShrdOwner.SrcPos.uId,
                                        pszSuffix);
                break;
            }

            default:
                RTAssertMsg2AddWeak("%spRec=%p u32Magic=%#x (bad)%s", pszPrefix, pRec, pRec->Core.u32Magic, pszSuffix);
                break;
        }
    }
}
264
265
/**
 * Launch the initial complaint.
 *
 * Prints the assertion header at the caller-supplied source position, the
 * main complaint line (with thread name and optional uId) and a description
 * of the involved lock.  No-op in quiet mode.
 *
 * @param   pszWhat         What we're complaining about.
 * @param   pSrcPos         Where we are complaining from, as it were.  Can be NULL.
 * @param   pThreadSelf     The calling thread.
 * @param   pRec            The main lock involved. Can be NULL.
 */
static void rtLockValidatorComplainFirst(const char *pszWhat, PCRTLOCKVALSRCPOS pSrcPos, PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION pRec)
{
    if (!ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
    {
        ASMCompilerBarrier(); /* paranoia */
        RTAssertMsg1Weak("RTLockValidator", pSrcPos ? pSrcPos->uLine : 0, pSrcPos ? pSrcPos->pszFile : NULL, pSrcPos ? pSrcPos->pszFunction : NULL);
        if (pSrcPos && pSrcPos->uId)
            RTAssertMsg2Weak("%s  [uId=%p  thrd=%s]\n", pszWhat, pSrcPos->uId, VALID_PTR(pThreadSelf) ? pThreadSelf->szName : "<NIL>");
        else
            RTAssertMsg2Weak("%s  [thrd=%s]\n", pszWhat, VALID_PTR(pThreadSelf) ? pThreadSelf->szName : "<NIL>");
        rtLockValidatorComplainAboutLock("Lock: ", pRec, "\n");
    }
}
287
288
289/**
290 * Continue bitching.
291 *
292 * @param pszFormat Format string.
293 * @param ... Format arguments.
294 */
295static void rtLockValidatorComplainMore(const char *pszFormat, ...)
296{
297 if (!ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
298 {
299 va_list va;
300 va_start(va, pszFormat);
301 RTAssertMsg2AddWeakV(pszFormat, va);
302 va_end(va);
303 }
304}
305
306
307/**
308 * Raise a panic if enabled.
309 */
310static void rtLockValidatorComplainPanic(void)
311{
312 if (ASMAtomicUoReadBool(&g_fLockValidatorMayPanic))
313 RTAssertPanic();
314}
315
316
/**
 * Copy a source position record.
 *
 * Each field is written with an unordered atomic store; readers racing this
 * copy may observe a mix of old and new fields, but never a torn individual
 * field.
 *
 * @param   pDst    The destination.
 * @param   pSrc    The source.  Can be NULL, in which case pDst is zeroed.
 */
DECL_FORCE_INLINE(void) rtLockValidatorCopySrcPos(PRTLOCKVALSRCPOS pDst, PCRTLOCKVALSRCPOS pSrc)
{
    if (pSrc)
    {
        ASMAtomicUoWriteU32(&pDst->uLine, pSrc->uLine);
        ASMAtomicUoWritePtr((void * volatile *)&pDst->pszFile, pSrc->pszFile);
        ASMAtomicUoWritePtr((void * volatile *)&pDst->pszFunction, pSrc->pszFunction);
        ASMAtomicUoWritePtr((void * volatile *)&pDst->uId, (void *)pSrc->uId);
    }
    else
    {
        ASMAtomicUoWriteU32(&pDst->uLine, 0);
        ASMAtomicUoWritePtr((void * volatile *)&pDst->pszFile, NULL);
        ASMAtomicUoWritePtr((void * volatile *)&pDst->pszFunction, NULL);
        ASMAtomicUoWritePtr((void * volatile *)&pDst->uId, 0);
    }
}
340
341
342/**
343 * Init a source position record.
344 *
345 * @param pSrcPos The source position record.
346 */
347DECL_FORCE_INLINE(void) rtLockValidatorInitSrcPos(PRTLOCKVALSRCPOS pSrcPos)
348{
349 pSrcPos->pszFile = NULL;
350 pSrcPos->pszFunction = NULL;
351 pSrcPos->uId = 0;
352 pSrcPos->uLine = 0;
353#if HC_ARCH_BITS == 64
354 pSrcPos->u32Padding = 0;
355#endif
356}
357
358
359/**
360 * Serializes destruction of RTLOCKVALREC* and RTTHREADINT structures.
361 */
362DECLHIDDEN(void) rtLockValidatorSerializeDestructEnter(void)
363{
364 RTSEMXROADS hXRoads = g_hLockValidatorXRoads;
365 if (hXRoads != NIL_RTSEMXROADS)
366 RTSemXRoadsNSEnter(hXRoads);
367}
368
369
370/**
371 * Call after rtLockValidatorSerializeDestructEnter.
372 */
373DECLHIDDEN(void) rtLockValidatorSerializeDestructLeave(void)
374{
375 RTSEMXROADS hXRoads = g_hLockValidatorXRoads;
376 if (hXRoads != NIL_RTSEMXROADS)
377 RTSemXRoadsNSLeave(hXRoads);
378}
379
380
381/**
382 * Serializes deadlock detection against destruction of the objects being
383 * inspected.
384 */
385DECLINLINE(void) rtLockValidatorSerializeDetectionEnter(void)
386{
387 RTSEMXROADS hXRoads = g_hLockValidatorXRoads;
388 if (hXRoads != NIL_RTSEMXROADS)
389 RTSemXRoadsEWEnter(hXRoads);
390}
391
392
393/**
394 * Call after rtLockValidatorSerializeDetectionEnter.
395 */
396DECLHIDDEN(void) rtLockValidatorSerializeDetectionLeave(void)
397{
398 RTSEMXROADS hXRoads = g_hLockValidatorXRoads;
399 if (hXRoads != NIL_RTSEMXROADS)
400 RTSemXRoadsEWLeave(hXRoads);
401}
402
403
/**
 * Initializes the per thread lock validator data.
 *
 * Only the free-owner-slot bitmap needs a non-zero initial value; the caller
 * is expected to have zeroed the structure already.
 *
 * @param   pPerThread      The data.
 */
DECLHIDDEN(void) rtLockValidatorInitPerThread(RTLOCKVALPERTHREAD *pPerThread)
{
    pPerThread->bmFreeShrdOwners = UINT32_MAX;

    /* ASSUMES the rest has already been zeroed. */
    Assert(pPerThread->pRec == NULL);
    Assert(pPerThread->cWriteLocks == 0);
    Assert(pPerThread->cReadLocks == 0);
    Assert(pPerThread->fInValidator == false);
}
419
420
/**
 * Checks if all owners are blocked - shared record operated in signaller mode.
 *
 * Performs a lock-free scan of the owner table; any owner that is gone, has a
 * bad magic, or is runnable (and isn't the calling thread) disqualifies the
 * record.  Because the scan is racy, the result is a snapshot and callers
 * re-validate (see rtLockValidatorDdVerifyDeadlock).
 *
 * @returns true / false accordingly.
 * @param   pRec                The record.
 * @param   pThreadSelf         The current thread.
 */
DECL_FORCE_INLINE(bool) rtLockValidatorDdAreAllThreadsBlocked(PRTLOCKVALRECSHRD pRec, PRTTHREADINT pThreadSelf)
{
    /* Snapshot the table pointers/counters up front; the table may mutate
       while we scan. */
    PRTLOCKVALRECSHRDOWN volatile *papOwners = pRec->papOwners;
    uint32_t cAllocated = pRec->cAllocated;
    uint32_t cEntries = ASMAtomicUoReadU32(&pRec->cEntries);
    if (cEntries == 0)
        return false;

    for (uint32_t i = 0; i < cAllocated; i++)
    {
        PRTLOCKVALRECSHRDOWN pEntry = rtLockValidatorUoReadSharedOwner(&papOwners[i]);
        if (   pEntry
            && pEntry->Core.u32Magic == RTLOCKVALRECSHRDOWN_MAGIC)
        {
            PRTTHREADINT pCurThread = rtLockValidatorReadThreadHandle(&pEntry->hThread);
            if (!pCurThread)
                return false;
            if (pCurThread->u32Magic != RTTHREADINT_MAGIC)
                return false;
            /* The calling thread counts as blocked since it's about to be. */
            if (   !RTTHREAD_IS_SLEEPING(rtThreadGetState(pCurThread))
                && pCurThread != pThreadSelf)
                return false;
            /* Stop early once every live entry has been accounted for. */
            if (--cEntries == 0)
                break;
        }
        else
            Assert(!pEntry || pEntry->Core.u32Magic == RTLOCKVALRECSHRDOWN_MAGIC_DEAD);
    }

    return true;
}
459
460
461
462
/**
 * Verifies the deadlock stack before calling it a deadlock.
 *
 * The detection walk is lock-free and therefore racy; this routine re-checks
 * every link in the chain three times (yielding in between) so a transient
 * state that merely looked like a deadlock results in VERR_TRY_AGAIN rather
 * than a false positive.
 *
 * @retval  VERR_SEM_LV_DEADLOCK if it's a deadlock.
 * @retval  VERR_SEM_LV_ILLEGAL_UPGRADE if it's a deadlock on the same lock.
 * @retval  VERR_TRY_AGAIN if something changed.
 *
 * @param   pStack          The deadlock detection stack.
 * @param   pThreadSelf     The current thread.
 */
static int rtLockValidatorDdVerifyDeadlock(PRTLOCKVALDDSTACK pStack, PRTTHREADINT pThreadSelf)
{
    uint32_t const c = pStack->c;
    for (uint32_t iPass = 0; iPass < 3; iPass++)
    {
        /* Entry 0 is the caller's own wait and needs no re-validation. */
        for (uint32_t i = 1; i < c; i++)
        {
            PRTTHREADINT pThread = pStack->a[i].pThread;
            if (pThread->u32Magic != RTTHREADINT_MAGIC)
                return VERR_TRY_AGAIN;
            if (rtThreadGetState(pThread) != pStack->a[i].enmState)
                return VERR_TRY_AGAIN;
            if (rtLockValidatorReadRecUnionPtr(&pThread->LockValidator.pRec) != pStack->a[i].pFirstSibling)
                return VERR_TRY_AGAIN;
            /* ASSUMES the signaller records won't have siblings! */
            PRTLOCKVALRECUNION pRec = pStack->a[i].pRec;
            if (   pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
                && pRec->Shared.fSignaller
                && !rtLockValidatorDdAreAllThreadsBlocked(&pRec->Shared, pThreadSelf))
                return VERR_TRY_AGAIN;
        }
        RTThreadYield();
    }

    /* A single-entry chain means the thread is waiting on a lock it already
       owns, i.e. an illegal read-to-write style upgrade. */
    if (c == 1)
        return VERR_SEM_LV_ILLEGAL_UPGRADE;
    return VERR_SEM_LV_DEADLOCK;
}
501
502
503/**
504 * Checks for stack cycles caused by another deadlock before returning.
505 *
506 * @retval VINF_SUCCESS if the stack is simply too small.
507 * @retval VERR_SEM_LV_EXISTING_DEADLOCK if a cycle was detected.
508 *
509 * @param pStack The deadlock detection stack.
510 */
511static int rtLockValidatorDdHandleStackOverflow(PRTLOCKVALDDSTACK pStack)
512{
513 for (size_t i = 0; i < RT_ELEMENTS(pStack->a) - 1; i++)
514 {
515 PRTTHREADINT pThread = pStack->a[i].pThread;
516 for (size_t j = i + 1; j < RT_ELEMENTS(pStack->a); j++)
517 if (pStack->a[j].pThread == pThread)
518 return VERR_SEM_LV_EXISTING_DEADLOCK;
519 }
520 static bool volatile s_fComplained = false;
521 if (!s_fComplained)
522 {
523 s_fComplained = true;
524 rtLockValidatorComplain(RT_SRC_POS, "lock validator stack is too small! (%zu entries)\n", RT_ELEMENTS(pStack->a));
525 }
526 return VINF_SUCCESS;
527}
528
529
/**
 * Worker for rtLockValidatorDeadlockDetection that does the actual deadlock
 * detection.
 *
 * Performs an iterative depth-first walk of the wait-for graph starting at
 * pOriginalRec, using pStack as the explicit recursion stack.  The walk is
 * lock-free, so any suspected deadlock is re-validated by
 * rtLockValidatorDdVerifyDeadlock before being reported.
 *
 * @retval  VINF_SUCCESS
 * @retval  VERR_SEM_LV_DEADLOCK
 * @retval  VERR_SEM_LV_EXISTING_DEADLOCK
 * @retval  VERR_SEM_LV_ILLEGAL_UPGRADE
 * @retval  VERR_TRY_AGAIN
 *
 * @param   pStack          The stack to use.
 * @param   pOriginalRec    The original record.
 * @param   pThreadSelf     The calling thread.
 */
static int rtLockValidatorDdDoDetection(PRTLOCKVALDDSTACK pStack, PRTLOCKVALRECUNION const pOriginalRec,
                                        PRTTHREADINT const pThreadSelf)
{
    pStack->c = 0;

    /* We could use a single RTLOCKVALDDENTRY variable here, but the
       compiler may make a better job of it when using individual variables. */
    PRTLOCKVALRECUNION  pRec            = pOriginalRec;
    PRTLOCKVALRECUNION  pFirstSibling   = pOriginalRec;
    uint32_t            iEntry          = UINT32_MAX;
    PRTTHREADINT        pThread         = NIL_RTTHREAD;
    RTTHREADSTATE       enmState        = RTTHREADSTATE_RUNNING;
    for (uint32_t iLoop = 0; ; iLoop++)
    {
        /*
         * Process the current record.
         */
        RTLOCKVAL_ASSERT_PTR_ALIGN(pRec);

        /* Find the next relevant owner thread and record. */
        PRTLOCKVALRECUNION  pNextRec     = NULL;
        RTTHREADSTATE       enmNextState = RTTHREADSTATE_RUNNING;
        PRTTHREADINT        pNextThread  = NIL_RTTHREAD;
        switch (pRec->Core.u32Magic)
        {
            case RTLOCKVALRECEXCL_MAGIC:
                Assert(iEntry == UINT32_MAX);
                /* Retry loop: re-read the owner until the (thread, state, rec)
                   triple is consistent or the owner turns out irrelevant. */
                for (;;)
                {
                    pNextThread = rtLockValidatorReadThreadHandle(&pRec->Excl.hThread);
                    if (   !pNextThread
                        || pNextThread->u32Magic != RTTHREADINT_MAGIC)
                        break;
                    enmNextState = rtThreadGetState(pNextThread);
                    if (   !RTTHREAD_IS_SLEEPING(enmNextState)
                        && pNextThread != pThreadSelf)
                        break;
                    pNextRec = rtLockValidatorReadRecUnionPtr(&pNextThread->LockValidator.pRec);
                    if (RT_LIKELY(   !pNextRec
                                  || enmNextState == rtThreadGetState(pNextThread)))
                        break;
                    pNextRec = NULL; /* state changed under us, retry */
                }
                if (!pNextRec)
                {
                    /* Nothing to follow here; try the next sibling, if any. */
                    pRec = pRec->Excl.pSibling;
                    if (    pRec
                        &&  pRec != pFirstSibling)
                        continue;
                    pNextThread = NIL_RTTHREAD;
                }
                break;

            case RTLOCKVALRECSHRD_MAGIC:
                if (!pRec->Shared.fSignaller)
                {
                    /* Skip to the next sibling if same side. ASSUMES reader priority. */
                    /** @todo The read side of a read-write lock is problematic if
                     * the implementation prioritizes writers over readers because
                     * that means we should could deadlock against current readers
                     * if a writer showed up.  If the RW sem implementation is
                     * wrapping some native API, it's not so easy to detect when we
                     * should do this and when we shouldn't.  Checking when we
                     * shouldn't is subject to wakeup scheduling and cannot easily
                     * be made reliable.
                     *
                     * At the moment we circumvent all this mess by declaring that
                     * readers has priority.  This is TRUE on linux, but probably
                     * isn't on Solaris and FreeBSD. */
                    if (   pRec == pFirstSibling
                        && pRec->Shared.pSibling != NULL
                        && pRec->Shared.pSibling != pFirstSibling)
                    {
                        pRec = pRec->Shared.pSibling;
                        Assert(iEntry == UINT32_MAX);
                        continue;
                    }
                }

                /* Scan the owner table for blocked owners. */
                if (    ASMAtomicUoReadU32(&pRec->Shared.cEntries) > 0
                    &&  (   !pRec->Shared.fSignaller
                         || iEntry != UINT32_MAX
                         || rtLockValidatorDdAreAllThreadsBlocked(&pRec->Shared, pThreadSelf)
                        )
                    )
                {
                    uint32_t                        cAllocated = pRec->Shared.cAllocated;
                    PRTLOCKVALRECSHRDOWN volatile  *papOwners  = pRec->Shared.papOwners;
                    /* iEntry resumes from the stack on unwind; UINT32_MAX wraps
                       to 0 on the first ++ for a fresh scan. */
                    while (++iEntry < cAllocated)
                    {
                        PRTLOCKVALRECSHRDOWN pEntry = rtLockValidatorUoReadSharedOwner(&papOwners[iEntry]);
                        if (pEntry)
                        {
                            /* Same consistent-triple retry loop as the
                               exclusive case above. */
                            for (;;)
                            {
                                if (pEntry->Core.u32Magic != RTLOCKVALRECSHRDOWN_MAGIC)
                                    break;
                                pNextThread = rtLockValidatorReadThreadHandle(&pEntry->hThread);
                                if (   !pNextThread
                                    || pNextThread->u32Magic != RTTHREADINT_MAGIC)
                                    break;
                                enmNextState = rtThreadGetState(pNextThread);
                                if (   !RTTHREAD_IS_SLEEPING(enmNextState)
                                    && pNextThread != pThreadSelf)
                                    break;
                                pNextRec = rtLockValidatorReadRecUnionPtr(&pNextThread->LockValidator.pRec);
                                if (RT_LIKELY(   !pNextRec
                                              || enmNextState == rtThreadGetState(pNextThread)))
                                    break;
                                pNextRec = NULL; /* state changed under us, retry */
                            }
                            if (pNextRec)
                                break;
                        }
                        else
                            Assert(!pEntry || pEntry->Core.u32Magic == RTLOCKVALRECSHRDOWN_MAGIC_DEAD);
                    }
                    if (pNextRec)
                        break;
                    pNextThread = NIL_RTTHREAD;
                }

                /* Advance to the next sibling, if any. */
                pRec = pRec->Shared.pSibling;
                if (   pRec != NULL
                    && pRec != pFirstSibling)
                {
                    iEntry = UINT32_MAX;
                    continue;
                }
                break;

            /* Record died while we were looking at it - just back off. */
            case RTLOCKVALRECEXCL_MAGIC_DEAD:
            case RTLOCKVALRECSHRD_MAGIC_DEAD:
                break;

            /* Owner records are never reached directly; anything else is
               corruption. */
            case RTLOCKVALRECSHRDOWN_MAGIC:
            case RTLOCKVALRECSHRDOWN_MAGIC_DEAD:
            default:
                /* NOTE(review): passing the whole Core struct to a %#x vararg
                 * looks wrong; presumably pRec->Core.u32Magic was meant -
                 * confirm against upstream. */
                AssertMsgFailed(("%p: %#x\n", pRec, pRec->Core));
                break;
        }

        if (pNextRec)
        {
            /*
             * Recurse and check for deadlock.
             */
            uint32_t i = pStack->c;
            if (RT_UNLIKELY(i >= RT_ELEMENTS(pStack->a)))
                return rtLockValidatorDdHandleStackOverflow(pStack);

            pStack->c++;
            pStack->a[i].pRec           = pRec;
            pStack->a[i].iEntry         = iEntry;
            pStack->a[i].enmState       = enmState;
            pStack->a[i].pThread        = pThread;
            pStack->a[i].pFirstSibling  = pFirstSibling;

            /* Reached ourselves => potential deadlock; hand off to the
               verifier for the final (race-filtered) verdict. */
            if (RT_UNLIKELY(   pNextThread == pThreadSelf
                            && (   i != 0
                                || pRec->Core.u32Magic != RTLOCKVALRECSHRD_MAGIC
                                || !pRec->Shared.fSignaller) /* ASSUMES signaller records have no siblings. */
                            )
                )
                return rtLockValidatorDdVerifyDeadlock(pStack, pThreadSelf);

            /* Descend into the next thread's wait record. */
            pRec            = pNextRec;
            pFirstSibling   = pNextRec;
            iEntry          = UINT32_MAX;
            enmState        = enmNextState;
            pThread         = pNextThread;
        }
        else
        {
            /*
             * No deadlock here, unwind the stack and deal with any unfinished
             * business there.
             */
            uint32_t i = pStack->c;
            for (;;)
            {
                /* pop */
                if (i == 0)
                    return VINF_SUCCESS;
                i--;
                pRec    = pStack->a[i].pRec;
                iEntry  = pStack->a[i].iEntry;

                /* Examine it. */
                uint32_t u32Magic = pRec->Core.u32Magic;
                if (u32Magic == RTLOCKVALRECEXCL_MAGIC)
                    pRec = pRec->Excl.pSibling;
                else if (u32Magic == RTLOCKVALRECSHRD_MAGIC)
                {
                    if (iEntry + 1 < pRec->Shared.cAllocated)
                        break;      /* continue processing this record. */
                    pRec = pRec->Shared.pSibling;
                }
                else
                {
                    Assert(   u32Magic == RTLOCKVALRECEXCL_MAGIC_DEAD
                           || u32Magic == RTLOCKVALRECSHRD_MAGIC_DEAD);
                    continue;
                }

                /* Any next record to advance to? */
                if (   !pRec
                    || pRec == pStack->a[i].pFirstSibling)
                    continue;
                iEntry = UINT32_MAX;
                break;
            }

            /* Restore the rest of the state and update the stack. */
            pFirstSibling = pStack->a[i].pFirstSibling;
            enmState      = pStack->a[i].enmState;
            pThread       = pStack->a[i].pThread;
            pStack->c     = i;
        }

        Assert(iLoop != 1000000); /* paranoia: the walk must terminate long before this */
    }
}
769
770
771/**
772 * Check for the simple no-deadlock case.
773 *
774 * @returns true if no deadlock, false if further investigation is required.
775 *
776 * @param pOriginalRec The original record.
777 */
778DECLINLINE(int) rtLockValidatorIsSimpleNoDeadlockCase(PRTLOCKVALRECUNION pOriginalRec)
779{
780 if ( pOriginalRec->Excl.Core.u32Magic == RTLOCKVALRECEXCL_MAGIC
781 && !pOriginalRec->Excl.pSibling)
782 {
783 PRTTHREADINT pThread = rtLockValidatorReadThreadHandle(&pOriginalRec->Excl.hThread);
784 if ( !pThread
785 || pThread->u32Magic != RTTHREADINT_MAGIC)
786 return true;
787 RTTHREADSTATE enmState = rtThreadGetState(pThread);
788 if (!RTTHREAD_IS_SLEEPING(enmState))
789 return true;
790 }
791 return false;
792}
793
794
/**
 * Worker for rtLockValidatorDeadlockDetection that bitches about a deadlock.
 *
 * Prints the initial complaint (mapped from rc), then dumps the whole
 * deadlock chain one lock per line, and finally panics if panicking is
 * enabled.
 *
 * @param   pStack          The chain of locks causing the deadlock.
 * @param   pRec            The record relating to the current thread's lock
 *                          operation.
 * @param   pThreadSelf     This thread.
 * @param   pSrcPos         Where we are going to deadlock.
 * @param   rc              The return code.
 */
static void rcLockValidatorDoDeadlockComplaining(PRTLOCKVALDDSTACK pStack, PRTLOCKVALRECUNION pRec,
                                                 PRTTHREADINT pThreadSelf, PCRTLOCKVALSRCPOS pSrcPos, int rc)
{
    if (!ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
    {
        const char *pszWhat;
        switch (rc)
        {
            case VERR_SEM_LV_DEADLOCK:          pszWhat = "Detected deadlock!"; break;
            case VERR_SEM_LV_EXISTING_DEADLOCK: pszWhat = "Found existing deadlock!"; break;
            case VERR_SEM_LV_ILLEGAL_UPGRADE:   pszWhat = "Illegal lock upgrade!"; break;
            default:                            AssertFailed(); pszWhat = "!unexpected rc!"; break;
        }
        /* Only mention pRec separately when it isn't already the chain head. */
        rtLockValidatorComplainFirst(pszWhat, pSrcPos, pThreadSelf, pStack->a[0].pRec != pRec ? pRec : NULL);
        rtLockValidatorComplainMore("---- start of deadlock chain - %u entries ----\n", pStack->c);
        for (uint32_t i = 0; i < pStack->c; i++)
        {
            char szPrefix[24];
            RTStrPrintf(szPrefix, sizeof(szPrefix), "#%02u: ", i);
            /* Prefer the specific owner record over the shared record when the
               entry points into an owner table. */
            PRTLOCKVALRECSHRDOWN pShrdOwner = NULL;
            if (pStack->a[i].pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC)
                pShrdOwner = pStack->a[i].pRec->Shared.papOwners[pStack->a[i].iEntry];
            if (VALID_PTR(pShrdOwner) && pShrdOwner->Core.u32Magic == RTLOCKVALRECSHRDOWN_MAGIC)
                rtLockValidatorComplainAboutLock(szPrefix, (PRTLOCKVALRECUNION)pShrdOwner, "\n");
            else
                rtLockValidatorComplainAboutLock(szPrefix, pStack->a[i].pRec, "\n");
        }
        rtLockValidatorComplainMore("---- end of deadlock chain ----\n");
    }

    rtLockValidatorComplainPanic();
}
837
838
/**
 * Perform deadlock detection.
 *
 * Note that the whole body is currently compiled only for DEBUG_bird builds;
 * everywhere else this returns VINF_SUCCESS without doing any detection.
 * When enabled, a VERR_TRY_AGAIN result from the worker (racy state change)
 * is retried a few times with yields before giving up and declaring success.
 *
 * @retval  VINF_SUCCESS
 * @retval  VERR_SEM_LV_DEADLOCK
 * @retval  VERR_SEM_LV_EXISTING_DEADLOCK
 * @retval  VERR_SEM_LV_ILLEGAL_UPGRADE
 *
 * @param   pRec            The record relating to the current thread's lock
 *                          operation.
 * @param   pThreadSelf     The current thread.
 * @param   pSrcPos         The position of the current lock operation.
 */
static int rtLockValidatorDeadlockDetection(PRTLOCKVALRECUNION pRec, PRTTHREADINT pThreadSelf, PCRTLOCKVALSRCPOS pSrcPos)
{
#ifdef DEBUG_bird
    RTLOCKVALDDSTACK Stack;
    int rc = rtLockValidatorDdDoDetection(&Stack, pRec, pThreadSelf);
    if (RT_SUCCESS(rc))
        return VINF_SUCCESS;

    if (rc == VERR_TRY_AGAIN)
    {
        /* Retry a few times in case the racy walk hit transient state. */
        for (uint32_t iLoop = 0; ; iLoop++)
        {
            rc = rtLockValidatorDdDoDetection(&Stack, pRec, pThreadSelf);
            if (RT_SUCCESS_NP(rc))
                return VINF_SUCCESS;
            if (rc != VERR_TRY_AGAIN)
                break;
            RTThreadYield();
            if (iLoop >= 3)
                return VINF_SUCCESS; /* give the benefit of the doubt */
        }
    }

    rcLockValidatorDoDeadlockComplaining(&Stack, pRec, pThreadSelf, pSrcPos, rc);
    return rc;
#else
    return VINF_SUCCESS;
#endif
}
881
882
883/**
884 * Unlinks all siblings.
885 *
886 * This is used during record deletion and assumes no races.
887 *
888 * @param pCore One of the siblings.
889 */
890static void rtLockValidatorUnlinkAllSiblings(PRTLOCKVALRECCORE pCore)
891{
892 /* ASSUMES sibling destruction doesn't involve any races and that all
893 related records are to be disposed off now. */
894 PRTLOCKVALRECUNION pSibling = (PRTLOCKVALRECUNION)pCore;
895 while (pSibling)
896 {
897 PRTLOCKVALRECUNION volatile *ppCoreNext;
898 switch (pSibling->Core.u32Magic)
899 {
900 case RTLOCKVALRECEXCL_MAGIC:
901 case RTLOCKVALRECEXCL_MAGIC_DEAD:
902 ppCoreNext = &pSibling->Excl.pSibling;
903 break;
904
905 case RTLOCKVALRECSHRD_MAGIC:
906 case RTLOCKVALRECSHRD_MAGIC_DEAD:
907 ppCoreNext = &pSibling->Shared.pSibling;
908 break;
909
910 default:
911 AssertFailed();
912 ppCoreNext = NULL;
913 break;
914 }
915 if (RT_UNLIKELY(ppCoreNext))
916 break;
917 pSibling = (PRTLOCKVALRECUNION)ASMAtomicXchgPtr((void * volatile *)ppCoreNext, NULL);
918 }
919}
920
921
922RTDECL(int) RTLockValidatorRecMakeSiblings(PRTLOCKVALRECCORE pRec1, PRTLOCKVALRECCORE pRec2)
923{
924 /*
925 * Validate input.
926 */
927 PRTLOCKVALRECUNION p1 = (PRTLOCKVALRECUNION)pRec1;
928 PRTLOCKVALRECUNION p2 = (PRTLOCKVALRECUNION)pRec2;
929
930 AssertPtrReturn(p1, VERR_SEM_LV_INVALID_PARAMETER);
931 AssertReturn( p1->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC
932 || p1->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
933 , VERR_SEM_LV_INVALID_PARAMETER);
934
935 AssertPtrReturn(p2, VERR_SEM_LV_INVALID_PARAMETER);
936 AssertReturn( p2->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC
937 || p2->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
938 , VERR_SEM_LV_INVALID_PARAMETER);
939
940 /*
941 * Link them (circular list).
942 */
943 if ( p1->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC
944 && p2->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC)
945 {
946 p1->Excl.pSibling = p2;
947 p2->Shared.pSibling = p1;
948 }
949 else if ( p1->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
950 && p2->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC)
951 {
952 p1->Shared.pSibling = p2;
953 p2->Excl.pSibling = p1;
954 }
955 else
956 AssertFailedReturn(VERR_SEM_LV_INVALID_PARAMETER); /* unsupported mix */
957
958 return VINF_SUCCESS;
959}
960
961
962
963
/**
 * Initializes an exclusive lock validator record.
 *
 * Also lazily creates the global crossroads semaphore the first time any
 * record is initialized.
 *
 * @param   pRec        The record to initialize.
 * @param   hClass      The lock class.
 * @param   uSubClass   The lock sub-class.
 * @param   pszName     The lock name (not copied).
 * @param   hLock       The lock handle this record is associated with.
 */
RTDECL(void) RTLockValidatorRecExclInit(PRTLOCKVALRECEXCL pRec, RTLOCKVALIDATORCLASS hClass,
                                        uint32_t uSubClass, const char *pszName, void *hLock)
{
    RTLOCKVAL_ASSERT_PTR_ALIGN(pRec);
    RTLOCKVAL_ASSERT_PTR_ALIGN(hLock);

    pRec->Core.u32Magic = RTLOCKVALRECEXCL_MAGIC;
    pRec->fEnabled      = RTLockValidatorIsEnabled();
    pRec->afReserved[0] = 0;
    pRec->afReserved[1] = 0;
    pRec->afReserved[2] = 0;
    rtLockValidatorInitSrcPos(&pRec->SrcPos);
    pRec->hThread       = NIL_RTTHREAD;
    pRec->pDown         = NULL;
    pRec->hClass        = hClass;
    pRec->uSubClass     = uSubClass;
    pRec->cRecursion    = 0;
    pRec->hLock         = hLock;
    pRec->pszName       = pszName;
    pRec->pSibling      = NULL;

    /* Lazily initialize the crossroads semaphore.  The CmpXchg guard ensures
       only one thread attempts the creation; a concurrent caller simply
       proceeds without waiting (the enter/leave wrappers tolerate NIL). */
    static uint32_t volatile s_fInitializing = false;
    if (RT_UNLIKELY(    g_hLockValidatorXRoads == NIL_RTSEMXROADS
                    &&  ASMAtomicCmpXchgU32(&s_fInitializing, true, false)))
    {
        RTSEMXROADS hXRoads;
        int rc = RTSemXRoadsCreate(&hXRoads);
        if (RT_SUCCESS(rc))
            ASMAtomicWriteHandle(&g_hLockValidatorXRoads, hXRoads);
        ASMAtomicWriteU32(&s_fInitializing, false);
    }
}
997
998
999RTDECL(int) RTLockValidatorRecExclCreate(PRTLOCKVALRECEXCL *ppRec, RTLOCKVALIDATORCLASS hClass,
1000 uint32_t uSubClass, const char *pszName, void *pvLock)
1001{
1002 PRTLOCKVALRECEXCL pRec;
1003 *ppRec = pRec = (PRTLOCKVALRECEXCL)RTMemAlloc(sizeof(*pRec));
1004 if (!pRec)
1005 return VERR_NO_MEMORY;
1006
1007 RTLockValidatorRecExclInit(pRec, hClass, uSubClass, pszName, pvLock);
1008
1009 return VINF_SUCCESS;
1010}
1011
1012
/**
 * Uninitializes an exclusive lock validator record.
 *
 * Marks the record dead and unlinks it from any siblings, serialized against
 * concurrent deadlock detection via the destruction lock.
 *
 * @param   pRec    The record to delete.
 */
RTDECL(void) RTLockValidatorRecExclDelete(PRTLOCKVALRECEXCL pRec)
{
    Assert(pRec->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC);

    /* Keep deadlock detection out while we invalidate the record. */
    rtLockValidatorSerializeDestructEnter();

    /* Kill the magic first so concurrent readers can spot the dead record. */
    ASMAtomicWriteU32(&pRec->Core.u32Magic, RTLOCKVALRECEXCL_MAGIC_DEAD);
    ASMAtomicWriteHandle(&pRec->hThread, NIL_RTTHREAD);
    ASMAtomicWriteHandle(&pRec->hClass, NIL_RTLOCKVALIDATORCLASS);
    if (pRec->pSibling)
        rtLockValidatorUnlinkAllSiblings(&pRec->Core);
    rtLockValidatorSerializeDestructLeave();
}
1026
1027
1028RTDECL(void) RTLockValidatorRecExclDestroy(PRTLOCKVALRECEXCL *ppRec)
1029{
1030 PRTLOCKVALRECEXCL pRec = *ppRec;
1031 *ppRec = NULL;
1032 if (pRec)
1033 {
1034 RTLockValidatorRecExclDelete(pRec);
1035 RTMemFree(pRec);
1036 }
1037}
1038
1039
/**
 * Records hThreadSelf as the owner of the exclusive lock, or bumps the
 * recursion count when it already owns it.
 *
 * @param   pRec            The exclusive lock record.
 * @param   hThreadSelf     The calling thread; NIL_RTTHREAD causes the
 *                          current native thread to be adopted.
 * @param   pSrcPos         The source position of the acquisition.
 * @param   fFirstRecursion Asserted to be false on the recursion path.
 */
RTDECL(void) RTLockValidatorRecExclSetOwner(PRTLOCKVALRECEXCL pRec, RTTHREAD hThreadSelf,
                                            PCRTLOCKVALSRCPOS pSrcPos, bool fFirstRecursion)
{
    AssertReturnVoid(pRec->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC);
    if (!pRec->fEnabled)
        return;
    if (hThreadSelf == NIL_RTTHREAD)
    {
        /* Adopt alien threads on the fly. */
        hThreadSelf = RTThreadSelfAutoAdopt();
        AssertReturnVoid(hThreadSelf != NIL_RTTHREAD);
    }
    Assert(hThreadSelf == RTThreadSelf());

    /* Per-thread write lock accounting. */
    ASMAtomicIncS32(&hThreadSelf->LockValidator.cWriteLocks);

    if (pRec->hThread == hThreadSelf)
    {
        /* Recursive acquisition by the current owner. */
        Assert(!fFirstRecursion);
        pRec->cRecursion++;
    }
    else
    {
        Assert(pRec->hThread == NIL_RTTHREAD);

        /*
         * Update the record.  The source position and recursion count are
         * written before the owner handle publishes the acquisition.
         */
        rtLockValidatorCopySrcPos(&pRec->SrcPos, pSrcPos);
        ASMAtomicUoWriteU32(&pRec->cRecursion, 1);
        ASMAtomicWriteHandle(&pRec->hThread, hThreadSelf);

        /*
         * Push the lock onto the lock stack.
         */
        /** @todo push it onto the per-thread lock stack. */
    }
}
1077
1078
1079RTDECL(int) RTLockValidatorRecExclReleaseOwner(PRTLOCKVALRECEXCL pRec, bool fFinalRecursion)
1080{
1081 AssertReturn(pRec->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
1082 if (!pRec->fEnabled)
1083 return VINF_SUCCESS;
1084 AssertReturn(pRec->hThread != NIL_RTTHREAD, VERR_SEM_LV_INVALID_PARAMETER);
1085
1086 RTLockValidatorRecExclReleaseOwnerUnchecked(pRec);
1087 return VINF_SUCCESS;
1088}
1089
1090
/**
 * Releases one level of exclusive lock ownership without validating that the
 * calling thread actually is the recorded owner.
 *
 * Clears the owner handle only when the recursion count reaches zero.
 *
 * @param   pRec    The exclusive lock record.
 */
RTDECL(void) RTLockValidatorRecExclReleaseOwnerUnchecked(PRTLOCKVALRECEXCL pRec)
{
    AssertReturnVoid(pRec->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC);
    if (!pRec->fEnabled)
        return;
    RTTHREADINT *pThread = pRec->hThread;
    AssertReturnVoid(pThread != NIL_RTTHREAD);
    Assert(pThread == RTThreadSelf());

    /* Per-thread write lock accounting. */
    ASMAtomicDecS32(&pThread->LockValidator.cWriteLocks);

    if (ASMAtomicDecU32(&pRec->cRecursion) == 0)
    {
        /*
         * Pop (remove) the lock.
         */
        /** @todo remove it from the per-thread stack/whatever. */

        /*
         * Update the record.
         */
        ASMAtomicWriteHandle(&pRec->hThread, NIL_RTTHREAD);
    }
}
1115
1116
1117RTDECL(int) RTLockValidatorRecExclRecursion(PRTLOCKVALRECEXCL pRec, PCRTLOCKVALSRCPOS pSrcPos)
1118{
1119 AssertReturn(pRec->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
1120 if (!pRec->fEnabled)
1121 return VINF_SUCCESS;
1122 AssertReturn(pRec->hThread != NIL_RTTHREAD, VERR_SEM_LV_INVALID_PARAMETER);
1123
1124 Assert(pRec->cRecursion < _1M);
1125 pRec->cRecursion++;
1126
1127 return VINF_SUCCESS;
1128}
1129
1130
1131RTDECL(int) RTLockValidatorRecExclUnwind(PRTLOCKVALRECEXCL pRec)
1132{
1133 AssertReturn(pRec->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
1134 if (!pRec->fEnabled)
1135 return VINF_SUCCESS;
1136 AssertReturn(pRec->hThread != NIL_RTTHREAD, VERR_SEM_LV_INVALID_PARAMETER);
1137 AssertReturn(pRec->cRecursion > 0, VERR_SEM_LV_INVALID_PARAMETER);
1138
1139 Assert(pRec->cRecursion);
1140 pRec->cRecursion--;
1141 return VINF_SUCCESS;
1142}
1143
1144
1145RTDECL(int) RTLockValidatorRecExclRecursionMixed(PRTLOCKVALRECEXCL pRec, PRTLOCKVALRECCORE pRecMixed, PCRTLOCKVALSRCPOS pSrcPos)
1146{
1147 AssertReturn(pRec->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
1148 PRTLOCKVALRECUNION pRecMixedU = (PRTLOCKVALRECUNION)pRecMixed;
1149 AssertReturn( pRecMixedU->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
1150 || pRecMixedU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC
1151 , VERR_SEM_LV_INVALID_PARAMETER);
1152 if (!pRec->fEnabled)
1153 return VINF_SUCCESS;
1154 AssertReturn(pRec->hThread != NIL_RTTHREAD, VERR_SEM_LV_INVALID_PARAMETER);
1155 AssertReturn(pRec->cRecursion > 0, VERR_SEM_LV_INVALID_PARAMETER);
1156
1157 Assert(pRec->cRecursion < _1M);
1158 pRec->cRecursion++;
1159
1160 return VINF_SUCCESS;
1161}
1162
1163
1164RTDECL(int) RTLockValidatorRecExclUnwindMixed(PRTLOCKVALRECEXCL pRec, PRTLOCKVALRECCORE pRecMixed)
1165{
1166 AssertReturn(pRec->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
1167 PRTLOCKVALRECUNION pRecMixedU = (PRTLOCKVALRECUNION)pRecMixed;
1168 AssertReturn( pRecMixedU->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
1169 || pRecMixedU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC
1170 , VERR_SEM_LV_INVALID_PARAMETER);
1171 if (!pRec->fEnabled)
1172 return VINF_SUCCESS;
1173 AssertReturn(pRec->hThread != NIL_RTTHREAD, VERR_SEM_LV_INVALID_PARAMETER);
1174 AssertReturn(pRec->cRecursion > 0, VERR_SEM_LV_INVALID_PARAMETER);
1175
1176 Assert(pRec->cRecursion);
1177 pRec->cRecursion--;
1178 return VINF_SUCCESS;
1179}
1180
1181
1182RTDECL(int) RTLockValidatorRecExclCheckOrder(PRTLOCKVALRECEXCL pRec, RTTHREAD hThreadSelf, PCRTLOCKVALSRCPOS pSrcPos)
1183{
1184 AssertReturn(pRec->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
1185 if (!pRec->fEnabled)
1186 return VINF_SUCCESS;
1187
1188 /*
1189 * Check it locks we're currently holding.
1190 */
1191 /** @todo later */
1192
1193 /*
1194 * If missing order rules, add them.
1195 */
1196
1197 return VINF_SUCCESS;
1198}
1199
1200
/**
 * Checks whether it is safe for the calling thread to block on the given
 * exclusive lock and publishes the pending-block state for the deadlock
 * detector.
 *
 * @returns VINF_SUCCESS if blocking is OK, VERR_SEM_LV_NESTED on disallowed
 *          recursion, VERR_SEM_LV_INVALID_PARAMETER on bad input, or a status
 *          from the deadlock detection code.
 * @param   pRec            The exclusive lock record being waited on.
 * @param   hThreadSelf     The calling thread (must be the current thread).
 * @param   pSrcPos         The source position of the wait.
 * @param   fRecursiveOk    Whether recursive acquisition is allowed.
 * @param   enmSleepState   The sleep state the thread is about to enter.
 * @param   fReallySleeping Whether the caller will really go to sleep.
 */
RTDECL(int) RTLockValidatorRecExclCheckBlocking(PRTLOCKVALRECEXCL pRec, RTTHREAD hThreadSelf,
                                                PCRTLOCKVALSRCPOS pSrcPos, bool fRecursiveOk,
                                                RTTHREADSTATE enmSleepState, bool fReallySleeping)
{
    /*
     * Fend off wild life.
     */
    PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
    AssertPtrReturn(pRecU, VERR_SEM_LV_INVALID_PARAMETER);
    AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    if (!pRecU->Excl.fEnabled)
        return VINF_SUCCESS;

    PRTTHREADINT pThreadSelf = hThreadSelf;
    AssertPtrReturn(pThreadSelf, VERR_SEM_LV_INVALID_PARAMETER);
    AssertReturn(pThreadSelf->u32Magic == RTTHREADINT_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    Assert(pThreadSelf == RTThreadSelf());

    AssertReturn(RTTHREAD_IS_SLEEPING(enmSleepState), VERR_SEM_LV_INVALID_PARAMETER);

    /* Threads being inserted or removed take locks too; keep their special
       state instead of the requested sleep state. */
    RTTHREADSTATE enmThreadState = rtThreadGetState(pThreadSelf);
    if (RT_UNLIKELY(enmThreadState != RTTHREADSTATE_RUNNING))
    {
        AssertReturn(   enmThreadState == RTTHREADSTATE_TERMINATED   /* rtThreadRemove uses locks too */
                     || enmThreadState == RTTHREADSTATE_INITIALIZING /* rtThreadInsert uses locks too */
                     , VERR_SEM_LV_INVALID_PARAMETER);
        enmSleepState = enmThreadState;
    }

    /*
     * Record the location.  This publishes what we are about to block on so
     * the deadlock detector can walk the wait chain.
     */
    rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pRec, pRecU);
    rtLockValidatorCopySrcPos(&pThreadSelf->LockValidator.SrcPos, pSrcPos);
    ASMAtomicWriteBool(&pThreadSelf->LockValidator.fInValidator, true);
    pThreadSelf->LockValidator.enmRecState = enmSleepState;
    rtThreadSetState(pThreadSelf, enmSleepState);

    /*
     * Don't do deadlock detection if we're recursing.
     *
     * On some hosts we don't do recursion accounting our selves and there
     * isn't any other place to check for this.
     */
    int rc = VINF_SUCCESS;
    if (rtLockValidatorReadThreadHandle(&pRecU->Excl.hThread) == pThreadSelf)
    {
        if (!fRecursiveOk)
        {
            rtLockValidatorComplainFirst("Recursion not allowed", pSrcPos, pThreadSelf, pRecU);
            rtLockValidatorComplainPanic();
            rc = VERR_SEM_LV_NESTED;
        }
    }
    /*
     * Perform deadlock detection.
     */
    else if (!rtLockValidatorIsSimpleNoDeadlockCase(pRecU))
        rc = rtLockValidatorDeadlockDetection(pRecU, pThreadSelf, pSrcPos);

    if (RT_SUCCESS(rc))
        ASMAtomicWriteBool(&pThreadSelf->fReallySleeping, fReallySleeping);
    else
    {
        /* Roll back the published blocking state on failure. */
        rtThreadSetState(pThreadSelf, enmThreadState);
        rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pRec, NULL);
    }
    ASMAtomicWriteBool(&pThreadSelf->LockValidator.fInValidator, false);
    return rc;
}
RT_EXPORT_SYMBOL(RTLockValidatorRecExclCheckBlocking);
1272
1273
1274RTDECL(int) RTLockValidatorRecExclCheckOrderAndBlocking(PRTLOCKVALRECEXCL pRec, RTTHREAD hThreadSelf,
1275 PCRTLOCKVALSRCPOS pSrcPos, bool fRecursiveOk,
1276 RTTHREADSTATE enmSleepState, bool fReallySleeping)
1277{
1278 int rc = RTLockValidatorRecExclCheckOrder(pRec, hThreadSelf, pSrcPos);
1279 if (RT_SUCCESS(rc))
1280 rc = RTLockValidatorRecExclCheckBlocking(pRec, hThreadSelf, pSrcPos, fRecursiveOk, enmSleepState, fReallySleeping);
1281 return rc;
1282}
1283RT_EXPORT_SYMBOL(RTLockValidatorRecExclCheckOrderAndBlocking);
1284
1285
1286RTDECL(void) RTLockValidatorRecSharedInit(PRTLOCKVALRECSHRD pRec, RTLOCKVALIDATORCLASS hClass, uint32_t uSubClass,
1287 const char *pszName, void *hLock, bool fSignaller)
1288{
1289 RTLOCKVAL_ASSERT_PTR_ALIGN(pRec);
1290 RTLOCKVAL_ASSERT_PTR_ALIGN(hLock);
1291
1292 pRec->Core.u32Magic = RTLOCKVALRECSHRD_MAGIC;
1293 pRec->uSubClass = uSubClass;
1294 pRec->hClass = hClass;
1295 pRec->hLock = hLock;
1296 pRec->pszName = pszName;
1297 pRec->fEnabled = RTLockValidatorIsEnabled();
1298 pRec->fSignaller = fSignaller;
1299 pRec->pSibling = NULL;
1300
1301 /* the table */
1302 pRec->cEntries = 0;
1303 pRec->iLastEntry = 0;
1304 pRec->cAllocated = 0;
1305 pRec->fReallocating = false;
1306 pRec->fPadding = false;
1307 pRec->papOwners = NULL;
1308#if HC_ARCH_BITS == 32
1309 pRec->u32Alignment = UINT32_MAX;
1310#endif
1311}
1312
1313
1314RTDECL(void) RTLockValidatorRecSharedDelete(PRTLOCKVALRECSHRD pRec)
1315{
1316 Assert(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC);
1317
1318 /*
1319 * Flip it into table realloc mode and take the destruction lock.
1320 */
1321 rtLockValidatorSerializeDestructEnter();
1322 while (!ASMAtomicCmpXchgBool(&pRec->fReallocating, true, false))
1323 {
1324 rtLockValidatorSerializeDestructLeave();
1325
1326 rtLockValidatorSerializeDetectionEnter();
1327 rtLockValidatorSerializeDetectionLeave();
1328
1329 rtLockValidatorSerializeDestructEnter();
1330 }
1331
1332 ASMAtomicWriteU32(&pRec->Core.u32Magic, RTLOCKVALRECSHRD_MAGIC_DEAD);
1333 ASMAtomicUoWriteHandle(&pRec->hClass, NIL_RTLOCKVALIDATORCLASS);
1334 if (pRec->papOwners)
1335 {
1336 PRTLOCKVALRECSHRDOWN volatile *papOwners = pRec->papOwners;
1337 ASMAtomicUoWritePtr((void * volatile *)&pRec->papOwners, NULL);
1338 ASMAtomicUoWriteU32(&pRec->cAllocated, 0);
1339
1340 RTMemFree((void *)pRec->papOwners);
1341 }
1342 if (pRec->pSibling)
1343 rtLockValidatorUnlinkAllSiblings(&pRec->Core);
1344 ASMAtomicWriteBool(&pRec->fReallocating, false);
1345
1346 rtLockValidatorSerializeDestructLeave();
1347}
1348
1349
1350/**
1351 * Locates an owner (thread) in a shared lock record.
1352 *
1353 * @returns Pointer to the owner entry on success, NULL on failure..
1354 * @param pShared The shared lock record.
1355 * @param hThread The thread (owner) to find.
1356 * @param piEntry Where to optionally return the table in index.
1357 * Optional.
1358 */
1359DECLINLINE(PRTLOCKVALRECSHRDOWN)
1360rtLockValidatorRecSharedFindOwner(PRTLOCKVALRECSHRD pShared, RTTHREAD hThread, uint32_t *piEntry)
1361{
1362 rtLockValidatorSerializeDetectionEnter();
1363
1364 PRTLOCKVALRECSHRDOWN volatile *papOwners = pShared->papOwners;
1365 if (papOwners)
1366 {
1367 uint32_t const cMax = pShared->cAllocated;
1368 for (uint32_t iEntry = 0; iEntry < cMax; iEntry++)
1369 {
1370 PRTLOCKVALRECSHRDOWN pEntry = rtLockValidatorUoReadSharedOwner(&papOwners[iEntry]);
1371 if (pEntry && pEntry->hThread == hThread)
1372 {
1373 rtLockValidatorSerializeDetectionLeave();
1374 if (piEntry)
1375 *piEntry = iEntry;
1376 return pEntry;
1377 }
1378 }
1379 }
1380
1381 rtLockValidatorSerializeDetectionLeave();
1382 return NULL;
1383}
1384
1385
1386RTDECL(int) RTLockValidatorRecSharedCheckOrder(PRTLOCKVALRECSHRD pRec, RTTHREAD hThreadSelf, PCRTLOCKVALSRCPOS pSrcPos)
1387{
1388 AssertReturn(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
1389 if (!pRec->fEnabled)
1390 return VINF_SUCCESS;
1391 Assert(hThreadSelf == NIL_RTTHREAD || hThreadSelf == RTThreadSelf());
1392
1393 /*
1394 * Check it locks we're currently holding.
1395 */
1396 /** @todo later */
1397
1398 /*
1399 * If missing order rules, add them.
1400 */
1401
1402 return VINF_SUCCESS;
1403}
1404
1405
/**
 * Checks whether it is safe for the calling thread to block on the given
 * shared lock and publishes the pending-block state for the deadlock
 * detector.
 *
 * @returns VINF_SUCCESS if blocking is OK, VERR_SEM_LV_NESTED on disallowed
 *          recursion, VERR_SEM_LV_INVALID_PARAMETER on bad input, or a status
 *          from the deadlock detection code.
 * @param   pRec            The shared lock record being waited on.
 * @param   hThreadSelf     The calling thread (must be the current thread).
 * @param   pSrcPos         The source position of the wait.
 * @param   fRecursiveOk    Whether recursive acquisition is allowed.
 * @param   enmSleepState   The sleep state the thread is about to enter.
 * @param   fReallySleeping Whether the caller will really go to sleep.
 */
RTDECL(int) RTLockValidatorRecSharedCheckBlocking(PRTLOCKVALRECSHRD pRec, RTTHREAD hThreadSelf,
                                                  PCRTLOCKVALSRCPOS pSrcPos, bool fRecursiveOk,
                                                  RTTHREADSTATE enmSleepState, bool fReallySleeping)
{
    /*
     * Fend off wild life.
     */
    PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
    AssertPtrReturn(pRecU, VERR_SEM_LV_INVALID_PARAMETER);
    AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    if (!pRecU->Shared.fEnabled)
        return VINF_SUCCESS;

    PRTTHREADINT pThreadSelf = hThreadSelf;
    AssertPtrReturn(pThreadSelf, VERR_SEM_LV_INVALID_PARAMETER);
    AssertReturn(pThreadSelf->u32Magic == RTTHREADINT_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    Assert(pThreadSelf == RTThreadSelf());

    AssertReturn(RTTHREAD_IS_SLEEPING(enmSleepState), VERR_SEM_LV_INVALID_PARAMETER);

    /* Threads being inserted or removed take locks too; keep their special
       state instead of the requested sleep state. */
    RTTHREADSTATE enmThreadState = rtThreadGetState(pThreadSelf);
    if (RT_UNLIKELY(enmThreadState != RTTHREADSTATE_RUNNING))
    {
        AssertReturn(   enmThreadState == RTTHREADSTATE_TERMINATED   /* rtThreadRemove uses locks too */
                     || enmThreadState == RTTHREADSTATE_INITIALIZING /* rtThreadInsert uses locks too */
                     , VERR_SEM_LV_INVALID_PARAMETER);
        enmSleepState = enmThreadState;
    }

    /*
     * Record the location.  This publishes what we are about to block on so
     * the deadlock detector can walk the wait chain.
     */
    rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pRec, pRecU);
    rtLockValidatorCopySrcPos(&pThreadSelf->LockValidator.SrcPos, pSrcPos);
    ASMAtomicWriteBool(&pThreadSelf->LockValidator.fInValidator, true);
    pThreadSelf->LockValidator.enmRecState = enmSleepState;
    rtThreadSetState(pThreadSelf, enmSleepState);

    /*
     * Don't do deadlock detection if we're recursing.
     * (Signaller records never count the caller as a recursive owner here.)
     */
    int rc = VINF_SUCCESS;
    PRTLOCKVALRECSHRDOWN pEntry = !pRecU->Shared.fSignaller
                                ? rtLockValidatorRecSharedFindOwner(&pRecU->Shared, pThreadSelf, NULL)
                                : NULL;
    if (pEntry)
    {
        if (!fRecursiveOk)
        {
            rtLockValidatorComplainFirst("Recursion not allowed", pSrcPos, pThreadSelf, pRecU);
            rtLockValidatorComplainPanic();
            rc = VERR_SEM_LV_NESTED;
        }
    }
    /*
     * Perform deadlock detection.
     */
    else if (!rtLockValidatorIsSimpleNoDeadlockCase(pRecU))
        rc = rtLockValidatorDeadlockDetection(pRecU, pThreadSelf, pSrcPos);

    if (RT_SUCCESS(rc))
        ASMAtomicWriteBool(&pThreadSelf->fReallySleeping, fReallySleeping);
    else
    {
        /* Roll back the published blocking state on failure. */
        rtThreadSetState(pThreadSelf, enmThreadState);
        rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pRec, NULL);
    }
    ASMAtomicWriteBool(&pThreadSelf->LockValidator.fInValidator, false);
    return rc;
}
RT_EXPORT_SYMBOL(RTLockValidatorRecSharedCheckBlocking);
1477
1478
1479RTDECL(int) RTLockValidatorRecSharedCheckOrderAndBlocking(PRTLOCKVALRECSHRD pRec, RTTHREAD hThreadSelf,
1480 PCRTLOCKVALSRCPOS pSrcPos, bool fRecursiveOk,
1481 RTTHREADSTATE enmSleepState, bool fReallySleeping)
1482{
1483 int rc = RTLockValidatorRecSharedCheckOrder(pRec, hThreadSelf, pSrcPos);
1484 if (RT_SUCCESS(rc))
1485 rc = RTLockValidatorRecSharedCheckBlocking(pRec, hThreadSelf, pSrcPos, fRecursiveOk, enmSleepState, fReallySleeping);
1486 return rc;
1487}
1488RT_EXPORT_SYMBOL(RTLockValidatorRecSharedCheckOrderAndBlocking);
1489
1490
1491/**
1492 * Allocates and initializes an owner entry for the shared lock record.
1493 *
1494 * @returns The new owner entry.
1495 * @param pRec The shared lock record.
1496 * @param pThreadSelf The calling thread and owner. Used for record
1497 * initialization and allocation.
1498 * @param pSrcPos The source position.
1499 */
1500DECLINLINE(PRTLOCKVALRECSHRDOWN)
1501rtLockValidatorRecSharedAllocOwner(PRTLOCKVALRECSHRD pRec, PRTTHREADINT pThreadSelf, PCRTLOCKVALSRCPOS pSrcPos)
1502{
1503 PRTLOCKVALRECSHRDOWN pEntry;
1504
1505 /*
1506 * Check if the thread has any statically allocated records we can easily
1507 * make use of.
1508 */
1509 unsigned iEntry = ASMBitFirstSetU32(ASMAtomicUoReadU32(&pThreadSelf->LockValidator.bmFreeShrdOwners));
1510 if ( iEntry > 0
1511 && ASMAtomicBitTestAndClear(&pThreadSelf->LockValidator.bmFreeShrdOwners, iEntry - 1))
1512 {
1513 pEntry = &pThreadSelf->LockValidator.aShrdOwners[iEntry - 1];
1514 Assert(!pEntry->fReserved);
1515 pEntry->fStaticAlloc = true;
1516 rtThreadGet(pThreadSelf);
1517 }
1518 else
1519 {
1520 pEntry = (PRTLOCKVALRECSHRDOWN)RTMemAlloc(sizeof(RTLOCKVALRECSHRDOWN));
1521 if (RT_UNLIKELY(!pEntry))
1522 return NULL;
1523 pEntry->fStaticAlloc = false;
1524 }
1525
1526 pEntry->Core.u32Magic = RTLOCKVALRECSHRDOWN_MAGIC;
1527 pEntry->cRecursion = 1;
1528 pEntry->fReserved = true;
1529 pEntry->hThread = pThreadSelf;
1530 pEntry->pDown = NULL;
1531 pEntry->pSharedRec = pRec;
1532#if HC_ARCH_BITS == 32
1533 pEntry->pvReserved = NULL;
1534#endif
1535 if (pSrcPos)
1536 pEntry->SrcPos = *pSrcPos;
1537 else
1538 rtLockValidatorInitSrcPos(&pEntry->SrcPos);
1539 return pEntry;
1540}
1541
1542
1543/**
1544 * Frees an owner entry allocated by rtLockValidatorRecSharedAllocOwner.
1545 *
1546 * @param pEntry The owner entry.
1547 */
1548DECLINLINE(void) rtLockValidatorRecSharedFreeOwner(PRTLOCKVALRECSHRDOWN pEntry)
1549{
1550 if (pEntry)
1551 {
1552 Assert(pEntry->Core.u32Magic == RTLOCKVALRECSHRDOWN_MAGIC);
1553 ASMAtomicWriteU32(&pEntry->Core.u32Magic, RTLOCKVALRECSHRDOWN_MAGIC_DEAD);
1554
1555 PRTTHREADINT pThread;
1556 ASMAtomicXchgHandle(&pEntry->hThread, NIL_RTTHREAD, &pThread);
1557
1558 Assert(pEntry->fReserved);
1559 pEntry->fReserved = false;
1560
1561 if (pEntry->fStaticAlloc)
1562 {
1563 AssertPtrReturnVoid(pThread);
1564 AssertReturnVoid(pThread->u32Magic == RTTHREADINT_MAGIC);
1565
1566 uintptr_t iEntry = pEntry - &pThread->LockValidator.aShrdOwners[0];
1567 AssertReleaseReturnVoid(iEntry < RT_ELEMENTS(pThread->LockValidator.aShrdOwners));
1568
1569 Assert(!ASMBitTest(&pThread->LockValidator.bmFreeShrdOwners, iEntry));
1570 ASMAtomicBitSet(&pThread->LockValidator.bmFreeShrdOwners, iEntry);
1571
1572 rtThreadRelease(pThread);
1573 }
1574 else
1575 {
1576 rtLockValidatorSerializeDestructEnter();
1577 rtLockValidatorSerializeDestructLeave();
1578
1579 RTMemFree(pEntry);
1580 }
1581 }
1582}
1583
1584
1585/**
1586 * Make more room in the table.
1587 *
1588 * @retval true on success
1589 * @retval false if we're out of memory or running into a bad race condition
1590 * (probably a bug somewhere). No longer holding the lock.
1591 *
1592 * @param pShared The shared lock record.
1593 */
1594static bool rtLockValidatorRecSharedMakeRoom(PRTLOCKVALRECSHRD pShared)
1595{
1596 for (unsigned i = 0; i < 1000; i++)
1597 {
1598 /*
1599 * Switch to the other data access direction.
1600 */
1601 rtLockValidatorSerializeDetectionLeave();
1602 if (i >= 10)
1603 {
1604 Assert(i != 10 && i != 100);
1605 RTThreadSleep(i >= 100);
1606 }
1607 rtLockValidatorSerializeDestructEnter();
1608
1609 /*
1610 * Try grab the privilege to reallocating the table.
1611 */
1612 if ( pShared->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
1613 && ASMAtomicCmpXchgBool(&pShared->fReallocating, true, false))
1614 {
1615 uint32_t cAllocated = pShared->cAllocated;
1616 if (cAllocated < pShared->cEntries)
1617 {
1618 /*
1619 * Ok, still not enough space. Reallocate the table.
1620 */
1621#if 0 /** @todo enable this after making sure growing works flawlessly. */
1622 uint32_t cInc = RT_ALIGN_32(pShared->cEntries - cAllocated, 16);
1623#else
1624 uint32_t cInc = RT_ALIGN_32(pShared->cEntries - cAllocated, 1);
1625#endif
1626 PRTLOCKVALRECSHRDOWN *papOwners;
1627 papOwners = (PRTLOCKVALRECSHRDOWN *)RTMemRealloc((void *)pShared->papOwners,
1628 (cAllocated + cInc) * sizeof(void *));
1629 if (!papOwners)
1630 {
1631 ASMAtomicWriteBool(&pShared->fReallocating, false);
1632 rtLockValidatorSerializeDestructLeave();
1633 /* RTMemRealloc will assert */
1634 return false;
1635 }
1636
1637 while (cInc-- > 0)
1638 {
1639 papOwners[cAllocated] = NULL;
1640 cAllocated++;
1641 }
1642
1643 ASMAtomicWritePtr((void * volatile *)&pShared->papOwners, papOwners);
1644 ASMAtomicWriteU32(&pShared->cAllocated, cAllocated);
1645 }
1646 ASMAtomicWriteBool(&pShared->fReallocating, false);
1647 }
1648 rtLockValidatorSerializeDestructLeave();
1649
1650 rtLockValidatorSerializeDetectionEnter();
1651 if (RT_UNLIKELY(pShared->Core.u32Magic != RTLOCKVALRECSHRD_MAGIC))
1652 break;
1653
1654 if (pShared->cAllocated >= pShared->cEntries)
1655 return true;
1656 }
1657
1658 rtLockValidatorSerializeDetectionLeave();
1659 AssertFailed(); /* too many iterations or destroyed while racing. */
1660 return false;
1661}
1662
1663
1664/**
1665 * Adds an owner entry to a shared lock record.
1666 *
1667 * @returns true on success, false on serious race or we're if out of memory.
1668 * @param pShared The shared lock record.
1669 * @param pEntry The owner entry.
1670 */
1671DECLINLINE(bool) rtLockValidatorRecSharedAddOwner(PRTLOCKVALRECSHRD pShared, PRTLOCKVALRECSHRDOWN pEntry)
1672{
1673 rtLockValidatorSerializeDetectionEnter();
1674 if (RT_LIKELY(pShared->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC)) /* paranoia */
1675 {
1676 if ( ASMAtomicIncU32(&pShared->cEntries) > pShared->cAllocated /** @todo add fudge */
1677 && !rtLockValidatorRecSharedMakeRoom(pShared))
1678 return false; /* the worker leave the lock */
1679
1680 PRTLOCKVALRECSHRDOWN volatile *papOwners = pShared->papOwners;
1681 uint32_t const cMax = pShared->cAllocated;
1682 for (unsigned i = 0; i < 100; i++)
1683 {
1684 for (uint32_t iEntry = 0; iEntry < cMax; iEntry++)
1685 {
1686 if (ASMAtomicCmpXchgPtr((void * volatile *)&papOwners[iEntry], pEntry, NULL))
1687 {
1688 rtLockValidatorSerializeDetectionLeave();
1689 return true;
1690 }
1691 }
1692 Assert(i != 25);
1693 }
1694 AssertFailed();
1695 }
1696 rtLockValidatorSerializeDetectionLeave();
1697 return false;
1698}
1699
1700
1701/**
1702 * Remove an owner entry from a shared lock record and free it.
1703 *
1704 * @param pShared The shared lock record.
1705 * @param pEntry The owner entry to remove.
1706 * @param iEntry The last known index.
1707 */
1708DECLINLINE(void) rtLockValidatorRecSharedRemoveAndFreeOwner(PRTLOCKVALRECSHRD pShared, PRTLOCKVALRECSHRDOWN pEntry,
1709 uint32_t iEntry)
1710{
1711 /*
1712 * Remove it from the table.
1713 */
1714 rtLockValidatorSerializeDetectionEnter();
1715 AssertReturnVoidStmt(pShared->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, rtLockValidatorSerializeDetectionLeave());
1716 if (RT_UNLIKELY( iEntry >= pShared->cAllocated
1717 || !ASMAtomicCmpXchgPtr((void * volatile *)&pShared->papOwners[iEntry], NULL, pEntry)))
1718 {
1719 /* this shouldn't happen yet... */
1720 AssertFailed();
1721 PRTLOCKVALRECSHRDOWN volatile *papOwners = pShared->papOwners;
1722 uint32_t const cMax = pShared->cAllocated;
1723 for (iEntry = 0; iEntry < cMax; iEntry++)
1724 if (ASMAtomicCmpXchgPtr((void * volatile *)&papOwners[iEntry], NULL, pEntry))
1725 break;
1726 AssertReturnVoidStmt(iEntry < cMax, rtLockValidatorSerializeDetectionLeave());
1727 }
1728 uint32_t cNow = ASMAtomicDecU32(&pShared->cEntries);
1729 Assert(!(cNow & RT_BIT_32(31))); NOREF(cNow);
1730 rtLockValidatorSerializeDetectionLeave();
1731
1732 /*
1733 * Successfully removed, now free it.
1734 */
1735 rtLockValidatorRecSharedFreeOwner(pEntry);
1736}
1737
1738
/**
 * Drops all current owners of a shared lock record and optionally records
 * hThread as the single new owner.
 *
 * @param   pRec        The shared lock record.
 * @param   hThread     The new owner, NIL_RTTHREAD to leave the record with
 *                      no owners.
 * @param   pSrcPos     The source position for the new owner entry.  Optional.
 */
RTDECL(void) RTLockValidatorRecSharedResetOwner(PRTLOCKVALRECSHRD pRec, RTTHREAD hThread, PCRTLOCKVALSRCPOS pSrcPos)
{
    AssertReturnVoid(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC);
    if (!pRec->fEnabled)
        return;
    AssertReturnVoid(hThread == NIL_RTTHREAD || hThread->u32Magic == RTTHREADINT_MAGIC);

    /*
     * Free all current owners.  The detection lock is dropped around each
     * free and the table pointer/size re-read afterwards, since the table
     * may be reallocated in the meantime.
     */
    rtLockValidatorSerializeDetectionEnter();
    while (ASMAtomicUoReadU32(&pRec->cEntries) > 0)
    {
        AssertReturnVoidStmt(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, rtLockValidatorSerializeDetectionLeave());
        uint32_t iEntry = 0;
        uint32_t cEntries = pRec->cAllocated;
        PRTLOCKVALRECSHRDOWN volatile *papEntries = pRec->papOwners;
        while (iEntry < cEntries)
        {
            /* Claim the slot atomically so nobody else frees the same entry. */
            PRTLOCKVALRECSHRDOWN pEntry = (PRTLOCKVALRECSHRDOWN)ASMAtomicXchgPtr((void * volatile *)&papEntries[iEntry], NULL);
            if (pEntry)
            {
                ASMAtomicDecU32(&pRec->cEntries);
                rtLockValidatorSerializeDetectionLeave();

                rtLockValidatorRecSharedFreeOwner(pEntry);

                rtLockValidatorSerializeDetectionEnter();
                if (ASMAtomicUoReadU32(&pRec->cEntries) == 0)
                    break;
                /* Re-read; the table may have been reallocated while unlocked. */
                cEntries = pRec->cAllocated;
                papEntries = pRec->papOwners;
            }
            iEntry++;
        }
    }
    rtLockValidatorSerializeDetectionLeave();

    if (hThread != NIL_RTTHREAD)
    {
        /*
         * Allocate a new owner entry and insert it into the table.
         */
        PRTLOCKVALRECSHRDOWN pEntry = rtLockValidatorRecSharedAllocOwner(pRec, hThread, pSrcPos);
        if (   pEntry
            && !rtLockValidatorRecSharedAddOwner(pRec, pEntry))
            rtLockValidatorRecSharedFreeOwner(pEntry);
    }
}
RT_EXPORT_SYMBOL(RTLockValidatorRecSharedResetOwner);
1789
1790
1791RTDECL(void) RTLockValidatorRecSharedAddOwner(PRTLOCKVALRECSHRD pRec, RTTHREAD hThread, PCRTLOCKVALSRCPOS pSrcPos)
1792{
1793 AssertReturnVoid(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC);
1794 if (!pRec->fEnabled)
1795 return;
1796 if (hThread == NIL_RTTHREAD)
1797 {
1798 hThread = RTThreadSelfAutoAdopt();
1799 AssertReturnVoid(hThread != NIL_RTTHREAD);
1800 }
1801 AssertReturnVoid(hThread != NIL_RTTHREAD);
1802 AssertReturnVoid(hThread->u32Magic == RTTHREADINT_MAGIC);
1803
1804 /*
1805 * Recursive?
1806 *
1807 * Note! This code can be optimized to try avoid scanning the table on
1808 * insert. However, that's annoying work that makes the code big,
1809 * so it can wait til later sometime.
1810 */
1811 PRTLOCKVALRECSHRDOWN pEntry = rtLockValidatorRecSharedFindOwner(pRec, hThread, NULL);
1812 if (pEntry)
1813 {
1814 Assert(hThread == RTThreadSelf());
1815 pEntry->cRecursion++;
1816 return;
1817 }
1818
1819 /*
1820 * Allocate a new owner entry and insert it into the table.
1821 */
1822 pEntry = rtLockValidatorRecSharedAllocOwner(pRec, hThread, pSrcPos);
1823 if ( pEntry
1824 && !rtLockValidatorRecSharedAddOwner(pRec, pEntry))
1825 rtLockValidatorRecSharedFreeOwner(pEntry);
1826}
1827RT_EXPORT_SYMBOL(RTLockValidatorRecSharedAddOwner);
1828
1829
1830RTDECL(void) RTLockValidatorRecSharedRemoveOwner(PRTLOCKVALRECSHRD pRec, RTTHREAD hThread)
1831{
1832 AssertReturnVoid(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC);
1833 if (!pRec->fEnabled)
1834 return;
1835 AssertReturnVoid(hThread != NIL_RTTHREAD);
1836 AssertReturnVoid(hThread->u32Magic == RTTHREADINT_MAGIC);
1837
1838 /*
1839 * Find the entry hope it's a recursive one.
1840 */
1841 uint32_t iEntry = UINT32_MAX; /* shuts up gcc */
1842 PRTLOCKVALRECSHRDOWN pEntry = rtLockValidatorRecSharedFindOwner(pRec, hThread, &iEntry);
1843 AssertReturnVoid(pEntry);
1844 if (pEntry->cRecursion > 1)
1845 {
1846 Assert(hThread == RTThreadSelf());
1847 pEntry->cRecursion--;
1848 }
1849 else
1850 rtLockValidatorRecSharedRemoveAndFreeOwner(pRec, pEntry, iEntry);
1851}
1852RT_EXPORT_SYMBOL(RTLockValidatorRecSharedRemoveOwner);
1853
1854
1855RTDECL(int) RTLockValidatorRecSharedCheckAndRelease(PRTLOCKVALRECSHRD pRec, RTTHREAD hThreadSelf)
1856{
1857 AssertReturn(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
1858 if (!pRec->fEnabled)
1859 return VINF_SUCCESS;
1860 if (hThreadSelf == NIL_RTTHREAD)
1861 {
1862 hThreadSelf = RTThreadSelfAutoAdopt();
1863 AssertReturn(hThreadSelf != NIL_RTTHREAD, VERR_SEM_LV_INVALID_PARAMETER);
1864 }
1865 Assert(hThreadSelf == RTThreadSelf());
1866
1867 /*
1868 * Locate the entry for this thread in the table.
1869 */
1870 uint32_t iEntry = 0;
1871 PRTLOCKVALRECSHRDOWN pEntry = rtLockValidatorRecSharedFindOwner(pRec, hThreadSelf, &iEntry);
1872 if (RT_UNLIKELY(!pEntry))
1873 {
1874 rtLockValidatorComplainFirst("Not owner (shared)", NULL, hThreadSelf, (PRTLOCKVALRECUNION)pRec);
1875 rtLockValidatorComplainPanic();
1876 return VERR_SEM_LV_NOT_OWNER;
1877 }
1878
1879 /*
1880 * Check the release order.
1881 */
1882 if (pRec->hClass != NIL_RTLOCKVALIDATORCLASS)
1883 {
1884 /** @todo order validation */
1885 }
1886
1887 /*
1888 * Release the ownership or unwind a level of recursion.
1889 */
1890 Assert(pEntry->cRecursion > 0);
1891 if (pEntry->cRecursion > 1)
1892 pEntry->cRecursion--;
1893 else
1894 rtLockValidatorRecSharedRemoveAndFreeOwner(pRec, pEntry, iEntry);
1895
1896 return VINF_SUCCESS;
1897}
1898
1899
1900RTDECL(int) RTLockValidatorRecSharedCheckSignaller(PRTLOCKVALRECSHRD pRec, RTTHREAD hThreadSelf)
1901{
1902 AssertReturn(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
1903 if (!pRec->fEnabled)
1904 return VINF_SUCCESS;
1905 if (hThreadSelf == NIL_RTTHREAD)
1906 {
1907 hThreadSelf = RTThreadSelfAutoAdopt();
1908 AssertReturn(hThreadSelf != NIL_RTTHREAD, VERR_SEM_LV_INVALID_PARAMETER);
1909 }
1910 Assert(hThreadSelf == RTThreadSelf());
1911
1912 /*
1913 * Locate the entry for this thread in the table.
1914 */
1915 uint32_t iEntry = 0;
1916 PRTLOCKVALRECSHRDOWN pEntry = rtLockValidatorRecSharedFindOwner(pRec, hThreadSelf, &iEntry);
1917 if (RT_UNLIKELY(!pEntry))
1918 {
1919 rtLockValidatorComplainFirst("Invalid signaller", NULL, hThreadSelf, (PRTLOCKVALRECUNION)pRec);
1920 rtLockValidatorComplainPanic();
1921 return VERR_SEM_LV_NOT_SIGNALLER;
1922 }
1923 return VINF_SUCCESS;
1924}
1925
1926
1927RTDECL(int32_t) RTLockValidatorWriteLockGetCount(RTTHREAD Thread)
1928{
1929 if (Thread == NIL_RTTHREAD)
1930 return 0;
1931
1932 PRTTHREADINT pThread = rtThreadGet(Thread);
1933 if (!pThread)
1934 return VERR_INVALID_HANDLE;
1935 int32_t cWriteLocks = ASMAtomicReadS32(&pThread->LockValidator.cWriteLocks);
1936 rtThreadRelease(pThread);
1937 return cWriteLocks;
1938}
1939RT_EXPORT_SYMBOL(RTLockValidatorWriteLockGetCount);
1940
1941
1942RTDECL(void) RTLockValidatorWriteLockInc(RTTHREAD Thread)
1943{
1944 PRTTHREADINT pThread = rtThreadGet(Thread);
1945 AssertReturnVoid(pThread);
1946 ASMAtomicIncS32(&pThread->LockValidator.cWriteLocks);
1947 rtThreadRelease(pThread);
1948}
1949RT_EXPORT_SYMBOL(RTLockValidatorWriteLockInc);
1950
1951
1952RTDECL(void) RTLockValidatorWriteLockDec(RTTHREAD Thread)
1953{
1954 PRTTHREADINT pThread = rtThreadGet(Thread);
1955 AssertReturnVoid(pThread);
1956 ASMAtomicDecS32(&pThread->LockValidator.cWriteLocks);
1957 rtThreadRelease(pThread);
1958}
1959RT_EXPORT_SYMBOL(RTLockValidatorWriteLockDec);
1960
1961
1962RTDECL(int32_t) RTLockValidatorReadLockGetCount(RTTHREAD Thread)
1963{
1964 if (Thread == NIL_RTTHREAD)
1965 return 0;
1966
1967 PRTTHREADINT pThread = rtThreadGet(Thread);
1968 if (!pThread)
1969 return VERR_INVALID_HANDLE;
1970 int32_t cReadLocks = ASMAtomicReadS32(&pThread->LockValidator.cReadLocks);
1971 rtThreadRelease(pThread);
1972 return cReadLocks;
1973}
1974RT_EXPORT_SYMBOL(RTLockValidatorReadLockGetCount);
1975
1976
1977RTDECL(void) RTLockValidatorReadLockInc(RTTHREAD Thread)
1978{
1979 PRTTHREADINT pThread = rtThreadGet(Thread);
1980 Assert(pThread);
1981 ASMAtomicIncS32(&pThread->LockValidator.cReadLocks);
1982 rtThreadRelease(pThread);
1983}
1984RT_EXPORT_SYMBOL(RTLockValidatorReadLockInc);
1985
1986
1987RTDECL(void) RTLockValidatorReadLockDec(RTTHREAD Thread)
1988{
1989 PRTTHREADINT pThread = rtThreadGet(Thread);
1990 Assert(pThread);
1991 ASMAtomicDecS32(&pThread->LockValidator.cReadLocks);
1992 rtThreadRelease(pThread);
1993}
1994RT_EXPORT_SYMBOL(RTLockValidatorReadLockDec);
1995
1996
/**
 * Queries which lock (if any) the given thread is currently blocking on.
 *
 * Takes the deadlock-detection serialization lock so the record the thread
 * points at cannot be torn down while we dereference it, and double-checks
 * the thread state around the record access to discard stale results.
 *
 * @returns The lock handle (hLock) the thread is sleeping on, or NULL if it
 *          is not sleeping on a known lock record (or the state changed
 *          while we looked).
 * @param   hThread     The thread to query.
 */
RTDECL(void *) RTLockValidatorQueryBlocking(RTTHREAD hThread)
{
    void *pvLock = NULL;
    PRTTHREADINT pThread = rtThreadGet(hThread);
    if (pThread)
    {
        /* Cheap unserialized pre-check before taking the detection lock. */
        RTTHREADSTATE enmState = rtThreadGetState(pThread);
        if (RTTHREAD_IS_SLEEPING(enmState))
        {
            rtLockValidatorSerializeDetectionEnter();

            /* Re-read the state now that we are serialized with detection. */
            enmState = rtThreadGetState(pThread);
            if (RTTHREAD_IS_SLEEPING(enmState))
            {
                PRTLOCKVALRECUNION pRec = rtLockValidatorReadRecUnionPtr(&pThread->LockValidator.pRec);
                if (pRec)
                {
                    switch (pRec->Core.u32Magic)
                    {
                        case RTLOCKVALRECEXCL_MAGIC:
                            pvLock = pRec->Excl.hLock;
                            break;

                        case RTLOCKVALRECSHRDOWN_MAGIC:
                            /* Per-owner record: follow the pointer to the shared
                               record and fall thru to pick up its hLock. */
                            pRec = (PRTLOCKVALRECUNION)pRec->ShrdOwner.pSharedRec;
                            if (!pRec || pRec->Core.u32Magic != RTLOCKVALRECSHRD_MAGIC)
                                break;
                            /* fall thru */
                        case RTLOCKVALRECSHRD_MAGIC:
                            pvLock = pRec->Shared.hLock;
                            break;
                    }
                    /* If the thread state changed while we were reading the
                       record, the result may be stale - discard it. */
                    if (RTThreadGetState(pThread) != enmState)
                        pvLock = NULL;
                }
            }

            rtLockValidatorSerializeDetectionLeave();
        }
        rtThreadRelease(pThread);
    }
    return pvLock;
}
RT_EXPORT_SYMBOL(RTLockValidatorQueryBlocking);
2040
2041
2042RTDECL(bool) RTLockValidatorIsBlockedThreadInValidator(RTTHREAD hThread)
2043{
2044 bool fRet = false;
2045 PRTTHREADINT pThread = rtThreadGet(hThread);
2046 if (pThread)
2047 {
2048 fRet = ASMAtomicReadBool(&pThread->LockValidator.fInValidator);
2049 rtThreadRelease(pThread);
2050 }
2051 return fRet;
2052}
2053RT_EXPORT_SYMBOL(RTLockValidatorIsBlockedThreadInValidator);
2054
2055
2056RTDECL(bool) RTLockValidatorSetEnabled(bool fEnabled)
2057{
2058 return ASMAtomicXchgBool(&g_fLockValidatorEnabled, fEnabled);
2059}
2060RT_EXPORT_SYMBOL(RTLockValidatorSetEnabled);
2061
2062
2063RTDECL(bool) RTLockValidatorIsEnabled(void)
2064{
2065 return ASMAtomicUoReadBool(&g_fLockValidatorEnabled);
2066}
2067RT_EXPORT_SYMBOL(RTLockValidatorIsEnabled);
2068
2069
2070RTDECL(bool) RTLockValidatorSetQuiet(bool fQuiet)
2071{
2072 return ASMAtomicXchgBool(&g_fLockValidatorQuiet, fQuiet);
2073}
2074RT_EXPORT_SYMBOL(RTLockValidatorSetQuiet);
2075
2076
2077RTDECL(bool) RTLockValidatorAreQuiet(void)
2078{
2079 return ASMAtomicUoReadBool(&g_fLockValidatorQuiet);
2080}
2081RT_EXPORT_SYMBOL(RTLockValidatorAreQuiet);
2082
2083
2084RTDECL(bool) RTLockValidatorSetMayPanic(bool fMayPanic)
2085{
2086 return ASMAtomicXchgBool(&g_fLockValidatorMayPanic, fMayPanic);
2087}
2088RT_EXPORT_SYMBOL(RTLockValidatorSetMayPanic);
2089
2090
2091RTDECL(bool) RTLockValidatorMayPanic(void)
2092{
2093 return ASMAtomicUoReadBool(&g_fLockValidatorMayPanic);
2094}
2095RT_EXPORT_SYMBOL(RTLockValidatorMayPanic);
2096
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette