VirtualBox

source: vbox/trunk/src/VBox/Runtime/common/misc/lockvalidator.cpp@ 25617

Last change on this file since 25617 was 25617, checked in by vboxsync, 15 years ago

iprt: Added lock validator testcase for read-write semaphore deadlocks. Fixed bugs found with it.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 63.0 KB
Line 
1/* $Id: lockvalidator.cpp 25617 2010-01-02 00:14:47Z vboxsync $ */
2/** @file
3 * IPRT - Lock Validator.
4 */
5
6/*
7 * Copyright (C) 2009 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 *
26 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
27 * Clara, CA 95054 USA or visit http://www.sun.com if you need
28 * additional information or have any questions.
29 */
30
31/*******************************************************************************
32* Header Files *
33*******************************************************************************/
34#include <iprt/lockvalidator.h>
35#include "internal/iprt.h"
36
37#include <iprt/asm.h>
38#include <iprt/assert.h>
39#include <iprt/err.h>
40#include <iprt/mem.h>
41#include <iprt/once.h>
42#include <iprt/semaphore.h>
43#include <iprt/string.h>
44#include <iprt/thread.h>
45
46#include "internal/lockvalidator.h"
47#include "internal/magics.h"
48#include "internal/thread.h"
49
50
51/*******************************************************************************
52* Structures and Typedefs *
53*******************************************************************************/
/**
 * Deadlock detection stack entry.
 *
 * One entry is pushed for each lock record visited while walking the chain of
 * waiting threads (see rtLockValidatorDdDoDetection).
 */
typedef struct RTLOCKVALDDENTRY
{
    /** The current record. */
    PRTLOCKVALRECUNION      pRec;
    /** The current entry number if pRec is a shared one. */
    uint32_t                iEntry;
    /** The thread state of the thread we followed to get to pFirstSibling.
     * This is only used for validating a deadlock stack. */
    RTTHREADSTATE           enmState;
    /** The thread we followed to get to pFirstSibling.
     * This is only used for validating a deadlock stack. */
    PRTTHREADINT            pThread;
    /** What pThread is waiting on, i.e. where we entered the circular list of
     * siblings.  This is used for validating a deadlock stack as well as
     * terminating the sibling walk. */
    PRTLOCKVALRECUNION      pFirstSibling;
} RTLOCKVALDDENTRY;
74
75
/**
 * Deadlock detection stack.
 *
 * Fixed-size; rtLockValidatorDdHandleStackOverflow deals with chains deeper
 * than 32 levels.
 */
typedef struct RTLOCKVALDDSTACK
{
    /** The number of used stack entries. */
    uint32_t                c;
    /** The stack entries. */
    RTLOCKVALDDENTRY        a[32];
} RTLOCKVALDDSTACK;
/** Pointer to a deadlock detection stack. */
typedef RTLOCKVALDDSTACK *PRTLOCKVALDDSTACK;
88
89
90/*******************************************************************************
91* Defined Constants And Macros *
92*******************************************************************************/
/** Macro that asserts that a pointer is aligned correctly.
 * Only used when fighting bugs.
 * @note The active expansion ends with a semicolon, so invocations written
 *       with a trailing ';' yield an extra empty statement -- harmless at
 *       statement level. */
#if 1
# define RTLOCKVAL_ASSERT_PTR_ALIGN(p) \
    AssertMsg(!((uintptr_t)(p) & (sizeof(uintptr_t) - 1)), ("%p\n", (p)));
#else
# define RTLOCKVAL_ASSERT_PTR_ALIGN(p) do { } while (0)
#endif
101
102
103/*******************************************************************************
104* Global Variables *
105*******************************************************************************/
/** Serializing object destruction and deadlock detection.
 *
 * This makes sure that none of the memory examined by the deadlock detection
 * code will become invalid (reused for other purposes or made not present)
 * while the detection is in progress.
 *
 * NS: RTLOCKVALREC*, RTTHREADINT and RTLOCKVALDRECSHRD::papOwners destruction.
 * EW: Deadlock detection and some related activities.
 *
 * Created lazily by RTLockValidatorRecExclInit.
 */
static RTSEMXROADS      g_hLockValidatorXRoads = NIL_RTSEMXROADS;
/** Whether the lock validator is enabled or disabled.
 * Only applies to new locks. */
static bool volatile    g_fLockValidatorEnabled = true;
/** Set if the lock validator is quiet.  Quiet by default in non-strict
 * (release) builds. */
#ifdef RT_STRICT
static bool volatile    g_fLockValidatorQuiet = false;
#else
static bool volatile    g_fLockValidatorQuiet = true;
#endif
/** Set if the lock validator may panic.  Panics only in strict (debug)
 * builds by default. */
#ifdef RT_STRICT
static bool volatile    g_fLockValidatorMayPanic = true;
#else
static bool volatile    g_fLockValidatorMayPanic = false;
#endif
131
132
133/** Wrapper around ASMAtomicReadPtr. */
134DECL_FORCE_INLINE(PRTLOCKVALRECUNION) rtLockValidatorReadRecUnionPtr(PRTLOCKVALRECUNION volatile *ppRec)
135{
136 PRTLOCKVALRECUNION p = (PRTLOCKVALRECUNION)ASMAtomicReadPtr((void * volatile *)ppRec);
137 RTLOCKVAL_ASSERT_PTR_ALIGN(p);
138 return p;
139}
140
141
/** Wrapper around ASMAtomicWritePtr.
 * Asserts that the new pointer is properly aligned before storing it. */
DECL_FORCE_INLINE(void) rtLockValidatorWriteRecUnionPtr(PRTLOCKVALRECUNION volatile *ppRec, PRTLOCKVALRECUNION pRecNew)
{
    RTLOCKVAL_ASSERT_PTR_ALIGN(pRecNew);
    ASMAtomicWritePtr((void * volatile *)ppRec, pRecNew);
}
148
149
150/** Wrapper around ASMAtomicReadPtr. */
151DECL_FORCE_INLINE(PRTTHREADINT) rtLockValidatorReadThreadHandle(RTTHREAD volatile *phThread)
152{
153 PRTTHREADINT p = (PRTTHREADINT)ASMAtomicReadPtr((void * volatile *)phThread);
154 RTLOCKVAL_ASSERT_PTR_ALIGN(p);
155 return p;
156}
157
158
159/** Wrapper around ASMAtomicUoReadPtr. */
160DECL_FORCE_INLINE(PRTLOCKVALRECSHRDOWN) rtLockValidatorUoReadSharedOwner(PRTLOCKVALRECSHRDOWN volatile *ppOwner)
161{
162 PRTLOCKVALRECSHRDOWN p = (PRTLOCKVALRECSHRDOWN)ASMAtomicUoReadPtr((void * volatile *)ppOwner);
163 RTLOCKVAL_ASSERT_PTR_ALIGN(p);
164 return p;
165}
166
167
168/**
169 * Reads a volatile thread handle field and returns the thread name.
170 *
171 * @returns Thread name (read only).
172 * @param phThread The thread handle field.
173 */
174static const char *rtLockValidatorNameThreadHandle(RTTHREAD volatile *phThread)
175{
176 PRTTHREADINT pThread = rtLockValidatorReadThreadHandle(phThread);
177 if (!pThread)
178 return "<NIL>";
179 if (!VALID_PTR(pThread))
180 return "<INVALID>";
181 if (pThread->u32Magic != RTTHREADINT_MAGIC)
182 return "<BAD-THREAD-MAGIC>";
183 return pThread->szName;
184}
185
186
/**
 * Launch a simple assertion like complaint w/ panic.
 *
 * Does nothing at all when the validator is in quiet mode.
 *
 * @param   RT_SRC_POS_DECL     Where from; expands to the iLine, pszFile and
 *                              pszFunction parameters used below.
 * @param   pszWhat             What we're complaining about.
 * @param   ...                 Format arguments.
 */
static void rtLockValidatorComplain(RT_SRC_POS_DECL, const char *pszWhat, ...)
{
    if (!ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
    {
        RTAssertMsg1Weak("RTLockValidator", iLine, pszFile, pszFunction);
        va_list va;
        va_start(va, pszWhat);
        RTAssertMsg2WeakV(pszWhat, va);
        va_end(va);
    }
    /* NOTE(review): the panic is gated on quietness, not on
       g_fLockValidatorMayPanic (cf. rtLockValidatorComplainPanic) -- confirm
       this is intentional. */
    if (!ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
        RTAssertPanic();
}
207
208
/**
 * Describes the lock.
 *
 * Appends a one-line description of pRec to the current assertion message,
 * picking the format by record type.  Quietly ignores invalid pointers and
 * does nothing in quiet mode.
 *
 * @param   pszPrefix   Message prefix.
 * @param   pRec        The lock record we're working on.
 * @param   pszSuffix   Message suffix.
 */
static void rtLockValidatorComplainAboutLock(const char *pszPrefix, PRTLOCKVALRECUNION pRec, const char *pszSuffix)
{
    if (   VALID_PTR(pRec)
        && !ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
    {
        switch (pRec->Core.u32Magic)
        {
            case RTLOCKVALRECEXCL_MAGIC:
                RTAssertMsg2AddWeak("%s%p %s xrec=%p own=%s nest=%u pos={%Rbn(%u) %Rfn %p}%s", pszPrefix,
                                    pRec->Excl.hLock, pRec->Excl.pszName, pRec,
                                    rtLockValidatorNameThreadHandle(&pRec->Excl.hThread), pRec->Excl.cRecursion,
                                    pRec->Excl.SrcPos.pszFile, pRec->Excl.SrcPos.uLine, pRec->Excl.SrcPos.pszFunction, pRec->Excl.SrcPos.uId,
                                    pszSuffix);
                break;

            case RTLOCKVALRECSHRD_MAGIC:
                RTAssertMsg2AddWeak("%s%p %s srec=%p%s", pszPrefix,
                                    pRec->Shared.hLock, pRec->Shared.pszName, pRec,
                                    pszSuffix);
                break;

            case RTLOCKVALRECSHRDOWN_MAGIC:
            {
                /* Shared-owner records are described together with their parent
                   shared record when the latter is still valid. */
                PRTLOCKVALRECSHRD pShared = pRec->ShrdOwner.pSharedRec;
                if (   VALID_PTR(pShared)
                    && pShared->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC)
                    RTAssertMsg2AddWeak("%s%p %s srec=%p trec=%p thr=%s nest=%u pos={%Rbn(%u) %Rfn %p}%s", pszPrefix,
                                        pShared->hLock, pShared->pszName, pShared,
                                        pRec, rtLockValidatorNameThreadHandle(&pRec->ShrdOwner.hThread), pRec->ShrdOwner.cRecursion,
                                        pRec->ShrdOwner.SrcPos.pszFile, pRec->ShrdOwner.SrcPos.uLine, pRec->ShrdOwner.SrcPos.pszFunction, pRec->ShrdOwner.SrcPos.uId,
                                        pszSuffix);
                else
                    RTAssertMsg2AddWeak("%sbad srec=%p trec=%p thr=%s nest=%u pos={%Rbn(%u) %Rfn %p}%s", pszPrefix,
                                        pShared,
                                        pRec, rtLockValidatorNameThreadHandle(&pRec->ShrdOwner.hThread), pRec->ShrdOwner.cRecursion,
                                        pRec->ShrdOwner.SrcPos.pszFile, pRec->ShrdOwner.SrcPos.uLine, pRec->ShrdOwner.SrcPos.pszFunction, pRec->ShrdOwner.SrcPos.uId,
                                        pszSuffix);
                break;
            }

            default:
                RTAssertMsg2AddWeak("%spRec=%p u32Magic=%#x (bad)%s", pszPrefix, pRec, pRec->Core.u32Magic, pszSuffix);
                break;
        }
    }
}
262
263
/**
 * Launch the initial complaint.
 *
 * Emits the assertion header (using pSrcPos when available), the main
 * complaint line tagged with the calling thread's name, and a description of
 * the lock involved.  Silent in quiet mode.
 *
 * @param   pszWhat             What we're complaining about.
 * @param   pSrcPos             Where we are complaining from, as it were.
 *                              Optional.
 * @param   pThreadSelf         The calling thread.
 * @param   pRec                The main lock involved.  Can be NULL.
 */
static void rtLockValidatorComplainFirst(const char *pszWhat, PCRTLOCKVALSRCPOS pSrcPos, PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION pRec)
{
    if (!ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
    {
        RTAssertMsg1Weak("RTLockValidator", pSrcPos ? pSrcPos->uLine : 0, pSrcPos ? pSrcPos->pszFile : NULL, pSrcPos ? pSrcPos->pszFunction : NULL);
        if (pSrcPos && pSrcPos->uId)
            RTAssertMsg2Weak("%s [uId=%p thrd=%s]\n", pszWhat, pSrcPos->uId, VALID_PTR(pThreadSelf) ? pThreadSelf->szName : "<NIL>");
        else
            RTAssertMsg2Weak("%s [thrd=%s]\n", pszWhat, VALID_PTR(pThreadSelf) ? pThreadSelf->szName : "<NIL>");
        rtLockValidatorComplainAboutLock("Lock: ", pRec, "\n");
    }
}
284
285
/**
 * Continue bitching.
 *
 * Appends to the assertion message started by rtLockValidatorComplainFirst.
 * Silent in quiet mode.
 *
 * @param   pszFormat           Format string.
 * @param   ...                 Format arguments.
 */
static void rtLockValidatorComplainMore(const char *pszFormat, ...)
{
    if (!ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
    {
        va_list va;
        va_start(va, pszFormat);
        RTAssertMsg2AddWeakV(pszFormat, va);
        va_end(va);
    }
}
302
303
304/**
305 * Raise a panic if enabled.
306 */
307static void rtLockValidatorComplainPanic(void)
308{
309 if (ASMAtomicUoReadBool(&g_fLockValidatorMayPanic))
310 RTAssertPanic();
311}
312
313
/**
 * Copy a source position record.
 *
 * Each field is written with an unordered atomic store, so readers always see
 * whole field values, but the record as a whole is not updated atomically and
 * a concurrent reader may observe a mix of old and new fields.
 *
 * @param   pDst        The destination.
 * @param   pSrc        The source.  Can be NULL, in which case pDst is
 *                      cleared.
 */
DECL_FORCE_INLINE(void) rtLockValidatorCopySrcPos(PRTLOCKVALSRCPOS pDst, PCRTLOCKVALSRCPOS pSrc)
{
    if (pSrc)
    {
        ASMAtomicUoWriteU32(&pDst->uLine, pSrc->uLine);
        ASMAtomicUoWritePtr((void * volatile *)&pDst->pszFile, pSrc->pszFile);
        ASMAtomicUoWritePtr((void * volatile *)&pDst->pszFunction, pSrc->pszFunction);
        ASMAtomicUoWritePtr((void * volatile *)&pDst->uId, (void *)pSrc->uId);
    }
    else
    {
        ASMAtomicUoWriteU32(&pDst->uLine, 0);
        ASMAtomicUoWritePtr((void * volatile *)&pDst->pszFile, NULL);
        ASMAtomicUoWritePtr((void * volatile *)&pDst->pszFunction, NULL);
        ASMAtomicUoWritePtr((void * volatile *)&pDst->uId, 0);
    }
}
337
338
339/**
340 * Init a source position record.
341 *
342 * @param pSrcPos The source position record.
343 */
344DECL_FORCE_INLINE(void) rtLockValidatorInitSrcPos(PRTLOCKVALSRCPOS pSrcPos)
345{
346 pSrcPos->pszFile = NULL;
347 pSrcPos->pszFunction = NULL;
348 pSrcPos->uId = 0;
349 pSrcPos->uLine = 0;
350#if HC_ARCH_BITS == 64
351 pSrcPos->u32Padding = 0;
352#endif
353}
354
355
356/**
357 * Serializes destruction of RTLOCKVALREC* and RTTHREADINT structures.
358 */
359DECLHIDDEN(void) rtLockValidatorSerializeDestructEnter(void)
360{
361 RTSEMXROADS hXRoads = g_hLockValidatorXRoads;
362 if (hXRoads != NIL_RTSEMXROADS)
363 RTSemXRoadsNSEnter(hXRoads);
364}
365
366
367/**
368 * Call after rtLockValidatorSerializeDestructEnter.
369 */
370DECLHIDDEN(void) rtLockValidatorSerializeDestructLeave(void)
371{
372 RTSEMXROADS hXRoads = g_hLockValidatorXRoads;
373 if (hXRoads != NIL_RTSEMXROADS)
374 RTSemXRoadsNSLeave(hXRoads);
375}
376
377
378/**
379 * Serializes deadlock detection against destruction of the objects being
380 * inspected.
381 */
382DECLINLINE(void) rtLockValidatorSerializeDetectionEnter(void)
383{
384 RTSEMXROADS hXRoads = g_hLockValidatorXRoads;
385 if (hXRoads != NIL_RTSEMXROADS)
386 RTSemXRoadsEWEnter(hXRoads);
387}
388
389
390/**
391 * Call after rtLockValidatorSerializeDetectionEnter.
392 */
393DECLHIDDEN(void) rtLockValidatorSerializeDetectionLeave(void)
394{
395 RTSEMXROADS hXRoads = g_hLockValidatorXRoads;
396 if (hXRoads != NIL_RTSEMXROADS)
397 RTSemXRoadsEWLeave(hXRoads);
398}
399
400
401/**
402 * Initializes the per thread lock validator data.
403 *
404 * @param pPerThread The data.
405 */
406DECLHIDDEN(void) rtLockValidatorInitPerThread(RTLOCKVALPERTHREAD *pPerThread)
407{
408 pPerThread->bmFreeShrdOwners = UINT32_MAX;
409
410 /* ASSUMES the rest has already been zeroed. */
411 Assert(pPerThread->pRec == NULL);
412 Assert(pPerThread->cWriteLocks == 0);
413 Assert(pPerThread->cReadLocks == 0);
414}
415
416
/**
 * Verifies the deadlock stack before calling it a deadlock.
 *
 * Re-validates every recorded thread, its state and what it is waiting on
 * three times, yielding between passes, so a transient state change does not
 * produce a false positive.
 *
 * @retval  VERR_SEM_LV_DEADLOCK if it's a deadlock.
 * @retval  VERR_SEM_LV_ILLEGAL_UPGRADE if it's a deadlock on the same lock.
 * @retval  VERR_TRY_AGAIN if something changed.
 *
 * @param   pStack      The deadlock detection stack.
 */
static int rtLockValidatorDdVerifyDeadlock(PRTLOCKVALDDSTACK pStack)
{
    uint32_t const c = pStack->c;
    for (uint32_t iPass = 0; iPass < 3; iPass++)
    {
        for (uint32_t i = 1; i < c; i++)
        {
            PRTTHREADINT pThread = pStack->a[i].pThread;
            if (pThread->u32Magic != RTTHREADINT_MAGIC)
                return VERR_TRY_AGAIN;
            if (rtThreadGetState(pThread) != pStack->a[i].enmState)
                return VERR_TRY_AGAIN;
            if (rtLockValidatorReadRecUnionPtr(&pThread->LockValidator.pRec) != pStack->a[i].pFirstSibling)
                return VERR_TRY_AGAIN;
        }
        RTThreadYield();
    }

    /* A one-entry stack means we are waiting on a lock we already own. */
    if (c == 1)
        return VERR_SEM_LV_ILLEGAL_UPGRADE;
    return VERR_SEM_LV_DEADLOCK;
}
448
449
450/**
451 * Checks for stack cycles caused by another deadlock before returning.
452 *
453 * @retval VINF_SUCCESS if the stack is simply too small.
454 * @retval VERR_SEM_LV_EXISTING_DEADLOCK if a cycle was detected.
455 *
456 * @param pStack The deadlock detection stack.
457 */
458static int rtLockValidatorDdHandleStackOverflow(PRTLOCKVALDDSTACK pStack)
459{
460 for (size_t i = 0; i < RT_ELEMENTS(pStack->a) - 1; i++)
461 {
462 PRTTHREADINT pThread = pStack->a[i].pThread;
463 for (size_t j = i + 1; j < RT_ELEMENTS(pStack->a); j++)
464 if (pStack->a[j].pThread == pThread)
465 return VERR_SEM_LV_EXISTING_DEADLOCK;
466 }
467 static bool volatile s_fComplained = false;
468 if (!s_fComplained)
469 {
470 s_fComplained = true;
471 rtLockValidatorComplain(RT_SRC_POS, "lock validator stack is too small! (%zu entries)\n", RT_ELEMENTS(pStack->a));
472 }
473 return VINF_SUCCESS;
474}
475
476
477/**
478 * Worker for rtLockValidatorDeadlockDetection that does the actual deadlock
479 * detection.
480 *
481 * @retval VINF_SUCCESS
482 * @retval VERR_SEM_LV_DEADLOCK
483 * @retval VERR_SEM_LV_EXISTING_DEADLOCK
484 * @retval VERR_SEM_LV_ILLEGAL_UPGRADE
485 * @retval VERR_TRY_AGAIN
486 *
487 * @param pStack The stack to use.
488 * @param pOriginalRec The original record.
489 * @param pThreadSelf The calling thread.
490 */
491static int rtLockValidatorDdDoDetection(PRTLOCKVALDDSTACK pStack, PRTLOCKVALRECUNION const pOriginalRec,
492 PRTTHREADINT const pThreadSelf)
493{
494 pStack->c = 0;
495
496 /* We could use a single RTLOCKVALDDENTRY variable here, but the
497 compiler may make a better job of it when using individual variables. */
498 PRTLOCKVALRECUNION pRec = pOriginalRec;
499 PRTLOCKVALRECUNION pFirstSibling = pOriginalRec;
500 uint32_t iEntry = UINT32_MAX;
501 PRTTHREADINT pThread = NIL_RTTHREAD;
502 RTTHREADSTATE enmState = RTTHREADSTATE_RUNNING;
503 for (;;)
504 {
505 /*
506 * Process the current record.
507 */
508 RTLOCKVAL_ASSERT_PTR_ALIGN(pRec);
509
510 /* Find the next relevant owner thread. */
511 PRTTHREADINT pNextThread;
512 switch (pRec->Core.u32Magic)
513 {
514 case RTLOCKVALRECEXCL_MAGIC:
515 Assert(iEntry == UINT32_MAX);
516 pNextThread = rtLockValidatorReadThreadHandle(&pRec->Excl.hThread);
517 if ( !pNextThread
518 || ( pNextThread != pThreadSelf
519 && ( pNextThread->u32Magic != RTTHREADINT_MAGIC
520 || !RTTHREAD_IS_SLEEPING(rtThreadGetState(pNextThread)) )
521 )
522 )
523 {
524 pRec = pRec->Excl.pSibling;
525 if ( pRec
526 && pRec != pFirstSibling)
527 continue;
528 pNextThread = NIL_RTTHREAD;
529 }
530 break;
531
532 case RTLOCKVALRECSHRD_MAGIC:
533 /* Skip to the next sibling if same side. ASSUMES reader priority. */
534 /** @todo The read side of a read-write lock is problematic if
535 * the implementation prioritizes writers over readers because
536 * that means we should could deadlock against current readers
537 * if a writer showed up. If the RW sem implementation is
538 * wrapping some native API, it's not so easy to detect when we
539 * should do this and when we shouldn't. Checking when we
540 * shouldn't is subject to wakeup scheduling and cannot easily
541 * be made reliable.
542 *
543 * At the moment we circumvent all this mess by declaring that
544 * readers has priority. This is TRUE on linux, but probably
545 * isn't on Solaris and FreeBSD. */
546 if ( pRec == pFirstSibling
547 && pRec->Shared.pSibling != NULL
548 && pRec->Shared.pSibling != pFirstSibling)
549 {
550 pRec = pRec->Shared.pSibling;
551 Assert(iEntry == UINT32_MAX);
552 continue;
553 }
554
555 /* Scan the owner table for blocked owners. */
556 pNextThread = NIL_RTTHREAD;
557 if (ASMAtomicUoReadU32(&pRec->Shared.cEntries) > 0)
558 {
559 uint32_t cAllocated = ASMAtomicUoReadU32(&pRec->Shared.cAllocated);
560 PRTLOCKVALRECSHRDOWN volatile *papOwners = pRec->Shared.papOwners;
561 while (++iEntry < cAllocated)
562 {
563 PRTLOCKVALRECSHRDOWN pEntry = rtLockValidatorUoReadSharedOwner(&papOwners[iEntry]);
564 if ( pEntry
565 && pEntry->Core.u32Magic == RTLOCKVALRECSHRDOWN_MAGIC)
566 {
567 pNextThread = rtLockValidatorReadThreadHandle(&pEntry->hThread);
568 if (pNextThread)
569 {
570 if ( pNextThread->u32Magic == RTTHREADINT_MAGIC
571 && ( RTTHREAD_IS_SLEEPING(rtThreadGetState(pNextThread))
572 || pNextThread == pThreadSelf))
573 break;
574 pNextThread = NIL_RTTHREAD;
575 }
576 }
577 else
578 Assert(!pEntry || pEntry->Core.u32Magic == RTLOCKVALRECSHRDOWN_MAGIC_DEAD);
579 }
580 if (pNextThread != NIL_RTTHREAD)
581 break;
582 }
583
584 /* Advance to the next sibling, if any. */
585 pRec = pRec->Shared.pSibling;
586 if ( pRec != NULL
587 && pRec != pFirstSibling)
588 {
589 iEntry = UINT32_MAX;
590 continue;
591 }
592 Assert(pNextThread == NIL_RTTHREAD);
593 break;
594
595 case RTLOCKVALRECEXCL_MAGIC_DEAD:
596 case RTLOCKVALRECSHRD_MAGIC_DEAD:
597 pNextThread = NIL_RTTHREAD;
598 break;
599
600 case RTLOCKVALRECSHRDOWN_MAGIC:
601 case RTLOCKVALRECSHRDOWN_MAGIC_DEAD:
602 default:
603 AssertMsgFailed(("%p: %#x\n", pRec, pRec->Core));
604 pNextThread = NIL_RTTHREAD;
605 break;
606 }
607
608 /* If we found a thread, check if it is still waiting for something. */
609 RTTHREADSTATE enmNextState = RTTHREADSTATE_RUNNING;
610 PRTLOCKVALRECUNION pNextRec = NULL;
611 if ( pNextThread != NIL_RTTHREAD
612 && RT_LIKELY(pNextThread->u32Magic == RTTHREADINT_MAGIC))
613 {
614 do
615 {
616 enmNextState = rtThreadGetState(pNextThread);
617 if ( !RTTHREAD_IS_SLEEPING(enmNextState)
618 && pNextThread != pThreadSelf)
619 break;
620 pNextRec = rtLockValidatorReadRecUnionPtr(&pNextThread->LockValidator.pRec);
621 if (RT_LIKELY( !pNextRec
622 || enmNextState == rtThreadGetState(pNextThread)))
623 break;
624 pNextRec = NULL;
625 } while (pNextThread->u32Magic == RTTHREADINT_MAGIC);
626 }
627 if (pNextRec)
628 {
629 /*
630 * Recurse and check for deadlock.
631 */
632 uint32_t i = pStack->c;
633 if (RT_UNLIKELY(i >= RT_ELEMENTS(pStack->a)))
634 return rtLockValidatorDdHandleStackOverflow(pStack);
635
636 pStack->c++;
637 pStack->a[i].pRec = pRec;
638 pStack->a[i].iEntry = iEntry;
639 pStack->a[i].enmState = enmState;
640 pStack->a[i].pThread = pThread;
641 pStack->a[i].pFirstSibling = pFirstSibling;
642
643 if (RT_UNLIKELY(pNextThread == pThreadSelf))
644 return rtLockValidatorDdVerifyDeadlock(pStack);
645
646 pRec = pNextRec;
647 pFirstSibling = pNextRec;
648 iEntry = UINT32_MAX;
649 enmState = enmNextState;
650 pThread = pNextThread;
651 }
652 else if (RT_LIKELY(!pNextThread))
653 {
654 /*
655 * No deadlock here, unwind the stack and deal with any unfinished
656 * business there.
657 */
658 uint32_t i = pStack->c;
659 for (;;)
660 {
661 /* pop */
662 if (i == 0)
663 return VINF_SUCCESS;
664 i--;
665 pRec = pStack->a[i].pRec;
666 iEntry = pStack->a[i].iEntry;
667
668 /* Examine it. */
669 uint32_t u32Magic = pRec->Core.u32Magic;
670 if (u32Magic == RTLOCKVALRECEXCL_MAGIC)
671 pRec = pRec->Excl.pSibling;
672 else if (u32Magic == RTLOCKVALRECSHRD_MAGIC)
673 {
674 if (iEntry + 1 < pRec->Shared.cAllocated)
675 break; /* continue processing this record. */
676 pRec = pRec->Shared.pSibling;
677 }
678 else
679 {
680 Assert( u32Magic == RTLOCKVALRECEXCL_MAGIC_DEAD
681 || u32Magic == RTLOCKVALRECSHRD_MAGIC_DEAD);
682 continue;
683 }
684
685 /* Any next record to advance to? */
686 if ( !pRec
687 || pRec == pStack->a[i].pFirstSibling)
688 continue;
689 iEntry = UINT32_MAX;
690 break;
691 }
692
693 /* Restore the rest of the state and update the stack. */
694 pFirstSibling = pStack->a[i].pFirstSibling;
695 enmState = pStack->a[i].enmState;
696 pThread = pStack->a[i].pThread;
697 pStack->c = i;
698 }
699 /* else: see if there is another thread to check for this lock. */
700 }
701}
702
703
704/**
705 * Check for the simple no-deadlock case.
706 *
707 * @returns true if no deadlock, false if further investigation is required.
708 *
709 * @param pOriginalRec The original record.
710 */
711DECLINLINE(int) rtLockValidatorIsSimpleNoDeadlockCase(PRTLOCKVALRECUNION pOriginalRec)
712{
713 if ( pOriginalRec->Excl.Core.u32Magic == RTLOCKVALRECEXCL_MAGIC
714 && !pOriginalRec->Excl.pSibling)
715 {
716 PRTTHREADINT pThread = rtLockValidatorReadThreadHandle(&pOriginalRec->Excl.hThread);
717 if ( !pThread
718 || pThread->u32Magic != RTTHREADINT_MAGIC)
719 return true;
720 RTTHREADSTATE enmState = rtThreadGetState(pThread);
721 if (!RTTHREAD_IS_SLEEPING(enmState))
722 return true;
723 }
724 return false;
725}
726
727
/**
 * Worker for rtLockValidatorDeadlockDetection that bitches about a deadlock.
 *
 * Dumps the main lock plus the full chain of locks/owners on the deadlock
 * detection stack, then panics if panicking is enabled.  Silent in quiet
 * mode (the panic check is still performed).
 *
 * @param   pStack          The chain of locks causing the deadlock.
 * @param   pRec            The record relating to the current thread's lock
 *                          operation.
 * @param   pThreadSelf     This thread.
 * @param   pSrcPos         Where we are going to deadlock.
 * @param   rc              The return code.
 */
static void rcLockValidatorDoDeadlockComplaining(PRTLOCKVALDDSTACK pStack, PRTLOCKVALRECUNION pRec,
                                                 PRTTHREADINT pThreadSelf, PCRTLOCKVALSRCPOS pSrcPos, int rc)
{
    if (!ASMAtomicUoReadBool(&g_fLockValidatorQuiet))
    {
        const char *pszWhat;
        switch (rc)
        {
            case VERR_SEM_LV_DEADLOCK: pszWhat = "Detected deadlock!"; break;
            case VERR_SEM_LV_EXISTING_DEADLOCK: pszWhat = "Found existing deadlock!"; break;
            case VERR_SEM_LV_ILLEGAL_UPGRADE: pszWhat = "Illegal lock upgrade!"; break;
            default: AssertFailed(); pszWhat = "!unexpected rc!"; break;
        }
        /* Only pass pRec separately when it is not already the first stack entry. */
        rtLockValidatorComplainFirst(pszWhat, pSrcPos, pThreadSelf, pStack->a[0].pRec != pRec ? pRec : NULL);
        rtLockValidatorComplainMore("---- start of deadlock chain - %u entries ----\n", pStack->c);
        for (uint32_t i = 0; i < pStack->c; i++)
        {
            char szPrefix[24];
            RTStrPrintf(szPrefix, sizeof(szPrefix), "#%02u: ", i);
            /* For shared records, describe the specific owner entry if still valid. */
            PRTLOCKVALRECSHRDOWN pShrdOwner = NULL;
            if (pStack->a[i].pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC)
                pShrdOwner = pStack->a[i].pRec->Shared.papOwners[pStack->a[i].iEntry];
            if (VALID_PTR(pShrdOwner) && pShrdOwner->Core.u32Magic == RTLOCKVALRECSHRDOWN_MAGIC)
                rtLockValidatorComplainAboutLock(szPrefix, (PRTLOCKVALRECUNION)pShrdOwner, "\n");
            else
                rtLockValidatorComplainAboutLock(szPrefix, pStack->a[i].pRec, "\n");
        }
        rtLockValidatorComplainMore("---- end of deadlock chain ----\n");
    }

    rtLockValidatorComplainPanic();
}
770
771
/**
 * Perform deadlock detection.
 *
 * @retval  VINF_SUCCESS
 * @retval  VERR_SEM_LV_DEADLOCK
 * @retval  VERR_SEM_LV_EXISTING_DEADLOCK
 * @retval  VERR_SEM_LV_ILLEGAL_UPGRADE
 *
 * @param   pRec            The record relating to the current thread's lock
 *                          operation.
 * @param   pThreadSelf     The current thread.
 * @param   pSrcPos         The position of the current lock operation.
 *
 * @note    NOTE(review): the whole detection body is compiled out unless
 *          DEBUG_bird is defined, i.e. this is a no-op in regular builds --
 *          confirm this gating is intentional.
 */
static int rtLockValidatorDeadlockDetection(PRTLOCKVALRECUNION pRec, PRTTHREADINT pThreadSelf, PCRTLOCKVALSRCPOS pSrcPos)
{
#ifdef DEBUG_bird
    RTLOCKVALDDSTACK Stack;
    int rc = rtLockValidatorDdDoDetection(&Stack, pRec, pThreadSelf);
    if (RT_SUCCESS(rc))
        return VINF_SUCCESS;

    if (rc == VERR_TRY_AGAIN)
    {
        /* Something changed while walking; retry a few times, then give the
           thread the benefit of the doubt. */
        for (uint32_t iLoop = 0; ; iLoop++)
        {
            rc = rtLockValidatorDdDoDetection(&Stack, pRec, pThreadSelf);
            if (RT_SUCCESS_NP(rc))
                return VINF_SUCCESS;
            if (rc != VERR_TRY_AGAIN)
                break;
            RTThreadYield();
            if (iLoop >= 3)
                return VINF_SUCCESS;
        }
    }

    rcLockValidatorDoDeadlockComplaining(&Stack, pRec, pThreadSelf, pSrcPos, rc);
    return rc;
#else
    return VINF_SUCCESS;
#endif
}
814
815
816/**
817 * Unlinks all siblings.
818 *
819 * This is used during record deletion and assumes no races.
820 *
821 * @param pCore One of the siblings.
822 */
823static void rtLockValidatorUnlinkAllSiblings(PRTLOCKVALRECCORE pCore)
824{
825 /* ASSUMES sibling destruction doesn't involve any races and that all
826 related records are to be disposed off now. */
827 PRTLOCKVALRECUNION pSibling = (PRTLOCKVALRECUNION)pCore;
828 while (pSibling)
829 {
830 PRTLOCKVALRECUNION volatile *ppCoreNext;
831 switch (pSibling->Core.u32Magic)
832 {
833 case RTLOCKVALRECEXCL_MAGIC:
834 case RTLOCKVALRECEXCL_MAGIC_DEAD:
835 ppCoreNext = &pSibling->Excl.pSibling;
836 break;
837
838 case RTLOCKVALRECSHRD_MAGIC:
839 case RTLOCKVALRECSHRD_MAGIC_DEAD:
840 ppCoreNext = &pSibling->Shared.pSibling;
841 break;
842
843 default:
844 AssertFailed();
845 ppCoreNext = NULL;
846 break;
847 }
848 if (RT_UNLIKELY(ppCoreNext))
849 break;
850 pSibling = (PRTLOCKVALRECUNION)ASMAtomicXchgPtr((void * volatile *)ppCoreNext, NULL);
851 }
852}
853
854
855RTDECL(int) RTLockValidatorRecMakeSiblings(PRTLOCKVALRECCORE pRec1, PRTLOCKVALRECCORE pRec2)
856{
857 /*
858 * Validate input.
859 */
860 PRTLOCKVALRECUNION p1 = (PRTLOCKVALRECUNION)pRec1;
861 PRTLOCKVALRECUNION p2 = (PRTLOCKVALRECUNION)pRec2;
862
863 AssertPtrReturn(p1, VERR_SEM_LV_INVALID_PARAMETER);
864 AssertReturn( p1->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC
865 || p1->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
866 , VERR_SEM_LV_INVALID_PARAMETER);
867
868 AssertPtrReturn(p2, VERR_SEM_LV_INVALID_PARAMETER);
869 AssertReturn( p2->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC
870 || p2->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
871 , VERR_SEM_LV_INVALID_PARAMETER);
872
873 /*
874 * Link them (circular list).
875 */
876 if ( p1->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC
877 && p2->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC)
878 {
879 p1->Excl.pSibling = p2;
880 p2->Shared.pSibling = p1;
881 }
882 else if ( p1->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
883 && p2->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC)
884 {
885 p1->Shared.pSibling = p2;
886 p2->Excl.pSibling = p1;
887 }
888 else
889 AssertFailedReturn(VERR_SEM_LV_INVALID_PARAMETER); /* unsupported mix */
890
891 return VINF_SUCCESS;
892}
893
894
895
896
/**
 * Initializes an exclusive lock validator record (in place).
 *
 * Also lazily creates the global crossroads semaphore that serializes
 * deadlock detection against record destruction.
 *
 * @param   pRec        The record to initialize.
 * @param   hClass      The lock class.
 * @param   uSubClass   The lock sub-class.
 * @param   pszName     The lock name (not copied; must outlive the record).
 * @param   hLock       The lock handle/pointer.
 */
RTDECL(void) RTLockValidatorRecExclInit(PRTLOCKVALRECEXCL pRec, RTLOCKVALIDATORCLASS hClass,
                                        uint32_t uSubClass, const char *pszName, void *hLock)
{
    RTLOCKVAL_ASSERT_PTR_ALIGN(pRec);
    RTLOCKVAL_ASSERT_PTR_ALIGN(hLock);

    pRec->Core.u32Magic = RTLOCKVALRECEXCL_MAGIC;
    pRec->fEnabled = RTLockValidatorIsEnabled();
    pRec->afReserved[0] = 0;
    pRec->afReserved[1] = 0;
    pRec->afReserved[2] = 0;
    rtLockValidatorInitSrcPos(&pRec->SrcPos);
    pRec->hThread = NIL_RTTHREAD;
    pRec->pDown = NULL;
    pRec->hClass = hClass;
    pRec->uSubClass = uSubClass;
    pRec->cRecursion = 0;
    pRec->hLock = hLock;
    pRec->pszName = pszName;
    pRec->pSibling = NULL;

    /* Lazily initialize the crossroads semaphore.  The CmpXchg guards against
       two threads creating it concurrently; a lost race merely postpones
       creation to a later record initialization. */
    static uint32_t volatile s_fInitializing = false;
    if (RT_UNLIKELY(   g_hLockValidatorXRoads == NIL_RTSEMXROADS
                    && ASMAtomicCmpXchgU32(&s_fInitializing, true, false)))
    {
        RTSEMXROADS hXRoads;
        int rc = RTSemXRoadsCreate(&hXRoads);
        if (RT_SUCCESS(rc))
            ASMAtomicWriteHandle(&g_hLockValidatorXRoads, hXRoads);
        ASMAtomicWriteU32(&s_fInitializing, false);
    }
}
930
931
932RTDECL(int) RTLockValidatorRecExclCreate(PRTLOCKVALRECEXCL *ppRec, RTLOCKVALIDATORCLASS hClass,
933 uint32_t uSubClass, const char *pszName, void *pvLock)
934{
935 PRTLOCKVALRECEXCL pRec;
936 *ppRec = pRec = (PRTLOCKVALRECEXCL)RTMemAlloc(sizeof(*pRec));
937 if (!pRec)
938 return VERR_NO_MEMORY;
939
940 RTLockValidatorRecExclInit(pRec, hClass, uSubClass, pszName, pvLock);
941
942 return VINF_SUCCESS;
943}
944
945
/**
 * Marks an exclusive record dead and unlinks it from any siblings.
 *
 * Done inside the destruction side of the crossroads so no concurrent
 * deadlock detection walk can observe the record while it is invalidated.
 *
 * @param   pRec        The record.  Must carry a valid magic.
 */
RTDECL(void) RTLockValidatorRecExclDelete(PRTLOCKVALRECEXCL pRec)
{
    Assert(pRec->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC);

    rtLockValidatorSerializeDestructEnter();

    /* Kill the magic first so other code treats the record as dead. */
    ASMAtomicWriteU32(&pRec->Core.u32Magic, RTLOCKVALRECEXCL_MAGIC_DEAD);
    ASMAtomicWriteHandle(&pRec->hThread, NIL_RTTHREAD);
    ASMAtomicWriteHandle(&pRec->hClass, NIL_RTLOCKVALIDATORCLASS);
    if (pRec->pSibling)
        rtLockValidatorUnlinkAllSiblings(&pRec->Core);
    rtLockValidatorSerializeDestructLeave();
}
959
960
961RTDECL(void) RTLockValidatorRecExclDestroy(PRTLOCKVALRECEXCL *ppRec)
962{
963 PRTLOCKVALRECEXCL pRec = *ppRec;
964 *ppRec = NULL;
965 if (pRec)
966 {
967 RTLockValidatorRecExclDelete(pRec);
968 RTMemFree(pRec);
969 }
970}
971
972
/**
 * Records the calling thread as owner of the exclusive lock.
 *
 * @param   pRec            The record.
 * @param   hThreadSelf     The calling thread; NIL_RTTHREAD causes the thread
 *                          to be adopted into IPRT first.
 * @param   pSrcPos         Source position of the lock operation.  Optional.
 * @param   fFirstRecursion Asserted to be false when the thread already owns
 *                          the lock (recursive entry).
 */
RTDECL(void) RTLockValidatorRecExclSetOwner(PRTLOCKVALRECEXCL pRec, RTTHREAD hThreadSelf,
                                            PCRTLOCKVALSRCPOS pSrcPos, bool fFirstRecursion)
{
    AssertReturnVoid(pRec->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC);
    if (!pRec->fEnabled)
        return;
    if (hThreadSelf == NIL_RTTHREAD)
    {
        hThreadSelf = RTThreadSelfAutoAdopt();
        AssertReturnVoid(hThreadSelf != NIL_RTTHREAD);
    }
    Assert(hThreadSelf == RTThreadSelf());

    ASMAtomicIncS32(&hThreadSelf->LockValidator.cWriteLocks);

    if (pRec->hThread == hThreadSelf)
    {
        /* Recursive entry by the current owner. */
        Assert(!fFirstRecursion);
        pRec->cRecursion++;
    }
    else
    {
        Assert(pRec->hThread == NIL_RTTHREAD);

        /*
         * Update the record.
         */
        rtLockValidatorCopySrcPos(&pRec->SrcPos, pSrcPos);
        ASMAtomicUoWriteU32(&pRec->cRecursion, 1);
        ASMAtomicWriteHandle(&pRec->hThread, hThreadSelf);

        /*
         * Push the lock onto the lock stack.
         */
        /** @todo push it onto the per-thread lock stack. */
    }
}
1010
1011
/**
 * Checks the record and then releases exclusive lock ownership.
 *
 * @returns VINF_SUCCESS on success (or when validation is disabled for the
 *          record); VERR_SEM_LV_INVALID_PARAMETER on a bad record or when
 *          there is no current owner.
 * @param   pRec            The exclusive lock record.
 * @param   fFinalRecursion Not used by the current implementation.
 */
RTDECL(int) RTLockValidatorRecExclReleaseOwner(PRTLOCKVALRECEXCL pRec, bool fFinalRecursion)
{
    AssertReturn(pRec->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    if (!pRec->fEnabled)
        return VINF_SUCCESS;
    AssertReturn(pRec->hThread != NIL_RTTHREAD, VERR_SEM_LV_INVALID_PARAMETER);

    RTLockValidatorRecExclReleaseOwnerUnchecked(pRec);
    return VINF_SUCCESS;
}
1022
1023
/**
 * Releases exclusive lock ownership without any parameter checking beyond
 * basic record validation.
 *
 * Decrements the owner thread's write-lock counter, and clears the owner
 * when the last recursion level is unwound.
 *
 * @param   pRec        The exclusive lock record; must have an owner.
 */
RTDECL(void) RTLockValidatorRecExclReleaseOwnerUnchecked(PRTLOCKVALRECEXCL pRec)
{
    AssertReturnVoid(pRec->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC);
    if (!pRec->fEnabled)
        return;
    RTTHREADINT *pThread = pRec->hThread;
    AssertReturnVoid(pThread != NIL_RTTHREAD);
    Assert(pThread == RTThreadSelf());

    ASMAtomicDecS32(&pThread->LockValidator.cWriteLocks);

    if (ASMAtomicDecU32(&pRec->cRecursion) == 0)
    {
        /*
         * Pop (remove) the lock.
         */
        /** @todo remove it from the per-thread stack/whatever. */

        /*
         * Update the record.
         */
        ASMAtomicWriteHandle(&pRec->hThread, NIL_RTTHREAD);
    }
}
1048
1049
1050RTDECL(int) RTLockValidatorRecExclRecursion(PRTLOCKVALRECEXCL pRec, PCRTLOCKVALSRCPOS pSrcPos)
1051{
1052 AssertReturn(pRec->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
1053 if (!pRec->fEnabled)
1054 return VINF_SUCCESS;
1055 AssertReturn(pRec->hThread != NIL_RTTHREAD, VERR_SEM_LV_INVALID_PARAMETER);
1056
1057 Assert(pRec->cRecursion < _1M);
1058 pRec->cRecursion++;
1059
1060 return VINF_SUCCESS;
1061}
1062
1063
1064RTDECL(int) RTLockValidatorRecExclUnwind(PRTLOCKVALRECEXCL pRec)
1065{
1066 AssertReturn(pRec->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
1067 if (!pRec->fEnabled)
1068 return VINF_SUCCESS;
1069 AssertReturn(pRec->hThread != NIL_RTTHREAD, VERR_SEM_LV_INVALID_PARAMETER);
1070 AssertReturn(pRec->cRecursion > 0, VERR_SEM_LV_INVALID_PARAMETER);
1071
1072 Assert(pRec->cRecursion);
1073 pRec->cRecursion--;
1074 return VINF_SUCCESS;
1075}
1076
1077
/**
 * Records a recursion on an exclusive lock entered via another (mixed)
 * record.
 *
 * @returns VINF_SUCCESS or VERR_SEM_LV_INVALID_PARAMETER.
 * @param   pRec        The exclusive lock record; must be owned with a
 *                      positive recursion count.
 * @param   pRecMixed   The other record (shared or exclusive); only its
 *                      magic is validated here, it is not updated.
 * @param   pSrcPos     The source position; currently unused.
 */
RTDECL(int) RTLockValidatorRecExclRecursionMixed(PRTLOCKVALRECEXCL pRec, PRTLOCKVALRECCORE pRecMixed, PCRTLOCKVALSRCPOS pSrcPos)
{
    AssertReturn(pRec->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    PRTLOCKVALRECUNION pRecMixedU = (PRTLOCKVALRECUNION)pRecMixed;
    AssertReturn(   pRecMixedU->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
                 || pRecMixedU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC
                 , VERR_SEM_LV_INVALID_PARAMETER);
    if (!pRec->fEnabled)
        return VINF_SUCCESS;
    AssertReturn(pRec->hThread != NIL_RTTHREAD, VERR_SEM_LV_INVALID_PARAMETER);
    AssertReturn(pRec->cRecursion > 0, VERR_SEM_LV_INVALID_PARAMETER);

    /* Bump the recursion depth; ~1M levels indicates a runaway caller. */
    Assert(pRec->cRecursion < _1M);
    pRec->cRecursion++;

    return VINF_SUCCESS;
}
1095
1096
1097RTDECL(int) RTLockValidatorRecExclUnwindMixed(PRTLOCKVALRECEXCL pRec, PRTLOCKVALRECCORE pRecMixed)
1098{
1099 AssertReturn(pRec->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
1100 PRTLOCKVALRECUNION pRecMixedU = (PRTLOCKVALRECUNION)pRecMixed;
1101 AssertReturn( pRecMixedU->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
1102 || pRecMixedU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC
1103 , VERR_SEM_LV_INVALID_PARAMETER);
1104 if (!pRec->fEnabled)
1105 return VINF_SUCCESS;
1106 AssertReturn(pRec->hThread != NIL_RTTHREAD, VERR_SEM_LV_INVALID_PARAMETER);
1107 AssertReturn(pRec->cRecursion > 0, VERR_SEM_LV_INVALID_PARAMETER);
1108
1109 Assert(pRec->cRecursion);
1110 pRec->cRecursion--;
1111 return VINF_SUCCESS;
1112}
1113
1114
/**
 * Checks the lock order before taking an exclusive lock (stub).
 *
 * Order validation is not implemented yet (see the todo below); this only
 * validates the record and returns success when the validator is enabled.
 *
 * @returns VINF_SUCCESS or VERR_SEM_LV_INVALID_PARAMETER.
 * @param   pRec            The exclusive lock record.
 * @param   hThreadSelf     The calling thread; currently unused.
 * @param   pSrcPos         The source position; currently unused.
 */
RTDECL(int) RTLockValidatorRecExclCheckOrder(PRTLOCKVALRECEXCL pRec, RTTHREAD hThreadSelf, PCRTLOCKVALSRCPOS pSrcPos)
{
    AssertReturn(pRec->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    if (!pRec->fEnabled)
        return VINF_SUCCESS;

    /*
     * Check against the locks we're currently holding.
     */
    /** @todo later */

    /*
     * If missing order rules, add them.
     */

    return VINF_SUCCESS;
}
1132
1133
/**
 * Validates that the calling thread may block on the given exclusive lock,
 * recording the blocking location and running deadlock detection.
 *
 * @returns VINF_SUCCESS when blocking is OK, VERR_SEM_LV_INVALID_PARAMETER
 *          on bad inputs, VERR_SEM_LV_NESTED on disallowed recursion, or the
 *          status of the deadlock detection pass.
 * @param   pRec            The exclusive lock record.
 * @param   hThreadSelf     The calling thread; must be the current thread.
 * @param   pSrcPos         The source position of the wait.
 * @param   fRecursiveOk    Whether recursive acquisition is permitted.
 */
RTDECL(int) RTLockValidatorRecExclCheckBlocking(PRTLOCKVALRECEXCL pRec, RTTHREAD hThreadSelf,
                                                PCRTLOCKVALSRCPOS pSrcPos, bool fRecursiveOk)
{
    /*
     * Fend off wild life.
     */
    PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
    AssertPtrReturn(pRecU, VERR_SEM_LV_INVALID_PARAMETER);
    AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECEXCL_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    if (!pRecU->Excl.fEnabled)
        return VINF_SUCCESS;

    PRTTHREADINT pThreadSelf = hThreadSelf;
    AssertPtrReturn(pThreadSelf, VERR_SEM_LV_INVALID_PARAMETER);
    AssertReturn(pThreadSelf->u32Magic == RTTHREADINT_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    Assert(pThreadSelf == RTThreadSelf());

    /* Only states in which taking a lock makes sense are accepted. */
    RTTHREADSTATE enmThreadState = rtThreadGetState(pThreadSelf);
    AssertReturn(   enmThreadState == RTTHREADSTATE_RUNNING
                 || enmThreadState == RTTHREADSTATE_TERMINATED   /* rtThreadRemove uses locks too */
                 || enmThreadState == RTTHREADSTATE_INITIALIZING /* rtThreadInsert uses locks too */
                 , VERR_SEM_LV_INVALID_PARAMETER);

    /*
     * Record the location.
     */
    rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pRec, pRecU);
    rtLockValidatorCopySrcPos(&pThreadSelf->LockValidator.SrcPos, pSrcPos);

    /*
     * Don't do deadlock detection if we're recursing.
     *
     * On some hosts we don't do recursion accounting our selves and there
     * isn't any other place to check for this. semmutex-win.cpp for instance.
     */
    if (rtLockValidatorReadThreadHandle(&pRecU->Excl.hThread) == pThreadSelf)
    {
        if (fRecursiveOk)
            return VINF_SUCCESS;
        rtLockValidatorComplainFirst("Recursion not allowed", pSrcPos, pThreadSelf, pRecU);
        rtLockValidatorComplainPanic();
        return VERR_SEM_LV_NESTED;
    }

    /*
     * Perform deadlock detection.
     */
    if (rtLockValidatorIsSimpleNoDeadlockCase(pRecU))
        return VINF_SUCCESS;
    return rtLockValidatorDeadlockDetection(pRecU, pThreadSelf, pSrcPos);
}
RT_EXPORT_SYMBOL(RTLockValidatorRecExclCheckBlocking);
1186
1187
1188RTDECL(int) RTLockValidatorRecExclCheckOrderAndBlocking(PRTLOCKVALRECEXCL pRec, RTTHREAD hThreadSelf,
1189 PCRTLOCKVALSRCPOS pSrcPos, bool fRecursiveOk)
1190{
1191 int rc = RTLockValidatorRecExclCheckOrder(pRec, hThreadSelf, pSrcPos);
1192 if (RT_SUCCESS(rc))
1193 rc = RTLockValidatorRecExclCheckBlocking(pRec, hThreadSelf, pSrcPos, fRecursiveOk);
1194 return rc;
1195}
1196RT_EXPORT_SYMBOL(RTLockValidatorRecExclCheckOrderAndBlocking);
1197
1198
/**
 * Initializes a shared (e.g. read/write semaphore read side) lock record.
 *
 * @param   pRec        The shared lock record to initialize.
 * @param   hClass      The lock class.
 * @param   uSubClass   The lock sub-class.
 * @param   pszName     The lock name (not copied; must stay valid).
 * @param   hLock       The lock handle/pointer this record tracks.
 */
RTDECL(void) RTLockValidatorRecSharedInit(PRTLOCKVALRECSHRD pRec, RTLOCKVALIDATORCLASS hClass,
                                          uint32_t uSubClass, const char *pszName, void *hLock)
{
    RTLOCKVAL_ASSERT_PTR_ALIGN(pRec);
    RTLOCKVAL_ASSERT_PTR_ALIGN(hLock);

    pRec->Core.u32Magic = RTLOCKVALRECSHRD_MAGIC;
    pRec->uSubClass = uSubClass;
    pRec->hClass = hClass;
    pRec->hLock = hLock;
    pRec->pszName = pszName;
    pRec->fEnabled = RTLockValidatorIsEnabled();
    pRec->pSibling = NULL;

    /* the table (owner entries; lazily allocated on first AddOwner) */
    pRec->cEntries = 0;
    pRec->iLastEntry = 0;
    pRec->cAllocated = 0;
    pRec->fReallocating = false;
    pRec->afPadding[0] = false;
    pRec->afPadding[1] = false;
    pRec->papOwners = NULL;
#if HC_ARCH_BITS == 32
    pRec->u32Alignment = UINT32_MAX;
#endif
}
1225
1226
1227RTDECL(void) RTLockValidatorRecSharedDelete(PRTLOCKVALRECSHRD pRec)
1228{
1229 Assert(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC);
1230
1231 /*
1232 * Flip it into table realloc mode and take the destruction lock.
1233 */
1234 rtLockValidatorSerializeDestructEnter();
1235 while (!ASMAtomicCmpXchgBool(&pRec->fReallocating, true, false))
1236 {
1237 rtLockValidatorSerializeDestructLeave();
1238
1239 rtLockValidatorSerializeDetectionEnter();
1240 rtLockValidatorSerializeDetectionLeave();
1241
1242 rtLockValidatorSerializeDestructEnter();
1243 }
1244
1245 ASMAtomicWriteU32(&pRec->Core.u32Magic, RTLOCKVALRECSHRD_MAGIC_DEAD);
1246 ASMAtomicUoWriteHandle(&pRec->hClass, NIL_RTLOCKVALIDATORCLASS);
1247 if (pRec->papOwners)
1248 {
1249 PRTLOCKVALRECSHRDOWN volatile *papOwners = pRec->papOwners;
1250 ASMAtomicUoWritePtr((void * volatile *)&pRec->papOwners, NULL);
1251 ASMAtomicUoWriteU32(&pRec->cAllocated, 0);
1252
1253 RTMemFree((void *)pRec->papOwners);
1254 }
1255 if (pRec->pSibling)
1256 rtLockValidatorUnlinkAllSiblings(&pRec->Core);
1257 ASMAtomicWriteBool(&pRec->fReallocating, false);
1258
1259 rtLockValidatorSerializeDestructLeave();
1260}
1261
1262
1263/**
1264 * Locates an owner (thread) in a shared lock record.
1265 *
1266 * @returns Pointer to the owner entry on success, NULL on failure..
1267 * @param pShared The shared lock record.
1268 * @param hThread The thread (owner) to find.
1269 * @param piEntry Where to optionally return the table in index.
1270 * Optional.
1271 */
1272DECLINLINE(PRTLOCKVALRECSHRDOWN)
1273rtLockValidatorRecSharedFindOwner(PRTLOCKVALRECSHRD pShared, RTTHREAD hThread, uint32_t *piEntry)
1274{
1275 rtLockValidatorSerializeDetectionEnter();
1276
1277 PRTLOCKVALRECSHRDOWN volatile *papOwners = pShared->papOwners;
1278 if (papOwners)
1279 {
1280 uint32_t const cMax = pShared->cAllocated;
1281 for (uint32_t iEntry = 0; iEntry < cMax; iEntry++)
1282 {
1283 PRTLOCKVALRECSHRDOWN pEntry = rtLockValidatorUoReadSharedOwner(&papOwners[iEntry]);
1284 if (pEntry && pEntry->hThread == hThread)
1285 {
1286 rtLockValidatorSerializeDetectionLeave();
1287 if (piEntry)
1288 *piEntry = iEntry;
1289 return pEntry;
1290 }
1291 }
1292 }
1293
1294 rtLockValidatorSerializeDetectionLeave();
1295 return NULL;
1296}
1297
1298
/**
 * Checks the lock order before taking a shared lock (stub).
 *
 * Order validation is not implemented yet (see the todo below); this only
 * validates the record and returns success when the validator is enabled.
 *
 * @returns VINF_SUCCESS or VERR_SEM_LV_INVALID_PARAMETER.
 * @param   pRec            The shared lock record.
 * @param   hThreadSelf     The calling thread or NIL_RTTHREAD.
 * @param   pSrcPos         The source position; currently unused.
 */
RTDECL(int) RTLockValidatorRecSharedCheckOrder(PRTLOCKVALRECSHRD pRec, RTTHREAD hThreadSelf, PCRTLOCKVALSRCPOS pSrcPos)
{
    AssertReturn(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    if (!pRec->fEnabled)
        return VINF_SUCCESS;
    Assert(hThreadSelf == NIL_RTTHREAD || hThreadSelf == RTThreadSelf());

    /*
     * Check against the locks we're currently holding.
     */
    /** @todo later */

    /*
     * If missing order rules, add them.
     */

    return VINF_SUCCESS;
}
1317
1318
/**
 * Validates that the calling thread may block on the given shared lock,
 * recording the blocking location and running deadlock detection.
 *
 * @returns VINF_SUCCESS when blocking is OK, VERR_SEM_LV_INVALID_PARAMETER
 *          on bad inputs, VERR_SEM_LV_NESTED on disallowed recursion, or the
 *          status of the deadlock detection pass.
 * @param   pRec            The shared lock record.
 * @param   hThreadSelf     The calling thread; must be the current thread.
 * @param   pSrcPos         The source position of the wait.
 * @param   fRecursiveOk    Whether recursive acquisition is permitted.
 */
RTDECL(int) RTLockValidatorRecSharedCheckBlocking(PRTLOCKVALRECSHRD pRec, RTTHREAD hThreadSelf,
                                                  PCRTLOCKVALSRCPOS pSrcPos, bool fRecursiveOk)
{
    /*
     * Fend off wild life.
     */
    PRTLOCKVALRECUNION pRecU = (PRTLOCKVALRECUNION)pRec;
    AssertPtrReturn(pRecU, VERR_SEM_LV_INVALID_PARAMETER);
    AssertReturn(pRecU->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    if (!pRecU->Shared.fEnabled)
        return VINF_SUCCESS;

    PRTTHREADINT pThreadSelf = hThreadSelf;
    AssertPtrReturn(pThreadSelf, VERR_SEM_LV_INVALID_PARAMETER);
    AssertReturn(pThreadSelf->u32Magic == RTTHREADINT_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    Assert(pThreadSelf == RTThreadSelf());

    /* Only states in which taking a lock makes sense are accepted. */
    RTTHREADSTATE enmThreadState = rtThreadGetState(pThreadSelf);
    AssertReturn(   enmThreadState == RTTHREADSTATE_RUNNING
                 || enmThreadState == RTTHREADSTATE_TERMINATED   /* rtThreadRemove uses locks too */
                 || enmThreadState == RTTHREADSTATE_INITIALIZING /* rtThreadInsert uses locks too */
                 , VERR_SEM_LV_INVALID_PARAMETER);

    /*
     * Record the location.
     */
    rtLockValidatorWriteRecUnionPtr(&pThreadSelf->LockValidator.pRec, pRecU);
    rtLockValidatorCopySrcPos(&pThreadSelf->LockValidator.SrcPos, pSrcPos);

    /*
     * Don't do deadlock detection if we're recursing.
     */
    PRTLOCKVALRECSHRDOWN pEntry = rtLockValidatorRecSharedFindOwner(&pRecU->Shared, pThreadSelf, NULL);
    if (pEntry)
    {
        if (fRecursiveOk)
            return VINF_SUCCESS;
        rtLockValidatorComplainFirst("Recursion not allowed", pSrcPos, pThreadSelf, pRecU);
        rtLockValidatorComplainPanic();
        return VERR_SEM_LV_NESTED;
    }

    /*
     * Perform deadlock detection.
     */
    if (rtLockValidatorIsSimpleNoDeadlockCase(pRecU))
        return VINF_SUCCESS;
    return rtLockValidatorDeadlockDetection(pRecU, pThreadSelf, pSrcPos);
}
RT_EXPORT_SYMBOL(RTLockValidatorRecSharedCheckBlocking);
1369
1370
1371RTDECL(int) RTLockValidatorRecSharedCheckOrderAndBlocking(PRTLOCKVALRECSHRD pRec, RTTHREAD hThreadSelf, PCRTLOCKVALSRCPOS pSrcPos, bool fRecursiveOk)
1372{
1373 int rc = RTLockValidatorRecSharedCheckOrder(pRec, hThreadSelf, pSrcPos);
1374 if (RT_SUCCESS(rc))
1375 rc = RTLockValidatorRecSharedCheckBlocking(pRec, hThreadSelf, pSrcPos, fRecursiveOk);
1376 return rc;
1377}
1378RT_EXPORT_SYMBOL(RTLockValidatorRecSharedCheckOrderAndBlocking);
1379
1380
1381/**
1382 * Allocates and initializes an owner entry for the shared lock record.
1383 *
1384 * @returns The new owner entry.
1385 * @param pShared The shared lock record.
1386 * @param pThreadSelf The calling thread and owner. Used for record
1387 * initialization and allocation.
1388 * @param pSrcPos The source position.
1389 */
1390DECLINLINE(PRTLOCKVALRECSHRDOWN)
1391rtLockValidatorRecSharedAllocOwner(PRTLOCKVALRECSHRD pRead, PRTTHREADINT pThreadSelf, PCRTLOCKVALSRCPOS pSrcPos)
1392{
1393 PRTLOCKVALRECSHRDOWN pEntry;
1394
1395 /*
1396 * Check if the thread has any statically allocated records we can use.
1397 */
1398 unsigned iEntry = ASMBitFirstSetU32(pThreadSelf->LockValidator.bmFreeShrdOwners);
1399 if (iEntry > 0)
1400 {
1401 iEntry--;
1402 pThreadSelf->LockValidator.bmFreeShrdOwners &= ~RT_BIT_32(iEntry);
1403 pEntry = &pThreadSelf->LockValidator.aShrdOwners[iEntry];
1404 Assert(!pEntry->fReserved);
1405 pEntry->fStaticAlloc = true;
1406 }
1407 else
1408 {
1409 pEntry = (PRTLOCKVALRECSHRDOWN)RTMemAlloc(sizeof(RTLOCKVALRECSHRDOWN));
1410 if (RT_UNLIKELY(!pEntry))
1411 return NULL;
1412 pEntry->fStaticAlloc = false;
1413 }
1414
1415 pEntry->Core.u32Magic = RTLOCKVALRECSHRDOWN_MAGIC;
1416 pEntry->cRecursion = 1;
1417 pEntry->fReserved = true;
1418 pEntry->hThread = pThreadSelf;
1419 pEntry->pDown = NULL;
1420 pEntry->pSharedRec = pRead;
1421#if HC_ARCH_BITS == 32
1422 pEntry->pvReserved = NULL;
1423#endif
1424 if (pSrcPos)
1425 pEntry->SrcPos = *pSrcPos;
1426 else
1427 rtLockValidatorInitSrcPos(&pEntry->SrcPos);
1428 return pEntry;
1429}
1430
1431
/**
 * Frees an owner entry allocated by rtLockValidatorRecSharedAllocOwner.
 *
 * Statically allocated entries are returned to the owning thread's free
 * bitmap; heap allocated entries are freed with RTMemFree.
 *
 * @param   pEntry      The owner entry.  NULL is ignored.
 */
DECLINLINE(void) rtLockValidatorRecSharedFreeOwner(PRTLOCKVALRECSHRDOWN pEntry)
{
    if (pEntry)
    {
        /* Kill the magic first so readers can tell the entry is dead. */
        ASMAtomicWriteU32(&pEntry->Core.u32Magic, RTLOCKVALRECSHRDOWN_MAGIC_DEAD);

        PRTTHREADINT pThreadSelf = pEntry->hThread;
        ASMAtomicXchgHandle(&pEntry->hThread, NIL_RTTHREAD, &pThreadSelf);
        Assert(pThreadSelf == RTThreadSelf());

        Assert(pEntry->fReserved);
        pEntry->fReserved = false;

        if (pEntry->fStaticAlloc)
        {
            /* Return the entry to the thread's static owner-entry pool. */
            uintptr_t iEntry = pEntry - &pThreadSelf->LockValidator.aShrdOwners[0];
            AssertReleaseReturnVoid(iEntry < RT_ELEMENTS(pThreadSelf->LockValidator.aShrdOwners));
            pThreadSelf->LockValidator.bmFreeShrdOwners |= RT_BIT_32(iEntry);
        }
        else
        {
            /* Bounce through the destruction lock before freeing, presumably
               so concurrent deadlock-detection walks have let go of the
               entry - NOTE(review): inferred from the lock pairing, confirm. */
            rtLockValidatorSerializeDestructEnter();
            rtLockValidatorSerializeDestructLeave();

            RTMemFree(pEntry);
        }
    }
}
1465
1466
/**
 * Make more room in the table.
 *
 * @retval  true on success; returning with the detection lock held.
 * @retval  false if we're out of memory or running into a bad race condition
 *          (probably a bug somewhere).  No longer holding the lock.
 *
 * @param   pShared     The shared lock record.
 */
static bool rtLockValidatorRecSharedMakeRoom(PRTLOCKVALRECSHRD pShared)
{
    for (unsigned i = 0; i < 1000; i++)
    {
        /*
         * Switch to the other data access direction.
         */
        rtLockValidatorSerializeDetectionLeave();
        if (i >= 10)
        {
            /* Progress canaries: these fire (in strict builds) at the 10th
               and 100th iteration to flag unexpected spinning. */
            Assert(i != 10 && i != 100);
            /* Yield (0 ms) while i < 100, back off 1 ms afterwards; the
               boolean comparison result is the millisecond count. */
            RTThreadSleep(i >= 100);
        }
        rtLockValidatorSerializeDestructEnter();

        /*
         * Try grab the privilege to reallocating the table.
         */
        if (    pShared->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC
            &&  ASMAtomicCmpXchgBool(&pShared->fReallocating, true, false))
        {
            uint32_t cAllocated = pShared->cAllocated;
            if (cAllocated < pShared->cEntries)
            {
                /*
                 * Ok, still not enough space.  Reallocate the table.
                 */
#if 0  /** @todo enable this after making sure growing works flawlessly. */
                uint32_t cInc = RT_ALIGN_32(pShared->cEntries - cAllocated, 16);
#else
                uint32_t cInc = RT_ALIGN_32(pShared->cEntries - cAllocated, 1);
#endif
                PRTLOCKVALRECSHRDOWN *papOwners;
                papOwners = (PRTLOCKVALRECSHRDOWN *)RTMemRealloc((void *)pShared->papOwners,
                                                                 (cAllocated + cInc) * sizeof(void *));
                if (!papOwners)
                {
                    ASMAtomicWriteBool(&pShared->fReallocating, false);
                    rtLockValidatorSerializeDestructLeave();
                    /* RTMemRealloc will assert */
                    return false;
                }

                /* NULL the new slots before publishing the bigger table. */
                while (cInc-- > 0)
                {
                    papOwners[cAllocated] = NULL;
                    cAllocated++;
                }

                ASMAtomicWritePtr((void * volatile *)&pShared->papOwners, papOwners);
                ASMAtomicWriteU32(&pShared->cAllocated, cAllocated);
            }
            ASMAtomicWriteBool(&pShared->fReallocating, false);
        }
        rtLockValidatorSerializeDestructLeave();

        /* Back to detection mode; bail out if the record died meanwhile. */
        rtLockValidatorSerializeDetectionEnter();
        if (RT_UNLIKELY(pShared->Core.u32Magic != RTLOCKVALRECSHRD_MAGIC))
            break;

        if (pShared->cAllocated >= pShared->cEntries)
            return true;
    }

    rtLockValidatorSerializeDetectionLeave();
    AssertFailed(); /* too many iterations or destroyed while racing. */
    return false;
}
1544
1545
/**
 * Adds an owner entry to a shared lock record.
 *
 * @returns true on success, false on serious race or if we're out of memory.
 * @param   pShared     The shared lock record.
 * @param   pEntry      The owner entry.
 */
DECLINLINE(bool) rtLockValidatorRecSharedAddOwner(PRTLOCKVALRECSHRD pShared, PRTLOCKVALRECSHRDOWN pEntry)
{
    rtLockValidatorSerializeDetectionEnter();
    if (RT_LIKELY(pShared->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC)) /* paranoia */
    {
        /* Reserve a count first; grow the table if it is already full. */
        if (    ASMAtomicIncU32(&pShared->cEntries) > pShared->cAllocated /** @todo add fudge */
            &&  !rtLockValidatorRecSharedMakeRoom(pShared))
            return false; /* the worker has already left the lock */

        /* Claim a free (NULL) slot with a compare-exchange. */
        PRTLOCKVALRECSHRDOWN volatile *papOwners = pShared->papOwners;
        uint32_t const cMax = pShared->cAllocated;
        for (unsigned i = 0; i < 100; i++)
        {
            for (uint32_t iEntry = 0; iEntry < cMax; iEntry++)
            {
                if (ASMAtomicCmpXchgPtr((void * volatile *)&papOwners[iEntry], pEntry, NULL))
                {
                    rtLockValidatorSerializeDetectionLeave();
                    return true;
                }
            }
            Assert(i != 25); /* progress canary - shouldn't need this many passes */
        }
        AssertFailed();
    }
    rtLockValidatorSerializeDetectionLeave();
    return false;
}
1581
1582
/**
 * Remove an owner entry from a shared lock record and free it.
 *
 * @param   pShared     The shared lock record.
 * @param   pEntry      The owner entry to remove.
 * @param   iEntry      The last known index.
 */
DECLINLINE(void) rtLockValidatorRecSharedRemoveAndFreeOwner(PRTLOCKVALRECSHRD pShared, PRTLOCKVALRECSHRDOWN pEntry,
                                                            uint32_t iEntry)
{
    /*
     * Remove it from the table.
     */
    rtLockValidatorSerializeDetectionEnter();
    AssertReturnVoidStmt(pShared->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, rtLockValidatorSerializeDetectionLeave());
    if (RT_UNLIKELY(   iEntry >= pShared->cAllocated
                    || !ASMAtomicCmpXchgPtr((void * volatile *)&pShared->papOwners[iEntry], NULL, pEntry)))
    {
        /* This shouldn't happen yet; fall back to a full table scan in case
           the entry moved behind our back. */
        AssertFailed();
        PRTLOCKVALRECSHRDOWN volatile *papOwners = pShared->papOwners;
        uint32_t const cMax = pShared->cAllocated;
        for (iEntry = 0; iEntry < cMax; iEntry++)
            if (ASMAtomicCmpXchgPtr((void * volatile *)&papOwners[iEntry], NULL, pEntry))
                break;
        AssertReturnVoidStmt(iEntry < cMax, rtLockValidatorSerializeDetectionLeave());
    }
    /* A set high bit would indicate an entry-count underflow. */
    uint32_t cNow = ASMAtomicDecU32(&pShared->cEntries);
    Assert(!(cNow & RT_BIT_32(31))); NOREF(cNow);
    rtLockValidatorSerializeDetectionLeave();

    /*
     * Successfully removed, now free it.
     */
    rtLockValidatorRecSharedFreeOwner(pEntry);
}
1619
1620
/**
 * Adds the calling thread as an owner of the shared lock, or bumps the
 * recursion count if it is already an owner.
 *
 * @param   pRec            The shared lock record.
 * @param   hThreadSelf     The calling thread; must be the current thread
 *                          and not NIL_RTTHREAD.
 * @param   pSrcPos         The source position of the lock operation.
 */
RTDECL(void) RTLockValidatorSharedRecAddOwner(PRTLOCKVALRECSHRD pRec, RTTHREAD hThreadSelf, PCRTLOCKVALSRCPOS pSrcPos)
{
    AssertReturnVoid(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC);
    if (!pRec->fEnabled)
        return;
    AssertReturnVoid(hThreadSelf != NIL_RTTHREAD);
    AssertReturnVoid(hThreadSelf->u32Magic == RTTHREADINT_MAGIC);
    Assert(hThreadSelf == RTThreadSelf());

    /*
     * Recursive?
     *
     * Note! This code can be optimized to try avoid scanning the table on
     *       insert. However, that's annoying work that makes the code big,
     *       so it can wait til later sometime.
     */
    PRTLOCKVALRECSHRDOWN pEntry = rtLockValidatorRecSharedFindOwner(pRec, hThreadSelf, NULL);
    if (pEntry)
    {
        pEntry->cRecursion++;
        return;
    }

    /*
     * Allocate a new owner entry and insert it into the table.
     * On insertion failure the fresh entry is freed again.
     */
    pEntry = rtLockValidatorRecSharedAllocOwner(pRec, hThreadSelf, pSrcPos);
    if (    pEntry
        &&  !rtLockValidatorRecSharedAddOwner(pRec, pEntry))
        rtLockValidatorRecSharedFreeOwner(pEntry);
}
RT_EXPORT_SYMBOL(RTLockValidatorSharedRecAddOwner);
1653
1654
/**
 * Removes one level of ownership of the shared lock for the calling thread,
 * dropping the owner entry when the last recursion level goes.
 *
 * @param   pRec            The shared lock record.
 * @param   hThreadSelf     The calling thread; must be the current thread,
 *                          not NIL_RTTHREAD, and a current owner.
 */
RTDECL(void) RTLockValidatorSharedRecRemoveOwner(PRTLOCKVALRECSHRD pRec, RTTHREAD hThreadSelf)
{
    AssertReturnVoid(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC);
    if (!pRec->fEnabled)
        return;
    AssertReturnVoid(hThreadSelf != NIL_RTTHREAD);
    AssertReturnVoid(hThreadSelf->u32Magic == RTTHREADINT_MAGIC);
    Assert(hThreadSelf == RTThreadSelf());

    /*
     * Find the entry; hopefully it's a recursive one.
     */
    uint32_t iEntry = UINT32_MAX; /* shuts up gcc */
    PRTLOCKVALRECSHRDOWN pEntry = rtLockValidatorRecSharedFindOwner(pRec, hThreadSelf, &iEntry);
    AssertReturnVoid(pEntry);
    if (pEntry->cRecursion > 1)
        pEntry->cRecursion--;
    else
        rtLockValidatorRecSharedRemoveAndFreeOwner(pRec, pEntry, iEntry);
}
RT_EXPORT_SYMBOL(RTLockValidatorSharedRecRemoveOwner);
1676
1677
/**
 * Checks that the calling thread owns the shared lock and releases one level
 * of its ownership.
 *
 * @returns VINF_SUCCESS on success, VERR_SEM_LV_INVALID_PARAMETER on a bad
 *          record/thread, VERR_SEM_LV_NOT_OWNER if the thread isn't in the
 *          owner table.
 * @param   pRead           The shared lock record.
 * @param   hThreadSelf     The calling thread; NIL_RTTHREAD is resolved via
 *                          RTThreadSelfAutoAdopt.
 */
RTDECL(int) RTLockValidatorRecSharedCheckAndRelease(PRTLOCKVALRECSHRD pRead, RTTHREAD hThreadSelf)
{
    AssertReturn(pRead->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    if (!pRead->fEnabled)
        return VINF_SUCCESS;
    if (hThreadSelf == NIL_RTTHREAD)
    {
        hThreadSelf = RTThreadSelfAutoAdopt();
        AssertReturn(hThreadSelf != NIL_RTTHREAD, VERR_SEM_LV_INVALID_PARAMETER);
    }
    Assert(hThreadSelf == RTThreadSelf());

    /*
     * Locate the entry for this thread in the table.
     */
    uint32_t iEntry = 0;
    PRTLOCKVALRECSHRDOWN pEntry = rtLockValidatorRecSharedFindOwner(pRead, hThreadSelf, &iEntry);
    AssertReturn(pEntry, VERR_SEM_LV_NOT_OWNER);

    /*
     * Check the release order.
     */
    if (pRead->hClass != NIL_RTLOCKVALIDATORCLASS)
    {
        /** @todo order validation */
    }

    /*
     * Release the ownership or unwind a level of recursion.
     */
    Assert(pEntry->cRecursion > 0);
    if (pEntry->cRecursion > 1)
        pEntry->cRecursion--;
    else
        rtLockValidatorRecSharedRemoveAndFreeOwner(pRead, pEntry, iEntry);

    return VINF_SUCCESS;
}
1716
1717
1718RTDECL(int32_t) RTLockValidatorWriteLockGetCount(RTTHREAD Thread)
1719{
1720 if (Thread == NIL_RTTHREAD)
1721 return 0;
1722
1723 PRTTHREADINT pThread = rtThreadGet(Thread);
1724 if (!pThread)
1725 return VERR_INVALID_HANDLE;
1726 int32_t cWriteLocks = ASMAtomicReadS32(&pThread->LockValidator.cWriteLocks);
1727 rtThreadRelease(pThread);
1728 return cWriteLocks;
1729}
1730RT_EXPORT_SYMBOL(RTLockValidatorWriteLockGetCount);
1731
1732
1733RTDECL(void) RTLockValidatorWriteLockInc(RTTHREAD Thread)
1734{
1735 PRTTHREADINT pThread = rtThreadGet(Thread);
1736 AssertReturnVoid(pThread);
1737 ASMAtomicIncS32(&pThread->LockValidator.cWriteLocks);
1738 rtThreadRelease(pThread);
1739}
1740RT_EXPORT_SYMBOL(RTLockValidatorWriteLockInc);
1741
1742
1743RTDECL(void) RTLockValidatorWriteLockDec(RTTHREAD Thread)
1744{
1745 PRTTHREADINT pThread = rtThreadGet(Thread);
1746 AssertReturnVoid(pThread);
1747 ASMAtomicDecS32(&pThread->LockValidator.cWriteLocks);
1748 rtThreadRelease(pThread);
1749}
1750RT_EXPORT_SYMBOL(RTLockValidatorWriteLockDec);
1751
1752
1753RTDECL(int32_t) RTLockValidatorReadLockGetCount(RTTHREAD Thread)
1754{
1755 if (Thread == NIL_RTTHREAD)
1756 return 0;
1757
1758 PRTTHREADINT pThread = rtThreadGet(Thread);
1759 if (!pThread)
1760 return VERR_INVALID_HANDLE;
1761 int32_t cReadLocks = ASMAtomicReadS32(&pThread->LockValidator.cReadLocks);
1762 rtThreadRelease(pThread);
1763 return cReadLocks;
1764}
1765RT_EXPORT_SYMBOL(RTLockValidatorReadLockGetCount);
1766
1767
1768RTDECL(void) RTLockValidatorReadLockInc(RTTHREAD Thread)
1769{
1770 PRTTHREADINT pThread = rtThreadGet(Thread);
1771 Assert(pThread);
1772 ASMAtomicIncS32(&pThread->LockValidator.cReadLocks);
1773 rtThreadRelease(pThread);
1774}
1775RT_EXPORT_SYMBOL(RTLockValidatorReadLockInc);
1776
1777
1778RTDECL(void) RTLockValidatorReadLockDec(RTTHREAD Thread)
1779{
1780 PRTTHREADINT pThread = rtThreadGet(Thread);
1781 Assert(pThread);
1782 ASMAtomicDecS32(&pThread->LockValidator.cReadLocks);
1783 rtThreadRelease(pThread);
1784}
1785RT_EXPORT_SYMBOL(RTLockValidatorReadLockDec);
1786
1787
/**
 * Tries to resolve the lock handle a thread is currently blocking on.
 *
 * @returns The hLock value of the record the thread is sleeping on, or NULL
 *          when the thread isn't sleeping on a known lock (or woke up while
 *          we were looking).
 * @param   hThread     The thread in question.
 */
RTDECL(void *) RTLockValidatorQueryBlocking(RTTHREAD hThread)
{
    void *pvLock = NULL;
    PRTTHREADINT pThread = rtThreadGet(hThread);
    if (pThread)
    {
        RTTHREADSTATE enmState = rtThreadGetState(pThread);
        if (RTTHREAD_IS_SLEEPING(enmState))
        {
            rtLockValidatorSerializeDetectionEnter();

            /* Re-check the state now that we hold the detection lock. */
            enmState = rtThreadGetState(pThread);
            if (RTTHREAD_IS_SLEEPING(enmState))
            {
                PRTLOCKVALRECUNION pRec = rtLockValidatorReadRecUnionPtr(&pThread->LockValidator.pRec);
                if (pRec)
                {
                    switch (pRec->Core.u32Magic)
                    {
                        case RTLOCKVALRECEXCL_MAGIC:
                            pvLock = pRec->Excl.hLock;
                            break;

                        case RTLOCKVALRECSHRDOWN_MAGIC:
                            /* Owner entry: hop over to the shared record. */
                            pRec = (PRTLOCKVALRECUNION)pRec->ShrdOwner.pSharedRec;
                            if (!pRec || pRec->Core.u32Magic != RTLOCKVALRECSHRD_MAGIC)
                                break;
                            /* fall thru */
                        case RTLOCKVALRECSHRD_MAGIC:
                            pvLock = pRec->Shared.hLock;
                            break;
                    }
                    /* Discard the result if the thread state changed while
                       we were peeking - it's stale. */
                    if (RTThreadGetState(pThread) != enmState)
                        pvLock = NULL;
                }
            }

            rtLockValidatorSerializeDetectionLeave();
        }
        rtThreadRelease(pThread);
    }
    return pvLock;
}
RT_EXPORT_SYMBOL(RTLockValidatorQueryBlocking);
1831
1832
/**
 * Enables or disables the lock validator globally.
 *
 * @returns The previous value of the enabled flag.
 * @param   fEnabled    The new setting.
 */
RTDECL(bool) RTLockValidatorSetEnabled(bool fEnabled)
{
    return ASMAtomicXchgBool(&g_fLockValidatorEnabled, fEnabled);
}
RT_EXPORT_SYMBOL(RTLockValidatorSetEnabled);
1838
1839
/**
 * Checks whether the lock validator is globally enabled.
 *
 * @returns The current value of the enabled flag.
 */
RTDECL(bool) RTLockValidatorIsEnabled(void)
{
    return ASMAtomicUoReadBool(&g_fLockValidatorEnabled);
}
RT_EXPORT_SYMBOL(RTLockValidatorIsEnabled);
1845
1846
/**
 * Sets whether the lock validator complains quietly or not.
 *
 * @returns The previous value of the quiet flag.
 * @param   fQuiet      The new setting.
 */
RTDECL(bool) RTLockValidatorSetQuiet(bool fQuiet)
{
    return ASMAtomicXchgBool(&g_fLockValidatorQuiet, fQuiet);
}
RT_EXPORT_SYMBOL(RTLockValidatorSetQuiet);
1852
1853
/**
 * Checks whether the lock validator is in quiet mode.
 *
 * @returns The current value of the quiet flag.
 */
RTDECL(bool) RTLockValidatorAreQuiet(void)
{
    return ASMAtomicUoReadBool(&g_fLockValidatorQuiet);
}
RT_EXPORT_SYMBOL(RTLockValidatorAreQuiet);
1859
1860
/**
 * Sets whether the lock validator may panic on validation failures.
 *
 * @returns The previous value of the may-panic flag.
 * @param   fMayPanic   The new setting.
 */
RTDECL(bool) RTLockValidatorSetMayPanic(bool fMayPanic)
{
    return ASMAtomicXchgBool(&g_fLockValidatorMayPanic, fMayPanic);
}
RT_EXPORT_SYMBOL(RTLockValidatorSetMayPanic);
1866
1867
/**
 * Checks whether the lock validator may panic on validation failures.
 *
 * @returns The current value of the may-panic flag.
 */
RTDECL(bool) RTLockValidatorMayPanic(void)
{
    return ASMAtomicUoReadBool(&g_fLockValidatorMayPanic);
}
RT_EXPORT_SYMBOL(RTLockValidatorMayPanic);
1873
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette