VirtualBox

source: vbox/trunk/src/VBox/Runtime/common/misc/lockvalidator.cpp@ 25524

Last change on this file since 25524 was 25519, checked in by vboxsync, 15 years ago

iprt/lockvalidator.h: Added an enabled setting to assist testing.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 38.1 KB
Line 
1/* $Id: lockvalidator.cpp 25519 2009-12-20 16:41:19Z vboxsync $ */
2/** @file
3 * IPRT - Lock Validator.
4 */
5
6/*
7 * Copyright (C) 2009 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 *
26 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
27 * Clara, CA 95054 USA or visit http://www.sun.com if you need
28 * additional information or have any questions.
29 */
30
31/*******************************************************************************
32* Header Files *
33*******************************************************************************/
34#include <iprt/lockvalidator.h>
35#include "internal/iprt.h"
36
37#include <iprt/asm.h>
38#include <iprt/assert.h>
39#include <iprt/err.h>
40#include <iprt/mem.h>
41#include <iprt/once.h>
42#include <iprt/semaphore.h>
43#include <iprt/thread.h>
44
45#include "internal/lockvalidator.h"
46#include "internal/magics.h"
47#include "internal/thread.h"
48
49
50/*******************************************************************************
51* Structures and Typedefs *
52*******************************************************************************/
53
54
55/*******************************************************************************
56* Global Variables *
57*******************************************************************************/
58/** Serializing object destruction and deadlock detection.
59 * NS: RTLOCKVALIDATORREC and RTTHREADINT destruction.
60 * EW: Deadlock detection.
61 */
62static RTSEMXROADS g_hLockValidatorXRoads = NIL_RTSEMXROADS;
63/** Whether the lock validator is enabled or disabled.
64 * Only applies to new locks. */
65static bool volatile g_fLockValidatorEnabled = true;
66
67
68/**
69 * Copy a source position record.
70 *
71 * @param pDst The destination.
72 * @param pSrc The source.
73 */
74DECL_FORCE_INLINE(void) rtLockValidatorCopySrcPos(PRTLOCKVALIDATORSRCPOS pDst, PCRTLOCKVALIDATORSRCPOS pSrc)
75{
76 ASMAtomicUoWriteU32(&pDst->uLine, pSrc->uLine);
77 ASMAtomicUoWritePtr((void * volatile *)&pDst->pszFile, pSrc->pszFile);
78 ASMAtomicUoWritePtr((void * volatile *)&pDst->pszFunction, pSrc->pszFunction);
79 ASMAtomicUoWritePtr((void * volatile *)&pDst->uId, (void *)pSrc->uId);
80}
81
82
83/**
84 * Init a source position record.
85 *
86 * @param pSrcPos The source position record.
87 */
88DECL_FORCE_INLINE(void) rtLockValidatorInitSrcPos(PRTLOCKVALIDATORSRCPOS pSrcPos)
89{
90 pSrcPos->pszFile = NULL;
91 pSrcPos->pszFunction = NULL;
92 pSrcPos->uId = 0;
93 pSrcPos->uLine = 0;
94#if HC_ARCH_BITS == 64
95 pSrcPos->u32Padding = 0;
96#endif
97}
98
99
100/**
101 * Serializes destruction of RTLOCKVALIDATORREC and RTTHREADINT structures.
102 */
103DECLHIDDEN(void) rtLockValidatorSerializeDestructEnter(void)
104{
105 RTSEMXROADS hXRoads = g_hLockValidatorXRoads;
106 if (hXRoads != NIL_RTSEMXROADS)
107 RTSemXRoadsNSEnter(hXRoads);
108}
109
110
111/**
112 * Call after rtLockValidatorSerializeDestructEnter.
113 */
114DECLHIDDEN(void) rtLockValidatorSerializeDestructLeave(void)
115{
116 RTSEMXROADS hXRoads = g_hLockValidatorXRoads;
117 if (hXRoads != NIL_RTSEMXROADS)
118 RTSemXRoadsNSLeave(hXRoads);
119}
120
121
122/**
123 * Serializes deadlock detection against destruction of the objects being
124 * inspected.
125 */
126DECLINLINE(void) rtLockValidatorSerializeDetectionEnter(void)
127{
128 RTSEMXROADS hXRoads = g_hLockValidatorXRoads;
129 if (hXRoads != NIL_RTSEMXROADS)
130 RTSemXRoadsEWEnter(hXRoads);
131}
132
133
134/**
135 * Call after rtLockValidatorSerializeDetectionEnter.
136 */
137DECLHIDDEN(void) rtLockValidatorSerializeDetectionLeave(void)
138{
139 RTSEMXROADS hXRoads = g_hLockValidatorXRoads;
140 if (hXRoads != NIL_RTSEMXROADS)
141 RTSemXRoadsEWLeave(hXRoads);
142}
143
144
/**
 * Initializes a lock validator record for an exclusive (write) lock.
 *
 * The validator-enabled flag is sampled here via RTLockValidatorIsEnabled(),
 * so toggling the global setting only affects records created afterwards.
 * Also performs the lazy one-time creation of the global crossroads
 * semaphore used for destruction/detection serialization.
 *
 * @param   pRec        The record to initialize.
 * @param   hClass      The lock class handle (may be NIL).
 * @param   uSubClass   The sub-class, used for ordering within a class.
 * @param   pszName     The lock name (diagnostics only; not copied).
 * @param   hLock       The handle of the lock this record describes.
 */
RTDECL(void) RTLockValidatorRecInit(PRTLOCKVALIDATORREC pRec, RTLOCKVALIDATORCLASS hClass,
                                    uint32_t uSubClass, const char *pszName, void *hLock)
{
    pRec->u32Magic      = RTLOCKVALIDATORREC_MAGIC;
    pRec->fEnabled      = RTLockValidatorIsEnabled();
    pRec->afReserved[0] = 0;
    pRec->afReserved[1] = 0;
    pRec->afReserved[2] = 0;
    rtLockValidatorInitSrcPos(&pRec->SrcPos);
    pRec->hThread       = NIL_RTTHREAD;
    pRec->pDown         = NULL;
    pRec->hClass        = hClass;
    pRec->uSubClass     = uSubClass;
    pRec->cRecursion    = 0;
    pRec->hLock         = hLock;
    pRec->pszName       = pszName;

    /* Lazily initialize the crossroads semaphore.  The s_fInitializing flag
       only stops two threads from both calling RTSemXRoadsCreate; a loser
       simply skips the init, and the next record init retries.
       NOTE(review): a failed RTSemXRoadsCreate is silently ignored here -
       the serialize helpers then stay no-ops until a later init succeeds. */
    static uint32_t volatile s_fInitializing = false;
    if (RT_UNLIKELY(   g_hLockValidatorXRoads == NIL_RTSEMXROADS
                    && ASMAtomicCmpXchgU32(&s_fInitializing, true, false)))
    {
        RTSEMXROADS hXRoads;
        int rc = RTSemXRoadsCreate(&hXRoads);
        if (RT_SUCCESS(rc))
            ASMAtomicWriteHandle(&g_hLockValidatorXRoads, hXRoads);
        ASMAtomicWriteU32(&s_fInitializing, false);
    }
}
174
175
176RTDECL(int) RTLockValidatorRecCreate(PRTLOCKVALIDATORREC *ppRec, RTLOCKVALIDATORCLASS hClass,
177 uint32_t uSubClass, const char *pszName, void *pvLock)
178{
179 PRTLOCKVALIDATORREC pRec;
180 *ppRec = pRec = (PRTLOCKVALIDATORREC)RTMemAlloc(sizeof(*pRec));
181 if (!pRec)
182 return VERR_NO_MEMORY;
183
184 RTLockValidatorRecInit(pRec, hClass, uSubClass, pszName, pvLock);
185
186 return VINF_SUCCESS;
187}
188
189
/**
 * Uninitializes a lock validator record previously set up with
 * RTLockValidatorRecInit.  Does not free the record itself.
 *
 * Takes the destruction (NS) side of the crossroads so no deadlock scan is
 * traversing the record while its magic and handles are being invalidated.
 *
 * @param   pRec        The record to delete.
 */
RTDECL(void) RTLockValidatorRecDelete(PRTLOCKVALIDATORREC pRec)
{
    Assert(pRec->u32Magic == RTLOCKVALIDATORREC_MAGIC);

    rtLockValidatorSerializeDestructEnter();

    /* Kill the magic first so concurrent readers can detect the record
       is dead; then neutralize the handles. */
    ASMAtomicWriteU32(&pRec->u32Magic, RTLOCKVALIDATORREC_MAGIC_DEAD);
    ASMAtomicWriteHandle(&pRec->hThread, NIL_RTTHREAD);
    ASMAtomicWriteHandle(&pRec->hClass, NIL_RTLOCKVALIDATORCLASS);

    rtLockValidatorSerializeDestructLeave();
}
202
203
204RTDECL(void) RTLockValidatorRecDestroy(PRTLOCKVALIDATORREC *ppRec)
205{
206 PRTLOCKVALIDATORREC pRec = *ppRec;
207 *ppRec = NULL;
208 if (pRec)
209 {
210 RTLockValidatorRecDelete(pRec);
211 RTMemFree(pRec);
212 }
213}
214
215
216RTDECL(void) RTLockValidatorSharedRecInit(PRTLOCKVALIDATORSHARED pRec, RTLOCKVALIDATORCLASS hClass,
217 uint32_t uSubClass, const char *pszName, void *hLock)
218{
219 pRec->u32Magic = RTLOCKVALIDATORSHARED_MAGIC;
220 pRec->uSubClass = uSubClass;
221 pRec->hClass = hClass;
222 pRec->hLock = hLock;
223 pRec->pszName = pszName;
224 pRec->fEnabled = RTLockValidatorIsEnabled();
225
226 /* the table */
227 pRec->cEntries = 0;
228 pRec->iLastEntry = 0;
229 pRec->cAllocated = 0;
230 pRec->fReallocating = false;
231 pRec->afPadding[0] = false;
232 pRec->afPadding[1] = false;
233 pRec->papOwners = NULL;
234 pRec->u64Alignment = UINT64_MAX;
235}
236
237
238RTDECL(void) RTLockValidatorSharedRecDelete(PRTLOCKVALIDATORSHARED pRec)
239{
240 Assert(pRec->u32Magic == RTLOCKVALIDATORSHARED_MAGIC);
241
242 /*
243 * Flip it into table realloc mode and take the destruction lock.
244 */
245 rtLockValidatorSerializeDestructEnter();
246 while (!ASMAtomicCmpXchgBool(&pRec->fReallocating, true, false))
247 {
248 rtLockValidatorSerializeDestructLeave();
249
250 rtLockValidatorSerializeDetectionEnter();
251 rtLockValidatorSerializeDetectionLeave();
252
253 rtLockValidatorSerializeDestructEnter();
254 }
255
256 ASMAtomicWriteU32(&pRec->u32Magic, RTLOCKVALIDATORSHARED_MAGIC_DEAD);
257 ASMAtomicUoWriteHandle(&pRec->hClass, NIL_RTLOCKVALIDATORCLASS);
258 if (pRec->papOwners)
259 {
260 PRTLOCKVALIDATORSHAREDONE volatile *papOwners = pRec->papOwners;
261 ASMAtomicUoWritePtr((void * volatile *)&pRec->papOwners, NULL);
262 ASMAtomicUoWriteU32(&pRec->cAllocated, 0);
263
264 RTMemFree((void *)pRec->papOwners);
265 }
266 ASMAtomicWriteBool(&pRec->fReallocating, false);
267
268 rtLockValidatorSerializeDestructLeave();
269}
270
271
/**
 * Locates a thread in a shared lock record.
 *
 * The scan runs under the detection (EW) side of the crossroads so the
 * owner table cannot be freed underneath it; the lock is released on
 * every exit path before returning.
 *
 * @returns Pointer to the thread entry on success, NULL on failure.
 * @param   pShared     The shared lock record.
 * @param   hThread     The thread to find.
 * @param   piEntry     Where to optionally return the table index.
 */
DECLINLINE(PRTLOCKVALIDATORSHAREDONE)
rtLockValidatorSharedRecFindThread(PRTLOCKVALIDATORSHARED pShared, RTTHREAD hThread, uint32_t *piEntry)
{
    rtLockValidatorSerializeDetectionEnter();
    if (pShared->papOwners)
    {
        PRTLOCKVALIDATORSHAREDONE volatile *papOwners = pShared->papOwners;
        uint32_t const cMax = pShared->cAllocated;
        for (uint32_t iEntry = 0; iEntry < cMax; iEntry++)
        {
            /* Entries are filled/cleared concurrently; read each slot once. */
            PRTLOCKVALIDATORSHAREDONE pEntry;
            pEntry = (PRTLOCKVALIDATORSHAREDONE)ASMAtomicUoReadPtr((void * volatile *)&papOwners[iEntry]);
            if (pEntry && pEntry->hThread == hThread)
            {
                rtLockValidatorSerializeDetectionLeave();
                if (piEntry)
                    *piEntry = iEntry;
                return pEntry;
            }
        }
    }
    rtLockValidatorSerializeDetectionLeave();
    return NULL;
}
304
305
306/**
307 * Allocates and initializes a thread entry for the shared lock record.
308 *
309 * @returns The new thread entry.
310 * @param pShared The shared lock record.
311 * @param hThread The thread handle.
312 * @param pSrcPos The source position.
313 */
314DECLINLINE(PRTLOCKVALIDATORSHAREDONE)
315rtLockValidatorSharedRecAllocThread(PRTLOCKVALIDATORSHARED pRead, RTTHREAD hThread, PCRTLOCKVALIDATORSRCPOS pSrcPos)
316{
317 PRTLOCKVALIDATORSHAREDONE pEntry;
318
319 pEntry = (PRTLOCKVALIDATORSHAREDONE)RTMemAlloc(sizeof(RTLOCKVALIDATORSHAREDONE));
320 if (pEntry)
321 {
322 pEntry->u32Magic = RTLOCKVALIDATORSHAREDONE_MAGIC;
323 pEntry->cRecursion = 1;
324 pEntry->hThread = hThread;
325 pEntry->pDown = NULL;
326 pEntry->pSharedRec = pRead;
327#if HC_ARCH_BITS == 32
328 pEntry->pvReserved = NULL;
329#endif
330 if (pSrcPos)
331 pEntry->SrcPos = *pSrcPos;
332 else
333 rtLockValidatorInitSrcPos(&pEntry->SrcPos);
334 }
335
336 return pEntry;
337}
338
/**
 * Frees a thread entry allocated by rtLockValidatorSharedRecAllocThread.
 *
 * The magic is killed and the thread handle cleared under the destruction
 * (NS) crossroads lock before freeing, so a concurrent deadlock scan never
 * sees a live-looking entry that is about to disappear.
 *
 * @param   pEntry      The thread entry.  NULL is ignored.
 */
DECLINLINE(void) rtLockValidatorSharedRecFreeThread(PRTLOCKVALIDATORSHAREDONE pEntry)
{
    if (pEntry)
    {
        rtLockValidatorSerializeDestructEnter();
        ASMAtomicWriteU32(&pEntry->u32Magic, RTLOCKVALIDATORSHAREDONE_MAGIC_DEAD);
        ASMAtomicWriteHandle(&pEntry->hThread, NIL_RTTHREAD);
        rtLockValidatorSerializeDestructLeave();

        RTMemFree(pEntry);
    }
}
356
357
/**
 * Make more room in the owner table.
 *
 * Called with the detection (EW) lock held; repeatedly drops it, takes the
 * destruction (NS) lock to perform the reallocation, then re-takes the
 * detection lock to re-check.  The fReallocating flag arbitrates between
 * concurrent growers and RTLockValidatorSharedRecDelete.
 *
 * @retval  true on success (still holding the detection lock).
 * @retval  false if we're out of memory or running into a bad race
 *          condition (probably a bug somewhere).  No longer holding the
 *          lock.
 *
 * @param   pShared     The shared lock record.
 */
static bool rtLockValidatorSharedRecMakeRoom(PRTLOCKVALIDATORSHARED pShared)
{
    for (unsigned i = 0; i < 1000; i++)
    {
        /*
         * Switch to the other data access direction.
         */
        rtLockValidatorSerializeDetectionLeave();
        if (i >= 10)
        {
            Assert(i != 10 && i != 100);
            /* NOTE(review): this sleeps (i >= 100) ms, i.e. 0 or 1 ms - a
               boolean used as a millisecond count.  Looks like a shorthand
               for "yield early, sleep 1ms when struggling"; confirm intent. */
            RTThreadSleep(i >= 100);
        }
        rtLockValidatorSerializeDestructEnter();

        /*
         * Try grab the privilege to reallocating the table.
         */
        if (    pShared->u32Magic == RTLOCKVALIDATORSHARED_MAGIC
            &&  ASMAtomicCmpXchgBool(&pShared->fReallocating, true, false))
        {
            uint32_t cAllocated = pShared->cAllocated;
            if (cAllocated < pShared->cEntries)
            {
                /*
                 * Ok, still not enough space.  Reallocate the table.
                 */
#if 0  /** @todo enable this after making sure growing works flawlessly. */
                uint32_t cInc = RT_ALIGN_32(pShared->cEntries - cAllocated, 16);
#else
                uint32_t cInc = RT_ALIGN_32(pShared->cEntries - cAllocated, 1);
#endif
                PRTLOCKVALIDATORSHAREDONE *papOwners;
                papOwners = (PRTLOCKVALIDATORSHAREDONE *)RTMemRealloc((void *)pShared->papOwners,
                                                                      (cAllocated + cInc) * sizeof(void *));
                if (!papOwners)
                {
                    ASMAtomicWriteBool(&pShared->fReallocating, false);
                    rtLockValidatorSerializeDestructLeave();
                    /* RTMemRealloc will assert */
                    return false;
                }

                /* Zero the new tail slots before publishing the table. */
                while (cInc-- > 0)
                {
                    papOwners[cAllocated] = NULL;
                    cAllocated++;
                }

                ASMAtomicWritePtr((void * volatile *)&pShared->papOwners, papOwners);
                ASMAtomicWriteU32(&pShared->cAllocated, cAllocated);
            }
            ASMAtomicWriteBool(&pShared->fReallocating, false);
        }
        rtLockValidatorSerializeDestructLeave();

        /* Back to detection mode; bail if the record died while we raced. */
        rtLockValidatorSerializeDetectionEnter();
        if (RT_UNLIKELY(pShared->u32Magic != RTLOCKVALIDATORSHARED_MAGIC))
            break;

        if (pShared->cAllocated >= pShared->cEntries)
            return true;
    }

    rtLockValidatorSerializeDetectionLeave();
    AssertFailed(); /* too many iterations or destroyed while racing. */
    return false;
}
435
436
/**
 * Adds a thread entry to a shared lock record.
 *
 * Reserves a slot by bumping cEntries first (growing the table if needed),
 * then claims the first free slot with a compare-and-exchange.
 *
 * @returns true on success, false on serious race or if we're out of
 *          memory (in the latter case rtLockValidatorSharedRecMakeRoom has
 *          already released the detection lock).
 * @param   pShared     The shared lock record.
 * @param   pEntry      The thread entry to insert.
 */
DECLINLINE(bool) rtLockValidatorSharedRecAddThread(PRTLOCKVALIDATORSHARED pShared, PRTLOCKVALIDATORSHAREDONE pEntry)
{
    rtLockValidatorSerializeDetectionEnter();
    if (RT_LIKELY(pShared->u32Magic == RTLOCKVALIDATORSHARED_MAGIC)) /* paranoia */
    {
        if (    ASMAtomicIncU32(&pShared->cEntries) > pShared->cAllocated /** @todo add fudge */
            &&  !rtLockValidatorSharedRecMakeRoom(pShared))
            return false; /* the worker leave the lock */

        PRTLOCKVALIDATORSHAREDONE volatile *papOwners = pShared->papOwners;
        uint32_t const cMax = pShared->cAllocated;
        /* Claim a free (NULL) slot; retry the scan a bounded number of
           times in case concurrent inserts keep snatching slots. */
        for (unsigned i = 0; i < 100; i++)
        {
            for (uint32_t iEntry = 0; iEntry < cMax; iEntry++)
            {
                if (ASMAtomicCmpXchgPtr((void * volatile *)&papOwners[iEntry], pEntry, NULL))
                {
                    rtLockValidatorSerializeDetectionLeave();
                    return true;
                }
            }
            Assert(i != 25);
        }
        AssertFailed();
    }
    rtLockValidatorSerializeDetectionLeave();
    return false;
}
472
473
/**
 * Remove a thread entry from a shared lock record.
 *
 * Tries the slot at the remembered index first; if the entry moved (should
 * not happen yet), falls back to scanning the whole table.
 *
 * @param   pShared     The shared lock record.
 * @param   pEntry      The thread entry to remove.
 * @param   iEntry      The last known table index of the entry.
 */
DECLINLINE(void) rtLockValidatorSharedRecDelete(PRTLOCKVALIDATORSHARED pShared, PRTLOCKVALIDATORSHAREDONE pEntry,
                                                uint32_t iEntry)
{
    rtLockValidatorSerializeDetectionEnter();
    if (RT_LIKELY(pShared->u32Magic == RTLOCKVALIDATORSHARED_MAGIC))
    {
        if (    iEntry >= pShared->cAllocated
            || !ASMAtomicCmpXchgPtr((void * volatile *)&pShared->papOwners[iEntry], NULL, pEntry))
        {
            /* this shouldn't happen yet... */
            AssertFailed();
            PRTLOCKVALIDATORSHAREDONE volatile *papOwners = pShared->papOwners;
            uint32_t const cMax = pShared->cAllocated;
            for (iEntry = 0; iEntry < cMax; iEntry++)
                if (ASMAtomicCmpXchgPtr((void * volatile *)&papOwners[iEntry], NULL, pEntry))
                   break;
            AssertReturnVoidStmt(iEntry < cMax, rtLockValidatorSerializeDetectionLeave());
        }
        /* Release the reservation taken by the add; bit 31 set would mean
           an underflow (more removes than adds). */
        uint32_t cNow = ASMAtomicDecU32(&pShared->cEntries);
        Assert(!(cNow & RT_BIT_32(31))); NOREF(cNow);
    }
    rtLockValidatorSerializeDetectionLeave();
}
504
505
506RTDECL(int) RTLockValidatorCheckOrder(PRTLOCKVALIDATORREC pRec, RTTHREAD hThread, PCRTLOCKVALIDATORSRCPOS pSrcPos)
507{
508 AssertReturn(pRec->u32Magic == RTLOCKVALIDATORREC_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
509 if (!pRec->fEnabled)
510 return VINF_SUCCESS;
511
512 /*
513 * Check it locks we're currently holding.
514 */
515 /** @todo later */
516
517 /*
518 * If missing order rules, add them.
519 */
520
521 return VINF_SUCCESS;
522}
523
524
525RTDECL(int) RTLockValidatorCheckAndRelease(PRTLOCKVALIDATORREC pRec)
526{
527 AssertReturn(pRec->u32Magic == RTLOCKVALIDATORREC_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
528 if (!pRec->fEnabled)
529 return VINF_SUCCESS;
530 AssertReturn(pRec->hThread != NIL_RTTHREAD, VERR_SEM_LV_INVALID_PARAMETER);
531
532 RTLockValidatorUnsetOwner(pRec);
533 return VINF_SUCCESS;
534}
535
536
537RTDECL(int) RTLockValidatorCheckAndReleaseReadOwner(PRTLOCKVALIDATORSHARED pRead, RTTHREAD hThread)
538{
539 AssertReturn(pRead->u32Magic == RTLOCKVALIDATORSHARED_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
540 if (!pRead->fEnabled)
541 return VINF_SUCCESS;
542 AssertReturn(hThread != NIL_RTTHREAD, VERR_SEM_LV_INVALID_PARAMETER);
543
544 /*
545 * Locate the entry for this thread in the table.
546 */
547 uint32_t iEntry = 0;
548 PRTLOCKVALIDATORSHAREDONE pEntry = rtLockValidatorSharedRecFindThread(pRead, hThread, &iEntry);
549 AssertReturn(pEntry, VERR_SEM_LV_NOT_OWNER);
550
551 /*
552 * Check the release order.
553 */
554 if (pRead->hClass != NIL_RTLOCKVALIDATORCLASS)
555 {
556 /** @todo order validation */
557 }
558
559 /*
560 * Release the ownership or unwind a level of recursion.
561 */
562 Assert(pEntry->cRecursion > 0);
563 if (pEntry->cRecursion > 1)
564 pEntry->cRecursion--;
565 else
566 rtLockValidatorSharedRecDelete(pRead, pEntry, iEntry);
567
568 return VINF_SUCCESS;
569}
570
571
572RTDECL(int) RTLockValidatorRecordRecursion(PRTLOCKVALIDATORREC pRec, PCRTLOCKVALIDATORSRCPOS pSrcPos)
573{
574 AssertReturn(pRec->u32Magic == RTLOCKVALIDATORREC_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
575 if (!pRec->fEnabled)
576 return VINF_SUCCESS;
577 AssertReturn(pRec->hThread != NIL_RTTHREAD, VERR_SEM_LV_INVALID_PARAMETER);
578
579 Assert(pRec->cRecursion < _1M);
580 pRec->cRecursion++;
581
582 return VINF_SUCCESS;
583}
584
585
586RTDECL(int) RTLockValidatorUnwindRecursion(PRTLOCKVALIDATORREC pRec)
587{
588 AssertReturn(pRec->u32Magic == RTLOCKVALIDATORREC_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
589 if (!pRec->fEnabled)
590 return VINF_SUCCESS;
591 AssertReturn(pRec->hThread != NIL_RTTHREAD, VERR_SEM_LV_INVALID_PARAMETER);
592 AssertReturn(pRec->cRecursion > 0, VERR_SEM_LV_INVALID_PARAMETER);
593
594 Assert(pRec->cRecursion);
595 pRec->cRecursion--;
596 return VINF_SUCCESS;
597}
598
599
600RTDECL(int) RTLockValidatorRecordReadWriteRecursion(PRTLOCKVALIDATORREC pWrite, PRTLOCKVALIDATORSHARED pRead, PCRTLOCKVALIDATORSRCPOS pSrcPos)
601{
602 AssertReturn(pWrite->u32Magic == RTLOCKVALIDATORREC_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
603 AssertReturn(pRead->u32Magic == RTLOCKVALIDATORSHARED_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
604 AssertReturn(pRead->fEnabled == pWrite->fEnabled, VERR_SEM_LV_INVALID_PARAMETER);
605 if (!pWrite->fEnabled)
606 return VINF_SUCCESS;
607 AssertReturn(pWrite->hThread != NIL_RTTHREAD, VERR_SEM_LV_INVALID_PARAMETER);
608 AssertReturn(pWrite->cRecursion > 0, VERR_SEM_LV_INVALID_PARAMETER);
609
610 Assert(pWrite->cRecursion < _1M);
611 pWrite->cRecursion++;
612
613 return VINF_SUCCESS;
614}
615
616
617RTDECL(int) RTLockValidatorUnwindReadWriteRecursion(PRTLOCKVALIDATORREC pWrite, PRTLOCKVALIDATORSHARED pRead)
618{
619 AssertReturn(pWrite->u32Magic == RTLOCKVALIDATORREC_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
620 AssertReturn(pRead->u32Magic == RTLOCKVALIDATORSHARED_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
621 AssertReturn(pRead->fEnabled == pWrite->fEnabled, VERR_SEM_LV_INVALID_PARAMETER);
622 if (!pWrite->fEnabled)
623 return VINF_SUCCESS;
624 AssertReturn(pWrite->hThread != NIL_RTTHREAD, VERR_SEM_LV_INVALID_PARAMETER);
625 AssertReturn(pWrite->cRecursion > 0, VERR_SEM_LV_INVALID_PARAMETER);
626
627 Assert(pWrite->cRecursion);
628 pWrite->cRecursion--;
629 return VINF_SUCCESS;
630}
631
632
633RTDECL(RTTHREAD) RTLockValidatorSetOwner(PRTLOCKVALIDATORREC pRec, RTTHREAD hThread, PCRTLOCKVALIDATORSRCPOS pSrcPos)
634{
635 AssertReturn(pRec->u32Magic == RTLOCKVALIDATORREC_MAGIC, NIL_RTTHREAD);
636 if (!pRec->fEnabled)
637 return VINF_SUCCESS;
638 if (hThread == NIL_RTTHREAD)
639 {
640 hThread = RTThreadSelfAutoAdopt();
641 AssertReturn(hThread != NIL_RTTHREAD, hThread);
642 }
643
644 ASMAtomicIncS32(&hThread->LockValidator.cWriteLocks);
645
646 if (pRec->hThread == hThread)
647 pRec->cRecursion++;
648 else
649 {
650 Assert(pRec->hThread == NIL_RTTHREAD);
651
652 /*
653 * Update the record.
654 */
655 rtLockValidatorCopySrcPos(&pRec->SrcPos, pSrcPos);
656 ASMAtomicUoWriteU32(&pRec->cRecursion, 1);
657 ASMAtomicWriteHandle(&pRec->hThread, hThread);
658
659 /*
660 * Push the lock onto the lock stack.
661 */
662 /** @todo push it onto the per-thread lock stack. */
663 }
664
665 return hThread;
666}
667
668
669RTDECL(RTTHREAD) RTLockValidatorUnsetOwner(PRTLOCKVALIDATORREC pRec)
670{
671 AssertReturn(pRec->u32Magic == RTLOCKVALIDATORREC_MAGIC, NIL_RTTHREAD);
672 if (!pRec->fEnabled)
673 return VINF_SUCCESS;
674 RTTHREADINT *pThread = pRec->hThread;
675 AssertReturn(pThread != NIL_RTTHREAD, pThread);
676
677 ASMAtomicDecS32(&pThread->LockValidator.cWriteLocks);
678
679 if (ASMAtomicDecU32(&pRec->cRecursion) == 0)
680 {
681 /*
682 * Pop (remove) the lock.
683 */
684 /** @todo remove it from the per-thread stack/whatever. */
685
686 /*
687 * Update the record.
688 */
689 ASMAtomicWriteHandle(&pRec->hThread, NIL_RTTHREAD);
690 }
691
692 return pThread;
693}
694
695
696RTDECL(void) RTLockValidatorAddReadOwner(PRTLOCKVALIDATORSHARED pRead, RTTHREAD hThread, PCRTLOCKVALIDATORSRCPOS pSrcPos)
697{
698 AssertReturnVoid(pRead->u32Magic == RTLOCKVALIDATORSHARED_MAGIC);
699 if (!pRead->fEnabled)
700 return;
701 AssertReturnVoid(hThread != NIL_RTTHREAD);
702
703 /*
704 * Recursive?
705 *
706 * Note! This code can be optimized to try avoid scanning the table on
707 * insert. However, that's annoying work that makes the code big,
708 * so it can wait til later sometime.
709 */
710 PRTLOCKVALIDATORSHAREDONE pEntry = rtLockValidatorSharedRecFindThread(pRead, hThread, NULL);
711 if (pEntry)
712 {
713 pEntry->cRecursion++;
714 return;
715 }
716
717 /*
718 * Allocate a new thread entry and insert it into the table.
719 */
720 pEntry = rtLockValidatorSharedRecAllocThread(pRead, hThread, pSrcPos);
721 if ( pEntry
722 && !rtLockValidatorSharedRecAddThread(pRead, pEntry))
723 rtLockValidatorSharedRecFreeThread(pEntry);
724}
725
726
727RTDECL(void) RTLockValidatorRemoveReadOwner(PRTLOCKVALIDATORSHARED pRead, RTTHREAD hThread)
728{
729 AssertReturnVoid(pRead->u32Magic == RTLOCKVALIDATORSHARED_MAGIC);
730 if (!pRead->fEnabled)
731 return;
732 AssertReturnVoid(hThread != NIL_RTTHREAD);
733
734 AssertMsgFailed(("Not implemented"));
735}
736
737
738RTDECL(int32_t) RTLockValidatorWriteLockGetCount(RTTHREAD Thread)
739{
740 if (Thread == NIL_RTTHREAD)
741 return 0;
742
743 PRTTHREADINT pThread = rtThreadGet(Thread);
744 if (!pThread)
745 return VERR_INVALID_HANDLE;
746 int32_t cWriteLocks = ASMAtomicReadS32(&pThread->LockValidator.cWriteLocks);
747 rtThreadRelease(pThread);
748 return cWriteLocks;
749}
750RT_EXPORT_SYMBOL(RTLockValidatorWriteLockGetCount);
751
752
753RTDECL(void) RTLockValidatorWriteLockInc(RTTHREAD Thread)
754{
755 PRTTHREADINT pThread = rtThreadGet(Thread);
756 AssertReturnVoid(pThread);
757 ASMAtomicIncS32(&pThread->LockValidator.cWriteLocks);
758 rtThreadRelease(pThread);
759}
760RT_EXPORT_SYMBOL(RTLockValidatorWriteLockInc);
761
762
763RTDECL(void) RTLockValidatorWriteLockDec(RTTHREAD Thread)
764{
765 PRTTHREADINT pThread = rtThreadGet(Thread);
766 AssertReturnVoid(pThread);
767 ASMAtomicDecS32(&pThread->LockValidator.cWriteLocks);
768 rtThreadRelease(pThread);
769}
770RT_EXPORT_SYMBOL(RTLockValidatorWriteLockDec);
771
772
773RTDECL(int32_t) RTLockValidatorReadLockGetCount(RTTHREAD Thread)
774{
775 if (Thread == NIL_RTTHREAD)
776 return 0;
777
778 PRTTHREADINT pThread = rtThreadGet(Thread);
779 if (!pThread)
780 return VERR_INVALID_HANDLE;
781 int32_t cReadLocks = ASMAtomicReadS32(&pThread->LockValidator.cReadLocks);
782 rtThreadRelease(pThread);
783 return cReadLocks;
784}
785RT_EXPORT_SYMBOL(RTLockValidatorReadLockGetCount);
786
787
788RTDECL(void) RTLockValidatorReadLockInc(RTTHREAD Thread)
789{
790 PRTTHREADINT pThread = rtThreadGet(Thread);
791 Assert(pThread);
792 ASMAtomicIncS32(&pThread->LockValidator.cReadLocks);
793 rtThreadRelease(pThread);
794}
795RT_EXPORT_SYMBOL(RTLockValidatorReadLockInc);
796
797
798RTDECL(void) RTLockValidatorReadLockDec(RTTHREAD Thread)
799{
800 PRTTHREADINT pThread = rtThreadGet(Thread);
801 Assert(pThread);
802 ASMAtomicDecS32(&pThread->LockValidator.cReadLocks);
803 rtThreadRelease(pThread);
804}
805RT_EXPORT_SYMBOL(RTLockValidatorReadLockDec);
806
807
808
/**
 * Bitch about a deadlock.
 *
 * Walks the wait chain starting at pThread, dumping each thread and the
 * lock it is blocked on, until the chain cycles back or runs out.
 *
 * @param   pRec        The lock validator record we're going to block on.
 * @param   pThread     This thread.
 * @param   enmState    The sleep state we are about to enter.
 * @param   pCur        The thread we're deadlocking with (== pThread for a
 *                      true self-involving deadlock).
 * @param   pSrcPos     Where we are going to deadlock.
 */
static void rtLockValidatorComplainAboutDeadlock(PRTLOCKVALIDATORREC pRec, PRTTHREADINT pThread, RTTHREADSTATE enmState,
                                                 PRTTHREADINT pCur, PCRTLOCKVALIDATORSRCPOS pSrcPos)
{
    AssertMsg1(pCur == pThread ? "!!Deadlock detected!!" : "!!Deadlock exists!!", pSrcPos->uLine, pSrcPos->pszFile, pSrcPos->pszFunction);

    /*
     * Print the threads and locks involved.
     */
    PRTTHREADINT    apSeenThreads[8] = {0,0,0,0,0,0,0,0};
    unsigned        iSeenThread = 0;
    pCur = pThread;
    for (unsigned iEntry = 0; pCur && iEntry < 256; iEntry++)
    {
        /*
         * Print info on pCur. Determin next while doing so.
         */
        /* NOTE(review): 7 format specifiers vs. 8 arguments below - the
           function name/uId columns look misaligned; likely a missing
           specifier.  Confirm against the %RTthrd/%RTnthrd extensions. */
        AssertMsg2(" #%u: %RTthrd/%RTnthrd %s: %s(%u) %RTptr\n",
                   iEntry, pCur, pCur->Core.Key, pCur->szName,
                   pCur->LockValidator.SrcPos.pszFile, pCur->LockValidator.SrcPos.uLine,
                   pCur->LockValidator.SrcPos.pszFunction, pCur->LockValidator.SrcPos.uId);
        PRTTHREADINT  pNext       = NULL;
        RTTHREADSTATE enmCurState = rtThreadGetState(pCur);
        switch (enmCurState)
        {
            case RTTHREADSTATE_CRITSECT:
            case RTTHREADSTATE_EVENT:
            case RTTHREADSTATE_EVENT_MULTI:
            case RTTHREADSTATE_FAST_MUTEX:
            case RTTHREADSTATE_MUTEX:
            case RTTHREADSTATE_RW_READ:
            case RTTHREADSTATE_RW_WRITE:
            case RTTHREADSTATE_SPIN_MUTEX:
            {
                /* Re-read the state to catch the thread waking up while we
                   were dumping it (the walk is inherently racy). */
                PRTLOCKVALIDATORREC pCurRec      = pCur->LockValidator.pRec;
                RTTHREADSTATE       enmCurState2 = rtThreadGetState(pCur);
                if (enmCurState2 != enmCurState)
                {
                    AssertMsg2(" Impossible!!! enmState=%s -> %s (%d)\n",
                               RTThreadStateName(enmCurState), RTThreadStateName(enmCurState2), enmCurState2);
                    break;
                }
                if (   VALID_PTR(pCurRec)
                    && pCurRec->u32Magic == RTLOCKVALIDATORREC_MAGIC)
                {
                    AssertMsg2(" Waiting on %s %p [%s]: Entered %s(%u) %s %p\n",
                               RTThreadStateName(enmCurState), pCurRec->hLock, pCurRec->pszName,
                               pCurRec->SrcPos.pszFile, pCurRec->SrcPos.uLine, pCurRec->SrcPos.pszFunction, pCurRec->SrcPos.uId);
                    pNext = pCurRec->hThread;
                }
                else if (VALID_PTR(pCurRec))
                    AssertMsg2(" Waiting on %s pCurRec=%p: invalid magic number: %#x\n",
                               RTThreadStateName(enmCurState), pCurRec, pCurRec->u32Magic);
                else
                    AssertMsg2(" Waiting on %s pCurRec=%p: invalid pointer\n",
                               RTThreadStateName(enmCurState), pCurRec);
                break;
            }

            default:
                AssertMsg2(" Impossible!!! enmState=%s (%d)\n", RTThreadStateName(enmCurState), enmCurState);
                break;
        }

        /*
         * Check for cycle.
         */
        if (iEntry && pCur == pThread)
            break;
        for (unsigned i = 0; i < RT_ELEMENTS(apSeenThreads); i++)
            if (apSeenThreads[i] == pCur)
            {
                AssertMsg2(" Cycle!\n");
                pNext = NULL;
                break;
            }

        /*
         * Advance to the next thread.
         */
        iSeenThread = (iSeenThread + 1) % RT_ELEMENTS(apSeenThreads);
        apSeenThreads[iSeenThread] = pCur;
        pCur = pNext;
    }
    AssertBreakpoint();
}
903
904
/**
 * Validates that blocking on the write side of a read-write lock is safe.
 *
 * Currently only validates the inputs and rejects read-to-write upgrades;
 * actual lock-order checking is still to be written (fRecursiveOk is
 * accepted but not yet used).
 *
 * @returns VINF_SUCCESS, VERR_SEM_LV_INVALID_PARAMETER on bad input, or
 *          VERR_SEM_LV_UPGRADE if the thread already holds the read side.
 * @param   pWrite          The write-side validator record.
 * @param   pRead           The read-side validator record.
 * @param   hThread         The thread about to block.
 * @param   enmState        The sleep state about to be entered.
 * @param   fRecursiveOk    Whether recursion is permitted (unused for now).
 * @param   pSrcPos         The blocking source position.
 */
RTDECL(int) RTLockValidatorCheckWriteOrderBlocking(PRTLOCKVALIDATORREC pWrite, PRTLOCKVALIDATORSHARED pRead,
                                                   RTTHREAD hThread, RTTHREADSTATE enmState, bool fRecursiveOk,
                                                   PCRTLOCKVALIDATORSRCPOS pSrcPos)
{
    /*
     * Fend off wild life.
     */
    AssertPtrReturn(pWrite, VERR_SEM_LV_INVALID_PARAMETER);
    AssertReturn(pWrite->u32Magic == RTLOCKVALIDATORREC_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    AssertPtrReturn(pRead, VERR_SEM_LV_INVALID_PARAMETER);
    AssertReturn(pRead->u32Magic == RTLOCKVALIDATORSHARED_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    AssertReturn(pRead->fEnabled == pWrite->fEnabled, VERR_SEM_LV_INVALID_PARAMETER);
    if (!pWrite->fEnabled)
        return VINF_SUCCESS;
    AssertReturn(RTTHREAD_IS_SLEEPING(enmState), VERR_SEM_LV_INVALID_PARAMETER);
    PRTTHREADINT pThread = hThread;
    AssertPtrReturn(pThread, VERR_SEM_LV_INVALID_PARAMETER);
    AssertReturn(pThread->u32Magic == RTTHREADINT_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    RTTHREADSTATE enmThreadState = rtThreadGetState(pThread);
    AssertReturn(   enmThreadState == RTTHREADSTATE_RUNNING
                 || enmThreadState == RTTHREADSTATE_TERMINATED   /* rtThreadRemove uses locks too */
                 || enmThreadState == RTTHREADSTATE_INITIALIZING /* rtThreadInsert uses locks too */
                 , VERR_SEM_LV_INVALID_PARAMETER);

    /*
     * Check for attempts at doing a read upgrade.
     */
    PRTLOCKVALIDATORSHAREDONE pEntry = rtLockValidatorSharedRecFindThread(pRead, hThread, NULL);
    if (pEntry)
    {
        AssertMsgFailed(("Read lock upgrade at %s(%d) %s %p!\nRead lock take at %s(%d) %s %p!\n",
                         pSrcPos->pszFile, pSrcPos->uLine, pSrcPos->pszFunction, pSrcPos->uId,
                         pEntry->SrcPos.pszFile, pEntry->SrcPos.uLine, pEntry->SrcPos.pszFunction, pEntry->SrcPos.uId));
        return VERR_SEM_LV_UPGRADE;
    }



    return VINF_SUCCESS;
}
945
946
/**
 * Validates that blocking on the read side of a read-write lock is safe.
 *
 * Currently only validates the inputs; actual lock-order checking is still
 * to be written (fRecursiveOk and pSrcPos are accepted but not yet used).
 *
 * @returns VINF_SUCCESS, or VERR_SEM_LV_INVALID_PARAMETER on bad input.
 * @param   pRead           The read-side validator record.
 * @param   pWrite          The write-side validator record.
 * @param   hThread         The thread about to block.
 * @param   enmState        The sleep state about to be entered.
 * @param   fRecursiveOk    Whether recursion is permitted (unused for now).
 * @param   pSrcPos         The blocking source position (unused for now).
 */
RTDECL(int) RTLockValidatorCheckReadOrderBlocking(PRTLOCKVALIDATORSHARED pRead, PRTLOCKVALIDATORREC pWrite,
                                                  RTTHREAD hThread, RTTHREADSTATE enmState, bool fRecursiveOk,
                                                  PCRTLOCKVALIDATORSRCPOS pSrcPos)
{
    /*
     * Fend off wild life.
     */
    AssertPtrReturn(pRead, VERR_SEM_LV_INVALID_PARAMETER);
    AssertReturn(pRead->u32Magic == RTLOCKVALIDATORSHARED_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    AssertPtrReturn(pWrite, VERR_SEM_LV_INVALID_PARAMETER);
    AssertReturn(pWrite->u32Magic == RTLOCKVALIDATORREC_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    AssertReturn(pRead->fEnabled == pWrite->fEnabled, VERR_SEM_LV_INVALID_PARAMETER);
    if (!pRead->fEnabled)
        return VINF_SUCCESS;
    AssertReturn(RTTHREAD_IS_SLEEPING(enmState), VERR_SEM_LV_INVALID_PARAMETER);
    PRTTHREADINT pThread = hThread;
    AssertPtrReturn(pThread, VERR_SEM_LV_INVALID_PARAMETER);
    AssertReturn(pThread->u32Magic == RTTHREADINT_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    RTTHREADSTATE enmThreadState = rtThreadGetState(pThread);
    AssertReturn(   enmThreadState == RTTHREADSTATE_RUNNING
                 || enmThreadState == RTTHREADSTATE_TERMINATED   /* rtThreadRemove uses locks too */
                 || enmThreadState == RTTHREADSTATE_INITIALIZING /* rtThreadInsert uses locks too */
                 , VERR_SEM_LV_INVALID_PARAMETER);
    /* Blocking for read while holding the write side would self-deadlock. */
    Assert(pWrite->hThread != pThread);


    return VINF_SUCCESS;
}
975
976
/**
 * Validation checks and deadlock detection performed before a thread blocks
 * waiting for an exclusive lock.
 *
 * Records the source position and lock record on the calling thread, rejects
 * illegal recursion, and then walks the owner/waiter chain looking for a
 * cycle back to the calling thread, which would mean blocking now deadlocks.
 *
 * The chain walk is performed without holding the locks it inspects; it
 * re-reads state and magic fields after each access to cope with racing
 * threads, and requires three consistent runs before declaring a deadlock.
 *
 * @returns VINF_SUCCESS if it is OK to block,
 *          VERR_SEM_LV_NESTED on disallowed recursion,
 *          VERR_SEM_LV_DEADLOCK if blocking would (most likely) deadlock,
 *          VERR_SEM_LV_INVALID_PARAMETER on bad input.
 * @param   pRec            The validator record of the lock about to be waited on.
 * @param   hThread         The thread about to block.
 * @param   enmState        The sleep state the thread is about to enter;
 *                          must satisfy RTTHREAD_IS_SLEEPING.
 * @param   fRecursiveOk    Whether recursive acquisition by the current owner
 *                          is acceptable (see the recursion note below).
 * @param   pSrcPos         The source position of the blocking call.
 */
RTDECL(int) RTLockValidatorCheckBlocking(PRTLOCKVALIDATORREC pRec, RTTHREAD hThread,
                                         RTTHREADSTATE enmState, bool fRecursiveOk,
                                         PCRTLOCKVALIDATORSRCPOS pSrcPos)
{
    /*
     * Fend off wild life.
     */
    AssertPtrReturn(pRec, VERR_SEM_LV_INVALID_PARAMETER);
    AssertReturn(pRec->u32Magic == RTLOCKVALIDATORREC_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    if (!pRec->fEnabled)
        return VINF_SUCCESS;                    /* validation disabled for this lock */
    AssertReturn(RTTHREAD_IS_SLEEPING(enmState), VERR_SEM_LV_INVALID_PARAMETER);
    PRTTHREADINT pThread = hThread;
    AssertPtrReturn(pThread, VERR_SEM_LV_INVALID_PARAMETER);
    AssertReturn(pThread->u32Magic == RTTHREADINT_MAGIC, VERR_SEM_LV_INVALID_PARAMETER);
    RTTHREADSTATE enmThreadState = rtThreadGetState(pThread);
    AssertReturn(   enmThreadState == RTTHREADSTATE_RUNNING
                 || enmThreadState == RTTHREADSTATE_TERMINATED   /* rtThreadRemove uses locks too */
                 || enmThreadState == RTTHREADSTATE_INITIALIZING /* rtThreadInsert uses locks too */
                 , VERR_SEM_LV_INVALID_PARAMETER);

    /*
     * Record the location and everything before changing the state and
     * performing deadlock detection.
     */
    pThread->LockValidator.pRec = pRec;
    rtLockValidatorCopySrcPos(&pThread->LockValidator.SrcPos, pSrcPos);

    /*
     * Don't do deadlock detection if we're recursing and that's OK.
     *
     * On some hosts we don't do recursion accounting our selves and there
     * isn't any other place to check for this. semmutex-win.cpp for instance.
     */
    if (pRec->hThread == pThread)
    {
        if (fRecursiveOk)
            return VINF_SUCCESS;
        AssertMsgFailed(("%p (%s)\n", pRec->hLock, pRec->pszName));
        return VERR_SEM_LV_NESTED;
    }

    /*
     * Do deadlock detection.
     *
     * Since we're missing proper serialization, we don't declare it a
     * deadlock until we've got three runs with the same list length.
     * While this isn't perfect, it should avoid out the most obvious
     * races on SMP boxes.
     */
    rtLockValidatorSerializeDetectionEnter();

    PRTTHREADINT    pCur;
    unsigned        cPrevLength = ~0U;          /* impossible length => first run never matches */
    unsigned        cEqualRuns  = 0;
    unsigned        iParanoia   = 256;          /* hard cap on the number of runs */
    do
    {
        unsigned cLength = 0;
        pCur = pThread;
        for (;;)
        {
            /*
             * Get the next thread.
             */
            PRTTHREADINT pNext = NULL;
            for (;;)
            {
                RTTHREADSTATE enmCurState = rtThreadGetState(pCur);
                switch (enmCurState)
                {
                    case RTTHREADSTATE_CRITSECT:
                    case RTTHREADSTATE_EVENT:
                    case RTTHREADSTATE_EVENT_MULTI:
                    case RTTHREADSTATE_FAST_MUTEX:
                    case RTTHREADSTATE_MUTEX:
                    case RTTHREADSTATE_RW_READ:
                    case RTTHREADSTATE_RW_WRITE:
                    case RTTHREADSTATE_SPIN_MUTEX:
                    {
                        /* pCur is blocked on a lock.  Read its lock record and
                           that record's owner, re-checking the thread state and
                           record magic after each read: we are racing pCur
                           waking up, so retry (continue) whenever a stale read
                           is detected. */
                        PRTLOCKVALIDATORREC pCurRec = pCur->LockValidator.pRec;
                        if (    rtThreadGetState(pCur) != enmCurState
                            ||  !VALID_PTR(pCurRec)
                            ||  pCurRec->u32Magic != RTLOCKVALIDATORREC_MAGIC)
                            continue;
                        pNext = pCurRec->hThread;
                        if (    rtThreadGetState(pCur) != enmCurState
                            ||  pCurRec->u32Magic != RTLOCKVALIDATORREC_MAGIC
                            ||  pCurRec->hThread != pNext)
                            continue;
                        break;
                    }

                    default:
                        /* Not sleeping on a lock => the chain ends here. */
                        pNext = NULL;
                        break;
                }
                break;
            }

            /*
             * If we arrive at the end of the list we're good.
             */
            pCur = pNext;
            if (!pCur)
            {
                rtLockValidatorSerializeDetectionLeave();
                return VINF_SUCCESS;
            }

            /*
             * If we've got back to the blocking thread id we've
             * got a deadlock.
             */
            if (pCur == pThread)
                break;

            /*
             * If we've got a chain of more than 256 items, there is some
             * kind of cycle in the list, which means that there is already
             * a deadlock somewhere.
             */
            if (cLength >= 256)
                break;

            cLength++;
        }

        /* compare with previous list run. */
        if (cLength != cPrevLength)
        {
            cPrevLength = cLength;
            cEqualRuns  = 0;
        }
        else
            cEqualRuns++;
    } while (cEqualRuns < 3 && --iParanoia > 0);

    /*
     * Ok, if we ever get here, it's most likely a genuine deadlock.
     */
    rtLockValidatorComplainAboutDeadlock(pRec, pThread, enmState, pCur, pSrcPos);

    rtLockValidatorSerializeDetectionLeave();

    return VERR_SEM_LV_DEADLOCK;
}
RT_EXPORT_SYMBOL(RTLockValidatorCheckBlocking);
1125
1126
1127RTDECL(bool) RTLockValidatorSetEnabled(bool fEnabled)
1128{
1129 return ASMAtomicXchgBool(&g_fLockValidatorEnabled, fEnabled);
1130}
1131RT_EXPORT_SYMBOL(RTLockValidatorSetEnabled);
1132
1133
1134RTDECL(bool) RTLockValidatorIsEnabled(void)
1135{
1136 return ASMAtomicUoReadBool(&g_fLockValidatorEnabled);
1137}
1138RT_EXPORT_SYMBOL(RTLockValidatorIsEnabled);
1139
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette