VirtualBox

source: vbox/trunk/src/VBox/Runtime/testcase/tstRTLockValidator.cpp@ 25759

Last change on this file since 25759 was 25759, checked in by vboxsync, 15 years ago

iprt/semaphore.h: RT_LOCK_CHECK_ORDER && IN_RING3 -> wrap RTSemRWCreate and RTSemMutexCreate so automatic order validation is performed.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 57.3 KB
Line 
1/* $Id: tstRTLockValidator.cpp 25759 2010-01-12 13:06:06Z vboxsync $ */
2/** @file
3 * IPRT Testcase - RTLockValidator.
4 */
5
6/*
7 * Copyright (C) 2006-2009 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 *
26 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
27 * Clara, CA 95054 USA or visit http://www.sun.com if you need
28 * additional information or have any questions.
29 */
30
31
32/*******************************************************************************
33* Header Files *
34*******************************************************************************/
35#include <iprt/lockvalidator.h>
36
37#include <iprt/asm.h> /* for return addresses */
38#include <iprt/critsect.h>
39#include <iprt/err.h>
40#include <iprt/semaphore.h>
41#include <iprt/test.h>
42#include <iprt/thread.h>
43#include <iprt/time.h>
44
45
46/*******************************************************************************
47* Defined Constants And Macros *
48*******************************************************************************/
/** Duration of the "simple" test runs, in seconds (used by the testcase
 * driver outside this chunk — confirm against main()). */
#define SECS_SIMPLE_TEST 1
/** Duration of the race test runs, in seconds (used by the testcase driver
 * outside this chunk — confirm against main()). */
#define SECS_RACE_TEST 3
/** Short timeout in milliseconds for waits expected to complete quickly. */
#define TEST_SMALL_TIMEOUT ( 10*1000)
/** Large timeout in milliseconds used when waiting for worker threads. */
#define TEST_LARGE_TIMEOUT ( 60*1000)
/** One hour timeout in milliseconds, used instead of the normal ones while
 * g_fDoNotSpin is set so a debugger user isn't interrupted. */
#define TEST_DEBUG_TIMEOUT (3600*1000)
54
55
56/*******************************************************************************
57* Global Variables *
58*******************************************************************************/
/** The testcase handle. */
static RTTEST g_hTest;
/** Flip this in the debugger to get some peace to single step wild code. */
bool volatile g_fDoNotSpin = false;

/** Set when the main thread wishes to terminate the test. */
bool volatile g_fShutdown = false;
/** The number of threads participating in the current test. */
static uint32_t g_cThreads;
/** Index of the thread that is expected to provoke the deadlock. */
static uint32_t g_iDeadlockThread;
/** Handles of the worker threads; NIL_RTTHREAD for unused slots. */
static RTTHREAD g_ahThreads[32];
/** Lock validator classes used by the lock order tests (testLo*). */
static RTLOCKVALCLASS g_ahClasses[32];
/** Critical sections, one per worker thread. */
static RTCRITSECT g_aCritSects[32];
/** Read-write semaphores, one per worker thread. */
static RTSEMRW g_ahSemRWs[32];
/** Mutex semaphores, one per worker thread. */
static RTSEMMUTEX g_ahSemMtxes[32];
/** Event semaphore used by the event deadlock test (testDd6). */
static RTSEMEVENT g_hSemEvt;
/** Multiple release event semaphore used by the event multi deadlock test
 * (testDd7). */
static RTSEMEVENTMULTI g_hSemEvtMulti;

/** Multiple release event semaphore that is signalled by the main thread after
 * it has started all the threads. */
static RTSEMEVENTMULTI g_hThreadsStartedEvt;

/** The number of threads that have called testThreadBlocking */
static uint32_t volatile g_cThreadsBlocking;
/** Multiple release event semaphore that is signalled by the last thread to
 * call testThreadBlocking. testWaitForAllOtherThreadsToSleep waits on this. */
static RTSEMEVENTMULTI g_hThreadsBlockingEvt;

/** When to stop testing. */
static uint64_t g_NanoTSStop;
/** The number of deadlocks detected during a race test pass. */
static uint32_t volatile g_cDeadlocks;
/** The number of loops performed during a race test pass. */
static uint32_t volatile g_cLoops;
93
94
/**
 * Spin until the callback stops returning VERR_TRY_AGAIN.
 *
 * First waits on g_hThreadsStartedEvt so polling doesn't begin before the
 * main thread has started all the workers.
 *
 * @returns Callback result.  VERR_TIMEOUT if too much time elapses (unless
 *          g_fDoNotSpin is set), VERR_INTERNAL_ERROR on shutdown.
 * @param   pfnCallback     Callback for checking the state.
 * @param   pvWhat          Callback parameter.
 */
static int testWaitForSomethingToBeOwned(int (*pfnCallback)(void *), void *pvWhat)
{
    RTTEST_CHECK(g_hTest, RTThreadGetState(RTThreadSelf()) == RTTHREADSTATE_RUNNING);
    RTTEST_CHECK_RC_OK(g_hTest, RTSemEventMultiWait(g_hThreadsStartedEvt, TEST_SMALL_TIMEOUT));

    uint64_t u64StartMS = RTTimeMilliTS();
    for (unsigned iLoop = 0; ; iLoop++)
    {
        RTTEST_CHECK_RET(g_hTest, !g_fShutdown, VERR_INTERNAL_ERROR);

        /* Anything other than "try again" ends the wait. */
        int rc = pfnCallback(pvWhat);
        if (rc != VERR_TRY_AGAIN/* && !g_fDoNotSpin*/)
        {
            RTTEST_CHECK_RC_OK(g_hTest, rc);
            return rc;
        }

        /* Only enforce the timeout when nobody is single stepping in a debugger. */
        uint64_t cMsElapsed = RTTimeMilliTS() - u64StartMS;
        if (!g_fDoNotSpin)
            RTTEST_CHECK_RET(g_hTest, cMsElapsed <= TEST_SMALL_TIMEOUT, VERR_TIMEOUT);

        RTTEST_CHECK_RET(g_hTest, !g_fShutdown, VERR_INTERNAL_ERROR);
        /* Busy-spin the first 256 iterations, then back off to 1ms sleeps. */
        RTThreadSleep(/*g_fDoNotSpin ? TEST_DEBUG_TIMEOUT :*/ iLoop > 256 ? 1 : 0);
    }
}
127
128
129static int testCheckIfCritSectIsOwned(void *pvWhat)
130{
131 PRTCRITSECT pCritSect = (PRTCRITSECT)pvWhat;
132 if (!RTCritSectIsInitialized(pCritSect))
133 return VERR_SEM_DESTROYED;
134 if (RTCritSectIsOwned(pCritSect))
135 return VINF_SUCCESS;
136 return VERR_TRY_AGAIN;
137}
138
139
140static int testWaitForCritSectToBeOwned(PRTCRITSECT pCritSect)
141{
142 return testWaitForSomethingToBeOwned(testCheckIfCritSectIsOwned, pCritSect);
143}
144
145
146static int testCheckIfSemRWIsOwned(void *pvWhat)
147{
148 RTSEMRW hSemRW = (RTSEMRW)pvWhat;
149 if (RTSemRWGetWriteRecursion(hSemRW) > 0)
150 return VINF_SUCCESS;
151 if (RTSemRWGetReadCount(hSemRW) > 0)
152 return VINF_SUCCESS;
153 return VERR_TRY_AGAIN;
154}
155
156static int testWaitForSemRWToBeOwned(RTSEMRW hSemRW)
157{
158 return testWaitForSomethingToBeOwned(testCheckIfSemRWIsOwned, hSemRW);
159}
160
161
162static int testCheckIfSemMutexIsOwned(void *pvWhat)
163{
164 RTSEMMUTEX hSemRW = (RTSEMMUTEX)pvWhat;
165 if (RTSemMutexIsOwned(hSemRW))
166 return VINF_SUCCESS;
167 return VERR_TRY_AGAIN;
168}
169
170static int testWaitForSemMutexToBeOwned(RTSEMMUTEX hSemMutex)
171{
172 return testWaitForSomethingToBeOwned(testCheckIfSemMutexIsOwned, hSemMutex);
173}
174
175
176/**
177 * For reducing spin in testWaitForAllOtherThreadsToSleep.
178 */
179static void testThreadBlocking(void)
180{
181 if (ASMAtomicIncU32(&g_cThreadsBlocking) == g_cThreads)
182 RTTEST_CHECK_RC_OK(g_hTest, RTSemEventMultiSignal(g_hThreadsBlockingEvt));
183}
184
185
/**
 * Waits for all the other threads to enter sleeping states.
 *
 * @returns VINF_SUCCESS on success, VERR_INTERNAL_ERROR on failure.
 * @param   enmDesiredState     The desired thread sleep state.
 * @param   cWaitOn             The distance to the lock they'll be waiting on,
 *                              the lock type is derived from the desired state.
 *                              UINT32_MAX means no special lock.
 */
static int testWaitForAllOtherThreadsToSleep(RTTHREADSTATE enmDesiredState, uint32_t cWaitOn)
{
    /* Check ourselves in and wait until every thread has done so. */
    testThreadBlocking();
    RTTEST_CHECK(g_hTest, RTThreadGetState(RTThreadSelf()) == RTTHREADSTATE_RUNNING);
    RTTEST_CHECK_RC_OK(g_hTest, RTSemEventMultiWait(g_hThreadsBlockingEvt, TEST_SMALL_TIMEOUT));

    RTTHREAD hThreadSelf = RTThreadSelf();
    for (uint32_t iOuterLoop = 0; ; iOuterLoop++)
    {
        uint32_t cMissing = 0;      /* threads whose handle wasn't set yet */
        uint32_t cWaitedOn = 0;     /* threads we had to poll this round */
        for (uint32_t i = 0; i < g_cThreads; i++)
        {
            RTTHREAD hThread = g_ahThreads[i];
            if (hThread == NIL_RTTHREAD)
                cMissing++;
            else if (hThread != hThreadSelf)
            {
                /*
                 * Figure out which lock to wait for.
                 */
                void *pvLock = NULL;
                if (cWaitOn != UINT32_MAX)
                {
                    uint32_t j = (i + cWaitOn) % g_cThreads;
                    switch (enmDesiredState)
                    {
                        case RTTHREADSTATE_CRITSECT: pvLock = &g_aCritSects[j]; break;
                        case RTTHREADSTATE_RW_WRITE:
                        case RTTHREADSTATE_RW_READ: pvLock = g_ahSemRWs[j]; break;
                        case RTTHREADSTATE_MUTEX: pvLock = g_ahSemMtxes[j]; break;
                        default: break;
                    }
                }

                /*
                 * Wait for this thread.
                 */
                for (unsigned iLoop = 0; ; iLoop++)
                {
                    RTTHREADSTATE enmState = RTThreadGetReallySleeping(hThread);
                    if (RTTHREAD_IS_SLEEPING(enmState))
                    {
                        /* Done with this thread when it is in the desired sleep
                           state, blocked on the expected lock (if any) and not
                           inside the validator, and its native thread has
                           really stopped running. */
                        if (    enmState == enmDesiredState
                             && (   !pvLock
                                 || (   pvLock == RTLockValidatorQueryBlocking(hThread)
                                     && !RTLockValidatorIsBlockedThreadInValidator(hThread) )
                                )
                             && RTThreadGetNativeState(hThread) != RTTHREADNATIVESTATE_RUNNING
                           )
                            break;
                    }
                    else if (   enmState != RTTHREADSTATE_RUNNING
                             && enmState != RTTHREADSTATE_INITIALIZING)
                        return VERR_INTERNAL_ERROR;     /* unexpected state - give up */
                    RTTEST_CHECK_RET(g_hTest, !g_fShutdown, VERR_INTERNAL_ERROR);
                    RTThreadSleep(g_fDoNotSpin ? TEST_DEBUG_TIMEOUT : iOuterLoop + iLoop > 256 ? 1 : 0);
                    RTTEST_CHECK_RET(g_hTest, !g_fShutdown, VERR_INTERNAL_ERROR);
                    cWaitedOn++;
                }
            }
            RTTEST_CHECK_RET(g_hTest, !g_fShutdown, VERR_INTERNAL_ERROR);
        }

        /* Stable only when all threads existed and none needed polling. */
        if (!cMissing && !cWaitedOn)
            break;
        RTTEST_CHECK_RET(g_hTest, !g_fShutdown, VERR_INTERNAL_ERROR);
        RTThreadSleep(g_fDoNotSpin ? TEST_DEBUG_TIMEOUT : iOuterLoop > 256 ? 1 : 0);
        RTTEST_CHECK_RET(g_hTest, !g_fShutdown, VERR_INTERNAL_ERROR);
    }

    RTThreadSleep(0); /* fudge factor */
    RTTEST_CHECK_RET(g_hTest, !g_fShutdown, VERR_INTERNAL_ERROR);
    return VINF_SUCCESS;
}
270
271
272/**
273 * Worker that starts the threads.
274 *
275 * @returns Same as RTThreadCreate.
276 * @param cThreads The number of threads to start.
277 * @param pfnThread Thread function.
278 */
279static int testStartThreads(uint32_t cThreads, PFNRTTHREAD pfnThread)
280{
281 RTSemEventMultiReset(g_hThreadsStartedEvt);
282
283 for (uint32_t i = 0; i < RT_ELEMENTS(g_ahThreads); i++)
284 g_ahThreads[i] = NIL_RTTHREAD;
285
286 int rc = VINF_SUCCESS;
287 for (uint32_t i = 0; i < cThreads; i++)
288 {
289 rc = RTThreadCreateF(&g_ahThreads[i], pfnThread, (void *)(uintptr_t)i, 0,
290 RTTHREADTYPE_DEFAULT, RTTHREADFLAGS_WAITABLE, "thread-%02u", i);
291 RTTEST_CHECK_RC_OK(g_hTest, rc);
292 if (RT_FAILURE(rc))
293 break;
294 }
295
296 RTTEST_CHECK_RC_OK_RET(g_hTest, RTSemEventMultiSignal(g_hThreadsStartedEvt), rcCheck);
297 return rc;
298}
299
300
301/**
302 * Worker that waits for the threads to complete.
303 *
304 * @param cMillies How long to wait for each.
305 * @param fStopOnError Whether to stop on error and heed the thread
306 * return status.
307 */
308static void testWaitForThreads(uint32_t cMillies, bool fStopOnError)
309{
310 uint32_t i = RT_ELEMENTS(g_ahThreads);
311 while (i-- > 0)
312 if (g_ahThreads[i] != NIL_RTTHREAD)
313 {
314 int rcThread;
315 int rc2;
316 RTTEST_CHECK_RC_OK(g_hTest, rc2 = RTThreadWait(g_ahThreads[i], cMillies, &rcThread));
317 if (RT_SUCCESS(rc2))
318 g_ahThreads[i] = NIL_RTTHREAD;
319 if (fStopOnError && (RT_FAILURE(rc2) || RT_FAILURE(rcThread)))
320 return;
321 }
322}
323
324
/**
 * Common worker for the deadlock testcases: creates the locks, runs cThreads
 * instances of pfnThread, and tears everything down again.
 *
 * @param   cThreads    The number of threads to use.
 * @param   cSecs       How long to keep re-running the test, in seconds;
 *                      0 means a single pass.
 * @param   fLoops      Set when the thread function loops on its own and
 *                      updates g_cLoops / g_cDeadlocks (the race tests).
 * @param   pfnThread   The thread function.
 * @param   pszName     The sub-test name.
 */
static void testIt(uint32_t cThreads, uint32_t cSecs, bool fLoops, PFNRTTHREAD pfnThread, const char *pszName)
{
    /*
     * Init test.
     */
    if (cSecs > 0)
        RTTestSubF(g_hTest, "%s, %u threads, %u secs", pszName, cThreads, cSecs);
    else
        RTTestSubF(g_hTest, "%s, %u threads, single pass", pszName, cThreads);

    RTTEST_CHECK_RETV(g_hTest, RT_ELEMENTS(g_ahThreads) >= cThreads);
    RTTEST_CHECK_RETV(g_hTest, RT_ELEMENTS(g_aCritSects) >= cThreads);

    g_cThreads = cThreads;
    g_fShutdown = false;

    /* One critsect, RW semaphore and mutex per thread; sub-class ANY
       presumably exempts them from lock order validation - see
       iprt/lockvalidator.h to confirm. */
    for (uint32_t i = 0; i < cThreads; i++)
    {
        RTTEST_CHECK_RC_RETV(g_hTest, RTCritSectInitEx(&g_aCritSects[i], 0 /*fFlags*/, NIL_RTLOCKVALCLASS,
                                                       RTLOCKVAL_SUB_CLASS_ANY, "RTCritSect"), VINF_SUCCESS);
        RTTEST_CHECK_RC_RETV(g_hTest, RTSemRWCreateEx(&g_ahSemRWs[i], 0 /*fFlags*/, NIL_RTLOCKVALCLASS,
                                                      RTLOCKVAL_SUB_CLASS_ANY, "RTSemRW"), VINF_SUCCESS);
        RTTEST_CHECK_RC_RETV(g_hTest, RTSemMutexCreateEx(&g_ahSemMtxes[i], 0 /*fFlags*/, NIL_RTLOCKVALCLASS,
                                                         RTLOCKVAL_SUB_CLASS_ANY, "RTSemMutex"), VINF_SUCCESS);
    }
    RTTEST_CHECK_RC_RETV(g_hTest, RTSemEventCreate(&g_hSemEvt), VINF_SUCCESS);
    RTTEST_CHECK_RC_RETV(g_hTest, RTSemEventMultiCreate(&g_hSemEvtMulti), VINF_SUCCESS);
    RTTEST_CHECK_RC_RETV(g_hTest, RTSemEventMultiCreate(&g_hThreadsStartedEvt), VINF_SUCCESS);
    RTTEST_CHECK_RC_RETV(g_hTest, RTSemEventMultiCreate(&g_hThreadsBlockingEvt), VINF_SUCCESS);

    /*
     * The test loop.
     */
    uint32_t cPasses = 0;
    uint32_t cLoops = 0;
    uint32_t cDeadlocks = 0;
    uint32_t cErrors = RTTestErrorCount(g_hTest);
    uint64_t uStartNS = RTTimeNanoTS();
    g_NanoTSStop = uStartNS + cSecs * UINT64_C(1000000000);
    do
    {
        /* Rotate which thread is expected to provoke the deadlock. */
        g_iDeadlockThread = (cThreads - 1 + cPasses) % cThreads;
        g_cLoops = 0;
        g_cDeadlocks = 0;
        g_cThreadsBlocking = 0;
        RTTEST_CHECK_RC(g_hTest, RTSemEventMultiReset(g_hThreadsBlockingEvt), VINF_SUCCESS);

        int rc = testStartThreads(cThreads, pfnThread);
        if (RT_SUCCESS(rc))
        {
            testWaitForThreads(TEST_LARGE_TIMEOUT + cSecs*1000, true);
            /* Extra long wait when debugging so the single stepper isn't cut off. */
            if (g_fDoNotSpin && RTTestErrorCount(g_hTest) != cErrors)
                testWaitForThreads(TEST_DEBUG_TIMEOUT, true);
        }

        /* Race tests must have made progress and seen at least one deadlock. */
        RTTEST_CHECK(g_hTest, !fLoops || g_cLoops > 0);
        cLoops += g_cLoops;
        RTTEST_CHECK(g_hTest, !fLoops || g_cDeadlocks > 0);
        cDeadlocks += g_cDeadlocks;
        cPasses++;
    } while (   RTTestErrorCount(g_hTest) == cErrors
             && !fLoops
             && RTTimeNanoTS() < g_NanoTSStop);

    /*
     * Cleanup.
     */
    ASMAtomicWriteBool(&g_fShutdown, true);
    RTTEST_CHECK_RC(g_hTest, RTSemEventMultiSignal(g_hThreadsBlockingEvt), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, RTSemEventMultiSignal(g_hThreadsStartedEvt), VINF_SUCCESS);
    RTThreadSleep(RTTestErrorCount(g_hTest) == cErrors ? 0 : 50);

    for (uint32_t i = 0; i < cThreads; i++)
    {
        RTTEST_CHECK_RC(g_hTest, RTCritSectDelete(&g_aCritSects[i]), VINF_SUCCESS);
        RTTEST_CHECK_RC(g_hTest, RTSemRWDestroy(g_ahSemRWs[i]), VINF_SUCCESS);
        RTTEST_CHECK_RC(g_hTest, RTSemMutexDestroy(g_ahSemMtxes[i]), VINF_SUCCESS);
    }
    RTTEST_CHECK_RC(g_hTest, RTSemEventDestroy(g_hSemEvt), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, RTSemEventMultiDestroy(g_hSemEvtMulti), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, RTSemEventMultiDestroy(g_hThreadsStartedEvt), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, RTSemEventMultiDestroy(g_hThreadsBlockingEvt), VINF_SUCCESS);

    /* Reap any stragglers the main wait gave up on. */
    testWaitForThreads(TEST_SMALL_TIMEOUT, false);

    /*
     * Print results if applicable.
     */
    if (cSecs)
    {
        if (fLoops)
            RTTestPrintf(g_hTest, RTTESTLVL_ALWAYS, "cLoops=%u cDeadlocks=%u (%u%%)\n",
                         cLoops, cDeadlocks, cLoops ? cDeadlocks * 100 / cLoops : 0);
        else
            RTTestPrintf(g_hTest, RTTESTLVL_ALWAYS, "cPasses=%u\n", cPasses);
    }
}
422
423
/**
 * Thread function for testDd1 - critsect deadlock detection.
 *
 * Each thread enters its own critsect (recursively for even indexes), then
 * tries to enter the next thread's critsect so the threads form a cycle.
 * The g_iDeadlockThread waits for all the others to block first and then
 * expects the validator to return VERR_SEM_LV_DEADLOCK.
 */
static DECLCALLBACK(int) testDd1Thread(RTTHREAD ThreadSelf, void *pvUser)
{
    uintptr_t i = (uintptr_t)pvUser;
    PRTCRITSECT pMine = &g_aCritSects[i];
    PRTCRITSECT pNext = &g_aCritSects[(i + 1) % g_cThreads];

    RTTEST_CHECK_RC_RET(g_hTest, RTCritSectEnter(pMine), VINF_SUCCESS, rcCheck);
    if (!(i & 1))
        RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(pMine), VINF_SUCCESS);    /* recursive enter */
    if (RT_SUCCESS(testWaitForCritSectToBeOwned(pNext)))
    {
        int rc;
        if (i != g_iDeadlockThread)
        {
            /* Ordinary member of the cycle: just block on the next critsect. */
            testThreadBlocking();
            RTTEST_CHECK_RC(g_hTest, rc = RTCritSectEnter(pNext), VINF_SUCCESS);
        }
        else
        {
            /* The designated deadlocker closes the cycle once everyone sleeps. */
            RTTEST_CHECK_RC_OK(g_hTest, rc = testWaitForAllOtherThreadsToSleep(RTTHREADSTATE_CRITSECT, 1));
            if (RT_SUCCESS(rc))
                RTTEST_CHECK_RC(g_hTest, rc = RTCritSectEnter(pNext), VERR_SEM_LV_DEADLOCK);
        }
        RTTEST_CHECK(g_hTest, RTThreadGetState(RTThreadSelf()) == RTTHREADSTATE_RUNNING);
        if (RT_SUCCESS(rc))
            RTTEST_CHECK_RC(g_hTest, rc = RTCritSectLeave(pNext), VINF_SUCCESS);
    }
    if (!(i & 1))
        RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(pMine), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(pMine), VINF_SUCCESS);
    return VINF_SUCCESS;
}
456
457
458static void testDd1(uint32_t cThreads, uint32_t cSecs)
459{
460 testIt(cThreads, cSecs, false, testDd1Thread, "deadlock, critsect");
461}
462
463
/**
 * Thread function for testDd2 - read-write semaphore deadlock detection.
 *
 * Odd threads take their own semaphore in write mode (recursively for every
 * 4th thread), even ones in read mode.  Each then tries to write-lock the
 * next thread's semaphore, forming a cycle.  The g_iDeadlockThread expects
 * VERR_SEM_LV_DEADLOCK, or VERR_SEM_LV_ILLEGAL_UPGRADE in the single thread
 * case (hNext == hMine, held in read mode).
 */
static DECLCALLBACK(int) testDd2Thread(RTTHREAD ThreadSelf, void *pvUser)
{
    uintptr_t i = (uintptr_t)pvUser;
    RTSEMRW hMine = g_ahSemRWs[i];
    RTSEMRW hNext = g_ahSemRWs[(i + 1) % g_cThreads];
    int rc;

    if (i & 1)
    {
        RTTEST_CHECK_RC_RET(g_hTest, RTSemRWRequestWrite(hMine, RT_INDEFINITE_WAIT), VINF_SUCCESS, rcCheck);
        if ((i & 3) == 3)
            RTTEST_CHECK_RC(g_hTest, RTSemRWRequestWrite(hMine, RT_INDEFINITE_WAIT), VINF_SUCCESS);   /* recursion */
    }
    else
        RTTEST_CHECK_RC_RET(g_hTest, RTSemRWRequestRead(hMine, RT_INDEFINITE_WAIT), VINF_SUCCESS, rcCheck);
    if (RT_SUCCESS(testWaitForSemRWToBeOwned(hNext)))
    {
        if (i != g_iDeadlockThread)
        {
            /* Ordinary member of the cycle: block on the next semaphore. */
            testThreadBlocking();
            RTTEST_CHECK_RC(g_hTest, rc = RTSemRWRequestWrite(hNext, RT_INDEFINITE_WAIT), VINF_SUCCESS);
        }
        else
        {
            /* The designated deadlocker closes the cycle once everyone sleeps. */
            RTTEST_CHECK_RC_OK(g_hTest, rc = testWaitForAllOtherThreadsToSleep(RTTHREADSTATE_RW_WRITE, 1));
            if (RT_SUCCESS(rc))
            {
                if (g_cThreads > 1)
                    RTTEST_CHECK_RC(g_hTest, rc = RTSemRWRequestWrite(hNext, RT_INDEFINITE_WAIT), VERR_SEM_LV_DEADLOCK);
                else
                    RTTEST_CHECK_RC(g_hTest, rc = RTSemRWRequestWrite(hNext, RT_INDEFINITE_WAIT), VERR_SEM_LV_ILLEGAL_UPGRADE);
            }
        }
        RTTEST_CHECK(g_hTest, RTThreadGetState(RTThreadSelf()) == RTTHREADSTATE_RUNNING);
        if (RT_SUCCESS(rc))
            RTTEST_CHECK_RC(g_hTest, RTSemRWReleaseWrite(hNext), VINF_SUCCESS);
    }
    if (i & 1)
    {
        if ((i & 3) == 3)
            RTTEST_CHECK_RC(g_hTest, RTSemRWReleaseWrite(hMine), VINF_SUCCESS);
        RTTEST_CHECK_RC(g_hTest, RTSemRWReleaseWrite(hMine), VINF_SUCCESS);
    }
    else
        RTTEST_CHECK_RC(g_hTest, RTSemRWReleaseRead(hMine), VINF_SUCCESS);
    RTTEST_CHECK(g_hTest, RTThreadGetState(RTThreadSelf()) == RTTHREADSTATE_RUNNING);
    return VINF_SUCCESS;
}
512
513
514static void testDd2(uint32_t cThreads, uint32_t cSecs)
515{
516 testIt(cThreads, cSecs, false, testDd2Thread, "deadlock, read-write");
517}
518
519
/**
 * Thread function for testDd3 - read-write semaphore deadlock race.
 *
 * All threads hold their own semaphore (write for odd, read for even
 * indexes) and repeatedly try to write-lock the next one with a timeout,
 * counting loops and validator verdicts until g_NanoTSStop.
 */
static DECLCALLBACK(int) testDd3Thread(RTTHREAD ThreadSelf, void *pvUser)
{
    uintptr_t i = (uintptr_t)pvUser;
    RTSEMRW hMine = g_ahSemRWs[i];
    RTSEMRW hNext = g_ahSemRWs[(i + 1) % g_cThreads];
    int rc;

    if (i & 1)
        RTTEST_CHECK_RC_RET(g_hTest, RTSemRWRequestWrite(hMine, RT_INDEFINITE_WAIT), VINF_SUCCESS, rcCheck);
    else
        RTTEST_CHECK_RC_RET(g_hTest, RTSemRWRequestRead(hMine, RT_INDEFINITE_WAIT), VINF_SUCCESS, rcCheck);
    if (RT_SUCCESS(testWaitForSemRWToBeOwned(hNext)))
    {
        do
        {
            rc = RTSemRWRequestWrite(hNext, TEST_SMALL_TIMEOUT);
            /* Only success and the two validator verdicts are acceptable. */
            if (rc != VINF_SUCCESS && rc != VERR_SEM_LV_DEADLOCK && rc != VERR_SEM_LV_ILLEGAL_UPGRADE)
            {
                RTTestFailed(g_hTest, "#%u: RTSemRWRequestWrite -> %Rrc\n", i, rc);
                break;
            }
            if (RT_SUCCESS(rc))
            {
                RTTEST_CHECK_RC(g_hTest, rc = RTSemRWReleaseWrite(hNext), VINF_SUCCESS);
                if (RT_FAILURE(rc))
                    break;
            }
            else
                ASMAtomicIncU32(&g_cDeadlocks);
            ASMAtomicIncU32(&g_cLoops);
        } while (RTTimeNanoTS() < g_NanoTSStop);
    }
    if (i & 1)
        RTTEST_CHECK_RC(g_hTest, RTSemRWReleaseWrite(hMine), VINF_SUCCESS);
    else
        RTTEST_CHECK_RC(g_hTest, RTSemRWReleaseRead(hMine), VINF_SUCCESS);
    RTTEST_CHECK(g_hTest, RTThreadGetState(RTThreadSelf()) == RTTHREADSTATE_RUNNING);
    return VINF_SUCCESS;
}
559
560
561static void testDd3(uint32_t cThreads, uint32_t cSecs)
562{
563 testIt(cThreads, cSecs, true, testDd3Thread, "deadlock, read-write race");
564}
565
566
567static DECLCALLBACK(int) testDd4Thread(RTTHREAD ThreadSelf, void *pvUser)
568{
569 uintptr_t i = (uintptr_t)pvUser;
570 RTSEMRW hMine = g_ahSemRWs[i];
571 RTSEMRW hNext = g_ahSemRWs[(i + 1) % g_cThreads];
572
573 do
574 {
575 int rc1 = (i & 1 ? RTSemRWRequestWrite : RTSemRWRequestRead)(hMine, TEST_SMALL_TIMEOUT); /* ugly ;-) */
576 RTTEST_CHECK(g_hTest, RTThreadGetState(RTThreadSelf()) == RTTHREADSTATE_RUNNING);
577 if (rc1 != VINF_SUCCESS && rc1 != VERR_SEM_LV_DEADLOCK && rc1 != VERR_SEM_LV_ILLEGAL_UPGRADE)
578 {
579 RTTestFailed(g_hTest, "#%u: RTSemRWRequest%s(hMine,) -> %Rrc\n", i, i & 1 ? "Write" : "read", rc1);
580 break;
581 }
582 if (RT_SUCCESS(rc1))
583 {
584 for (unsigned iInner = 0; iInner < 4; iInner++)
585 {
586 int rc2 = RTSemRWRequestWrite(hNext, TEST_SMALL_TIMEOUT);
587 if (rc2 != VINF_SUCCESS && rc2 != VERR_SEM_LV_DEADLOCK && rc2 != VERR_SEM_LV_ILLEGAL_UPGRADE)
588 {
589 RTTestFailed(g_hTest, "#%u: RTSemRWRequestWrite -> %Rrc\n", i, rc2);
590 break;
591 }
592 if (RT_SUCCESS(rc2))
593 {
594 RTTEST_CHECK_RC(g_hTest, rc2 = RTSemRWReleaseWrite(hNext), VINF_SUCCESS);
595 if (RT_FAILURE(rc2))
596 break;
597 }
598 else
599 ASMAtomicIncU32(&g_cDeadlocks);
600 ASMAtomicIncU32(&g_cLoops);
601 }
602
603 RTTEST_CHECK_RC(g_hTest, rc1 = (i & 1 ? RTSemRWReleaseWrite : RTSemRWReleaseRead)(hMine), VINF_SUCCESS);
604 RTTEST_CHECK(g_hTest, RTThreadGetState(RTThreadSelf()) == RTTHREADSTATE_RUNNING);
605 if (RT_FAILURE(rc1))
606 break;
607 }
608 else
609 ASMAtomicIncU32(&g_cDeadlocks);
610 ASMAtomicIncU32(&g_cLoops);
611 } while (RTTimeNanoTS() < g_NanoTSStop);
612
613 return VINF_SUCCESS;
614}
615
616
617static void testDd4(uint32_t cThreads, uint32_t cSecs)
618{
619 testIt(cThreads, cSecs, true, testDd4Thread, "deadlock, read-write race v2");
620}
621
622
/**
 * Thread function for testDd5 - mutex deadlock detection.
 *
 * Same shape as testDd1 but with mutex semaphores: each thread takes its own
 * mutex (recursively for odd indexes), then requests the next thread's
 * mutex, forming a cycle.  The g_iDeadlockThread expects the validator to
 * return VERR_SEM_LV_DEADLOCK.
 */
static DECLCALLBACK(int) testDd5Thread(RTTHREAD ThreadSelf, void *pvUser)
{
    uintptr_t i = (uintptr_t)pvUser;
    RTSEMMUTEX hMine = g_ahSemMtxes[i];
    RTSEMMUTEX hNext = g_ahSemMtxes[(i + 1) % g_cThreads];

    RTTEST_CHECK_RC_RET(g_hTest, RTSemMutexRequest(hMine, RT_INDEFINITE_WAIT), VINF_SUCCESS, rcCheck);
    if (i & 1)
        RTTEST_CHECK_RC(g_hTest, RTSemMutexRequest(hMine, RT_INDEFINITE_WAIT), VINF_SUCCESS);   /* recursion */
    if (RT_SUCCESS(testWaitForSemMutexToBeOwned(hNext)))
    {
        int rc;
        if (i != g_iDeadlockThread)
        {
            /* Ordinary member of the cycle: block on the next mutex. */
            testThreadBlocking();
            RTTEST_CHECK_RC(g_hTest, rc = RTSemMutexRequest(hNext, RT_INDEFINITE_WAIT), VINF_SUCCESS);
        }
        else
        {
            /* The designated deadlocker closes the cycle once everyone sleeps. */
            RTTEST_CHECK_RC_OK(g_hTest, rc = testWaitForAllOtherThreadsToSleep(RTTHREADSTATE_MUTEX, 1));
            if (RT_SUCCESS(rc))
                RTTEST_CHECK_RC(g_hTest, rc = RTSemMutexRequest(hNext, RT_INDEFINITE_WAIT), VERR_SEM_LV_DEADLOCK);
        }
        RTTEST_CHECK(g_hTest, RTThreadGetState(RTThreadSelf()) == RTTHREADSTATE_RUNNING);
        if (RT_SUCCESS(rc))
            RTTEST_CHECK_RC(g_hTest, rc = RTSemMutexRelease(hNext), VINF_SUCCESS);
    }
    if (i & 1)
        RTTEST_CHECK_RC(g_hTest, RTSemMutexRelease(hMine), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, RTSemMutexRelease(hMine), VINF_SUCCESS);
    return VINF_SUCCESS;
}
655
656
657static void testDd5(uint32_t cThreads, uint32_t cSecs)
658{
659 testIt(cThreads, cSecs, false, testDd5Thread, "deadlock, mutex");
660}
661
662
/**
 * Thread function for testDd6 - event semaphore deadlock detection.
 *
 * The critsect cycle is set up as in testDd1, but instead of closing it the
 * g_iDeadlockThread registers every thread as a signaller of g_hSemEvt and
 * waits on it: with all registered signallers blocked in the cycle, the
 * validator is expected to return VERR_SEM_LV_DEADLOCK.  A signal/wait pair
 * afterwards verifies the semaphore still works normally.
 */
static DECLCALLBACK(int) testDd6Thread(RTTHREAD ThreadSelf, void *pvUser)
{
    uintptr_t i = (uintptr_t)pvUser;
    PRTCRITSECT pMine = &g_aCritSects[i];
    PRTCRITSECT pNext = &g_aCritSects[(i + 1) % g_cThreads];

    RTTEST_CHECK_RC_RET(g_hTest, RTCritSectEnter(pMine), VINF_SUCCESS, rcCheck);
    if (i & 1)
        RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(pMine), VINF_SUCCESS);    /* recursion */
    if (RT_SUCCESS(testWaitForCritSectToBeOwned(pNext)))
    {
        int rc;
        if (i != g_iDeadlockThread)
        {
            /* Ordinary member of the cycle: block on the next critsect. */
            testThreadBlocking();
            RTTEST_CHECK_RC(g_hTest, rc = RTCritSectEnter(pNext), VINF_SUCCESS);
            RTTEST_CHECK(g_hTest, RTThreadGetState(RTThreadSelf()) == RTTHREADSTATE_RUNNING);
            if (RT_SUCCESS(rc))
                RTTEST_CHECK_RC(g_hTest, rc = RTCritSectLeave(pNext), VINF_SUCCESS);
        }
        else
        {
            RTTEST_CHECK_RC_OK(g_hTest, rc = testWaitForAllOtherThreadsToSleep(RTTHREADSTATE_CRITSECT, 1));
            if (RT_SUCCESS(rc))
            {
                /* Register all threads as the only permitted signallers. */
                RTSemEventSetSignaller(g_hSemEvt, g_ahThreads[0]);
                for (uint32_t iThread = 1; iThread < g_cThreads; iThread++)
                    RTSemEventAddSignaller(g_hSemEvt, g_ahThreads[iThread]);
                RTTEST_CHECK(g_hTest, RTThreadGetState(RTThreadSelf()) == RTTHREADSTATE_RUNNING);
                /* All signallers (including us) are in the cycle -> deadlock verdict. */
                RTTEST_CHECK_RC(g_hTest, RTSemEventWait(g_hSemEvt, TEST_SMALL_TIMEOUT), VERR_SEM_LV_DEADLOCK);
                RTTEST_CHECK(g_hTest, RTThreadGetState(RTThreadSelf()) == RTTHREADSTATE_RUNNING);
                /* A signalled event must still be waitable without complaints. */
                RTTEST_CHECK_RC(g_hTest, RTSemEventSignal(g_hSemEvt), VINF_SUCCESS);
                RTTEST_CHECK(g_hTest, RTThreadGetState(RTThreadSelf()) == RTTHREADSTATE_RUNNING);
                RTTEST_CHECK_RC(g_hTest, RTSemEventWait(g_hSemEvt, TEST_SMALL_TIMEOUT), VINF_SUCCESS);
                RTTEST_CHECK(g_hTest, RTThreadGetState(RTThreadSelf()) == RTTHREADSTATE_RUNNING);
                /* Reset the signaller restriction for the next pass. */
                RTSemEventSetSignaller(g_hSemEvt, NIL_RTTHREAD);
            }
        }
        RTTEST_CHECK(g_hTest, RTThreadGetState(RTThreadSelf()) == RTTHREADSTATE_RUNNING);
    }
    if (i & 1)
        RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(pMine), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(pMine), VINF_SUCCESS);
    return VINF_SUCCESS;
}
708
709
710static void testDd6(uint32_t cThreads, uint32_t cSecs)
711{
712 testIt(cThreads, cSecs, false, testDd6Thread, "deadlock, event");
713}
714
715
/**
 * Thread function for testDd7 - multiple release event semaphore deadlock
 * detection.
 *
 * Identical in structure to testDd6Thread, but exercising the
 * RTSemEventMulti* signaller APIs on g_hSemEvtMulti instead.
 */
static DECLCALLBACK(int) testDd7Thread(RTTHREAD ThreadSelf, void *pvUser)
{
    uintptr_t i = (uintptr_t)pvUser;
    PRTCRITSECT pMine = &g_aCritSects[i];
    PRTCRITSECT pNext = &g_aCritSects[(i + 1) % g_cThreads];

    RTTEST_CHECK_RC_RET(g_hTest, RTCritSectEnter(pMine), VINF_SUCCESS, rcCheck);
    if (i & 1)
        RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(pMine), VINF_SUCCESS);    /* recursion */
    if (RT_SUCCESS(testWaitForCritSectToBeOwned(pNext)))
    {
        int rc;
        if (i != g_iDeadlockThread)
        {
            /* Ordinary member of the cycle: block on the next critsect. */
            testThreadBlocking();
            RTTEST_CHECK_RC(g_hTest, rc = RTCritSectEnter(pNext), VINF_SUCCESS);
            RTTEST_CHECK(g_hTest, RTThreadGetState(RTThreadSelf()) == RTTHREADSTATE_RUNNING);
            if (RT_SUCCESS(rc))
                RTTEST_CHECK_RC(g_hTest, rc = RTCritSectLeave(pNext), VINF_SUCCESS);
        }
        else
        {
            RTTEST_CHECK_RC_OK(g_hTest, rc = testWaitForAllOtherThreadsToSleep(RTTHREADSTATE_CRITSECT, 1));
            if (RT_SUCCESS(rc))
            {
                /* Register all threads as the only permitted signallers. */
                RTSemEventMultiSetSignaller(g_hSemEvtMulti, g_ahThreads[0]);
                for (uint32_t iThread = 1; iThread < g_cThreads; iThread++)
                    RTSemEventMultiAddSignaller(g_hSemEvtMulti, g_ahThreads[iThread]);
                RTTEST_CHECK(g_hTest, RTThreadGetState(RTThreadSelf()) == RTTHREADSTATE_RUNNING);
                RTTEST_CHECK_RC(g_hTest, RTSemEventMultiReset(g_hSemEvtMulti), VINF_SUCCESS);
                /* All signallers (including us) are in the cycle -> deadlock verdict. */
                RTTEST_CHECK_RC(g_hTest, RTSemEventMultiWait(g_hSemEvtMulti, TEST_SMALL_TIMEOUT), VERR_SEM_LV_DEADLOCK);
                RTTEST_CHECK(g_hTest, RTThreadGetState(RTThreadSelf()) == RTTHREADSTATE_RUNNING);
                /* A signalled event must still be waitable without complaints. */
                RTTEST_CHECK_RC(g_hTest, RTSemEventMultiSignal(g_hSemEvtMulti), VINF_SUCCESS);
                RTTEST_CHECK(g_hTest, RTThreadGetState(RTThreadSelf()) == RTTHREADSTATE_RUNNING);
                RTTEST_CHECK_RC(g_hTest, RTSemEventMultiWait(g_hSemEvtMulti, TEST_SMALL_TIMEOUT), VINF_SUCCESS);
                RTTEST_CHECK(g_hTest, RTThreadGetState(RTThreadSelf()) == RTTHREADSTATE_RUNNING);
                /* Reset the signaller restriction for the next pass. */
                RTSemEventMultiSetSignaller(g_hSemEvtMulti, NIL_RTTHREAD);
            }
        }
        RTTEST_CHECK(g_hTest, RTThreadGetState(RTThreadSelf()) == RTTHREADSTATE_RUNNING);
    }
    if (i & 1)
        RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(pMine), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(pMine), VINF_SUCCESS);
    return VINF_SUCCESS;
}
762
763
764static void testDd7(uint32_t cThreads, uint32_t cSecs)
765{
766 testIt(cThreads, cSecs, false, testDd7Thread, "deadlock, event multi");
767}
768
769
770static void testLo1(void)
771{
772 RTTestSub(g_hTest, "locking order basics");
773
774 /* Initialize the critsections, the first 4 has their own classes, the rest
775 use the same class and relies on the sub-class mechanism for ordering. */
776 for (unsigned i = 0; i < RT_ELEMENTS(g_ahClasses); i++)
777 {
778 if (i <= 3)
779 {
780 RTTEST_CHECK_RC_RETV(g_hTest, RTLockValidatorClassCreate(&g_ahClasses[i], true /*fAutodidact*/, RT_SRC_POS, "testLo1-%u", i), VINF_SUCCESS);
781 RTTEST_CHECK_RC_RETV(g_hTest, RTCritSectInitEx(&g_aCritSects[i], 0, g_ahClasses[i], RTLOCKVAL_SUB_CLASS_NONE, "RTCritSectLO-Auto"), VINF_SUCCESS);
782 RTTEST_CHECK_RETV(g_hTest, RTLockValidatorClassRetain(g_ahClasses[i]) == 3);
783 RTTEST_CHECK_RETV(g_hTest, RTLockValidatorClassRelease(g_ahClasses[i]) == 2);
784 }
785 else
786 {
787 g_ahClasses[i] = RTLockValidatorClassForSrcPos(RT_SRC_POS, "testLo1-%u", i);
788 RTTEST_CHECK_RETV(g_hTest, g_ahClasses[i] != NIL_RTLOCKVALCLASS);
789 RTTEST_CHECK_RETV(g_hTest, i == 4 || g_ahClasses[i] == g_ahClasses[i - 1]);
790 if (i == 4)
791 RTTEST_CHECK_RC_RETV(g_hTest, RTCritSectInitEx(&g_aCritSects[i], 0, g_ahClasses[i], RTLOCKVAL_SUB_CLASS_NONE, "RTCritSectLO-None"), VINF_SUCCESS);
792 else if (i == 5)
793 RTTEST_CHECK_RC_RETV(g_hTest, RTCritSectInitEx(&g_aCritSects[i], 0, g_ahClasses[i], RTLOCKVAL_SUB_CLASS_ANY, "RTCritSectLO-Any"), VINF_SUCCESS);
794 else
795 RTTEST_CHECK_RC_RETV(g_hTest, RTCritSectInitEx(&g_aCritSects[i], 0, g_ahClasses[i], RTLOCKVAL_SUB_CLASS_USER + i, "RTCritSectLO-User"), VINF_SUCCESS);
796
797 RTTEST_CHECK_RETV(g_hTest, RTLockValidatorClassRetain(g_ahClasses[i]) == 1 + (i - 4 + 1) * 2); /* released in cleanup. */
798 }
799 }
800
801 /* Enter the first 4 critsects in ascending order and thereby defining
802 this as a valid lock order. */
803 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[0]), VINF_SUCCESS);
804 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[1]), VINF_SUCCESS);
805 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[2]), VINF_SUCCESS);
806 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[3]), VINF_SUCCESS);
807
808 /* Now, leave and re-enter the critsects in a way that should break the
809 order and check that we get the appropriate response. */
810 int rc;
811 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[0]), VINF_SUCCESS);
812 RTTEST_CHECK_RC(g_hTest, rc = RTCritSectEnter(&g_aCritSects[0]), VERR_SEM_LV_WRONG_ORDER);
813 if (RT_SUCCESS(rc))
814 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[0]), VINF_SUCCESS);
815
816 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[1]), VINF_SUCCESS);
817 RTTEST_CHECK_RC(g_hTest, rc = RTCritSectEnter(&g_aCritSects[1]), VERR_SEM_LV_WRONG_ORDER);
818 if (RT_SUCCESS(rc))
819 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[1]), VINF_SUCCESS);
820
821 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[2]), VINF_SUCCESS);
822 RTTEST_CHECK_RC(g_hTest, rc= RTCritSectEnter(&g_aCritSects[2]), VERR_SEM_LV_WRONG_ORDER);
823 if (RT_SUCCESS(rc))
824 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[2]), VINF_SUCCESS);
825
826 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[3]), VINF_SUCCESS);
827
828 /* Check that recursion isn't subject to order checks. */
829 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[0]), VINF_SUCCESS);
830 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[1]), VINF_SUCCESS);
831 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[2]), VINF_SUCCESS);
832 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[3]), VINF_SUCCESS);
833 RTTEST_CHECK_RC(g_hTest, rc = RTCritSectEnter(&g_aCritSects[0]), VINF_SUCCESS);
834 if (RT_SUCCESS(rc))
835 {
836 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[0]), VINF_SUCCESS);
837 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[3]), VINF_SUCCESS);
838 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[2]), VINF_SUCCESS);
839 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[1]), VINF_SUCCESS);
840
841 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[3]), VINF_SUCCESS);
842 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[2]), VINF_SUCCESS);
843 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[1]), VINF_SUCCESS);
844 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[0]), VINF_SUCCESS);
845 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[0]), VINF_SUCCESS);
846 }
847 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[3]), VINF_SUCCESS);
848 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[2]), VINF_SUCCESS);
849 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[1]), VINF_SUCCESS);
850 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[0]), VINF_SUCCESS);
851
852 /* Enable strict release order for class 2 and check that violations
853 are caught. */
854 RTTEST_CHECK_RC(g_hTest, RTLockValidatorClassEnforceStrictReleaseOrder(g_ahClasses[2], true), VINF_SUCCESS);
855
856 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[0]), VINF_SUCCESS);
857 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[1]), VINF_SUCCESS);
858 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[2]), VINF_SUCCESS);
859 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[3]), VINF_SUCCESS);
860
861 RTTEST_CHECK_RC(g_hTest, rc = RTCritSectLeave(&g_aCritSects[2]), VERR_SEM_LV_WRONG_RELEASE_ORDER);
862 if (RT_FAILURE(rc))
863 {
864 /* applies to recursions as well */
865 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[2]), VINF_SUCCESS);
866 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[3]), VINF_SUCCESS);
867 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[2]), VERR_SEM_LV_WRONG_RELEASE_ORDER);
868 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[3]), VINF_SUCCESS);
869 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[2]), VINF_SUCCESS);
870 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[2]), VERR_SEM_LV_WRONG_RELEASE_ORDER);
871 }
872 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[0]), VINF_SUCCESS);
873 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[1]), VINF_SUCCESS);
874 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[3]), VINF_SUCCESS);
875 if (RT_FAILURE(rc))
876 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[2]), VINF_SUCCESS);
877
878 /* Test that sub-class order works (4 = NONE, 5 = ANY, 6+ = USER). */
879 uint32_t cErrorsBefore = RTTestErrorCount(g_hTest);
880 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[7]), VINF_SUCCESS);
881
882 RTTEST_CHECK_RC(g_hTest, rc = RTCritSectEnter(&g_aCritSects[4]), VERR_SEM_LV_WRONG_ORDER);
883 if (RT_SUCCESS(rc))
884 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[4]), VINF_SUCCESS);
885
886 RTTEST_CHECK_RC(g_hTest, rc = RTCritSectEnter(&g_aCritSects[5]), VINF_SUCCESS);
887 if (RT_SUCCESS(rc))
888 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[5]), VINF_SUCCESS);
889
890 RTTEST_CHECK_RC(g_hTest, rc = RTCritSectEnter(&g_aCritSects[8]), VINF_SUCCESS);
891 if (RT_SUCCESS(rc))
892 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[8]), VINF_SUCCESS);
893
894 RTTEST_CHECK_RC(g_hTest, rc = RTCritSectEnter(&g_aCritSects[6]), VERR_SEM_LV_WRONG_ORDER);
895 if (RT_SUCCESS(rc))
896 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[6]), VINF_SUCCESS);
897
898 RTTEST_CHECK_RC(g_hTest, rc = RTCritSectEnter(&g_aCritSects[7]), VINF_SUCCESS);
899 if (RT_SUCCESS(rc))
900 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[7]), VINF_SUCCESS);
901 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[7]), VINF_SUCCESS);
902
903 /* Check that NONE trumps both ANY and USER. */
904 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[4]), VINF_SUCCESS);
905
906 RTTEST_CHECK_RC(g_hTest, rc = RTCritSectEnter(&g_aCritSects[5]), VERR_SEM_LV_WRONG_ORDER);
907 if (RT_SUCCESS(rc))
908 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[5]), VINF_SUCCESS);
909
910 RTTEST_CHECK_RC(g_hTest, rc = RTCritSectEnter(&g_aCritSects[6]), VERR_SEM_LV_WRONG_ORDER);
911 if (RT_SUCCESS(rc))
912 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[6]), VINF_SUCCESS);
913
914 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[4]), VINF_SUCCESS);
915
916 /* Take all the locks using sub-classes. */
917 if (cErrorsBefore == RTTestErrorCount(g_hTest))
918 {
919 bool fSavedQuiet = RTLockValidatorSetQuiet(true);
920 for (uint32_t i = 6; i < RT_ELEMENTS(g_aCritSects); i++)
921 {
922 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[i]), VINF_SUCCESS);
923 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[4]), VERR_SEM_LV_WRONG_ORDER);
924 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[5]), VINF_SUCCESS);
925 }
926 for (uint32_t i = 6; i < RT_ELEMENTS(g_aCritSects); i++)
927 {
928 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[i]), VINF_SUCCESS);
929 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[5]), VINF_SUCCESS);
930 }
931 RTLockValidatorSetQuiet(fSavedQuiet);
932 }
933
934 /* Work up some hash statistics and trigger a violation to show them. */
935 for (uint32_t i = 0; i < 10240; i++)
936 {
937 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[0]), VINF_SUCCESS);
938 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[1]), VINF_SUCCESS);
939 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[2]), VINF_SUCCESS);
940 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[3]), VINF_SUCCESS);
941 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[5]), VINF_SUCCESS);
942
943 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[5]), VINF_SUCCESS);
944 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[3]), VINF_SUCCESS);
945 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[2]), VINF_SUCCESS);
946 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[1]), VINF_SUCCESS);
947 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[0]), VINF_SUCCESS);
948 }
949 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[5]), VINF_SUCCESS);
950 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[3]), VERR_SEM_LV_WRONG_ORDER);
951 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[5]), VINF_SUCCESS);
952
953 /* clean up */
954 for (unsigned i = 0; i < RT_ELEMENTS(g_ahClasses); i++)
955 {
956 if (i <= 3)
957 RTTEST_CHECK(g_hTest, RTLockValidatorClassRelease(g_ahClasses[i]) == 1);
958 else
959 {
960 uint32_t cExpect = 1 + (RT_ELEMENTS(g_ahClasses) - i) * 2 - 1;
961 RTTEST_CHECK(g_hTest, RTLockValidatorClassRelease(g_ahClasses[i]) == cExpect);
962 }
963 g_ahClasses[i] = NIL_RTLOCKVALCLASS;
964 RTTEST_CHECK_RC_RETV(g_hTest, RTCritSectDelete(&g_aCritSects[i]), VINF_SUCCESS);
965 }
966}
967
968
/**
 * Tests lock order validation on critical sections: order learning,
 * order violation detection, recursion, and strict release order.
 */
static void testLo2(void)
{
    RTTestSub(g_hTest, "locking order, critsect");

    /* Initialize 4 critsects, each with its own autodidact validator class
       (the class learns valid lock orders as they are established). */
    for (unsigned i = 0; i < 4; i++)
    {
        RTTEST_CHECK_RC_RETV(g_hTest, RTLockValidatorClassCreate(&g_ahClasses[i], true /*fAutodidact*/, RT_SRC_POS, "testLo2-%u", i), VINF_SUCCESS);
        RTTEST_CHECK_RC_RETV(g_hTest, RTCritSectInitEx(&g_aCritSects[i], 0, g_ahClasses[i], RTLOCKVAL_SUB_CLASS_NONE, "RTCritSectLO"), VINF_SUCCESS);
        /* Reference count sanity: retain reports 3, release brings it back
           to 2 (presumably creator + critsect hold one reference each). */
        RTTEST_CHECK_RETV(g_hTest, RTLockValidatorClassRetain(g_ahClasses[i]) == 3);
        RTTEST_CHECK_RETV(g_hTest, RTLockValidatorClassRelease(g_ahClasses[i]) == 2);
    }

    /* Check the sub-class API: the setter returns the previous sub-class. */
    RTTEST_CHECK(g_hTest, RTCritSectSetSubClass(&g_aCritSects[0], RTLOCKVAL_SUB_CLASS_ANY) == RTLOCKVAL_SUB_CLASS_NONE);
    RTTEST_CHECK(g_hTest, RTCritSectSetSubClass(&g_aCritSects[0], RTLOCKVAL_SUB_CLASS_NONE) == RTLOCKVAL_SUB_CLASS_ANY);

    /* Enter the first 4 critsects in ascending order, thereby defining
       this as a valid lock order. */
    RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[0]), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[1]), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[2]), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[3]), VINF_SUCCESS);

    /* Now, leave and re-enter the critsects in a way that should break the
       order and check that we get the appropriate response.  The guarded
       leave is defensive: it only runs if the enter unexpectedly succeeded. */
    int rc;
    RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[0]), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, rc = RTCritSectEnter(&g_aCritSects[0]), VERR_SEM_LV_WRONG_ORDER);
    if (RT_SUCCESS(rc))
        RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[0]), VINF_SUCCESS);

    /* Check that recursion isn't subject to order checks. */
    RTTEST_CHECK_RC(g_hTest, rc = RTCritSectEnter(&g_aCritSects[1]), VINF_SUCCESS);
    if (RT_SUCCESS(rc))
        RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[1]), VINF_SUCCESS);

    /* Enable strict release order for class 2 and check that violations
       are caught - including recursion. */
    RTTEST_CHECK_RC(g_hTest, RTLockValidatorClassEnforceStrictReleaseOrder(g_ahClasses[2], true), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[2]), VINF_SUCCESS); /* start recursion */
    RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[3]), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[2]), VERR_SEM_LV_WRONG_RELEASE_ORDER);
    RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[3]), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[2]), VINF_SUCCESS); /* end recursion */
    RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[2]), VERR_SEM_LV_WRONG_RELEASE_ORDER);
    RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[1]), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[3]), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[2]), VINF_SUCCESS);

    /* clean up: drop the last class reference and delete the critsects. */
    for (unsigned i = 0; i < 4; i++)
    {
        RTTEST_CHECK(g_hTest, RTLockValidatorClassRelease(g_ahClasses[i]) == 1);
        g_ahClasses[i] = NIL_RTLOCKVALCLASS;
        RTTEST_CHECK_RC_RETV(g_hTest, RTCritSectDelete(&g_aCritSects[i]), VINF_SUCCESS);
    }
}
1027
1028
/**
 * Tests lock order validation on read-write semaphores, mixing shared
 * (read) and exclusive (write) acquisitions, recursion and strict
 * release order enforcement.
 */
static void testLo3(void)
{
    RTTestSub(g_hTest, "locking order, read-write");

    /* Initialize 6 read-write semaphores, each with its own autodidact
       validator class. */
    for (unsigned i = 0; i < 6; i++)
    {
        RTTEST_CHECK_RC_RETV(g_hTest, RTLockValidatorClassCreate(&g_ahClasses[i], true /*fAutodidact*/, RT_SRC_POS, "testLo3-%u", i), VINF_SUCCESS);
        RTTEST_CHECK_RC_RETV(g_hTest, RTSemRWCreateEx(&g_ahSemRWs[i], 0, g_ahClasses[i], RTLOCKVAL_SUB_CLASS_NONE, "hSemRW-Lo3-%u", i), VINF_SUCCESS);
        /* Reference count sanity: retain reports 4, release brings it back
           to 3 (presumably the RW semaphore holds more than one reference;
           compare with the critsect tests where retain reports 3). */
        RTTEST_CHECK_RETV(g_hTest, RTLockValidatorClassRetain(g_ahClasses[i]) == 4);
        RTTEST_CHECK_RETV(g_hTest, RTLockValidatorClassRelease(g_ahClasses[i]) == 3);
    }

    /* Check the sub-class API: the setter returns the previous sub-class. */
    RTTEST_CHECK(g_hTest, RTSemRWSetSubClass(g_ahSemRWs[0], RTLOCKVAL_SUB_CLASS_ANY) == RTLOCKVAL_SUB_CLASS_NONE);
    RTTEST_CHECK(g_hTest, RTSemRWSetSubClass(g_ahSemRWs[0], RTLOCKVAL_SUB_CLASS_NONE) == RTLOCKVAL_SUB_CLASS_ANY);

    /* Take all 6 semaphores in ascending order (mixed read/write), thereby
       defining this as a valid lock order. */
    RTTEST_CHECK_RC(g_hTest, RTSemRWRequestWrite(g_ahSemRWs[0], RT_INDEFINITE_WAIT), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, RTSemRWRequestRead( g_ahSemRWs[1], RT_INDEFINITE_WAIT), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, RTSemRWRequestRead( g_ahSemRWs[2], RT_INDEFINITE_WAIT), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, RTSemRWRequestWrite(g_ahSemRWs[3], RT_INDEFINITE_WAIT), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, RTSemRWRequestWrite(g_ahSemRWs[4], RT_INDEFINITE_WAIT), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, RTSemRWRequestWrite(g_ahSemRWs[5], RT_INDEFINITE_WAIT), VINF_SUCCESS);

    /* Now, release and re-acquire the semaphores in a way that should break
       the order and check that we get the appropriate response.  The guarded
       release is defensive: it only runs if the request unexpectedly
       succeeded. */
    int rc;
    RTTEST_CHECK_RC(g_hTest, RTSemRWReleaseWrite(g_ahSemRWs[0]), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, rc = RTSemRWRequestWrite(g_ahSemRWs[0], RT_INDEFINITE_WAIT), VERR_SEM_LV_WRONG_ORDER);
    if (RT_SUCCESS(rc))
        RTTEST_CHECK_RC(g_hTest, RTSemRWReleaseWrite(g_ahSemRWs[0]), VINF_SUCCESS);

    RTTEST_CHECK_RC(g_hTest, RTSemRWReleaseRead(g_ahSemRWs[1]), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, rc = RTSemRWRequestRead(g_ahSemRWs[1], RT_INDEFINITE_WAIT), VERR_SEM_LV_WRONG_ORDER);
    if (RT_SUCCESS(rc))
        RTTEST_CHECK_RC(g_hTest, RTSemRWReleaseRead(g_ahSemRWs[1]), VINF_SUCCESS);

    /* Check that recursion isn't subject to order checks. */
    RTTEST_CHECK_RC(g_hTest, rc = RTSemRWRequestRead(g_ahSemRWs[2], RT_INDEFINITE_WAIT), VINF_SUCCESS);
    if (RT_SUCCESS(rc))
        RTTEST_CHECK_RC(g_hTest, RTSemRWReleaseRead(g_ahSemRWs[2]), VINF_SUCCESS);
    RTTEST_CHECK(g_hTest, RTSemRWGetReadCount(g_ahSemRWs[2]) == 1);

    RTTEST_CHECK_RC(g_hTest, rc = RTSemRWRequestWrite(g_ahSemRWs[3], RT_INDEFINITE_WAIT), VINF_SUCCESS);
    if (RT_SUCCESS(rc))
        RTTEST_CHECK_RC(g_hTest, RTSemRWReleaseWrite(g_ahSemRWs[3]), VINF_SUCCESS);
    RTTEST_CHECK(g_hTest, RTSemRWGetWriteRecursion(g_ahSemRWs[3]) == 1);

    /* Enable strict release order for class 2 and 3, then check that violations
       are caught - including recursion. */
    RTTEST_CHECK_RC(g_hTest, RTLockValidatorClassEnforceStrictReleaseOrder(g_ahClasses[2], true), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, RTLockValidatorClassEnforceStrictReleaseOrder(g_ahClasses[3], true), VINF_SUCCESS);

    RTTEST_CHECK_RC(g_hTest, RTSemRWRequestRead( g_ahSemRWs[2], RT_INDEFINITE_WAIT), VINF_SUCCESS); /* start recursion */
    RTTEST_CHECK(   g_hTest, RTSemRWGetReadCount(g_ahSemRWs[2]) == 2);
    RTTEST_CHECK_RC(g_hTest, RTSemRWRequestWrite(g_ahSemRWs[3], RT_INDEFINITE_WAIT), VINF_SUCCESS);
    RTTEST_CHECK(   g_hTest, RTSemRWGetWriteRecursion(g_ahSemRWs[3]) == 2);
    RTTEST_CHECK_RC(g_hTest, RTSemRWRequestRead( g_ahSemRWs[4], RT_INDEFINITE_WAIT), VINF_SUCCESS); /* (mixed) */

    /* Out-of-order releases must fail and must not change the counts. */
    RTTEST_CHECK_RC(g_hTest, RTSemRWReleaseRead( g_ahSemRWs[2]), VERR_SEM_LV_WRONG_RELEASE_ORDER);
    RTTEST_CHECK_RC(g_hTest, RTSemRWReleaseWrite(g_ahSemRWs[3]), VERR_SEM_LV_WRONG_RELEASE_ORDER);
    RTTEST_CHECK(   g_hTest, RTSemRWGetWriteRecursion(g_ahSemRWs[3]) == 2);
    RTTEST_CHECK(   g_hTest, RTSemRWGetReadCount(g_ahSemRWs[2]) == 2);
    RTTEST_CHECK_RC(g_hTest, RTSemRWReleaseRead( g_ahSemRWs[4]), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, RTSemRWReleaseWrite(g_ahSemRWs[3]), VINF_SUCCESS);
    RTTEST_CHECK(   g_hTest, RTSemRWGetWriteRecursion(g_ahSemRWs[3]) == 1);
    RTTEST_CHECK_RC(g_hTest, RTSemRWReleaseRead( g_ahSemRWs[2]), VINF_SUCCESS); /* end recursion */
    RTTEST_CHECK(   g_hTest, RTSemRWGetReadCount(g_ahSemRWs[2]) == 1);

    RTTEST_CHECK_RC(g_hTest, RTSemRWReleaseRead( g_ahSemRWs[2]), VERR_SEM_LV_WRONG_RELEASE_ORDER);
    RTTEST_CHECK(g_hTest, RTSemRWGetReadCount(g_ahSemRWs[2]) == 1);
    RTTEST_CHECK_RC(g_hTest, RTSemRWReleaseWrite(g_ahSemRWs[3]), VERR_SEM_LV_WRONG_RELEASE_ORDER);
    RTTEST_CHECK(g_hTest, RTSemRWGetWriteRecursion(g_ahSemRWs[3]) == 1);
    RTTEST_CHECK_RC(g_hTest, RTSemRWReleaseWrite(g_ahSemRWs[5]), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, RTSemRWReleaseWrite(g_ahSemRWs[4]), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, RTSemRWReleaseWrite(g_ahSemRWs[3]), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, RTSemRWReleaseRead( g_ahSemRWs[2]), VINF_SUCCESS);

    /* clean up: drop the class references and destroy the semaphores. */
    for (unsigned i = 0; i < 6; i++)
    {
        RTTEST_CHECK(g_hTest, RTLockValidatorClassRelease(g_ahClasses[i]) == 2);
        g_ahClasses[i] = NIL_RTLOCKVALCLASS;
        RTTEST_CHECK_RC_RETV(g_hTest, RTSemRWDestroy(g_ahSemRWs[i]), VINF_SUCCESS);
        g_ahSemRWs[i] = NIL_RTSEMRW;
    }
}
1118
1119
/**
 * Tests lock order validation on mutex semaphores, including recursion
 * and strict release order enforcement.
 */
static void testLo4(void)
{
    RTTestSub(g_hTest, "locking order, mutex");

    /* Initialize 4 mutex semaphores, each with its own autodidact
       validator class. */
    for (unsigned i = 0; i < 4; i++)
    {
        RTTEST_CHECK_RC_RETV(g_hTest, RTLockValidatorClassCreate(&g_ahClasses[i], true /*fAutodidact*/, RT_SRC_POS, "testLo4-%u", i), VINF_SUCCESS);
        RTTEST_CHECK_RC_RETV(g_hTest, RTSemMutexCreateEx(&g_ahSemMtxes[i], 0, g_ahClasses[i], RTLOCKVAL_SUB_CLASS_NONE, "RTSemMutexLo4-%u", i), VINF_SUCCESS);
        /* Reference count sanity: retain reports 3, release brings it back
           to 2 (presumably creator + mutex hold one reference each). */
        RTTEST_CHECK_RETV(g_hTest, RTLockValidatorClassRetain(g_ahClasses[i]) == 3);
        RTTEST_CHECK_RETV(g_hTest, RTLockValidatorClassRelease(g_ahClasses[i]) == 2);
    }

    /* Check the sub-class API: the setter returns the previous sub-class. */
    RTTEST_CHECK(g_hTest, RTSemMutexSetSubClass(g_ahSemMtxes[0], RTLOCKVAL_SUB_CLASS_ANY) == RTLOCKVAL_SUB_CLASS_NONE);
    RTTEST_CHECK(g_hTest, RTSemMutexSetSubClass(g_ahSemMtxes[0], RTLOCKVAL_SUB_CLASS_NONE) == RTLOCKVAL_SUB_CLASS_ANY);

    /* Take the 4 mutexes in ascending order, thereby defining this as a
       valid lock order. */
    RTTEST_CHECK_RC(g_hTest, RTSemMutexRequest(g_ahSemMtxes[0], RT_INDEFINITE_WAIT), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, RTSemMutexRequest(g_ahSemMtxes[1], RT_INDEFINITE_WAIT), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, RTSemMutexRequest(g_ahSemMtxes[2], RT_INDEFINITE_WAIT), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, RTSemMutexRequest(g_ahSemMtxes[3], RT_INDEFINITE_WAIT), VINF_SUCCESS);

    /* Now, release and re-acquire the mutexes in a way that should break the
       order and check that we get the appropriate response.  The guarded
       release is defensive: it only runs if the request unexpectedly
       succeeded. */
    int rc;
    RTTEST_CHECK_RC(g_hTest, RTSemMutexRelease(g_ahSemMtxes[0]), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, rc = RTSemMutexRequest(g_ahSemMtxes[0], RT_INDEFINITE_WAIT), VERR_SEM_LV_WRONG_ORDER);
    if (RT_SUCCESS(rc))
        RTTEST_CHECK_RC(g_hTest, RTSemMutexRelease(g_ahSemMtxes[0]), VINF_SUCCESS);

    /* Check that recursion isn't subject to order checks. */
    RTTEST_CHECK_RC(g_hTest, rc = RTSemMutexRequest(g_ahSemMtxes[1], RT_INDEFINITE_WAIT), VINF_SUCCESS);
    if (RT_SUCCESS(rc))
        RTTEST_CHECK_RC(g_hTest, RTSemMutexRelease(g_ahSemMtxes[1]), VINF_SUCCESS);

    /* Enable strict release order for class 2 and check that violations
       are caught - including recursion. */
    RTTEST_CHECK_RC(g_hTest, RTLockValidatorClassEnforceStrictReleaseOrder(g_ahClasses[2], true), VINF_SUCCESS);

    RTTEST_CHECK_RC(g_hTest, RTSemMutexRequest(g_ahSemMtxes[2], RT_INDEFINITE_WAIT), VINF_SUCCESS); /* start recursion */
    RTTEST_CHECK_RC(g_hTest, RTSemMutexRequest(g_ahSemMtxes[3], RT_INDEFINITE_WAIT), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, RTSemMutexRelease(g_ahSemMtxes[2]), VERR_SEM_LV_WRONG_RELEASE_ORDER);
    RTTEST_CHECK_RC(g_hTest, RTSemMutexRelease(g_ahSemMtxes[3]), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, RTSemMutexRelease(g_ahSemMtxes[2]), VINF_SUCCESS); /* end recursion */

    RTTEST_CHECK_RC(g_hTest, RTSemMutexRelease(g_ahSemMtxes[2]), VERR_SEM_LV_WRONG_RELEASE_ORDER);
    RTTEST_CHECK_RC(g_hTest, RTSemMutexRelease(g_ahSemMtxes[1]), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, RTSemMutexRelease(g_ahSemMtxes[3]), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, RTSemMutexRelease(g_ahSemMtxes[2]), VINF_SUCCESS);

    /* clean up: drop the last class reference and destroy the mutexes. */
    for (unsigned i = 0; i < 4; i++)
    {
        RTTEST_CHECK(g_hTest, RTLockValidatorClassRelease(g_ahClasses[i]) == 1);
        g_ahClasses[i] = NIL_RTLOCKVALCLASS;
        RTTEST_CHECK_RC_RETV(g_hTest, RTSemMutexDestroy(g_ahSemMtxes[i]), VINF_SUCCESS);
    }
}
1180
1181
1182
1183
1184static bool testIsLockValidationCompiledIn(void)
1185{
1186 RTCRITSECT CritSect;
1187 RTTEST_CHECK_RC_OK_RET(g_hTest, RTCritSectInit(&CritSect), false);
1188 RTTEST_CHECK_RC_OK_RET(g_hTest, RTCritSectEnter(&CritSect), false);
1189 bool fRet = CritSect.pValidatorRec
1190 && CritSect.pValidatorRec->hThread == RTThreadSelf();
1191 RTTEST_CHECK_RC_OK_RET(g_hTest, RTCritSectLeave(&CritSect), false);
1192 RTTEST_CHECK_RC_OK_RET(g_hTest, RTCritSectDelete(&CritSect), false);
1193
1194 RTSEMRW hSemRW;
1195 RTTEST_CHECK_RC_OK_RET(g_hTest, RTSemRWCreate(&hSemRW), false);
1196 RTTEST_CHECK_RC_OK_RET(g_hTest, RTSemRWRequestRead(hSemRW, 50), false);
1197 int rc = RTSemRWRequestWrite(hSemRW, 1);
1198 if (rc != VERR_SEM_LV_ILLEGAL_UPGRADE)
1199 fRet = false;
1200 RTTEST_CHECK_RET(g_hTest, RT_FAILURE_NP(rc), false);
1201 RTTEST_CHECK_RC_OK_RET(g_hTest, RTSemRWReleaseRead(hSemRW), false);
1202 RTTEST_CHECK_RC_OK_RET(g_hTest, RTSemRWDestroy(hSemRW), false);
1203
1204#if 0 /** @todo detect it on RTSemMutex... wrong locking order? */
1205 RTSEMMUTEX hSemMtx;
1206 RTTEST_CHECK_RC_OK_RET(g_hTest, RTSemMutexCreate(&hSemRW), false);
1207 RTTEST_CHECK_RC_OK_RET(g_hTest, RTSemMutexRequest(hSemRW, 50), false);
1208 /*??*/
1209 RTTEST_CHECK_RET(g_hTest, RT_FAILURE_NP(rc), false);
1210 RTTEST_CHECK_RC_OK_RET(g_hTest, RTSemRWRelease(hSemRW), false);
1211 RTTEST_CHECK_RC_OK_RET(g_hTest, RTSemRWDestroy(hSemRW), false);
1212#endif
1213
1214 RTSEMEVENT hSemEvt;
1215 RTTEST_CHECK_RC_OK_RET(g_hTest, RTSemEventCreate(&hSemEvt), false);
1216 RTSemEventSetSignaller(hSemEvt, RTThreadSelf());
1217 RTSemEventSetSignaller(hSemEvt, NIL_RTTHREAD);
1218 rc = RTSemEventSignal(hSemEvt);
1219 if (rc != VERR_SEM_LV_NOT_SIGNALLER)
1220 fRet = false;
1221 RTTEST_CHECK_RET(g_hTest, RT_FAILURE_NP(rc), false);
1222 RTTEST_CHECK_RC_OK_RET(g_hTest, RTSemEventDestroy(hSemEvt), false);
1223
1224 RTSEMEVENTMULTI hSemEvtMulti;
1225 RTTEST_CHECK_RC_OK_RET(g_hTest, RTSemEventMultiCreate(&hSemEvtMulti), false);
1226 RTSemEventMultiSetSignaller(hSemEvtMulti, RTThreadSelf());
1227 RTSemEventMultiSetSignaller(hSemEvtMulti, NIL_RTTHREAD);
1228 rc = RTSemEventMultiSignal(hSemEvtMulti);
1229 if (rc != VERR_SEM_LV_NOT_SIGNALLER)
1230 fRet = false;
1231 RTTEST_CHECK_RET(g_hTest, RT_FAILURE_NP(rc), false);
1232 RTTEST_CHECK_RC_OK_RET(g_hTest, RTSemEventMultiDestroy(hSemEvtMulti), false);
1233
1234 return fRet;
1235}
1236
1237
1238int main()
1239{
1240 /*
1241 * Init.
1242 */
1243 int rc = RTTestInitAndCreate("tstRTLockValidator", &g_hTest);
1244 if (rc)
1245 return rc;
1246 RTTestBanner(g_hTest);
1247
1248 RTLockValidatorSetEnabled(true);
1249 RTLockValidatorSetMayPanic(false);
1250 RTLockValidatorSetQuiet(true);
1251 if (!testIsLockValidationCompiledIn())
1252 return RTTestErrorCount(g_hTest) > 0
1253 ? RTTestSummaryAndDestroy(g_hTest)
1254 : RTTestSkipAndDestroy(g_hTest, "deadlock detection is not compiled in");
1255 RTLockValidatorSetQuiet(false);
1256
1257 bool fTestDd = true;
1258 bool fTestLo = true;
1259
1260 /*
1261 * Some initial tests with verbose output (all single pass).
1262 */
1263 if (fTestDd)
1264 {
1265 testDd1(3, 0);
1266 testDd2(1, 0);
1267 testDd2(3, 0);
1268 testDd5(3, 0);
1269 testDd6(3, 0);
1270 testDd7(3, 0);
1271 }
1272 if (fTestLo)
1273 {
1274 testLo1();
1275 testLo2();
1276 testLo3();
1277 testLo4();
1278 }
1279
1280
1281 /*
1282 * If successful, perform more thorough testing without noisy output.
1283 */
1284 if (RTTestErrorCount(g_hTest) == 0)
1285 {
1286 RTLockValidatorSetQuiet(true);
1287
1288 if (fTestDd)
1289 {
1290 testDd1( 2, SECS_SIMPLE_TEST);
1291 testDd1( 3, SECS_SIMPLE_TEST);
1292 testDd1( 7, SECS_SIMPLE_TEST);
1293 testDd1(10, SECS_SIMPLE_TEST);
1294 testDd1(15, SECS_SIMPLE_TEST);
1295 testDd1(30, SECS_SIMPLE_TEST);
1296
1297 testDd2( 1, SECS_SIMPLE_TEST);
1298 testDd2( 2, SECS_SIMPLE_TEST);
1299 testDd2( 3, SECS_SIMPLE_TEST);
1300 testDd2( 7, SECS_SIMPLE_TEST);
1301 testDd2(10, SECS_SIMPLE_TEST);
1302 testDd2(15, SECS_SIMPLE_TEST);
1303 testDd2(30, SECS_SIMPLE_TEST);
1304
1305 testDd3( 2, SECS_SIMPLE_TEST);
1306 testDd3(10, SECS_SIMPLE_TEST);
1307
1308 testDd4( 2, SECS_RACE_TEST);
1309 testDd4( 6, SECS_RACE_TEST);
1310 testDd4(10, SECS_RACE_TEST);
1311 testDd4(30, SECS_RACE_TEST);
1312
1313 testDd5( 2, SECS_RACE_TEST);
1314 testDd5( 3, SECS_RACE_TEST);
1315 testDd5( 7, SECS_RACE_TEST);
1316 testDd5(10, SECS_RACE_TEST);
1317 testDd5(15, SECS_RACE_TEST);
1318 testDd5(30, SECS_RACE_TEST);
1319
1320 testDd6( 2, SECS_SIMPLE_TEST);
1321 testDd6( 3, SECS_SIMPLE_TEST);
1322 testDd6( 7, SECS_SIMPLE_TEST);
1323 testDd6(10, SECS_SIMPLE_TEST);
1324 testDd6(15, SECS_SIMPLE_TEST);
1325 testDd6(30, SECS_SIMPLE_TEST);
1326
1327 testDd7( 2, SECS_SIMPLE_TEST);
1328 testDd7( 3, SECS_SIMPLE_TEST);
1329 testDd7( 7, SECS_SIMPLE_TEST);
1330 testDd7(10, SECS_SIMPLE_TEST);
1331 testDd7(15, SECS_SIMPLE_TEST);
1332 testDd7(30, SECS_SIMPLE_TEST);
1333 }
1334 }
1335
1336 return RTTestSummaryAndDestroy(g_hTest);
1337}
1338
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette