VirtualBox

source: vbox/trunk/src/VBox/Runtime/testcase/tstRTLockValidator.cpp@25704

Last change on this file since 25704 was 25704, checked in by vboxsync, 15 years ago

iprt,pdmcritsect: More flexible lock naming, added RTCritSectSetSubClass and made some RTCritSectInitEx.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 50.8 KB
1/* $Id: tstRTLockValidator.cpp 25704 2010-01-10 20:12:30Z vboxsync $ */
2/** @file
3 * IPRT Testcase - RTLockValidator.
4 */
5
6/*
7 * Copyright (C) 2006-2009 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 *
26 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
27 * Clara, CA 95054 USA or visit http://www.sun.com if you need
28 * additional information or have any questions.
29 */
30
31
32/*******************************************************************************
33* Header Files *
34*******************************************************************************/
35#include <iprt/lockvalidator.h>
36
37#include <iprt/asm.h> /* for return addresses */
38#include <iprt/critsect.h>
39#include <iprt/err.h>
40#include <iprt/semaphore.h>
41#include <iprt/test.h>
42#include <iprt/thread.h>
43#include <iprt/time.h>
44
45
46/*******************************************************************************
47* Defined Constants And Macros *
48*******************************************************************************/
49#define SECS_SIMPLE_TEST 1
50#define SECS_RACE_TEST 3
51#define TEST_SMALL_TIMEOUT ( 10*1000)
52#define TEST_LARGE_TIMEOUT ( 60*1000)
53#define TEST_DEBUG_TIMEOUT (3600*1000)
54
55
56/*******************************************************************************
57* Global Variables *
58*******************************************************************************/
59/** The testcase handle. */
60static RTTEST g_hTest;
61/** Flip this in the debugger to get some peace to single step wild code. */
62bool volatile g_fDoNotSpin = false;
63
64/** Set when the main thread wishes to terminate the test. */
65bool volatile g_fShutdown = false;
66/** The number of threads. */
67static uint32_t g_cThreads;
68static uint32_t g_iDeadlockThread;
69static RTTHREAD g_ahThreads[32];
70static RTLOCKVALCLASS g_ahClasses[32];
71static RTCRITSECT g_aCritSects[32];
72static RTSEMRW g_ahSemRWs[32];
73static RTSEMMUTEX g_ahSemMtxes[32];
74static RTSEMEVENT g_hSemEvt;
75static RTSEMEVENTMULTI g_hSemEvtMulti;
76
77/** Multiple release event semaphore that is signalled by the main thread after
78 * it has started all the threads. */
79static RTSEMEVENTMULTI g_hThreadsStartedEvt;
80
81/** The number of threads that have called testThreadBlocking. */
82static uint32_t volatile g_cThreadsBlocking;
83/** Multiple release event semaphore that is signalled by the last thread to
84 * call testThreadBlocking. testWaitForAllOtherThreadsToSleep waits on this. */
85static RTSEMEVENTMULTI g_hThreadsBlockingEvt;
86
87/** When to stop testing. */
88static uint64_t g_NanoTSStop;
89/** The number of deadlocks. */
90static uint32_t volatile g_cDeadlocks;
91/** The number of loops. */
92static uint32_t volatile g_cLoops;
93
94
95/**
96 * Spin until the callback stops returning VERR_TRY_AGAIN.
97 *
98 * @returns Callback result. VERR_TIMEOUT if too much time elapses.
99 * @param pfnCallback Callback for checking the state.
100 * @param pvWhat Callback parameter.
101 */
102static int testWaitForSomethingToBeOwned(int (*pfnCallback)(void *), void *pvWhat)
103{
104 RTTEST_CHECK(g_hTest, RTThreadGetState(RTThreadSelf()) == RTTHREADSTATE_RUNNING);
105 RTTEST_CHECK_RC_OK(g_hTest, RTSemEventMultiWait(g_hThreadsStartedEvt, TEST_SMALL_TIMEOUT));
106
107 uint64_t u64StartMS = RTTimeMilliTS();
108 for (unsigned iLoop = 0; ; iLoop++)
109 {
110 RTTEST_CHECK_RET(g_hTest, !g_fShutdown, VERR_INTERNAL_ERROR);
111
112 int rc = pfnCallback(pvWhat);
113 if (rc != VERR_TRY_AGAIN/* && !g_fDoNotSpin*/)
114 {
115 RTTEST_CHECK_RC_OK(g_hTest, rc);
116 return rc;
117 }
118
119 uint64_t cMsElapsed = RTTimeMilliTS() - u64StartMS;
120 if (!g_fDoNotSpin)
121 RTTEST_CHECK_RET(g_hTest, cMsElapsed <= TEST_SMALL_TIMEOUT, VERR_TIMEOUT);
122
123 RTTEST_CHECK_RET(g_hTest, !g_fShutdown, VERR_INTERNAL_ERROR);
124 RTThreadSleep(/*g_fDoNotSpin ? TEST_DEBUG_TIMEOUT :*/ iLoop > 256 ? 1 : 0);
125 }
126}
127
128
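/**
 * Callback for testWaitForSomethingToBeOwned that checks whether anybody owns
 * the critical section yet.
 *
 * @returns VINF_SUCCESS if owned, VERR_TRY_AGAIN to keep polling, or
 *          VERR_SEM_DESTROYED if the section has already been deleted.
 * @param   pvWhat      Pointer to the critical section to check.
 */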
129static int testCheckIfCritSectIsOwned(void *pvWhat)
130{
131 PRTCRITSECT pCritSect = (PRTCRITSECT)pvWhat;
132 if (!RTCritSectIsInitialized(pCritSect))
133 return VERR_SEM_DESTROYED;
134 if (RTCritSectIsOwned(pCritSect))
135 return VINF_SUCCESS;
136 return VERR_TRY_AGAIN;
137}
138
139
140static int testWaitForCritSectToBeOwned(PRTCRITSECT pCritSect)
141{
142 return testWaitForSomethingToBeOwned(testCheckIfCritSectIsOwned, pCritSect);
143}
144
145
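/**
 * Callback for testWaitForSomethingToBeOwned that checks whether the
 * read-write semaphore has a writer or any readers yet.
 *
 * @returns VINF_SUCCESS if owned, VERR_TRY_AGAIN to keep polling.
 * @param   pvWhat      The read-write semaphore handle to check.
 */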
146static int testCheckIfSemRWIsOwned(void *pvWhat)
147{
148 RTSEMRW hSemRW = (RTSEMRW)pvWhat;
149 if (RTSemRWGetWriteRecursion(hSemRW) > 0)
150 return VINF_SUCCESS;
151 if (RTSemRWGetReadCount(hSemRW) > 0)
152 return VINF_SUCCESS;
153 return VERR_TRY_AGAIN;
154}
155
156static int testWaitForSemRWToBeOwned(RTSEMRW hSemRW)
157{
158 return testWaitForSomethingToBeOwned(testCheckIfSemRWIsOwned, hSemRW);
159}
160
161
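/**
 * Callback for testWaitForSomethingToBeOwned that checks whether the mutex
 * semaphore is owned yet.
 *
 * @returns VINF_SUCCESS if owned, VERR_TRY_AGAIN to keep polling.
 * @param   pvWhat      The mutex semaphore handle to check.
 */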
162static int testCheckIfSemMutexIsOwned(void *pvWhat)
163{
164 RTSEMMUTEX hSemMutex = (RTSEMMUTEX)pvWhat;
165 if (RTSemMutexIsOwned(hSemMutex))
166 return VINF_SUCCESS;
167 return VERR_TRY_AGAIN;
168}
169
170static int testWaitForSemMutexToBeOwned(RTSEMMUTEX hSemMutex)
171{
172 return testWaitForSomethingToBeOwned(testCheckIfSemMutexIsOwned, hSemMutex);
173}
174
175
176/**
177 * For reducing spin in testWaitForAllOtherThreadsToSleep.
178 */
179static void testThreadBlocking(void)
180{
181 if (ASMAtomicIncU32(&g_cThreadsBlocking) == g_cThreads)
182 RTTEST_CHECK_RC_OK(g_hTest, RTSemEventMultiSignal(g_hThreadsBlockingEvt));
183}
184
185
186/**
187 * Waits for all the other threads to enter sleeping states.
188 *
189 * @returns VINF_SUCCESS on success, VERR_INTERNAL_ERROR on failure.
190 * @param enmDesiredState The desired thread sleep state.
191 * @param cWaitOn The distance to the lock they'll be waiting on;
192 * the lock type is derived from the desired state.
193 * UINT32_MAX means no special lock.
194 */
195static int testWaitForAllOtherThreadsToSleep(RTTHREADSTATE enmDesiredState, uint32_t cWaitOn)
196{
197 testThreadBlocking();
198 RTTEST_CHECK(g_hTest, RTThreadGetState(RTThreadSelf()) == RTTHREADSTATE_RUNNING);
199 RTTEST_CHECK_RC_OK(g_hTest, RTSemEventMultiWait(g_hThreadsBlockingEvt, TEST_SMALL_TIMEOUT));
200
201 RTTHREAD hThreadSelf = RTThreadSelf();
202 for (uint32_t iOuterLoop = 0; ; iOuterLoop++)
203 {
204 uint32_t cMissing = 0;
205 uint32_t cWaitedOn = 0;
206 for (uint32_t i = 0; i < g_cThreads; i++)
207 {
208 RTTHREAD hThread = g_ahThreads[i];
209 if (hThread == NIL_RTTHREAD)
210 cMissing++;
211 else if (hThread != hThreadSelf)
212 {
213 /*
214 * Figure out which lock to wait for.
215 */
216 void *pvLock = NULL;
217 if (cWaitOn != UINT32_MAX)
218 {
219 uint32_t j = (i + cWaitOn) % g_cThreads;
220 switch (enmDesiredState)
221 {
222 case RTTHREADSTATE_CRITSECT: pvLock = &g_aCritSects[j]; break;
223 case RTTHREADSTATE_RW_WRITE:
224 case RTTHREADSTATE_RW_READ: pvLock = g_ahSemRWs[j]; break;
225 case RTTHREADSTATE_MUTEX: pvLock = g_ahSemMtxes[j]; break;
226 default: break;
227 }
228 }
229
230 /*
231 * Wait for this thread.
232 */
233 for (unsigned iLoop = 0; ; iLoop++)
234 {
235 RTTHREADSTATE enmState = RTThreadGetReallySleeping(hThread);
236 if (RTTHREAD_IS_SLEEPING(enmState))
237 {
238 if ( enmState == enmDesiredState
239 && ( !pvLock
240 || ( pvLock == RTLockValidatorQueryBlocking(hThread)
241 && !RTLockValidatorIsBlockedThreadInValidator(hThread) )
242 )
243 && RTThreadGetNativeState(hThread) != RTTHREADNATIVESTATE_RUNNING
244 )
245 break;
246 }
247 else if ( enmState != RTTHREADSTATE_RUNNING
248 && enmState != RTTHREADSTATE_INITIALIZING)
249 return VERR_INTERNAL_ERROR;
250 RTTEST_CHECK_RET(g_hTest, !g_fShutdown, VERR_INTERNAL_ERROR);
251 RTThreadSleep(g_fDoNotSpin ? TEST_DEBUG_TIMEOUT : iOuterLoop + iLoop > 256 ? 1 : 0);
252 RTTEST_CHECK_RET(g_hTest, !g_fShutdown, VERR_INTERNAL_ERROR);
253 cWaitedOn++;
254 }
255 }
256 RTTEST_CHECK_RET(g_hTest, !g_fShutdown, VERR_INTERNAL_ERROR);
257 }
258
259 if (!cMissing && !cWaitedOn)
260 break;
261 RTTEST_CHECK_RET(g_hTest, !g_fShutdown, VERR_INTERNAL_ERROR);
262 RTThreadSleep(g_fDoNotSpin ? TEST_DEBUG_TIMEOUT : iOuterLoop > 256 ? 1 : 0);
263 RTTEST_CHECK_RET(g_hTest, !g_fShutdown, VERR_INTERNAL_ERROR);
264 }
265
266 RTThreadSleep(0); /* fudge factor */
267 RTTEST_CHECK_RET(g_hTest, !g_fShutdown, VERR_INTERNAL_ERROR);
268 return VINF_SUCCESS;
269}
270
271
272/**
273 * Worker that starts the threads.
274 *
275 * @returns Same as RTThreadCreate.
276 * @param cThreads The number of threads to start.
277 * @param pfnThread Thread function.
278 */
279static int testStartThreads(uint32_t cThreads, PFNRTTHREAD pfnThread)
280{
281 RTSemEventMultiReset(g_hThreadsStartedEvt);
282
283 for (uint32_t i = 0; i < RT_ELEMENTS(g_ahThreads); i++)
284 g_ahThreads[i] = NIL_RTTHREAD;
285
286 int rc = VINF_SUCCESS;
287 for (uint32_t i = 0; i < cThreads; i++)
288 {
289 rc = RTThreadCreateF(&g_ahThreads[i], pfnThread, (void *)(uintptr_t)i, 0,
290 RTTHREADTYPE_DEFAULT, RTTHREADFLAGS_WAITABLE, "thread-%02u", i);
291 RTTEST_CHECK_RC_OK(g_hTest, rc);
292 if (RT_FAILURE(rc))
293 break;
294 }
295
296 RTTEST_CHECK_RC_OK_RET(g_hTest, RTSemEventMultiSignal(g_hThreadsStartedEvt), rcCheck);
297 return rc;
298}
299
300
301/**
302 * Worker that waits for the threads to complete.
303 *
304 * @param cMillies How long to wait for each.
305 * @param fStopOnError Whether to stop on error and heed the thread
306 * return status.
307 */
308static void testWaitForThreads(uint32_t cMillies, bool fStopOnError)
309{
310 uint32_t i = RT_ELEMENTS(g_ahThreads);
311 while (i-- > 0)
312 if (g_ahThreads[i] != NIL_RTTHREAD)
313 {
314 int rcThread;
315 int rc2;
316 RTTEST_CHECK_RC_OK(g_hTest, rc2 = RTThreadWait(g_ahThreads[i], cMillies, &rcThread));
317 if (RT_SUCCESS(rc2))
318 g_ahThreads[i] = NIL_RTTHREAD;
319 if (fStopOnError && (RT_FAILURE(rc2) || RT_FAILURE(rcThread)))
320 return;
321 }
322}
323
324
325static void testIt(uint32_t cThreads, uint32_t cSecs, bool fLoops, PFNRTTHREAD pfnThread, const char *pszName)
326{
327 /*
328 * Init test.
329 */
330 if (cSecs > 0)
331 RTTestSubF(g_hTest, "%s, %u threads, %u secs", pszName, cThreads, cSecs);
332 else
333 RTTestSubF(g_hTest, "%s, %u threads, single pass", pszName, cThreads);
334
335 RTTEST_CHECK_RETV(g_hTest, RT_ELEMENTS(g_ahThreads) >= cThreads);
336 RTTEST_CHECK_RETV(g_hTest, RT_ELEMENTS(g_aCritSects) >= cThreads);
337
338 g_cThreads = cThreads;
339 g_fShutdown = false;
340
341 for (uint32_t i = 0; i < cThreads; i++)
342 {
343 RTTEST_CHECK_RC_RETV(g_hTest, RTCritSectInitEx(&g_aCritSects[i], 0 /*fFlags*/, NIL_RTLOCKVALCLASS,
344 RTLOCKVAL_SUB_CLASS_ANY, "RTCritSect"), VINF_SUCCESS);
345 RTTEST_CHECK_RC_RETV(g_hTest, RTSemRWCreate(&g_ahSemRWs[i]), VINF_SUCCESS);
346 RTTEST_CHECK_RC_RETV(g_hTest, RTSemMutexCreate(&g_ahSemMtxes[i]), VINF_SUCCESS);
347 }
348 RTTEST_CHECK_RC_RETV(g_hTest, RTSemEventCreate(&g_hSemEvt), VINF_SUCCESS);
349 RTTEST_CHECK_RC_RETV(g_hTest, RTSemEventMultiCreate(&g_hSemEvtMulti), VINF_SUCCESS);
350 RTTEST_CHECK_RC_RETV(g_hTest, RTSemEventMultiCreate(&g_hThreadsStartedEvt), VINF_SUCCESS);
351 RTTEST_CHECK_RC_RETV(g_hTest, RTSemEventMultiCreate(&g_hThreadsBlockingEvt), VINF_SUCCESS);
352
353 /*
354 * The test loop.
355 */
356 uint32_t cPasses = 0;
357 uint32_t cLoops = 0;
358 uint32_t cDeadlocks = 0;
359 uint32_t cErrors = RTTestErrorCount(g_hTest);
360 uint64_t uStartNS = RTTimeNanoTS();
361 g_NanoTSStop = uStartNS + cSecs * UINT64_C(1000000000);
362 do
363 {
364 g_iDeadlockThread = (cThreads - 1 + cPasses) % cThreads;
365 g_cLoops = 0;
366 g_cDeadlocks = 0;
367 g_cThreadsBlocking = 0;
368 RTTEST_CHECK_RC(g_hTest, RTSemEventMultiReset(g_hThreadsBlockingEvt), VINF_SUCCESS);
369
370 int rc = testStartThreads(cThreads, pfnThread);
371 if (RT_SUCCESS(rc))
372 {
373 testWaitForThreads(TEST_LARGE_TIMEOUT + cSecs*1000, true);
374 if (g_fDoNotSpin && RTTestErrorCount(g_hTest) != cErrors)
375 testWaitForThreads(TEST_DEBUG_TIMEOUT, true);
376 }
377
378 RTTEST_CHECK(g_hTest, !fLoops || g_cLoops > 0);
379 cLoops += g_cLoops;
380 RTTEST_CHECK(g_hTest, !fLoops || g_cDeadlocks > 0);
381 cDeadlocks += g_cDeadlocks;
382 cPasses++;
383 } while ( RTTestErrorCount(g_hTest) == cErrors
384 && !fLoops
385 && RTTimeNanoTS() < g_NanoTSStop);
386
387 /*
388 * Cleanup.
389 */
390 ASMAtomicWriteBool(&g_fShutdown, true);
391 RTTEST_CHECK_RC(g_hTest, RTSemEventMultiSignal(g_hThreadsBlockingEvt), VINF_SUCCESS);
392 RTTEST_CHECK_RC(g_hTest, RTSemEventMultiSignal(g_hThreadsStartedEvt), VINF_SUCCESS);
393 RTThreadSleep(RTTestErrorCount(g_hTest) == cErrors ? 0 : 50);
394
395 for (uint32_t i = 0; i < cThreads; i++)
396 {
397 RTTEST_CHECK_RC(g_hTest, RTCritSectDelete(&g_aCritSects[i]), VINF_SUCCESS);
398 RTTEST_CHECK_RC(g_hTest, RTSemRWDestroy(g_ahSemRWs[i]), VINF_SUCCESS);
399 RTTEST_CHECK_RC(g_hTest, RTSemMutexDestroy(g_ahSemMtxes[i]), VINF_SUCCESS);
400 }
401 RTTEST_CHECK_RC(g_hTest, RTSemEventDestroy(g_hSemEvt), VINF_SUCCESS);
402 RTTEST_CHECK_RC(g_hTest, RTSemEventMultiDestroy(g_hSemEvtMulti), VINF_SUCCESS);
403 RTTEST_CHECK_RC(g_hTest, RTSemEventMultiDestroy(g_hThreadsStartedEvt), VINF_SUCCESS);
404 RTTEST_CHECK_RC(g_hTest, RTSemEventMultiDestroy(g_hThreadsBlockingEvt), VINF_SUCCESS);
405
406 testWaitForThreads(TEST_SMALL_TIMEOUT, false);
407
408 /*
409 * Print results if applicable.
410 */
411 if (cSecs)
412 {
413 if (fLoops)
414 RTTestPrintf(g_hTest, RTTESTLVL_ALWAYS, "cLoops=%u cDeadlocks=%u (%u%%)\n",
415 cLoops, cDeadlocks, cLoops ? cDeadlocks * 100 / cLoops : 0);
416 else
417 RTTestPrintf(g_hTest, RTTESTLVL_ALWAYS, "cPasses=%u\n", cPasses);
418 }
419}
420
421
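/**
 * Thread worker for testDd1: a circular critical section deadlock.
 *
 * Each thread enters its own critsect (recursively for even indexes) and then
 * tries to enter its neighbour's. The designated deadlock thread waits for
 * everybody else to block first and expects VERR_SEM_LV_DEADLOCK when it
 * closes the circle.
 */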
422static DECLCALLBACK(int) testDd1Thread(RTTHREAD ThreadSelf, void *pvUser)
423{
424 uintptr_t i = (uintptr_t)pvUser;
425 PRTCRITSECT pMine = &g_aCritSects[i];
426 PRTCRITSECT pNext = &g_aCritSects[(i + 1) % g_cThreads];
427
428 RTTEST_CHECK_RC_RET(g_hTest, RTCritSectEnter(pMine), VINF_SUCCESS, rcCheck);
429 if (!(i & 1))
430 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(pMine), VINF_SUCCESS);
431 if (RT_SUCCESS(testWaitForCritSectToBeOwned(pNext)))
432 {
433 int rc;
434 if (i != g_iDeadlockThread)
435 {
436 testThreadBlocking();
437 RTTEST_CHECK_RC(g_hTest, rc = RTCritSectEnter(pNext), VINF_SUCCESS);
438 }
439 else
440 {
441 RTTEST_CHECK_RC_OK(g_hTest, rc = testWaitForAllOtherThreadsToSleep(RTTHREADSTATE_CRITSECT, 1));
442 if (RT_SUCCESS(rc))
443 RTTEST_CHECK_RC(g_hTest, rc = RTCritSectEnter(pNext), VERR_SEM_LV_DEADLOCK);
444 }
445 RTTEST_CHECK(g_hTest, RTThreadGetState(RTThreadSelf()) == RTTHREADSTATE_RUNNING);
446 if (RT_SUCCESS(rc))
447 RTTEST_CHECK_RC(g_hTest, rc = RTCritSectLeave(pNext), VINF_SUCCESS);
448 }
449 if (!(i & 1))
450 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(pMine), VINF_SUCCESS);
451 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(pMine), VINF_SUCCESS);
452 return VINF_SUCCESS;
453}
454
455
456static void testDd1(uint32_t cThreads, uint32_t cSecs)
457{
458 testIt(cThreads, cSecs, false, testDd1Thread, "deadlock, critsect");
459}
460
461
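/**
 * Thread worker for testDd2: the read-write semaphore variation of the
 * deadlock circle. Odd threads take their own semaphore for writing
 * (recursively for every fourth), even threads for reading; the deadlock
 * thread expects VERR_SEM_LV_DEADLOCK, or VERR_SEM_LV_ILLEGAL_UPGRADE in the
 * single thread case.
 */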
462static DECLCALLBACK(int) testDd2Thread(RTTHREAD ThreadSelf, void *pvUser)
463{
464 uintptr_t i = (uintptr_t)pvUser;
465 RTSEMRW hMine = g_ahSemRWs[i];
466 RTSEMRW hNext = g_ahSemRWs[(i + 1) % g_cThreads];
467 int rc;
468
469 if (i & 1)
470 {
471 RTTEST_CHECK_RC_RET(g_hTest, RTSemRWRequestWrite(hMine, RT_INDEFINITE_WAIT), VINF_SUCCESS, rcCheck);
472 if ((i & 3) == 3)
473 RTTEST_CHECK_RC(g_hTest, RTSemRWRequestWrite(hMine, RT_INDEFINITE_WAIT), VINF_SUCCESS);
474 }
475 else
476 RTTEST_CHECK_RC_RET(g_hTest, RTSemRWRequestRead(hMine, RT_INDEFINITE_WAIT), VINF_SUCCESS, rcCheck);
477 if (RT_SUCCESS(testWaitForSemRWToBeOwned(hNext)))
478 {
479 if (i != g_iDeadlockThread)
480 {
481 testThreadBlocking();
482 RTTEST_CHECK_RC(g_hTest, rc = RTSemRWRequestWrite(hNext, RT_INDEFINITE_WAIT), VINF_SUCCESS);
483 }
484 else
485 {
486 RTTEST_CHECK_RC_OK(g_hTest, rc = testWaitForAllOtherThreadsToSleep(RTTHREADSTATE_RW_WRITE, 1));
487 if (RT_SUCCESS(rc))
488 {
489 if (g_cThreads > 1)
490 RTTEST_CHECK_RC(g_hTest, rc = RTSemRWRequestWrite(hNext, RT_INDEFINITE_WAIT), VERR_SEM_LV_DEADLOCK);
491 else
492 RTTEST_CHECK_RC(g_hTest, rc = RTSemRWRequestWrite(hNext, RT_INDEFINITE_WAIT), VERR_SEM_LV_ILLEGAL_UPGRADE);
493 }
494 }
495 RTTEST_CHECK(g_hTest, RTThreadGetState(RTThreadSelf()) == RTTHREADSTATE_RUNNING);
496 if (RT_SUCCESS(rc))
497 RTTEST_CHECK_RC(g_hTest, RTSemRWReleaseWrite(hNext), VINF_SUCCESS);
498 }
499 if (i & 1)
500 {
501 if ((i & 3) == 3)
502 RTTEST_CHECK_RC(g_hTest, RTSemRWReleaseWrite(hMine), VINF_SUCCESS);
503 RTTEST_CHECK_RC(g_hTest, RTSemRWReleaseWrite(hMine), VINF_SUCCESS);
504 }
505 else
506 RTTEST_CHECK_RC(g_hTest, RTSemRWReleaseRead(hMine), VINF_SUCCESS);
507 RTTEST_CHECK(g_hTest, RTThreadGetState(RTThreadSelf()) == RTTHREADSTATE_RUNNING);
508 return VINF_SUCCESS;
509}
510
511
512static void testDd2(uint32_t cThreads, uint32_t cSecs)
513{
514 testIt(cThreads, cSecs, false, testDd2Thread, "deadlock, read-write");
515}
516
517
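/**
 * Thread worker for testDd3: read-write deadlock race. Holds its own
 * semaphore (write for odd, read for even threads) and repeatedly requests
 * the neighbour's for writing with a small timeout, counting loops and
 * VERR_SEM_LV_DEADLOCK verdicts until the test time is up.
 */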
518static DECLCALLBACK(int) testDd3Thread(RTTHREAD ThreadSelf, void *pvUser)
519{
520 uintptr_t i = (uintptr_t)pvUser;
521 RTSEMRW hMine = g_ahSemRWs[i];
522 RTSEMRW hNext = g_ahSemRWs[(i + 1) % g_cThreads];
523 int rc;
524
525 if (i & 1)
526 RTTEST_CHECK_RC_RET(g_hTest, RTSemRWRequestWrite(hMine, RT_INDEFINITE_WAIT), VINF_SUCCESS, rcCheck);
527 else
528 RTTEST_CHECK_RC_RET(g_hTest, RTSemRWRequestRead(hMine, RT_INDEFINITE_WAIT), VINF_SUCCESS, rcCheck);
529 if (RT_SUCCESS(testWaitForSemRWToBeOwned(hNext)))
530 {
531 do
532 {
533 rc = RTSemRWRequestWrite(hNext, TEST_SMALL_TIMEOUT);
534 if (rc != VINF_SUCCESS && rc != VERR_SEM_LV_DEADLOCK && rc != VERR_SEM_LV_ILLEGAL_UPGRADE)
535 {
536 RTTestFailed(g_hTest, "#%u: RTSemRWRequestWrite -> %Rrc\n", i, rc);
537 break;
538 }
539 if (RT_SUCCESS(rc))
540 {
541 RTTEST_CHECK_RC(g_hTest, rc = RTSemRWReleaseWrite(hNext), VINF_SUCCESS);
542 if (RT_FAILURE(rc))
543 break;
544 }
545 else
546 ASMAtomicIncU32(&g_cDeadlocks);
547 ASMAtomicIncU32(&g_cLoops);
548 } while (RTTimeNanoTS() < g_NanoTSStop);
549 }
550 if (i & 1)
551 RTTEST_CHECK_RC(g_hTest, RTSemRWReleaseWrite(hMine), VINF_SUCCESS);
552 else
553 RTTEST_CHECK_RC(g_hTest, RTSemRWReleaseRead(hMine), VINF_SUCCESS);
554 RTTEST_CHECK(g_hTest, RTThreadGetState(RTThreadSelf()) == RTTHREADSTATE_RUNNING);
555 return VINF_SUCCESS;
556}
557
558
559static void testDd3(uint32_t cThreads, uint32_t cSecs)
560{
561 testIt(cThreads, cSecs, true, testDd3Thread, "deadlock, read-write race");
562}
563
564
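/**
 * Thread worker for testDd4: a second read-write race where both the own
 * semaphore (write for odd, read for even threads) and the neighbour's are
 * acquired with small timeouts in a loop, counting loops and deadlock
 * verdicts until the test time is up.
 */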
565static DECLCALLBACK(int) testDd4Thread(RTTHREAD ThreadSelf, void *pvUser)
566{
567 uintptr_t i = (uintptr_t)pvUser;
568 RTSEMRW hMine = g_ahSemRWs[i];
569 RTSEMRW hNext = g_ahSemRWs[(i + 1) % g_cThreads];
570
571 do
572 {
573 int rc1 = (i & 1 ? RTSemRWRequestWrite : RTSemRWRequestRead)(hMine, TEST_SMALL_TIMEOUT); /* ugly ;-) */
574 RTTEST_CHECK(g_hTest, RTThreadGetState(RTThreadSelf()) == RTTHREADSTATE_RUNNING);
575 if (rc1 != VINF_SUCCESS && rc1 != VERR_SEM_LV_DEADLOCK && rc1 != VERR_SEM_LV_ILLEGAL_UPGRADE)
576 {
577 RTTestFailed(g_hTest, "#%u: RTSemRWRequest%s(hMine,) -> %Rrc\n", i, i & 1 ? "Write" : "Read", rc1);
578 break;
579 }
580 if (RT_SUCCESS(rc1))
581 {
582 for (unsigned iInner = 0; iInner < 4; iInner++)
583 {
584 int rc2 = RTSemRWRequestWrite(hNext, TEST_SMALL_TIMEOUT);
585 if (rc2 != VINF_SUCCESS && rc2 != VERR_SEM_LV_DEADLOCK && rc2 != VERR_SEM_LV_ILLEGAL_UPGRADE)
586 {
587 RTTestFailed(g_hTest, "#%u: RTSemRWRequestWrite -> %Rrc\n", i, rc2);
588 break;
589 }
590 if (RT_SUCCESS(rc2))
591 {
592 RTTEST_CHECK_RC(g_hTest, rc2 = RTSemRWReleaseWrite(hNext), VINF_SUCCESS);
593 if (RT_FAILURE(rc2))
594 break;
595 }
596 else
597 ASMAtomicIncU32(&g_cDeadlocks);
598 ASMAtomicIncU32(&g_cLoops);
599 }
600
601 RTTEST_CHECK_RC(g_hTest, rc1 = (i & 1 ? RTSemRWReleaseWrite : RTSemRWReleaseRead)(hMine), VINF_SUCCESS);
602 RTTEST_CHECK(g_hTest, RTThreadGetState(RTThreadSelf()) == RTTHREADSTATE_RUNNING);
603 if (RT_FAILURE(rc1))
604 break;
605 }
606 else
607 ASMAtomicIncU32(&g_cDeadlocks);
608 ASMAtomicIncU32(&g_cLoops);
609 } while (RTTimeNanoTS() < g_NanoTSStop);
610
611 return VINF_SUCCESS;
612}
613
614
615static void testDd4(uint32_t cThreads, uint32_t cSecs)
616{
617 testIt(cThreads, cSecs, true, testDd4Thread, "deadlock, read-write race v2");
618}
619
620
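/**
 * Thread worker for testDd5: the mutex semaphore variation of the deadlock
 * circle. Each thread takes its own mutex (recursively for odd indexes) and
 * then requests its neighbour's; the deadlock thread expects
 * VERR_SEM_LV_DEADLOCK.
 */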
621static DECLCALLBACK(int) testDd5Thread(RTTHREAD ThreadSelf, void *pvUser)
622{
623 uintptr_t i = (uintptr_t)pvUser;
624 RTSEMMUTEX hMine = g_ahSemMtxes[i];
625 RTSEMMUTEX hNext = g_ahSemMtxes[(i + 1) % g_cThreads];
626
627 RTTEST_CHECK_RC_RET(g_hTest, RTSemMutexRequest(hMine, RT_INDEFINITE_WAIT), VINF_SUCCESS, rcCheck);
628 if (i & 1)
629 RTTEST_CHECK_RC(g_hTest, RTSemMutexRequest(hMine, RT_INDEFINITE_WAIT), VINF_SUCCESS);
630 if (RT_SUCCESS(testWaitForSemMutexToBeOwned(hNext)))
631 {
632 int rc;
633 if (i != g_iDeadlockThread)
634 {
635 testThreadBlocking();
636 RTTEST_CHECK_RC(g_hTest, rc = RTSemMutexRequest(hNext, RT_INDEFINITE_WAIT), VINF_SUCCESS);
637 }
638 else
639 {
640 RTTEST_CHECK_RC_OK(g_hTest, rc = testWaitForAllOtherThreadsToSleep(RTTHREADSTATE_MUTEX, 1));
641 if (RT_SUCCESS(rc))
642 RTTEST_CHECK_RC(g_hTest, rc = RTSemMutexRequest(hNext, RT_INDEFINITE_WAIT), VERR_SEM_LV_DEADLOCK);
643 }
644 RTTEST_CHECK(g_hTest, RTThreadGetState(RTThreadSelf()) == RTTHREADSTATE_RUNNING);
645 if (RT_SUCCESS(rc))
646 RTTEST_CHECK_RC(g_hTest, rc = RTSemMutexRelease(hNext), VINF_SUCCESS);
647 }
648 if (i & 1)
649 RTTEST_CHECK_RC(g_hTest, RTSemMutexRelease(hMine), VINF_SUCCESS);
650 RTTEST_CHECK_RC(g_hTest, RTSemMutexRelease(hMine), VINF_SUCCESS);
651 return VINF_SUCCESS;
652}
653
654
655static void testDd5(uint32_t cThreads, uint32_t cSecs)
656{
657 testIt(cThreads, cSecs, false, testDd5Thread, "deadlock, mutex");
658}
659
660
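/**
 * Thread worker for testDd6: like testDd1, except that the deadlock thread
 * registers all test threads as signallers of g_hSemEvt and then waits on it
 * while the potential signallers are blocked in the critsect circle,
 * expecting VERR_SEM_LV_DEADLOCK. It then signals the event itself and waits
 * again to cover the success path.
 */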
661static DECLCALLBACK(int) testDd6Thread(RTTHREAD ThreadSelf, void *pvUser)
662{
663 uintptr_t i = (uintptr_t)pvUser;
664 PRTCRITSECT pMine = &g_aCritSects[i];
665 PRTCRITSECT pNext = &g_aCritSects[(i + 1) % g_cThreads];
666
667 RTTEST_CHECK_RC_RET(g_hTest, RTCritSectEnter(pMine), VINF_SUCCESS, rcCheck);
668 if (i & 1)
669 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(pMine), VINF_SUCCESS);
670 if (RT_SUCCESS(testWaitForCritSectToBeOwned(pNext)))
671 {
672 int rc;
673 if (i != g_iDeadlockThread)
674 {
675 testThreadBlocking();
676 RTTEST_CHECK_RC(g_hTest, rc = RTCritSectEnter(pNext), VINF_SUCCESS);
677 RTTEST_CHECK(g_hTest, RTThreadGetState(RTThreadSelf()) == RTTHREADSTATE_RUNNING);
678 if (RT_SUCCESS(rc))
679 RTTEST_CHECK_RC(g_hTest, rc = RTCritSectLeave(pNext), VINF_SUCCESS);
680 }
681 else
682 {
683 RTTEST_CHECK_RC_OK(g_hTest, rc = testWaitForAllOtherThreadsToSleep(RTTHREADSTATE_CRITSECT, 1));
684 if (RT_SUCCESS(rc))
685 {
686 RTSemEventSetSignaller(g_hSemEvt, g_ahThreads[0]);
687 for (uint32_t iThread = 1; iThread < g_cThreads; iThread++)
688 RTSemEventAddSignaller(g_hSemEvt, g_ahThreads[iThread]);
689 RTTEST_CHECK(g_hTest, RTThreadGetState(RTThreadSelf()) == RTTHREADSTATE_RUNNING);
690 RTTEST_CHECK_RC(g_hTest, RTSemEventWait(g_hSemEvt, TEST_SMALL_TIMEOUT), VERR_SEM_LV_DEADLOCK);
691 RTTEST_CHECK(g_hTest, RTThreadGetState(RTThreadSelf()) == RTTHREADSTATE_RUNNING);
692 RTTEST_CHECK_RC(g_hTest, RTSemEventSignal(g_hSemEvt), VINF_SUCCESS);
693 RTTEST_CHECK(g_hTest, RTThreadGetState(RTThreadSelf()) == RTTHREADSTATE_RUNNING);
694 RTTEST_CHECK_RC(g_hTest, RTSemEventWait(g_hSemEvt, TEST_SMALL_TIMEOUT), VINF_SUCCESS);
695 RTTEST_CHECK(g_hTest, RTThreadGetState(RTThreadSelf()) == RTTHREADSTATE_RUNNING);
696 RTSemEventSetSignaller(g_hSemEvt, NIL_RTTHREAD);
697 }
698 }
699 RTTEST_CHECK(g_hTest, RTThreadGetState(RTThreadSelf()) == RTTHREADSTATE_RUNNING);
700 }
701 if (i & 1)
702 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(pMine), VINF_SUCCESS);
703 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(pMine), VINF_SUCCESS);
704 return VINF_SUCCESS;
705}
706
707
708static void testDd6(uint32_t cThreads, uint32_t cSecs)
709{
710 testIt(cThreads, cSecs, false, testDd6Thread, "deadlock, event");
711}
712
713
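/**
 * Thread worker for testDd7: the multiple release event semaphore variation
 * of testDd6, using g_hSemEvtMulti.
 */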
714static DECLCALLBACK(int) testDd7Thread(RTTHREAD ThreadSelf, void *pvUser)
715{
716 uintptr_t i = (uintptr_t)pvUser;
717 PRTCRITSECT pMine = &g_aCritSects[i];
718 PRTCRITSECT pNext = &g_aCritSects[(i + 1) % g_cThreads];
719
720 RTTEST_CHECK_RC_RET(g_hTest, RTCritSectEnter(pMine), VINF_SUCCESS, rcCheck);
721 if (i & 1)
722 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(pMine), VINF_SUCCESS);
723 if (RT_SUCCESS(testWaitForCritSectToBeOwned(pNext)))
724 {
725 int rc;
726 if (i != g_iDeadlockThread)
727 {
728 testThreadBlocking();
729 RTTEST_CHECK_RC(g_hTest, rc = RTCritSectEnter(pNext), VINF_SUCCESS);
730 RTTEST_CHECK(g_hTest, RTThreadGetState(RTThreadSelf()) == RTTHREADSTATE_RUNNING);
731 if (RT_SUCCESS(rc))
732 RTTEST_CHECK_RC(g_hTest, rc = RTCritSectLeave(pNext), VINF_SUCCESS);
733 }
734 else
735 {
736 RTTEST_CHECK_RC_OK(g_hTest, rc = testWaitForAllOtherThreadsToSleep(RTTHREADSTATE_CRITSECT, 1));
737 if (RT_SUCCESS(rc))
738 {
739 RTSemEventMultiSetSignaller(g_hSemEvtMulti, g_ahThreads[0]);
740 for (uint32_t iThread = 1; iThread < g_cThreads; iThread++)
741 RTSemEventMultiAddSignaller(g_hSemEvtMulti, g_ahThreads[iThread]);
742 RTTEST_CHECK(g_hTest, RTThreadGetState(RTThreadSelf()) == RTTHREADSTATE_RUNNING);
743 RTTEST_CHECK_RC(g_hTest, RTSemEventMultiReset(g_hSemEvtMulti), VINF_SUCCESS);
744 RTTEST_CHECK_RC(g_hTest, RTSemEventMultiWait(g_hSemEvtMulti, TEST_SMALL_TIMEOUT), VERR_SEM_LV_DEADLOCK);
745 RTTEST_CHECK(g_hTest, RTThreadGetState(RTThreadSelf()) == RTTHREADSTATE_RUNNING);
746 RTTEST_CHECK_RC(g_hTest, RTSemEventMultiSignal(g_hSemEvtMulti), VINF_SUCCESS);
747 RTTEST_CHECK(g_hTest, RTThreadGetState(RTThreadSelf()) == RTTHREADSTATE_RUNNING);
748 RTTEST_CHECK_RC(g_hTest, RTSemEventMultiWait(g_hSemEvtMulti, TEST_SMALL_TIMEOUT), VINF_SUCCESS);
749 RTTEST_CHECK(g_hTest, RTThreadGetState(RTThreadSelf()) == RTTHREADSTATE_RUNNING);
750 RTSemEventMultiSetSignaller(g_hSemEvtMulti, NIL_RTTHREAD);
751 }
752 }
753 RTTEST_CHECK(g_hTest, RTThreadGetState(RTThreadSelf()) == RTTHREADSTATE_RUNNING);
754 }
755 if (i & 1)
756 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(pMine), VINF_SUCCESS);
757 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(pMine), VINF_SUCCESS);
758 return VINF_SUCCESS;
759}
760
761
762static void testDd7(uint32_t cThreads, uint32_t cSecs)
763{
764 testIt(cThreads, cSecs, false, testDd7Thread, "deadlock, event multi");
765}
766
767
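/**
 * Tests the basics of lock order validation using critical sections.
 *
 * The first four critsects get their own autodidact classes; the rest share a
 * class derived from the source position and are ordered via sub-classes
 * (NONE, ANY, USER+N). The rough pattern being validated (a sketch, not
 * validator internals):
 *
 *      enter A (class 0), enter B (class 1)   -> order "0 before 1" is learnt
 *      re-enter A while still holding B       -> VERR_SEM_LV_WRONG_ORDER
 *
 * Recursion, strict release order and the sub-class rules are exercised too.
 */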
768static void testLo1(void)
769{
770 RTTestSub(g_hTest, "locking order basics");
771
772 /* Initialize the critsects: the first four have their own classes, the rest
773 use the same class and rely on the sub-class mechanism for ordering. */
774 for (unsigned i = 0; i < RT_ELEMENTS(g_ahClasses); i++)
775 {
776 if (i <= 3)
777 {
778 RTTEST_CHECK_RC_RETV(g_hTest, RTLockValidatorClassCreate(&g_ahClasses[i], true /*fAutodidact*/, RT_SRC_POS, "testLo1-%u", i), VINF_SUCCESS);
779 RTTEST_CHECK_RC_RETV(g_hTest, RTCritSectInitEx(&g_aCritSects[i], 0, g_ahClasses[i], RTLOCKVAL_SUB_CLASS_NONE, "RTCritSectLO-Auto"), VINF_SUCCESS);
780 RTTEST_CHECK_RETV(g_hTest, RTLockValidatorClassRetain(g_ahClasses[i]) == 3);
781 RTTEST_CHECK_RETV(g_hTest, RTLockValidatorClassRelease(g_ahClasses[i]) == 2);
782 }
783 else
784 {
785 g_ahClasses[i] = RTLockValidatorClassForSrcPos(RT_SRC_POS);
786 RTTEST_CHECK_RETV(g_hTest, g_ahClasses[i] != NIL_RTLOCKVALCLASS);
787 RTTEST_CHECK_RETV(g_hTest, i == 4 || g_ahClasses[i] == g_ahClasses[i - 1]);
788 if (i == 4)
789 RTTEST_CHECK_RC_RETV(g_hTest, RTCritSectInitEx(&g_aCritSects[i], 0, g_ahClasses[i], RTLOCKVAL_SUB_CLASS_NONE, "RTCritSectLO-None"), VINF_SUCCESS);
790 else if (i == 5)
791 RTTEST_CHECK_RC_RETV(g_hTest, RTCritSectInitEx(&g_aCritSects[i], 0, g_ahClasses[i], RTLOCKVAL_SUB_CLASS_ANY, "RTCritSectLO-Any"), VINF_SUCCESS);
792 else
793 RTTEST_CHECK_RC_RETV(g_hTest, RTCritSectInitEx(&g_aCritSects[i], 0, g_ahClasses[i], RTLOCKVAL_SUB_CLASS_USER + i, "RTCritSectLO-User"), VINF_SUCCESS);
794
795 RTTEST_CHECK_RETV(g_hTest, RTLockValidatorClassRetain(g_ahClasses[i]) == 1 + (i - 4 + 1) * 2); /* released in cleanup. */
796 }
797 }
798
799 /* Enter the first 4 critsects in ascending order and thereby defining
800 this as a valid lock order. */
801 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[0]), VINF_SUCCESS);
802 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[1]), VINF_SUCCESS);
803 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[2]), VINF_SUCCESS);
804 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[3]), VINF_SUCCESS);
805
806 /* Now, leave and re-enter the critsects in a way that should break the
807 order and check that we get the appropriate response. */
808 int rc;
809 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[0]), VINF_SUCCESS);
810 RTTEST_CHECK_RC(g_hTest, rc = RTCritSectEnter(&g_aCritSects[0]), VERR_SEM_LV_WRONG_ORDER);
811 if (RT_SUCCESS(rc))
812 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[0]), VINF_SUCCESS);
813
814 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[1]), VINF_SUCCESS);
815 RTTEST_CHECK_RC(g_hTest, rc = RTCritSectEnter(&g_aCritSects[1]), VERR_SEM_LV_WRONG_ORDER);
816 if (RT_SUCCESS(rc))
817 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[1]), VINF_SUCCESS);
818
819 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[2]), VINF_SUCCESS);
820 RTTEST_CHECK_RC(g_hTest, rc = RTCritSectEnter(&g_aCritSects[2]), VERR_SEM_LV_WRONG_ORDER);
821 if (RT_SUCCESS(rc))
822 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[2]), VINF_SUCCESS);
823
824 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[3]), VINF_SUCCESS);
825
826 /* Check that recursion isn't subject to order checks. */
827 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[0]), VINF_SUCCESS);
828 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[1]), VINF_SUCCESS);
829 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[2]), VINF_SUCCESS);
830 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[3]), VINF_SUCCESS);
831 RTTEST_CHECK_RC(g_hTest, rc = RTCritSectEnter(&g_aCritSects[0]), VINF_SUCCESS);
832 if (RT_SUCCESS(rc))
833 {
834 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[0]), VINF_SUCCESS);
835 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[3]), VINF_SUCCESS);
836 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[2]), VINF_SUCCESS);
837 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[1]), VINF_SUCCESS);
838
839 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[3]), VINF_SUCCESS);
840 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[2]), VINF_SUCCESS);
841 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[1]), VINF_SUCCESS);
842 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[0]), VINF_SUCCESS);
843 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[0]), VINF_SUCCESS);
844 }
845 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[3]), VINF_SUCCESS);
846 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[2]), VINF_SUCCESS);
847 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[1]), VINF_SUCCESS);
848 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[0]), VINF_SUCCESS);
849
850 /* Enable strict release order for class 2 and check that violations
851 are caught. */
852 RTTEST_CHECK_RC(g_hTest, RTLockValidatorClassEnforceStrictReleaseOrder(g_ahClasses[2], true), VINF_SUCCESS);
853
854 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[0]), VINF_SUCCESS);
855 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[1]), VINF_SUCCESS);
856 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[2]), VINF_SUCCESS);
857 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[3]), VINF_SUCCESS);
858
859 RTTEST_CHECK_RC(g_hTest, rc = RTCritSectLeave(&g_aCritSects[2]), VERR_SEM_LV_WRONG_RELEASE_ORDER);
860 if (RT_FAILURE(rc))
861 {
862 /* applies to recursions as well */
863 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[2]), VINF_SUCCESS);
864 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[3]), VINF_SUCCESS);
865 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[2]), VERR_SEM_LV_WRONG_RELEASE_ORDER);
866 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[3]), VINF_SUCCESS);
867 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[2]), VINF_SUCCESS);
868 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[2]), VERR_SEM_LV_WRONG_RELEASE_ORDER);
869 }
870 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[0]), VINF_SUCCESS);
871 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[1]), VINF_SUCCESS);
872 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[3]), VINF_SUCCESS);
873 if (RT_FAILURE(rc))
874 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[2]), VINF_SUCCESS);
875
876 /* Test that sub-class order works (4 = NONE, 5 = ANY, 6+ = USER). */
877 uint32_t cErrorsBefore = RTTestErrorCount(g_hTest);
878 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[7]), VINF_SUCCESS);
879
880 RTTEST_CHECK_RC(g_hTest, rc = RTCritSectEnter(&g_aCritSects[4]), VERR_SEM_LV_WRONG_ORDER);
881 if (RT_SUCCESS(rc))
882 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[4]), VINF_SUCCESS);
883
884 RTTEST_CHECK_RC(g_hTest, rc = RTCritSectEnter(&g_aCritSects[5]), VINF_SUCCESS);
885 if (RT_SUCCESS(rc))
886 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[5]), VINF_SUCCESS);
887
888 RTTEST_CHECK_RC(g_hTest, rc = RTCritSectEnter(&g_aCritSects[8]), VINF_SUCCESS);
889 if (RT_SUCCESS(rc))
890 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[8]), VINF_SUCCESS);
891
892 RTTEST_CHECK_RC(g_hTest, rc = RTCritSectEnter(&g_aCritSects[6]), VERR_SEM_LV_WRONG_ORDER);
893 if (RT_SUCCESS(rc))
894 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[6]), VINF_SUCCESS);
895
896 RTTEST_CHECK_RC(g_hTest, rc = RTCritSectEnter(&g_aCritSects[7]), VINF_SUCCESS);
897 if (RT_SUCCESS(rc))
898 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[7]), VINF_SUCCESS);
899 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[7]), VINF_SUCCESS);
900
901 /* Check that NONE trumps both ANY and USER. */
902 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[4]), VINF_SUCCESS);
903
904 RTTEST_CHECK_RC(g_hTest, rc = RTCritSectEnter(&g_aCritSects[5]), VERR_SEM_LV_WRONG_ORDER);
905 if (RT_SUCCESS(rc))
906 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[5]), VINF_SUCCESS);
907
908 RTTEST_CHECK_RC(g_hTest, rc = RTCritSectEnter(&g_aCritSects[6]), VERR_SEM_LV_WRONG_ORDER);
909 if (RT_SUCCESS(rc))
910 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[6]), VINF_SUCCESS);
911
912 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[4]), VINF_SUCCESS);
913
914 /* Take all the locks using sub-classes. */
915 if (cErrorsBefore == RTTestErrorCount(g_hTest))
916 {
917 bool fSavedQuiet = RTLockValidatorSetQuiet(true);
918 for (uint32_t i = 6; i < RT_ELEMENTS(g_aCritSects); i++)
919 {
920 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[i]), VINF_SUCCESS);
921 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[4]), VERR_SEM_LV_WRONG_ORDER);
922 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[5]), VINF_SUCCESS);
923 }
924 for (uint32_t i = 6; i < RT_ELEMENTS(g_aCritSects); i++)
925 {
926 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[i]), VINF_SUCCESS);
927 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[5]), VINF_SUCCESS);
928 }
929 RTLockValidatorSetQuiet(fSavedQuiet);
930 }
931
932 /* Work up some hash statistics and trigger a violation to show them. */
933 for (uint32_t i = 0; i < 10240; i++)
934 {
935 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[0]), VINF_SUCCESS);
936 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[1]), VINF_SUCCESS);
937 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[2]), VINF_SUCCESS);
938 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[3]), VINF_SUCCESS);
939 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[5]), VINF_SUCCESS);
940
941 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[5]), VINF_SUCCESS);
942 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[3]), VINF_SUCCESS);
943 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[2]), VINF_SUCCESS);
944 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[1]), VINF_SUCCESS);
945 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[0]), VINF_SUCCESS);
946 }
947 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[5]), VINF_SUCCESS);
948 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[3]), VERR_SEM_LV_WRONG_ORDER);
949 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[5]), VINF_SUCCESS);
950
951 /* clean up */
952 for (unsigned i = 0; i < RT_ELEMENTS(g_ahClasses); i++)
953 {
954 if (i <= 3)
955 RTTEST_CHECK(g_hTest, RTLockValidatorClassRelease(g_ahClasses[i]) == 1);
956 else
957 {
958 uint32_t cExpect = 1 + (RT_ELEMENTS(g_ahClasses) - i) * 2 - 1;
959 RTTEST_CHECK(g_hTest, RTLockValidatorClassRelease(g_ahClasses[i]) == cExpect);
960 }
961 g_ahClasses[i] = NIL_RTLOCKVALCLASS;
962 RTTEST_CHECK_RC_RETV(g_hTest, RTCritSectDelete(&g_aCritSects[i]), VINF_SUCCESS);
963 }
964}
965
966
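/**
 * Lock order validation for critical sections, one autodidact class per
 * section: checks wrong order detection, recursion, and strict release order
 * (including while recursing).
 */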
967static void testLo2(void)
968{
969 RTTestSub(g_hTest, "locking order, critsect");
970
971 /* Initialize the critsects, each with its own class. */
972 for (unsigned i = 0; i < 4; i++)
973 {
974 RTTEST_CHECK_RC_RETV(g_hTest, RTLockValidatorClassCreate(&g_ahClasses[i], true /*fAutodidact*/, RT_SRC_POS, "testLo2-%u", i), VINF_SUCCESS);
975 RTTEST_CHECK_RC_RETV(g_hTest, RTCritSectInitEx(&g_aCritSects[i], 0, g_ahClasses[i], RTLOCKVAL_SUB_CLASS_NONE, "RTCritSectLO"), VINF_SUCCESS);
976 RTTEST_CHECK_RETV(g_hTest, RTLockValidatorClassRetain(g_ahClasses[i]) == 3);
977 RTTEST_CHECK_RETV(g_hTest, RTLockValidatorClassRelease(g_ahClasses[i]) == 2);
978 }
979
980 /* Enter the first 4 critsects in ascending order and thereby defining
981 this as a valid lock order. */
982 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[0]), VINF_SUCCESS);
983 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[1]), VINF_SUCCESS);
984 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[2]), VINF_SUCCESS);
985 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[3]), VINF_SUCCESS);
986
987 /* Now, leave and re-enter the critsects in a way that should break the
988 order and check that we get the appropriate response. */
989 int rc;
990 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[0]), VINF_SUCCESS);
991 RTTEST_CHECK_RC(g_hTest, rc = RTCritSectEnter(&g_aCritSects[0]), VERR_SEM_LV_WRONG_ORDER);
992 if (RT_SUCCESS(rc))
993 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[0]), VINF_SUCCESS);
994
995 /* Check that recursion isn't subject to order checks. */
996 RTTEST_CHECK_RC(g_hTest, rc = RTCritSectEnter(&g_aCritSects[1]), VINF_SUCCESS);
997 if (RT_SUCCESS(rc))
998 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[1]), VINF_SUCCESS);
999
1000 /* Enable strict release order for class 2 and check that violations
1001 are caught - including recursion. */
1002 RTTEST_CHECK_RC(g_hTest, RTLockValidatorClassEnforceStrictReleaseOrder(g_ahClasses[2], true), VINF_SUCCESS);
1003 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[2]), VINF_SUCCESS); /* start recursion */
1004 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[3]), VINF_SUCCESS);
1005 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[2]), VERR_SEM_LV_WRONG_RELEASE_ORDER);
1006 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[3]), VINF_SUCCESS);
1007 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[2]), VINF_SUCCESS); /* end recursion */
1008 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[2]), VERR_SEM_LV_WRONG_RELEASE_ORDER);
1009 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[1]), VINF_SUCCESS);
1010 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[3]), VINF_SUCCESS);
1011 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[2]), VINF_SUCCESS);
1012
1013 /* clean up */
1014 for (unsigned i = 0; i < 4; i++)
1015 {
1016 RTTEST_CHECK(g_hTest, RTLockValidatorClassRelease(g_ahClasses[i]) == 1);
1017 g_ahClasses[i] = NIL_RTLOCKVALCLASS;
1018 RTTEST_CHECK_RC_RETV(g_hTest, RTCritSectDelete(&g_aCritSects[i]), VINF_SUCCESS);
1019 }
1020}
1021
1022
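/**
 * Lock order validation, read-write semaphore case. Note that in this
 * revision the body still mirrors testLo2 and exercises critical sections.
 */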
1023static void testLo3(void)
1024{
1025 RTTestSub(g_hTest, "locking order, read-write");
1026
1027 /* Initialize the critsects, each with its own class. */
1028 for (unsigned i = 0; i < 4; i++)
1029 {
1030 RTTEST_CHECK_RC_RETV(g_hTest, RTLockValidatorClassCreate(&g_ahClasses[i], true /*fAutodidact*/, RT_SRC_POS, "testLo3-%u", i), VINF_SUCCESS);
1031 RTTEST_CHECK_RC_RETV(g_hTest, RTCritSectInitEx(&g_aCritSects[i], 0, g_ahClasses[i], RTLOCKVAL_SUB_CLASS_NONE, "RTCritSectLO"), VINF_SUCCESS);
1032 RTTEST_CHECK_RETV(g_hTest, RTLockValidatorClassRetain(g_ahClasses[i]) == 4);
1033 RTTEST_CHECK_RETV(g_hTest, RTLockValidatorClassRelease(g_ahClasses[i]) == 3);
1034 }
1035
1036 /* Enter the first 4 critsects in ascending order and thereby defining
1037 this as a valid lock order. */
1038 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[0]), VINF_SUCCESS);
1039 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[1]), VINF_SUCCESS);
1040 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[2]), VINF_SUCCESS);
1041 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[3]), VINF_SUCCESS);
1042
1043 /* Now, leave and re-enter the critsects in a way that should break the
1044 order and check that we get the appropriate response. */
1045 int rc;
1046 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[0]), VINF_SUCCESS);
1047 RTTEST_CHECK_RC(g_hTest, rc = RTCritSectEnter(&g_aCritSects[0]), VERR_SEM_LV_WRONG_ORDER);
1048 if (RT_SUCCESS(rc))
1049 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[0]), VINF_SUCCESS);
1050
1051 /* Check that recursion isn't subject to order checks. */
1052 RTTEST_CHECK_RC(g_hTest, rc = RTCritSectEnter(&g_aCritSects[1]), VINF_SUCCESS);
1053 if (RT_SUCCESS(rc))
1054 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[1]), VINF_SUCCESS);
1055
1056 /* Enable strict release order for class 2 and check that violations
1057 are caught - including recursion. */
1058 RTTEST_CHECK_RC(g_hTest, RTLockValidatorClassEnforceStrictReleaseOrder(g_ahClasses[2], true), VINF_SUCCESS);
1059 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[2]), VINF_SUCCESS); /* start recursion */
1060 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(&g_aCritSects[3]), VINF_SUCCESS);
1061 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[2]), VERR_SEM_LV_WRONG_RELEASE_ORDER);
1062 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[3]), VINF_SUCCESS);
1063 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[2]), VINF_SUCCESS); /* end recursion */
1064 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[2]), VERR_SEM_LV_WRONG_RELEASE_ORDER);
1065 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[1]), VINF_SUCCESS);
1066 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[3]), VINF_SUCCESS);
1067 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(&g_aCritSects[2]), VINF_SUCCESS);
1068
1069 /* clean up */
1070 for (unsigned i = 0; i < 4; i++)
1071 {
1072 RTTEST_CHECK(g_hTest, RTLockValidatorClassRelease(g_ahClasses[i]) == 1);
1073 g_ahClasses[i] = NIL_RTLOCKVALCLASS;
1074 RTTEST_CHECK_RC_RETV(g_hTest, RTCritSectDelete(&g_aCritSects[i]), VINF_SUCCESS);
1075 }
1076}
1077
1078
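/**
 * Checks whether lock validation is compiled into this IPRT build by looking
 * for a validator record owner on a critical section and by provoking
 * VERR_SEM_LV_ILLEGAL_UPGRADE and VERR_SEM_LV_NOT_SIGNALLER errors.
 *
 * @returns true if the lock validator is present, false if not.
 */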
1079static bool testIsLockValidationCompiledIn(void)
1080{
1081 RTCRITSECT CritSect;
1082 RTTEST_CHECK_RC_OK_RET(g_hTest, RTCritSectInit(&CritSect), false);
1083 RTTEST_CHECK_RC_OK_RET(g_hTest, RTCritSectEnter(&CritSect), false);
1084 bool fRet = CritSect.pValidatorRec
1085 && CritSect.pValidatorRec->hThread == RTThreadSelf();
1086 RTTEST_CHECK_RC_OK_RET(g_hTest, RTCritSectLeave(&CritSect), false);
1087 RTTEST_CHECK_RC_OK_RET(g_hTest, RTCritSectDelete(&CritSect), false);
1088
1089 RTSEMRW hSemRW;
1090 RTTEST_CHECK_RC_OK_RET(g_hTest, RTSemRWCreate(&hSemRW), false);
1091 RTTEST_CHECK_RC_OK_RET(g_hTest, RTSemRWRequestRead(hSemRW, 50), false);
1092 int rc = RTSemRWRequestWrite(hSemRW, 1);
1093 if (rc != VERR_SEM_LV_ILLEGAL_UPGRADE)
1094 fRet = false;
1095 RTTEST_CHECK_RET(g_hTest, RT_FAILURE_NP(rc), false);
1096 RTTEST_CHECK_RC_OK_RET(g_hTest, RTSemRWReleaseRead(hSemRW), false);
1097 RTTEST_CHECK_RC_OK_RET(g_hTest, RTSemRWDestroy(hSemRW), false);
1098
1099#if 0 /** @todo detect it on RTSemMutex... */
1100 RTSEMMUTEX hSemMtx;
1101 RTTEST_CHECK_RC_OK_RET(g_hTest, RTSemMutexCreate(&hSemRW), false);
1102 RTTEST_CHECK_RC_OK_RET(g_hTest, RTSemMutexRequest(hSemRW, 50), false);
1103 /*??*/
1104 RTTEST_CHECK_RET(g_hTest, RT_FAILURE_NP(rc), false);
1105 RTTEST_CHECK_RC_OK_RET(g_hTest, RTSemRWRelease(hSemRW), false);
1106 RTTEST_CHECK_RC_OK_RET(g_hTest, RTSemRWDestroy(hSemRW), false);
1107#endif
1108
1109 RTSEMEVENT hSemEvt;
1110 RTTEST_CHECK_RC_OK_RET(g_hTest, RTSemEventCreate(&hSemEvt), false);
1111 RTSemEventSetSignaller(hSemEvt, RTThreadSelf());
1112 RTSemEventSetSignaller(hSemEvt, NIL_RTTHREAD);
1113 rc = RTSemEventSignal(hSemEvt);
1114 if (rc != VERR_SEM_LV_NOT_SIGNALLER)
1115 fRet = false;
1116 RTTEST_CHECK_RET(g_hTest, RT_FAILURE_NP(rc), false);
1117 RTTEST_CHECK_RC_OK_RET(g_hTest, RTSemEventDestroy(hSemEvt), false);
1118
1119 RTSEMEVENTMULTI hSemEvtMulti;
1120 RTTEST_CHECK_RC_OK_RET(g_hTest, RTSemEventMultiCreate(&hSemEvtMulti), false);
1121 RTSemEventMultiSetSignaller(hSemEvtMulti, RTThreadSelf());
1122 RTSemEventMultiSetSignaller(hSemEvtMulti, NIL_RTTHREAD);
1123 rc = RTSemEventMultiSignal(hSemEvtMulti);
1124 if (rc != VERR_SEM_LV_NOT_SIGNALLER)
1125 fRet = false;
1126 RTTEST_CHECK_RET(g_hTest, RT_FAILURE_NP(rc), false);
1127 RTTEST_CHECK_RC_OK_RET(g_hTest, RTSemEventMultiDestroy(hSemEvtMulti), false);
1128
1129 return fRet;
1130}
1131
1132
1133int main()
1134{
1135 /*
1136 * Init.
1137 */
1138 int rc = RTTestInitAndCreate("tstRTLockValidator", &g_hTest);
1139 if (rc)
1140 return rc;
1141 RTTestBanner(g_hTest);
1142
1143 RTLockValidatorSetEnabled(true);
1144 RTLockValidatorSetMayPanic(false);
1145 RTLockValidatorSetQuiet(true);
1146 if (!testIsLockValidationCompiledIn())
1147 return RTTestErrorCount(g_hTest) > 0
1148 ? RTTestSummaryAndDestroy(g_hTest)
1149 : RTTestSkipAndDestroy(g_hTest, "deadlock detection is not compiled in");
1150 RTLockValidatorSetQuiet(false);
1151
1152 bool fTestDd = false;//true;
1153 bool fTestLo = true;
1154
1155 /*
1156 * Some initial tests with verbose output (all single pass).
1157 */
1158 if (fTestDd)
1159 {
1160 testDd1(3, 0);
1161 testDd2(1, 0);
1162 testDd2(3, 0);
1163 testDd5(3, 0);
1164 testDd6(3, 0);
1165 testDd7(3, 0);
1166 }
1167 if (fTestLo)
1168 {
1169 testLo1();
1170 testLo2();
1171 }
1172
1173
1174 /*
1175 * If successful, perform more thorough testing without noisy output.
1176 */
1177 if (RTTestErrorCount(g_hTest) == 0)
1178 {
1179 RTLockValidatorSetQuiet(true);
1180
1181 if (fTestDd)
1182 {
1183 testDd1( 2, SECS_SIMPLE_TEST);
1184 testDd1( 3, SECS_SIMPLE_TEST);
1185 testDd1( 7, SECS_SIMPLE_TEST);
1186 testDd1(10, SECS_SIMPLE_TEST);
1187 testDd1(15, SECS_SIMPLE_TEST);
1188 testDd1(30, SECS_SIMPLE_TEST);
1189
1190 testDd2( 1, SECS_SIMPLE_TEST);
1191 testDd2( 2, SECS_SIMPLE_TEST);
1192 testDd2( 3, SECS_SIMPLE_TEST);
1193 testDd2( 7, SECS_SIMPLE_TEST);
1194 testDd2(10, SECS_SIMPLE_TEST);
1195 testDd2(15, SECS_SIMPLE_TEST);
1196 testDd2(30, SECS_SIMPLE_TEST);
1197
1198 testDd3( 2, SECS_SIMPLE_TEST);
1199 testDd3(10, SECS_SIMPLE_TEST);
1200
1201 testDd4( 2, SECS_RACE_TEST);
1202 testDd4( 6, SECS_RACE_TEST);
1203 testDd4(10, SECS_RACE_TEST);
1204 testDd4(30, SECS_RACE_TEST);
1205
1206 testDd5( 2, SECS_RACE_TEST);
1207 testDd5( 3, SECS_RACE_TEST);
1208 testDd5( 7, SECS_RACE_TEST);
1209 testDd5(10, SECS_RACE_TEST);
1210 testDd5(15, SECS_RACE_TEST);
1211 testDd5(30, SECS_RACE_TEST);
1212
1213 testDd6( 2, SECS_SIMPLE_TEST);
1214 testDd6( 3, SECS_SIMPLE_TEST);
1215 testDd6( 7, SECS_SIMPLE_TEST);
1216 testDd6(10, SECS_SIMPLE_TEST);
1217 testDd6(15, SECS_SIMPLE_TEST);
1218 testDd6(30, SECS_SIMPLE_TEST);
1219
1220 testDd7( 2, SECS_SIMPLE_TEST);
1221 testDd7( 3, SECS_SIMPLE_TEST);
1222 testDd7( 7, SECS_SIMPLE_TEST);
1223 testDd7(10, SECS_SIMPLE_TEST);
1224 testDd7(15, SECS_SIMPLE_TEST);
1225 testDd7(30, SECS_SIMPLE_TEST);
1226 }
1227 }
1228
1229 return RTTestSummaryAndDestroy(g_hTest);
1230}
1231