Changeset 25638 in vbox
- Timestamp:
- Jan 4, 2010 4:08:04 PM (15 years ago)
- Location:
- trunk
- Files:
-
- 2 added
- 23 edited
Legend:
- Unmodified
- Added
- Removed
-
trunk/include/iprt/err.h
r25614 r25638 860 860 /** An illegal lock upgrade was attempted. */ 861 861 #define VERR_SEM_LV_ILLEGAL_UPGRADE (-375) 862 /** The thread is not a valid signaller of the event. */ 863 #define VERR_SEM_LV_NOT_SIGNALLER (-376) 862 864 /** @} */ 863 865 -
trunk/include/iprt/lockvalidator.h
r25622 r25638 220 220 /** Whether it's enabled or not. */ 221 221 bool fEnabled; 222 /** Set if event semaphore signaller, clear if read-write semaphore. */ 223 bool fSignaller; 222 224 /** Alignment padding. */ 223 bool afPadding[2];225 bool fPadding; 224 226 /** Pointer to a table containing pointers to records of all the owners. */ 225 227 R3R0PTRTYPE(PRTLOCKVALRECSHRDOWN volatile *) papOwners; … … 452 454 * @param fRecursiveOk Whether it's ok to recurse. 453 455 * @param enmSleepState The sleep state to enter on successful return. 456 * @param fReallySleeping Is it really going to sleep now or not. Use 457 * false before calls to other IPRT synchronization 458 * methods. 454 459 */ 455 460 RTDECL(int) RTLockValidatorRecExclCheckBlocking(PRTLOCKVALRECEXCL pRec, RTTHREAD hThreadSelf, 456 461 PCRTLOCKVALSRCPOS pSrcPos, bool fRecursiveOk, 457 RTTHREADSTATE enmSleepState );462 RTTHREADSTATE enmSleepState, bool fReallySleeping); 458 463 459 464 /** … … 467 472 * @param fRecursiveOk Whether it's ok to recurse. 468 473 * @param enmSleepState The sleep state to enter on successful return. 474 * @param fReallySleeping Is it really going to sleep now or not. Use 475 * false before calls to other IPRT synchronization 476 * methods. 469 477 */ 470 478 RTDECL(int) RTLockValidatorRecExclCheckOrderAndBlocking(PRTLOCKVALRECEXCL pRec, RTTHREAD hThreadSelf, 471 479 PCRTLOCKVALSRCPOS pSrcPos, bool fRecursiveOk, 472 RTTHREADSTATE enmSleepState );480 RTTHREADSTATE enmSleepState, bool fReallySleeping); 473 481 474 482 /** … … 485 493 * @param pszName The lock name (optional). 486 494 * @param hLock The lock handle. 487 */ 488 RTDECL(void) RTLockValidatorRecSharedInit(PRTLOCKVALRECSHRD pRec, RTLOCKVALIDATORCLASS hClass, 489 uint32_t uSubClass, const char *pszName, void *hLock); 495 * @param fSignaller Set if event semaphore signaller logic should be 496 * applied to this record, clear if read-write 497 * semaphore logic should be used. 
498 */ 499 RTDECL(void) RTLockValidatorRecSharedInit(PRTLOCKVALRECSHRD pRec, RTLOCKVALIDATORCLASS hClass, uint32_t uSubClass, 500 const char *pszName, void *hLock, bool fSignaller); 490 501 /** 491 502 * Uninitialize a lock validator record previously initialized by … … 532 543 * @param fRecursiveOk Whether it's ok to recurse. 533 544 * @param enmSleepState The sleep state to enter on successful return. 545 * @param fReallySleeping Is it really going to sleep now or not. Use 546 * false before calls to other IPRT synchronization 547 * methods. 534 548 */ 535 549 RTDECL(int) RTLockValidatorRecSharedCheckBlocking(PRTLOCKVALRECSHRD pRec, RTTHREAD hThreadSelf, 536 550 PCRTLOCKVALSRCPOS pSrcPos, bool fRecursiveOk, 537 RTTHREADSTATE enmSleepState );551 RTTHREADSTATE enmSleepState, bool fReallySleeping); 538 552 539 553 /** … … 546 560 * @param pSrcPos The source position of the lock operation. 547 561 * @param fRecursiveOk Whether it's ok to recurse. 562 * @param enmSleepState The sleep state to enter on successful return. 563 * @param fReallySleeping Is it really going to sleep now or not. Use 564 * false before calls to other IPRT synchronization 565 * methods. 548 566 */ 549 567 RTDECL(int) RTLockValidatorRecSharedCheckOrderAndBlocking(PRTLOCKVALRECSHRD pRec, RTTHREAD hThreadSelf, 550 568 PCRTLOCKVALSRCPOS pSrcPos, bool fRecursiveOk, 551 RTTHREADSTATE enmSleepState); 569 RTTHREADSTATE enmSleepState, bool fReallySleeping); 570 571 /** 572 * Removes all current owners and makes hThread the only owner. 573 * 574 * @param pRead The validator record. 575 * @param hThread The thread handle of the owner. NIL_RTTHREAD is 576 * an alias for the current thread. 577 * @param pSrcPos The source position of the lock operation. 578 */ 579 RTDECL(void) RTLockValidatorRecSharedResetOwner(PRTLOCKVALRECSHRD pRec, RTTHREAD hThread, PCRTLOCKVALSRCPOS pSrcPos); 552 580 553 581 /** … … 558 586 * 559 587 * @param pRead The validator record. 
560 * @param hThreadSelf The calling thread and owner. 561 * @param pSrcPos The source position of the lock operation. 562 */ 563 RTDECL(void) RTLockValidatorSharedRecAddOwner(PRTLOCKVALRECSHRD pRec, RTTHREAD hThreadSelf, PCRTLOCKVALSRCPOS pSrcPos); 588 * @param hThread The thread handle of the owner. NIL_RTTHREAD is 589 * an alias for the current thread. 590 * @param pSrcPos The source position of the lock operation. 591 */ 592 RTDECL(void) RTLockValidatorRecSharedAddOwner(PRTLOCKVALRECSHRD pRec, RTTHREAD hThread, PCRTLOCKVALSRCPOS pSrcPos); 564 593 565 594 /** … … 570 599 * 571 600 * @param pRec The validator record. 572 * @param hThreadSelf The calling thread and the owner to remove. 573 */ 574 RTDECL(void) RTLockValidatorSharedRecRemoveOwner(PRTLOCKVALRECSHRD pRec, RTTHREAD hThreadSelf); 601 * @param hThread The thread handle of the owner. NIL_RTTHREAD is 602 * an alias for the current thread. 603 */ 604 RTDECL(void) RTLockValidatorRecSharedRemoveOwner(PRTLOCKVALRECSHRD pRec, RTTHREAD hThread); 575 605 576 606 /** … … 584 614 * @retval VERR_SEM_LV_INVALID_PARAMETER if the input is invalid. 585 615 * 586 * @param pRe adThe validator record.587 * @param hThreadSelf The handle of the calling thread. If not known,588 * pass NIL_RTTHREAD and we'll figure it out.616 * @param pRec The validator record. 617 * @param hThreadSelf The handle of the calling thread. NIL_RTTHREAD 618 * is an alias for the current thread. 589 619 */ 590 620 RTDECL(int) RTLockValidatorRecSharedCheckAndRelease(PRTLOCKVALRECSHRD pRec, RTTHREAD hThreadSelf); 621 622 /** 623 * Check the signaller of an event. 624 * 625 * This is called by routines implementing releasing the event sempahore (both 626 * kinds). 627 * 628 * @retval VINF_SUCCESS on success. 629 * @retval VERR_SEM_LV_NOT_SIGNALLER if the thread is not in the record. Will 630 * have done all necessary whining and breakpointing before returning. 631 * @retval VERR_SEM_LV_INVALID_PARAMETER if the input is invalid. 
632 * 633 * @param pRec The validator record. 634 * @param hThreadSelf The handle of the calling thread. NIL_RTTHREAD 635 * is an alias for the current thread. 636 */ 637 RTDECL(int) RTLockValidatorRecSharedCheckSignaller(PRTLOCKVALRECSHRD pRec, RTTHREAD hThreadSelf); 591 638 592 639 /** -
trunk/include/iprt/semaphore.h
r25624 r25638 109 109 */ 110 110 RTDECL(int) RTSemEventWaitNoResume(RTSEMEVENT EventSem, unsigned cMillies); 111 112 /** 113 * Sets the signaller thread to one specific thread. 114 * 115 * This is only used for validating usage and deadlock detection. When used 116 * after calls to RTSemEventAddSignaller, the specified thread will be the only 117 * signalling thread. 118 * 119 * @param hEventSem The event semaphore. 120 * @param hThread The thread that will signal it. Pass 121 * NIL_RTTHREAD to indicate that there is no 122 * special signalling thread. 123 */ 124 RTDECL(void) RTSemEventSetSignaller(RTSEMEVENT hEventSem, RTTHREAD hThread); 125 126 /** 127 * To add more signalling threads. 128 * 129 * First call RTSemEventSetSignaller then add further threads with this. 130 * 131 * @param hEventSem The event semaphore. 132 * @param hThread The thread that will signal it. NIL_RTTHREAD is 133 * not accepted. 134 */ 135 RTDECL(void) RTSemEventAddSignaller(RTSEMEVENT hEventSem, RTTHREAD hThread); 136 137 /** 138 * To remove a signalling thread. 139 * 140 * Reverts work done by RTSemEventAddSignaller and RTSemEventSetSignaller. 141 * 142 * @param hEventSem The event semaphore. 143 * @param hThread A previously added thread. 144 */ 145 RTDECL(void) RTSemEventRemoverSignaller(RTSEMEVENT hEventSem, RTTHREAD hThread); 111 146 112 147 /** @} */ -
trunk/include/iprt/thread.h
r25598 r25638 586 586 * @param hThread The current thread. 587 587 * @param enmState The sleep state. 588 */ 589 RTDECL(void) RTThreadBlocking(RTTHREAD hThread, RTTHREADSTATE enmState); 588 * @param fReallySleeping Really going to sleep now. Use false before calls 589 * to other IPRT synchronization methods. 590 */ 591 RTDECL(void) RTThreadBlocking(RTTHREAD hThread, RTTHREADSTATE enmState, bool fReallySleeping); 590 592 591 593 /** 592 594 * Get the current thread state. 595 * 596 * A thread that is reported as sleeping may actually still be running inside 597 * the lock validator or/and in the code of some other IPRT synchronization 598 * primitive. Use RTThreadGetReallySleeping 593 599 * 594 600 * @returns The thread state. … … 598 604 599 605 /** 606 * Checks if the thread is really sleeping or not. 607 * 608 * @returns RTTHREADSTATE_RUNNING if not really sleeping, otherwise the state it 609 * is sleeping in. 610 * @param hThread The thread. 611 */ 612 RTDECL(RTTHREADSTATE) RTThreadGetReallySleeping(RTTHREAD hThread); 613 614 /** 600 615 * Translate a thread state into a string. 601 616 * … … 604 619 */ 605 620 RTDECL(const char *) RTThreadStateName(RTTHREADSTATE enmState); 621 622 623 /** 624 * Native thread states returned by RTThreadNativeState. 625 */ 626 typedef enum RTTHREADNATIVESTATE 627 { 628 /** Invalid thread handle. */ 629 RTTHREADNATIVESTATE_INVALID = 0, 630 /** Unable to determine the thread state. */ 631 RTTHREADNATIVESTATE_UNKNOWN, 632 /** The thread is running. */ 633 RTTHREADNATIVESTATE_RUNNING, 634 /** The thread is blocked. */ 635 RTTHREADNATIVESTATE_BLOCKED, 636 /** The thread is suspended / stopped. */ 637 RTTHREADNATIVESTATE_SUSPENDED, 638 /** The thread has terminated. */ 639 RTTHREADNATIVESTATE_TERMINATED, 640 /** Make sure it's a 32-bit type. */ 641 RTTHREADNATIVESTATE_32BIT_HACK = 0x7fffffff 642 } RTTHREADNATIVESTATE; 643 644 645 /** 646 * Get the native state of a thread. 647 * 648 * @returns Native state. 
649 * @param hThread The thread handle. 650 * 651 * @remarks Not yet implemented on all systems, so have a backup plan for 652 * RTTHREADNATIVESTATE_UNKNOWN. 653 */ 654 RTDECL(RTTHREADNATIVESTATE) RTThreadGetNativeState(RTTHREAD hThread); 606 655 607 656 -
trunk/src/VBox/Runtime/Makefile.kmk
r25536 r25638 413 413 generic/uuid-generic.cpp \ 414 414 generic/RTProcIsRunningByName-generic.cpp \ 415 generic/RTThreadGetNativeState-generic.cpp \ 415 416 nt/RTErrConvertFromNtStatus.cpp \ 416 417 r3/posix/env-posix.cpp \ … … 457 458 generic/utf16locale-generic.cpp \ 458 459 generic/uuid-generic.cpp \ 460 r3/linux/RTThreadGetNativeState-linux.cpp \ 459 461 r3/linux/mp-linux.cpp \ 460 462 r3/linux/rtProcInitExePath-linux.cpp \ … … 527 529 generic/RTMpGetMaxFrequency-generic.cpp \ 528 530 generic/RTProcIsRunningByName-generic.cpp \ 531 generic/RTThreadGetNativeState-generic.cpp \ 529 532 os2/RTErrConvertFromOS2.cpp \ 530 533 r3/os2/filelock-os2.cpp \ … … 568 571 generic/uuid-generic.cpp\ 569 572 generic/RTProcIsRunningByName-generic.cpp \ 573 generic/RTThreadGetNativeState-generic.cpp \ 570 574 r3/darwin/alloc-darwin.cpp \ 571 575 r3/darwin/filelock-darwin.cpp \ … … 615 619 generic/RTMpIsCpuOnline-generic.cpp \ 616 620 generic/RTProcIsRunningByName-generic.cpp \ 621 generic/RTThreadGetNativeState-generic.cpp \ 617 622 r3/freebsd/mp-freebsd.cpp \ 618 623 r3/freebsd/alloc-freebsd.cpp \ … … 655 660 generic/uuid-generic.cpp \ 656 661 generic/RTProcIsRunningByName-generic.cpp \ 662 generic/RTThreadGetNativeState-generic.cpp \ 657 663 r3/posix/RTFileQueryFsSizes-posix.cpp \ 658 664 r3/posix/RTSystemQueryOSInfo-posix.cpp \ … … 716 722 generic/uuid-generic.cpp \ 717 723 generic/RTProcIsRunningByName-generic.cpp \ 724 generic/RTThreadGetNativeState-generic.cpp \ 718 725 l4/l4-errno.cpp \ 719 726 l4/rtProcInitExePath-l4.cpp \ -
trunk/src/VBox/Runtime/common/misc/lockvalidator.cpp
r25625 r25638 274 274 if (!ASMAtomicUoReadBool(&g_fLockValidatorQuiet)) 275 275 { 276 ASMCompilerBarrier(); /* paranoia */ 276 277 RTAssertMsg1Weak("RTLockValidator", pSrcPos ? pSrcPos->uLine : 0, pSrcPos ? pSrcPos->pszFile : NULL, pSrcPos ? pSrcPos->pszFunction : NULL); 277 278 if (pSrcPos && pSrcPos->uId) … … 417 418 418 419 /** 420 * Checks if all owners are blocked - shared record operated in signaller mode. 421 * 422 * @returns true / false accordingly. 423 * @param pRec The record. 424 * @param pThreadSelf The current thread. 425 */ 426 DECL_FORCE_INLINE(bool) rtLockValidatorDdAreAllThreadsBlocked(PRTLOCKVALRECSHRD pRec, PRTTHREADINT pThreadSelf) 427 { 428 PRTLOCKVALRECSHRDOWN volatile *papOwners = pRec->papOwners; 429 uint32_t cAllocated = pRec->cAllocated; 430 uint32_t cEntries = ASMAtomicUoReadU32(&pRec->cEntries); 431 if (cEntries == 0) 432 return false; 433 434 for (uint32_t i = 0; i < cAllocated; i++) 435 { 436 PRTLOCKVALRECSHRDOWN pEntry = rtLockValidatorUoReadSharedOwner(&papOwners[i]); 437 if ( pEntry 438 && pEntry->Core.u32Magic == RTLOCKVALRECSHRDOWN_MAGIC) 439 { 440 PRTTHREADINT pCurThread = rtLockValidatorReadThreadHandle(&pEntry->hThread); 441 if (!pCurThread) 442 return false; 443 if (pCurThread->u32Magic != RTTHREADINT_MAGIC) 444 return false; 445 if ( !RTTHREAD_IS_SLEEPING(rtThreadGetState(pCurThread)) 446 && pCurThread != pThreadSelf) 447 return false; 448 if (--cEntries == 0) 449 break; 450 } 451 else 452 Assert(!pEntry || pEntry->Core.u32Magic == RTLOCKVALRECSHRDOWN_MAGIC_DEAD); 453 } 454 455 return true; 456 } 457 458 459 460 461 /** 419 462 * Verifies the deadlock stack before calling it a deadlock. 420 463 * … … 424 467 * 425 468 * @param pStack The deadlock detection stack. 426 */ 427 static int rtLockValidatorDdVerifyDeadlock(PRTLOCKVALDDSTACK pStack) 469 * @param pThreadSelf The current thread. 
470 */ 471 static int rtLockValidatorDdVerifyDeadlock(PRTLOCKVALDDSTACK pStack, PRTTHREADINT pThreadSelf) 428 472 { 429 473 uint32_t const c = pStack->c; … … 438 482 return VERR_TRY_AGAIN; 439 483 if (rtLockValidatorReadRecUnionPtr(&pThread->LockValidator.pRec) != pStack->a[i].pFirstSibling) 484 return VERR_TRY_AGAIN; 485 /* ASSUMES the signaller records won't have siblings! */ 486 PRTLOCKVALRECUNION pRec = pStack->a[i].pRec; 487 if ( pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC 488 && pRec->Shared.fSignaller 489 && !rtLockValidatorDdAreAllThreadsBlocked(&pRec->Shared, pThreadSelf)) 440 490 return VERR_TRY_AGAIN; 441 491 } … … 532 582 533 583 case RTLOCKVALRECSHRD_MAGIC: 534 /* Skip to the next sibling if same side. ASSUMES reader priority. */ 535 /** @todo The read side of a read-write lock is problematic if 536 * the implementation prioritizes writers over readers because 537 * that means we should could deadlock against current readers 538 * if a writer showed up. If the RW sem implementation is 539 * wrapping some native API, it's not so easy to detect when we 540 * should do this and when we shouldn't. Checking when we 541 * shouldn't is subject to wakeup scheduling and cannot easily 542 * be made reliable. 543 * 544 * At the moment we circumvent all this mess by declaring that 545 * readers has priority. This is TRUE on linux, but probably 546 * isn't on Solaris and FreeBSD. */ 547 if ( pRec == pFirstSibling 548 && pRec->Shared.pSibling != NULL 549 && pRec->Shared.pSibling != pFirstSibling) 584 if (!pRec->Shared.fSignaller) 550 585 { 551 pRec = pRec->Shared.pSibling; 552 Assert(iEntry == UINT32_MAX); 553 continue; 586 /* Skip to the next sibling if same side. ASSUMES reader priority. */ 587 /** @todo The read side of a read-write lock is problematic if 588 * the implementation prioritizes writers over readers because 589 * that means we should could deadlock against current readers 590 * if a writer showed up. 
If the RW sem implementation is 591 * wrapping some native API, it's not so easy to detect when we 592 * should do this and when we shouldn't. Checking when we 593 * shouldn't is subject to wakeup scheduling and cannot easily 594 * be made reliable. 595 * 596 * At the moment we circumvent all this mess by declaring that 597 * readers has priority. This is TRUE on linux, but probably 598 * isn't on Solaris and FreeBSD. */ 599 if ( pRec == pFirstSibling 600 && pRec->Shared.pSibling != NULL 601 && pRec->Shared.pSibling != pFirstSibling) 602 { 603 pRec = pRec->Shared.pSibling; 604 Assert(iEntry == UINT32_MAX); 605 continue; 606 } 554 607 } 555 608 556 609 /* Scan the owner table for blocked owners. */ 557 610 pNextThread = NIL_RTTHREAD; 558 if (ASMAtomicUoReadU32(&pRec->Shared.cEntries) > 0) 611 if ( ASMAtomicUoReadU32(&pRec->Shared.cEntries) > 0 612 && ( !pRec->Shared.fSignaller 613 || iEntry != UINT32_MAX 614 || rtLockValidatorDdAreAllThreadsBlocked(&pRec->Shared, pThreadSelf) 615 ) 616 ) 559 617 { 560 uint32_t cAllocated = ASMAtomicUoReadU32(&pRec->Shared.cAllocated);618 uint32_t cAllocated = pRec->Shared.cAllocated; 561 619 PRTLOCKVALRECSHRDOWN volatile *papOwners = pRec->Shared.papOwners; 562 620 while (++iEntry < cAllocated) … … 642 700 pStack->a[i].pFirstSibling = pFirstSibling; 643 701 644 if (RT_UNLIKELY(pNextThread == pThreadSelf)) 645 return rtLockValidatorDdVerifyDeadlock(pStack); 702 if (RT_UNLIKELY( pNextThread == pThreadSelf 703 && ( i != 0 704 || pRec->Core.u32Magic != RTLOCKVALRECSHRD_MAGIC 705 || !pRec->Shared.fSignaller) /* ASSUMES signaller records have no siblings. 
*/ 706 ) 707 ) 708 return rtLockValidatorDdVerifyDeadlock(pStack, pThreadSelf); 646 709 647 710 pRec = pNextRec; … … 1135 1198 RTDECL(int) RTLockValidatorRecExclCheckBlocking(PRTLOCKVALRECEXCL pRec, RTTHREAD hThreadSelf, 1136 1199 PCRTLOCKVALSRCPOS pSrcPos, bool fRecursiveOk, 1137 RTTHREADSTATE enmSleepState )1200 RTTHREADSTATE enmSleepState, bool fReallySleeping) 1138 1201 { 1139 1202 /* … … 1192 1255 rc = rtLockValidatorDeadlockDetection(pRecU, pThreadSelf, pSrcPos); 1193 1256 1194 if (RT_FAILURE(rc)) 1257 if (RT_SUCCESS(rc)) 1258 ASMAtomicWriteBool(&pThreadSelf->fReallySleeping, fReallySleeping); 1259 else 1195 1260 rtThreadSetState(pThreadSelf, enmThreadState); 1196 1261 ASMAtomicWriteBool(&pThreadSelf->LockValidator.fInValidator, false); … … 1202 1267 RTDECL(int) RTLockValidatorRecExclCheckOrderAndBlocking(PRTLOCKVALRECEXCL pRec, RTTHREAD hThreadSelf, 1203 1268 PCRTLOCKVALSRCPOS pSrcPos, bool fRecursiveOk, 1204 RTTHREADSTATE enmSleepState )1269 RTTHREADSTATE enmSleepState, bool fReallySleeping) 1205 1270 { 1206 1271 int rc = RTLockValidatorRecExclCheckOrder(pRec, hThreadSelf, pSrcPos); 1207 1272 if (RT_SUCCESS(rc)) 1208 rc = RTLockValidatorRecExclCheckBlocking(pRec, hThreadSelf, pSrcPos, fRecursiveOk, enmSleepState );1273 rc = RTLockValidatorRecExclCheckBlocking(pRec, hThreadSelf, pSrcPos, fRecursiveOk, enmSleepState, fReallySleeping); 1209 1274 return rc; 1210 1275 } … … 1212 1277 1213 1278 1214 RTDECL(void) RTLockValidatorRecSharedInit(PRTLOCKVALRECSHRD pRec, RTLOCKVALIDATORCLASS hClass, 1215 uint32_t uSubClass, const char *pszName, void *hLock)1279 RTDECL(void) RTLockValidatorRecSharedInit(PRTLOCKVALRECSHRD pRec, RTLOCKVALIDATORCLASS hClass, uint32_t uSubClass, 1280 const char *pszName, void *hLock, bool fSignaller) 1216 1281 { 1217 1282 RTLOCKVAL_ASSERT_PTR_ALIGN(pRec); … … 1224 1289 pRec->pszName = pszName; 1225 1290 pRec->fEnabled = RTLockValidatorIsEnabled(); 1291 pRec->fSignaller = fSignaller; 1226 1292 pRec->pSibling = NULL; 1227 1293 … … 1231 1297 
pRec->cAllocated = 0; 1232 1298 pRec->fReallocating = false; 1233 pRec->afPadding[0] = false; 1234 pRec->afPadding[1] = false; 1299 pRec->fPadding = false; 1235 1300 pRec->papOwners = NULL; 1236 1301 #if HC_ARCH_BITS == 32 … … 1334 1399 RTDECL(int) RTLockValidatorRecSharedCheckBlocking(PRTLOCKVALRECSHRD pRec, RTTHREAD hThreadSelf, 1335 1400 PCRTLOCKVALSRCPOS pSrcPos, bool fRecursiveOk, 1336 RTTHREADSTATE enmSleepState )1401 RTTHREADSTATE enmSleepState, bool fReallySleeping) 1337 1402 { 1338 1403 /* … … 1373 1438 */ 1374 1439 int rc = VINF_SUCCESS; 1375 PRTLOCKVALRECSHRDOWN pEntry = rtLockValidatorRecSharedFindOwner(&pRecU->Shared, pThreadSelf, NULL); 1440 PRTLOCKVALRECSHRDOWN pEntry = !pRecU->Shared.fSignaller 1441 ? rtLockValidatorRecSharedFindOwner(&pRecU->Shared, pThreadSelf, NULL) 1442 : NULL; 1376 1443 if (pEntry) 1377 1444 { … … 1389 1456 rc = rtLockValidatorDeadlockDetection(pRecU, pThreadSelf, pSrcPos); 1390 1457 1391 if (RT_FAILURE(rc)) 1458 if (RT_SUCCESS(rc)) 1459 ASMAtomicWriteBool(&pThreadSelf->fReallySleeping, fReallySleeping); 1460 else 1392 1461 rtThreadSetState(pThreadSelf, enmThreadState); 1393 1462 ASMAtomicWriteBool(&pThreadSelf->LockValidator.fInValidator, false); … … 1399 1468 RTDECL(int) RTLockValidatorRecSharedCheckOrderAndBlocking(PRTLOCKVALRECSHRD pRec, RTTHREAD hThreadSelf, 1400 1469 PCRTLOCKVALSRCPOS pSrcPos, bool fRecursiveOk, 1401 RTTHREADSTATE enmSleepState )1470 RTTHREADSTATE enmSleepState, bool fReallySleeping) 1402 1471 { 1403 1472 int rc = RTLockValidatorRecSharedCheckOrder(pRec, hThreadSelf, pSrcPos); 1404 1473 if (RT_SUCCESS(rc)) 1405 rc = RTLockValidatorRecSharedCheckBlocking(pRec, hThreadSelf, pSrcPos, fRecursiveOk, enmSleepState );1474 rc = RTLockValidatorRecSharedCheckBlocking(pRec, hThreadSelf, pSrcPos, fRecursiveOk, enmSleepState, fReallySleeping); 1406 1475 return rc; 1407 1476 } … … 1413 1482 * 1414 1483 * @returns The new owner entry. 
1415 * @param p SharedThe shared lock record.1484 * @param pRec The shared lock record. 1416 1485 * @param pThreadSelf The calling thread and owner. Used for record 1417 1486 * initialization and allocation. … … 1419 1488 */ 1420 1489 DECLINLINE(PRTLOCKVALRECSHRDOWN) 1421 rtLockValidatorRecSharedAllocOwner(PRTLOCKVALRECSHRD pRe ad, PRTTHREADINT pThreadSelf, PCRTLOCKVALSRCPOS pSrcPos)1490 rtLockValidatorRecSharedAllocOwner(PRTLOCKVALRECSHRD pRec, PRTTHREADINT pThreadSelf, PCRTLOCKVALSRCPOS pSrcPos) 1422 1491 { 1423 1492 PRTLOCKVALRECSHRDOWN pEntry; 1424 1493 1425 1494 /* 1426 * Check if the thread has any statically allocated records we can use.1427 * /1428 unsigned iEntry = ASMBitFirstSetU32(pThreadSelf->LockValidator.bmFreeShrdOwners);1429 if (iEntry > 0)1430 {1431 iEntry--;1432 pThreadSelf->LockValidator.bmFreeShrdOwners &= ~RT_BIT_32(iEntry);1433 pEntry = &pThreadSelf->LockValidator.aShrdOwners[iEntry ];1495 * Check if the thread has any statically allocated records we can easily 1496 * make use of. 
1497 */ 1498 unsigned iEntry = ASMBitFirstSetU32(ASMAtomicUoReadU32(&pThreadSelf->LockValidator.bmFreeShrdOwners)); 1499 if ( iEntry > 0 1500 && ASMAtomicBitTestAndClear(&pThreadSelf->LockValidator.bmFreeShrdOwners, iEntry - 1)) 1501 { 1502 pEntry = &pThreadSelf->LockValidator.aShrdOwners[iEntry - 1]; 1434 1503 Assert(!pEntry->fReserved); 1435 1504 pEntry->fStaticAlloc = true; 1505 rtThreadGet(pThreadSelf); 1436 1506 } 1437 1507 else … … 1448 1518 pEntry->hThread = pThreadSelf; 1449 1519 pEntry->pDown = NULL; 1450 pEntry->pSharedRec = pRe ad;1520 pEntry->pSharedRec = pRec; 1451 1521 #if HC_ARCH_BITS == 32 1452 1522 pEntry->pvReserved = NULL; … … 1469 1539 if (pEntry) 1470 1540 { 1541 Assert(pEntry->Core.u32Magic == RTLOCKVALRECSHRDOWN_MAGIC); 1471 1542 ASMAtomicWriteU32(&pEntry->Core.u32Magic, RTLOCKVALRECSHRDOWN_MAGIC_DEAD); 1472 1543 1473 PRTTHREADINT pThreadSelf = pEntry->hThread; 1474 ASMAtomicXchgHandle(&pEntry->hThread, NIL_RTTHREAD, &pThreadSelf); 1475 Assert(pThreadSelf == RTThreadSelf()); 1544 PRTTHREADINT pThread; 1545 ASMAtomicXchgHandle(&pEntry->hThread, NIL_RTTHREAD, &pThread); 1476 1546 1477 1547 Assert(pEntry->fReserved); … … 1480 1550 if (pEntry->fStaticAlloc) 1481 1551 { 1482 uintptr_t iEntry = pEntry - &pThreadSelf->LockValidator.aShrdOwners[0]; 1483 AssertReleaseReturnVoid(iEntry < RT_ELEMENTS(pThreadSelf->LockValidator.aShrdOwners)); 1484 pThreadSelf->LockValidator.bmFreeShrdOwners |= RT_BIT_32(iEntry); 1552 AssertPtrReturnVoid(pThread); 1553 AssertReturnVoid(pThread->u32Magic == RTTHREADINT_MAGIC); 1554 1555 uintptr_t iEntry = pEntry - &pThread->LockValidator.aShrdOwners[0]; 1556 AssertReleaseReturnVoid(iEntry < RT_ELEMENTS(pThread->LockValidator.aShrdOwners)); 1557 1558 Assert(!ASMBitTest(&pThread->LockValidator.bmFreeShrdOwners, iEntry)); 1559 ASMAtomicBitSet(&pThread->LockValidator.bmFreeShrdOwners, iEntry); 1560 1561 rtThreadRelease(pThread); 1485 1562 } 1486 1563 else … … 1649 1726 1650 1727 1651 RTDECL(void) RTLockValidator 
SharedRecAddOwner(PRTLOCKVALRECSHRD pRec, RTTHREAD hThreadSelf, PCRTLOCKVALSRCPOS pSrcPos)1728 RTDECL(void) RTLockValidatorRecSharedResetOwner(PRTLOCKVALRECSHRD pRec, RTTHREAD hThread, PCRTLOCKVALSRCPOS pSrcPos) 1652 1729 { 1653 1730 AssertReturnVoid(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC); 1654 1731 if (!pRec->fEnabled) 1655 1732 return; 1656 AssertReturnVoid(hThreadSelf != NIL_RTTHREAD); 1657 AssertReturnVoid(hThreadSelf->u32Magic == RTTHREADINT_MAGIC); 1658 Assert(hThreadSelf == RTThreadSelf()); 1733 AssertReturnVoid(hThread == NIL_RTTHREAD || hThread->u32Magic == RTTHREADINT_MAGIC); 1734 1735 /* 1736 * Free all current owners. 1737 */ 1738 rtLockValidatorSerializeDetectionEnter(); 1739 while (ASMAtomicUoReadU32(&pRec->cEntries) > 0) 1740 { 1741 AssertReturnVoidStmt(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, rtLockValidatorSerializeDetectionLeave()); 1742 uint32_t iEntry = 0; 1743 uint32_t cEntries = pRec->cAllocated; 1744 PRTLOCKVALRECSHRDOWN volatile *papEntries = pRec->papOwners; 1745 while (iEntry < cEntries) 1746 { 1747 PRTLOCKVALRECSHRDOWN pEntry = (PRTLOCKVALRECSHRDOWN)ASMAtomicXchgPtr((void * volatile *)&papEntries[iEntry], NULL); 1748 if (pEntry) 1749 { 1750 ASMAtomicDecU32(&pRec->cEntries); 1751 rtLockValidatorSerializeDetectionLeave(); 1752 1753 rtLockValidatorRecSharedFreeOwner(pEntry); 1754 1755 rtLockValidatorSerializeDetectionEnter(); 1756 if (ASMAtomicUoReadU32(&pRec->cEntries) == 0) 1757 break; 1758 cEntries = pRec->cAllocated; 1759 papEntries = pRec->papOwners; 1760 } 1761 iEntry++; 1762 } 1763 } 1764 rtLockValidatorSerializeDetectionLeave(); 1765 1766 if (hThread != NIL_RTTHREAD) 1767 { 1768 /* 1769 * Allocate a new owner entry and insert it into the table. 
1770 */ 1771 PRTLOCKVALRECSHRDOWN pEntry = rtLockValidatorRecSharedAllocOwner(pRec, hThread, pSrcPos); 1772 if ( pEntry 1773 && !rtLockValidatorRecSharedAddOwner(pRec, pEntry)) 1774 rtLockValidatorRecSharedFreeOwner(pEntry); 1775 } 1776 } 1777 RT_EXPORT_SYMBOL(RTLockValidatorRecSharedResetOwner); 1778 1779 1780 RTDECL(void) RTLockValidatorRecSharedAddOwner(PRTLOCKVALRECSHRD pRec, RTTHREAD hThread, PCRTLOCKVALSRCPOS pSrcPos) 1781 { 1782 AssertReturnVoid(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC); 1783 if (!pRec->fEnabled) 1784 return; 1785 if (hThread == NIL_RTTHREAD) 1786 { 1787 hThread = RTThreadSelfAutoAdopt(); 1788 AssertReturnVoid(hThread != NIL_RTTHREAD); 1789 } 1790 AssertReturnVoid(hThread != NIL_RTTHREAD); 1791 AssertReturnVoid(hThread->u32Magic == RTTHREADINT_MAGIC); 1659 1792 1660 1793 /* … … 1665 1798 * so it can wait til later sometime. 1666 1799 */ 1667 PRTLOCKVALRECSHRDOWN pEntry = rtLockValidatorRecSharedFindOwner(pRec, hThread Self, NULL);1800 PRTLOCKVALRECSHRDOWN pEntry = rtLockValidatorRecSharedFindOwner(pRec, hThread, NULL); 1668 1801 if (pEntry) 1669 1802 { 1803 Assert(hThread == RTThreadSelf()); 1670 1804 pEntry->cRecursion++; 1671 1805 return; … … 1675 1809 * Allocate a new owner entry and insert it into the table. 
1676 1810 */ 1677 pEntry = rtLockValidatorRecSharedAllocOwner(pRec, hThread Self, pSrcPos);1811 pEntry = rtLockValidatorRecSharedAllocOwner(pRec, hThread, pSrcPos); 1678 1812 if ( pEntry 1679 1813 && !rtLockValidatorRecSharedAddOwner(pRec, pEntry)) 1680 1814 rtLockValidatorRecSharedFreeOwner(pEntry); 1681 1815 } 1682 RT_EXPORT_SYMBOL(RTLockValidator SharedRecAddOwner);1683 1684 1685 RTDECL(void) RTLockValidator SharedRecRemoveOwner(PRTLOCKVALRECSHRD pRec, RTTHREAD hThreadSelf)1816 RT_EXPORT_SYMBOL(RTLockValidatorRecSharedAddOwner); 1817 1818 1819 RTDECL(void) RTLockValidatorRecSharedRemoveOwner(PRTLOCKVALRECSHRD pRec, RTTHREAD hThread) 1686 1820 { 1687 1821 AssertReturnVoid(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC); 1688 1822 if (!pRec->fEnabled) 1689 1823 return; 1690 AssertReturnVoid(hThreadSelf != NIL_RTTHREAD); 1691 AssertReturnVoid(hThreadSelf->u32Magic == RTTHREADINT_MAGIC); 1824 AssertReturnVoid(hThread != NIL_RTTHREAD); 1825 AssertReturnVoid(hThread->u32Magic == RTTHREADINT_MAGIC); 1826 1827 /* 1828 * Find the entry hope it's a recursive one. 
1829 */ 1830 uint32_t iEntry = UINT32_MAX; /* shuts up gcc */ 1831 PRTLOCKVALRECSHRDOWN pEntry = rtLockValidatorRecSharedFindOwner(pRec, hThread, &iEntry); 1832 AssertReturnVoid(pEntry); 1833 if (pEntry->cRecursion > 1) 1834 { 1835 Assert(hThread == RTThreadSelf()); 1836 pEntry->cRecursion--; 1837 } 1838 else 1839 rtLockValidatorRecSharedRemoveAndFreeOwner(pRec, pEntry, iEntry); 1840 } 1841 RT_EXPORT_SYMBOL(RTLockValidatorRecSharedRemoveOwner); 1842 1843 1844 RTDECL(int) RTLockValidatorRecSharedCheckAndRelease(PRTLOCKVALRECSHRD pRec, RTTHREAD hThreadSelf) 1845 { 1846 AssertReturn(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, VERR_SEM_LV_INVALID_PARAMETER); 1847 if (!pRec->fEnabled) 1848 return VINF_SUCCESS; 1849 if (hThreadSelf == NIL_RTTHREAD) 1850 { 1851 hThreadSelf = RTThreadSelfAutoAdopt(); 1852 AssertReturn(hThreadSelf != NIL_RTTHREAD, VERR_SEM_LV_INVALID_PARAMETER); 1853 } 1692 1854 Assert(hThreadSelf == RTThreadSelf()); 1693 1855 1694 1856 /* 1695 * Find the entry hope it's a recursive one. 1696 */ 1697 uint32_t iEntry = UINT32_MAX; /* shuts up gcc */ 1698 PRTLOCKVALRECSHRDOWN pEntry = rtLockValidatorRecSharedFindOwner(pRec, hThreadSelf, &iEntry); 1699 AssertReturnVoid(pEntry); 1857 * Locate the entry for this thread in the table. 1858 */ 1859 uint32_t iEntry = 0; 1860 PRTLOCKVALRECSHRDOWN pEntry = rtLockValidatorRecSharedFindOwner(pRec, hThreadSelf, &iEntry); 1861 if (RT_UNLIKELY(!pEntry)) 1862 { 1863 rtLockValidatorComplainFirst("Not owner (shared)", NULL, hThreadSelf, (PRTLOCKVALRECUNION)pRec); 1864 rtLockValidatorComplainPanic(); 1865 return VERR_SEM_LV_NOT_OWNER; 1866 } 1867 1868 /* 1869 * Check the release order. 1870 */ 1871 if (pRec->hClass != NIL_RTLOCKVALIDATORCLASS) 1872 { 1873 /** @todo order validation */ 1874 } 1875 1876 /* 1877 * Release the ownership or unwind a level of recursion. 
1878 */ 1879 Assert(pEntry->cRecursion > 0); 1700 1880 if (pEntry->cRecursion > 1) 1701 1881 pEntry->cRecursion--; 1702 1882 else 1703 1883 rtLockValidatorRecSharedRemoveAndFreeOwner(pRec, pEntry, iEntry); 1704 } 1705 RT_EXPORT_SYMBOL(RTLockValidatorSharedRecRemoveOwner); 1706 1707 1708 RTDECL(int) RTLockValidatorRecSharedCheckAndRelease(PRTLOCKVALRECSHRD pRead, RTTHREAD hThreadSelf) 1709 { 1710 AssertReturn(pRead->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, VERR_SEM_LV_INVALID_PARAMETER); 1711 if (!pRead->fEnabled) 1884 1885 return VINF_SUCCESS; 1886 } 1887 1888 1889 RTDECL(int) RTLockValidatorRecSharedCheckSignaller(PRTLOCKVALRECSHRD pRec, RTTHREAD hThreadSelf) 1890 { 1891 AssertReturn(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, VERR_SEM_LV_INVALID_PARAMETER); 1892 if (!pRec->fEnabled) 1712 1893 return VINF_SUCCESS; 1713 1894 if (hThreadSelf == NIL_RTTHREAD) … … 1722 1903 */ 1723 1904 uint32_t iEntry = 0; 1724 PRTLOCKVALRECSHRDOWN pEntry = rtLockValidatorRecSharedFindOwner(pRead, hThreadSelf, &iEntry); 1725 AssertReturn(pEntry, VERR_SEM_LV_NOT_OWNER); 1726 1727 /* 1728 * Check the release order. 1729 */ 1730 if (pRead->hClass != NIL_RTLOCKVALIDATORCLASS) 1731 { 1732 /** @todo order validation */ 1733 } 1734 1735 /* 1736 * Release the ownership or unwind a level of recursion. 1737 */ 1738 Assert(pEntry->cRecursion > 0); 1739 if (pEntry->cRecursion > 1) 1740 pEntry->cRecursion--; 1741 else 1742 rtLockValidatorRecSharedRemoveAndFreeOwner(pRead, pEntry, iEntry); 1743 1905 PRTLOCKVALRECSHRDOWN pEntry = rtLockValidatorRecSharedFindOwner(pRec, hThreadSelf, &iEntry); 1906 if (RT_UNLIKELY(!pEntry)) 1907 { 1908 rtLockValidatorComplainFirst("Invalid signaller", NULL, hThreadSelf, (PRTLOCKVALRECUNION)pRec); 1909 rtLockValidatorComplainPanic(); 1910 return VERR_SEM_LV_NOT_SIGNALLER; 1911 } 1744 1912 return VINF_SUCCESS; 1745 1913 } -
trunk/src/VBox/Runtime/common/misc/thread.cpp
r25622 r25638 349 349 memcpy(pThread->szName, pszName, cchName); 350 350 pThread->szName[cchName] = '\0'; 351 pThread->cRefs = 2 + !!(fFlags & RTTHREADFLAGS_WAITABLE); /* And extra reference if waitable. */ 352 pThread->rc = VERR_PROCESS_RUNNING; /** @todo get a better error code! */ 353 pThread->enmType = enmType; 354 pThread->fFlags = fFlags; 355 pThread->fIntFlags = fIntFlags; 356 pThread->enmState = RTTHREADSTATE_INITIALIZING; 351 pThread->cRefs = 2 + !!(fFlags & RTTHREADFLAGS_WAITABLE); /* And extra reference if waitable. */ 352 pThread->rc = VERR_PROCESS_RUNNING; /** @todo get a better error code! */ 353 pThread->enmType = enmType; 354 pThread->fFlags = fFlags; 355 pThread->fIntFlags = fIntFlags; 356 pThread->enmState = RTTHREADSTATE_INITIALIZING; 357 pThread->fReallySleeping = false; 357 358 #ifdef IN_RING3 358 359 rtLockValidatorInitPerThread(&pThread->LockValidator); … … 1300 1301 * @param hThread The current thread. 1301 1302 * @param enmState The sleep state. 1302 */ 1303 RTDECL(void) RTThreadBlocking(RTTHREAD hThread, RTTHREADSTATE enmState) 1303 * @param fReallySleeping Really going to sleep now. 
1304 */ 1305 RTDECL(void) RTThreadBlocking(RTTHREAD hThread, RTTHREADSTATE enmState, bool fReallySleeping) 1304 1306 { 1305 1307 Assert(RTTHREAD_IS_SLEEPING(enmState)); 1306 1308 PRTTHREADINT pThread = hThread; 1307 if (pThread && rtThreadGetState(pThread) == RTTHREADSTATE_RUNNING) 1308 rtThreadSetState(pThread, enmState); 1309 if (pThread != NIL_RTTHREAD) 1310 { 1311 Assert(pThread == RTThreadSelf()); 1312 if (rtThreadGetState(pThread) == RTTHREADSTATE_RUNNING) 1313 rtThreadSetState(pThread, enmState); 1314 ASMAtomicWriteBool(&pThread->fReallySleeping, fReallySleeping); 1315 } 1309 1316 } 1310 1317 RT_EXPORT_SYMBOL(RTThreadBlocking); … … 1322 1329 RTDECL(void) RTThreadUnblocked(RTTHREAD hThread, RTTHREADSTATE enmCurState) 1323 1330 { 1324 if (hThread && rtThreadGetState(hThread) == enmCurState) 1325 rtThreadSetState(hThread, RTTHREADSTATE_RUNNING); 1331 PRTTHREADINT pThread = hThread; 1332 if (pThread != NIL_RTTHREAD) 1333 { 1334 Assert(pThread == RTThreadSelf()); 1335 ASMAtomicWriteBool(&pThread->fReallySleeping, false); 1336 if (rtThreadGetState(pThread) == enmCurState) 1337 rtThreadSetState(pThread, RTTHREADSTATE_RUNNING); 1338 } 1326 1339 } 1327 1340 RT_EXPORT_SYMBOL(RTThreadUnblocked); … … 1346 1359 } 1347 1360 RT_EXPORT_SYMBOL(RTThreadGetState); 1361 1362 1363 RTDECL(RTTHREADSTATE) RTThreadGetReallySleeping(RTTHREAD hThread) 1364 { 1365 RTTHREADSTATE enmState = RTTHREADSTATE_INVALID; 1366 PRTTHREADINT pThread = rtThreadGet(hThread); 1367 if (pThread) 1368 { 1369 enmState = rtThreadGetState(pThread); 1370 if (!ASMAtomicUoReadBool(&pThread->fReallySleeping)) 1371 enmState = RTTHREADSTATE_RUNNING; 1372 rtThreadRelease(pThread); 1373 } 1374 return enmState; 1375 } 1376 RT_EXPORT_SYMBOL(RTThreadGetReallySleeping); 1348 1377 1349 1378 -
trunk/src/VBox/Runtime/generic/critsect-generic.cpp
r25618 r25638 208 208 { 209 209 #ifdef RTCRITSECT_STRICT 210 intrc9 = RTLockValidatorRecExclCheckBlocking(pCritSect->pValidatorRec, hThreadSelf, pSrcPos,211 212 RTTHREADSTATE_CRITSECT);210 rc9 = RTLockValidatorRecExclCheckBlocking(pCritSect->pValidatorRec, hThreadSelf, pSrcPos, 211 !(pCritSect->fFlags & RTCRITSECT_FLAGS_NO_NESTING), 212 RTTHREADSTATE_CRITSECT, false); 213 213 if (RT_FAILURE(rc9)) 214 214 { … … 217 217 } 218 218 #else 219 RTThreadBlocking(hThreadSelf, RTTHREADSTATE_CRITSECT );219 RTThreadBlocking(hThreadSelf, RTTHREADSTATE_CRITSECT, false); 220 220 #endif 221 221 int rc = RTSemEventWait(pCritSect->EventSem, RT_INDEFINITE_WAIT); -
trunk/src/VBox/Runtime/generic/semrw-generic.cpp
r25620 r25638 136 136 #ifdef RTSEMRW_STRICT 137 137 RTLockValidatorRecExclInit(&pThis->ValidatorWrite, NIL_RTLOCKVALIDATORCLASS, RTLOCKVALIDATOR_SUB_CLASS_NONE, "RTSemRW", pThis); 138 RTLockValidatorRecSharedInit(&pThis->ValidatorRead, NIL_RTLOCKVALIDATORCLASS, RTLOCKVALIDATOR_SUB_CLASS_NONE, "RTSemRW", pThis );138 RTLockValidatorRecSharedInit(&pThis->ValidatorRead, NIL_RTLOCKVALIDATORCLASS, RTLOCKVALIDATOR_SUB_CLASS_NONE, "RTSemRW", pThis, false /*fSignaller*/); 139 139 RTLockValidatorRecMakeSiblings(&pThis->ValidatorWrite.Core, &pThis->ValidatorRead.Core); 140 140 #endif … … 272 272 Assert(pThis->cReads > 0); 273 273 #ifdef RTSEMRW_STRICT 274 RTLockValidator SharedRecAddOwner(&pThis->ValidatorRead, hThreadSelf, pSrcPos);274 RTLockValidatorRecSharedAddOwner(&pThis->ValidatorRead, hThreadSelf, pSrcPos); 275 275 #endif 276 276 … … 324 324 } 325 325 #ifdef RTSEMRW_STRICT 326 rc = RTLockValidatorRecSharedCheckBlocking(&pThis->ValidatorRead, hThreadSelf, pSrcPos, true, RTTHREADSTATE_RW_READ); 326 rc = RTLockValidatorRecSharedCheckBlocking(&pThis->ValidatorRead, hThreadSelf, pSrcPos, true, 327 RTTHREADSTATE_RW_READ, false); 327 328 if (RT_FAILURE(rc)) 328 329 break; 329 330 #else 330 RTThreadBlocking(hThreadSelf, RTTHREADSTATE_RW_READ );331 RTThreadBlocking(hThreadSelf, RTTHREADSTATE_RW_READ, false); 331 332 #endif 332 333 int rcWait; … … 368 369 Assert(pThis->cReads > 0); 369 370 #ifdef RTSEMRW_STRICT 370 RTLockValidator SharedRecAddOwner(&pThis->ValidatorRead, hThreadSelf, pSrcPos);371 RTLockValidatorRecSharedAddOwner(&pThis->ValidatorRead, hThreadSelf, pSrcPos); 371 372 #endif 372 373 … … 612 613 613 614 #ifdef RTSEMRW_STRICT 614 rc = RTLockValidatorRecExclCheckBlocking(&pThis->ValidatorWrite, hThreadSelf, pSrcPos, true, RTTHREADSTATE_RW_WRITE); 615 rc = RTLockValidatorRecExclCheckBlocking(&pThis->ValidatorWrite, hThreadSelf, pSrcPos, true, 616 RTTHREADSTATE_RW_WRITE, false); 615 617 if (RT_FAILURE(rc)) 616 618 break; 617 619 #else 618 RTThreadBlocking(hThreadSelf, 
RTTHREADSTATE_RW_WRITE );620 RTThreadBlocking(hThreadSelf, RTTHREADSTATE_RW_WRITE, false); 619 621 #endif 620 622 int rcWait; -
trunk/src/VBox/Runtime/include/internal/lockvalidator.h
r25622 r25638 70 70 bool afReserved[3]; 71 71 /** Bitmap indicating which entires are free (set) and allocated (clear). */ 72 uint32_t 72 uint32_t volatile bmFreeShrdOwners; 73 73 /** Statically allocated shared owner records */ 74 74 RTLOCKVALRECSHRDOWN aShrdOwners[32]; -
trunk/src/VBox/Runtime/include/internal/strict.h
r25611 r25638 43 43 #endif 44 44 45 /** @def RTSEMEVENT_STRICT 46 * Enables strictness checks and lock accounting of the RTSemEvent API. 47 */ 48 #if defined(DOXYGEN_RUNNING) || (!defined(RTSEMEVENT_STRICT) && defined(IN_RING3) && (defined(RT_STRICT) || defined(RT_LOCK_STRICT) || defined(RTSEM_STRICT))) 49 # define RTSEMEVENT_STRICT 50 #endif 51 52 /** @def RTSEMEVENTMULTI_STRICT 53 * Enables strictness checks and lock accounting of the RTSemEventMulti API. 54 */ 55 #if defined(DOXYGEN_RUNNING) || (!defined(RTSEMEVENTMULTI_STRICT) && defined(IN_RING3) && (defined(RT_STRICT) || defined(RT_LOCK_STRICT) || defined(RTSEM_STRICT))) 56 # define RTSEMEVENTMULTI_STRICT 57 #endif 45 58 46 59 /** @def RTSEMMUTEX_STRICT -
trunk/src/VBox/Runtime/include/internal/thread.h
r25618 r25638 67 67 /** The current thread state. */ 68 68 RTTHREADSTATE volatile enmState; 69 /** Set when really sleeping. */ 70 bool volatile fReallySleeping; 69 71 #if defined(RT_OS_WINDOWS) && defined(IN_RING3) 70 72 /** The thread handle 71 73 * This is not valid until the create function has returned! */ 72 74 uintptr_t hThread; 75 #endif 76 #if defined(RT_OS_LINUX) && defined(IN_RING3) 77 /** The thread ID. 78 * This is not valid before rtThreadMain has been called by the new thread. */ 79 pid_t tid; 73 80 #endif 74 81 /** The user event semaphore. */ -
trunk/src/VBox/Runtime/r3/linux/semevent-linux.cpp
r22959 r25638 5 5 6 6 /* 7 * Copyright (C) 2006-20 07Sun Microsystems, Inc.7 * Copyright (C) 2006-2010 Sun Microsystems, Inc. 8 8 * 9 9 * This file is part of VirtualBox Open Source Edition (OSE), as … … 30 30 31 31 #include <features.h> 32 #if __GLIBC_PREREQ(2,6) && !defined(IPRT_WITH_FUTEX_BASED_SEMS) 32 #if __GLIBC_PREREQ(2,6) && !defined(IPRT_WITH_FUTEX_BASED_SEMS) && !defined(DEBUG_bird) //// testing 1 2 3 33 33 34 34 /* … … 50 50 *******************************************************************************/ 51 51 #include <iprt/semaphore.h> 52 #include "internal/iprt.h" 53 54 #include <iprt/asm.h> 52 55 #include <iprt/assert.h> 53 #include <iprt/alloc.h>54 #include <iprt/asm.h>55 56 #include <iprt/err.h> 57 #include <iprt/lockvalidator.h> 58 #include <iprt/mem.h> 56 59 #include <iprt/time.h> 57 60 #include "internal/magics.h" 61 #include "internal/strict.h" 58 62 59 63 #include <errno.h> … … 87 91 /** The number of waiting threads */ 88 92 int32_t volatile cWaiters; 93 #ifdef RTSEMEVENT_STRICT 94 /** Signallers. */ 95 RTLOCKVALRECSHRD Signallers; 96 /** Indicates that lock validation should be performed. */ 97 bool volatile fEverHadSignallers; 98 #endif 89 99 }; 90 100 … … 118 128 pThis->cWaiters = 0; 119 129 pThis->fSignalled = 0; 130 #ifdef RTSEMEVENT_STRICT 131 RTLockValidatorRecSharedInit(&pThis->Signallers, 132 NIL_RTLOCKVALIDATORCLASS, RTLOCKVALIDATOR_SUB_CLASS_ANY, 133 "RTSemEvent", pThis, true /*fSignaller*/); 134 pThis->fEverHadSignallers = false; 135 #endif 120 136 *pEventSem = pThis; 121 137 return VINF_SUCCESS; … … 149 165 * Free the semaphore memory and be gone. 
150 166 */ 167 #ifdef RTSEMEVENT_STRICT 168 RTLockValidatorRecSharedDelete(&pThis->Signallers); 169 #endif 151 170 RTMemFree(pThis); 152 171 return VINF_SUCCESS; … … 160 179 */ 161 180 struct RTSEMEVENTINTERNAL *pThis = EventSem; 162 AssertReturn(VALID_PTR(pThis) && pThis->iMagic == RTSEMEVENT_MAGIC, 163 VERR_INVALID_HANDLE); 181 AssertPtrReturn(pThis, VERR_INVALID_HANDLE); 182 AssertReturn(pThis->iMagic == RTSEMEVENT_MAGIC, VERR_INVALID_HANDLE); 183 184 #ifdef RTSEMEVENT_STRICT 185 if (pThis->fEverHadSignallers) 186 { 187 int rc9 = RTLockValidatorRecSharedCheckSignaller(&pThis->Signallers, NIL_RTTHREAD); 188 if (RT_FAILURE(rc9)) 189 return rc9; 190 } 191 #endif 164 192 165 193 ASMAtomicWriteU32(&pThis->fSignalled, 1); … … 181 209 static int rtSemEventWait(RTSEMEVENT EventSem, unsigned cMillies, bool fAutoResume) 182 210 { 211 PCRTLOCKVALSRCPOS pSrcPos = NULL; 212 183 213 /* 184 214 * Validate input. 185 215 */ 186 216 struct RTSEMEVENTINTERNAL *pThis = EventSem; 187 Assert Return(VALID_PTR(pThis) && pThis->iMagic == RTSEMEVENT_MAGIC,188 217 AssertPtrReturn(pThis, VERR_INVALID_HANDLE); 218 AssertReturn(pThis->iMagic == RTSEMEVENT_MAGIC, VERR_INVALID_HANDLE); 189 219 190 220 /* 191 221 * Quickly check whether it's signaled. 192 222 */ 223 /** @todo this isn't fair if someone is already waiting on it. They should 224 * have the first go at it! 225 * (ASMAtomicReadS32(&pThis->cWaiters) == 0 || !cMillies) && ... */ 193 226 if (ASMAtomicCmpXchgU32(&pThis->fSignalled, 0, 1)) 194 227 return VINF_SUCCESS; … … 215 248 * The wait loop. 
216 249 */ 250 #ifdef RTSEMEVENT_STRICT 251 RTTHREAD hThreadSelf = RTThreadSelfAutoAdopt(); 252 #else 253 RTTHREAD hThreadSelf = RTThreadSelf(); 254 #endif 217 255 int rc = VINF_SUCCESS; 218 256 for (;;) 219 257 { 258 #ifdef RTSEMEVENT_STRICT 259 if (pThis->fEverHadSignallers) 260 { 261 rc = RTLockValidatorRecSharedCheckBlocking(&pThis->Signallers, hThreadSelf, pSrcPos, false, 262 RTTHREADSTATE_EVENT, true); 263 if (RT_FAILURE(rc)) 264 break; 265 } 266 #endif 267 RTThreadBlocking(hThreadSelf, RTTHREADSTATE_EVENT, true); 220 268 long lrc = sys_futex(&pThis->fSignalled, FUTEX_WAIT, 0, pTimeout, NULL, 0); 269 RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_EVENT); 221 270 if (RT_UNLIKELY(pThis->iMagic != RTSEMEVENT_MAGIC)) 222 271 { … … 284 333 } 285 334 335 336 RTDECL(void) RTSemEventSetSignaller(RTSEMEVENT hEventSem, RTTHREAD hThread) 337 { 338 #ifdef RTSEMEVENT_STRICT 339 struct RTSEMEVENTINTERNAL *pThis = hEventSem; 340 AssertPtrReturnVoid(pThis); 341 AssertReturnVoid(pThis->iMagic == RTSEMEVENT_MAGIC); 342 343 ASMAtomicWriteBool(&pThis->fEverHadSignallers, true); 344 RTLockValidatorRecSharedResetOwner(&pThis->Signallers, hThread, NULL); 345 #endif 346 } 347 348 349 RTDECL(void) RTSemEventAddSignaller(RTSEMEVENT hEventSem, RTTHREAD hThread) 350 { 351 #ifdef RTSEMEVENT_STRICT 352 struct RTSEMEVENTINTERNAL *pThis = hEventSem; 353 AssertPtrReturnVoid(pThis); 354 AssertReturnVoid(pThis->iMagic == RTSEMEVENT_MAGIC); 355 356 ASMAtomicWriteBool(&pThis->fEverHadSignallers, true); 357 RTLockValidatorRecSharedAddOwner(&pThis->Signallers, hThread, NULL); 358 #endif 359 } 360 361 362 RTDECL(void) RTSemEventRemoveSignaller(RTSEMEVENT hEventSem, RTTHREAD hThread) 363 { 364 #ifdef RTSEMEVENT_STRICT 365 struct RTSEMEVENTINTERNAL *pThis = hEventSem; 366 AssertPtrReturnVoid(pThis); 367 AssertReturnVoid(pThis->iMagic == RTSEMEVENT_MAGIC); 368 369 RTLockValidatorRecSharedRemoveOwner(&pThis->Signallers, hThread); 370 #endif 371 } 372 286 373 #endif /* glibc < 2.6 || 
IPRT_WITH_FUTEX_BASED_SEMS */ 287 374 -
trunk/src/VBox/Runtime/r3/linux/semmutex-linux.cpp
r25628 r25638 240 240 { 241 241 #ifdef RTSEMMUTEX_STRICT 242 int rc9 = RTLockValidatorRecExclCheckBlocking(&pThis->ValidatorRec, hThreadSelf, pSrcPos, true, RTTHREADSTATE_MUTEX); 242 int rc9 = RTLockValidatorRecExclCheckBlocking(&pThis->ValidatorRec, hThreadSelf, pSrcPos, true, 243 RTTHREADSTATE_MUTEX, true); 243 244 if (RT_FAILURE(rc9)) 244 245 return rc9; 245 246 #else 246 RTThreadBlocking(hThreadSelf, RTTHREADSTATE_MUTEX );247 RTThreadBlocking(hThreadSelf, RTTHREADSTATE_MUTEX, true); 247 248 #endif 248 249 } -
trunk/src/VBox/Runtime/r3/os2/sems-os2.cpp
r25624 r25638 124 124 125 125 126 RTDECL(void) RTSemEventSetSignaller(RTSEMEVENT hEventSem, RTTHREAD hThread) 127 { 128 /** @todo implement RTSemEventSetSignaller and friends for OS/2 */ 129 } 130 131 132 RTDECL(void) RTSemEventAddSignaller(RTSEMEVENT hEventSem, RTTHREAD hThread) 133 { 134 135 } 136 137 138 RTDECL(void) RTSemEventRemoveSignaller(RTSEMEVENT hEventSem, RTTHREAD hThread) 139 { 140 141 } 142 143 126 144 127 145
trunk/src/VBox/Runtime/r3/posix/semevent-posix.cpp
r14318 r25638 5 5 6 6 /* 7 * Copyright (C) 2006-20 07Sun Microsystems, Inc.7 * Copyright (C) 2006-2010 Sun Microsystems, Inc. 8 8 * 9 9 * This file is part of VirtualBox Open Source Edition (OSE), as … … 33 33 *******************************************************************************/ 34 34 #include <iprt/semaphore.h> 35 #include "internal/iprt.h" 36 37 #include <iprt/asm.h> 35 38 #include <iprt/assert.h> 36 #include <iprt/alloc.h>37 #include <iprt/asm.h>38 39 #include <iprt/err.h> 40 #include <iprt/mem.h> 41 #include <iprt/lockvalidator.h> 42 43 #include "internal/strict.h" 39 44 40 45 #include <errno.h> … … 72 77 /** Number of waiters. */ 73 78 volatile uint32_t cWaiters; 79 #ifdef RTSEMEVENT_STRICT 80 /** Signallers. */ 81 RTLOCKVALRECSHRD Signallers; 82 /** Indicates that lock validation should be performed. */ 83 bool volatile fEverHadSignallers; 84 #endif 74 85 }; 75 86 … … 85 96 86 97 87 /**88 * Validate an Event semaphore handle passed to one of the interface.89 *90 * @returns true if valid.91 * @returns false if invalid.92 * @param pIntEventSem Pointer to the event semaphore to validate.93 */94 inline bool rtsemEventValid(struct RTSEMEVENTINTERNAL *pIntEventSem)95 {96 if ((uintptr_t)pIntEventSem < 0x10000)97 return false;98 99 uint32_t u32 = pIntEventSem->u32State; /* this is volatile, so a explicit read like this is needed. */100 if ( u32 != EVENT_STATE_NOT_SIGNALED101 && u32 != EVENT_STATE_SIGNALED)102 return false;103 104 return true;105 }106 107 108 98 RTDECL(int) RTSemEventCreate(PRTSEMEVENT pEventSem) 109 99 { … … 113 103 * Allocate semaphore handle. 
114 104 */ 115 struct RTSEMEVENTINTERNAL *p IntEventSem= (struct RTSEMEVENTINTERNAL *)RTMemAlloc(sizeof(struct RTSEMEVENTINTERNAL));116 if (p IntEventSem)105 struct RTSEMEVENTINTERNAL *pThis = (struct RTSEMEVENTINTERNAL *)RTMemAlloc(sizeof(struct RTSEMEVENTINTERNAL)); 106 if (pThis) 117 107 { 118 108 /* … … 123 113 if (!rc) 124 114 { 125 rc = pthread_cond_init(&p IntEventSem->Cond, &CondAttr);115 rc = pthread_cond_init(&pThis->Cond, &CondAttr); 126 116 if (!rc) 127 117 { … … 133 123 if (!rc) 134 124 { 135 rc = pthread_mutex_init(&p IntEventSem->Mutex, &MutexAttr);125 rc = pthread_mutex_init(&pThis->Mutex, &MutexAttr); 136 126 if (!rc) 137 127 { … … 139 129 pthread_condattr_destroy(&CondAttr); 140 130 141 ASMAtomicXchgU32(&pIntEventSem->u32State, EVENT_STATE_NOT_SIGNALED); 142 ASMAtomicXchgU32(&pIntEventSem->cWaiters, 0); 143 144 *pEventSem = pIntEventSem; 131 ASMAtomicXchgU32(&pThis->u32State, EVENT_STATE_NOT_SIGNALED); 132 ASMAtomicXchgU32(&pThis->cWaiters, 0); 133 #ifdef RTSEMEVENT_STRICT 134 RTLockValidatorRecSharedInit(&pThis->Signallers, 135 NIL_RTLOCKVALIDATORCLASS, RTLOCKVALIDATOR_SUB_CLASS_ANY, 136 "RTSemEvent", pThis, true /*fSignaller*/); 137 pThis->fEverHadSignallers = false; 138 #endif 139 140 *pEventSem = pThis; 145 141 return VINF_SUCCESS; 146 142 } 147 143 pthread_mutexattr_destroy(&MutexAttr); 148 144 } 149 pthread_cond_destroy(&p IntEventSem->Cond);145 pthread_cond_destroy(&pThis->Cond); 150 146 } 151 147 pthread_condattr_destroy(&CondAttr); … … 153 149 154 150 rc = RTErrConvertFromErrno(rc); 155 RTMemFree(p IntEventSem);151 RTMemFree(pThis); 156 152 } 157 153 else … … 167 163 * Validate handle. 
168 164 */ 169 if (EventSem == NIL_RTSEMEVENT) /* don't bitch */ 165 struct RTSEMEVENTINTERNAL *pThis = EventSem; 166 if (pThis == NIL_RTSEMEVENT) /* don't bitch */ 170 167 return VERR_INVALID_HANDLE; 171 if (!rtsemEventValid(EventSem)) 172 { 173 AssertMsgFailed(("Invalid handle %p!\n", EventSem)); 174 return VERR_INVALID_HANDLE; 175 } 168 AssertPtrReturn(pThis, VERR_INVALID_HANDLE); 169 uint32_t u32 = pThis->u32State; 170 AssertReturn(u32 == EVENT_STATE_NOT_SIGNALED || u32 == EVENT_STATE_SIGNALED, VERR_INVALID_HANDLE); 176 171 177 172 /* 178 173 * Abort all waiters forcing them to return failure. 179 * 180 */ 181 struct RTSEMEVENTINTERNAL *pIntEventSem = EventSem; 174 */ 182 175 int rc; 183 176 for (int i = 30; i > 0; i--) 184 177 { 185 ASMAtomicXchgU32(&p IntEventSem->u32State, EVENT_STATE_UNINITIALIZED);186 rc = pthread_cond_destroy(&p IntEventSem->Cond);178 ASMAtomicXchgU32(&pThis->u32State, EVENT_STATE_UNINITIALIZED); 179 rc = pthread_cond_destroy(&pThis->Cond); 187 180 if (rc != EBUSY) 188 181 break; 189 pthread_cond_broadcast(&p IntEventSem->Cond);182 pthread_cond_broadcast(&pThis->Cond); 190 183 usleep(1000); 191 184 } … … 202 195 for (int i = 30; i > 0; i--) 203 196 { 204 rc = pthread_mutex_destroy(&p IntEventSem->Mutex);197 rc = pthread_mutex_destroy(&pThis->Mutex); 205 198 if (rc != EBUSY) 206 199 break; … … 216 209 * Free the semaphore memory and be gone. 217 210 */ 218 RTMemFree(pIntEventSem); 211 #ifdef RTSEMEVENT_STRICT 212 RTLockValidatorRecSharedDelete(&pThis->Signallers); 213 #endif 214 RTMemFree(pThis); 219 215 return VINF_SUCCESS; 220 216 } … … 226 222 * Validate input. 
227 223 */ 228 if (!rtsemEventValid(EventSem)) 229 { 230 AssertMsgFailed(("Invalid handle %p!\n", EventSem)); 231 return VERR_INVALID_HANDLE; 232 } 224 struct RTSEMEVENTINTERNAL *pThis = EventSem; 225 AssertPtrReturn(pThis, VERR_INVALID_HANDLE); 226 uint32_t u32 = pThis->u32State; 227 AssertReturn(u32 == EVENT_STATE_NOT_SIGNALED || u32 == EVENT_STATE_SIGNALED, VERR_INVALID_HANDLE); 228 229 #ifdef RTSEMEVENT_STRICT 230 if (pThis->fEverHadSignallers) 231 { 232 int rc9 = RTLockValidatorRecSharedCheckSignaller(&pThis->Signallers, NIL_RTTHREAD); 233 if (RT_FAILURE(rc9)) 234 return rc9; 235 } 236 #endif 233 237 234 238 /* 235 239 * Lock the mutex semaphore. 236 240 */ 237 struct RTSEMEVENTINTERNAL *pIntEventSem = EventSem; 238 int rc = pthread_mutex_lock(&pIntEventSem->Mutex); 241 int rc = pthread_mutex_lock(&pThis->Mutex); 239 242 if (rc) 240 243 { … … 246 249 * Check the state. 247 250 */ 248 if (p IntEventSem->u32State == EVENT_STATE_NOT_SIGNALED)249 { 250 ASMAtomicXchgU32(&p IntEventSem->u32State, EVENT_STATE_SIGNALED);251 rc = pthread_cond_signal(&p IntEventSem->Cond);251 if (pThis->u32State == EVENT_STATE_NOT_SIGNALED) 252 { 253 ASMAtomicXchgU32(&pThis->u32State, EVENT_STATE_SIGNALED); 254 rc = pthread_cond_signal(&pThis->Cond); 252 255 AssertMsg(!rc, ("Failed to signal event sem %p, rc=%d.\n", EventSem, rc)); 253 256 } 254 else if (p IntEventSem->u32State == EVENT_STATE_SIGNALED)255 { 256 rc = pthread_cond_signal(&p IntEventSem->Cond); /* give'm another kick... */257 else if (pThis->u32State == EVENT_STATE_SIGNALED) 258 { 259 rc = pthread_cond_signal(&pThis->Cond); /* give'm another kick... */ 257 260 AssertMsg(!rc, ("Failed to signal event sem %p, rc=%d. (2)\n", EventSem, rc)); 258 261 } … … 263 266 * Release the mutex and return. 
264 267 */ 265 int rc2 = pthread_mutex_unlock(&p IntEventSem->Mutex);268 int rc2 = pthread_mutex_unlock(&pThis->Mutex); 266 269 AssertMsg(!rc2, ("Failed to unlock event sem %p, rc=%d.\n", EventSem, rc)); 267 270 if (rc) … … 274 277 275 278 276 static int rtSemEventWait(RTSEMEVENT EventSem, unsigned cMillies, bool fAutoResume) 277 { 279 DECL_FORCE_INLINE(int) rtSemEventWait(RTSEMEVENT EventSem, unsigned cMillies, bool fAutoResume) 280 { 281 PCRTLOCKVALSRCPOS pSrcPos = NULL; 282 278 283 /* 279 284 * Validate input. 280 285 */ 281 if (!rtsemEventValid(EventSem)) 282 { 283 AssertMsgFailed(("Invalid handle %p!\n", EventSem)); 284 return VERR_INVALID_HANDLE; 285 } 286 struct RTSEMEVENTINTERNAL *pThis = EventSem; 287 AssertPtrReturn(pThis, VERR_INVALID_HANDLE); 288 uint32_t u32 = pThis->u32State; 289 AssertReturn(u32 == EVENT_STATE_NOT_SIGNALED || u32 == EVENT_STATE_SIGNALED, VERR_INVALID_HANDLE); 286 290 287 291 /* 288 292 * Timed or indefinite wait? 289 293 */ 290 struct RTSEMEVENTINTERNAL *pIntEventSem = EventSem;291 294 if (cMillies == RT_INDEFINITE_WAIT) 292 295 { 293 296 /* for fairness, yield before going to sleep. */ 294 if ( ASMAtomicIncU32(&p IntEventSem->cWaiters) > 1295 && p IntEventSem->u32State == EVENT_STATE_SIGNALED)297 if ( ASMAtomicIncU32(&pThis->cWaiters) > 1 298 && pThis->u32State == EVENT_STATE_SIGNALED) 296 299 pthread_yield(); 297 300 298 301 /* take mutex */ 299 int rc = pthread_mutex_lock(&p IntEventSem->Mutex);302 int rc = pthread_mutex_lock(&pThis->Mutex); 300 303 if (rc) 301 304 { 302 ASMAtomicDecU32(&p IntEventSem->cWaiters);305 ASMAtomicDecU32(&pThis->cWaiters); 303 306 AssertMsgFailed(("Failed to lock event sem %p, rc=%d.\n", EventSem, rc)); 304 307 return RTErrConvertFromErrno(rc); … … 308 311 { 309 312 /* check state. 
*/ 310 if (p IntEventSem->u32State == EVENT_STATE_SIGNALED)311 { 312 ASMAtomicXchgU32(&p IntEventSem->u32State, EVENT_STATE_NOT_SIGNALED);313 ASMAtomicDecU32(&p IntEventSem->cWaiters);314 rc = pthread_mutex_unlock(&p IntEventSem->Mutex);313 if (pThis->u32State == EVENT_STATE_SIGNALED) 314 { 315 ASMAtomicXchgU32(&pThis->u32State, EVENT_STATE_NOT_SIGNALED); 316 ASMAtomicDecU32(&pThis->cWaiters); 317 rc = pthread_mutex_unlock(&pThis->Mutex); 315 318 AssertMsg(!rc, ("Failed to unlock event sem %p, rc=%d.\n", EventSem, rc)); NOREF(rc); 316 319 return VINF_SUCCESS; 317 320 } 318 if (p IntEventSem->u32State == EVENT_STATE_UNINITIALIZED)319 { 320 rc = pthread_mutex_unlock(&p IntEventSem->Mutex);321 if (pThis->u32State == EVENT_STATE_UNINITIALIZED) 322 { 323 rc = pthread_mutex_unlock(&pThis->Mutex); 321 324 AssertMsg(!rc, ("Failed to unlock event sem %p, rc=%d.\n", EventSem, rc)); NOREF(rc); 322 325 return VERR_SEM_DESTROYED; … … 324 327 325 328 /* wait */ 326 rc = pthread_cond_wait(&pIntEventSem->Cond, &pIntEventSem->Mutex); 329 #ifdef RTSEMEVENT_STRICT 330 RTTHREAD hThreadSelf = RTThreadSelfAutoAdopt(); 331 if (pThis->fEverHadSignallers) 332 { 333 rc = RTLockValidatorRecSharedCheckBlocking(&pThis->Signallers, hThreadSelf, pSrcPos, false, 334 RTTHREADSTATE_EVENT, true); 335 if (RT_FAILURE(rc)) 336 { 337 ASMAtomicDecU32(&pThis->cWaiters); 338 pthread_mutex_unlock(&pThis->Mutex); 339 return rc; 340 } 341 } 342 #else 343 RTTHREAD hThreadSelf = RTThreadSelf(); 344 #endif 345 RTThreadBlocking(hThreadSelf, RTTHREADSTATE_EVENT, true); 346 rc = pthread_cond_wait(&pThis->Cond, &pThis->Mutex); 347 RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_EVENT); 327 348 if (rc) 328 349 { 329 350 AssertMsgFailed(("Failed to wait on event sem %p, rc=%d.\n", EventSem, rc)); 330 ASMAtomicDecU32(&p IntEventSem->cWaiters);331 int rc2 = pthread_mutex_unlock(&p IntEventSem->Mutex);351 ASMAtomicDecU32(&pThis->cWaiters); 352 int rc2 = pthread_mutex_unlock(&pThis->Mutex); 332 353 AssertMsg(!rc2, ("Failed 
to unlock event sem %p, rc=%d.\n", EventSem, rc2)); NOREF(rc2); 333 354 return RTErrConvertFromErrno(rc); … … 361 382 362 383 /* for fairness, yield before going to sleep. */ 363 if (ASMAtomicIncU32(&p IntEventSem->cWaiters) > 1)384 if (ASMAtomicIncU32(&pThis->cWaiters) > 1 && cMillies) 364 385 pthread_yield(); 365 386 366 387 /* take mutex */ 367 #ifdef RT_OS_DARWIN 368 int rc = pthread_mutex_lock(&pIntEventSem->Mutex); 369 #else 370 int rc = pthread_mutex_timedlock(&pIntEventSem->Mutex, &ts); 371 #endif 388 int rc = pthread_mutex_lock(&pThis->Mutex); 372 389 if (rc) 373 390 { 374 ASMAtomicDecU32(&p IntEventSem->cWaiters);391 ASMAtomicDecU32(&pThis->cWaiters); 375 392 AssertMsg(rc == ETIMEDOUT, ("Failed to lock event sem %p, rc=%d.\n", EventSem, rc)); 376 393 return RTErrConvertFromErrno(rc); … … 380 397 { 381 398 /* check state. */ 382 if (p IntEventSem->u32State == EVENT_STATE_SIGNALED)383 { 384 ASMAtomicXchgU32(&p IntEventSem->u32State, EVENT_STATE_NOT_SIGNALED);385 ASMAtomicDecU32(&p IntEventSem->cWaiters);386 rc = pthread_mutex_unlock(&p IntEventSem->Mutex);399 if (pThis->u32State == EVENT_STATE_SIGNALED) 400 { 401 ASMAtomicXchgU32(&pThis->u32State, EVENT_STATE_NOT_SIGNALED); 402 ASMAtomicDecU32(&pThis->cWaiters); 403 rc = pthread_mutex_unlock(&pThis->Mutex); 387 404 AssertMsg(!rc, ("Failed to unlock event sem %p, rc=%d.\n", EventSem, rc)); NOREF(rc); 388 405 return VINF_SUCCESS; 389 406 } 390 if (p IntEventSem->u32State == EVENT_STATE_UNINITIALIZED)391 { 392 rc = pthread_mutex_unlock(&p IntEventSem->Mutex);407 if (pThis->u32State == EVENT_STATE_UNINITIALIZED) 408 { 409 rc = pthread_mutex_unlock(&pThis->Mutex); 393 410 AssertMsg(!rc, ("Failed to unlock event sem %p, rc=%d.\n", EventSem, rc)); NOREF(rc); 394 411 return VERR_SEM_DESTROYED; 395 412 } 396 413 414 /* we're done if the timeout is 0. 
*/ 415 if (!cMillies) 416 { 417 ASMAtomicDecU32(&pThis->cWaiters); 418 rc = pthread_mutex_unlock(&pThis->Mutex); 419 return VERR_SEM_BUSY; 420 } 421 397 422 /* wait */ 398 rc = pthread_cond_timedwait(&pIntEventSem->Cond, &pIntEventSem->Mutex, &ts); 423 #ifdef RTSEMEVENT_STRICT 424 RTTHREAD hThreadSelf = RTThreadSelfAutoAdopt(); 425 if (pThis->fEverHadSignallers) 426 { 427 rc = RTLockValidatorRecSharedCheckBlocking(&pThis->Signallers, hThreadSelf, pSrcPos, false, 428 RTTHREADSTATE_EVENT, true); 429 if (RT_FAILURE(rc)) 430 { 431 ASMAtomicDecU32(&pThis->cWaiters); 432 pthread_mutex_unlock(&pThis->Mutex); 433 return rc; 434 } 435 } 436 #else 437 RTTHREAD hThreadSelf = RTThreadSelf(); 438 #endif 439 RTThreadBlocking(hThreadSelf, RTTHREADSTATE_EVENT, true); 440 rc = pthread_cond_timedwait(&pThis->Cond, &pThis->Mutex, &ts); 441 RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_EVENT); 399 442 if (rc && (rc != EINTR || !fAutoResume)) /* according to SuS this function shall not return EINTR, but linux man page says differently. 
*/ 400 443 { 401 444 AssertMsg(rc == ETIMEDOUT, ("Failed to wait on event sem %p, rc=%d.\n", EventSem, rc)); 402 ASMAtomicDecU32(&p IntEventSem->cWaiters);403 int rc2 = pthread_mutex_unlock(&p IntEventSem->Mutex);445 ASMAtomicDecU32(&pThis->cWaiters); 446 int rc2 = pthread_mutex_unlock(&pThis->Mutex); 404 447 AssertMsg(!rc2, ("Failed to unlock event sem %p, rc2=%d.\n", EventSem, rc2)); NOREF(rc2); 405 448 return RTErrConvertFromErrno(rc); … … 423 466 } 424 467 468 469 RTDECL(void) RTSemEventSetSignaller(RTSEMEVENT hEventSem, RTTHREAD hThread) 470 { 471 #ifdef RTSEMEVENT_STRICT 472 struct RTSEMEVENTINTERNAL *pThis = hEventSem; 473 AssertPtrReturnVoid(pThis); 474 uint32_t u32 = pThis->u32State; 475 AssertReturnVoid(u32 == EVENT_STATE_NOT_SIGNALED || u32 == EVENT_STATE_SIGNALED); 476 477 ASMAtomicWriteBool(&pThis->fEverHadSignallers, true); 478 RTLockValidatorRecSharedResetOwner(&pThis->Signallers, hThread, NULL); 479 #endif 480 } 481 482 483 RTDECL(void) RTSemEventAddSignaller(RTSEMEVENT hEventSem, RTTHREAD hThread) 484 { 485 #ifdef RTSEMEVENT_STRICT 486 struct RTSEMEVENTINTERNAL *pThis = hEventSem; 487 AssertPtrReturnVoid(pThis); 488 uint32_t u32 = pThis->u32State; 489 AssertReturnVoid(u32 == EVENT_STATE_NOT_SIGNALED || u32 == EVENT_STATE_SIGNALED); 490 491 ASMAtomicWriteBool(&pThis->fEverHadSignallers, true); 492 RTLockValidatorRecSharedAddOwner(&pThis->Signallers, hThread, NULL); 493 #endif 494 } 495 496 497 RTDECL(void) RTSemEventRemoveSignaller(RTSEMEVENT hEventSem, RTTHREAD hThread) 498 { 499 #ifdef RTSEMEVENT_STRICT 500 struct RTSEMEVENTINTERNAL *pThis = hEventSem; 501 AssertPtrReturnVoid(pThis); 502 uint32_t u32 = pThis->u32State; 503 AssertReturnVoid(u32 == EVENT_STATE_NOT_SIGNALED || u32 == EVENT_STATE_SIGNALED); 504 505 RTLockValidatorRecSharedRemoveOwner(&pThis->Signallers, hThread); 506 #endif 507 } 508 -
trunk/src/VBox/Runtime/r3/posix/semmutex-posix.cpp
r25628 r25638 188 188 #ifdef RTSEMMUTEX_STRICT 189 189 hThreadSelf = RTThreadSelfAutoAdopt(); 190 int rc9 = RTLockValidatorRecExclCheckOrderAndBlocking(&pThis->ValidatorRec, hThreadSelf, pSrcPos, true, RTTHREADSTATE_MUTEX); 190 int rc9 = RTLockValidatorRecExclCheckOrderAndBlocking(&pThis->ValidatorRec, hThreadSelf, pSrcPos, true, 191 RTTHREADSTATE_MUTEX, true); 191 192 if (RT_FAILURE(rc9)) 192 193 return rc9; 193 194 #else 194 195 hThreadSelf = RTThreadSelf(); 195 RTThreadBlocking(hThreadSelf, RTTHREADSTATE_MUTEX );196 RTThreadBlocking(hThreadSelf, RTTHREADSTATE_MUTEX, true); 196 197 #endif 197 198 } -
trunk/src/VBox/Runtime/r3/posix/semrw-posix.cpp
r25620 r25638 127 127 #ifdef RTSEMRW_STRICT 128 128 RTLockValidatorRecExclInit(&pThis->ValidatorWrite, NIL_RTLOCKVALIDATORCLASS, RTLOCKVALIDATOR_SUB_CLASS_NONE, "RTSemRW", pThis); 129 RTLockValidatorRecSharedInit(&pThis->ValidatorRead, NIL_RTLOCKVALIDATORCLASS, RTLOCKVALIDATOR_SUB_CLASS_NONE, "RTSemRW", pThis );129 RTLockValidatorRecSharedInit(&pThis->ValidatorRead, NIL_RTLOCKVALIDATORCLASS, RTLOCKVALIDATOR_SUB_CLASS_NONE, "RTSemRW", pThis, false /*fSignaller*/); 130 130 RTLockValidatorRecMakeSiblings(&pThis->ValidatorWrite.Core, &pThis->ValidatorRead.Core); 131 131 #endif … … 224 224 #ifdef RTSEMRW_STRICT 225 225 hThreadSelf = RTThreadSelfAutoAdopt(); 226 int rc9 = RTLockValidatorRecSharedCheckOrderAndBlocking(&pThis->ValidatorRead, hThreadSelf, pSrcPos, true, RTTHREADSTATE_RW_READ); 226 int rc9 = RTLockValidatorRecSharedCheckOrderAndBlocking(&pThis->ValidatorRead, hThreadSelf, pSrcPos, true, 227 RTTHREADSTATE_RW_READ, true); 227 228 if (RT_FAILURE(rc9)) 228 229 return rc9; 229 230 #else 230 231 hThreadSelf = RTThreadSelf(); 231 RTThreadBlocking(hThreadSelf, RTTHREADSTATE_RW_READ );232 RTThreadBlocking(hThreadSelf, RTTHREADSTATE_RW_READ, true); 232 233 #endif 233 234 } … … 280 281 ASMAtomicIncU32(&pThis->cReaders); 281 282 #ifdef RTSEMRW_STRICT 282 RTLockValidator SharedRecAddOwner(&pThis->ValidatorRead, hThreadSelf, pSrcPos);283 RTLockValidatorRecSharedAddOwner(&pThis->ValidatorRead, hThreadSelf, pSrcPos); 283 284 #endif 284 285 return VINF_SUCCESS; … … 416 417 #ifdef RTSEMRW_STRICT 417 418 hThreadSelf = RTThreadSelfAutoAdopt(); 418 int rc9 = RTLockValidatorRecExclCheckOrderAndBlocking(&pThis->ValidatorWrite, hThreadSelf, pSrcPos, true, RTTHREADSTATE_RW_WRITE); 419 int rc9 = RTLockValidatorRecExclCheckOrderAndBlocking(&pThis->ValidatorWrite, hThreadSelf, pSrcPos, true, 420 RTTHREADSTATE_RW_WRITE, true); 419 421 if (RT_FAILURE(rc9)) 420 422 return rc9; 421 423 #else 422 424 hThreadSelf = RTThreadSelf(); 423 RTThreadBlocking(hThreadSelf, RTTHREADSTATE_RW_WRITE 
);425 RTThreadBlocking(hThreadSelf, RTTHREADSTATE_RW_WRITE, true); 424 426 #endif 425 427 } -
trunk/src/VBox/Runtime/r3/posix/thread-posix.cpp
r13837 r25638 37 37 #include <pthread.h> 38 38 #include <signal.h> 39 #if defined(RT_OS_LINUX) 40 # include <unistd.h> 41 # include <sys/syscall.h> 42 #endif 39 43 #if defined(RT_OS_SOLARIS) 40 44 # include <sched.h> … … 170 174 PRTTHREADINT pThread = (PRTTHREADINT)pvArgs; 171 175 176 #if defined(RT_OS_LINUX) 177 /* 178 * Set the TID. 179 */ 180 pThread->tid = syscall(__NR_gettid); 181 ASMMemoryFence(); 182 #endif 183 172 184 /* 173 185 * Block SIGALRM - required for timer-posix.cpp. … … 204 216 if (!pThread->cbStack) 205 217 pThread->cbStack = 512*1024; 218 219 #ifdef RT_OS_LINUX 220 pThread->tid = -1; 221 #endif 206 222 207 223 /* -
trunk/src/VBox/Runtime/r3/win/semevent-win.cpp
r25381 r25638 124 124 } 125 125 126 127 RTDECL(void) RTSemEventSetSignaller(RTSEMEVENT hEventSem, RTTHREAD hThread) 128 { 129 /** @todo implement RTSemEventSetSignaller and friends for NT. */ 130 } 131 132 133 RTDECL(void) RTSemEventAddSignaller(RTSEMEVENT hEventSem, RTTHREAD hThread) 134 { 135 136 } 137 138 139 RTDECL(void) RTSemEventRemoverSignaller(RTSEMEVENT hEventSem, RTTHREAD hThread) 140 { 141 142 } 143 -
trunk/src/VBox/Runtime/r3/win/semmutex-win.cpp
r25628 r25638 182 182 #ifdef RTSEMMUTEX_STRICT 183 183 hThreadSelf = RTThreadSelfAutoAdopt(); 184 int rc9 = RTLockValidatorRecExclCheckOrderAndBlocking(&pThis->ValidatorRec, hThreadSelf, pSrcPos, true, RTTHREADSTATE_MUTEX); 184 int rc9 = RTLockValidatorRecExclCheckOrderAndBlocking(&pThis->ValidatorRec, hThreadSelf, pSrcPos, true, 185 RTTHREADSTATE_MUTEX, true); 185 186 if (RT_FAILURE(rc9)) 186 187 return rc9; 187 188 #else 188 189 hThreadSelf = RTThreadSelf(); 189 RTThreadBlocking(hThreadSelf, RTTHREADSTATE_MUTEX );190 RTThreadBlocking(hThreadSelf, RTTHREADSTATE_MUTEX, true); 190 191 #endif 191 192 } -
trunk/src/VBox/Runtime/testcase/tstRTLockValidator.cpp
r25626 r25638 58 58 static RTSEMRW g_ahSemRWs[32]; 59 59 static RTSEMMUTEX g_ahSemMtxes[32]; 60 static RTSEMEVENT g_hSemEvt; 61 static RTSEMEVENTMULTI g_hSemEvtMulti; 62 63 /** Multiple release event semaphore that is signalled by the main thread after 64 * it has started all the threads. */ 65 static RTSEMEVENTMULTI g_hThreadStarteEvt; 66 60 67 61 68 /** When to stop testing. */ … … 75 82 static bool testWaitForCritSectToBeOwned(PRTCRITSECT pCritSect) 76 83 { 84 RTTEST_CHECK(g_hTest, RTThreadGetState(RTThreadSelf()) == RTTHREADSTATE_RUNNING); 85 RTTEST_CHECK_RC_OK(g_hTest, RTSemEventMultiWait(g_hThreadStarteEvt, 10*1000)); 86 77 87 unsigned iLoop = 0; 78 88 while (!RTCritSectIsOwned(pCritSect)) … … 97 107 { 98 108 RTTEST_CHECK(g_hTest, RTThreadGetState(RTThreadSelf()) == RTTHREADSTATE_RUNNING); 109 RTTEST_CHECK_RC_OK(g_hTest, RTSemEventMultiWait(g_hThreadStarteEvt, 10*1000)); 110 99 111 unsigned iLoop = 0; 100 112 for (;;) … … 119 131 static bool testWaitForSemMutexToBeOwned(RTSEMMUTEX hSemMutex) 120 132 { 133 RTTEST_CHECK(g_hTest, RTThreadGetState(RTThreadSelf()) == RTTHREADSTATE_RUNNING); 134 RTTEST_CHECK_RC_OK(g_hTest, RTSemEventMultiWait(g_hThreadStarteEvt, 10*1000)); 135 121 136 unsigned iLoop = 0; 122 137 while (!RTSemMutexIsOwned(hSemMutex)) … … 126 141 } 127 142 return true; 128 }129 130 131 /**132 * Waits for a thread to enter a sleeping state.133 *134 * @returns true on success, false on abort.135 * @param hThread The thread.136 * @param enmDesiredState The desired thread sleep state.137 * @param pvLock The lock it should be sleeping on.138 */139 static bool testWaitForThreadToSleep(RTTHREAD hThread, RTTHREADSTATE enmDesiredState, void *pvLock)140 {141 RTTEST_CHECK(g_hTest, RTThreadGetState(RTThreadSelf()) == RTTHREADSTATE_RUNNING);142 for (unsigned iLoop = 0; ; iLoop++)143 {144 RTTHREADSTATE enmState = RTThreadGetState(hThread);145 if (RTTHREAD_IS_SLEEPING(enmState))146 {147 if ( enmState == enmDesiredState148 && ( !pvLock149 || ( pvLock == 
RTLockValidatorQueryBlocking(hThread)150 && !RTLockValidatorIsBlockedThreadInValidator(hThread) )151 )152 )153 return true;154 }155 else if ( enmState != RTTHREADSTATE_RUNNING156 && enmState != RTTHREADSTATE_INITIALIZING)157 return false;158 RTThreadSleep(g_fDoNotSpin ? 3600*1000 : iLoop > 256 ? 1 : 0);159 }160 143 } 161 144 … … 172 155 static int testWaitForAllOtherThreadsToSleep(RTTHREADSTATE enmDesiredState, uint32_t cWaitOn) 173 156 { 157 RTTEST_CHECK(g_hTest, RTThreadGetState(RTThreadSelf()) == RTTHREADSTATE_RUNNING); 158 RTTEST_CHECK_RC_OK(g_hTest, RTSemEventMultiWait(g_hThreadStarteEvt, 10*1000)); 159 174 160 RTTHREAD hThreadSelf = RTThreadSelf(); 175 for (uint32_t i = 0; i < g_cThreads; i++) 176 { 177 RTTHREAD hThread = g_ahThreads[i]; 178 if ( hThread != NIL_RTTHREAD 179 && hThread != hThreadSelf) 180 { 181 void *pvLock = NULL; 182 if (cWaitOn != UINT32_MAX) 161 for (uint32_t iOuterLoop = 0; ; iOuterLoop++) 162 { 163 uint32_t cMissing = 0; 164 uint32_t cWaitedOn = 0; 165 for (uint32_t i = 0; i < g_cThreads; i++) 166 { 167 RTTHREAD hThread = g_ahThreads[i]; 168 if (hThread == NIL_RTTHREAD) 169 cMissing++; 170 else if (hThread != hThreadSelf) 183 171 { 184 uint32_t j = (i + cWaitOn) % g_cThreads; 185 switch (enmDesiredState) 172 /* 173 * Figure out which lock to wait for. 174 */ 175 void *pvLock = NULL; 176 if (cWaitOn != UINT32_MAX) 186 177 { 187 case RTTHREADSTATE_CRITSECT: pvLock = &g_aCritSects[j]; break; 188 case RTTHREADSTATE_RW_WRITE: 189 case RTTHREADSTATE_RW_READ: pvLock = g_ahSemRWs[j]; break; 190 case RTTHREADSTATE_MUTEX: pvLock = g_ahSemMtxes[j]; break; 191 default: break; 178 uint32_t j = (i + cWaitOn) % g_cThreads; 179 switch (enmDesiredState) 180 { 181 case RTTHREADSTATE_CRITSECT: pvLock = &g_aCritSects[j]; break; 182 case RTTHREADSTATE_RW_WRITE: 183 case RTTHREADSTATE_RW_READ: pvLock = g_ahSemRWs[j]; break; 184 case RTTHREADSTATE_MUTEX: pvLock = g_ahSemMtxes[j]; break; 185 default: break; 186 } 187 } 188 189 /* 190 * Wait for this thread. 
191 */ 192 for (unsigned iLoop = 0; ; iLoop++) 193 { 194 RTTHREADSTATE enmState = RTThreadGetReallySleeping(hThread); 195 if (RTTHREAD_IS_SLEEPING(enmState)) 196 { 197 if ( enmState == enmDesiredState 198 && ( !pvLock 199 || ( pvLock == RTLockValidatorQueryBlocking(hThread) 200 && !RTLockValidatorIsBlockedThreadInValidator(hThread) ) 201 ) 202 && RTThreadGetNativeState(hThread) != RTTHREADNATIVESTATE_RUNNING 203 ) 204 break; 205 } 206 else if ( enmState != RTTHREADSTATE_RUNNING 207 && enmState != RTTHREADSTATE_INITIALIZING) 208 return VERR_INTERNAL_ERROR; 209 RTThreadSleep(g_fDoNotSpin ? 3600*1000 : iOuterLoop + iLoop > 256 ? 1 : 0); 210 cWaitedOn++; 192 211 } 193 212 } 194 bool fRet = testWaitForThreadToSleep(hThread, enmDesiredState, pvLock); 195 if (!fRet) 196 return VERR_INTERNAL_ERROR; 197 } 198 } 199 RTThreadSleep(4); /* fudge factor */ 213 } 214 215 if (!cMissing && !cWaitedOn) 216 break; 217 RTThreadSleep(g_fDoNotSpin ? 3600*1000 : iOuterLoop > 256 ? 1 : 0); 218 } 219 220 RTThreadSleep(0); /* fudge factor */ 200 221 return VINF_SUCCESS; 201 222 } … … 211 232 static int testStartThreads(uint32_t cThreads, PFNRTTHREAD pfnThread) 212 233 { 213 uint32_t i; 214 for (i = 0; i < RT_ELEMENTS(g_ahThreads); i++) 234 RTSemEventMultiReset(g_hThreadStarteEvt); 235 236 for (uint32_t i = 0; i < RT_ELEMENTS(g_ahThreads); i++) 215 237 g_ahThreads[i] = NIL_RTTHREAD; 216 238 217 for (i = 0; i < cThreads; i++) 218 RTTEST_CHECK_RC_OK_RET(g_hTest, 219 RTThreadCreateF(&g_ahThreads[i], pfnThread, (void *)(uintptr_t)i, 0, 220 RTTHREADTYPE_DEFAULT, RTTHREADFLAGS_WAITABLE, "thread-%02u", i), 221 rcCheck); 222 return VINF_SUCCESS; 239 int rc = VINF_SUCCESS; 240 for (uint32_t i = 0; i < cThreads; i++) 241 { 242 rc = RTThreadCreateF(&g_ahThreads[i], pfnThread, (void *)(uintptr_t)i, 0, 243 RTTHREADTYPE_DEFAULT, RTTHREADFLAGS_WAITABLE, "thread-%02u", i); 244 RTTEST_CHECK_RC_OK(g_hTest, rc); 245 if (RT_FAILURE(rc)) 246 break; 247 } 248 249 RTTEST_CHECK_RC_OK_RET(g_hTest, 
RTSemEventMultiSignal(g_hThreadStarteEvt), rcCheck); 250 return rc; 223 251 } 224 252 … … 250 278 static void testIt(uint32_t cThreads, uint32_t cPasses, uint32_t cSecs, PFNRTTHREAD pfnThread, const char *pszName) 251 279 { 280 /* 281 * Init test. 282 */ 252 283 if (cSecs) 253 284 RTTestSubF(g_hTest, "%s, %u threads, %u secs", pszName, cThreads, cSecs * cPasses); … … 266 297 RTTEST_CHECK_RC_RETV(g_hTest, RTSemMutexCreate(&g_ahSemMtxes[i]), VINF_SUCCESS); 267 298 } 268 299 RTTEST_CHECK_RC_RETV(g_hTest, RTSemEventCreate(&g_hSemEvt), VINF_SUCCESS); 300 RTTEST_CHECK_RC_RETV(g_hTest, RTSemEventMultiCreate(&g_hSemEvtMulti), VINF_SUCCESS); 301 RTTEST_CHECK_RC_RETV(g_hTest, RTSemEventMultiCreate(&g_hThreadStarteEvt), VINF_SUCCESS); 302 303 /* 304 * The test loop. 305 */ 269 306 uint32_t cLoops = 0; 270 307 uint32_t cDeadlocks = 0; … … 287 324 } 288 325 326 /* 327 * Cleanup. 328 */ 289 329 for (uint32_t i = 0; i < cThreads; i++) 290 330 { … … 293 333 RTTEST_CHECK_RC(g_hTest, RTSemMutexDestroy(g_ahSemMtxes[i]), VINF_SUCCESS); 294 334 } 335 RTTEST_CHECK_RC(g_hTest, RTSemEventDestroy(g_hSemEvt), VINF_SUCCESS); 336 RTTEST_CHECK_RC(g_hTest, RTSemEventMultiDestroy(g_hSemEvtMulti), VINF_SUCCESS); 337 RTTEST_CHECK_RC(g_hTest, RTSemEventMultiDestroy(g_hThreadStarteEvt), VINF_SUCCESS); 338 295 339 testWaitForThreads(10*1000, false); 296 340 341 /* 342 * Print results if applicable. 
343 */ 297 344 if (cSecs) 298 345 RTTestPrintf(g_hTest, RTTESTLVL_ALWAYS, "cLoops=%u cDeadlocks=%u (%u%%)\n", … … 531 578 532 579 580 static DECLCALLBACK(int) test6Thread(RTTHREAD ThreadSelf, void *pvUser) 581 { 582 uintptr_t i = (uintptr_t)pvUser; 583 PRTCRITSECT pMine = &g_aCritSects[i]; 584 PRTCRITSECT pNext = &g_aCritSects[(i + 1) % g_cThreads]; 585 586 RTTEST_CHECK_RC_RET(g_hTest, RTCritSectEnter(pMine), VINF_SUCCESS, rcCheck); 587 if (i & 1) 588 RTTEST_CHECK_RC(g_hTest, RTCritSectEnter(pMine), VINF_SUCCESS); 589 if (testWaitForCritSectToBeOwned(pNext)) 590 { 591 int rc; 592 if (i != g_iDeadlockThread) 593 { 594 RTTEST_CHECK_RC(g_hTest, rc = RTCritSectEnter(pNext), VINF_SUCCESS); 595 RTTEST_CHECK(g_hTest, RTThreadGetState(RTThreadSelf()) == RTTHREADSTATE_RUNNING); 596 if (RT_SUCCESS(rc)) 597 RTTEST_CHECK_RC(g_hTest, rc = RTCritSectLeave(pNext), VINF_SUCCESS); 598 } 599 else 600 { 601 RTTEST_CHECK_RC_OK(g_hTest, rc = testWaitForAllOtherThreadsToSleep(RTTHREADSTATE_CRITSECT, 1)); 602 if (RT_SUCCESS(rc)) 603 { 604 RTSemEventSetSignaller(g_hSemEvt, g_ahThreads[0]); 605 for (uint32_t iThread = 1; iThread < g_cThreads; iThread++) 606 RTSemEventAddSignaller(g_hSemEvt, g_ahThreads[iThread]); 607 RTTEST_CHECK(g_hTest, RTThreadGetState(RTThreadSelf()) == RTTHREADSTATE_RUNNING); 608 RTTEST_CHECK_RC(g_hTest, RTSemEventWait(g_hSemEvt, 10*1000), VERR_SEM_LV_DEADLOCK); 609 RTTEST_CHECK(g_hTest, RTThreadGetState(RTThreadSelf()) == RTTHREADSTATE_RUNNING); 610 RTTEST_CHECK_RC(g_hTest, RTSemEventSignal(g_hSemEvt), VINF_SUCCESS); 611 RTTEST_CHECK(g_hTest, RTThreadGetState(RTThreadSelf()) == RTTHREADSTATE_RUNNING); 612 RTTEST_CHECK_RC(g_hTest, RTSemEventWait(g_hSemEvt, 10*1000), VINF_SUCCESS); 613 RTTEST_CHECK(g_hTest, RTThreadGetState(RTThreadSelf()) == RTTHREADSTATE_RUNNING); 614 RTSemEventSetSignaller(g_hSemEvt, NIL_RTTHREAD); 615 } 616 } 617 RTTEST_CHECK(g_hTest, RTThreadGetState(RTThreadSelf()) == RTTHREADSTATE_RUNNING); 618 } 619 if (i & 1) 620 RTTEST_CHECK_RC(g_hTest, 
RTCritSectLeave(pMine), VINF_SUCCESS); 621 RTTEST_CHECK_RC(g_hTest, RTCritSectLeave(pMine), VINF_SUCCESS); 622 return VINF_SUCCESS; 623 } 624 625 626 static void test6(uint32_t cThreads, uint32_t cPasses) 627 { 628 testIt(cThreads, cPasses, 0, test6Thread, "event"); 629 } 630 631 533 632 static bool testIsLockValidationCompiledIn(void) 534 633 { … … 551 650 RTTEST_CHECK_RC_OK_RET(g_hTest, RTSemRWDestroy(hSemRW), false); 552 651 652 #if 0 /** @todo detect it on RTSemMutex... */ 653 RTSEMMUTEX hSemMtx; 654 RTTEST_CHECK_RC_OK_RET(g_hTest, RTSemMutexCreate(&hSemRW), false); 655 RTTEST_CHECK_RC_OK_RET(g_hTest, RTSemMutexRequest(hSemRW, 50), false); 656 /*??*/ 657 RTTEST_CHECK_RET(g_hTest, RT_FAILURE_NP(rc), false); 658 RTTEST_CHECK_RC_OK_RET(g_hTest, RTSemRWRelease(hSemRW), false); 659 RTTEST_CHECK_RC_OK_RET(g_hTest, RTSemRWDestroy(hSemRW), false); 660 #endif 661 662 RTSEMEVENT hSemEvt; 663 RTTEST_CHECK_RC_OK_RET(g_hTest, RTSemEventCreate(&hSemEvt), false); 664 RTSemEventSetSignaller(hSemEvt, RTThreadSelf()); 665 RTSemEventSetSignaller(hSemEvt, NIL_RTTHREAD); 666 rc = RTSemEventSignal(hSemEvt); 667 if (rc != VERR_SEM_LV_NOT_SIGNALLER) 668 fRet = false; 669 RTTEST_CHECK_RET(g_hTest, RT_FAILURE_NP(rc), false); 670 RTTEST_CHECK_RC_OK_RET(g_hTest, RTSemEventDestroy(hSemEvt), false); 671 553 672 return fRet; 554 673 } … … 577 696 * Some initial tests with verbose output. 
578 697 */ 579 #if 0698 #if 1 580 699 test1(3, 1); 581 700 test2(1, 1); 582 701 test2(3, 1); 702 test5(3, 1); 703 test6(3, 1); 583 704 #endif 584 test5(3, 1);585 705 586 706 /* … … 588 708 */ 589 709 RTLockValidatorSetQuiet(true); 590 #if 0591 710 test1( 2, 256); /* 256 * 4ms = 1s (approx); 4ms == fudge factor */ 592 711 test1( 3, 256); … … 596 715 test1(30, 256); 597 716 717 #if 1 598 718 test2( 1, 256); 599 719 test2( 2, 256); … … 611 731 test4(10, 1, 10); 612 732 test4(30, 1, 10); 613 #endif614 733 615 734 test5( 2, 256); … … 619 738 test5(15, 256); 620 739 test5(30, 256); 740 #endif 741 742 test6( 2, 256); 743 test6( 3, 256); 744 test6( 7, 256); 745 test6(10, 256); 746 test6(15, 256); 747 test6(30, 256); 621 748 622 749 return RTTestSummaryAndDestroy(g_hTest); -
trunk/src/VBox/VMM/VMMAll/PDMAllCritSect.cpp
r25618 r25638 140 140 int rc9 = RTLockValidatorRecExclCheckBlocking(pCritSect->s.Core.pValidatorRec, hThreadSelf, pSrcPos, 141 141 !(pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NO_NESTING), 142 RTTHREADSTATE_CRITSECT );142 RTTHREADSTATE_CRITSECT, true); 143 143 if (RT_FAILURE(rc9)) 144 144 return rc9; 145 145 # else 146 RTThreadBlocking(hThreadSelf, RTTHREADSTATE_CRITSECT );146 RTThreadBlocking(hThreadSelf, RTTHREADSTATE_CRITSECT, true); 147 147 # endif 148 148 int rc = SUPSemEventWaitNoResume(pSession, hEvent, RT_INDEFINITE_WAIT);
Note:
See TracChangeset
for help on using the changeset viewer.