Changeset 25368 in vbox
- Timestamp: Dec 14, 2009 4:31:40 PM
- svn:sync-xref-src-repo-rev: 55985
- Location: trunk
- Files: 2 added, 19 edited
trunk/include/VBox/pdmcritsect.h
r23350 r25368 48 48 { 49 49 /** Padding. */ 50 uint8_t padding[HC_ARCH_BITS == 64 ? 0xb8 : 0xa8];50 uint8_t padding[HC_ARCH_BITS == 32 ? 0x80 : 0xc0]; 51 51 #ifdef PDMCRITSECTINT_DECLARED 52 52 /** The internal structure (not normally visible). */ … … 57 57 VMMR3DECL(int) PDMR3CritSectInit(PVM pVM, PPDMCRITSECT pCritSect, const char *pszName); 58 58 VMMDECL(int) PDMCritSectEnter(PPDMCRITSECT pCritSect, int rcBusy); 59 VMMDECL(int) PDMCritSectEnterDebug(PPDMCRITSECT pCritSect, int rcBusy, RTHCUINTPTR uId, RT_SRC_POS_DECL); 59 60 VMMDECL(int) PDMCritSectTryEnter(PPDMCRITSECT pCritSect); 61 VMMDECL(int) PDMCritSectTryEnterDebug(PPDMCRITSECT pCritSect, RTHCUINTPTR uId, RT_SRC_POS_DECL); 60 62 VMMR3DECL(int) PDMR3CritSectEnterEx(PPDMCRITSECT pCritSect, bool fCallRing3); 61 63 VMMDECL(void) PDMCritSectLeave(PPDMCRITSECT pCritSect); … … 75 77 VMMR3DECL(void) PDMR3CritSectLeaveAll(PVM pVM); 76 78 79 /* Strict build: Remap the two enter calls to the debug versions. */ 80 #ifdef VBOX_STRICT 81 # ifdef ___iprt_asm_h 82 # define PDMCritSectEnter(pCritSect, rcBusy) PDMCritSectEnterDebug((pCritSect), (rcBusy), (uintptr_t)ASMReturnAddress(), RT_SRC_POS) 83 # define PDMCritSectTryEnter(pCritSect) PDMCritSectTryEnterDebug((pCritSect), (uintptr_t)ASMReturnAddress(), RT_SRC_POS) 84 # else 85 # define PDMCritSectEnter(pCritSect, rcBusy) PDMCritSectEnterDebug((pCritSect), (rcBusy), 0, RT_SRC_POS) 86 # define PDMCritSectTryEnter(pCritSect) PDMCritSectTryEnterDebug((pCritSect), 0, RT_SRC_POS) 87 # endif 88 #endif 89 77 90 /** @} */ 78 91 -
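The strict-build remapping added to pdmcritsect.h means callers keep using the plain names: when VBOX_STRICT is defined (and iprt/asm.h has been included), PDMCritSectEnter/PDMCritSectTryEnter expand to the new debug variants with the caller's return address and source position. A minimal sketch of an unchanged call site picking that up (the device structure and function are illustrative, not part of this changeset):

    #include <iprt/asm.h>          /* included first so the remap can use ASMReturnAddress() */
    #include <iprt/types.h>
    #include <VBox/pdmcritsect.h>
    #include <VBox/err.h>

    /* Hypothetical device state owning its own critical section. */
    typedef struct MYDEVSTATE
    {
        PDMCRITSECT CritSect;
        uint32_t    cAccesses;
    } MYDEVSTATE;

    static int myDevDoWork(MYDEVSTATE *pThis)
    {
        /*
         * In a VBOX_STRICT build this call expands to
         *   PDMCritSectEnterDebug(&pThis->CritSect, VERR_SEM_BUSY,
         *                         (uintptr_t)ASMReturnAddress(), RT_SRC_POS);
         * so the lock validator records who entered and from where.
         */
        int rc = PDMCritSectEnter(&pThis->CritSect, VERR_SEM_BUSY);
        if (RT_FAILURE(rc))
            return rc;

        pThis->cAccesses++;

        PDMCritSectLeave(&pThis->CritSect);
        return VINF_SUCCESS;
    }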
trunk/include/VBox/vm.h
r24799 r25368 957 957 struct REM s; 958 958 #endif 959 960 /** @def VM_REM_SIZE 961 * Must be multiple of 32 and coherent with REM_ENV_SIZE from REMInternal.h. */ 962 # define VM_REM_SIZE 0x11100 963 uint8_t padding[VM_REM_SIZE]; /* multiple of 32 */ 959 uint8_t padding[0x11100]; /* multiple of 64 */ 964 960 } rem; 965 961 -
trunk/include/iprt/critsect.h
r25168 r25368 33 33 #include <iprt/cdefs.h> 34 34 #include <iprt/types.h> 35 #include <iprt/lockvalidator.h> 35 36 #ifdef IN_RING3 36 37 #include <iprt/thread.h> … … 71 72 /** Magic used to validate the section state. 72 73 * RTCRITSECT_MAGIC is the value of an initialized & operational section. */ 73 volatile uint32_t u32Magic;74 volatile uint32_t u32Magic; 74 75 /** Number of lockers. 75 76 * -1 if the section is free. */ 76 volatile int32_t cLockers;77 volatile int32_t cLockers; 77 78 /** The owner thread. */ 78 volatile RTNATIVETHREAD NativeThreadOwner;79 volatile RTNATIVETHREAD NativeThreadOwner; 79 80 /** Number of nested enter operations performed. 80 81 * Greater or equal to 1 if owned, 0 when free. 81 82 */ 82 volatile int32_t cNestings;83 volatile int32_t cNestings; 83 84 /** Section flags - the RTCRITSECT_FLAGS_* \#defines. */ 84 uint32_t fFlags; 85 /** The semaphore to wait for. */ 86 RTSEMEVENT EventSem; 87 88 /** Data only used in strict mode for detecting and debugging deadlocks. */ 89 struct RTCRITSECTSTRICT 90 { 91 /** Strict: The current owner thread. */ 92 RTTHREAD volatile ThreadOwner; 93 /** Strict: Where the section was entered. */ 94 R3PTRTYPE(const char * volatile) pszEnterFile; 95 /** Strict: Where the section was entered. */ 96 uint32_t volatile u32EnterLine; 97 #if HC_ARCH_BITS == 64 || GC_ARCH_BITS == 64 98 /** Padding for correct alignment. */ 99 uint32_t u32Padding; 100 #endif 101 /** Strict: Where the section was entered. */ 102 RTUINTPTR volatile uEnterId; 103 } Strict; 85 uint32_t fFlags; 86 /** The semaphore to block on. */ 87 RTSEMEVENT EventSem; 88 /** Lock validator record. Only used in strict builds. */ 89 R3R0PTRTYPE(PRTLOCKVALIDATORREC) pValidatorRec; 90 /** Alignmnet padding. */ 91 RTHCPTR Alignment; 104 92 } RTCRITSECT; 93 AssertCompileSize(RTCRITSECT, HC_ARCH_BITS == 32 ? 32 : 48); 105 94 /** Pointer to a critical section. */ 106 95 typedef RTCRITSECT *PRTCRITSECT; … … 108 97 typedef const RTCRITSECT *PCRTCRITSECT; 109 98 110 /** RTCRITSECT::u32Magic value. */111 #define RTCRITSECT_MAGIC 0x778899aa99 /** RTCRITSECT::u32Magic value. (Hiromi Uehara) */ 100 #define RTCRITSECT_MAGIC UINT32_C(0x19790326) 112 101 113 102 /** If set, nesting(/recursion) is not allowed. */ 114 #define RTCRITSECT_FLAGS_NO_NESTING 1103 #define RTCRITSECT_FLAGS_NO_NESTING UINT32_C(0x00000001) 115 104 116 105 #ifdef IN_RING3 … … 143 132 * Enter a critical section. 144 133 * 145 * @returns VINF_SUCCESS on success. 146 * @returns VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.) 147 * @returns VERR_SEM_DESTROYED if RTCritSectDelete was called while waiting. 148 * @param pCritSect The critical section. 149 * @param pszFile Where we're entering the section. 150 * @param uLine Where we're entering the section. 151 * @param uId Where we're entering the section. 152 */ 153 RTDECL(int) RTCritSectEnterDebug(PRTCRITSECT pCritSect, const char *pszFile, unsigned uLine, RTUINTPTR uId); 154 155 /* in debug mode we'll redefine the enter call. */ 156 #ifdef RT_STRICT 157 # define RTCritSectEnter(pCritSect) RTCritSectEnterDebug(pCritSect, __FILE__, __LINE__, 0) 158 #endif 134 * @retval VINF_SUCCESS on success. 135 * @retval VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.) 136 * @retval VERR_SEM_DESTROYED if RTCritSectDelete was called while waiting. 137 * 138 * @param pCritSect The critical section. 139 * @param uId Where we're entering the section. 140 * @param RT_SRC_POS_DECL The source position. 
141 */ 142 RTDECL(int) RTCritSectEnterDebug(PRTCRITSECT pCritSect, RTHCUINTPTR uId, RT_SRC_POS_DECL); 159 143 160 144 /** 161 145 * Try enter a critical section. 162 146 * 163 * @returns VINF_SUCCESS on success. 164 * @returns VERR_SEM_BUSY if the critsect was owned. 165 * @returns VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.) 166 * @returns VERR_SEM_DESTROYED if RTCritSectDelete was called while waiting. 147 * @retval VINF_SUCCESS on success. 148 * @retval VERR_SEM_BUSY if the critsect was owned. 149 * @retval VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.) 150 * @retval VERR_SEM_DESTROYED if RTCritSectDelete was called while waiting. 151 * 167 152 * @param pCritSect The critical section. 168 153 */ … … 172 157 * Try enter a critical section. 173 158 * 174 * @returns VINF_SUCCESS on success. 175 * @returns VERR_SEM_BUSY if the critsect was owned. 176 * @returns VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.) 177 * @returns VERR_SEM_DESTROYED if RTCritSectDelete was called while waiting. 178 * @param pCritSect The critical section. 179 * @param pszFile Where we're entering the section. 180 * @param uLine Where we're entering the section. 181 * @param uId Where we're entering the section. 182 */ 183 RTDECL(int) RTCritSectTryEnterDebug(PRTCRITSECT pCritSect, const char *pszFile, unsigned uLine, RTUINTPTR uId); 184 185 /* in debug mode we'll redefine the try-enter call. */ 186 #ifdef RT_STRICT 187 # define RTCritSectTryEnter(pCritSect) RTCritSectTryEnterDebug(pCritSect, __FILE__, __LINE__, 0) 188 #endif 159 * @retval VINF_SUCCESS on success. 160 * @retval VERR_SEM_BUSY if the critsect was owned. 161 * @retval VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.) 162 * @retval VERR_SEM_DESTROYED if RTCritSectDelete was called while waiting. 163 * 164 * @param pCritSect The critical section. 165 * @param uId Where we're entering the section. 166 * @param RT_SRC_POS_DECL The source position. 167 */ 168 RTDECL(int) RTCritSectTryEnterDebug(PRTCRITSECT pCritSect, RTHCUINTPTR uId, RT_SRC_POS_DECL); 189 169 190 170 /** … … 203 183 * Therefore, avoid having to enter multiple critical sections! 204 184 */ 205 RTDECL(int) RTCritSectEnterMultiple( unsignedcCritSects, PRTCRITSECT *papCritSects);185 RTDECL(int) RTCritSectEnterMultiple(size_t cCritSects, PRTCRITSECT *papCritSects); 206 186 207 187 /** … … 216 196 * @param cCritSects Number of critical sections in the array. 217 197 * @param papCritSects Array of critical section pointers. 218 * @param pszFile Where we're entering the section.219 * @param uLine Where we're entering the section.220 198 * @param uId Where we're entering the section. 199 * @param RT_SRC_POS_DECL The source position. 221 200 * 222 201 * @remark See RTCritSectEnterMultiple(). 223 202 */ 224 RTDECL(int) RTCritSectEnterMultipleDebug(unsigned cCritSects, PRTCRITSECT *papCritSects, const char *pszFile, unsigned uLine, RTUINTPTR uId); 225 226 /* in debug mode we'll redefine the enter-multiple call. */ 227 #ifdef RT_STRICT 228 # define RTCritSectEnterMultiple(cCritSects, pCritSect) RTCritSectEnterMultipleDebug((cCritSects), (pCritSect), __FILE__, __LINE__, 0) 229 #endif 203 RTDECL(int) RTCritSectEnterMultipleDebug(size_t cCritSects, PRTCRITSECT *papCritSects, RTUINTPTR uId, RT_SRC_POS_DECL); 230 204 231 205 /** … … 244 218 * @param papCritSects Array of critical section pointers. 
245 219 */ 246 RTDECL(int) RTCritSectLeaveMultiple( unsignedcCritSects, PRTCRITSECT *papCritSects);220 RTDECL(int) RTCritSectLeaveMultiple(size_t cCritSects, PRTCRITSECT *papCritSects); 247 221 248 222 /** … … 326 300 } 327 301 302 /* Strict build: Remap the three enter calls to the debug versions. */ 303 #ifdef RT_STRICT 304 # ifdef ___iprt_asm_h 305 # define RTCritSectEnter(pCritSect) RTCritSectEnterDebug(pCritSect, (uintptr_t)ASMReturnAddress(), RT_SRC_POS) 306 # define RTCritSectTryEnter(pCritSect) RTCritSectTryEnterDebug(pCritSect, (uintptr_t)ASMReturnAddress(), RT_SRC_POS) 307 # define RTCritSectEnterMultiple(cCritSects, pCritSect) RTCritSectEnterMultipleDebug((cCritSects), (pCritSect), (uintptr_t)ASMReturnAddress(), RT_SRC_POS) 308 # else 309 # define RTCritSectEnter(pCritSect) RTCritSectEnterDebug(pCritSect, 0, RT_SRC_POS) 310 # define RTCritSectTryEnter(pCritSect) RTCritSectTryEnterDebug(pCritSect, 0, RT_SRC_POS) 311 # define RTCritSectEnterMultiple(cCritSects, pCritSect) RTCritSectEnterMultipleDebug((cCritSects), (pCritSect), 0, RT_SRC_POS) 312 # endif 313 #endif 314 328 315 /** @} */ 329 316 -
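All of the new debug entry points share the same IPRT convention: RT_SRC_POS_DECL adds the file/line/function parameters to a prototype, RT_SRC_POS supplies them at the outermost call site, and RT_SRC_POS_ARGS forwards them through intermediate layers so the validator still reports the original caller. A hedged sketch of a forwarding wrapper (the wrapper itself is hypothetical):

    #include <iprt/critsect.h>
    #include <iprt/err.h>

    /* Hypothetical helper that keeps the real call site visible to the lock
     * validator by forwarding the source position parameters unchanged. */
    static int myLockedIncrementDebug(PRTCRITSECT pCritSect, uint32_t *puValue,
                                      RTHCUINTPTR uId, RT_SRC_POS_DECL)
    {
        int rc = RTCritSectEnterDebug(pCritSect, uId, RT_SRC_POS_ARGS);
        if (RT_SUCCESS(rc))
        {
            *puValue += 1;
            RTCritSectLeave(pCritSect);
        }
        return rc;
    }

    /* Outermost caller supplies its own position with RT_SRC_POS. */
    static int myLockedIncrement(PRTCRITSECT pCritSect, uint32_t *puValue)
    {
        return myLockedIncrementDebug(pCritSect, puValue, 0 /*uId*/, RT_SRC_POS);
    }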
trunk/include/iprt/thread.h
r25295 r25368 61 61 /** Probably running. */ 62 62 RTTHREADSTATE_RUNNING, 63 63 64 /** Waiting on a critical section. */ 64 65 RTTHREADSTATE_CRITSECT, 65 /** Waiting on a mutex. */66 RTTHREADSTATE_MUTEX,67 66 /** Waiting on a event semaphore. */ 68 67 RTTHREADSTATE_EVENT, 69 68 /** Waiting on a event multiple wakeup semaphore. */ 70 RTTHREADSTATE_EVENTMULTI, 69 RTTHREADSTATE_EVENT_MULTI, 70 /** Waiting on a fast mutex. */ 71 RTTHREADSTATE_FAST_MUTEX, 72 /** Waiting on a mutex. */ 73 RTTHREADSTATE_MUTEX, 71 74 /** Waiting on a read write semaphore, read (shared) access. */ 72 75 RTTHREADSTATE_RW_READ, … … 75 78 /** The thread is sleeping. */ 76 79 RTTHREADSTATE_SLEEP, 80 /** Waiting on a spin mutex. */ 81 RTTHREADSTATE_SPIN_MUTEX, 82 77 83 /** The usual 32-bit size hack. */ 78 84 RTTHREADSTATE_32BIT_HACK = 0x7fffffff … … 80 86 81 87 /** Checks if a thread state indicates that the thread is sleeping. */ 82 #define RTTHREAD_IS_SLEEPING(enmState) ( (enmState) == RTTHREADSTATE_CRITSECT \ 83 || (enmState) == RTTHREADSTATE_MUTEX \ 84 || (enmState) == RTTHREADSTATE_EVENT \ 85 || (enmState) == RTTHREADSTATE_EVENTMULTI \ 86 || (enmState) == RTTHREADSTATE_RW_READ \ 87 || (enmState) == RTTHREADSTATE_RW_WRITE \ 88 || (enmState) == RTTHREADSTATE_SLEEP \ 89 ) 88 #define RTTHREAD_IS_SLEEPING(enmState) ((enmState) >= RTTHREADSTATE_CRITSECT) 90 89 91 90 /** … … 638 637 * This is a RT_STRICT method for debugging locks and detecting deadlocks. 639 638 * 640 * @param hThread The current thread. 641 * @param enmState The sleep state. 642 * @param u64Block The block data. A pointer or handle. 643 * @param pszFile Where we are blocking. 644 * @param uLine Where we are blocking. 645 * @param uId Where we are blocking. 646 */ 647 RTDECL(void) RTThreadBlocking(RTTHREAD hThread, RTTHREADSTATE enmState, uint64_t u64Block, 648 const char *pszFile, unsigned uLine, RTUINTPTR uId); 639 * @param hThread The current thread. 640 * @param enmState The sleep state. 641 * @param pvBlock Pointer to a RTLOCKVALIDATORREC structure. 642 * @param uId Where we are blocking. 643 * @param RT_SRC_POS_DECL Where we are blocking. 644 */ 645 RTDECL(void) RTThreadBlocking(RTTHREAD hThread, RTTHREADSTATE enmState, 646 PRTLOCKVALIDATORREC pValidatorRec, RTHCUINTPTR uId, RT_SRC_POS_DECL); 649 647 650 648 -
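Reordering RTTHREADSTATE so that every waiting state follows RTTHREADSTATE_RUNNING is what allows RTTHREAD_IS_SLEEPING to shrink to a single comparison. A small illustration of the property the macro now depends on (these compile-time checks are examples, not part of the commit):

    #include <iprt/thread.h>
    #include <iprt/assert.h>

    /* The simplified macro only works if all sleeping states sort after RUNNING. */
    AssertCompile(RTTHREADSTATE_CRITSECT   > RTTHREADSTATE_RUNNING);
    AssertCompile(RTTHREADSTATE_SPIN_MUTEX > RTTHREADSTATE_RUNNING);
    AssertCompile(RTTHREAD_IS_SLEEPING(RTTHREADSTATE_MUTEX));
    AssertCompile(!RTTHREAD_IS_SLEEPING(RTTHREADSTATE_RUNNING));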
trunk/include/iprt/types.h
r25310 r25368 1124 1124 #define NIL_RTLDRMOD 0 1125 1125 1126 /** Lock validator class handle. */ 1127 typedef R3R0PTRTYPE(struct RTLOCKVALIDATORCLASSINT *) RTLOCKVALIDATORCLASS; 1128 /** Pointer to a lock validator class handle. */ 1129 typedef RTLOCKVALIDATORCLASS *PRTLOCKVALIDATORCLASS; 1130 /** Nil lock validator class handle. */ 1131 #define NIL_RTLOCKVALIDATORCLASS ((RTLOCKVALIDATORCLASS)0) 1132 1126 1133 /** Ring-0 memory object handle. */ 1127 1134 typedef R0PTRTYPE(struct RTR0MEMOBJINTERNAL *) RTR0MEMOBJ; … … 1460 1467 1461 1468 1469 /** Pointer to a lock validator record. 1470 * The structure definition is found in iprt/lockvalidator.h. */ 1471 typedef struct RTLOCKVALIDATORREC *PRTLOCKVALIDATORREC; 1472 1473 1462 1474 #ifdef __cplusplus 1463 1475 /** -
trunk/src/VBox/Main/AutoLock.cpp
r25350 r25368 292 292 #endif 293 293 294 #if defined(DEBUG) && defined(VBOX_WITH_DEBUG_LOCK_VALIDATOR) 295 RTCritSectEnterDebug(&m->sem, pszFile, iLine, (uintptr_t)ASMReturnAddress()); 296 #elif defined(DEBUG) 297 RTCritSectEnterDebug(&m->sem, 298 "WriteLockHandle::lockWrite() return address >>>", 299 0, (RTUINTPTR)ASMReturnAddress()); 294 #if defined(RT_STRICT) && defined(VBOX_WITH_DEBUG_LOCK_VALIDATOR) 295 RTCritSectEnterDebug(&m->sem, (uintptr_t)ASMReturnAddress(), RT_SRC_POS_ARGS); 296 #elif defined(RT_STRICT) 297 RTCritSectEnterDebug(&m->sem, (uintptr_t)ASMReturnAddress(), 298 "return address >>>", 0, __PRETTY_FUNCTION__); 300 299 #else 301 300 RTCritSectEnter(&m->sem); -
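When no real source position is available (the lock is taken from inside a generic C++ wrapper), AutoLock.cpp passes the caller's return address as uId and a descriptive string plus __PRETTY_FUNCTION__ in the position slots. A condensed sketch of that fallback (GCC-style __PRETTY_FUNCTION__, as in the file above; the wrapper function is hypothetical):

    #include <iprt/asm.h>
    #include <iprt/critsect.h>

    /* Hand the validator our caller's return address so lock reports point at
     * user code rather than at this wrapper. */
    static void myLockWrite(PRTCRITSECT pCritSect)
    {
    #if defined(RT_STRICT)
        RTCritSectEnterDebug(pCritSect, (uintptr_t)ASMReturnAddress(),
                             "return address >>>", 0, __PRETTY_FUNCTION__);
    #else
        RTCritSectEnter(pCritSect);
    #endif
    }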
trunk/src/VBox/Runtime/Makefile.kmk
r25296 r25368 240 240 common/misc/handletablectx.cpp \ 241 241 common/misc/handletablesimple.cpp \ 242 common/misc/lockvalidator.cpp \ 242 243 common/misc/message.cpp \ 243 244 common/misc/once.cpp \ … … 1257 1258 1258 1259 RuntimeR0Drv_SOURCES.win = \ 1260 common/misc/lockvalidator.cpp \ 1259 1261 common/misc/thread.cpp \ 1260 1262 common/string/memcmp.asm \ … … 1299 1301 common/misc/RTAssertMsg1Weak.cpp \ 1300 1302 common/misc/RTAssertMsg2Weak.cpp \ 1303 common/misc/lockvalidator.cpp \ 1301 1304 common/misc/thread.cpp \ 1302 1305 common/string/memchr.asm \ … … 1343 1346 common/string/strpbrk.cpp \ 1344 1347 \ 1348 common/misc/lockvalidator.cpp \ 1345 1349 common/misc/thread.cpp \ 1346 1350 generic/RTAssertShouldPanic-generic.cpp \ … … 1390 1394 common/err/RTErrConvertFromErrno.cpp \ 1391 1395 common/err/RTErrConvertToErrno.cpp \ 1396 common/misc/lockvalidator.cpp \ 1392 1397 common/misc/thread.cpp \ 1393 1398 common/string/memchr.asm \ … … 1424 1429 common/err/RTErrConvertFromErrno.cpp \ 1425 1430 common/err/RTErrConvertToErrno.cpp \ 1431 common/misc/lockvalidator.cpp \ 1426 1432 common/misc/thread.cpp \ 1427 1433 common/string/memchr.asm \ -
trunk/src/VBox/Runtime/common/misc/thread.cpp
r25000 r25368 41 41 #include <iprt/alloc.h> 42 42 #include <iprt/assert.h> 43 #include <iprt/lockvalidator.h> 43 44 #include <iprt/semaphore.h> 44 45 #ifdef IN_RING0 … … 48 49 #include <iprt/err.h> 49 50 #include <iprt/string.h> 51 #include "internal/magics.h" 50 52 #include "internal/thread.h" 51 53 #include "internal/sched.h" … … 195 197 196 198 199 /** 200 * Gets the thread state. 201 * 202 * @returns The thread state. 203 * @param pThread The thread. 204 */ 205 DECLINLINE(RTTHREADSTATE) rtThreadGetState(PRTTHREADINT pThread) 206 { 207 return pThread->enmState; 208 } 209 210 211 /** 212 * Sets the thread state. 213 * 214 * @param pThread The thread. 215 * @param enmNewState The new thread state. 216 */ 217 DECLINLINE(void) rtThreadSetState(PRTTHREADINT pThread, RTTHREADSTATE enmNewState) 218 { 219 AssertCompile(sizeof(pThread->enmState) == sizeof(uint32_t)); 220 ASMAtomicWriteU32((uint32_t volatile *)&pThread->enmState, enmNewState); 221 } 197 222 198 223 #ifdef IN_RING3 … … 255 280 { 256 281 rtThreadInsert(pThread, NativeThread); 257 ASMAtomicWriteSize(&pThread->enmState, RTTHREADSTATE_RUNNING);282 rtThreadSetState(pThread, RTTHREADSTATE_RUNNING); 258 283 rtThreadRelease(pThread); 259 284 } … … 376 401 * it should not be reinserted at this point. 377 402 */ 378 if ( pThread->enmState!= RTTHREADSTATE_TERMINATED)403 if (rtThreadGetState(pThread) != RTTHREADSTATE_TERMINATED) 379 404 { 380 405 /* … … 588 613 */ 589 614 pThread->rc = rc; 590 ASMAtomicWriteSize(&pThread->enmState, RTTHREADSTATE_TERMINATED);615 rtThreadSetState(pThread, RTTHREADSTATE_TERMINATED); 591 616 ASMAtomicOrU32(&pThread->fIntFlags, RTTHREADINT_FLAGS_TERMINATED); 592 617 if (pThread->EventTerminated != NIL_RTSEMEVENTMULTI) … … 634 659 * Call thread function and terminate when it returns. 635 660 */ 636 ASMAtomicWriteSize(&pThread->enmState, RTTHREADSTATE_RUNNING);661 rtThreadSetState(pThread, RTTHREADSTATE_RUNNING); 637 662 rc = pThread->pfnThread(pThread, pThread->pvUser); 638 663 … … 1380 1405 1381 1406 /** 1407 * Translate a thread state into a string. 1408 * 1409 * @returns Pointer to a read-only string containing the state name. 1410 * @param enmState The state. 1411 */ 1412 static const char *rtThreadStateName(RTTHREADSTATE enmState) 1413 { 1414 switch (enmState) 1415 { 1416 case RTTHREADSTATE_INVALID: return "INVALID"; 1417 case RTTHREADSTATE_INITIALIZING: return "INITIALIZING"; 1418 case RTTHREADSTATE_TERMINATED: return "TERMINATED"; 1419 case RTTHREADSTATE_RUNNING: return "RUNNING"; 1420 case RTTHREADSTATE_CRITSECT: return "CRITSECT"; 1421 case RTTHREADSTATE_EVENT: return "EVENT"; 1422 case RTTHREADSTATE_EVENT_MULTI: return "EVENT_MULTI"; 1423 case RTTHREADSTATE_FAST_MUTEX: return "FAST_MUTEX"; 1424 case RTTHREADSTATE_MUTEX: return "MUTEX"; 1425 case RTTHREADSTATE_RW_READ: return "RW_READ"; 1426 case RTTHREADSTATE_RW_WRITE: return "RW_WRITE"; 1427 case RTTHREADSTATE_SLEEP: return "SLEEP"; 1428 case RTTHREADSTATE_SPIN_MUTEX: return "SPIN_MUTEX"; 1429 default: return "UnknownThreadState"; 1430 } 1431 } 1432 1433 1434 /** 1382 1435 * Bitch about a deadlock. 1383 1436 * 1384 * @param pThread This thread. 1385 * @param pCur The thread we're deadlocking with. 1386 * @param enmState The sleep state. 1387 * @param u64Block The block data. A pointer or handle. 1388 * @param pszFile Where we are gonna block. 1389 * @param uLine Where we are gonna block. 1390 * @param uId Where we are gonna block. 
1391 */ 1392 static void rtThreadDeadlock(PRTTHREADINT pThread, PRTTHREADINT pCur, RTTHREADSTATE enmState, uint64_t u64Block, 1393 const char *pszFile, unsigned uLine, RTUINTPTR uId) 1394 { 1395 AssertMsg1(pCur == pThread ? "!!Deadlock detected!!" : "!!Deadlock exists!!", uLine, pszFile, ""); 1437 * @param pThread This thread. 1438 * @param pCur The thread we're deadlocking with. 1439 * @param enmState The sleep state. 1440 * @param pRec The lock validator record we're going to block on. 1441 * @param RT_SRC_POS_DECL Where we are going to deadlock. 1442 * @param uId Where we are going to deadlock. 1443 */ 1444 static void rtThreadDeadlock(PRTTHREADINT pThread, PRTTHREADINT pCur, RTTHREADSTATE enmState, 1445 PRTLOCKVALIDATORREC pRec, RTHCUINTPTR uId, RT_SRC_POS_DECL) 1446 { 1447 AssertMsg1(pCur == pThread ? "!!Deadlock detected!!" : "!!Deadlock exists!!", iLine, pszFile, pszFunction); 1396 1448 1397 1449 /* … … 1406 1458 * Print info on pCur. Determin next while doing so. 1407 1459 */ 1408 AssertMsg2(" #% d: %RTthrd/%RTnthrd %s: %s(%u) %RTptr\n",1460 AssertMsg2(" #%u: %RTthrd/%RTnthrd %s: %s(%u) %RTptr\n", 1409 1461 iEntry, pCur, pCur->Core.Key, pCur->szName, 1410 pCur->pszBlockFile, pCur->uBlockLine, pCur->uBlockId); 1411 PRTTHREADINT pNext = NULL; 1412 switch (pCur->enmState) 1462 pCur->pszBlockFile, pCur->uBlockLine, pCur->pszBlockFunction, pCur->uBlockId); 1463 PRTTHREADINT pNext = NULL; 1464 RTTHREADSTATE enmCurState = rtThreadGetState(pCur); 1465 switch (enmCurState) 1413 1466 { 1414 1467 case RTTHREADSTATE_CRITSECT: 1468 case RTTHREADSTATE_EVENT: 1469 case RTTHREADSTATE_EVENT_MULTI: 1470 case RTTHREADSTATE_FAST_MUTEX: 1471 case RTTHREADSTATE_MUTEX: 1472 case RTTHREADSTATE_RW_READ: 1473 case RTTHREADSTATE_RW_WRITE: 1474 case RTTHREADSTATE_SPIN_MUTEX: 1415 1475 { 1416 PRTCRITSECT pCritSect = pCur->Block.pCritSect; 1417 if (pCur->enmState != RTTHREADSTATE_CRITSECT) 1476 PRTLOCKVALIDATORREC pCurRec = pCur->Block.pRec; 1477 RTTHREADSTATE enmCurState2 = rtThreadGetState(pCur); 1478 if (enmCurState2 != enmCurState) 1418 1479 { 1419 AssertMsg2("Impossible!!!\n"); 1480 AssertMsg2(" Impossible!!! enmState=%s -> %s (%d)\n", 1481 rtThreadStateName(enmCurState), rtThreadStateName(enmCurState2), enmCurState2); 1420 1482 break; 1421 1483 } 1422 if (VALID_PTR(pCritSect) && RTCritSectIsInitialized(pCritSect)) 1484 if ( VALID_PTR(pCurRec) 1485 && pCurRec->u32Magic == RTLOCKVALIDATORREC_MAGIC) 1423 1486 { 1424 AssertMsg2(" Waiting on CRITSECT %p: Entered %s(%u) %RTptr\n",1425 pCritSect, pCritSect->Strict.pszEnterFile,1426 pC ritSect->Strict.u32EnterLine, pCritSect->Strict.uEnterId);1427 pNext = pC ritSect->Strict.ThreadOwner;1487 AssertMsg2(" Waiting on %s %p [%s]: Entered %s(%u) %s %p\n", 1488 rtThreadStateName(enmCurState), pCurRec->hLock, pCurRec->pszName, 1489 pCurRec->pszFile, pCurRec->uLine, pCurRec->pszFunction, pCurRec->uId); 1490 pNext = pCurRec->hThread; 1428 1491 } 1492 else if (VALID_PTR(pCurRec)) 1493 AssertMsg2(" Waiting on %s pCurRec=%p: invalid magic number: %#x\n", 1494 rtThreadStateName(enmCurState), pCurRec, pCurRec->u32Magic); 1429 1495 else 1430 AssertMsg2(" Waiting on CRITSECT %p: invalid pointer or uninitialized critsect\n", pCritSect); 1496 AssertMsg2(" Waiting on %s pCurRec=%p: invalid pointer\n", 1497 rtThreadStateName(enmCurState), pCurRec); 1431 1498 break; 1432 1499 } 1433 1500 1434 1501 default: 1435 AssertMsg2(" Impossible!!! enmState=% d\n", pCur->enmState);1502 AssertMsg2(" Impossible!!! 
enmState=%s (%d)\n", rtThreadStateName(enmCurState), enmCurState); 1436 1503 break; 1437 1504 } … … 1466 1533 * This is a RT_STRICT method for debugging locks and detecting deadlocks. 1467 1534 * 1468 * @param hThread The current thread.1469 * @param enmState The sleep state.1470 * @param u64Block The block data. A pointer or handle.1471 * @param pszFileWhere we are blocking.1472 * @param uLineWhere we are blocking.1473 * @param uId Where we are blocking.1474 */ 1475 RTDECL(void) RTThreadBlocking(RTTHREAD hThread, RTTHREADSTATE enmState, uint64_t u64Block, 1476 const char *pszFile, unsigned uLine, RTUINTPTR uId) 1535 * @param hThread The current thread. 1536 * @param enmState The sleep state. 1537 * @param pvBlock Pointer to a RTLOCKVALIDATORREC structure. 1538 * @param uId Where we are blocking. 1539 * @param RT_SRC_POS_DECL Where we are blocking. 1540 */ 1541 RTDECL(void) RTThreadBlocking(RTTHREAD hThread, RTTHREADSTATE enmState, 1542 PRTLOCKVALIDATORREC pValidatorRec, RTHCUINTPTR uId, RT_SRC_POS_DECL) 1543 1477 1544 { 1478 1545 PRTTHREADINT pThread = hThread; 1479 1546 Assert(RTTHREAD_IS_SLEEPING(enmState)); 1480 if (pThread && pThread->enmState== RTTHREADSTATE_RUNNING)1547 if (pThread && rtThreadGetState(pThread) == RTTHREADSTATE_RUNNING) 1481 1548 { 1482 1549 /** @todo This has to be serialized! The deadlock detection isn't 100% safe!!! */ 1483 pThread->Block.u64 = u64Block; 1484 pThread->pszBlockFile = pszFile; 1485 pThread->uBlockLine = uLine; 1486 pThread->uBlockId = uId; 1487 ASMAtomicWriteSize(&pThread->enmState, enmState); 1550 pThread->Block.pRec = pValidatorRec; 1551 pThread->pszBlockFunction = pszFunction; 1552 pThread->pszBlockFile = pszFile; 1553 pThread->uBlockLine = iLine; 1554 pThread->uBlockId = uId; 1555 rtThreadSetState(pThread, enmState); 1488 1556 1489 1557 /* … … 1497 1565 PRTTHREADINT pCur; 1498 1566 unsigned cPrevLength = ~0U; 1499 unsigned cEqualRuns = 0;1500 unsigned iParanoia = 256;1567 unsigned cEqualRuns = 0; 1568 unsigned iParanoia = 256; 1501 1569 do 1502 1570 { … … 1508 1576 * Get the next thread. 1509 1577 */ 1578 PRTTHREADINT pNext = NULL; 1510 1579 for (;;) 1511 1580 { 1512 switch (pCur->enmState) 1581 RTTHREADSTATE enmCurState = rtThreadGetState(pCur); 1582 switch (enmCurState) 1513 1583 { 1514 1584 case RTTHREADSTATE_CRITSECT: 1585 case RTTHREADSTATE_EVENT: 1586 case RTTHREADSTATE_EVENT_MULTI: 1587 case RTTHREADSTATE_FAST_MUTEX: 1588 case RTTHREADSTATE_MUTEX: 1589 case RTTHREADSTATE_RW_READ: 1590 case RTTHREADSTATE_RW_WRITE: 1591 case RTTHREADSTATE_SPIN_MUTEX: 1515 1592 { 1516 PRTCRITSECT pCritSect = pCur->Block.pCritSect; 1517 if (pCur->enmState != RTTHREADSTATE_CRITSECT) 1593 PRTLOCKVALIDATORREC pRec = pCur->Block.pRec; 1594 if ( rtThreadGetState(pCur) != enmCurState 1595 || !VALID_PTR(pRec) 1596 || pRec->u32Magic != RTLOCKVALIDATORREC_MAGIC) 1518 1597 continue; 1519 pCur = pCritSect->Strict.ThreadOwner; 1598 pNext = pRec->hThread; 1599 if ( rtThreadGetState(pCur) != enmCurState 1600 || pRec->u32Magic != RTLOCKVALIDATORREC_MAGIC 1601 || pRec->hThread != pNext) 1602 continue; 1520 1603 break; 1521 1604 } 1522 1605 1523 1606 default: 1524 p Cur= NULL;1607 pNext = NULL; 1525 1608 break; 1526 1609 } 1527 1610 break; 1528 1611 } 1612 1613 /* 1614 * If we arrive at the end of the list we're good. 1615 */ 1616 pCur = pNext; 1529 1617 if (!pCur) 1530 1618 return; 1531 1619 1532 1620 /* 1533 * If we've got back to the blocking thread id we've got a deadlock. 
1534 * If we've got a chain of more than 256 items, there is some kind of cycle 1535 * in the list, which means that there is already a deadlock somewhere. 1621 * If we've got back to the blocking thread id we've 1622 * got a deadlock. 1536 1623 */ 1537 if (pCur == pThread || cLength >= 256)1624 if (pCur == pThread) 1538 1625 break; 1626 1627 /* 1628 * If we've got a chain of more than 256 items, there is some 1629 * kind of cycle in the list, which means that there is already 1630 * a deadlock somewhere. 1631 */ 1632 if (cLength >= 256) 1633 break; 1634 1539 1635 cLength++; 1540 1636 } … … 1553 1649 * Ok, if we ever get here, it's most likely a genuine deadlock. 1554 1650 */ 1555 rtThreadDeadlock(pThread, pCur, enmState, u64Block, pszFile, uLine, uId);1651 rtThreadDeadlock(pThread, pCur, enmState, pValidatorRec, uId, RT_SRC_POS_ARGS); 1556 1652 } 1557 1653 } … … 1570 1666 RTDECL(void) RTThreadUnblocked(RTTHREAD hThread, RTTHREADSTATE enmCurState) 1571 1667 { 1572 if (hThread && hThread->enmState== enmCurState)1573 ASMAtomicWriteSize(&hThread->enmState, RTTHREADSTATE_RUNNING);1668 if (hThread && rtThreadGetState(hThread) == enmCurState) 1669 rtThreadSetState(hThread, RTTHREADSTATE_RUNNING); 1574 1670 } 1575 1671 RT_EXPORT_SYMBOL(RTThreadUnblocked); -
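The reworked RTThreadBlocking() no longer peeks into RTCRITSECT internals; it follows the wait-for chain through lock validator records: blocked thread -> record it waits on -> record's owner thread -> that thread's record, and so on, stopping at a running thread, declaring a deadlock when the walk returns to the starting thread, or giving up after 256 hops (a cycle further down the chain). A condensed, hypothetical sketch of the walk with simplified types and none of the revalidation against racing state changes:

    #include <stdbool.h>
    #include <stddef.h>

    /* Stand-ins for RTTHREADINT / RTLOCKVALIDATORREC, for illustration only. */
    typedef struct MYLOCKREC MYLOCKREC;
    typedef struct MYTHREAD
    {
        MYLOCKREC *pWaitingOn;   /* record this thread is blocked on, or NULL */
    } MYTHREAD;
    struct MYLOCKREC
    {
        MYTHREAD  *pOwner;       /* current owner of the lock, or NULL */
    };

    /* Returns true if blocking pSelf on pRec would close a wait-for cycle. */
    static bool myWouldDeadlock(MYTHREAD *pSelf, const MYLOCKREC *pRec)
    {
        MYTHREAD *pCur  = pRec->pOwner;
        unsigned  cHops = 0;
        while (pCur)
        {
            if (pCur == pSelf)
                return true;                 /* walked back to ourselves: deadlock */
            if (++cHops >= 256)
                return true;                 /* unreasonably long chain: assume a cycle */
            MYLOCKREC *pNextRec = pCur->pWaitingOn;
            pCur = pNextRec ? pNextRec->pOwner : NULL;
        }
        return false;                        /* reached a running thread: no cycle */
    }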
trunk/src/VBox/Runtime/generic/critsect-generic.cpp
r23718 r25368 5 5 6 6 /* 7 * Copyright (C) 2006-200 7Sun Microsystems, Inc.7 * Copyright (C) 2006-2009 Sun Microsystems, Inc. 8 8 * 9 9 * This file is part of VirtualBox Open Source Edition (OSE), as … … 45 45 46 46 47 /* in strict mode we're redefining this, so undefine itnow for the implementation. */47 /* In strict mode we're redefining these, so undefine them now for the implementation. */ 48 48 #undef RTCritSectEnter 49 49 #undef RTCritSectTryEnter … … 51 51 52 52 53 /**54 * Initialize a critical section.55 */56 53 RTDECL(int) RTCritSectInit(PRTCRITSECT pCritSect) 57 54 { … … 61 58 62 59 63 /**64 * Initialize a critical section.65 *66 * @returns iprt status code.67 * @param pCritSect Pointer to the critical section structure.68 * @param fFlags Flags, any combination of the RTCRITSECT_FLAGS \#defines.69 */70 60 RTDECL(int) RTCritSectInitEx(PRTCRITSECT pCritSect, uint32_t fFlags) 71 61 { … … 78 68 pCritSect->cLockers = -1; 79 69 pCritSect->NativeThreadOwner = NIL_RTNATIVETHREAD; 80 pCritSect->Strict.ThreadOwner = NIL_RTTHREAD; 81 pCritSect->Strict.pszEnterFile = NULL; 82 pCritSect->Strict.u32EnterLine = 0; 83 pCritSect->Strict.uEnterId = 0; 84 int rc = RTSemEventCreate(&pCritSect->EventSem); 70 int rc = RTLockValidatorCreate(&pCritSect->pValidatorRec, NIL_RTLOCKVALIDATORCLASS, 0, NULL, pCritSect); 85 71 if (RT_SUCCESS(rc)) 86 return VINF_SUCCESS; 72 { 73 rc = RTSemEventCreate(&pCritSect->EventSem); 74 if (RT_SUCCESS(rc)) 75 return VINF_SUCCESS; 76 RTLockValidatorDestroy(&pCritSect->pValidatorRec); 77 } 87 78 88 79 AssertRC(rc); … … 94 85 95 86 96 /** 97 * Enter multiple critical sections. 98 * 99 * This function will enter ALL the specified critical sections before returning. 100 * 101 * @returns VINF_SUCCESS on success. 102 * @returns VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.) 103 * @returns VERR_SEM_DESTROYED if RTCritSectDelete was called while waiting. 104 * @param cCritSects Number of critical sections in the array. 105 * @param papCritSects Array of critical section pointers. 106 * 107 * @remark Please note that this function will not necessarily come out favourable in a 108 * fight with other threads which are using the normal RTCritSectEnter() function. 109 * Therefore, avoid having to enter multiple critical sections! 110 */ 111 RTDECL(int) RTCritSectEnterMultiple(unsigned cCritSects, PRTCRITSECT *papCritSects) 112 #ifdef RTCRITSECT_STRICT 113 { 114 return RTCritSectEnterMultipleDebug(cCritSects, papCritSects, __FILE__, __LINE__, 0); 115 } 116 RTDECL(int) RTCritSectEnterMultipleDebug(unsigned cCritSects, PRTCRITSECT *papCritSects, const char *pszFile, unsigned uLine, RTUINTPTR uId) 117 #endif /* RTCRITSECT_STRICT */ 87 #ifdef RTCRITSECT_STRICT 88 RTDECL(int) RTCritSectEnterMultipleDebug(size_t cCritSects, PRTCRITSECT *papCritSects, RTUINTPTR uId, RT_SRC_POS_DECL) 89 #else 90 RTDECL(int) RTCritSectEnterMultiple(size_t cCritSects, PRTCRITSECT *papCritSects) 91 #endif 118 92 { 119 93 Assert(cCritSects > 0); 120 Assert (VALID_PTR(papCritSects));94 AssertPtr(papCritSects); 121 95 122 96 /* … … 124 98 */ 125 99 int rc = VERR_INVALID_PARAMETER; 126 unsignedi;100 size_t i; 127 101 for (i = 0; i < cCritSects; i++) 128 102 { 129 103 #ifdef RTCRITSECT_STRICT 130 rc = RTCritSectTryEnterDebug(papCritSects[i], pszFile, uLine, uId);104 rc = RTCritSectTryEnterDebug(papCritSects[i], uId, RT_SRC_POS_ARGS); 131 105 #else 132 106 rc = RTCritSectTryEnter(papCritSects[i]); … … 146 120 * We've failed, release any locks we might have gotten. ('i' is the lock that failed btw.) 
147 121 */ 148 unsignedj = i;122 size_t j = i; 149 123 while (j-- > 0) 150 124 { … … 166 140 */ 167 141 #ifdef RTCRITSECT_STRICT 168 rc = RTCritSectEnterDebug(papCritSects[i], pszFile, uLine, uId);142 rc = RTCritSectEnterDebug(papCritSects[i], uId, RT_SRC_POS_ARGS); 169 143 #else 170 144 rc = RTCritSectEnter(papCritSects[i]); … … 181 155 { 182 156 #ifdef RTCRITSECT_STRICT 183 rc = RTCritSectTryEnterDebug(papCritSects[j], pszFile, uLine, uId);157 rc = RTCritSectTryEnterDebug(papCritSects[j], uId, RT_SRC_POS_ARGS); 184 158 #else 185 159 rc = RTCritSectTryEnter(papCritSects[j]); … … 203 177 } 204 178 } 179 #ifdef RTCRITSECT_STRICT 180 RT_EXPORT_SYMBOL(RTCritSectEnterMultipleDebug); 181 #else 205 182 RT_EXPORT_SYMBOL(RTCritSectEnterMultiple); 206 207 208 /** 209 * Try enter a critical section. 210 * 211 * @returns VINF_SUCCESS on success. 212 * @returns VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.) 213 * @returns VERR_SEM_DESTROYED if RTCritSectDelete was called while waiting. 214 * @param pCritSect The critical section. 215 */ 183 #endif 184 185 186 #ifdef RTCRITSECT_STRICT 187 RTDECL(int) RTCritSectEnterMultiple(size_t cCritSects, PRTCRITSECT *papCritSects) 188 { 189 return RTCritSectEnterMultipleDebug(cCritSects, papCritSects, 0, RT_SRC_POS); 190 } 191 RT_EXPORT_SYMBOL(RTCritSectEnterMultiple); 192 193 194 #else /* !RTCRITSECT_STRICT */ 195 RTDECL(int) RTCritSectEnterMultipleDebug(size_t cCritSects, PRTCRITSECT *papCritSects, RTUINTPTR uId, RT_SRC_POS_DECL) 196 { 197 return RTCritSectEnterMultiple(cCritSects, papCritSects); 198 } 199 RT_EXPORT_SYMBOL(RTCritSectEnterMultipleDebug); 200 #endif /* !RTCRITSECT_STRICT */ 201 202 203 #ifdef RTCRITSECT_STRICT 204 RTDECL(int) RTCritSectTryEnterDebug(PRTCRITSECT pCritSect, RTHCUINTPTR uId, RT_SRC_POS_DECL) 205 #else 216 206 RTDECL(int) RTCritSectTryEnter(PRTCRITSECT pCritSect) 217 #ifdef RTCRITSECT_STRICT 218 { 219 return RTCritSectTryEnterDebug(pCritSect, __FILE__, __LINE__, 0); 220 } 221 RTDECL(int) RTCritSectTryEnterDebug(PRTCRITSECT pCritSect, const char *pszFile, unsigned uLine, RTUINTPTR uId) 222 #endif /* RTCRITSECT_STRICT */ 207 #endif 223 208 { 224 209 Assert(pCritSect); … … 259 244 ASMAtomicWriteHandle(&pCritSect->NativeThreadOwner, NativeThreadSelf); 260 245 #ifdef RTCRITSECT_STRICT 261 pCritSect->Strict.pszEnterFile = pszFile; 262 pCritSect->Strict.u32EnterLine = uLine; 263 pCritSect->Strict.uEnterId = uId; 264 ASMAtomicWriteHandle(&pCritSect->Strict.ThreadOwner, ThreadSelf); 246 RTLockValidatorSetOwner(pCritSect->pValidatorRec, ThreadSelf, uId, RT_SRC_POS_ARGS); 265 247 #endif 266 248 267 249 return VINF_SUCCESS; 268 250 } 251 #ifdef RTCRITSECT_STRICT 252 RT_EXPORT_SYMBOL(RTCritSectTryEnterDebug); 253 #else 269 254 RT_EXPORT_SYMBOL(RTCritSectTryEnter); 270 271 272 /** 273 * Enter a critical section. 274 * 275 * @returns VINF_SUCCESS on success. 276 * @returns VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.) 277 * @returns VERR_SEM_DESTROYED if RTCritSectDelete was called while waiting. 278 * @param pCritSect The critical section. 
279 */ 255 #endif 256 257 258 #ifdef RTCRITSECT_STRICT 259 RTDECL(int) RTCritSectTryEnter(PRTCRITSECT pCritSect) 260 { 261 return RTCritSectTryEnterDebug(pCritSect, 0, RT_SRC_POS); 262 } 263 RT_EXPORT_SYMBOL(RTCritSectTryEnter); 264 265 266 #else /* !RTCRITSECT_STRICT */ 267 RTDECL(int) RTCritSectTryEnterDebug(PRTCRITSECT pCritSect, RTHCUINTPTR uId, RT_SRC_POS_DECL) 268 { 269 return RTCritSectTryEnter(pCritSect); 270 } 271 RT_EXPORT_SYMBOL(RTCritSectTryEnterDebug); 272 #endif /* !RTCRITSECT_STRICT */ 273 274 275 #ifdef RTCRITSECT_STRICT 276 RTDECL(int) RTCritSectEnterDebug(PRTCRITSECT pCritSect, RTHCUINTPTR uId, RT_SRC_POS_DECL) 277 #else 280 278 RTDECL(int) RTCritSectEnter(PRTCRITSECT pCritSect) 281 #ifdef RTCRITSECT_STRICT 282 { 283 return RTCritSectEnterDebug(pCritSect, __FILE__, __LINE__, 0); 284 } 285 RTDECL(int) RTCritSectEnterDebug(PRTCRITSECT pCritSect, const char *pszFile, unsigned uLine, RTUINTPTR uId) 286 #endif /* RTCRITSECT_STRICT */ 279 #endif 287 280 { 288 281 Assert(pCritSect); … … 293 286 if (ThreadSelf == NIL_RTTHREAD) 294 287 RTThreadAdopt(RTTHREADTYPE_DEFAULT, 0, NULL, &ThreadSelf); 288 RTLockValidatorCheckOrder(pCritSect->pValidatorRec, ThreadSelf, uId, RT_SRC_POS_ARGS); 295 289 #endif 296 290 … … 324 318 { 325 319 #ifdef RTCRITSECT_STRICT 326 RTThreadBlocking(ThreadSelf, RTTHREADSTATE_CRITSECT, (uintptr_t)pCritSect, pszFile, uLine, uId);320 RTThreadBlocking(ThreadSelf, RTTHREADSTATE_CRITSECT, pCritSect->pValidatorRec, uId, RT_SRC_POS_ARGS); 327 321 #endif 328 322 int rc = RTSemEventWait(pCritSect->EventSem, RT_INDEFINITE_WAIT); … … 345 339 ASMAtomicWriteHandle(&pCritSect->NativeThreadOwner, NativeThreadSelf); 346 340 #ifdef RTCRITSECT_STRICT 347 pCritSect->Strict.pszEnterFile = pszFile; 348 pCritSect->Strict.u32EnterLine = uLine; 349 pCritSect->Strict.uEnterId = uId; 350 ASMAtomicWriteHandle(&pCritSect->Strict.ThreadOwner, ThreadSelf); 341 RTLockValidatorSetOwner(pCritSect->pValidatorRec, ThreadSelf, uId, RT_SRC_POS_ARGS); 351 342 RTThreadWriteLockInc(ThreadSelf); 352 343 #endif … … 354 345 return VINF_SUCCESS; 355 346 } 347 #ifdef RTCRITSECT_STRICT 348 RT_EXPORT_SYMBOL(RTCritSectEnterDebug); 349 #else 356 350 RT_EXPORT_SYMBOL(RTCritSectEnter); 357 358 359 /** 360 * Leave a critical section. 361 * 362 * @returns VINF_SUCCESS. 363 * @param pCritSect The critical section. 364 */ 351 #endif 352 353 354 #ifdef RTCRITSECT_STRICT 355 RTDECL(int) RTCritSectEnter(PRTCRITSECT pCritSect) 356 { 357 return RTCritSectEnterDebug(pCritSect, 0, RT_SRC_POS); 358 } 359 RT_EXPORT_SYMBOL(RTCritSectEnter); 360 361 362 #else /* !RTCRITSECT_STRICT */ 363 RTDECL(int) RTCritSectEnterDebug(PRTCRITSECT pCritSect, RTHCUINTPTR uId, RT_SRC_POS_DECL) 364 { 365 return RTCritSectEnter(pCritSect); 366 } 367 RT_EXPORT_SYMBOL(RTCritSectEnterDebug); 368 #endif /* !RTCRITSECT_STRICT */ 369 370 365 371 RTDECL(int) RTCritSectLeave(PRTCRITSECT pCritSect) 366 372 { … … 387 393 */ 388 394 #ifdef RTCRITSECT_STRICT 389 if (pCritSect->Strict.ThreadOwner != NIL_RTTHREAD) /* May happen for PDMCritSects when entering GC/R0. */ 390 RTThreadWriteLockDec(pCritSect->Strict.ThreadOwner); 391 ASMAtomicWriteHandle(&pCritSect->Strict.ThreadOwner, NIL_RTTHREAD); 395 RTLockValidatorUnsetOwner(pCritSect->pValidatorRec); 392 396 #endif 393 397 ASMAtomicWriteHandle(&pCritSect->NativeThreadOwner, NIL_RTNATIVETHREAD); … … 403 407 404 408 405 /** 406 * Leave multiple critical sections. 407 * 408 * @returns VINF_SUCCESS. 409 * @param cCritSects Number of critical sections in the array. 
410 * @param papCritSects Array of critical section pointers. 411 */ 412 RTDECL(int) RTCritSectLeaveMultiple(unsigned cCritSects, PRTCRITSECT *papCritSects) 409 RTDECL(int) RTCritSectLeaveMultiple(size_t cCritSects, PRTCRITSECT *papCritSects) 413 410 { 414 411 int rc = VINF_SUCCESS; 415 for ( unsignedi = 0; i < cCritSects; i++)412 for (size_t i = 0; i < cCritSects; i++) 416 413 { 417 414 int rc2 = RTCritSectLeave(papCritSects[i]); … … 424 421 425 422 426 #ifndef RTCRITSECT_STRICT427 RTDECL(int) RTCritSectEnterDebug(PRTCRITSECT pCritSect, const char *pszFile, unsigned uLine, RTUINTPTR uId)428 {429 return RTCritSectEnter(pCritSect);430 }431 432 RTDECL(int) RTCritSectTryEnterDebug(PRTCRITSECT pCritSect, const char *pszFile, unsigned uLine, RTUINTPTR uId)433 {434 return RTCritSectTryEnter(pCritSect);435 }436 437 RTDECL(int) RTCritSectEnterMultipleDebug(unsigned cCritSects, PRTCRITSECT *papCritSects, const char *pszFile, unsigned uLine, RTUINTPTR uId)438 {439 return RTCritSectEnterMultiple(cCritSects, papCritSects);440 }441 #endif /* RT_STRICT */442 RT_EXPORT_SYMBOL(RTCritSectEnterDebug);443 RT_EXPORT_SYMBOL(RTCritSectTryEnterDebug);444 RT_EXPORT_SYMBOL(RTCritSectEnterMultipleDebug);445 446 447 /**448 * Deletes a critical section.449 *450 * @returns VINF_SUCCESS.451 * @param pCritSect The critical section.452 */453 423 RTDECL(int) RTCritSectDelete(PRTCRITSECT pCritSect) 454 424 { … … 472 442 RTSEMEVENT EventSem = pCritSect->EventSem; 473 443 pCritSect->EventSem = NIL_RTSEMEVENT; 444 474 445 while (pCritSect->cLockers-- >= 0) 475 446 RTSemEventSignal(EventSem); … … 478 449 AssertRC(rc); 479 450 451 RTLockValidatorDestroy(&pCritSect->pValidatorRec); 452 480 453 return rc; 481 454 } -
trunk/src/VBox/Runtime/include/internal/magics.h
r25059 r25368 67 67 /** The magic value for RTLOCALIPCSERVER::u32Magic. (Katsuhiro Otomo) */ 68 68 #define RTLOCALIPCSESSION_MAGIC 0x19530414 69 /** The magic value for RTLOCKVALIDATORREC::u32Magic. (Vladimir Vladimirovich Nabokov) */ 70 #define RTLOCKVALIDATORREC_MAGIC 0x18990422 71 /** The dead magic value for RTLOCKVALIDATORREC::u32Magic. */ 72 #define RTLOCKVALIDATORREC_MAGIC_DEAD 0x19770702 69 73 /** Magic number for RTMEMCACHEINT::u32Magic. (Joseph Weizenbaum) */ 70 74 #define RTMEMCACHE_MAGIC 0x19230108 -
trunk/src/VBox/Runtime/include/internal/thread.h
r23124 r25368 93 93 union RTTHREADINTBLOCKID 94 94 { 95 PRTLOCKVALIDATORREC pRec; 95 96 uint64_t u64; 96 PRTCRITSECT pCritSect;97 RTSEMEVENT Event;98 RTSEMEVENTMULTI EventMulti;99 RTSEMMUTEX Mutex;100 97 } Block; 101 98 /** Where we're blocking. */ 99 const char volatile *pszBlockFunction; 100 /** Where we're blocking. */ 102 101 const char volatile *pszBlockFile; 103 102 /** Where we're blocking. */ 104 u nsignedvolatile uBlockLine;105 /** Where we're blocking. */ 106 RT UINTPTR volatileuBlockId;103 uint32_t volatile uBlockLine; 104 /** Where we're blocking. */ 105 RTHCUINTPTR volatile uBlockId; 107 106 /** Number of registered write locks, mutexes and critsects that this thread owns. */ 108 107 int32_t volatile cWriteLocks; -
trunk/src/VBox/Runtime/testcase/tstDeadlock.cpp
r14831 r25368 33 33 * Header Files * 34 34 *******************************************************************************/ 35 #include <iprt/ thread.h>35 #include <iprt/asm.h> /* for return addresses */ 36 36 #include <iprt/critsect.h> 37 #include <iprt/stream.h> 37 38 38 #include <iprt/err.h> 39 39 #include <iprt/initterm.h> 40 #include <iprt/stream.h> 41 #include <iprt/thread.h> 40 42 41 43 … … 78 80 RTPrintf("thread3: taking 1\n"); 79 81 RTCritSectEnter(&g_CritSect1); 80 RTPrintf("thread 1: got 1!!!\n");82 RTPrintf("thread3: got 1!!!\n"); 81 83 return VERR_DEADLOCK; 82 84 } … … 100 102 } 101 103 RTCritSectEnter(&g_CritSect1); 102 if ( g_CritSect1.Strict.ThreadOwner== NIL_RTTHREAD)104 if (!g_CritSect1.pValidatorRec || g_CritSect1.pValidatorRec->hThread == NIL_RTTHREAD) 103 105 { 104 106 RTPrintf("tstDeadlock: deadlock detection is not enabled in this build\n"); -
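The testcase can no longer look at the removed Strict sub-structure, so it decides whether deadlock detection is compiled in by entering a section and checking whether the validator record picked up an owner. A minimal check along the same lines (assuming the hThread owner field that the rest of this changeset reads from RTLOCKVALIDATORREC):

    #include <stdbool.h>
    #include <iprt/critsect.h>
    #include <iprt/lockvalidator.h>

    /* Returns true when the strict/lock-validator machinery is active. */
    static bool myHasDeadlockDetection(PRTCRITSECT pCritSect)
    {
        RTCritSectEnter(pCritSect);
        /* With the validator compiled in, entering records the owning thread. */
        bool fEnabled = pCritSect->pValidatorRec
                     && pCritSect->pValidatorRec->hThread != NIL_RTTHREAD;
        RTCritSectLeave(pCritSect);
        return fEnabled;
    }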
trunk/src/VBox/VMM/PDMCritSect.cpp
r23350 r25368 126 126 if (RT_SUCCESS(rc)) 127 127 { 128 /* 129 * Initialize the structure (first bit is c&p from RTCritSectInitEx). 130 */ 131 pCritSect->Core.u32Magic = RTCRITSECT_MAGIC; 132 pCritSect->Core.fFlags = 0; 133 pCritSect->Core.cNestings = 0; 134 pCritSect->Core.cLockers = -1; 135 pCritSect->Core.NativeThreadOwner = NIL_RTNATIVETHREAD; 136 pCritSect->Core.Strict.ThreadOwner = NIL_RTTHREAD; 137 pCritSect->Core.Strict.pszEnterFile = NULL; 138 pCritSect->Core.Strict.u32EnterLine = 0; 139 pCritSect->Core.Strict.uEnterId = 0; 140 pCritSect->pVMR3 = pVM; 141 pCritSect->pVMR0 = pVM->pVMR0; 142 pCritSect->pVMRC = pVM->pVMRC; 143 pCritSect->pvKey = pvKey; 144 pCritSect->EventToSignal = NIL_RTSEMEVENT; 145 pCritSect->pNext = pVM->pdm.s.pCritSects; 146 pCritSect->pszName = RTStrDup(pszName); 147 pVM->pdm.s.pCritSects = pCritSect; 148 STAMR3RegisterF(pVM, &pCritSect->StatContentionRZLock, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PDM/CritSects/%s/ContentionRZLock", pszName); 149 STAMR3RegisterF(pVM, &pCritSect->StatContentionRZUnlock,STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PDM/CritSects/%s/ContentionRZUnlock", pszName); 150 STAMR3RegisterF(pVM, &pCritSect->StatContentionR3, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PDM/CritSects/%s/ContentionR3", pszName); 128 rc = RTLockValidatorCreate(&pCritSect->Core.pValidatorRec, NIL_RTLOCKVALIDATORCLASS, 0, pszName, pCritSect); 129 if (RT_SUCCESS(rc)) 130 { 131 /* 132 * Initialize the structure (first bit is c&p from RTCritSectInitEx). 133 */ 134 pCritSect->Core.u32Magic = RTCRITSECT_MAGIC; 135 pCritSect->Core.fFlags = 0; 136 pCritSect->Core.cNestings = 0; 137 pCritSect->Core.cLockers = -1; 138 pCritSect->Core.NativeThreadOwner = NIL_RTNATIVETHREAD; 139 pCritSect->pVMR3 = pVM; 140 pCritSect->pVMR0 = pVM->pVMR0; 141 pCritSect->pVMRC = pVM->pVMRC; 142 pCritSect->pvKey = pvKey; 143 pCritSect->EventToSignal = NIL_RTSEMEVENT; 144 pCritSect->pNext = pVM->pdm.s.pCritSects; 145 pCritSect->pszName = RTStrDup(pszName); 146 pVM->pdm.s.pCritSects = pCritSect; 147 STAMR3RegisterF(pVM, &pCritSect->StatContentionRZLock, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PDM/CritSects/%s/ContentionRZLock", pszName); 148 STAMR3RegisterF(pVM, &pCritSect->StatContentionRZUnlock,STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PDM/CritSects/%s/ContentionRZUnlock", pszName); 149 STAMR3RegisterF(pVM, &pCritSect->StatContentionR3, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PDM/CritSects/%s/ContentionR3", pszName); 151 150 #ifdef VBOX_WITH_STATISTICS 152 STAMR3RegisterF(pVM, &pCritSect->StatLocked, STAMTYPE_PROFILE_ADV, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_OCCURENCE, NULL, "/PDM/CritSects/%s/Locked", pszName);151 STAMR3RegisterF(pVM, &pCritSect->StatLocked, STAMTYPE_PROFILE_ADV, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_OCCURENCE, NULL, "/PDM/CritSects/%s/Locked", pszName); 153 152 #endif 153 return VINF_SUCCESS; 154 } 155 156 SUPSemEventClose(pVM->pSession, (SUPSEMEVENT)pCritSect->Core.EventSem); 154 157 } 155 158 return rc; … … 241 244 int rc = SUPSemEventClose(pVM->pSession, hEvent); 242 245 AssertRC(rc); 246 RTLockValidatorDestroy(&pCritSect->Core.pValidatorRec); 243 247 pCritSect->pNext = NULL; 244 248 pCritSect->pvKey = NULL; … … 390 394 return false; 391 395 396 #ifdef PDMCRITSECT_STRICT 397 const char * const pszFile = pCritSect->s.Core.pValidatorRec->pszFile; 398 const char * const pszFunction = 
pCritSect->s.Core.pValidatorRec->pszFunction; 399 uint32_t const iLine = pCritSect->s.Core.pValidatorRec->uLine; 400 RTHCUINTPTR const uId = pCritSect->s.Core.pValidatorRec->uId; 401 #endif 392 402 PDMCritSectLeave(pCritSect); 393 403 … … 411 421 } 412 422 423 #ifdef PDMCRITSECT_STRICT 424 int rc = PDMCritSectEnterDebug(pCritSect, VERR_INTERNAL_ERROR, uId, pszFile, iLine, pszFunction); 425 #else 413 426 int rc = PDMCritSectEnter(pCritSect, VERR_INTERNAL_ERROR); 427 #endif 414 428 AssertLogRelRC(rc); 415 429 return true; -
trunk/src/VBox/VMM/PDMInternal.h
r24744 r25368 49 49 #if defined(DOXYGEN_RUNNING) || 1 50 50 # define PDM_WITH_R3R0_CRIT_SECT 51 #endif 52 53 /** @def PDMCRITSECT_STRICT 54 * Enables/disables PDM critsect strictness like deadlock detection. */ 55 #if defined(VBOX_STRICT) || defined(DOXYGEN_RUNNING) 56 # define PDMCRITSECT_STRICT 51 57 #endif 52 58 -
trunk/src/VBox/VMM/REMInternal.h
r22707 r25368 217 217 STAMPROFILE StatsStateBack; 218 218 219 /** Padding the CPUX86State structure to 32byte. */220 uint32_t abPadding[HC_ARCH_BITS == 32 ? 2 : 6];219 /** Padding the CPUX86State structure to 64 byte. */ 220 uint32_t abPadding[HC_ARCH_BITS == 32 ? 4 : 4]; 221 221 222 222 # define REM_ENV_SIZE 0xff00 -
trunk/src/VBox/VMM/VMMAll/PDMAllCritSect.cpp
r23350 r25368 51 51 #define PDMCRITSECT_SPIN_COUNT_RC 256 52 52 53 /** @def PDMCRITSECT_STRICT 54 * Enables/disables PDM critsect strictness like deadlock detection. */ 55 #if defined(VBOX_STRICT) || defined(DOXYGEN_RUNNING) 56 # define PDMCRITSECT_STRICT 57 #endif 53 #ifdef PDMCRITSECT_STRICT 54 # define PDMCRITSECT_STRICT_ARGS_DECL RTHCUINTPTR uId, RT_SRC_POS_DECL 55 # define PDMCRITSECT_STRICT_ARGS_PASS_ON uId, RT_SRC_POS_ARGS 56 #else 57 # define PDMCRITSECT_STRICT_ARGS_DECL int iDummy 58 # define PDMCRITSECT_STRICT_ARGS_PASS_ON 0 59 #endif 60 61 62 /* Undefine the automatic VBOX_STRICT API mappings. */ 63 #undef PDMCritSectEnter 64 #undef PDMCritSectTryEnter 58 65 59 66 … … 88 95 * @param hNativeSelf The native handle of this thread. 89 96 */ 90 DECL_FORCE_INLINE(int) pdmCritSectEnterFirst(PPDMCRITSECT pCritSect, RTNATIVETHREAD hNativeSelf )97 DECL_FORCE_INLINE(int) pdmCritSectEnterFirst(PPDMCRITSECT pCritSect, RTNATIVETHREAD hNativeSelf, PDMCRITSECT_STRICT_ARGS_DECL) 91 98 { 92 99 AssertMsg(pCritSect->s.Core.NativeThreadOwner == NIL_RTNATIVETHREAD, ("NativeThreadOwner=%p\n", pCritSect->s.Core.NativeThreadOwner)); … … 97 104 98 105 # if defined(PDMCRITSECT_STRICT) && defined(IN_RING3) 99 pCritSect->s.Core.Strict.pszEnterFile = NULL; 100 pCritSect->s.Core.Strict.u32EnterLine = 0; 101 pCritSect->s.Core.Strict.uEnterId = 0; 102 RTTHREAD hSelf = RTThreadSelf(); 103 ASMAtomicWriteHandle(&pCritSect->s.Core.Strict.ThreadOwner, hSelf); 104 RTThreadWriteLockInc(hSelf); 106 RTLockValidatorSetOwner(pCritSect->s.Core.pValidatorRec, NIL_RTTHREAD, PDMCRITSECT_STRICT_ARGS_PASS_ON); 107 RTThreadWriteLockInc(pCritSect->s.Core.pValidatorRec->hThread); 105 108 # endif 106 109 … … 118 121 * @param hNativeSelf The native thread handle. 119 122 */ 120 static int pdmR3CritSectEnterContended(PPDMCRITSECT pCritSect, RTNATIVETHREAD hNativeSelf )123 static int pdmR3CritSectEnterContended(PPDMCRITSECT pCritSect, RTNATIVETHREAD hNativeSelf, PDMCRITSECT_STRICT_ARGS_DECL) 121 124 { 122 125 /* … … 124 127 */ 125 128 if (ASMAtomicIncS32(&pCritSect->s.Core.cLockers) == 0) 126 return pdmCritSectEnterFirst(pCritSect, hNativeSelf );129 return pdmCritSectEnterFirst(pCritSect, hNativeSelf, PDMCRITSECT_STRICT_ARGS_PASS_ON); 127 130 STAM_COUNTER_INC(&pCritSect->s.StatContentionR3); 128 131 … … 136 139 if (hSelf == NIL_RTTHREAD) 137 140 RTThreadAdopt(RTTHREADTYPE_DEFAULT, 0, NULL, &hSelf); 141 RTLockValidatorCheckOrder(pCritSect->s.Core.pValidatorRec, hSelf, 0, NULL, 0, NULL); 138 142 # endif 139 143 for (;;) 140 144 { 141 145 # ifdef PDMCRITSECT_STRICT 142 RTThreadBlocking(hSelf, RTTHREADSTATE_CRITSECT, (uintptr_t)pCritSect, NULL, 0, 0);146 RTThreadBlocking(hSelf, RTTHREADSTATE_CRITSECT, pCritSect->s.Core.pValidatorRec, 0, NULL, 0, NULL); 143 147 # endif 144 148 int rc = SUPSemEventWaitNoResume(pSession, hEvent, RT_INDEFINITE_WAIT); … … 149 153 return VERR_SEM_DESTROYED; 150 154 if (rc == VINF_SUCCESS) 151 return pdmCritSectEnterFirst(pCritSect, hNativeSelf );155 return pdmCritSectEnterFirst(pCritSect, hNativeSelf, PDMCRITSECT_STRICT_ARGS_PASS_ON); 152 156 AssertMsg(rc == VERR_INTERRUPTED, ("rc=%Rrc\n", rc)); 153 157 } … … 158 162 159 163 /** 160 * Enters a PDM critical section.164 * Common worker for the debug and normal APIs. 161 165 * 162 166 * @returns VINF_SUCCESS if entered successfully. … … 168 172 * and the section is busy. 
169 173 */ 170 VMMDECL(int) PDMCritSectEnter(PPDMCRITSECT pCritSect, int rcBusy)174 DECL_FORCE_INLINE(int) pdmCritSectEnter(PPDMCRITSECT pCritSect, int rcBusy, PDMCRITSECT_STRICT_ARGS_DECL) 171 175 { 172 176 Assert(pCritSect->s.Core.cNestings < 8); /* useful to catch incorrect locking */ … … 185 189 /* Not owned ... */ 186 190 if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1)) 187 return pdmCritSectEnterFirst(pCritSect, hNativeSelf );191 return pdmCritSectEnterFirst(pCritSect, hNativeSelf, PDMCRITSECT_STRICT_ARGS_PASS_ON); 188 192 189 193 /* ... or nested. */ … … 205 209 { 206 210 if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1)) 207 return pdmCritSectEnterFirst(pCritSect, hNativeSelf );211 return pdmCritSectEnterFirst(pCritSect, hNativeSelf, PDMCRITSECT_STRICT_ARGS_PASS_ON); 208 212 ASMNopPause(); 209 213 /** @todo Should use monitor/mwait on e.g. &cLockers here, possibly with a … … 218 222 * Take the slow path. 219 223 */ 220 return pdmR3CritSectEnterContended(pCritSect, hNativeSelf );224 return pdmR3CritSectEnterContended(pCritSect, hNativeSelf, PDMCRITSECT_STRICT_ARGS_PASS_ON); 221 225 #else 222 226 /* … … 231 235 232 236 /** 233 * Try enter a critical section. 237 * Enters a PDM critical section. 238 * 239 * @returns VINF_SUCCESS if entered successfully. 240 * @returns rcBusy when encountering a busy critical section in GC/R0. 241 * @returns VERR_SEM_DESTROYED if the critical section is dead. 242 * 243 * @param pCritSect The PDM critical section to enter. 244 * @param rcBusy The status code to return when we're in GC or R0 245 * and the section is busy. 246 */ 247 VMMDECL(int) PDMCritSectEnter(PPDMCRITSECT pCritSect, int rcBusy) 248 { 249 #ifndef PDMCRITSECT_STRICT 250 return pdmCritSectEnter(pCritSect, rcBusy, PDMCRITSECT_STRICT_ARGS_PASS_ON); 251 #else 252 /* No need for a second code instance. */ 253 return PDMCritSectEnterDebug(pCritSect, rcBusy, (uintptr_t)ASMReturnAddress(), RT_SRC_POS); 254 #endif 255 } 256 257 258 /** 259 * Enters a PDM critical section, with location information for debugging. 260 * 261 * @returns VINF_SUCCESS if entered successfully. 262 * @returns rcBusy when encountering a busy critical section in GC/R0. 263 * @returns VERR_SEM_DESTROYED if the critical section is dead. 264 * 265 * @param pCritSect The PDM critical section to enter. 266 * @param rcBusy The status code to return when we're in GC or R0 267 * and the section is busy. 268 * @param uId Some kind of locking location ID. Typically a 269 * return address up the stack. Optional (0). 270 * @param pszFile The file where the lock is being acquired from. 271 * Optional. 272 * @param iLine The line number in that file. Optional (0). 273 * @param pszFunction The functionn where the lock is being acquired 274 * from. Optional. 275 */ 276 VMMDECL(int) PDMCritSectEnterDebug(PPDMCRITSECT pCritSect, int rcBusy, RTHCUINTPTR uId, RT_SRC_POS_DECL) 277 { 278 #ifdef PDMCRITSECT_STRICT 279 return pdmCritSectEnter(pCritSect, rcBusy, PDMCRITSECT_STRICT_ARGS_PASS_ON); 280 #else 281 /* No need for a second code instance. */ 282 return PDMCritSectEnter(pCritSect, rcBusy); 283 #endif 284 } 285 286 287 /** 288 * Common worker for the debug and normal APIs. 234 289 * 235 290 * @retval VINF_SUCCESS on success. … … 240 295 * @param pCritSect The critical section. 241 296 */ 242 VMMDECL(int) PDMCritSectTryEnter(PPDMCRITSECT pCritSect)297 static int pdmCritSectTryEnter(PPDMCRITSECT pCritSect, PDMCRITSECT_STRICT_ARGS_DECL) 243 298 { 244 299 /* … … 255 310 /* Not owned ... 
*/ 256 311 if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1)) 257 return pdmCritSectEnterFirst(pCritSect, hNativeSelf );312 return pdmCritSectEnterFirst(pCritSect, hNativeSelf, PDMCRITSECT_STRICT_ARGS_PASS_ON); 258 313 259 314 /* ... or nested. */ … … 281 336 282 337 338 /** 339 * Try enter a critical section. 340 * 341 * @retval VINF_SUCCESS on success. 342 * @retval VERR_SEM_BUSY if the critsect was owned. 343 * @retval VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.) 344 * @retval VERR_SEM_DESTROYED if RTCritSectDelete was called while waiting. 345 * 346 * @param pCritSect The critical section. 347 */ 348 VMMDECL(int) PDMCritSectTryEnter(PPDMCRITSECT pCritSect) 349 { 350 #ifndef PDMCRITSECT_STRICT 351 return pdmCritSectTryEnter(pCritSect, PDMCRITSECT_STRICT_ARGS_PASS_ON); 352 #else 353 /* No need for a second code instance. */ 354 return PDMCritSectTryEnterDebug(pCritSect, (uintptr_t)ASMReturnAddress(), RT_SRC_POS); 355 #endif 356 } 357 358 359 /** 360 * Try enter a critical section, with location information for debugging. 361 * 362 * @retval VINF_SUCCESS on success. 363 * @retval VERR_SEM_BUSY if the critsect was owned. 364 * @retval VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.) 365 * @retval VERR_SEM_DESTROYED if RTCritSectDelete was called while waiting. 366 * 367 * @param pCritSect The critical section. 368 * @param uId Some kind of locking location ID. Typically a 369 * return address up the stack. Optional (0). 370 * @param pszFile The file where the lock is being acquired from. 371 * Optional. 372 * @param iLine The line number in that file. Optional (0). 373 * @param pszFunction The functionn where the lock is being acquired 374 * from. Optional. 375 */ 376 VMMDECL(int) PDMCritSectTryEnterDebug(PPDMCRITSECT pCritSect, RTHCUINTPTR uId, RT_SRC_POS_DECL) 377 { 378 #ifdef PDMCRITSECT_STRICT 379 return pdmCritSectTryEnter(pCritSect, PDMCRITSECT_STRICT_ARGS_PASS_ON); 380 #else 381 /* No need for a second code instance. 
*/ 382 return PDMCritSectTryEnterDebug(pCritSect, (uintptr_t)ASMReturnAddress(), RT_SRC_POS); 383 #endif 384 } 385 386 283 387 #ifdef IN_RING3 284 388 /** … … 297 401 if ( rc == VINF_SUCCESS 298 402 && fCallRing3 299 && pCritSect->s.Core.Strict.ThreadOwner != NIL_RTTHREAD) 300 { 301 RTThreadWriteLockDec(pCritSect->s.Core.Strict.ThreadOwner); 302 ASMAtomicWriteHandle(&pCritSect->s.Core.Strict.ThreadOwner, NIL_RTTHREAD); 403 && pCritSect->s.Core.pValidatorRec 404 && pCritSect->s.Core.pValidatorRec->hThread != NIL_RTTHREAD) 405 { 406 RTThreadWriteLockDec(pCritSect->s.Core.pValidatorRec->hThread); 407 RTLockValidatorUnsetOwner(pCritSect->s.Core.pValidatorRec); 303 408 } 304 409 return rc; … … 346 451 pCritSect->s.EventToSignal = NIL_RTSEMEVENT; 347 452 # if defined(PDMCRITSECT_STRICT) 348 if (pCritSect->s.Core.Strict.ThreadOwner != NIL_RTTHREAD) 349 RTThreadWriteLockDec(pCritSect->s.Core.Strict.ThreadOwner); 350 ASMAtomicWriteHandle(&pCritSect->s.Core.Strict.ThreadOwner, NIL_RTTHREAD); 453 if (pCritSect->s.Core.pValidatorRec->hThread != NIL_RTTHREAD) 454 { 455 RTThreadWriteLockDec(pCritSect->s.Core.pValidatorRec->hThread); 456 RTLockValidatorUnsetOwner(pCritSect->s.Core.pValidatorRec); 457 } 351 458 # endif 352 459 # endif 353 460 ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK); 354 Assert( pCritSect->s.Core.Strict.ThreadOwner== NIL_RTTHREAD);461 Assert(!pCritSect->s.Core.pValidatorRec || pCritSect->s.Core.pValidatorRec->hThread == NIL_RTTHREAD); 355 462 ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD); 356 463 ASMAtomicDecS32(&pCritSect->s.Core.cNestings); -
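The PDMCRITSECT_STRICT_ARGS_DECL / _PASS_ON pair lets one common worker compile with either the full debug signature or a dummy parameter, so the non-strict hot path carries no extra arguments. A self-contained sketch of the same trick with made-up names (the pszFile/iLine/pszFunction names again assume the RT_SRC_POS_DECL expansion used throughout this changeset):

    #include <stdio.h>
    #include <iprt/cdefs.h>
    #include <iprt/types.h>

    #ifdef RT_STRICT
    # define MY_STRICT_ARGS_DECL     RTHCUINTPTR uId, RT_SRC_POS_DECL
    # define MY_STRICT_ARGS_PASS_ON  uId, RT_SRC_POS_ARGS
    # define MY_STRICT_ARGS_DEFAULT  0, RT_SRC_POS
    #else
    # define MY_STRICT_ARGS_DECL     int iDummy
    # define MY_STRICT_ARGS_PASS_ON  0
    # define MY_STRICT_ARGS_DEFAULT  0
    #endif

    /* Common worker: sees positional information only in strict builds. */
    static int myEnterWorker(const char *pszLockName, MY_STRICT_ARGS_DECL)
    {
    #ifdef RT_STRICT
        printf("%s entered from %s(%u) %s, id=%p\n",
               pszLockName, pszFile, iLine, pszFunction, (void *)uId);
    #else
        NOREF(iDummy);
    #endif
        return 0;
    }

    /* Debug entry point: positions flow on to the worker only in strict builds. */
    int MyEnterDebug(const char *pszLockName, RTHCUINTPTR uId, RT_SRC_POS_DECL)
    {
    #ifndef RT_STRICT
        NOREF(uId); NOREF(pszFile); NOREF(iLine); NOREF(pszFunction);
    #endif
        return myEnterWorker(pszLockName, MY_STRICT_ARGS_PASS_ON);
    }

    /* Plain entry point: supplies a default/empty position. */
    int MyEnter(const char *pszLockName)
    {
        return myEnterWorker(pszLockName, MY_STRICT_ARGS_DEFAULT);
    }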
trunk/src/VBox/VMM/VMMAll/TMAllVirtual.cpp
r22890 r25368 406 406 407 407 /* 408 * Don't make updates until l408 * Don't make updates until we've check the timer qeueue. 409 409 */ 410 410 bool fUpdatePrev = true; -
trunk/src/VBox/VMM/testcase/tstVMStructGC.cpp
r24730 r25368 976 976 GEN_CHECK_OFF(RTCRITSECT, fFlags); 977 977 GEN_CHECK_OFF(RTCRITSECT, EventSem); 978 GEN_CHECK_OFF(RTCRITSECT, Strict.ThreadOwner); 979 GEN_CHECK_OFF(RTCRITSECT, Strict.pszEnterFile); 980 GEN_CHECK_OFF(RTCRITSECT, Strict.u32EnterLine); 981 GEN_CHECK_OFF(RTCRITSECT, Strict.uEnterId); 982 978 GEN_CHECK_OFF(RTCRITSECT, pValidatorRec); 983 979 984 980 GEN_CHECK_SIZE(CSAM); -
trunk/src/VBox/VMM/testcase/tstVMStructSize.cpp
r23366 r25368 249 249 CHECK_MEMBER_ALIGNMENT(VM, rem.s.uPendingExcptCR2, 8); 250 250 CHECK_MEMBER_ALIGNMENT(VM, rem.s.StatsInQEMU, 8); 251 CHECK_MEMBER_ALIGNMENT(VM, rem.s.Env, 32);251 CHECK_MEMBER_ALIGNMENT(VM, rem.s.Env, 64); 252 252 253 253 /* the VMCPUs are page aligned TLB hit reassons. */