VirtualBox

source: vbox/trunk/src/VBox/Runtime/generic/critsect-generic.cpp@2615

Last change on this file since 2615 was 751, checked in by vboxsync, 18 years ago

Stricter check in RTCritSectEnter for destroyed critical sections.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 14.0 KB
/* $Id: critsect-generic.cpp 751 2007-02-07 15:34:35Z vboxsync $ */
/** @file
 * InnoTek Portable Runtime - Critical Section, Generic.
 */

/*
 * Copyright (C) 2006 InnoTek Systemberatung GmbH
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License as published by the Free Software Foundation,
 * in version 2 as it comes in the "COPYING" file of the VirtualBox OSE
 * distribution. VirtualBox OSE is distributed in the hope that it will
 * be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * If you received this file as part of a commercial VirtualBox
 * distribution, then only the terms of your commercial VirtualBox
 * license agreement apply instead of the previous paragraph.
 */

/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#include <iprt/critsect.h>
#include <iprt/semaphore.h>
#include <iprt/thread.h>
#include <iprt/assert.h>
#include <iprt/asm.h>
#include <iprt/err.h>
#include "internal/thread.h"

/** @def RTCRITSECT_STRICT
 * Define this to enable deadlock detection.
 *
 * @remark  This won't work safely on L4 since we have to traverse the AVL tree
 *          in order to get the RT thread structure there and this tree is
 *          protected by a critsect atm.
 */
#if !defined(RTCRITSECT_STRICT) && defined(RT_STRICT) && !defined(__L4ENV__)
# define RTCRITSECT_STRICT
#endif

/* In strict mode we're redefining these, so undefine them now for the implementation. */
#undef RTCritSectEnter
#undef RTCritSectTryEnter
#undef RTCritSectEnterMultiple


/**
 * Initialize a critical section.
 */
RTDECL(int) RTCritSectInit(PRTCRITSECT pCritSect)
{
    return RTCritSectInitEx(pCritSect, 0);
}


/**
 * Initialize a critical section.
 *
 * @returns iprt status code.
 * @param   pCritSect   Pointer to the critical section structure.
 * @param   fFlags      Flags, any combination of the RTCRITSECT_FLAGS \#defines.
 */
RTDECL(int) RTCritSectInitEx(PRTCRITSECT pCritSect, uint32_t fFlags)
{
    /*
     * Initialize the structure and create the event semaphore.
     */
    pCritSect->u32Magic = RTCRITSECT_MAGIC;
    pCritSect->fFlags = fFlags;
    pCritSect->cNestings = 0;
    pCritSect->cLockers = -1;
    pCritSect->NativeThreadOwner = NIL_RTNATIVETHREAD;
    pCritSect->Strict.ThreadOwner = NIL_RTTHREAD;
    pCritSect->Strict.pszEnterFile = NULL;
    pCritSect->Strict.u32EnterLine = 0;
    pCritSect->Strict.uEnterId = 0;
    int rc = RTSemEventCreate(&pCritSect->EventSem);
    if (RT_SUCCESS(rc))
        return VINF_SUCCESS;

    AssertRC(rc);
    pCritSect->EventSem = NULL;
    pCritSect->u32Magic = (uint32_t)rc;
    return rc;
}
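
/*
 * Usage sketch (illustrative only): the typical life cycle of a critical
 * section as implemented in this file. The g_ExampleCritSect / g_cExampleCounter
 * names are made up for the example, and the block is compiled out.
 */
#if 0
static RTCRITSECT g_ExampleCritSect;
static uint32_t   g_cExampleCounter;

static int exampleInit(void)
{
    /* Create the section once, before any thread touches the shared data. */
    return RTCritSectInit(&g_ExampleCritSect);
}

static int exampleIncrement(void)
{
    /* Serialize access to the shared counter. */
    int rc = RTCritSectEnter(&g_ExampleCritSect);
    if (RT_SUCCESS(rc))
    {
        g_cExampleCounter++;
        rc = RTCritSectLeave(&g_ExampleCritSect);
    }
    return rc;
}

static int exampleTerm(void)
{
    /* No thread may own or be waiting on the section at this point. */
    return RTCritSectDelete(&g_ExampleCritSect);
}
#endif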


/**
 * Enter multiple critical sections.
 *
 * This function will enter ALL the specified critical sections before returning.
 *
 * @returns VINF_SUCCESS on success.
 * @returns VERR_SEM_NESTED if nested enter on a no-nesting section. (Asserted.)
 * @returns VERR_SEM_DESTROYED if RTCritSectDelete was called while waiting.
 * @param   cCritSects      Number of critical sections in the array.
 * @param   papCritSects    Array of critical section pointers.
 *
 * @remark  Please note that this function will not necessarily come out favourable in a
 *          fight with other threads which are using the normal RTCritSectEnter() function.
 *          Therefore, avoid having to enter multiple critical sections!
 */
RTDECL(int) RTCritSectEnterMultiple(unsigned cCritSects, PRTCRITSECT *papCritSects)
#ifdef RTCRITSECT_STRICT
{
    return RTCritSectEnterMultipleDebug(cCritSects, papCritSects, __FILE__, __LINE__, 0);
}
RTDECL(int) RTCritSectEnterMultipleDebug(unsigned cCritSects, PRTCRITSECT *papCritSects, const char *pszFile, unsigned uLine, RTUINTPTR uId)
#endif /* RTCRITSECT_STRICT */
{
    Assert(cCritSects > 0);
    Assert(VALID_PTR(papCritSects));

    /*
     * Try get them all.
     */
    int rc = VERR_INVALID_PARAMETER;
    unsigned i;
    for (i = 0; i < cCritSects; i++)
    {
#ifdef RTCRITSECT_STRICT
        rc = RTCritSectTryEnterDebug(papCritSects[i], pszFile, uLine, uId);
#else
        rc = RTCritSectTryEnter(papCritSects[i]);
#endif
        if (RT_FAILURE(rc))
            break;
    }
    if (RT_SUCCESS(rc))
        return rc;

    /*
     * The retry loop.
     */
    for (unsigned cTries = 0; ; cTries++)
    {
        /*
         * We've failed, release any locks we might have gotten. ('i' is the lock that failed btw.)
         */
        unsigned j = i;
        while (j-- > 0)
        {
            int rc2 = RTCritSectLeave(papCritSects[j]);
            AssertRC(rc2);
        }
        if (rc != VERR_SEM_BUSY)
            return rc;

        /*
         * Try prevent any theoretical synchronous races with other threads.
         */
        Assert(cTries < 1000000);
        if (cTries > 10000)
            RTThreadSleep(cTries % 3);

        /*
         * Wait on the one we failed to get.
         */
#ifdef RTCRITSECT_STRICT
        rc = RTCritSectEnterDebug(papCritSects[i], pszFile, uLine, uId);
#else
        rc = RTCritSectEnter(papCritSects[i]);
#endif
        if (RT_FAILURE(rc))
            return rc;

        /*
         * Try take the others.
         */
        for (j = 0; j < cCritSects; j++)
        {
            if (j != i)
            {
#ifdef RTCRITSECT_STRICT
                rc = RTCritSectTryEnterDebug(papCritSects[j], pszFile, uLine, uId);
#else
                rc = RTCritSectTryEnter(papCritSects[j]);
#endif
                if (RT_FAILURE(rc))
                    break;
            }
        }
        if (RT_SUCCESS(rc))
            return rc;

        /*
         * We failed.
         */
        if (i > j)
        {
            int rc2 = RTCritSectLeave(papCritSects[i]);
            AssertRC(rc2);
        }
        i = j;
    }
}
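
/*
 * Usage sketch (illustrative only): taking two sections in one call so that
 * either both are held or neither is, then releasing them together. The
 * pAccountLock / pJournalLock names are hypothetical, and the block is
 * compiled out.
 */
#if 0
static int exampleTransfer(PRTCRITSECT pAccountLock, PRTCRITSECT pJournalLock)
{
    PRTCRITSECT apLocks[2] = { pAccountLock, pJournalLock };

    /* Blocks until both sections are owned by the calling thread. */
    int rc = RTCritSectEnterMultiple(RT_ELEMENTS(apLocks), apLocks);
    if (RT_SUCCESS(rc))
    {
        /* ... update the data protected by both sections ... */
        rc = RTCritSectLeaveMultiple(RT_ELEMENTS(apLocks), apLocks);
    }
    return rc;
}
#endif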


/**
 * Try to enter a critical section.
 *
 * @returns VINF_SUCCESS on success.
 * @returns VERR_SEM_BUSY if another thread owns the critical section.
 * @returns VERR_SEM_NESTED if nested enter on a no-nesting section. (Asserted.)
 * @returns VERR_SEM_DESTROYED if RTCritSectDelete was called while waiting.
 * @param   pCritSect   The critical section.
 */
RTDECL(int) RTCritSectTryEnter(PRTCRITSECT pCritSect)
#ifdef RTCRITSECT_STRICT
{
    return RTCritSectTryEnterDebug(pCritSect, __FILE__, __LINE__, 0);
}
RTDECL(int) RTCritSectTryEnterDebug(PRTCRITSECT pCritSect, const char *pszFile, unsigned uLine, RTUINTPTR uId)
#endif /* RTCRITSECT_STRICT */
{
    Assert(pCritSect);
    Assert(pCritSect->u32Magic == RTCRITSECT_MAGIC);
    RTNATIVETHREAD NativeThreadSelf = RTThreadNativeSelf();
#ifdef RTCRITSECT_STRICT
    RTTHREAD ThreadSelf = RTThreadSelf();
    if (ThreadSelf == NIL_RTTHREAD)
        RTThreadAdopt(RTTHREADTYPE_DEFAULT, 0, NULL, &ThreadSelf);
#endif

    /*
     * Try take the lock. (cLockers is -1 if it's free)
     */
    if (!ASMAtomicCmpXchgS32(&pCritSect->cLockers, 0, -1))
    {
        /*
         * Somebody is owning it (or will be soon). Perhaps it's us?
         */
        if (pCritSect->NativeThreadOwner == NativeThreadSelf)
        {
            if (!(pCritSect->fFlags & RTCRITSECT_FLAGS_NO_NESTING))
            {
                ASMAtomicIncS32(&pCritSect->cLockers);
                pCritSect->cNestings++;
                return VINF_SUCCESS;
            }
            AssertMsgFailed(("Nested entry of critsect %p\n", pCritSect));
            return VERR_SEM_NESTED;
        }
        return VERR_SEM_BUSY;
    }

    /*
     * First time
     */
    pCritSect->cNestings = 1;
    ASMAtomicXchgSize(&pCritSect->NativeThreadOwner, NativeThreadSelf);
#ifdef RTCRITSECT_STRICT
    pCritSect->Strict.pszEnterFile = pszFile;
    pCritSect->Strict.u32EnterLine = uLine;
    pCritSect->Strict.uEnterId = uId;
    ASMAtomicXchgSize(&pCritSect->Strict.ThreadOwner, (RTUINTPTR)ThreadSelf); /* screw gcc and its pedantic warnings. */
#endif

    return VINF_SUCCESS;
}
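
/*
 * Usage sketch (illustrative only): opportunistic locking with RTCritSectTryEnter,
 * falling back to other work when the section is busy. The pStatsLock and
 * exampleFlushStats names are hypothetical, and the block is compiled out.
 */
#if 0
static void exampleFlushStats(PRTCRITSECT pStatsLock)
{
    /* Returns VERR_SEM_BUSY immediately instead of blocking when another
       thread owns the section. */
    int rc = RTCritSectTryEnter(pStatsLock);
    if (RT_SUCCESS(rc))
    {
        /* ... flush the statistics protected by pStatsLock ... */
        RTCritSectLeave(pStatsLock);
    }
    else
        Assert(rc == VERR_SEM_BUSY); /* busy is fine, try again on the next tick */
}
#endif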


/**
 * Enter a critical section.
 *
 * @returns VINF_SUCCESS on success.
 * @returns VERR_SEM_NESTED if nested enter on a no-nesting section. (Asserted.)
 * @returns VERR_SEM_DESTROYED if RTCritSectDelete was called while waiting.
 * @param   pCritSect   The critical section.
 */
RTDECL(int) RTCritSectEnter(PRTCRITSECT pCritSect)
#ifdef RTCRITSECT_STRICT
{
    return RTCritSectEnterDebug(pCritSect, __FILE__, __LINE__, 0);
}
RTDECL(int) RTCritSectEnterDebug(PRTCRITSECT pCritSect, const char *pszFile, unsigned uLine, RTUINTPTR uId)
#endif /* RTCRITSECT_STRICT */
{
    Assert(pCritSect);
    Assert(pCritSect->u32Magic == RTCRITSECT_MAGIC);
    RTNATIVETHREAD NativeThreadSelf = RTThreadNativeSelf();
#ifdef RTCRITSECT_STRICT
    RTTHREAD ThreadSelf = RTThreadSelf();
    if (ThreadSelf == NIL_RTTHREAD)
        RTThreadAdopt(RTTHREADTYPE_DEFAULT, 0, NULL, &ThreadSelf);
#endif

    /* If the critical section has already been destroyed, then inform the caller. */
    if (pCritSect->u32Magic != RTCRITSECT_MAGIC)
        return VERR_SEM_DESTROYED;

    /*
     * Increment the waiter counter.
     * This becomes 0 when the section is free.
     */
    if (ASMAtomicIncS32(&pCritSect->cLockers) > 0)
    {
        /*
         * Nested?
         */
        if (pCritSect->NativeThreadOwner == NativeThreadSelf)
        {
            if (!(pCritSect->fFlags & RTCRITSECT_FLAGS_NO_NESTING))
            {
                pCritSect->cNestings++;
                return VINF_SUCCESS;
            }
            else
            {
                AssertMsgFailed(("Nested entry of critsect %p\n", pCritSect));
                ASMAtomicDecS32(&pCritSect->cLockers);
                return VERR_SEM_NESTED;
            }
        }

        for (;;)
        {
#ifdef RTCRITSECT_STRICT
            rtThreadBlocking(ThreadSelf, RTTHREADSTATE_CRITSECT, (uintptr_t)pCritSect, pszFile, uLine, uId);
#endif
            int rc = RTSemEventWait(pCritSect->EventSem, RT_INDEFINITE_WAIT);
#ifdef RTCRITSECT_STRICT
            rtThreadUnblocked(ThreadSelf, RTTHREADSTATE_CRITSECT);
#endif
            if (pCritSect->u32Magic != RTCRITSECT_MAGIC)
                return VERR_SEM_DESTROYED;
            if (rc == VINF_SUCCESS)
                break;
            AssertMsg(rc == VERR_TIMEOUT || rc == VERR_INTERRUPTED, ("rc=%Vrc\n", rc));
        }
        AssertMsg(pCritSect->NativeThreadOwner == NIL_RTNATIVETHREAD, ("pCritSect->NativeThreadOwner=%p\n", pCritSect->NativeThreadOwner));
    }

    /*
     * First time
     */
    pCritSect->cNestings = 1;
    ASMAtomicXchgSize(&pCritSect->NativeThreadOwner, NativeThreadSelf);
#ifdef RTCRITSECT_STRICT
    pCritSect->Strict.pszEnterFile = pszFile;
    pCritSect->Strict.u32EnterLine = uLine;
    pCritSect->Strict.uEnterId = uId;
    ASMAtomicXchgSize(&pCritSect->Strict.ThreadOwner, (RTUINTPTR)ThreadSelf); /* screw gcc and its pedantic warnings. */
#endif

    return VINF_SUCCESS;
}
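
/*
 * Usage sketch (illustrative only): a worker loop that copes with the section
 * being deleted while it waits; RTCritSectEnter reports this as
 * VERR_SEM_DESTROYED. The pQueueLock / exampleWorker names are hypothetical,
 * and the block is compiled out.
 */
#if 0
static int exampleWorker(PRTCRITSECT pQueueLock)
{
    for (;;)
    {
        int rc = RTCritSectEnter(pQueueLock);
        if (rc == VERR_SEM_DESTROYED)
            return VINF_SUCCESS; /* another thread tore the queue down; stop working */
        AssertRCReturn(rc, rc);

        /* ... pop and process one work item ... */

        RTCritSectLeave(pQueueLock);
    }
}
#endif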


/**
 * Leave a critical section.
 *
 * @returns VINF_SUCCESS.
 * @param   pCritSect   The critical section.
 */
RTDECL(int) RTCritSectLeave(PRTCRITSECT pCritSect)
{
    /*
     * Assert ownership and so on.
     */
    Assert(pCritSect);
    Assert(pCritSect->u32Magic == RTCRITSECT_MAGIC);
    Assert(pCritSect->cNestings > 0);
    Assert(pCritSect->cLockers >= 0);
    Assert(pCritSect->NativeThreadOwner == RTThreadNativeSelf());

    /*
     * Decrement the nesting count; when it reaches 0 we release the critsect.
     */
    pCritSect->cNestings--;
    if (pCritSect->cNestings > 0)
        ASMAtomicDecS32(&pCritSect->cLockers);
    else
    {
        /*
         * Clear the owner.
         * Decrement the waiter count; if it's still >= 0 we have to wake one of the waiters up.
         */
#ifdef RTCRITSECT_STRICT
        ASMAtomicXchgSize(&pCritSect->Strict.ThreadOwner, NIL_RTTHREAD);
#endif
        ASMAtomicXchgSize(&pCritSect->NativeThreadOwner, NIL_RTNATIVETHREAD);
        if (ASMAtomicDecS32(&pCritSect->cLockers) >= 0)
        {
            int rc = RTSemEventSignal(pCritSect->EventSem);
            AssertReleaseMsg(RT_SUCCESS(rc), ("RTSemEventSignal -> %Vrc\n", rc));
        }
    }
    return VINF_SUCCESS;
}
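
/*
 * Usage sketch (illustrative only): recursive entry. Unless the section was
 * created with RTCRITSECT_FLAGS_NO_NESTING, the owner may enter it again, and
 * every RTCritSectEnter must be balanced by an RTCritSectLeave before the
 * section is actually released. The names are hypothetical and the block is
 * compiled out.
 */
#if 0
static void exampleInner(PRTCRITSECT pLock)
{
    RTCritSectEnter(pLock);     /* nested enter: cNestings goes from 1 to 2 */
    /* ... */
    RTCritSectLeave(pLock);     /* back to 1; the section is still owned */
}

static void exampleOuter(PRTCRITSECT pLock)
{
    RTCritSectEnter(pLock);     /* first enter: this thread becomes the owner */
    exampleInner(pLock);
    RTCritSectLeave(pLock);     /* last leave: the section is released and a waiter (if any) is woken up */
}
#endif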


/**
 * Leave multiple critical sections.
 *
 * @returns VINF_SUCCESS.
 * @param   cCritSects      Number of critical sections in the array.
 * @param   papCritSects    Array of critical section pointers.
 */
RTDECL(int) RTCritSectLeaveMultiple(unsigned cCritSects, PRTCRITSECT *papCritSects)
{
    int rc = VINF_SUCCESS;
    for (unsigned i = 0; i < cCritSects; i++)
    {
        int rc2 = RTCritSectLeave(papCritSects[i]);
        if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
            rc = rc2;
    }
    return rc;
}


#ifndef RTCRITSECT_STRICT
RTDECL(int) RTCritSectEnterDebug(PRTCRITSECT pCritSect, const char *pszFile, unsigned uLine, RTUINTPTR uId)
{
    return RTCritSectEnter(pCritSect);
}

RTDECL(int) RTCritSectTryEnterDebug(PRTCRITSECT pCritSect, const char *pszFile, unsigned uLine, RTUINTPTR uId)
{
    return RTCritSectTryEnter(pCritSect);
}

RTDECL(int) RTCritSectEnterMultipleDebug(unsigned cCritSects, PRTCRITSECT *papCritSects, const char *pszFile, unsigned uLine, RTUINTPTR uId)
{
    return RTCritSectEnterMultiple(cCritSects, papCritSects);
}
#endif /* !RTCRITSECT_STRICT */


/**
 * Deletes a critical section.
 *
 * @returns VINF_SUCCESS.
 * @param   pCritSect   The critical section.
 */
RTDECL(int) RTCritSectDelete(PRTCRITSECT pCritSect)
{
    /*
     * Assert free waiters and so on.
     */
    Assert(pCritSect);
    Assert(pCritSect->u32Magic == RTCRITSECT_MAGIC);
    Assert(pCritSect->cNestings == 0);
    Assert(pCritSect->cLockers == -1);
    Assert(pCritSect->NativeThreadOwner == NIL_RTNATIVETHREAD);

    /*
     * Invalidate the structure and free the mutex.
     * In case someone is waiting we'll signal the semaphore cLockers + 1 times.
     */
    ASMAtomicXchgU32(&pCritSect->u32Magic, 0);
    pCritSect->fFlags = 0;
    pCritSect->cNestings = 0;
    pCritSect->NativeThreadOwner = NIL_RTNATIVETHREAD;
    RTSEMEVENT EventSem = pCritSect->EventSem;
    pCritSect->EventSem = NULL;
    while (pCritSect->cLockers-- >= 0)
        RTSemEventSignal(EventSem);
    ASMAtomicXchgS32(&pCritSect->cLockers, -1);
    int rc = RTSemEventDestroy(EventSem);
    AssertRC(rc);

    return rc;
}

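
/*
 * Usage sketch (illustrative only): orderly teardown. The asserts in
 * RTCritSectDelete expect no owner, no nesting and no waiters, so the caller
 * should stop all users of the section first; any waiter that does slip
 * through is woken up and sees VERR_SEM_DESTROYED. The EXAMPLEQUEUE type and
 * names are hypothetical, and the block is compiled out.
 */
#if 0
typedef struct EXAMPLEQUEUE
{
    RTCRITSECT  CritSect;
    RTTHREAD    hWorkerThread;
} EXAMPLEQUEUE;

static int exampleDestroyQueue(EXAMPLEQUEUE *pQueue)
{
    /* Make sure the worker thread is gone before the lock disappears. */
    int rc = RTThreadWait(pQueue->hWorkerThread, RT_INDEFINITE_WAIT, NULL);
    AssertRC(rc);

    /* Now nobody owns or waits on the section, so it is safe to delete it. */
    return RTCritSectDelete(&pQueue->CritSect);
}
#endif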