VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PDMAllCritSect.cpp@21587

Last change on this file since 21587 was 21264, checked in by vboxsync, 16 years ago

PDMAllCritSect.cpp: Extended some assertions for #4088.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 17.2 KB
/* $Id: PDMAllCritSect.cpp 21264 2009-07-06 17:20:57Z vboxsync $ */
/** @file
 * PDM - Critical Sections, All Contexts.
 */

/*
 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
 * Clara, CA 95054 USA or visit http://www.sun.com if you need
 * additional information or have any questions.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_PDM//_CRITSECT
#include "PDMInternal.h"
#include <VBox/pdmcritsect.h>
#include <VBox/mm.h>
#include <VBox/vmm.h>
#include <VBox/vm.h>
#include <VBox/err.h>
#include <VBox/hwaccm.h>

#include <VBox/log.h>
#include <iprt/asm.h>
#include <iprt/assert.h>
#ifdef IN_RING3
# include <iprt/semaphore.h>
#endif


/*******************************************************************************
*   Defined Constants And Macros                                               *
*******************************************************************************/
/** The number of loops to spin for in ring-3. */
#define PDMCRITSECT_SPIN_COUNT_R3   20
/** The number of loops to spin for in ring-0. */
#define PDMCRITSECT_SPIN_COUNT_R0   256
/** The number of loops to spin for in the raw-mode context. */
#define PDMCRITSECT_SPIN_COUNT_RC   256

/** @def PDMCRITSECT_STRICT
 * Enables/disables PDM critsect strictness like deadlock detection. */
#if defined(VBOX_STRICT) || defined(DOXYGEN_RUNNING)
# define PDMCRITSECT_STRICT
#endif


/**
 * Gets the ring-3 native thread handle of the calling thread.
 *
 * @returns native thread handle (ring-3).
 * @param   pCritSect   The critical section. This is used in R0 and RC.
 */
DECL_FORCE_INLINE(RTNATIVETHREAD) pdmCritSectGetNativeSelf(PPDMCRITSECT pCritSect)
{
#ifdef IN_RING3
    NOREF(pCritSect);
    RTNATIVETHREAD hNativeSelf = RTThreadNativeSelf();
#else
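    /* In R0 and RC the caller must be an EMT (VMMGetCpu asserts this).  The
       owner field stores ring-3 native thread handles, so we return the EMT's
       ring-3 handle from the VMCPU rather than the current context's own
       thread handle. */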
    AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, ("%RX32\n", pCritSect->s.Core.u32Magic),
                    VERR_SEM_DESTROYED);
    PVM pVM = pCritSect->s.CTX_SUFF(pVM);                    AssertPtr(pVM);
    PVMCPU pVCpu = VMMGetCpu(pVM);                           AssertPtr(pVCpu);
    RTNATIVETHREAD hNativeSelf = pVCpu->hNativeThread;       Assert(hNativeSelf != NIL_RTNATIVETHREAD);
#endif
    return hNativeSelf;
}


/**
 * Tail code called when we've won the battle for the lock.
 *
 * @returns VINF_SUCCESS.
 *
 * @param   pCritSect       The critical section.
 * @param   hNativeSelf     The native handle of this thread.
 */
DECL_FORCE_INLINE(int) pdmCritSectEnterFirst(PPDMCRITSECT pCritSect, RTNATIVETHREAD hNativeSelf)
{
    AssertMsg(pCritSect->s.Core.NativeThreadOwner == NIL_RTNATIVETHREAD, ("NativeThreadOwner=%p\n", pCritSect->s.Core.NativeThreadOwner));
    Assert(!(pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK));

    ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 1);
    ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, hNativeSelf);

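    /* Strict ring-3 builds additionally record the owning RTTHREAD and bump
       its write lock count (RTThreadWriteLockInc); this is the bookkeeping
       behind the PDMCRITSECT_STRICT deadlock detection mentioned above. */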
# if defined(PDMCRITSECT_STRICT) && defined(IN_RING3)
    pCritSect->s.Core.Strict.pszEnterFile = NULL;
    pCritSect->s.Core.Strict.u32EnterLine = 0;
    pCritSect->s.Core.Strict.uEnterId     = 0;
    RTTHREAD hSelf = RTThreadSelf();
    ASMAtomicWriteHandle(&pCritSect->s.Core.Strict.ThreadOwner, hSelf);
    RTThreadWriteLockInc(hSelf);
# endif

    STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
    return VINF_SUCCESS;
}


#ifdef IN_RING3
/**
 * Deals with the contended case in ring-3.
 *
 * @returns VINF_SUCCESS or VERR_SEM_DESTROYED.
 * @param   pCritSect       The critsect.
 * @param   hNativeSelf     The native thread handle.
 */
static int pdmR3CritSectEnterContended(PPDMCRITSECT pCritSect, RTNATIVETHREAD hNativeSelf)
{
    /*
     * Start waiting.
     */
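    /* Incrementing cLockers announces us as a waiter; if the result is 0 the
       counter was -1, i.e. the owner released the section after our caller's
       spin phase, and we have in fact just taken ownership. */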
    if (ASMAtomicIncS32(&pCritSect->s.Core.cLockers) == 0)
        return pdmCritSectEnterFirst(pCritSect, hNativeSelf);
    STAM_COUNTER_INC(&pCritSect->s.StatContentionR3);

    /*
     * The wait loop.
     */
    PSUPDRVSESSION pSession = pCritSect->s.CTX_SUFF(pVM)->pSession;
    SUPSEMEVENT    hEvent   = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
# ifdef PDMCRITSECT_STRICT
    RTTHREAD hSelf = RTThreadSelf();
    if (hSelf == NIL_RTTHREAD)
        RTThreadAdopt(RTTHREADTYPE_DEFAULT, 0, NULL, &hSelf);
# endif
    for (;;)
    {
# ifdef PDMCRITSECT_STRICT
        RTThreadBlocking(hSelf, RTTHREADSTATE_CRITSECT, (uintptr_t)pCritSect, NULL, 0, 0);
# endif
        int rc = SUPSemEventWaitNoResume(pSession, hEvent, RT_INDEFINITE_WAIT);
# ifdef PDMCRITSECT_STRICT
        RTThreadUnblocked(hSelf, RTTHREADSTATE_CRITSECT);
# endif
        if (RT_UNLIKELY(pCritSect->s.Core.u32Magic != RTCRITSECT_MAGIC))
            return VERR_SEM_DESTROYED;
        if (rc == VINF_SUCCESS)
            return pdmCritSectEnterFirst(pCritSect, hNativeSelf);
        AssertMsg(rc == VERR_INTERRUPTED, ("rc=%Rrc\n", rc));
    }
    /* won't get here */
}
#endif /* IN_RING3 */


/**
 * Enters a PDM critical section.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @returns rcBusy when encountering a busy critical section in GC/R0.
 * @returns VERR_SEM_DESTROYED if the critical section is dead.
 *
 * @param   pCritSect       The PDM critical section to enter.
 * @param   rcBusy          The status code to return when we're in GC or R0
 *                          and the section is busy.
 */
VMMDECL(int) PDMCritSectEnter(PPDMCRITSECT pCritSect, int rcBusy)
{
    Assert(pCritSect->s.Core.cNestings < 8);  /* useful to catch incorrect locking */

    /*
     * If the critical section has already been destroyed, then inform the caller.
     */
    AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC,
                    ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic),
                    VERR_SEM_DESTROYED);

    /*
     * See if we're lucky.
     */
    RTNATIVETHREAD hNativeSelf = pdmCritSectGetNativeSelf(pCritSect);
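    /* cLockers is -1 when the section is free; the CmpXchg below claims it by
       flipping -1 to 0.  Nested enters and contending threads push the count
       above 0. */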
    /* Not owned ... */
    if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
        return pdmCritSectEnterFirst(pCritSect, hNativeSelf);

    /* ... or nested. */
    if (pCritSect->s.Core.NativeThreadOwner == hNativeSelf)
    {
        ASMAtomicIncS32(&pCritSect->s.Core.cLockers);
        ASMAtomicIncS32(&pCritSect->s.Core.cNestings);
        ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
        return VINF_SUCCESS;
    }

    /*
     * Spin for a bit without incrementing the counter.
     */
    /** @todo Move this to cfgm variables since it doesn't make sense to spin on UNI
     *        cpu systems. */
    int32_t cSpinsLeft = CTX_SUFF(PDMCRITSECT_SPIN_COUNT_);
    while (cSpinsLeft-- > 0)
    {
        if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
            return pdmCritSectEnterFirst(pCritSect, hNativeSelf);
        /** @todo need pause/nop instruction here! */
        /** @todo Should use monitor/mwait on e.g. &cLockers here, possibly with a
            cli'ed pendingpreemption check up front using sti w/ instruction fusing
            for avoiding races. Hmm ... This is assuming the other party is actually
            executing code on another CPU... */
    }

#ifdef IN_RING3
    /*
     * Take the slow path.
     */
    return pdmR3CritSectEnterContended(pCritSect, hNativeSelf);
#else
    /*
     * Return busy.
     */
    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
    LogFlow(("PDMCritSectEnter: locked => R3 (%Rrc)\n", rcBusy));
    return rcBusy;
#endif
}


/**
 * Tries to enter a critical section.
 *
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_BUSY if the critsect was owned.
 * @retval  VERR_SEM_NESTED if a nested enter was attempted on a no-nesting section. (Asserted.)
 * @retval  VERR_SEM_DESTROYED if RTCritSectDelete was called while waiting.
 *
 * @param   pCritSect   The critical section.
 */
VMMDECL(int) PDMCritSectTryEnter(PPDMCRITSECT pCritSect)
{
    /*
     * If the critical section has already been destroyed, then inform the caller.
     */
    AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC,
                    ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic),
                    VERR_SEM_DESTROYED);

    /*
     * See if we're lucky.
     */
    RTNATIVETHREAD hNativeSelf = pdmCritSectGetNativeSelf(pCritSect);
    /* Not owned ... */
    if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
        return pdmCritSectEnterFirst(pCritSect, hNativeSelf);

    /* ... or nested. */
    if (pCritSect->s.Core.NativeThreadOwner == hNativeSelf)
    {
        ASMAtomicIncS32(&pCritSect->s.Core.cLockers);
        ASMAtomicIncS32(&pCritSect->s.Core.cNestings);
        ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
        return VINF_SUCCESS;
    }

    /* no spinning */

    /*
     * Return busy.
     */
#ifdef IN_RING3
    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionR3);
#else
    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
#endif
    LogFlow(("PDMCritSectTryEnter: locked\n"));
    return VERR_SEM_BUSY;
}


#ifdef IN_RING3
/**
 * Enters a PDM critical section.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @returns rcBusy when encountering a busy critical section in GC/R0.
 * @returns VERR_SEM_DESTROYED if the critical section is dead.
 *
 * @param   pCritSect       The PDM critical section to enter.
 * @param   fCallRing3      Whether this is a VMMRZCallRing3() request.
 */
VMMR3DECL(int) PDMR3CritSectEnterEx(PPDMCRITSECT pCritSect, bool fCallRing3)
{
    int rc = PDMCritSectEnter(pCritSect, VERR_INTERNAL_ERROR);
    if (    rc == VINF_SUCCESS
        &&  fCallRing3
        &&  pCritSect->s.Core.Strict.ThreadOwner != NIL_RTTHREAD)
    {
        RTThreadWriteLockDec(pCritSect->s.Core.Strict.ThreadOwner);
        ASMAtomicWriteHandle(&pCritSect->s.Core.Strict.ThreadOwner, NIL_RTTHREAD);
    }
    return rc;
}
#endif /* IN_RING3 */


/**
 * Leaves a critical section entered with PDMCritSectEnter().
 *
 * @param   pCritSect   The PDM critical section to leave.
 */
VMMDECL(void) PDMCritSectLeave(PPDMCRITSECT pCritSect)
{
    AssertMsg(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic));
    Assert(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC);
    Assert(pCritSect->s.Core.NativeThreadOwner == pdmCritSectGetNativeSelf(pCritSect));
    Assert(pCritSect->s.Core.cNestings >= 1);

    /*
     * Nested leave.
     */
    if (pCritSect->s.Core.cNestings > 1)
    {
        ASMAtomicDecS32(&pCritSect->s.Core.cNestings);
        ASMAtomicDecS32(&pCritSect->s.Core.cLockers);
        return;
    }

#if defined(IN_RING3) || defined(IN_RING0)
    /*
     * Leave for real.
     */
    /* update members. */
# ifdef IN_RING3
    RTSEMEVENT hEventToSignal  = pCritSect->s.EventToSignal;
    pCritSect->s.EventToSignal = NIL_RTSEMEVENT;
#  if defined(PDMCRITSECT_STRICT)
    if (pCritSect->s.Core.Strict.ThreadOwner != NIL_RTTHREAD)
        RTThreadWriteLockDec(pCritSect->s.Core.Strict.ThreadOwner);
    ASMAtomicWriteHandle(&pCritSect->s.Core.Strict.ThreadOwner, NIL_RTTHREAD);
#  endif
# endif
    ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
    Assert(pCritSect->s.Core.Strict.ThreadOwner == NIL_RTTHREAD);
    ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD);
    ASMAtomicDecS32(&pCritSect->s.Core.cNestings);

    /* stop and decrement lockers. */
    STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);
    ASMCompilerBarrier();
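    /* If the decrement below doesn't take cLockers back to -1, at least one
       thread has announced itself as a waiter and must be woken up. */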
    if (ASMAtomicDecS32(&pCritSect->s.Core.cLockers) >= 0)
    {
        /* Someone is waiting, wake up one of them. */
        SUPSEMEVENT    hEvent   = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
        PSUPDRVSESSION pSession = pCritSect->s.CTX_SUFF(pVM)->pSession;
        int rc = SUPSemEventSignal(pSession, hEvent);
        AssertRC(rc);
    }

# ifdef IN_RING3
    /* Signal exit event. */
    if (hEventToSignal != NIL_RTSEMEVENT)
    {
        LogBird(("Signalling %#x\n", hEventToSignal));
        int rc = RTSemEventSignal(hEventToSignal);
        AssertRC(rc);
    }
# endif

# if defined(DEBUG_bird) && defined(IN_RING0)
    VMMTrashVolatileXMMRegs();
# endif

#else /* IN_RC */
    /*
     * Try leave it.
     */
    if (pCritSect->s.Core.cLockers == 0)
    {
        ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 0);
        RTNATIVETHREAD hNativeThread = pCritSect->s.Core.NativeThreadOwner;
        ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
        STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);

        ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD);
        if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, -1, 0))
            return;

        /* darn, someone raced in on us. */
        ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, hNativeThread);
        STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
        ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 1);
    }
    ASMAtomicOrU32(&pCritSect->s.Core.fFlags, PDMCRITSECT_FLAGS_PENDING_UNLOCK);
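    /* The raw-mode context has no way of signalling the event semaphore here,
       so the real unlock is deferred: PENDING_UNLOCK makes the ownership
       queries treat the section as released, and the queued entry below gets
       processed by PDMCritSectFF in ring-3. */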

    /*
     * Queue the request.
     */
    PVM pVM = pCritSect->s.CTX_SUFF(pVM);     AssertPtr(pVM);
    PVMCPU pVCpu = VMMGetCpu(pVM);            AssertPtr(pVCpu);
    uint32_t i = pVCpu->pdm.s.cQueuedCritSectLeaves++;
    LogFlow(("PDMCritSectLeave: [%d]=%p => R3\n", i, pCritSect));
    AssertFatal(i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectsLeaves));
    pVCpu->pdm.s.apQueuedCritSectsLeaves[i] = MMHyperCCToR3(pVM, pCritSect);
    VMCPU_FF_SET(pVCpu, VMCPU_FF_PDM_CRITSECT);
    VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
    STAM_REL_COUNTER_INC(&pVM->pdm.s.StatQueuedCritSectLeaves);
    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZUnlock);
#endif /* IN_RC */
}


#if defined(IN_RING3) || defined(IN_RING0)
/**
 * Process the critical sections queued for ring-3 'leave'.
 *
 * @param   pVCpu   The VMCPU handle.
 */
VMMDECL(void) PDMCritSectFF(PVMCPU pVCpu)
{
    Assert(pVCpu->pdm.s.cQueuedCritSectLeaves > 0);

    const RTUINT c = pVCpu->pdm.s.cQueuedCritSectLeaves;
    for (RTUINT i = 0; i < c; i++)
    {
# ifdef IN_RING3
        PPDMCRITSECT pCritSect = pVCpu->pdm.s.apQueuedCritSectsLeaves[i];
# else
        PPDMCRITSECT pCritSect = (PPDMCRITSECT)MMHyperR3ToCC(pVCpu->CTX_SUFF(pVM), pVCpu->pdm.s.apQueuedCritSectsLeaves[i]);
# endif
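        /* The queue holds ring-3 addresses (stored with MMHyperCCToR3 in
           PDMCritSectLeave), hence the MMHyperR3ToCC translation for the
           non-ring-3 build above. */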

        PDMCritSectLeave(pCritSect);
        LogFlow(("PDMR3CritSectFF: %p\n", pCritSect));
    }

    pVCpu->pdm.s.cQueuedCritSectLeaves = 0;
    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PDM_CRITSECT);
}
#endif /* IN_RING3 || IN_RING0 */


/**
 * Checks whether the caller is the owner of the critical section.
 *
 * @returns true if owner.
 * @returns false if not owner.
 * @param   pCritSect   The critical section.
 */
VMMDECL(bool) PDMCritSectIsOwner(PCPDMCRITSECT pCritSect)
{
#ifdef IN_RING3
    return RTCritSectIsOwner(&pCritSect->s.Core);
#else
    PVM pVM = pCritSect->s.CTX_SUFF(pVM);     AssertPtr(pVM);
    PVMCPU pVCpu = VMMGetCpu(pVM);            AssertPtr(pVCpu);
    if (pCritSect->s.Core.NativeThreadOwner != pVCpu->hNativeThread)
        return false;
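    /* A pending (queued) ring-3 leave means the section is logically released
       even though the owner field hasn't been cleared yet. */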
    return (pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK) == 0;
#endif
}


/**
 * Checks whether the specified VCPU is the owner of the critical section.
 *
 * @returns true if owner.
 * @returns false if not owner.
 * @param   pCritSect   The critical section.
 * @param   idCpu       The VCPU id.
 */
VMMDECL(bool) PDMCritSectIsOwnerEx(PCPDMCRITSECT pCritSect, VMCPUID idCpu)
{
#ifdef IN_RING3
    NOREF(idCpu);
    return RTCritSectIsOwner(&pCritSect->s.Core);
#else
    PVM pVM = pCritSect->s.CTX_SUFF(pVM);
    AssertPtr(pVM);
    Assert(idCpu < pVM->cCPUs);
    return pCritSect->s.Core.NativeThreadOwner == pVM->aCpus[idCpu].hNativeThread
        && (pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK) == 0;
#endif
}


/**
 * Checks if somebody currently owns the critical section.
 *
 * @returns true if locked.
 * @returns false if not locked.
 *
 * @param   pCritSect   The critical section.
 *
 * @remarks This doesn't prove that no deadlocks will occur later on; it's
 *          just a debugging tool.
 */
VMMDECL(bool) PDMCritSectIsOwned(PCPDMCRITSECT pCritSect)
{
    return pCritSect->s.Core.NativeThreadOwner != NIL_RTNATIVETHREAD
        && (pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK) == 0;
}


/**
 * Checks if a critical section is initialized or not.
 *
 * @returns true if initialized.
 * @returns false if not initialized.
 * @param   pCritSect   The critical section.
 */
VMMDECL(bool) PDMCritSectIsInitialized(PCPDMCRITSECT pCritSect)
{
    return RTCritSectIsInitialized(&pCritSect->s.Core);
}


/**
 * Gets the recursion depth.
 *
 * @returns The recursion depth.
 * @param   pCritSect   The critical section.
 */
VMMDECL(uint32_t) PDMCritSectGetRecursion(PCPDMCRITSECT pCritSect)
{
    return RTCritSectGetRecursion(&pCritSect->s.Core);
}