VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PDMAllCritSect.cpp@21892

Last change on this file since 21892 was 21591, checked in by vboxsync, 15 years ago

PDMCritSectEnter: PAUSE when spinning.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 17.2 KB
/* $Id: PDMAllCritSect.cpp 21591 2009-07-14 22:08:57Z vboxsync $ */
/** @file
 * PDM - Critical Sections, All Contexts.
 */

/*
 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
 * Clara, CA 95054 USA or visit http://www.sun.com if you need
 * additional information or have any questions.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_PDM//_CRITSECT
#include "PDMInternal.h"
#include <VBox/pdmcritsect.h>
#include <VBox/mm.h>
#include <VBox/vmm.h>
#include <VBox/vm.h>
#include <VBox/err.h>
#include <VBox/hwaccm.h>

#include <VBox/log.h>
#include <iprt/asm.h>
#include <iprt/assert.h>
#ifdef IN_RING3
# include <iprt/semaphore.h>
#endif


/*******************************************************************************
*   Defined Constants And Macros                                               *
*******************************************************************************/
/** The number of loops to spin for in ring-3. */
#define PDMCRITSECT_SPIN_COUNT_R3   20
/** The number of loops to spin for in ring-0. */
#define PDMCRITSECT_SPIN_COUNT_R0   256
/** The number of loops to spin for in the raw-mode context. */
#define PDMCRITSECT_SPIN_COUNT_RC   256
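
/* Illustrative note (added for clarity; not part of the original file):
 * PDMCritSectEnter below picks its spin budget with
 * CTX_SUFF(PDMCRITSECT_SPIN_COUNT_).  Assuming the usual CTX_SUFF
 * token-pasting from the VBox/IPRT cdefs headers, that line reduces per
 * compilation context to roughly:
 *
 *     int32_t cSpinsLeft = PDMCRITSECT_SPIN_COUNT_R3;   // ring-3:    20 spins
 *     int32_t cSpinsLeft = PDMCRITSECT_SPIN_COUNT_R0;   // ring-0:   256 spins
 *     int32_t cSpinsLeft = PDMCRITSECT_SPIN_COUNT_RC;   // raw-mode: 256 spins
 */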

/** @def PDMCRITSECT_STRICT
 * Enables/disables PDM critsect strictness like deadlock detection. */
#if defined(VBOX_STRICT) || defined(DOXYGEN_RUNNING)
# define PDMCRITSECT_STRICT
#endif


/**
 * Gets the ring-3 native thread handle of the calling thread.
 *
 * @returns native thread handle (ring-3).
 * @param   pCritSect   The critical section. This is used in R0 and RC.
 */
DECL_FORCE_INLINE(RTNATIVETHREAD) pdmCritSectGetNativeSelf(PPDMCRITSECT pCritSect)
{
#ifdef IN_RING3
    NOREF(pCritSect);
    RTNATIVETHREAD  hNativeSelf = RTThreadNativeSelf();
#else
    AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, ("%RX32\n", pCritSect->s.Core.u32Magic),
                    VERR_SEM_DESTROYED);
    PVM             pVM         = pCritSect->s.CTX_SUFF(pVM);   AssertPtr(pVM);
    PVMCPU          pVCpu       = VMMGetCpu(pVM);               AssertPtr(pVCpu);
    RTNATIVETHREAD  hNativeSelf = pVCpu->hNativeThread;         Assert(hNativeSelf != NIL_RTNATIVETHREAD);
#endif
    return hNativeSelf;
}


/**
 * Tail code called when we've won the battle for the lock.
 *
 * @returns VINF_SUCCESS.
 *
 * @param   pCritSect       The critical section.
 * @param   hNativeSelf     The native handle of this thread.
 */
DECL_FORCE_INLINE(int) pdmCritSectEnterFirst(PPDMCRITSECT pCritSect, RTNATIVETHREAD hNativeSelf)
{
    AssertMsg(pCritSect->s.Core.NativeThreadOwner == NIL_RTNATIVETHREAD, ("NativeThreadOwner=%p\n", pCritSect->s.Core.NativeThreadOwner));
    Assert(!(pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK));

    ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 1);
    ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, hNativeSelf);

# if defined(PDMCRITSECT_STRICT) && defined(IN_RING3)
    pCritSect->s.Core.Strict.pszEnterFile = NULL;
    pCritSect->s.Core.Strict.u32EnterLine = 0;
    pCritSect->s.Core.Strict.uEnterId     = 0;
    RTTHREAD hSelf = RTThreadSelf();
    ASMAtomicWriteHandle(&pCritSect->s.Core.Strict.ThreadOwner, hSelf);
    RTThreadWriteLockInc(hSelf);
# endif

    STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
    return VINF_SUCCESS;
}


#ifdef IN_RING3
/**
 * Deals with the contended case in ring-3.
 *
 * @returns VINF_SUCCESS or VERR_SEM_DESTROYED.
 * @param   pCritSect       The critsect.
 * @param   hNativeSelf     The native thread handle.
 */
static int pdmR3CritSectEnterContended(PPDMCRITSECT pCritSect, RTNATIVETHREAD hNativeSelf)
{
    /*
     * Start waiting.
     */
    if (ASMAtomicIncS32(&pCritSect->s.Core.cLockers) == 0)
        return pdmCritSectEnterFirst(pCritSect, hNativeSelf);
    STAM_COUNTER_INC(&pCritSect->s.StatContentionR3);

    /*
     * The wait loop.
     */
    PSUPDRVSESSION  pSession = pCritSect->s.CTX_SUFF(pVM)->pSession;
    SUPSEMEVENT     hEvent   = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
# ifdef PDMCRITSECT_STRICT
    RTTHREAD hSelf = RTThreadSelf();
    if (hSelf == NIL_RTTHREAD)
        RTThreadAdopt(RTTHREADTYPE_DEFAULT, 0, NULL, &hSelf);
# endif
    for (;;)
    {
# ifdef PDMCRITSECT_STRICT
        RTThreadBlocking(hSelf, RTTHREADSTATE_CRITSECT, (uintptr_t)pCritSect, NULL, 0, 0);
# endif
        int rc = SUPSemEventWaitNoResume(pSession, hEvent, RT_INDEFINITE_WAIT);
# ifdef PDMCRITSECT_STRICT
        RTThreadUnblocked(hSelf, RTTHREADSTATE_CRITSECT);
# endif
        if (RT_UNLIKELY(pCritSect->s.Core.u32Magic != RTCRITSECT_MAGIC))
            return VERR_SEM_DESTROYED;
        if (rc == VINF_SUCCESS)
            return pdmCritSectEnterFirst(pCritSect, hNativeSelf);
        AssertMsg(rc == VERR_INTERRUPTED, ("rc=%Rrc\n", rc));
    }
    /* won't get here */
}
#endif /* IN_RING3 */


/**
 * Enters a PDM critical section.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @returns rcBusy when encountering a busy critical section in GC/R0.
 * @returns VERR_SEM_DESTROYED if the critical section is dead.
 *
 * @param   pCritSect   The PDM critical section to enter.
 * @param   rcBusy      The status code to return when we're in GC or R0
 *                      and the section is busy.
 */
VMMDECL(int) PDMCritSectEnter(PPDMCRITSECT pCritSect, int rcBusy)
{
    Assert(pCritSect->s.Core.cNestings < 8);  /* useful to catch incorrect locking */

    /*
     * If the critical section has already been destroyed, then inform the caller.
     */
    AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC,
                    ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic),
                    VERR_SEM_DESTROYED);

    /*
     * See if we're lucky.
     */
    RTNATIVETHREAD hNativeSelf = pdmCritSectGetNativeSelf(pCritSect);
    /* Not owned ... */
    if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
        return pdmCritSectEnterFirst(pCritSect, hNativeSelf);

    /* ... or nested. */
    if (pCritSect->s.Core.NativeThreadOwner == hNativeSelf)
    {
        ASMAtomicIncS32(&pCritSect->s.Core.cLockers);
        ASMAtomicIncS32(&pCritSect->s.Core.cNestings);
        ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
        return VINF_SUCCESS;
    }

    /*
     * Spin for a bit without incrementing the counter.
     */
    /** @todo Move this to cfgm variables since it doesn't make sense to spin on UNI
     *        cpu systems. */
    int32_t cSpinsLeft = CTX_SUFF(PDMCRITSECT_SPIN_COUNT_);
    while (cSpinsLeft-- > 0)
    {
        if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
            return pdmCritSectEnterFirst(pCritSect, hNativeSelf);
        ASMNopPause();
        /** @todo Should use monitor/mwait on e.g. &cLockers here, possibly with a
            cli'ed pendingpreemption check up front using sti w/ instruction fusing
            for avoiding races. Hmm ... This is assuming the other party is actually
            executing code on another CPU ... which we could keep track of if we
            wanted. */
    }

#ifdef IN_RING3
    /*
     * Take the slow path.
     */
    return pdmR3CritSectEnterContended(pCritSect, hNativeSelf);
#else
    /*
     * Return busy.
     */
    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
    LogFlow(("PDMCritSectEnter: locked => R3 (%Rrc)\n", rcBusy));
    return rcBusy;
#endif
}
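
/* Usage sketch (added for illustration; not part of the original file).  The
 * device pointer, member name and rcBusy value below are placeholders, not
 * taken from this source: a caller in a context-agnostic device handler
 * typically passes a "retry the access in ring-3" status as rcBusy and
 * propagates it when the section is contended in R0/RC, while ring-3 callers
 * simply block until they own the section:
 *
 *     int rc = PDMCritSectEnter(&pThis->CritSect, VINF_IOM_HC_MMIO_WRITE);
 *     if (rc != VINF_SUCCESS)
 *         return rc;                  // rcBusy in R0/RC; R3 waits instead
 *     ...                             // access the shared device state
 *     PDMCritSectLeave(&pThis->CritSect);
 */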


/**
 * Try to enter a critical section.
 *
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_BUSY if the critsect was owned.
 * @retval  VERR_SEM_NESTED if nested enter on a no-nesting section. (Asserted.)
 * @retval  VERR_SEM_DESTROYED if RTCritSectDelete was called while waiting.
 *
 * @param   pCritSect   The critical section.
 */
VMMDECL(int) PDMCritSectTryEnter(PPDMCRITSECT pCritSect)
{
    /*
     * If the critical section has already been destroyed, then inform the caller.
     */
    AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC,
                    ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic),
                    VERR_SEM_DESTROYED);

    /*
     * See if we're lucky.
     */
    RTNATIVETHREAD hNativeSelf = pdmCritSectGetNativeSelf(pCritSect);
    /* Not owned ... */
    if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
        return pdmCritSectEnterFirst(pCritSect, hNativeSelf);

    /* ... or nested. */
    if (pCritSect->s.Core.NativeThreadOwner == hNativeSelf)
    {
        ASMAtomicIncS32(&pCritSect->s.Core.cLockers);
        ASMAtomicIncS32(&pCritSect->s.Core.cNestings);
        ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
        return VINF_SUCCESS;
    }

    /* no spinning */

    /*
     * Return busy.
     */
#ifdef IN_RING3
    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionR3);
#else
    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
#endif
    LogFlow(("PDMCritSectTryEnter: locked\n"));
    return VERR_SEM_BUSY;
}
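
/* Usage sketch (added for illustration; not part of the original file, and the
 * names are placeholders): PDMCritSectTryEnter never blocks and never spins,
 * so it suits opportunistic work that can simply be skipped or retried when
 * somebody else owns the section:
 *
 *     if (PDMCritSectTryEnter(&pThis->CritSect) == VINF_SUCCESS)
 *     {
 *         ...                         // fast-path work under the lock
 *         PDMCritSectLeave(&pThis->CritSect);
 *     }
 *     // else: the section is busy; try again later
 */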


#ifdef IN_RING3
/**
 * Enters a PDM critical section.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @returns rcBusy when encountering a busy critical section in GC/R0.
 * @returns VERR_SEM_DESTROYED if the critical section is dead.
 *
 * @param   pCritSect   The PDM critical section to enter.
 * @param   fCallRing3  Whether this is a VMMRZCallRing3() request.
 */
VMMR3DECL(int) PDMR3CritSectEnterEx(PPDMCRITSECT pCritSect, bool fCallRing3)
{
    int rc = PDMCritSectEnter(pCritSect, VERR_INTERNAL_ERROR);
    if (    rc == VINF_SUCCESS
        &&  fCallRing3
        &&  pCritSect->s.Core.Strict.ThreadOwner != NIL_RTTHREAD)
    {
        RTThreadWriteLockDec(pCritSect->s.Core.Strict.ThreadOwner);
        ASMAtomicWriteHandle(&pCritSect->s.Core.Strict.ThreadOwner, NIL_RTTHREAD);
    }
    return rc;
}
#endif /* IN_RING3 */


/**
 * Leaves a critical section entered with PDMCritSectEnter().
 *
 * @param   pCritSect   The PDM critical section to leave.
 */
VMMDECL(void) PDMCritSectLeave(PPDMCRITSECT pCritSect)
{
    AssertMsg(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic));
    Assert(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC);
    Assert(pCritSect->s.Core.NativeThreadOwner == pdmCritSectGetNativeSelf(pCritSect));
    Assert(pCritSect->s.Core.cNestings >= 1);

    /*
     * Nested leave.
     */
    if (pCritSect->s.Core.cNestings > 1)
    {
        ASMAtomicDecS32(&pCritSect->s.Core.cNestings);
        ASMAtomicDecS32(&pCritSect->s.Core.cLockers);
        return;
    }

#if defined(IN_RING3) || defined(IN_RING0)
    /*
     * Leave for real.
     */
    /* update members. */
# ifdef IN_RING3
    RTSEMEVENT hEventToSignal  = pCritSect->s.EventToSignal;
    pCritSect->s.EventToSignal = NIL_RTSEMEVENT;
#  if defined(PDMCRITSECT_STRICT)
    if (pCritSect->s.Core.Strict.ThreadOwner != NIL_RTTHREAD)
        RTThreadWriteLockDec(pCritSect->s.Core.Strict.ThreadOwner);
    ASMAtomicWriteHandle(&pCritSect->s.Core.Strict.ThreadOwner, NIL_RTTHREAD);
#  endif
# endif
    ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
    Assert(pCritSect->s.Core.Strict.ThreadOwner == NIL_RTTHREAD);
    ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD);
    ASMAtomicDecS32(&pCritSect->s.Core.cNestings);

    /* stop and decrement lockers. */
    STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);
    ASMCompilerBarrier();
    if (ASMAtomicDecS32(&pCritSect->s.Core.cLockers) >= 0)
    {
        /* Someone is waiting, wake up one of them. */
        SUPSEMEVENT     hEvent   = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
        PSUPDRVSESSION  pSession = pCritSect->s.CTX_SUFF(pVM)->pSession;
        int rc = SUPSemEventSignal(pSession, hEvent);
        AssertRC(rc);
    }

# ifdef IN_RING3
    /* Signal exit event. */
    if (hEventToSignal != NIL_RTSEMEVENT)
    {
        LogBird(("Signalling %#x\n", hEventToSignal));
        int rc = RTSemEventSignal(hEventToSignal);
        AssertRC(rc);
    }
# endif

# if defined(DEBUG_bird) && defined(IN_RING0)
    VMMTrashVolatileXMMRegs();
# endif

#else  /* IN_RC */
    /*
     * Try leave it.
     */
    if (pCritSect->s.Core.cLockers == 0)
    {
        ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 0);
        RTNATIVETHREAD hNativeThread = pCritSect->s.Core.NativeThreadOwner;
        ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
        STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);

        ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD);
        if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, -1, 0))
            return;

        /* darn, someone raced in on us. */
        ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, hNativeThread);
        STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
        ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 1);
    }
    ASMAtomicOrU32(&pCritSect->s.Core.fFlags, PDMCRITSECT_FLAGS_PENDING_UNLOCK);

    /*
     * Queue the request.
     */
    PVM         pVM   = pCritSect->s.CTX_SUFF(pVM);     AssertPtr(pVM);
    PVMCPU      pVCpu = VMMGetCpu(pVM);                 AssertPtr(pVCpu);
    uint32_t    i     = pVCpu->pdm.s.cQueuedCritSectLeaves++;
    LogFlow(("PDMCritSectLeave: [%d]=%p => R3\n", i, pCritSect));
    AssertFatal(i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectsLeaves));
    pVCpu->pdm.s.apQueuedCritSectsLeaves[i] = MMHyperCCToR3(pVM, pCritSect);
    VMCPU_FF_SET(pVCpu, VMCPU_FF_PDM_CRITSECT);
    VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
    STAM_REL_COUNTER_INC(&pVM->pdm.s.StatQueuedCritSectLeaves);
    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZUnlock);
#endif /* IN_RC */
}


#if defined(IN_RING3) || defined(IN_RING0)
/**
 * Process the critical sections queued for ring-3 'leave'.
 *
 * @param   pVCpu   The VMCPU handle.
 */
VMMDECL(void) PDMCritSectFF(PVMCPU pVCpu)
{
    Assert(pVCpu->pdm.s.cQueuedCritSectLeaves > 0);

    const RTUINT c = pVCpu->pdm.s.cQueuedCritSectLeaves;
    for (RTUINT i = 0; i < c; i++)
    {
# ifdef IN_RING3
        PPDMCRITSECT pCritSect = pVCpu->pdm.s.apQueuedCritSectsLeaves[i];
# else
        PPDMCRITSECT pCritSect = (PPDMCRITSECT)MMHyperR3ToCC(pVCpu->CTX_SUFF(pVM), pVCpu->pdm.s.apQueuedCritSectsLeaves[i]);
# endif

        PDMCritSectLeave(pCritSect);
        LogFlow(("PDMR3CritSectFF: %p\n", pCritSect));
    }

    pVCpu->pdm.s.cQueuedCritSectLeaves = 0;
    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PDM_CRITSECT);
}
#endif /* IN_RING3 || IN_RING0 */


/**
 * Checks whether the caller is the owner of the critical section.
 *
 * @returns true if owner.
 * @returns false if not owner.
 * @param   pCritSect   The critical section.
 */
VMMDECL(bool) PDMCritSectIsOwner(PCPDMCRITSECT pCritSect)
{
#ifdef IN_RING3
    return RTCritSectIsOwner(&pCritSect->s.Core);
#else
    PVM     pVM   = pCritSect->s.CTX_SUFF(pVM);     AssertPtr(pVM);
    PVMCPU  pVCpu = VMMGetCpu(pVM);                 AssertPtr(pVCpu);
    if (pCritSect->s.Core.NativeThreadOwner != pVCpu->hNativeThread)
        return false;
    return (pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK) == 0;
#endif
}
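
/* Usage sketch (added for illustration; not part of the original file, and the
 * member name is a placeholder): internal device helpers that rely on the
 * caller already holding the lock commonly guard themselves with an
 * ownership assertion:
 *
 *     Assert(PDMCritSectIsOwner(&pThis->CritSect));
 */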


/**
 * Checks whether the specified VCPU is the owner of the critical section.
 *
 * @returns true if owner.
 * @returns false if not owner.
 * @param   pCritSect   The critical section.
 * @param   idCpu       VCPU id.
 */
VMMDECL(bool) PDMCritSectIsOwnerEx(PCPDMCRITSECT pCritSect, VMCPUID idCpu)
{
#ifdef IN_RING3
    NOREF(idCpu);
    return RTCritSectIsOwner(&pCritSect->s.Core);
#else
    PVM pVM = pCritSect->s.CTX_SUFF(pVM);
    AssertPtr(pVM);
    Assert(idCpu < pVM->cCPUs);
    return pCritSect->s.Core.NativeThreadOwner == pVM->aCpus[idCpu].hNativeThread
        && (pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK) == 0;
#endif
}


/**
 * Checks if somebody currently owns the critical section.
 *
 * @returns true if locked.
 * @returns false if not locked.
 *
 * @param   pCritSect   The critical section.
 *
 * @remarks This doesn't prove that no deadlocks will occur later on; it's
 *          just a debugging tool.
 */
VMMDECL(bool) PDMCritSectIsOwned(PCPDMCRITSECT pCritSect)
{
    return pCritSect->s.Core.NativeThreadOwner != NIL_RTNATIVETHREAD
        && (pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK) == 0;
}


/**
 * Checks if a critical section is initialized or not.
 *
 * @returns true if initialized.
 * @returns false if not initialized.
 * @param   pCritSect   The critical section.
 */
VMMDECL(bool) PDMCritSectIsInitialized(PCPDMCRITSECT pCritSect)
{
    return RTCritSectIsInitialized(&pCritSect->s.Core);
}


/**
 * Gets the recursion depth.
 *
 * @returns The recursion depth.
 * @param   pCritSect   The critical section.
 */
VMMDECL(uint32_t) PDMCritSectGetRecursion(PCPDMCRITSECT pCritSect)
{
    return RTCritSectGetRecursion(&pCritSect->s.Core);
}