VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PDMAllCritSect.cpp@ 20088

Last change on this file since 20088 was 20010, checked in by vboxsync, 16 years ago

PDMCritSect: Enabled ring-0 exit.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 16.2 KB
/* $Id: PDMAllCritSect.cpp 20010 2009-05-25 18:41:29Z vboxsync $ */
/** @file
 * PDM - Critical Sections, All Contexts.
 */

/*
 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
 * Clara, CA 95054 USA or visit http://www.sun.com if you need
 * additional information or have any questions.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_PDM//_CRITSECT
#include "PDMInternal.h"
#include <VBox/pdm.h>
#include <VBox/mm.h>
#include <VBox/vm.h>
#include <VBox/err.h>
#include <VBox/hwaccm.h>

#include <VBox/log.h>
#include <iprt/asm.h>
#include <iprt/assert.h>
#ifdef IN_RING3
# include <iprt/semaphore.h>
#endif


/*******************************************************************************
*   Defined Constants And Macros                                               *
*******************************************************************************/
/** The number of loops to spin for in ring-3. */
#define PDMCRITSECT_SPIN_COUNT_R3   20
/** The number of loops to spin for in ring-0. */
#define PDMCRITSECT_SPIN_COUNT_R0   256
/** The number of loops to spin for in the raw-mode context. */
#define PDMCRITSECT_SPIN_COUNT_RC   256

/** @def PDMCRITSECT_STRICT
 * Enables/disables PDM critsect strictness like deadlock detection. */
#if defined(VBOX_STRICT) || defined(DOXYGEN_RUNNING)
# define PDMCRITSECT_STRICT
#endif


/**
 * Gets the ring-3 native thread handle of the calling thread.
 *
 * @returns native thread handle (ring-3).
 * @param   pCritSect   The critical section. This is used in R0 and RC.
 */
DECL_FORCE_INLINE(RTNATIVETHREAD) pdmCritSectGetNativeSelf(PPDMCRITSECT pCritSect)
{
#ifdef IN_RING3
    NOREF(pCritSect);
    RTNATIVETHREAD hNativeSelf = RTThreadNativeSelf();
#else
    AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, ("%RX32\n", pCritSect->s.Core.u32Magic),
                    VERR_SEM_DESTROYED);
    PVM     pVM   = pCritSect->s.CTX_SUFF(pVM);  AssertPtr(pVM);
    PVMCPU  pVCpu = VMMGetCpu(pVM);              AssertPtr(pVCpu);
    RTNATIVETHREAD hNativeSelf = pVCpu->hNativeThread; Assert(hNativeSelf != NIL_RTNATIVETHREAD);
#endif
    return hNativeSelf;
}


/**
 * Tail code called when we've won the battle for the lock.
 *
 * @returns VINF_SUCCESS.
 *
 * @param   pCritSect   The critical section.
 * @param   hNativeSelf The native handle of this thread.
 */
DECL_FORCE_INLINE(int) pdmCritSectEnterFirst(PPDMCRITSECT pCritSect, RTNATIVETHREAD hNativeSelf)
{
    AssertMsg(pCritSect->s.Core.NativeThreadOwner == NIL_RTNATIVETHREAD, ("NativeThreadOwner=%p\n", pCritSect->s.Core.NativeThreadOwner));
    Assert(!(pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK));

    pCritSect->s.Core.cNestings = 1;
    ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, hNativeSelf);

# if defined(PDMCRITSECT_STRICT) && defined(IN_RING3)
    pCritSect->s.Core.Strict.pszEnterFile = NULL;
    pCritSect->s.Core.Strict.u32EnterLine = 0;
    pCritSect->s.Core.Strict.uEnterId     = 0;
    RTTHREAD hSelf = RTThreadSelf();
    ASMAtomicWriteHandle(&pCritSect->s.Core.Strict.ThreadOwner, hSelf);
    RTThreadWriteLockInc(hSelf);
# endif

    STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
    return VINF_SUCCESS;
}


#ifdef IN_RING3
/**
 * Deals with the contended case in ring-3.
 *
 * @returns VINF_SUCCESS or VERR_SEM_DESTROYED.
 * @param   pCritSect   The critsect.
 * @param   hNativeSelf The native thread handle.
 */
static int pdmR3CritSectEnterContended(PPDMCRITSECT pCritSect, RTNATIVETHREAD hNativeSelf)
{
    /*
     * Start waiting.
     */
    if (ASMAtomicIncS32(&pCritSect->s.Core.cLockers) == 0)
        return pdmCritSectEnterFirst(pCritSect, hNativeSelf);
    STAM_COUNTER_INC(&pCritSect->s.StatContentionR3);

    /*
     * The wait loop.
     */
    PSUPDRVSESSION  pSession = pCritSect->s.CTX_SUFF(pVM)->pSession;
    SUPSEMEVENT     hEvent   = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
# ifdef PDMCRITSECT_STRICT
    RTTHREAD hSelf = RTThreadSelf();
    if (hSelf == NIL_RTTHREAD)
        RTThreadAdopt(RTTHREADTYPE_DEFAULT, 0, NULL, &hSelf);
# endif
    for (;;)
    {
# ifdef PDMCRITSECT_STRICT
        RTThreadBlocking(hSelf, RTTHREADSTATE_CRITSECT, (uintptr_t)pCritSect, NULL, 0, 0);
# endif
        int rc = SUPSemEventWaitNoResume(pSession, hEvent, RT_INDEFINITE_WAIT);
# ifdef PDMCRITSECT_STRICT
        RTThreadUnblocked(hSelf, RTTHREADSTATE_CRITSECT);
# endif
        if (RT_UNLIKELY(pCritSect->s.Core.u32Magic != RTCRITSECT_MAGIC))
            return VERR_SEM_DESTROYED;
        if (rc == VINF_SUCCESS)
            return pdmCritSectEnterFirst(pCritSect, hNativeSelf);
        AssertMsg(rc == VERR_INTERRUPTED, ("rc=%Rrc\n", rc));
    }
    /* won't get here */
}
#endif /* IN_RING3 */


/**
 * Enters a PDM critical section.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @returns rcBusy when encountering a busy critical section in GC/R0.
 * @returns VERR_SEM_DESTROYED if the critical section is dead.
 *
 * @param   pCritSect   The PDM critical section to enter.
 * @param   rcBusy      The status code to return when we're in GC or R0
 *                      and the section is busy.
 */
VMMDECL(int) PDMCritSectEnter(PPDMCRITSECT pCritSect, int rcBusy)
{
    Assert(pCritSect->s.Core.cNestings < 8);  /* useful to catch incorrect locking */

    /*
     * If the critical section has already been destroyed, then inform the caller.
     */
    AssertReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, VERR_SEM_DESTROYED);

    /*
     * See if we're lucky.
     */
    RTNATIVETHREAD hNativeSelf = pdmCritSectGetNativeSelf(pCritSect);
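    /* cLockers is -1 while the section is free; the compare-exchange below
       claims it by flipping -1 to 0, so the uncontended path never touches
       the event semaphore and needs no kernel call. */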
    /* Not owned ... */
    if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
        return pdmCritSectEnterFirst(pCritSect, hNativeSelf);

    /* ... or nested. */
    if (pCritSect->s.Core.NativeThreadOwner == hNativeSelf)
    {
        ASMAtomicIncS32(&pCritSect->s.Core.cLockers);
        pCritSect->s.Core.cNestings++;
        pCritSect->s.Core.fFlags &= ~PDMCRITSECT_FLAGS_PENDING_UNLOCK;
        return VINF_SUCCESS;
    }

    /*
     * Spin for a bit without incrementing the counter.
     */
    /** @todo Move this to cfgm variables since it doesn't make sense to spin on UNI
     *        cpu systems. */
    int32_t cSpinsLeft = CTX_SUFF(PDMCRITSECT_SPIN_COUNT_);
    while (cSpinsLeft-- > 0)
    {
        if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
            return pdmCritSectEnterFirst(pCritSect, hNativeSelf);
        /** @todo need pause/nop instruction here! */
    }

#ifdef IN_RING3
    /*
     * Take the slow path.
     */
    return pdmR3CritSectEnterContended(pCritSect, hNativeSelf);
#else
    /*
     * Return busy.
     */
    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
    LogFlow(("PDMCritSectEnter: locked => R3 (%Rrc)\n", rcBusy));
    return rcBusy;
#endif
}
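
/* Illustrative usage (a sketch, not from this file; pThis->CritSect, the
 * register member, and the chosen status code are hypothetical): an MMIO
 * handler passes an rcBusy code that tells the caller to retry the access
 * in ring-3 when the section is contended in R0/RC.
 *
 *     int rc = PDMCritSectEnter(&pThis->CritSect, VINF_IOM_HC_MMIO_WRITE);
 *     if (rc != VINF_SUCCESS)
 *         return rc;                      // busy in R0/RC: redo the access in ring-3
 *     pThis->u32SharedReg = u32Value;     // hypothetical shared device state
 *     PDMCritSectLeave(&pThis->CritSect);
 */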


/**
 * Try enter a critical section.
 *
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_BUSY if the critsect was owned.
 * @retval  VERR_SEM_NESTED if attempting a nested enter on a non-nesting section. (Asserted.)
 * @retval  VERR_SEM_DESTROYED if RTCritSectDelete was called while waiting.
 *
 * @param   pCritSect   The critical section.
 */
VMMDECL(int) PDMCritSectTryEnter(PPDMCRITSECT pCritSect)
{
    /*
     * If the critical section has already been destroyed, then inform the caller.
     */
    AssertReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, VERR_SEM_DESTROYED);

    /*
     * See if we're lucky.
     */
    RTNATIVETHREAD hNativeSelf = pdmCritSectGetNativeSelf(pCritSect);
    /* Not owned ... */
    if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
        return pdmCritSectEnterFirst(pCritSect, hNativeSelf);

    /* ... or nested. */
    if (pCritSect->s.Core.NativeThreadOwner == hNativeSelf)
    {
        ASMAtomicIncS32(&pCritSect->s.Core.cLockers);
        pCritSect->s.Core.cNestings++;
        pCritSect->s.Core.fFlags &= ~PDMCRITSECT_FLAGS_PENDING_UNLOCK;
        return VINF_SUCCESS;
    }

    /* no spinning */

    /*
     * Return busy.
     */
#ifdef IN_RING3
    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionR3);
#else
    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
#endif
    LogFlow(("PDMCritSectTryEnter: locked\n"));
    return VERR_SEM_BUSY;
}
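
/* Illustrative usage (a sketch; pThis->CritSect is hypothetical): try-enter
 * never blocks and never spins, which makes it suitable for opportunistic
 * work that can simply be skipped when the section is contended.
 *
 *     if (PDMCritSectTryEnter(&pThis->CritSect) == VINF_SUCCESS)
 *     {
 *         pdmR3HypotheticalFlushWork(pThis);   // optional work on shared state
 *         PDMCritSectLeave(&pThis->CritSect);
 *     }
 *     // else: somebody else owns the section; skip the work this time.
 */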


#ifdef IN_RING3
/**
 * Enters a PDM critical section.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @returns rcBusy when encountering a busy critical section in GC/R0.
 * @returns VERR_SEM_DESTROYED if the critical section is dead.
 *
 * @param   pCritSect   The PDM critical section to enter.
 * @param   fCallHost   Whether this is a VMMGCCallHost() or VMMR0CallHost() request.
 */
VMMR3DECL(int) PDMR3CritSectEnterEx(PPDMCRITSECT pCritSect, bool fCallHost)
{
    int rc = PDMCritSectEnter(pCritSect, VERR_INTERNAL_ERROR);
    if (    rc == VINF_SUCCESS
        &&  fCallHost
        &&  pCritSect->s.Core.Strict.ThreadOwner != NIL_RTTHREAD)
    {
        RTThreadWriteLockDec(pCritSect->s.Core.Strict.ThreadOwner);
        ASMAtomicWriteHandle(&pCritSect->s.Core.Strict.ThreadOwner, NIL_RTTHREAD);
    }
    return rc;
}
#endif /* IN_RING3 */


/**
 * Leaves a critical section entered with PDMCritSectEnter().
 *
 * @param   pCritSect   The PDM critical section to leave.
 */
VMMDECL(void) PDMCritSectLeave(PPDMCRITSECT pCritSect)
{
    Assert(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC);
    Assert(pCritSect->s.Core.NativeThreadOwner == pdmCritSectGetNativeSelf(pCritSect));
    Assert(pCritSect->s.Core.cNestings >= 1);

    /*
     * Nested leave.
     */
    if (pCritSect->s.Core.cNestings > 1)
    {
        pCritSect->s.Core.cNestings--;
        ASMAtomicDecS32(&pCritSect->s.Core.cLockers);
        return;
    }

#if defined(IN_RING3) || defined(IN_RING0)
    /*
     * Leave for real.
     */
    /* update members. */
# ifdef IN_RING3
    RTSEMEVENT hEventToSignal  = pCritSect->s.EventToSignal;
    pCritSect->s.EventToSignal = NIL_RTSEMEVENT;
#  if defined(PDMCRITSECT_STRICT)
    if (pCritSect->s.Core.Strict.ThreadOwner != NIL_RTTHREAD)
        RTThreadWriteLockDec(pCritSect->s.Core.Strict.ThreadOwner);
    ASMAtomicWriteHandle(&pCritSect->s.Core.Strict.ThreadOwner, NIL_RTTHREAD);
#  endif
# endif
    pCritSect->s.Core.fFlags &= ~PDMCRITSECT_FLAGS_PENDING_UNLOCK;
    Assert(pCritSect->s.Core.Strict.ThreadOwner == NIL_RTTHREAD);
    ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD);
    pCritSect->s.Core.cNestings--;

    /* stop and decrement lockers. */
    STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);
    if (ASMAtomicDecS32(&pCritSect->s.Core.cLockers) >= 0)
    {
        /* Someone is waiting, wake up one of them. */
        SUPSEMEVENT     hEvent   = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
        PSUPDRVSESSION  pSession = pCritSect->s.CTX_SUFF(pVM)->pSession;
        int rc = SUPSemEventSignal(pSession, hEvent);
        AssertRC(rc);
    }

# ifdef IN_RING3
    /* Signal exit event. */
    if (hEventToSignal != NIL_RTSEMEVENT)
    {
        LogBird(("Signalling %#x\n", hEventToSignal));
        int rc = RTSemEventSignal(hEventToSignal);
        AssertRC(rc);
    }
# endif

#else  /* IN_RC */
    /*
     * Try leave it.
     */
    if (pCritSect->s.Core.cLockers == 0)
    {
        pCritSect->s.Core.cNestings = 0;
        RTNATIVETHREAD hNativeThread = pCritSect->s.Core.NativeThreadOwner;
        pCritSect->s.Core.fFlags &= ~PDMCRITSECT_FLAGS_PENDING_UNLOCK;
        STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);

        ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD);
        if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, -1, 0))
            return;

        /* darn, someone raced in on us. */
        ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, hNativeThread);
        STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
        pCritSect->s.Core.cNestings = 1;
    }
    pCritSect->s.Core.fFlags |= PDMCRITSECT_FLAGS_PENDING_UNLOCK;

    /*
     * Queue the request.
     */
    PVM     pVM   = pCritSect->s.CTX_SUFF(pVM); AssertPtr(pVM);
    PVMCPU  pVCpu = VMMGetCpu(pVM);             AssertPtr(pVCpu);
    uint32_t i = pVCpu->pdm.s.cQueuedCritSectLeaves++;
    LogFlow(("PDMCritSectLeave: [%d]=%p => R3\n", i, pCritSect));
    AssertFatal(i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectsLeaves));
    pVCpu->pdm.s.apQueuedCritSectsLeaves[i] = MMHyperCCToR3(pVM, pCritSect);
    VMCPU_FF_SET(pVCpu, VMCPU_FF_PDM_CRITSECT);
    VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
    STAM_REL_COUNTER_INC(&pVM->pdm.s.StatQueuedCritSectLeaves);
    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZUnlock);
#endif /* IN_RC */
}
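
/* Note: in the raw-mode context a contended leave cannot signal the support
 * driver event semaphore directly, so PDMCritSectLeave flags the section
 * PDMCRITSECT_FLAGS_PENDING_UNLOCK and queues it on the VCPU; the
 * VMCPU_FF_PDM_CRITSECT forced action then has PDMCritSectFF() below finish
 * the job once execution is back in ring-3 or ring-0. */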


#if defined(IN_RING3) || defined(IN_RING0)
/**
 * Process the critical sections queued for ring-3 'leave'.
 *
 * @param   pVCpu   The VMCPU handle.
 */
VMMDECL(void) PDMCritSectFF(PVMCPU pVCpu)
{
    Assert(pVCpu->pdm.s.cQueuedCritSectLeaves > 0);

    const RTUINT c = pVCpu->pdm.s.cQueuedCritSectLeaves;
    for (RTUINT i = 0; i < c; i++)
    {
# ifdef IN_RING3
        PPDMCRITSECT pCritSect = pVCpu->pdm.s.apQueuedCritSectsLeaves[i];
# else
        PPDMCRITSECT pCritSect = (PPDMCRITSECT)MMHyperR3ToCC(pVCpu->CTX_SUFF(pVM), pVCpu->pdm.s.apQueuedCritSectsLeaves[i]);
# endif

        PDMCritSectLeave(pCritSect);
        LogFlow(("PDMR3CritSectFF: %p\n", pCritSect));
    }

    pVCpu->pdm.s.cQueuedCritSectLeaves = 0;
    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PDM_CRITSECT);
}
#endif /* IN_RING3 || IN_RING0 */


/**
 * Checks whether the calling thread is the owner of the critical section.
 *
 * @returns true if owner.
 * @returns false if not owner.
 * @param   pCritSect   The critical section.
 */
VMMDECL(bool) PDMCritSectIsOwner(PCPDMCRITSECT pCritSect)
{
#ifdef IN_RING3
    return RTCritSectIsOwner(&pCritSect->s.Core);
#else
    PVM     pVM   = pCritSect->s.CTX_SUFF(pVM); AssertPtr(pVM);
    PVMCPU  pVCpu = VMMGetCpu(pVM);             AssertPtr(pVCpu);
    if (pCritSect->s.Core.NativeThreadOwner != pVCpu->hNativeThread)
        return false;
    return (pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK) == 0;
#endif
}


/**
 * Checks whether the specified VCPU is the owner of the critical section.
 *
 * @returns true if owner.
 * @returns false if not owner.
 * @param   pCritSect   The critical section.
 * @param   idCpu       The virtual CPU id.
 */
VMMDECL(bool) PDMCritSectIsOwnerEx(PCPDMCRITSECT pCritSect, VMCPUID idCpu)
{
#ifdef IN_RING3
    NOREF(idCpu);
    return RTCritSectIsOwner(&pCritSect->s.Core);
#else
    PVM pVM = pCritSect->s.CTX_SUFF(pVM);
    AssertPtr(pVM);
    Assert(idCpu < pVM->cCPUs);
    return pCritSect->s.Core.NativeThreadOwner == pVM->aCpus[idCpu].hNativeThread
        && (pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK) == 0;
#endif
}


/**
 * Checks if somebody currently owns the critical section.
 *
 * @returns true if locked.
 * @returns false if not locked.
 *
 * @param   pCritSect   The critical section.
 *
 * @remarks This doesn't prove that no deadlocks will occur later on; it's
 *          just a debugging tool.
 */
VMMDECL(bool) PDMCritSectIsOwned(PCPDMCRITSECT pCritSect)
{
    return pCritSect->s.Core.NativeThreadOwner != NIL_RTNATIVETHREAD
        && (pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK) == 0;
}


/**
 * Checks if a critical section is initialized or not.
 *
 * @returns true if initialized.
 * @returns false if not initialized.
 * @param   pCritSect   The critical section.
 */
VMMDECL(bool) PDMCritSectIsInitialized(PCPDMCRITSECT pCritSect)
{
    return RTCritSectIsInitialized(&pCritSect->s.Core);
}


/**
 * Gets the recursion depth.
 *
 * @returns The recursion depth.
 * @param   pCritSect   The critical section.
 */
VMMDECL(uint32_t) PDMCritSectGetRecursion(PCPDMCRITSECT pCritSect)
{
    return RTCritSectGetRecursion(&pCritSect->s.Core);
}