
source: vbox/trunk/src/VBox/VMM/VMMAll/PDMAllCritSect.cpp@ 28875

Last change on this file since 28875 was 28800, checked in by vboxsync, 15 years ago

Automated rebranding to Oracle copyright/license strings via filemuncher

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 22.2 KB
/* $Id: PDMAllCritSect.cpp 28800 2010-04-27 08:22:32Z vboxsync $ */
/** @file
 * PDM - Critical Sections, All Contexts.
 */

/*
 * Copyright (C) 2006-2007 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_PDM//_CRITSECT
#include "../PDMInternal.h"
#include <VBox/pdmcritsect.h>
#include <VBox/mm.h>
#include <VBox/vmm.h>
#include <VBox/vm.h>
#include <VBox/err.h>
#include <VBox/hwaccm.h>

#include <VBox/log.h>
#include <iprt/asm.h>
#include <iprt/assert.h>
#ifdef IN_RING3
# include <iprt/lockvalidator.h>
# include <iprt/semaphore.h>
#endif


/*******************************************************************************
*   Defined Constants And Macros                                               *
*******************************************************************************/
/** The number of loops to spin for in ring-3. */
#define PDMCRITSECT_SPIN_COUNT_R3   20
/** The number of loops to spin for in ring-0. */
#define PDMCRITSECT_SPIN_COUNT_R0   256
/** The number of loops to spin for in the raw-mode context. */
#define PDMCRITSECT_SPIN_COUNT_RC   256


/* Undefine the automatic VBOX_STRICT API mappings. */
#undef PDMCritSectEnter
#undef PDMCritSectTryEnter


/**
 * Gets the ring-3 native thread handle of the calling thread.
 *
 * @returns native thread handle (ring-3).
 * @param   pCritSect   The critical section. This is used in R0 and RC.
 */
DECL_FORCE_INLINE(RTNATIVETHREAD) pdmCritSectGetNativeSelf(PCPDMCRITSECT pCritSect)
{
#ifdef IN_RING3
    NOREF(pCritSect);
    RTNATIVETHREAD  hNativeSelf = RTThreadNativeSelf();
#else
    AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, ("%RX32\n", pCritSect->s.Core.u32Magic),
                    NIL_RTNATIVETHREAD);
    PVM             pVM         = pCritSect->s.CTX_SUFF(pVM); AssertPtr(pVM);
    PVMCPU          pVCpu       = VMMGetCpu(pVM);             AssertPtr(pVCpu);
    RTNATIVETHREAD  hNativeSelf = pVCpu->hNativeThread;       Assert(hNativeSelf != NIL_RTNATIVETHREAD);
#endif
    return hNativeSelf;
}
/**
 * Tail code called when we've won the battle for the lock.
 *
 * @returns VINF_SUCCESS.
 *
 * @param   pCritSect       The critical section.
 * @param   hNativeSelf     The native handle of this thread.
 */
DECL_FORCE_INLINE(int) pdmCritSectEnterFirst(PPDMCRITSECT pCritSect, RTNATIVETHREAD hNativeSelf, PCRTLOCKVALSRCPOS pSrcPos)
{
    AssertMsg(pCritSect->s.Core.NativeThreadOwner == NIL_RTNATIVETHREAD, ("NativeThreadOwner=%p\n", pCritSect->s.Core.NativeThreadOwner));
    Assert(!(pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK));

    ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 1);
    ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, hNativeSelf);

# ifdef PDMCRITSECT_STRICT
    RTLockValidatorRecExclSetOwner(pCritSect->s.Core.pValidatorRec, NIL_RTTHREAD, pSrcPos, true);
# endif

    STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
    return VINF_SUCCESS;
}


#ifdef IN_RING3
/**
 * Deals with the contended case in ring-3.
 *
 * @returns VINF_SUCCESS or VERR_SEM_DESTROYED.
 * @param   pCritSect       The critsect.
 * @param   hNativeSelf     The native thread handle.
 */
static int pdmR3CritSectEnterContended(PPDMCRITSECT pCritSect, RTNATIVETHREAD hNativeSelf, PCRTLOCKVALSRCPOS pSrcPos)
{
    /*
     * Start waiting.
     */
    if (ASMAtomicIncS32(&pCritSect->s.Core.cLockers) == 0)
        return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
    STAM_COUNTER_INC(&pCritSect->s.StatContentionR3);

    /*
     * The wait loop.
     */
    PSUPDRVSESSION  pSession = pCritSect->s.CTX_SUFF(pVM)->pSession;
    SUPSEMEVENT     hEvent   = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
# ifdef PDMCRITSECT_STRICT
    RTTHREAD        hThreadSelf = RTThreadSelfAutoAdopt();
    int rc2 = RTLockValidatorRecExclCheckOrder(pCritSect->s.Core.pValidatorRec, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
    if (RT_FAILURE(rc2))
        return rc2;
# else
    RTTHREAD        hThreadSelf = RTThreadSelf();
# endif
    for (;;)
    {
# ifdef PDMCRITSECT_STRICT
        int rc9 = RTLockValidatorRecExclCheckBlocking(pCritSect->s.Core.pValidatorRec, hThreadSelf, pSrcPos,
                                                      !(pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NO_NESTING),
                                                      RT_INDEFINITE_WAIT, RTTHREADSTATE_CRITSECT, true);
        if (RT_FAILURE(rc9))
            return rc9;
# else
        RTThreadBlocking(hThreadSelf, RTTHREADSTATE_CRITSECT, true);
# endif
        int rc = SUPSemEventWaitNoResume(pSession, hEvent, RT_INDEFINITE_WAIT);
        RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_CRITSECT);

        if (RT_UNLIKELY(pCritSect->s.Core.u32Magic != RTCRITSECT_MAGIC))
            return VERR_SEM_DESTROYED;
        if (rc == VINF_SUCCESS)
            return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
        AssertMsg(rc == VERR_INTERRUPTED, ("rc=%Rrc\n", rc));
    }
    /* won't get here */
}
#endif /* IN_RING3 */


/**
 * Common worker for the debug and normal APIs.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @returns rcBusy when encountering a busy critical section in GC/R0.
 * @returns VERR_SEM_DESTROYED if the critical section is dead.
 *
 * @param   pCritSect   The PDM critical section to enter.
 * @param   rcBusy      The status code to return when we're in GC or R0
 *                      and the section is busy.
 */
DECL_FORCE_INLINE(int) pdmCritSectEnter(PPDMCRITSECT pCritSect, int rcBusy, PCRTLOCKVALSRCPOS pSrcPos)
{
    Assert(pCritSect->s.Core.cNestings < 8); /* useful to catch incorrect locking */

    /*
     * If the critical section has already been destroyed, then inform the caller.
     */
    AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC,
                    ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic),
                    VERR_SEM_DESTROYED);

    /*
     * See if we're lucky.
     */
    RTNATIVETHREAD hNativeSelf = pdmCritSectGetNativeSelf(pCritSect);
    /* Not owned ... */
    if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
        return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);

    /* ... or nested. */
    if (pCritSect->s.Core.NativeThreadOwner == hNativeSelf)
    {
        ASMAtomicIncS32(&pCritSect->s.Core.cLockers);
        ASMAtomicIncS32(&pCritSect->s.Core.cNestings);
        ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
        return VINF_SUCCESS;
    }

    /*
     * Spin for a bit without incrementing the counter.
     */
    /** @todo Move this to cfgm variables since it doesn't make sense to spin on UNI
     *        cpu systems. */
    int32_t cSpinsLeft = CTX_SUFF(PDMCRITSECT_SPIN_COUNT_);
    while (cSpinsLeft-- > 0)
    {
        if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
            return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
        ASMNopPause();
        /** @todo Should use monitor/mwait on e.g. &cLockers here, possibly with a
           cli'ed pendingpreemption check up front using sti w/ instruction fusing
           for avoiding races. Hmm ... This is assuming the other party is actually
           executing code on another CPU ... which we could keep track of if we
           wanted. */
    }

#ifdef IN_RING3
    /*
     * Take the slow path.
     */
    return pdmR3CritSectEnterContended(pCritSect, hNativeSelf, pSrcPos);
#else
    /*
     * Return busy.
     */
    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
    LogFlow(("PDMCritSectEnter: locked => R3 (%Rrc)\n", rcBusy));
    return rcBusy;
#endif
}


/**
 * Enters a PDM critical section.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @returns rcBusy when encountering a busy critical section in GC/R0.
 * @returns VERR_SEM_DESTROYED if the critical section is dead.
 *
 * @param   pCritSect   The PDM critical section to enter.
 * @param   rcBusy      The status code to return when we're in GC or R0
 *                      and the section is busy.
 */
VMMDECL(int) PDMCritSectEnter(PPDMCRITSECT pCritSect, int rcBusy)
{
#ifndef PDMCRITSECT_STRICT
    return pdmCritSectEnter(pCritSect, rcBusy, NULL);
#else
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
    return pdmCritSectEnter(pCritSect, rcBusy, &SrcPos);
#endif
}
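

/*
 * Illustrative usage sketch, not part of the original file: a hypothetical
 * caller entering and leaving the section.  In ring-0/raw-mode a contended
 * section yields the rcBusy status instead of blocking, so the caller is
 * expected to defer the work to ring-3.
 */
#if 0
static int exampleAccessSharedState(PPDMCRITSECT pCritSect)
{
    /* Blocks in ring-3; returns VERR_SEM_BUSY from R0/RC when contended. */
    int rc = PDMCritSectEnter(pCritSect, VERR_SEM_BUSY);
    if (rc == VINF_SUCCESS)
    {
        /* ... access the state the section guards ... */
        PDMCritSectLeave(pCritSect);
    }
    return rc; /* rcBusy (or VERR_SEM_DESTROYED) propagates to the caller. */
}
#endif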


/**
 * Enters a PDM critical section, with location information for debugging.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @returns rcBusy when encountering a busy critical section in GC/R0.
 * @returns VERR_SEM_DESTROYED if the critical section is dead.
 *
 * @param   pCritSect   The PDM critical section to enter.
 * @param   rcBusy      The status code to return when we're in GC or R0
 *                      and the section is busy.
 * @param   uId         Some kind of locking location ID. Typically a
 *                      return address up the stack. Optional (0).
 * @param   pszFile     The file where the lock is being acquired from.
 *                      Optional.
 * @param   iLine       The line number in that file. Optional (0).
 * @param   pszFunction The function where the lock is being acquired
 *                      from. Optional.
 */
VMMDECL(int) PDMCritSectEnterDebug(PPDMCRITSECT pCritSect, int rcBusy, RTHCUINTPTR uId, RT_SRC_POS_DECL)
{
#ifdef PDMCRITSECT_STRICT
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
    return pdmCritSectEnter(pCritSect, rcBusy, &SrcPos);
#else
    return pdmCritSectEnter(pCritSect, rcBusy, NULL);
#endif
}
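

/*
 * Illustrative only, an assumption rather than code from this file: the debug
 * variant is normally reached through the VBOX_STRICT mapping of
 * PDMCritSectEnter, but it can also be called directly with IPRT's RT_SRC_POS
 * macro supplying the file/line/function triple and the return address as the
 * lock ID.
 */
#if 0
static int exampleEnterWithSourcePos(PPDMCRITSECT pCritSect)
{
    return PDMCritSectEnterDebug(pCritSect, VERR_SEM_BUSY,
                                 (RTHCUINTPTR)ASMReturnAddress(), RT_SRC_POS);
}
#endif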


/**
 * Common worker for the debug and normal APIs.
 *
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_BUSY if the critsect was owned.
 * @retval  VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
 * @retval  VERR_SEM_DESTROYED if RTCritSectDelete was called while waiting.
 *
 * @param   pCritSect   The critical section.
 */
static int pdmCritSectTryEnter(PPDMCRITSECT pCritSect, PCRTLOCKVALSRCPOS pSrcPos)
{
    /*
     * If the critical section has already been destroyed, then inform the caller.
     */
    AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC,
                    ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic),
                    VERR_SEM_DESTROYED);

    /*
     * See if we're lucky.
     */
    RTNATIVETHREAD hNativeSelf = pdmCritSectGetNativeSelf(pCritSect);
    /* Not owned ... */
    if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
        return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);

    /* ... or nested. */
    if (pCritSect->s.Core.NativeThreadOwner == hNativeSelf)
    {
        ASMAtomicIncS32(&pCritSect->s.Core.cLockers);
        ASMAtomicIncS32(&pCritSect->s.Core.cNestings);
        ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
        return VINF_SUCCESS;
    }

    /* no spinning */

    /*
     * Return busy.
     */
#ifdef IN_RING3
    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionR3);
#else
    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
#endif
    LogFlow(("PDMCritSectTryEnter: locked\n"));
    return VERR_SEM_BUSY;
}


/**
 * Try enter a critical section.
 *
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_BUSY if the critsect was owned.
 * @retval  VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
 * @retval  VERR_SEM_DESTROYED if RTCritSectDelete was called while waiting.
 *
 * @param   pCritSect   The critical section.
 */
VMMDECL(int) PDMCritSectTryEnter(PPDMCRITSECT pCritSect)
{
#ifndef PDMCRITSECT_STRICT
    return pdmCritSectTryEnter(pCritSect, NULL);
#else
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
    return pdmCritSectTryEnter(pCritSect, &SrcPos);
#endif
}
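

/*
 * Illustrative sketch, assumed caller code rather than part of this file:
 * polling the section from a context that must not block, and backing off
 * when it is busy.
 */
#if 0
static int exampleTryAccessSharedState(PPDMCRITSECT pCritSect)
{
    int rc = PDMCritSectTryEnter(pCritSect);
    if (rc == VINF_SUCCESS)
    {
        /* ... touch the guarded state ... */
        PDMCritSectLeave(pCritSect);
        return VINF_SUCCESS;
    }
    Assert(rc == VERR_SEM_BUSY); /* owned by another thread; retry later. */
    return rc;
}
#endif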


/**
 * Try enter a critical section, with location information for debugging.
 *
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_BUSY if the critsect was owned.
 * @retval  VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
 * @retval  VERR_SEM_DESTROYED if RTCritSectDelete was called while waiting.
 *
 * @param   pCritSect   The critical section.
 * @param   uId         Some kind of locking location ID. Typically a
 *                      return address up the stack. Optional (0).
 * @param   pszFile     The file where the lock is being acquired from.
 *                      Optional.
 * @param   iLine       The line number in that file. Optional (0).
 * @param   pszFunction The function where the lock is being acquired
 *                      from. Optional.
 */
VMMDECL(int) PDMCritSectTryEnterDebug(PPDMCRITSECT pCritSect, RTHCUINTPTR uId, RT_SRC_POS_DECL)
{
#ifdef PDMCRITSECT_STRICT
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
    return pdmCritSectTryEnter(pCritSect, &SrcPos);
#else
    return pdmCritSectTryEnter(pCritSect, NULL);
#endif
}


#ifdef IN_RING3
/**
 * Enters a PDM critical section.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @returns rcBusy when encountering a busy critical section in GC/R0.
 * @returns VERR_SEM_DESTROYED if the critical section is dead.
 *
 * @param   pCritSect   The PDM critical section to enter.
 * @param   fCallRing3  Whether this is a VMMRZCallRing3() request.
 */
VMMR3DECL(int) PDMR3CritSectEnterEx(PPDMCRITSECT pCritSect, bool fCallRing3)
{
    int rc = PDMCritSectEnter(pCritSect, VERR_INTERNAL_ERROR);
    if (    rc == VINF_SUCCESS
        &&  fCallRing3
        &&  pCritSect->s.Core.pValidatorRec
        &&  pCritSect->s.Core.pValidatorRec->hThread != NIL_RTTHREAD)
        RTLockValidatorRecExclReleaseOwnerUnchecked(pCritSect->s.Core.pValidatorRec);
    return rc;
}
#endif /* IN_RING3 */


/**
 * Leaves a critical section entered with PDMCritSectEnter().
 *
 * @param   pCritSect   The PDM critical section to leave.
 */
VMMDECL(void) PDMCritSectLeave(PPDMCRITSECT pCritSect)
{
    AssertMsg(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic));
    Assert(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC);
    Assert(pCritSect->s.Core.NativeThreadOwner == pdmCritSectGetNativeSelf(pCritSect));
    Assert(pCritSect->s.Core.cNestings >= 1);

    /*
     * Nested leave.
     */
    if (pCritSect->s.Core.cNestings > 1)
    {
        ASMAtomicDecS32(&pCritSect->s.Core.cNestings);
        ASMAtomicDecS32(&pCritSect->s.Core.cLockers);
        return;
    }

#ifdef IN_RING0
# if 0 /** @todo Make SUPSemEventSignal interrupt safe (handle table++) and enable this for: defined(RT_OS_LINUX) || defined(RT_OS_OS2) */
    if (1) /* SUPSemEventSignal is safe */
# else
    if (ASMIntAreEnabled())
# endif
#endif
#if defined(IN_RING3) || defined(IN_RING0)
    {
        /*
         * Leave for real.
         */
        /* update members. */
# ifdef IN_RING3
        RTSEMEVENT hEventToSignal  = pCritSect->s.EventToSignal;
        pCritSect->s.EventToSignal = NIL_RTSEMEVENT;
#  if defined(PDMCRITSECT_STRICT)
        if (pCritSect->s.Core.pValidatorRec->hThread != NIL_RTTHREAD)
            RTLockValidatorRecExclReleaseOwnerUnchecked(pCritSect->s.Core.pValidatorRec);
#  endif
        Assert(!pCritSect->s.Core.pValidatorRec || pCritSect->s.Core.pValidatorRec->hThread == NIL_RTTHREAD);
# endif
        ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
        ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD);
        ASMAtomicDecS32(&pCritSect->s.Core.cNestings);

        /* stop and decrement lockers. */
        STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);
        ASMCompilerBarrier();
        if (ASMAtomicDecS32(&pCritSect->s.Core.cLockers) >= 0)
        {
            /* Someone is waiting, wake up one of them. */
            SUPSEMEVENT     hEvent   = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
            PSUPDRVSESSION  pSession = pCritSect->s.CTX_SUFF(pVM)->pSession;
            int rc = SUPSemEventSignal(pSession, hEvent);
            AssertRC(rc);
        }

# ifdef IN_RING3
        /* Signal exit event. */
        if (hEventToSignal != NIL_RTSEMEVENT)
        {
            LogBird(("Signalling %#x\n", hEventToSignal));
            int rc = RTSemEventSignal(hEventToSignal);
            AssertRC(rc);
        }
# endif

# if defined(DEBUG_bird) && defined(IN_RING0)
        VMMTrashVolatileXMMRegs();
# endif
    }
#endif /* IN_RING3 || IN_RING0 */
#ifdef IN_RING0
    else
#endif
#if defined(IN_RING0) || defined(IN_RC)
    {
        /*
         * Try leave it.
         */
        if (pCritSect->s.Core.cLockers == 0)
        {
            ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 0);
            RTNATIVETHREAD hNativeThread = pCritSect->s.Core.NativeThreadOwner;
            ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
            STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);

            ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD);
            if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, -1, 0))
                return;

            /* darn, someone raced in on us. */
            ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, hNativeThread);
            STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
            ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 1);
        }
        ASMAtomicOrU32(&pCritSect->s.Core.fFlags, PDMCRITSECT_FLAGS_PENDING_UNLOCK);

        /*
         * Queue the request.
         */
        PVM         pVM   = pCritSect->s.CTX_SUFF(pVM); AssertPtr(pVM);
        PVMCPU      pVCpu = VMMGetCpu(pVM);             AssertPtr(pVCpu);
        uint32_t    i     = pVCpu->pdm.s.cQueuedCritSectLeaves++;
        LogFlow(("PDMCritSectLeave: [%d]=%p => R3\n", i, pCritSect));
        AssertFatal(i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectsLeaves));
        pVCpu->pdm.s.apQueuedCritSectsLeaves[i] = MMHyperCCToR3(pVM, pCritSect);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_PDM_CRITSECT);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
        STAM_REL_COUNTER_INC(&pVM->pdm.s.StatQueuedCritSectLeaves);
        STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZUnlock);
    }
#endif /* IN_RING0 || IN_RC */
}
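

/*
 * Illustrative sketch of the recursion semantics (assumed caller code, not
 * part of this file): every successful enter must be paired with one leave,
 * and only the final leave releases the section and may wake a waiter.
 * Status codes are ignored here purely for brevity.
 */
#if 0
static void exampleNestedUse(PPDMCRITSECT pCritSect)
{
    PDMCritSectEnter(pCritSect, VERR_SEM_BUSY);  /* cNestings = 1, owner = self */
    PDMCritSectEnter(pCritSect, VERR_SEM_BUSY);  /* cNestings = 2, same thread  */
    PDMCritSectLeave(pCritSect);                 /* cNestings = 1, still owned  */
    PDMCritSectLeave(pCritSect);                 /* released; a waiter may run  */
}
#endif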


#if defined(IN_RING3) || defined(IN_RING0)
/**
 * Process the critical sections queued for ring-3 'leave'.
 *
 * @param   pVCpu   The VMCPU handle.
 */
VMMDECL(void) PDMCritSectFF(PVMCPU pVCpu)
{
    Assert(pVCpu->pdm.s.cQueuedCritSectLeaves > 0);

    const RTUINT c = pVCpu->pdm.s.cQueuedCritSectLeaves;
    for (RTUINT i = 0; i < c; i++)
    {
# ifdef IN_RING3
        PPDMCRITSECT pCritSect = pVCpu->pdm.s.apQueuedCritSectsLeaves[i];
# else
        PPDMCRITSECT pCritSect = (PPDMCRITSECT)MMHyperR3ToCC(pVCpu->CTX_SUFF(pVM), pVCpu->pdm.s.apQueuedCritSectsLeaves[i]);
# endif

        PDMCritSectLeave(pCritSect);
        LogFlow(("PDMR3CritSectFF: %p\n", pCritSect));
    }

    pVCpu->pdm.s.cQueuedCritSectLeaves = 0;
    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PDM_CRITSECT);
}
#endif /* IN_RING3 || IN_RING0 */


/**
 * Checks the caller is the owner of the critical section.
 *
 * @returns true if owner.
 * @returns false if not owner.
 * @param   pCritSect   The critical section.
 */
VMMDECL(bool) PDMCritSectIsOwner(PCPDMCRITSECT pCritSect)
{
#ifdef IN_RING3
    return RTCritSectIsOwner(&pCritSect->s.Core);
#else
    PVM     pVM   = pCritSect->s.CTX_SUFF(pVM); AssertPtr(pVM);
    PVMCPU  pVCpu = VMMGetCpu(pVM);             AssertPtr(pVCpu);
    if (pCritSect->s.Core.NativeThreadOwner != pVCpu->hNativeThread)
        return false;
    return (pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK) == 0;
#endif
}
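

/*
 * Illustrative sketch, an assumed caller rather than code from this file:
 * asserting ownership before touching state that requires the section to be
 * held.  VERR_NOT_OWNER is an IPRT status chosen for the example.
 */
#if 0
static int exampleRequiresOwnership(PPDMCRITSECT pCritSect)
{
    AssertReturn(PDMCritSectIsOwner(pCritSect), VERR_NOT_OWNER);
    /* ... safe to access the guarded state here ... */
    return VINF_SUCCESS;
}
#endif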


/**
 * Checks the specified VCPU is the owner of the critical section.
 *
 * @returns true if owner.
 * @returns false if not owner.
 * @param   pCritSect   The critical section.
 * @param   idCpu       VCPU id.
 */
VMMDECL(bool) PDMCritSectIsOwnerEx(PCPDMCRITSECT pCritSect, VMCPUID idCpu)
{
#ifdef IN_RING3
    NOREF(idCpu);
    return RTCritSectIsOwner(&pCritSect->s.Core);
#else
    PVM pVM = pCritSect->s.CTX_SUFF(pVM);
    AssertPtr(pVM);
    Assert(idCpu < pVM->cCpus);
    return pCritSect->s.Core.NativeThreadOwner == pVM->aCpus[idCpu].hNativeThread
        && (pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK) == 0;
#endif
}


/**
 * Checks if somebody currently owns the critical section.
 *
 * @returns true if locked.
 * @returns false if not locked.
 *
 * @param   pCritSect   The critical section.
 *
 * @remarks This doesn't prove that no deadlocks will occur later on; it's
 *          just a debugging tool.
 */
VMMDECL(bool) PDMCritSectIsOwned(PCPDMCRITSECT pCritSect)
{
    return pCritSect->s.Core.NativeThreadOwner != NIL_RTNATIVETHREAD
        && (pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK) == 0;
}


/**
 * Checks if anyone is waiting on the critical section we own.
 *
 * @returns true if someone is waiting.
 * @returns false if no one is waiting.
 * @param   pCritSect   The critical section.
 */
VMMDECL(bool) PDMCritSectHasWaiters(PCPDMCRITSECT pCritSect)
{
    AssertReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, false);
    Assert(pCritSect->s.Core.NativeThreadOwner == pdmCritSectGetNativeSelf(pCritSect));
    return pCritSect->s.Core.cLockers >= pCritSect->s.Core.cNestings;
}


/**
 * Checks if a critical section is initialized or not.
 *
 * @returns true if initialized.
 * @returns false if not initialized.
 * @param   pCritSect   The critical section.
 */
VMMDECL(bool) PDMCritSectIsInitialized(PCPDMCRITSECT pCritSect)
{
    return RTCritSectIsInitialized(&pCritSect->s.Core);
}


/**
 * Gets the recursion depth.
 *
 * @returns The recursion depth.
 * @param   pCritSect   The critical section.
 */
VMMDECL(uint32_t) PDMCritSectGetRecursion(PCPDMCRITSECT pCritSect)
{
    return RTCritSectGetRecursion(&pCritSect->s.Core);
}