VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PDMAllCritSect.cpp@25375

Last change on this file since 25375 was 25373, checked in by vboxsync, 15 years ago

IPRT,PDMCritSect: More validation changes. Validate posix and linux mutexes. Always update the thread state with critsects.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 22.5 KB
/* $Id: PDMAllCritSect.cpp 25373 2009-12-14 19:20:27Z vboxsync $ */
/** @file
 * PDM - Critical Sections, All Contexts.
 */

/*
 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
 * Clara, CA 95054 USA or visit http://www.sun.com if you need
 * additional information or have any questions.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_PDM//_CRITSECT
#include "PDMInternal.h"
#include <VBox/pdmcritsect.h>
#include <VBox/mm.h>
#include <VBox/vmm.h>
#include <VBox/vm.h>
#include <VBox/err.h>
#include <VBox/hwaccm.h>

#include <VBox/log.h>
#include <iprt/asm.h>
#include <iprt/assert.h>
#ifdef IN_RING3
# include <iprt/semaphore.h>
#endif


/*******************************************************************************
*   Defined Constants And Macros                                               *
*******************************************************************************/
/** The number of loops to spin for in ring-3. */
#define PDMCRITSECT_SPIN_COUNT_R3       20
/** The number of loops to spin for in ring-0. */
#define PDMCRITSECT_SPIN_COUNT_R0       256
/** The number of loops to spin for in the raw-mode context. */
#define PDMCRITSECT_SPIN_COUNT_RC       256

#ifdef PDMCRITSECT_STRICT
# define PDMCRITSECT_STRICT_ARGS_DECL       RTHCUINTPTR uId, RT_SRC_POS_DECL
# define PDMCRITSECT_STRICT_ARGS_PASS_ON    uId, RT_SRC_POS_ARGS
#else
# define PDMCRITSECT_STRICT_ARGS_DECL       int iDummy
# define PDMCRITSECT_STRICT_ARGS_PASS_ON    0
#endif


/* Undefine the automatic VBOX_STRICT API mappings. */
#undef PDMCritSectEnter
#undef PDMCritSectTryEnter


/**
 * Gets the ring-3 native thread handle of the calling thread.
 *
 * @returns native thread handle (ring-3).
 * @param   pCritSect   The critical section. This is used in R0 and RC.
 */
DECL_FORCE_INLINE(RTNATIVETHREAD) pdmCritSectGetNativeSelf(PCPDMCRITSECT pCritSect)
{
#ifdef IN_RING3
    NOREF(pCritSect);
    RTNATIVETHREAD  hNativeSelf = RTThreadNativeSelf();
#else
    AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, ("%RX32\n", pCritSect->s.Core.u32Magic),
                    VERR_SEM_DESTROYED);
    PVM             pVM         = pCritSect->s.CTX_SUFF(pVM);   AssertPtr(pVM);
    PVMCPU          pVCpu       = VMMGetCpu(pVM);               AssertPtr(pVCpu);
    RTNATIVETHREAD  hNativeSelf = pVCpu->hNativeThread;         Assert(hNativeSelf != NIL_RTNATIVETHREAD);
#endif
    return hNativeSelf;
}


/**
 * Tail code called when we've won the battle for the lock.
 *
 * @returns VINF_SUCCESS.
 *
 * @param   pCritSect       The critical section.
 * @param   hNativeSelf     The native handle of this thread.
 */
DECL_FORCE_INLINE(int) pdmCritSectEnterFirst(PPDMCRITSECT pCritSect, RTNATIVETHREAD hNativeSelf, PDMCRITSECT_STRICT_ARGS_DECL)
{
    AssertMsg(pCritSect->s.Core.NativeThreadOwner == NIL_RTNATIVETHREAD, ("NativeThreadOwner=%p\n", pCritSect->s.Core.NativeThreadOwner));
    Assert(!(pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK));

    ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 1);
    ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, hNativeSelf);

# if defined(PDMCRITSECT_STRICT) && defined(IN_RING3)
    RTThreadWriteLockInc(RTLockValidatorSetOwner(pCritSect->s.Core.pValidatorRec, NIL_RTTHREAD, PDMCRITSECT_STRICT_ARGS_PASS_ON));
# endif

    STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
    return VINF_SUCCESS;
}


#ifdef IN_RING3
/**
 * Deals with the contended case in ring-3.
 *
 * @returns VINF_SUCCESS or VERR_SEM_DESTROYED.
 * @param   pCritSect       The critsect.
 * @param   hNativeSelf     The native thread handle.
 */
static int pdmR3CritSectEnterContended(PPDMCRITSECT pCritSect, RTNATIVETHREAD hNativeSelf, PDMCRITSECT_STRICT_ARGS_DECL)
{
    /*
     * Start waiting.
     */
    if (ASMAtomicIncS32(&pCritSect->s.Core.cLockers) == 0)
        return pdmCritSectEnterFirst(pCritSect, hNativeSelf, PDMCRITSECT_STRICT_ARGS_PASS_ON);
    STAM_COUNTER_INC(&pCritSect->s.StatContentionR3);

    /*
     * The wait loop.
     */
    PSUPDRVSESSION  pSession = pCritSect->s.CTX_SUFF(pVM)->pSession;
    SUPSEMEVENT     hEvent   = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
# ifdef PDMCRITSECT_STRICT
    RTTHREAD        hSelf    = RTThreadSelfAutoAdopt();
    RTLockValidatorCheckOrder(pCritSect->s.Core.pValidatorRec, hSelf, 0, NULL, 0, NULL);
# endif
    for (;;)
    {
# ifdef PDMCRITSECT_STRICT
        RTThreadBlocking(hSelf, RTTHREADSTATE_CRITSECT, pCritSect->s.Core.pValidatorRec, 0, NULL, 0, NULL);
# endif
        int rc = SUPSemEventWaitNoResume(pSession, hEvent, RT_INDEFINITE_WAIT);
# ifdef PDMCRITSECT_STRICT
        RTThreadUnblocked(hSelf, RTTHREADSTATE_CRITSECT);
# endif
        if (RT_UNLIKELY(pCritSect->s.Core.u32Magic != RTCRITSECT_MAGIC))
            return VERR_SEM_DESTROYED;
        if (rc == VINF_SUCCESS)
            return pdmCritSectEnterFirst(pCritSect, hNativeSelf, PDMCRITSECT_STRICT_ARGS_PASS_ON);
        AssertMsg(rc == VERR_INTERRUPTED, ("rc=%Rrc\n", rc));
    }
    /* won't get here */
}
#endif /* IN_RING3 */


/**
 * Common worker for the debug and normal APIs.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @returns rcBusy when encountering a busy critical section in GC/R0.
 * @returns VERR_SEM_DESTROYED if the critical section is dead.
 *
 * @param   pCritSect   The PDM critical section to enter.
 * @param   rcBusy      The status code to return when we're in GC or R0
 *                      and the section is busy.
 */
DECL_FORCE_INLINE(int) pdmCritSectEnter(PPDMCRITSECT pCritSect, int rcBusy, PDMCRITSECT_STRICT_ARGS_DECL)
{
    Assert(pCritSect->s.Core.cNestings < 8);  /* useful to catch incorrect locking */

    /*
     * If the critical section has already been destroyed, then inform the caller.
     */
    AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC,
                    ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic),
                    VERR_SEM_DESTROYED);

    /*
     * See if we're lucky.
     */
    RTNATIVETHREAD hNativeSelf = pdmCritSectGetNativeSelf(pCritSect);
    /* Not owned ... */
    if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
        return pdmCritSectEnterFirst(pCritSect, hNativeSelf, PDMCRITSECT_STRICT_ARGS_PASS_ON);

    /* ... or nested. */
    if (pCritSect->s.Core.NativeThreadOwner == hNativeSelf)
    {
        ASMAtomicIncS32(&pCritSect->s.Core.cLockers);
        ASMAtomicIncS32(&pCritSect->s.Core.cNestings);
        ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
        return VINF_SUCCESS;
    }

    /*
     * Spin for a bit without incrementing the counter.
     */
    /** @todo Move this to cfgm variables since it doesn't make sense to spin on UNI
     *        cpu systems. */
    int32_t cSpinsLeft = CTX_SUFF(PDMCRITSECT_SPIN_COUNT_);
    while (cSpinsLeft-- > 0)
    {
        if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
            return pdmCritSectEnterFirst(pCritSect, hNativeSelf, PDMCRITSECT_STRICT_ARGS_PASS_ON);
        ASMNopPause();
        /** @todo Should use monitor/mwait on e.g. &cLockers here, possibly with a
           cli'ed pendingpreemption check up front using sti w/ instruction fusing
           for avoiding races. Hmm ... This is assuming the other party is actually
           executing code on another CPU ... which we could keep track of if we
           wanted. */
    }

#ifdef IN_RING3
    /*
     * Take the slow path.
     */
    return pdmR3CritSectEnterContended(pCritSect, hNativeSelf, PDMCRITSECT_STRICT_ARGS_PASS_ON);
#else
    /*
     * Return busy.
     */
    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
    LogFlow(("PDMCritSectEnter: locked => R3 (%Rrc)\n", rcBusy));
    return rcBusy;
#endif
}


/**
 * Enters a PDM critical section.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @returns rcBusy when encountering a busy critical section in GC/R0.
 * @returns VERR_SEM_DESTROYED if the critical section is dead.
 *
 * @param   pCritSect   The PDM critical section to enter.
 * @param   rcBusy      The status code to return when we're in GC or R0
 *                      and the section is busy.
 */
VMMDECL(int) PDMCritSectEnter(PPDMCRITSECT pCritSect, int rcBusy)
{
#ifndef PDMCRITSECT_STRICT
    return pdmCritSectEnter(pCritSect, rcBusy, PDMCRITSECT_STRICT_ARGS_PASS_ON);
#else
    /* No need for a second code instance. */
    return PDMCritSectEnterDebug(pCritSect, rcBusy, (uintptr_t)ASMReturnAddress(), RT_SRC_POS);
#endif
}

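/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * source): how a caller typically pairs PDMCritSectEnter with
 * PDMCritSectLeave.  The function name and the choice of VERR_SEM_BUSY as
 * the rcBusy status are hypothetical; in RC/R0 the caller picks whatever
 * busy status suits its fallback path, while in ring-3 the call blocks in
 * pdmR3CritSectEnterContended and rcBusy is never returned.
 */
#if 0 /* example only, excluded from the build */
static int exampleProtectedAccess(PPDMCRITSECT pCritSect)
{
    int rc = PDMCritSectEnter(pCritSect, VERR_SEM_BUSY /* rcBusy used in RC/R0 only */);
    if (rc == VINF_SUCCESS)
    {
        /* ... access the state guarded by pCritSect ... */
        PDMCritSectLeave(pCritSect);
    }
    /* Otherwise rc is the rcBusy value (RC/R0 contention) or VERR_SEM_DESTROYED. */
    return rc;
}
#endif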

/**
 * Enters a PDM critical section, with location information for debugging.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @returns rcBusy when encountering a busy critical section in GC/R0.
 * @returns VERR_SEM_DESTROYED if the critical section is dead.
 *
 * @param   pCritSect   The PDM critical section to enter.
 * @param   rcBusy      The status code to return when we're in GC or R0
 *                      and the section is busy.
 * @param   uId         Some kind of locking location ID. Typically a
 *                      return address up the stack. Optional (0).
 * @param   pszFile     The file where the lock is being acquired from.
 *                      Optional.
 * @param   iLine       The line number in that file. Optional (0).
 * @param   pszFunction The function where the lock is being acquired
 *                      from. Optional.
 */
VMMDECL(int) PDMCritSectEnterDebug(PPDMCRITSECT pCritSect, int rcBusy, RTHCUINTPTR uId, RT_SRC_POS_DECL)
{
#ifdef PDMCRITSECT_STRICT
    return pdmCritSectEnter(pCritSect, rcBusy, PDMCRITSECT_STRICT_ARGS_PASS_ON);
#else
    /* No need for a second code instance. */
    return PDMCritSectEnter(pCritSect, rcBusy);
#endif
}


/**
 * Common worker for the debug and normal APIs.
 *
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_BUSY if the critsect was owned.
 * @retval  VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
 * @retval  VERR_SEM_DESTROYED if RTCritSectDelete was called while waiting.
 *
 * @param   pCritSect   The critical section.
 */
static int pdmCritSectTryEnter(PPDMCRITSECT pCritSect, PDMCRITSECT_STRICT_ARGS_DECL)
{
    /*
     * If the critical section has already been destroyed, then inform the caller.
     */
    AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC,
                    ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic),
                    VERR_SEM_DESTROYED);

    /*
     * See if we're lucky.
     */
    RTNATIVETHREAD hNativeSelf = pdmCritSectGetNativeSelf(pCritSect);
    /* Not owned ... */
    if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
        return pdmCritSectEnterFirst(pCritSect, hNativeSelf, PDMCRITSECT_STRICT_ARGS_PASS_ON);

    /* ... or nested. */
    if (pCritSect->s.Core.NativeThreadOwner == hNativeSelf)
    {
        ASMAtomicIncS32(&pCritSect->s.Core.cLockers);
        ASMAtomicIncS32(&pCritSect->s.Core.cNestings);
        ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
        return VINF_SUCCESS;
    }

    /* no spinning */

    /*
     * Return busy.
     */
#ifdef IN_RING3
    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionR3);
#else
    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
#endif
    LogFlow(("PDMCritSectTryEnter: locked\n"));
    return VERR_SEM_BUSY;
}


/**
 * Try to enter a critical section.
 *
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_BUSY if the critsect was owned.
 * @retval  VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
 * @retval  VERR_SEM_DESTROYED if RTCritSectDelete was called while waiting.
 *
 * @param   pCritSect   The critical section.
 */
VMMDECL(int) PDMCritSectTryEnter(PPDMCRITSECT pCritSect)
{
#ifndef PDMCRITSECT_STRICT
    return pdmCritSectTryEnter(pCritSect, PDMCRITSECT_STRICT_ARGS_PASS_ON);
#else
    /* No need for a second code instance. */
    return PDMCritSectTryEnterDebug(pCritSect, (uintptr_t)ASMReturnAddress(), RT_SRC_POS);
#endif
}

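/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * source): polling a section with PDMCritSectTryEnter instead of blocking.
 * The function name is hypothetical; the status codes match the @retval
 * list documented above.
 */
#if 0 /* example only, excluded from the build */
static void exampleOpportunisticWork(PPDMCRITSECT pCritSect)
{
    int rc = PDMCritSectTryEnter(pCritSect);
    if (rc == VINF_SUCCESS)
    {
        /* ... do the optional work while owning the section ... */
        PDMCritSectLeave(pCritSect);
    }
    else /* owned by someone else (or destroyed); skip the work. */
        AssertMsg(rc == VERR_SEM_BUSY || rc == VERR_SEM_DESTROYED, ("rc=%Rrc\n", rc));
}
#endif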

/**
 * Try to enter a critical section, with location information for debugging.
 *
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_BUSY if the critsect was owned.
 * @retval  VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
 * @retval  VERR_SEM_DESTROYED if RTCritSectDelete was called while waiting.
 *
 * @param   pCritSect   The critical section.
 * @param   uId         Some kind of locking location ID. Typically a
 *                      return address up the stack. Optional (0).
 * @param   pszFile     The file where the lock is being acquired from.
 *                      Optional.
 * @param   iLine       The line number in that file. Optional (0).
 * @param   pszFunction The function where the lock is being acquired
 *                      from. Optional.
 */
VMMDECL(int) PDMCritSectTryEnterDebug(PPDMCRITSECT pCritSect, RTHCUINTPTR uId, RT_SRC_POS_DECL)
{
#ifdef PDMCRITSECT_STRICT
    return pdmCritSectTryEnter(pCritSect, PDMCRITSECT_STRICT_ARGS_PASS_ON);
#else
    /* No need for a second code instance. */
    return PDMCritSectTryEnter(pCritSect);
#endif
}


#ifdef IN_RING3
/**
 * Enters a PDM critical section.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @returns rcBusy when encountering a busy critical section in GC/R0.
 * @returns VERR_SEM_DESTROYED if the critical section is dead.
 *
 * @param   pCritSect   The PDM critical section to enter.
 * @param   fCallRing3  Whether this is a VMMRZCallRing3() request.
 */
VMMR3DECL(int) PDMR3CritSectEnterEx(PPDMCRITSECT pCritSect, bool fCallRing3)
{
    int rc = PDMCritSectEnter(pCritSect, VERR_INTERNAL_ERROR);
    if (    rc == VINF_SUCCESS
        &&  fCallRing3
        &&  pCritSect->s.Core.pValidatorRec
        &&  pCritSect->s.Core.pValidatorRec->hThread != NIL_RTTHREAD)
        RTThreadWriteLockDec(RTLockValidatorUnsetOwner(pCritSect->s.Core.pValidatorRec));
    return rc;
}
#endif /* IN_RING3 */


/**
 * Leaves a critical section entered with PDMCritSectEnter().
 *
 * @param   pCritSect   The PDM critical section to leave.
 */
VMMDECL(void) PDMCritSectLeave(PPDMCRITSECT pCritSect)
{
    AssertMsg(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic));
    Assert(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC);
    Assert(pCritSect->s.Core.NativeThreadOwner == pdmCritSectGetNativeSelf(pCritSect));
    Assert(pCritSect->s.Core.cNestings >= 1);

    /*
     * Nested leave.
     */
    if (pCritSect->s.Core.cNestings > 1)
    {
        ASMAtomicDecS32(&pCritSect->s.Core.cNestings);
        ASMAtomicDecS32(&pCritSect->s.Core.cLockers);
        return;
    }

#ifdef IN_RING0
# if 0 /** @todo Make SUPSemEventSignal interrupt safe (handle table++) and enable this for: defined(RT_OS_LINUX) || defined(RT_OS_OS2) */
    if (1) /* SUPSemEventSignal is safe */
# else
    if (ASMIntAreEnabled())
# endif
#endif
#if defined(IN_RING3) || defined(IN_RING0)
    {
        /*
         * Leave for real.
         */
        /* update members. */
# ifdef IN_RING3
        RTSEMEVENT hEventToSignal  = pCritSect->s.EventToSignal;
        pCritSect->s.EventToSignal = NIL_RTSEMEVENT;
#  if defined(PDMCRITSECT_STRICT)
        if (pCritSect->s.Core.pValidatorRec->hThread != NIL_RTTHREAD)
            RTThreadWriteLockDec(RTLockValidatorUnsetOwner(pCritSect->s.Core.pValidatorRec));
#  endif
# endif
        ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
        Assert(!pCritSect->s.Core.pValidatorRec || pCritSect->s.Core.pValidatorRec->hThread == NIL_RTTHREAD);
        ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD);
        ASMAtomicDecS32(&pCritSect->s.Core.cNestings);

        /* stop and decrement lockers. */
        STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);
        ASMCompilerBarrier();
        if (ASMAtomicDecS32(&pCritSect->s.Core.cLockers) >= 0)
        {
            /* Someone is waiting, wake up one of them. */
            SUPSEMEVENT     hEvent   = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
            PSUPDRVSESSION  pSession = pCritSect->s.CTX_SUFF(pVM)->pSession;
            int rc = SUPSemEventSignal(pSession, hEvent);
            AssertRC(rc);
        }

# ifdef IN_RING3
        /* Signal exit event. */
        if (hEventToSignal != NIL_RTSEMEVENT)
        {
            LogBird(("Signalling %#x\n", hEventToSignal));
            int rc = RTSemEventSignal(hEventToSignal);
            AssertRC(rc);
        }
# endif

# if defined(DEBUG_bird) && defined(IN_RING0)
        VMMTrashVolatileXMMRegs();
# endif
    }
#endif /* IN_RING3 || IN_RING0 */
#ifdef IN_RING0
    else
#endif
#if defined(IN_RING0) || defined(IN_RC)
    {
        /*
         * Try leave it.
         */
        if (pCritSect->s.Core.cLockers == 0)
        {
            ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 0);
            RTNATIVETHREAD hNativeThread = pCritSect->s.Core.NativeThreadOwner;
            ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
            STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);

            ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD);
            if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, -1, 0))
                return;

            /* darn, someone raced in on us. */
            ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, hNativeThread);
            STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
            ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 1);
        }
        ASMAtomicOrU32(&pCritSect->s.Core.fFlags, PDMCRITSECT_FLAGS_PENDING_UNLOCK);

        /*
         * Queue the request.
         */
        PVM         pVM   = pCritSect->s.CTX_SUFF(pVM); AssertPtr(pVM);
        PVMCPU      pVCpu = VMMGetCpu(pVM);              AssertPtr(pVCpu);
        uint32_t    i     = pVCpu->pdm.s.cQueuedCritSectLeaves++;
        LogFlow(("PDMCritSectLeave: [%d]=%p => R3\n", i, pCritSect));
        AssertFatal(i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectsLeaves));
        pVCpu->pdm.s.apQueuedCritSectsLeaves[i] = MMHyperCCToR3(pVM, pCritSect);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_PDM_CRITSECT);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
        STAM_REL_COUNTER_INC(&pVM->pdm.s.StatQueuedCritSectLeaves);
        STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZUnlock);
    }
#endif /* IN_RING0 || IN_RC */
}


#if defined(IN_RING3) || defined(IN_RING0)
/**
 * Process the critical sections queued for ring-3 'leave'.
 *
 * @param   pVCpu   The VMCPU handle.
 */
VMMDECL(void) PDMCritSectFF(PVMCPU pVCpu)
{
    Assert(pVCpu->pdm.s.cQueuedCritSectLeaves > 0);

    const RTUINT c = pVCpu->pdm.s.cQueuedCritSectLeaves;
    for (RTUINT i = 0; i < c; i++)
    {
# ifdef IN_RING3
        PPDMCRITSECT pCritSect = pVCpu->pdm.s.apQueuedCritSectsLeaves[i];
# else
        PPDMCRITSECT pCritSect = (PPDMCRITSECT)MMHyperR3ToCC(pVCpu->CTX_SUFF(pVM), pVCpu->pdm.s.apQueuedCritSectsLeaves[i]);
# endif

        PDMCritSectLeave(pCritSect);
        LogFlow(("PDMR3CritSectFF: %p\n", pCritSect));
    }

    pVCpu->pdm.s.cQueuedCritSectLeaves = 0;
    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PDM_CRITSECT);
}
#endif /* IN_RING3 || IN_RING0 */


/**
 * Checks whether the caller is the owner of the critical section.
 *
 * @returns true if owner.
 * @returns false if not owner.
 * @param   pCritSect   The critical section.
 */
VMMDECL(bool) PDMCritSectIsOwner(PCPDMCRITSECT pCritSect)
{
#ifdef IN_RING3
    return RTCritSectIsOwner(&pCritSect->s.Core);
#else
    PVM     pVM   = pCritSect->s.CTX_SUFF(pVM); AssertPtr(pVM);
    PVMCPU  pVCpu = VMMGetCpu(pVM);             AssertPtr(pVCpu);
    if (pCritSect->s.Core.NativeThreadOwner != pVCpu->hNativeThread)
        return false;
    return (pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK) == 0;
#endif
}

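/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * source): a helper that requires its caller to already own the section can
 * assert ownership on entry.  The function name is hypothetical.
 */
#if 0 /* example only, excluded from the build */
static void exampleHelperRequiringLock(PPDMCRITSECT pCritSect)
{
    Assert(PDMCritSectIsOwner(pCritSect));
    /* ... safely manipulate the state guarded by pCritSect ... */
}
#endif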

/**
 * Checks whether the specified VCPU is the owner of the critical section.
 *
 * @returns true if owner.
 * @returns false if not owner.
 * @param   pCritSect   The critical section.
 * @param   idCpu       The VCPU id.
 */
VMMDECL(bool) PDMCritSectIsOwnerEx(PCPDMCRITSECT pCritSect, VMCPUID idCpu)
{
#ifdef IN_RING3
    NOREF(idCpu);
    return RTCritSectIsOwner(&pCritSect->s.Core);
#else
    PVM pVM = pCritSect->s.CTX_SUFF(pVM);
    AssertPtr(pVM);
    Assert(idCpu < pVM->cCpus);
    return pCritSect->s.Core.NativeThreadOwner == pVM->aCpus[idCpu].hNativeThread
        && (pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK) == 0;
#endif
}


/**
 * Checks if somebody currently owns the critical section.
 *
 * @returns true if locked.
 * @returns false if not locked.
 *
 * @param   pCritSect   The critical section.
 *
 * @remarks This doesn't prove that no deadlocks will occur later on; it's
 *          just a debugging tool.
 */
VMMDECL(bool) PDMCritSectIsOwned(PCPDMCRITSECT pCritSect)
{
    return pCritSect->s.Core.NativeThreadOwner != NIL_RTNATIVETHREAD
        && (pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK) == 0;
}


/**
 * Checks if anyone is waiting on the critical section we own.
 *
 * @returns true if someone is waiting.
 * @returns false if no one is waiting.
 * @param   pCritSect   The critical section.
 */
VMMDECL(bool) PDMCritSectHasWaiters(PCPDMCRITSECT pCritSect)
{
    AssertReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, false);
    Assert(pCritSect->s.Core.NativeThreadOwner == pdmCritSectGetNativeSelf(pCritSect));
    return pCritSect->s.Core.cLockers >= pCritSect->s.Core.cNestings;
}


/**
 * Checks if a critical section is initialized or not.
 *
 * @returns true if initialized.
 * @returns false if not initialized.
 * @param   pCritSect   The critical section.
 */
VMMDECL(bool) PDMCritSectIsInitialized(PCPDMCRITSECT pCritSect)
{
    return RTCritSectIsInitialized(&pCritSect->s.Core);
}


/**
 * Gets the recursion depth.
 *
 * @returns The recursion depth.
 * @param   pCritSect   The critical section.
 */
VMMDECL(uint32_t) PDMCritSectGetRecursion(PCPDMCRITSECT pCritSect)
{
    return RTCritSectGetRecursion(&pCritSect->s.Core);
}