VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PDMAllCritSect.cpp@29450

Last change on this file since 29450 was 29250, checked in by vboxsync, 15 years ago

iprt/asm*.h: split out asm-math.h, don't include asm-*.h from asm.h, don't include asm.h from sup.h. Fixed a couple file headers.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 22.2 KB
/* $Id: PDMAllCritSect.cpp 29250 2010-05-09 17:53:58Z vboxsync $ */
/** @file
 * PDM - Critical Sections, All Contexts.
 */

/*
 * Copyright (C) 2006-2007 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_PDM//_CRITSECT
#include "../PDMInternal.h"
#include <VBox/pdmcritsect.h>
#include <VBox/mm.h>
#include <VBox/vmm.h>
#include <VBox/vm.h>
#include <VBox/err.h>
#include <VBox/hwaccm.h>

#include <VBox/log.h>
#include <iprt/asm.h>
#include <iprt/asm-amd64-x86.h>
#include <iprt/assert.h>
#ifdef IN_RING3
# include <iprt/lockvalidator.h>
# include <iprt/semaphore.h>
#endif


/*******************************************************************************
*   Defined Constants And Macros                                               *
*******************************************************************************/
/** The number of loops to spin for in ring-3. */
#define PDMCRITSECT_SPIN_COUNT_R3   20
/** The number of loops to spin for in ring-0. */
#define PDMCRITSECT_SPIN_COUNT_R0   256
/** The number of loops to spin for in the raw-mode context. */
#define PDMCRITSECT_SPIN_COUNT_RC   256

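/* Illustrative note (not part of the original file): CTX_SUFF appends the
   current compilation context's suffix to a symbol, so the lookup used by
   the enter worker below picks one of the three constants above:

       int32_t cSpinsLeft = CTX_SUFF(PDMCRITSECT_SPIN_COUNT_);
       // resolves to PDMCRITSECT_SPIN_COUNT_R3 in ring-3,
       //             PDMCRITSECT_SPIN_COUNT_R0 in ring-0,
       //             PDMCRITSECT_SPIN_COUNT_RC in the raw-mode context. */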

/* Undefine the automatic VBOX_STRICT API mappings. */
#undef PDMCritSectEnter
#undef PDMCritSectTryEnter


/**
 * Gets the ring-3 native thread handle of the calling thread.
 *
 * @returns native thread handle (ring-3).
 * @param   pCritSect   The critical section. This is used in R0 and RC.
 */
DECL_FORCE_INLINE(RTNATIVETHREAD) pdmCritSectGetNativeSelf(PCPDMCRITSECT pCritSect)
{
#ifdef IN_RING3
    NOREF(pCritSect);
    RTNATIVETHREAD  hNativeSelf = RTThreadNativeSelf();
#else
    AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, ("%RX32\n", pCritSect->s.Core.u32Magic),
                    NIL_RTNATIVETHREAD);
    PVM             pVM         = pCritSect->s.CTX_SUFF(pVM); AssertPtr(pVM);
    PVMCPU          pVCpu       = VMMGetCpu(pVM);             AssertPtr(pVCpu);
    RTNATIVETHREAD  hNativeSelf = pVCpu->hNativeThread;       Assert(hNativeSelf != NIL_RTNATIVETHREAD);
#endif
    return hNativeSelf;
}


/**
 * Tail code called when we've won the battle for the lock.
 *
 * @returns VINF_SUCCESS.
 *
 * @param   pCritSect       The critical section.
 * @param   hNativeSelf     The native handle of this thread.
 * @param   pSrcPos         The source position of the acquisition call
 *                          (used by the lock validator in strict builds).
 */
DECL_FORCE_INLINE(int) pdmCritSectEnterFirst(PPDMCRITSECT pCritSect, RTNATIVETHREAD hNativeSelf, PCRTLOCKVALSRCPOS pSrcPos)
{
    AssertMsg(pCritSect->s.Core.NativeThreadOwner == NIL_RTNATIVETHREAD, ("NativeThreadOwner=%p\n", pCritSect->s.Core.NativeThreadOwner));
    Assert(!(pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK));

    ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 1);
    ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, hNativeSelf);

# ifdef PDMCRITSECT_STRICT
    RTLockValidatorRecExclSetOwner(pCritSect->s.Core.pValidatorRec, NIL_RTTHREAD, pSrcPos, true);
# endif

    STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
    return VINF_SUCCESS;
}

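/* Illustrative note (not part of the original file): cLockers is -1 while
   the section is free. The fast enter paths below do

       ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1)

   so a successful -1 -> 0 transition means the lock was taken without
   contention. Once the value is >= 0, further enters increment it, and the
   count covers the owner plus all waiters; that is why the leave path
   signals the event semaphore whenever a decrement still leaves it >= 0. */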
#ifdef IN_RING3
/**
 * Deals with the contended case in ring-3.
 *
 * @returns VINF_SUCCESS or VERR_SEM_DESTROYED.
 * @param   pCritSect       The critsect.
 * @param   hNativeSelf     The native thread handle.
 * @param   pSrcPos         The source position of the acquisition call
 *                          (used by the lock validator in strict builds).
 */
static int pdmR3CritSectEnterContended(PPDMCRITSECT pCritSect, RTNATIVETHREAD hNativeSelf, PCRTLOCKVALSRCPOS pSrcPos)
{
    /*
     * Start waiting.
     */
    if (ASMAtomicIncS32(&pCritSect->s.Core.cLockers) == 0)
        return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
    STAM_COUNTER_INC(&pCritSect->s.StatContentionR3);

    /*
     * The wait loop.
     */
    PSUPDRVSESSION  pSession = pCritSect->s.CTX_SUFF(pVM)->pSession;
    SUPSEMEVENT     hEvent   = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
# ifdef PDMCRITSECT_STRICT
    RTTHREAD        hThreadSelf = RTThreadSelfAutoAdopt();
    int rc2 = RTLockValidatorRecExclCheckOrder(pCritSect->s.Core.pValidatorRec, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
    if (RT_FAILURE(rc2))
        return rc2;
# else
    RTTHREAD        hThreadSelf = RTThreadSelf();
# endif
    for (;;)
    {
# ifdef PDMCRITSECT_STRICT
        int rc9 = RTLockValidatorRecExclCheckBlocking(pCritSect->s.Core.pValidatorRec, hThreadSelf, pSrcPos,
                                                      !(pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NO_NESTING),
                                                      RT_INDEFINITE_WAIT, RTTHREADSTATE_CRITSECT, true);
        if (RT_FAILURE(rc9))
            return rc9;
# else
        RTThreadBlocking(hThreadSelf, RTTHREADSTATE_CRITSECT, true);
# endif
        int rc = SUPSemEventWaitNoResume(pSession, hEvent, RT_INDEFINITE_WAIT);
        RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_CRITSECT);

        if (RT_UNLIKELY(pCritSect->s.Core.u32Magic != RTCRITSECT_MAGIC))
            return VERR_SEM_DESTROYED;
        if (rc == VINF_SUCCESS)
            return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
        AssertMsg(rc == VERR_INTERRUPTED, ("rc=%Rrc\n", rc));
    }
    /* won't get here */
}
#endif /* IN_RING3 */
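/* Illustrative note (not part of the original file): the -NoResume wait
   variant returns VERR_INTERRUPTED when the wait is interrupted by a signal
   instead of silently resuming, which is why the loop above asserts on that
   status and simply retries until the semaphore is actually signalled or
   the section is destroyed. */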


/**
 * Common worker for the debug and normal APIs.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @returns rcBusy when encountering a busy critical section in GC/R0.
 * @returns VERR_SEM_DESTROYED if the critical section is dead.
 *
 * @param   pCritSect   The PDM critical section to enter.
 * @param   rcBusy      The status code to return when we're in GC or R0
 *                      and the section is busy.
 * @param   pSrcPos     The source position of the acquisition call
 *                      (used by the lock validator in strict builds).
 */
DECL_FORCE_INLINE(int) pdmCritSectEnter(PPDMCRITSECT pCritSect, int rcBusy, PCRTLOCKVALSRCPOS pSrcPos)
{
    Assert(pCritSect->s.Core.cNestings < 8);  /* useful to catch incorrect locking */

    /*
     * If the critical section has already been destroyed, then inform the caller.
     */
    AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC,
                    ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic),
                    VERR_SEM_DESTROYED);

    /*
     * See if we're lucky.
     */
    RTNATIVETHREAD hNativeSelf = pdmCritSectGetNativeSelf(pCritSect);
    /* Not owned ... */
    if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
        return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);

    /* ... or nested. */
    if (pCritSect->s.Core.NativeThreadOwner == hNativeSelf)
    {
        ASMAtomicIncS32(&pCritSect->s.Core.cLockers);
        ASMAtomicIncS32(&pCritSect->s.Core.cNestings);
        ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
        return VINF_SUCCESS;
    }

    /*
     * Spin for a bit without incrementing the counter.
     */
    /** @todo Move this to cfgm variables since it doesn't make sense to spin on UNI
     *        cpu systems. */
    int32_t cSpinsLeft = CTX_SUFF(PDMCRITSECT_SPIN_COUNT_);
    while (cSpinsLeft-- > 0)
    {
        if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
            return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
        ASMNopPause();
        /** @todo Should use monitor/mwait on e.g. &cLockers here, possibly with a
           cli'ed pendingpreemption check up front using sti w/ instruction fusing
           for avoiding races. Hmm ... This is assuming the other party is actually
           executing code on another CPU ... which we could keep track of if we
           wanted. */
    }

#ifdef IN_RING3
    /*
     * Take the slow path.
     */
    return pdmR3CritSectEnterContended(pCritSect, hNativeSelf, pSrcPos);
#else
    /*
     * Return busy.
     */
    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
    LogFlow(("PDMCritSectEnter: locked => R3 (%Rrc)\n", rcBusy));
    return rcBusy;
#endif
}


/**
 * Enters a PDM critical section.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @returns rcBusy when encountering a busy critical section in GC/R0.
 * @returns VERR_SEM_DESTROYED if the critical section is dead.
 *
 * @param   pCritSect   The PDM critical section to enter.
 * @param   rcBusy      The status code to return when we're in GC or R0
 *                      and the section is busy.
 */
VMMDECL(int) PDMCritSectEnter(PPDMCRITSECT pCritSect, int rcBusy)
{
#ifndef PDMCRITSECT_STRICT
    return pdmCritSectEnter(pCritSect, rcBusy, NULL);
#else
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
    return pdmCritSectEnter(pCritSect, rcBusy, &SrcPos);
#endif
}

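#if 0 /* Illustrative usage sketch, not part of the original file.  A typical
       * caller in context-shared device code: in R0/RC a busy section makes
       * PDMCritSectEnter return rcBusy (VINF_IOM_HC_MMIO_WRITE here) so the
       * operation can be retried in ring-3, while in ring-3 the call blocks
       * until the section is free.  The HYPDEVSTATE type and the function
       * itself are made up for the example. */
static int hypotheticalMmioWrite(HYPDEVSTATE *pThis, uint32_t u32Value)
{
    int rc = PDMCritSectEnter(&pThis->CritSect, VINF_IOM_HC_MMIO_WRITE);
    if (rc != VINF_SUCCESS)
        return rc;                  /* rcBusy in R0/RC, or VERR_SEM_DESTROYED. */

    pThis->u32Reg = u32Value;       /* ... state shared between contexts ... */

    PDMCritSectLeave(&pThis->CritSect);
    return VINF_SUCCESS;
}
#endif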

/**
 * Enters a PDM critical section, with location information for debugging.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @returns rcBusy when encountering a busy critical section in GC/R0.
 * @returns VERR_SEM_DESTROYED if the critical section is dead.
 *
 * @param   pCritSect   The PDM critical section to enter.
 * @param   rcBusy      The status code to return when we're in GC or R0
 *                      and the section is busy.
 * @param   uId         Some kind of locking location ID. Typically a
 *                      return address up the stack. Optional (0).
 * @param   pszFile     The file where the lock is being acquired from.
 *                      Optional.
 * @param   iLine       The line number in that file. Optional (0).
 * @param   pszFunction The function where the lock is being acquired
 *                      from. Optional.
 */
VMMDECL(int) PDMCritSectEnterDebug(PPDMCRITSECT pCritSect, int rcBusy, RTHCUINTPTR uId, RT_SRC_POS_DECL)
{
#ifdef PDMCRITSECT_STRICT
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
    return pdmCritSectEnter(pCritSect, rcBusy, &SrcPos);
#else
    return pdmCritSectEnter(pCritSect, rcBusy, NULL);
#endif
}

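#if 0 /* Illustrative call-site sketch, not part of the original file.  The
       * IPRT RT_SRC_POS macro supplies the __FILE__/__LINE__/function
       * arguments declared by RT_SRC_POS_DECL, and ASMReturnAddress() is a
       * common choice for uId. */
    rc = PDMCritSectEnterDebug(&pThis->CritSect, VERR_GENERAL_FAILURE,
                               (RTHCUINTPTR)ASMReturnAddress(), RT_SRC_POS);
#endif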

/**
 * Common worker for the debug and normal APIs.
 *
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_BUSY if the critsect was owned.
 * @retval  VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
 * @retval  VERR_SEM_DESTROYED if RTCritSectDelete was called while waiting.
 *
 * @param   pCritSect   The critical section.
 * @param   pSrcPos     The source position of the acquisition call
 *                      (used by the lock validator in strict builds).
 */
static int pdmCritSectTryEnter(PPDMCRITSECT pCritSect, PCRTLOCKVALSRCPOS pSrcPos)
{
    /*
     * If the critical section has already been destroyed, then inform the caller.
     */
    AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC,
                    ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic),
                    VERR_SEM_DESTROYED);

    /*
     * See if we're lucky.
     */
    RTNATIVETHREAD hNativeSelf = pdmCritSectGetNativeSelf(pCritSect);
    /* Not owned ... */
    if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
        return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);

    /* ... or nested. */
    if (pCritSect->s.Core.NativeThreadOwner == hNativeSelf)
    {
        ASMAtomicIncS32(&pCritSect->s.Core.cLockers);
        ASMAtomicIncS32(&pCritSect->s.Core.cNestings);
        ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
        return VINF_SUCCESS;
    }

    /* no spinning */

    /*
     * Return busy.
     */
#ifdef IN_RING3
    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionR3);
#else
    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
#endif
    LogFlow(("PDMCritSectTryEnter: locked\n"));
    return VERR_SEM_BUSY;
}


/**
 * Try enter a critical section.
 *
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_BUSY if the critsect was owned.
 * @retval  VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
 * @retval  VERR_SEM_DESTROYED if RTCritSectDelete was called while waiting.
 *
 * @param   pCritSect   The critical section.
 */
VMMDECL(int) PDMCritSectTryEnter(PPDMCRITSECT pCritSect)
{
#ifndef PDMCRITSECT_STRICT
    return pdmCritSectTryEnter(pCritSect, NULL);
#else
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
    return pdmCritSectTryEnter(pCritSect, &SrcPos);
#endif
}

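#if 0 /* Illustrative sketch, not part of the original file: the try-enter
       * pattern lets a caller back off instead of blocking (R3) or bouncing
       * to ring-3 (R0/RC) when the section is contended. */
    rc = PDMCritSectTryEnter(&pThis->CritSect);
    if (rc == VERR_SEM_BUSY)
        return VERR_TRY_AGAIN;      /* hypothetical back-off path */
    AssertRCReturn(rc, rc);
    /* ... guarded work ... */
    PDMCritSectLeave(&pThis->CritSect);
#endif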

/**
 * Try enter a critical section, with location information for debugging.
 *
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_BUSY if the critsect was owned.
 * @retval  VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
 * @retval  VERR_SEM_DESTROYED if RTCritSectDelete was called while waiting.
 *
 * @param   pCritSect   The critical section.
 * @param   uId         Some kind of locking location ID. Typically a
 *                      return address up the stack. Optional (0).
 * @param   pszFile     The file where the lock is being acquired from.
 *                      Optional.
 * @param   iLine       The line number in that file. Optional (0).
 * @param   pszFunction The function where the lock is being acquired
 *                      from. Optional.
 */
VMMDECL(int) PDMCritSectTryEnterDebug(PPDMCRITSECT pCritSect, RTHCUINTPTR uId, RT_SRC_POS_DECL)
{
#ifdef PDMCRITSECT_STRICT
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
    return pdmCritSectTryEnter(pCritSect, &SrcPos);
#else
    return pdmCritSectTryEnter(pCritSect, NULL);
#endif
}


#ifdef IN_RING3
/**
 * Enters a PDM critical section.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @returns rcBusy when encountering a busy critical section in GC/R0.
 * @returns VERR_SEM_DESTROYED if the critical section is dead.
 *
 * @param   pCritSect   The PDM critical section to enter.
 * @param   fCallRing3  Whether this is a VMMRZCallRing3() request.
 */
VMMR3DECL(int) PDMR3CritSectEnterEx(PPDMCRITSECT pCritSect, bool fCallRing3)
{
    int rc = PDMCritSectEnter(pCritSect, VERR_INTERNAL_ERROR);
    if (    rc == VINF_SUCCESS
        &&  fCallRing3
        &&  pCritSect->s.Core.pValidatorRec
        &&  pCritSect->s.Core.pValidatorRec->hThread != NIL_RTTHREAD)
        RTLockValidatorRecExclReleaseOwnerUnchecked(pCritSect->s.Core.pValidatorRec);
    return rc;
}
#endif /* IN_RING3 */


/**
 * Leaves a critical section entered with PDMCritSectEnter().
 *
 * @param   pCritSect   The PDM critical section to leave.
 */
VMMDECL(void) PDMCritSectLeave(PPDMCRITSECT pCritSect)
{
    AssertMsg(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic));
    Assert(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC);
    Assert(pCritSect->s.Core.NativeThreadOwner == pdmCritSectGetNativeSelf(pCritSect));
    Assert(pCritSect->s.Core.cNestings >= 1);

    /*
     * Nested leave.
     */
    if (pCritSect->s.Core.cNestings > 1)
    {
        ASMAtomicDecS32(&pCritSect->s.Core.cNestings);
        ASMAtomicDecS32(&pCritSect->s.Core.cLockers);
        return;
    }

#ifdef IN_RING0
# if 0 /** @todo Make SUPSemEventSignal interrupt safe (handle table++) and enable this for: defined(RT_OS_LINUX) || defined(RT_OS_OS2) */
    if (1) /* SUPSemEventSignal is safe */
# else
    if (ASMIntAreEnabled())
# endif
#endif
#if defined(IN_RING3) || defined(IN_RING0)
    {
        /*
         * Leave for real.
         */
        /* update members. */
# ifdef IN_RING3
        RTSEMEVENT hEventToSignal  = pCritSect->s.EventToSignal;
        pCritSect->s.EventToSignal = NIL_RTSEMEVENT;
#  if defined(PDMCRITSECT_STRICT)
        if (pCritSect->s.Core.pValidatorRec->hThread != NIL_RTTHREAD)
            RTLockValidatorRecExclReleaseOwnerUnchecked(pCritSect->s.Core.pValidatorRec);
#  endif
        Assert(!pCritSect->s.Core.pValidatorRec || pCritSect->s.Core.pValidatorRec->hThread == NIL_RTTHREAD);
# endif
        ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
        ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD);
        ASMAtomicDecS32(&pCritSect->s.Core.cNestings);

        /* stop and decrement lockers. */
        STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);
        ASMCompilerBarrier();
        if (ASMAtomicDecS32(&pCritSect->s.Core.cLockers) >= 0)
        {
            /* Someone is waiting, wake up one of them. */
            SUPSEMEVENT     hEvent   = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
            PSUPDRVSESSION  pSession = pCritSect->s.CTX_SUFF(pVM)->pSession;
            int rc = SUPSemEventSignal(pSession, hEvent);
            AssertRC(rc);
        }

# ifdef IN_RING3
        /* Signal exit event. */
        if (hEventToSignal != NIL_RTSEMEVENT)
        {
            LogBird(("Signalling %#x\n", hEventToSignal));
            int rc = RTSemEventSignal(hEventToSignal);
            AssertRC(rc);
        }
# endif

# if defined(DEBUG_bird) && defined(IN_RING0)
        VMMTrashVolatileXMMRegs();
# endif
    }
#endif /* IN_RING3 || IN_RING0 */
#ifdef IN_RING0
    else
#endif
#if defined(IN_RING0) || defined(IN_RC)
    {
        /*
         * Try leave it.
         */
        if (pCritSect->s.Core.cLockers == 0)
        {
            ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 0);
            RTNATIVETHREAD hNativeThread = pCritSect->s.Core.NativeThreadOwner;
            ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
            STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);

            ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD);
            if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, -1, 0))
                return;

            /* darn, someone raced in on us. */
            ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, hNativeThread);
            STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
            ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 1);
        }
        ASMAtomicOrU32(&pCritSect->s.Core.fFlags, PDMCRITSECT_FLAGS_PENDING_UNLOCK);

        /*
         * Queue the request.
         */
        PVM         pVM   = pCritSect->s.CTX_SUFF(pVM); AssertPtr(pVM);
        PVMCPU      pVCpu = VMMGetCpu(pVM);             AssertPtr(pVCpu);
        uint32_t    i     = pVCpu->pdm.s.cQueuedCritSectLeaves++;
        LogFlow(("PDMCritSectLeave: [%d]=%p => R3\n", i, pCritSect));
        AssertFatal(i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectsLeaves));
        pVCpu->pdm.s.apQueuedCritSectsLeaves[i] = MMHyperCCToR3(pVM, pCritSect);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_PDM_CRITSECT);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
        STAM_REL_COUNTER_INC(&pVM->pdm.s.StatQueuedCritSectLeaves);
        STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZUnlock);
    }
#endif /* IN_RING0 || IN_RC */
}

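/* Illustrative note (not part of the original file): in R0/RC the leave path
   may run with interrupts disabled, where SUPSemEventSignal cannot be called
   safely. In that case the section is left marked PENDING_UNLOCK, queued in
   apQueuedCritSectsLeaves on the VCPU, and VMCPU_FF_TO_R3 forces a switch to
   ring-3 where PDMCritSectFF (below) performs the real leave. */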

#if defined(IN_RING3) || defined(IN_RING0)
/**
 * Process the critical sections queued for ring-3 'leave'.
 *
 * @param   pVCpu       The VMCPU handle.
 */
VMMDECL(void) PDMCritSectFF(PVMCPU pVCpu)
{
    Assert(pVCpu->pdm.s.cQueuedCritSectLeaves > 0);

    const RTUINT c = pVCpu->pdm.s.cQueuedCritSectLeaves;
    for (RTUINT i = 0; i < c; i++)
    {
# ifdef IN_RING3
        PPDMCRITSECT pCritSect = pVCpu->pdm.s.apQueuedCritSectsLeaves[i];
# else
        PPDMCRITSECT pCritSect = (PPDMCRITSECT)MMHyperR3ToCC(pVCpu->CTX_SUFF(pVM), pVCpu->pdm.s.apQueuedCritSectsLeaves[i]);
# endif

        PDMCritSectLeave(pCritSect);
        LogFlow(("PDMR3CritSectFF: %p\n", pCritSect));
    }

    pVCpu->pdm.s.cQueuedCritSectLeaves = 0;
    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PDM_CRITSECT);
}
#endif /* IN_RING3 || IN_RING0 */


/**
 * Checks whether the caller is the owner of the critical section.
 *
 * @returns true if owner.
 * @returns false if not owner.
 * @param   pCritSect   The critical section.
 */
VMMDECL(bool) PDMCritSectIsOwner(PCPDMCRITSECT pCritSect)
{
#ifdef IN_RING3
    return RTCritSectIsOwner(&pCritSect->s.Core);
#else
    PVM     pVM   = pCritSect->s.CTX_SUFF(pVM); AssertPtr(pVM);
    PVMCPU  pVCpu = VMMGetCpu(pVM);             AssertPtr(pVCpu);
    if (pCritSect->s.Core.NativeThreadOwner != pVCpu->hNativeThread)
        return false;
    return (pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK) == 0;
#endif
}

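#if 0 /* Illustrative sketch, not part of the original file: code that relies
       * on a section already being held typically asserts ownership on entry
       * rather than taking the lock again. */
    Assert(PDMCritSectIsOwner(&pThis->CritSect));
#endif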

/**
 * Checks whether the specified VCPU is the owner of the critical section.
 *
 * @returns true if owner.
 * @returns false if not owner.
 * @param   pCritSect   The critical section.
 * @param   idCpu       The virtual CPU ID.
 */
VMMDECL(bool) PDMCritSectIsOwnerEx(PCPDMCRITSECT pCritSect, VMCPUID idCpu)
{
#ifdef IN_RING3
    NOREF(idCpu);
    return RTCritSectIsOwner(&pCritSect->s.Core);
#else
    PVM pVM = pCritSect->s.CTX_SUFF(pVM);
    AssertPtr(pVM);
    Assert(idCpu < pVM->cCpus);
    return pCritSect->s.Core.NativeThreadOwner == pVM->aCpus[idCpu].hNativeThread
        && (pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK) == 0;
#endif
}


/**
 * Checks if somebody currently owns the critical section.
 *
 * @returns true if locked.
 * @returns false if not locked.
 *
 * @param   pCritSect   The critical section.
 *
 * @remarks This doesn't prove that no deadlocks will occur later on; it's
 *          just a debugging tool.
 */
VMMDECL(bool) PDMCritSectIsOwned(PCPDMCRITSECT pCritSect)
{
    return pCritSect->s.Core.NativeThreadOwner != NIL_RTNATIVETHREAD
        && (pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK) == 0;
}


/**
 * Checks if anyone is waiting on the critical section we own.
 *
 * @returns true if someone is waiting.
 * @returns false if no one is waiting.
 * @param   pCritSect   The critical section.
 */
VMMDECL(bool) PDMCritSectHasWaiters(PCPDMCRITSECT pCritSect)
{
    AssertReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, false);
    Assert(pCritSect->s.Core.NativeThreadOwner == pdmCritSectGetNativeSelf(pCritSect));
    return pCritSect->s.Core.cLockers >= pCritSect->s.Core.cNestings;
}


/**
 * Checks if a critical section is initialized or not.
 *
 * @returns true if initialized.
 * @returns false if not initialized.
 * @param   pCritSect   The critical section.
 */
VMMDECL(bool) PDMCritSectIsInitialized(PCPDMCRITSECT pCritSect)
{
    return RTCritSectIsInitialized(&pCritSect->s.Core);
}


/**
 * Gets the recursion depth.
 *
 * @returns The recursion depth.
 * @param   pCritSect   The critical section.
 */
VMMDECL(uint32_t) PDMCritSectGetRecursion(PCPDMCRITSECT pCritSect)
{
    return RTCritSectGetRecursion(&pCritSect->s.Core);
}