VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PDMAllCritSect.cpp@25409

Last change on this file: r25409, checked in by vboxsync on 2009-12-15

IPRT,PDMCritSect,Main: Moved code dealing with lock counting from RTThread to RTLockValidator. Fixed thread termination assertion on windows.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 22.9 KB
/* $Id: PDMAllCritSect.cpp 25409 2009-12-15 15:04:41Z vboxsync $ */
/** @file
 * PDM - Critical Sections, All Contexts.
 */

/*
 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
 * Clara, CA 95054 USA or visit http://www.sun.com if you need
 * additional information or have any questions.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_PDM//_CRITSECT
#include "PDMInternal.h"
#include <VBox/pdmcritsect.h>
#include <VBox/mm.h>
#include <VBox/vmm.h>
#include <VBox/vm.h>
#include <VBox/err.h>
#include <VBox/hwaccm.h>

#include <VBox/log.h>
#include <iprt/asm.h>
#include <iprt/assert.h>
#ifdef IN_RING3
# include <iprt/lockvalidator.h>
# include <iprt/semaphore.h>
#endif


/*******************************************************************************
*   Defined Constants And Macros                                               *
*******************************************************************************/
/** The number of loops to spin for in ring-3. */
#define PDMCRITSECT_SPIN_COUNT_R3   20
/** The number of loops to spin for in ring-0. */
#define PDMCRITSECT_SPIN_COUNT_R0   256
/** The number of loops to spin for in the raw-mode context. */
#define PDMCRITSECT_SPIN_COUNT_RC   256

#ifdef PDMCRITSECT_STRICT
# define PDMCRITSECT_STRICT_POS_DECL    RTHCUINTPTR uId, RT_SRC_POS_DECL
# define PDMCRITSECT_STRICT_POS_ARGS    uId, RT_SRC_POS_ARGS
# define PDMCRITSECT_STRICT_BLOCK(hThread, pRec, fRecursive) \
    RTLockValidatorCheckBlocking(pRec, (hThread), RTTHREADSTATE_CRITSECT, fRecursive, uId, RT_SRC_POS_ARGS)
#else
# define PDMCRITSECT_STRICT_POS_DECL    int iDummy
# define PDMCRITSECT_STRICT_POS_ARGS    0
# define PDMCRITSECT_STRICT_BLOCK(hThread, pRec, fRecursive) \
    RTThreadBlocking((hThread), RTTHREADSTATE_CRITSECT)
#endif
#define PDMCRITSECT_STRICT_UNBLOCK(hThread) RTThreadUnblocked((hThread), RTTHREADSTATE_CRITSECT)

/* Undefine the automatic VBOX_STRICT API mappings. */
#undef PDMCritSectEnter
#undef PDMCritSectTryEnter


/**
 * Gets the ring-3 native thread handle of the calling thread.
 *
 * @returns native thread handle (ring-3).
 * @param   pCritSect   The critical section. This is used in R0 and RC.
 */
DECL_FORCE_INLINE(RTNATIVETHREAD) pdmCritSectGetNativeSelf(PCPDMCRITSECT pCritSect)
{
#ifdef IN_RING3
    NOREF(pCritSect);
    RTNATIVETHREAD  hNativeSelf = RTThreadNativeSelf();
#else
    AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, ("%RX32\n", pCritSect->s.Core.u32Magic),
                    VERR_SEM_DESTROYED);
    PVM             pVM         = pCritSect->s.CTX_SUFF(pVM);  AssertPtr(pVM);
    PVMCPU          pVCpu       = VMMGetCpu(pVM);              AssertPtr(pVCpu);
    RTNATIVETHREAD  hNativeSelf = pVCpu->hNativeThread;        Assert(hNativeSelf != NIL_RTNATIVETHREAD);
#endif
    return hNativeSelf;
}


/**
 * Tail code called when we've won the battle for the lock.
 *
 * @returns VINF_SUCCESS.
 *
 * @param   pCritSect       The critical section.
 * @param   hNativeSelf     The native handle of this thread.
 */
DECL_FORCE_INLINE(int) pdmCritSectEnterFirst(PPDMCRITSECT pCritSect, RTNATIVETHREAD hNativeSelf, PDMCRITSECT_STRICT_POS_DECL)
{
    AssertMsg(pCritSect->s.Core.NativeThreadOwner == NIL_RTNATIVETHREAD, ("NativeThreadOwner=%p\n", pCritSect->s.Core.NativeThreadOwner));
    Assert(!(pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK));

    ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 1);
    ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, hNativeSelf);

# ifdef PDMCRITSECT_STRICT
    RTLockValidatorWriteLockInc(RTLockValidatorSetOwner(pCritSect->s.Core.pValidatorRec, NIL_RTTHREAD, PDMCRITSECT_STRICT_POS_ARGS));
# endif

    STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
    return VINF_SUCCESS;
}


#ifdef IN_RING3
/**
 * Deals with the contended case in ring-3.
 *
 * @returns VINF_SUCCESS or VERR_SEM_DESTROYED.
 * @param   pCritSect       The critsect.
 * @param   hNativeSelf     The native thread handle.
 */
static int pdmR3CritSectEnterContended(PPDMCRITSECT pCritSect, RTNATIVETHREAD hNativeSelf, PDMCRITSECT_STRICT_POS_DECL)
{
    /*
     * Start waiting.
     */
    if (ASMAtomicIncS32(&pCritSect->s.Core.cLockers) == 0)
        return pdmCritSectEnterFirst(pCritSect, hNativeSelf, PDMCRITSECT_STRICT_POS_ARGS);
    STAM_COUNTER_INC(&pCritSect->s.StatContentionR3);

    /*
     * The wait loop.
     */
    PSUPDRVSESSION  pSession = pCritSect->s.CTX_SUFF(pVM)->pSession;
    SUPSEMEVENT     hEvent   = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
# ifdef PDMCRITSECT_STRICT
    RTTHREAD        hSelf    = RTThreadSelfAutoAdopt();
    RTLockValidatorCheckOrder(pCritSect->s.Core.pValidatorRec, hSelf, 0, NULL, 0, NULL);
# else
    RTTHREAD        hSelf    = RTThreadSelf();
# endif
    for (;;)
    {
        PDMCRITSECT_STRICT_BLOCK(hSelf, pCritSect->s.Core.pValidatorRec, !(pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NO_NESTING));
        int rc = SUPSemEventWaitNoResume(pSession, hEvent, RT_INDEFINITE_WAIT);
        PDMCRITSECT_STRICT_UNBLOCK(hSelf);
        if (RT_UNLIKELY(pCritSect->s.Core.u32Magic != RTCRITSECT_MAGIC))
            return VERR_SEM_DESTROYED;
        if (rc == VINF_SUCCESS)
            return pdmCritSectEnterFirst(pCritSect, hNativeSelf, PDMCRITSECT_STRICT_POS_ARGS);
        AssertMsg(rc == VERR_INTERRUPTED, ("rc=%Rrc\n", rc));
    }
    /* won't get here */
}
#endif /* IN_RING3 */


/**
 * Common worker for the debug and normal APIs.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @returns rcBusy when encountering a busy critical section in GC/R0.
 * @returns VERR_SEM_DESTROYED if the critical section is dead.
 *
 * @param   pCritSect   The PDM critical section to enter.
 * @param   rcBusy      The status code to return when we're in GC or R0
 *                      and the section is busy.
 */
DECL_FORCE_INLINE(int) pdmCritSectEnter(PPDMCRITSECT pCritSect, int rcBusy, PDMCRITSECT_STRICT_POS_DECL)
{
    Assert(pCritSect->s.Core.cNestings < 8);  /* useful to catch incorrect locking */

    /*
     * If the critical section has already been destroyed, then inform the caller.
     */
    AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC,
                    ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic),
                    VERR_SEM_DESTROYED);

    /*
     * See if we're lucky.
     */
    RTNATIVETHREAD hNativeSelf = pdmCritSectGetNativeSelf(pCritSect);
    /* Not owned ... */
    if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
        return pdmCritSectEnterFirst(pCritSect, hNativeSelf, PDMCRITSECT_STRICT_POS_ARGS);

    /* ... or nested. */
    if (pCritSect->s.Core.NativeThreadOwner == hNativeSelf)
    {
        ASMAtomicIncS32(&pCritSect->s.Core.cLockers);
        ASMAtomicIncS32(&pCritSect->s.Core.cNestings);
        ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
        return VINF_SUCCESS;
    }

    /*
     * Spin for a bit without incrementing the counter.
     */
    /** @todo Move this to cfgm variables since it doesn't make sense to spin on UNI
        cpu systems. */
    int32_t cSpinsLeft = CTX_SUFF(PDMCRITSECT_SPIN_COUNT_);
    while (cSpinsLeft-- > 0)
    {
        if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
            return pdmCritSectEnterFirst(pCritSect, hNativeSelf, PDMCRITSECT_STRICT_POS_ARGS);
        ASMNopPause();
        /** @todo Should use monitor/mwait on e.g. &cLockers here, possibly with a
            cli'ed pendingpreemption check up front using sti w/ instruction fusing
            for avoiding races. Hmm ... This is assuming the other party is actually
            executing code on another CPU ... which we could keep track of if we
            wanted. */
    }

#ifdef IN_RING3
    /*
     * Take the slow path.
     */
    return pdmR3CritSectEnterContended(pCritSect, hNativeSelf, PDMCRITSECT_STRICT_POS_ARGS);
#else
    /*
     * Return busy.
     */
    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
    LogFlow(("PDMCritSectEnter: locked => R3 (%Rrc)\n", rcBusy));
    return rcBusy;
#endif
}


/**
 * Enters a PDM critical section.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @returns rcBusy when encountering a busy critical section in GC/R0.
 * @returns VERR_SEM_DESTROYED if the critical section is dead.
 *
 * @param   pCritSect   The PDM critical section to enter.
 * @param   rcBusy      The status code to return when we're in GC or R0
 *                      and the section is busy.
 */
VMMDECL(int) PDMCritSectEnter(PPDMCRITSECT pCritSect, int rcBusy)
{
#ifndef PDMCRITSECT_STRICT
    return pdmCritSectEnter(pCritSect, rcBusy, PDMCRITSECT_STRICT_POS_ARGS);
#else
    /* No need for a second code instance. */
    return PDMCritSectEnterDebug(pCritSect, rcBusy, (uintptr_t)ASMReturnAddress(), RT_SRC_POS);
#endif
}
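
/*
 * Illustrative usage sketch (not part of the original file): how a device's
 * ring-0/raw-mode handler might pair PDMCritSectEnter with an rcBusy fallback
 * status and PDMCritSectLeave. The device structure (MYDEVICE), the handler
 * and the register array are hypothetical; rcBusy would normally be the
 * context-specific "retry this access in ring-3" status chosen by the caller.
 */
#if 0 /* example only */
static int myDeviceMmioWriteExample(PMYDEVICE pThis, uint32_t offReg, uint32_t u32Value, int rcBusy)
{
    /* In R0/RC a contended section returns rcBusy instead of blocking, so the
       caller can defer the access to ring-3 where entering blocks instead. */
    int rc = PDMCritSectEnter(&pThis->CritSect, rcBusy);
    if (rc != VINF_SUCCESS)
        return rc;                              /* retried in ring-3 */

    pThis->au32Regs[offReg >> 2] = u32Value;    /* guarded device state update */

    PDMCritSectLeave(&pThis->CritSect);
    return VINF_SUCCESS;
}
#endif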


/**
 * Enters a PDM critical section, with location information for debugging.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @returns rcBusy when encountering a busy critical section in GC/R0.
 * @returns VERR_SEM_DESTROYED if the critical section is dead.
 *
 * @param   pCritSect   The PDM critical section to enter.
 * @param   rcBusy      The status code to return when we're in GC or R0
 *                      and the section is busy.
 * @param   uId         Some kind of locking location ID. Typically a
 *                      return address up the stack. Optional (0).
 * @param   pszFile     The file where the lock is being acquired from.
 *                      Optional.
 * @param   iLine       The line number in that file. Optional (0).
 * @param   pszFunction The function where the lock is being acquired
 *                      from. Optional.
 */
VMMDECL(int) PDMCritSectEnterDebug(PPDMCRITSECT pCritSect, int rcBusy, RTHCUINTPTR uId, RT_SRC_POS_DECL)
{
#ifdef PDMCRITSECT_STRICT
    return pdmCritSectEnter(pCritSect, rcBusy, PDMCRITSECT_STRICT_POS_ARGS);
#else
    /* No need for a second code instance. */
    return PDMCritSectEnter(pCritSect, rcBusy);
#endif
}


/**
 * Common worker for the debug and normal APIs.
 *
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_BUSY if the critsect was owned.
 * @retval  VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
 * @retval  VERR_SEM_DESTROYED if RTCritSectDelete was called while waiting.
 *
 * @param   pCritSect   The critical section.
 */
static int pdmCritSectTryEnter(PPDMCRITSECT pCritSect, PDMCRITSECT_STRICT_POS_DECL)
{
    /*
     * If the critical section has already been destroyed, then inform the caller.
     */
    AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC,
                    ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic),
                    VERR_SEM_DESTROYED);

    /*
     * See if we're lucky.
     */
    RTNATIVETHREAD hNativeSelf = pdmCritSectGetNativeSelf(pCritSect);
    /* Not owned ... */
    if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
        return pdmCritSectEnterFirst(pCritSect, hNativeSelf, PDMCRITSECT_STRICT_POS_ARGS);

    /* ... or nested. */
    if (pCritSect->s.Core.NativeThreadOwner == hNativeSelf)
    {
        ASMAtomicIncS32(&pCritSect->s.Core.cLockers);
        ASMAtomicIncS32(&pCritSect->s.Core.cNestings);
        ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
        return VINF_SUCCESS;
    }

    /* no spinning */

    /*
     * Return busy.
     */
#ifdef IN_RING3
    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionR3);
#else
    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
#endif
    LogFlow(("PDMCritSectTryEnter: locked\n"));
    return VERR_SEM_BUSY;
}


/**
 * Try enter a critical section.
 *
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_BUSY if the critsect was owned.
 * @retval  VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
 * @retval  VERR_SEM_DESTROYED if RTCritSectDelete was called while waiting.
 *
 * @param   pCritSect   The critical section.
 */
VMMDECL(int) PDMCritSectTryEnter(PPDMCRITSECT pCritSect)
{
#ifndef PDMCRITSECT_STRICT
    return pdmCritSectTryEnter(pCritSect, PDMCRITSECT_STRICT_POS_ARGS);
#else
    /* No need for a second code instance. */
    return PDMCritSectTryEnterDebug(pCritSect, (uintptr_t)ASMReturnAddress(), RT_SRC_POS);
#endif
}
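
/*
 * Illustrative usage sketch (hypothetical caller, not from this file): a path
 * that must not block can use PDMCritSectTryEnter and treat VERR_SEM_BUSY as
 * "postpone the work" rather than waiting for the current owner. The device
 * structure and status field below are made up for the example.
 */
#if 0 /* example only */
static int myDeviceStatusReadExample(PMYDEVICE pThis, uint32_t *pu32Value)
{
    int rc = PDMCritSectTryEnter(&pThis->CritSect);
    if (rc == VERR_SEM_BUSY)
        return rc;                      /* don't block; let the caller retry */
    AssertRCReturn(rc, rc);             /* VERR_SEM_DESTROYED and friends */

    *pu32Value = pThis->u32StatusExample;

    PDMCritSectLeave(&pThis->CritSect);
    return VINF_SUCCESS;
}
#endif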


/**
 * Try enter a critical section, with location information for debugging.
 *
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_BUSY if the critsect was owned.
 * @retval  VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
 * @retval  VERR_SEM_DESTROYED if RTCritSectDelete was called while waiting.
 *
 * @param   pCritSect   The critical section.
 * @param   uId         Some kind of locking location ID. Typically a
 *                      return address up the stack. Optional (0).
 * @param   pszFile     The file where the lock is being acquired from.
 *                      Optional.
 * @param   iLine       The line number in that file. Optional (0).
 * @param   pszFunction The function where the lock is being acquired
 *                      from. Optional.
 */
VMMDECL(int) PDMCritSectTryEnterDebug(PPDMCRITSECT pCritSect, RTHCUINTPTR uId, RT_SRC_POS_DECL)
{
#ifdef PDMCRITSECT_STRICT
    return pdmCritSectTryEnter(pCritSect, PDMCRITSECT_STRICT_POS_ARGS);
#else
    /* No need for a second code instance. */
    return PDMCritSectTryEnter(pCritSect);
#endif
}


#ifdef IN_RING3
/**
 * Enters a PDM critical section.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @returns rcBusy when encountering a busy critical section in GC/R0.
 * @returns VERR_SEM_DESTROYED if the critical section is dead.
 *
 * @param   pCritSect   The PDM critical section to enter.
 * @param   fCallRing3  Whether this is a VMMRZCallRing3() request.
 */
VMMR3DECL(int) PDMR3CritSectEnterEx(PPDMCRITSECT pCritSect, bool fCallRing3)
{
    int rc = PDMCritSectEnter(pCritSect, VERR_INTERNAL_ERROR);
    if (    rc == VINF_SUCCESS
        &&  fCallRing3
        &&  pCritSect->s.Core.pValidatorRec
        &&  pCritSect->s.Core.pValidatorRec->hThread != NIL_RTTHREAD)
        RTLockValidatorWriteLockDec(RTLockValidatorUnsetOwner(pCritSect->s.Core.pValidatorRec));
    return rc;
}
#endif /* IN_RING3 */


/**
 * Leaves a critical section entered with PDMCritSectEnter().
 *
 * @param   pCritSect   The PDM critical section to leave.
 */
VMMDECL(void) PDMCritSectLeave(PPDMCRITSECT pCritSect)
{
    AssertMsg(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic));
    Assert(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC);
    Assert(pCritSect->s.Core.NativeThreadOwner == pdmCritSectGetNativeSelf(pCritSect));
    Assert(pCritSect->s.Core.cNestings >= 1);

    /*
     * Nested leave.
     */
    if (pCritSect->s.Core.cNestings > 1)
    {
        ASMAtomicDecS32(&pCritSect->s.Core.cNestings);
        ASMAtomicDecS32(&pCritSect->s.Core.cLockers);
        return;
    }

#ifdef IN_RING0
# if 0 /** @todo Make SUPSemEventSignal interrupt safe (handle table++) and enable this for: defined(RT_OS_LINUX) || defined(RT_OS_OS2) */
    if (1) /* SUPSemEventSignal is safe */
# else
    if (ASMIntAreEnabled())
# endif
#endif
#if defined(IN_RING3) || defined(IN_RING0)
    {
        /*
         * Leave for real.
         */
        /* update members. */
# ifdef IN_RING3
        RTSEMEVENT hEventToSignal  = pCritSect->s.EventToSignal;
        pCritSect->s.EventToSignal = NIL_RTSEMEVENT;
#  if defined(PDMCRITSECT_STRICT)
        if (pCritSect->s.Core.pValidatorRec->hThread != NIL_RTTHREAD)
            RTLockValidatorWriteLockDec(RTLockValidatorUnsetOwner(pCritSect->s.Core.pValidatorRec));
#  endif
        Assert(!pCritSect->s.Core.pValidatorRec || pCritSect->s.Core.pValidatorRec->hThread == NIL_RTTHREAD);
# endif
        ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
        ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD);
        ASMAtomicDecS32(&pCritSect->s.Core.cNestings);

        /* stop and decrement lockers. */
        STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);
        ASMCompilerBarrier();
        if (ASMAtomicDecS32(&pCritSect->s.Core.cLockers) >= 0)
        {
            /* Someone is waiting, wake up one of them. */
            SUPSEMEVENT     hEvent   = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
            PSUPDRVSESSION  pSession = pCritSect->s.CTX_SUFF(pVM)->pSession;
            int rc = SUPSemEventSignal(pSession, hEvent);
            AssertRC(rc);
        }

# ifdef IN_RING3
        /* Signal exit event. */
        if (hEventToSignal != NIL_RTSEMEVENT)
        {
            LogBird(("Signalling %#x\n", hEventToSignal));
            int rc = RTSemEventSignal(hEventToSignal);
            AssertRC(rc);
        }
# endif

# if defined(DEBUG_bird) && defined(IN_RING0)
        VMMTrashVolatileXMMRegs();
# endif
    }
#endif /* IN_RING3 || IN_RING0 */
#ifdef IN_RING0
    else
#endif
#if defined(IN_RING0) || defined(IN_RC)
    {
        /*
         * Try leave it.
         */
        if (pCritSect->s.Core.cLockers == 0)
        {
            ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 0);
            RTNATIVETHREAD hNativeThread = pCritSect->s.Core.NativeThreadOwner;
            ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
            STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);

            ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD);
            if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, -1, 0))
                return;

            /* darn, someone raced in on us. */
            ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, hNativeThread);
            STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
            ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 1);
        }
        ASMAtomicOrU32(&pCritSect->s.Core.fFlags, PDMCRITSECT_FLAGS_PENDING_UNLOCK);

        /*
         * Queue the request.
         */
        PVM         pVM   = pCritSect->s.CTX_SUFF(pVM);  AssertPtr(pVM);
        PVMCPU      pVCpu = VMMGetCpu(pVM);               AssertPtr(pVCpu);
        uint32_t    i     = pVCpu->pdm.s.cQueuedCritSectLeaves++;
        LogFlow(("PDMCritSectLeave: [%d]=%p => R3\n", i, pCritSect));
        AssertFatal(i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectsLeaves));
        pVCpu->pdm.s.apQueuedCritSectsLeaves[i] = MMHyperCCToR3(pVM, pCritSect);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_PDM_CRITSECT);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
        STAM_REL_COUNTER_INC(&pVM->pdm.s.StatQueuedCritSectLeaves);
        STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZUnlock);
    }
#endif /* IN_RING0 || IN_RC */
}


#if defined(IN_RING3) || defined(IN_RING0)
/**
 * Process the critical sections queued for ring-3 'leave'.
 *
 * @param   pVCpu   The VMCPU handle.
 */
VMMDECL(void) PDMCritSectFF(PVMCPU pVCpu)
{
    Assert(pVCpu->pdm.s.cQueuedCritSectLeaves > 0);

    const RTUINT c = pVCpu->pdm.s.cQueuedCritSectLeaves;
    for (RTUINT i = 0; i < c; i++)
    {
# ifdef IN_RING3
        PPDMCRITSECT pCritSect = pVCpu->pdm.s.apQueuedCritSectsLeaves[i];
# else
        PPDMCRITSECT pCritSect = (PPDMCRITSECT)MMHyperR3ToCC(pVCpu->CTX_SUFF(pVM), pVCpu->pdm.s.apQueuedCritSectsLeaves[i]);
# endif

        PDMCritSectLeave(pCritSect);
        LogFlow(("PDMR3CritSectFF: %p\n", pCritSect));
    }

    pVCpu->pdm.s.cQueuedCritSectLeaves = 0;
    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PDM_CRITSECT);
}
#endif /* IN_RING3 || IN_RING0 */


/**
 * Checks whether the caller is the owner of the critical section.
 *
 * @returns true if owner.
 * @returns false if not owner.
 * @param   pCritSect   The critical section.
 */
VMMDECL(bool) PDMCritSectIsOwner(PCPDMCRITSECT pCritSect)
{
#ifdef IN_RING3
    return RTCritSectIsOwner(&pCritSect->s.Core);
#else
    PVM     pVM   = pCritSect->s.CTX_SUFF(pVM); AssertPtr(pVM);
    PVMCPU  pVCpu = VMMGetCpu(pVM);             AssertPtr(pVCpu);
    if (pCritSect->s.Core.NativeThreadOwner != pVCpu->hNativeThread)
        return false;
    return (pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK) == 0;
#endif
}
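
/*
 * Illustrative sketch (hypothetical helper, not from this file): internal
 * device helpers commonly assert ownership on entry so that callers that
 * forgot to take the section are caught in strict builds. MYDEVICE and the
 * helper name are made up for the example.
 */
#if 0 /* example only */
static void myDeviceUpdateIrqExample(PMYDEVICE pThis)
{
    Assert(PDMCritSectIsOwner(&pThis->CritSect)); /* caller must hold the section */
    /* ... recompute and raise/lower the interrupt line under the lock ... */
}
#endif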


/**
 * Checks whether the specified VCPU is the owner of the critical section.
 *
 * @returns true if owner.
 * @returns false if not owner.
 * @param   pCritSect   The critical section.
 * @param   idCpu       VCPU id.
 */
VMMDECL(bool) PDMCritSectIsOwnerEx(PCPDMCRITSECT pCritSect, VMCPUID idCpu)
{
#ifdef IN_RING3
    NOREF(idCpu);
    return RTCritSectIsOwner(&pCritSect->s.Core);
#else
    PVM pVM = pCritSect->s.CTX_SUFF(pVM);
    AssertPtr(pVM);
    Assert(idCpu < pVM->cCpus);
    return pCritSect->s.Core.NativeThreadOwner == pVM->aCpus[idCpu].hNativeThread
        && (pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK) == 0;
#endif
}


/**
 * Checks if somebody currently owns the critical section.
 *
 * @returns true if locked.
 * @returns false if not locked.
 *
 * @param   pCritSect   The critical section.
 *
 * @remarks This doesn't prove that no deadlocks will occur later on; it's
 *          just a debugging tool.
 */
VMMDECL(bool) PDMCritSectIsOwned(PCPDMCRITSECT pCritSect)
{
    return pCritSect->s.Core.NativeThreadOwner != NIL_RTNATIVETHREAD
        && (pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK) == 0;
}


/**
 * Checks if anyone is waiting on the critical section we own.
 *
 * @returns true if someone is waiting.
 * @returns false if no one is waiting.
 * @param   pCritSect   The critical section.
 */
VMMDECL(bool) PDMCritSectHasWaiters(PCPDMCRITSECT pCritSect)
{
    AssertReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, false);
    Assert(pCritSect->s.Core.NativeThreadOwner == pdmCritSectGetNativeSelf(pCritSect));
    return pCritSect->s.Core.cLockers >= pCritSect->s.Core.cNestings;
}
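
/*
 * Illustrative sketch (hypothetical worker loop, not from this file): a
 * long-running job that holds the section can poll PDMCritSectHasWaiters and
 * yield early so a contending thread gets the lock sooner. The device
 * structure and the two work helpers are made up for the example; the loop
 * assumes the caller already owns the section.
 */
#if 0 /* example only */
static void myDeviceDrainWorkExample(PMYDEVICE pThis)
{
    while (myDeviceHasPendingWorkExample(pThis))
    {
        myDeviceDoOneUnitOfWorkExample(pThis);
        if (PDMCritSectHasWaiters(&pThis->CritSect))
            break;  /* yield to the waiter; finish the rest on the next call */
    }
}
#endif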


/**
 * Checks if a critical section is initialized or not.
 *
 * @returns true if initialized.
 * @returns false if not initialized.
 * @param   pCritSect   The critical section.
 */
VMMDECL(bool) PDMCritSectIsInitialized(PCPDMCRITSECT pCritSect)
{
    return RTCritSectIsInitialized(&pCritSect->s.Core);
}


/**
 * Gets the recursion depth.
 *
 * @returns The recursion depth.
 * @param   pCritSect   The critical section.
 */
VMMDECL(uint32_t) PDMCritSectGetRecursion(PCPDMCRITSECT pCritSect)
{
    return RTCritSectGetRecursion(&pCritSect->s.Core);
}