VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PDMAllCritSect.cpp@ 38082

Last change on this file since 38082 was 38081, checked in by vboxsync, 14 years ago

PDMCritSectLeave: Release assertion if not owner.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 25.5 KB
/* $Id: PDMAllCritSect.cpp 38081 2011-07-20 14:21:36Z vboxsync $ */
/** @file
 * PDM - Critical Sections, All Contexts.
 */

/*
 * Copyright (C) 2006-2007 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_PDM//_CRITSECT
#include "PDMInternal.h"
#include <VBox/vmm/pdmcritsect.h>
#include <VBox/vmm/mm.h>
#include <VBox/vmm/vmm.h>
#include <VBox/vmm/vm.h>
#include <VBox/err.h>
#include <VBox/vmm/hwaccm.h>

#include <VBox/log.h>
#include <iprt/asm.h>
#include <iprt/asm-amd64-x86.h>
#include <iprt/assert.h>
#ifdef IN_RING3
# include <iprt/lockvalidator.h>
# include <iprt/semaphore.h>
#endif
#if defined(IN_RING3) || defined(IN_RING0)
# include <iprt/thread.h>
#endif


/*******************************************************************************
*   Defined Constants And Macros                                               *
*******************************************************************************/
/** The number of loops to spin for in ring-3. */
#define PDMCRITSECT_SPIN_COUNT_R3   20
/** The number of loops to spin for in ring-0. */
#define PDMCRITSECT_SPIN_COUNT_R0   256
/** The number of loops to spin for in the raw-mode context. */
#define PDMCRITSECT_SPIN_COUNT_RC   256


/* Undefine the automatic VBOX_STRICT API mappings. */
#undef PDMCritSectEnter
#undef PDMCritSectTryEnter


/**
 * Gets the ring-3 native thread handle of the calling thread.
 *
 * @returns native thread handle (ring-3).
 * @param   pCritSect   The critical section. This is used in R0 and RC.
 */
DECL_FORCE_INLINE(RTNATIVETHREAD) pdmCritSectGetNativeSelf(PCPDMCRITSECT pCritSect)
{
#ifdef IN_RING3
    NOREF(pCritSect);
    RTNATIVETHREAD  hNativeSelf = RTThreadNativeSelf();
#else
    AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, ("%RX32\n", pCritSect->s.Core.u32Magic),
                    NIL_RTNATIVETHREAD);
    PVM             pVM         = pCritSect->s.CTX_SUFF(pVM); AssertPtr(pVM);
    PVMCPU          pVCpu       = VMMGetCpu(pVM);             AssertPtr(pVCpu);
    RTNATIVETHREAD  hNativeSelf = pVCpu->hNativeThread;       Assert(hNativeSelf != NIL_RTNATIVETHREAD);
#endif
    return hNativeSelf;
}


/**
 * Tail code called when we've won the battle for the lock.
 *
 * @returns VINF_SUCCESS.
 *
 * @param   pCritSect   The critical section.
 * @param   hNativeSelf The native handle of this thread.
 */
DECL_FORCE_INLINE(int) pdmCritSectEnterFirst(PPDMCRITSECT pCritSect, RTNATIVETHREAD hNativeSelf, PCRTLOCKVALSRCPOS pSrcPos)
{
    AssertMsg(pCritSect->s.Core.NativeThreadOwner == NIL_RTNATIVETHREAD, ("NativeThreadOwner=%p\n", pCritSect->s.Core.NativeThreadOwner));
    Assert(!(pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK));

    ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 1);
    Assert(pCritSect->s.Core.cNestings == 1);
    ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, hNativeSelf);

# ifdef PDMCRITSECT_STRICT
    RTLockValidatorRecExclSetOwner(pCritSect->s.Core.pValidatorRec, NIL_RTTHREAD, pSrcPos, true);
# endif

    STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
    return VINF_SUCCESS;
}


#if defined(IN_RING3) || defined(IN_RING0)
/**
 * Deals with the contended case in ring-3 and ring-0.
 *
 * @returns VINF_SUCCESS or VERR_SEM_DESTROYED.
 * @param   pCritSect   The critsect.
 * @param   hNativeSelf The native thread handle.
 */
static int pdmR3R0CritSectEnterContended(PPDMCRITSECT pCritSect, RTNATIVETHREAD hNativeSelf, PCRTLOCKVALSRCPOS pSrcPos)
{
    /*
     * Start waiting.
     */
    if (ASMAtomicIncS32(&pCritSect->s.Core.cLockers) == 0)
        return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
# ifdef IN_RING3
    STAM_COUNTER_INC(&pCritSect->s.StatContentionR3);
# else
    STAM_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
# endif

    /*
     * The wait loop.
     */
    PSUPDRVSESSION  pSession = pCritSect->s.CTX_SUFF(pVM)->pSession;
    SUPSEMEVENT     hEvent   = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
# ifdef IN_RING3
# ifdef PDMCRITSECT_STRICT
    RTTHREAD hThreadSelf = RTThreadSelfAutoAdopt();
    int rc2 = RTLockValidatorRecExclCheckOrder(pCritSect->s.Core.pValidatorRec, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
    if (RT_FAILURE(rc2))
        return rc2;
# else
    RTTHREAD hThreadSelf = RTThreadSelf();
# endif
# endif
    for (;;)
    {
# ifdef PDMCRITSECT_STRICT
        int rc9 = RTLockValidatorRecExclCheckBlocking(pCritSect->s.Core.pValidatorRec, hThreadSelf, pSrcPos,
                                                      !(pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NO_NESTING),
                                                      RT_INDEFINITE_WAIT, RTTHREADSTATE_CRITSECT, true);
        if (RT_FAILURE(rc9))
            return rc9;
# elif defined(IN_RING3)
        RTThreadBlocking(hThreadSelf, RTTHREADSTATE_CRITSECT, true);
# endif
        int rc = SUPSemEventWaitNoResume(pSession, hEvent, RT_INDEFINITE_WAIT);
# ifdef IN_RING3
        RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_CRITSECT);
# endif

        if (RT_UNLIKELY(pCritSect->s.Core.u32Magic != RTCRITSECT_MAGIC))
            return VERR_SEM_DESTROYED;
        if (rc == VINF_SUCCESS)
            return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
        AssertMsg(rc == VERR_INTERRUPTED, ("rc=%Rrc\n", rc));
    }
    /* won't get here */
}
#endif /* IN_RING3 || IN_RING0 */


/**
 * Common worker for the debug and normal APIs.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @returns rcBusy when encountering a busy critical section in GC/R0.
 * @returns VERR_SEM_DESTROYED if the critical section is dead.
 *
 * @param   pCritSect   The PDM critical section to enter.
 * @param   rcBusy      The status code to return when we're in GC or R0
 *                      and the section is busy.
 */
DECL_FORCE_INLINE(int) pdmCritSectEnter(PPDMCRITSECT pCritSect, int rcBusy, PCRTLOCKVALSRCPOS pSrcPos)
{
    Assert(pCritSect->s.Core.cNestings < 8);  /* useful to catch incorrect locking */
    Assert(pCritSect->s.Core.cNestings >= 0);

    /*
     * If the critical section has already been destroyed, then inform the caller.
     */
    AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC,
                    ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic),
                    VERR_SEM_DESTROYED);

    /*
     * See if we're lucky.
     */
    /* NOP ... */
    if (pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NOP)
        return VINF_SUCCESS;

    RTNATIVETHREAD hNativeSelf = pdmCritSectGetNativeSelf(pCritSect);
    /* ... not owned ... */
    if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
        return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);

    /* ... or nested. */
    if (pCritSect->s.Core.NativeThreadOwner == hNativeSelf)
    {
        ASMAtomicIncS32(&pCritSect->s.Core.cLockers);
        ASMAtomicIncS32(&pCritSect->s.Core.cNestings);
        Assert(pCritSect->s.Core.cNestings > 1);
        return VINF_SUCCESS;
    }

    /*
     * Spin for a bit without incrementing the counter.
     */
    /** @todo Move this to cfgm variables since it doesn't make sense to spin on UNI
     *        cpu systems. */
    int32_t cSpinsLeft = CTX_SUFF(PDMCRITSECT_SPIN_COUNT_);
    while (cSpinsLeft-- > 0)
    {
        if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
            return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
        ASMNopPause();
        /** @todo Should use monitor/mwait on e.g. &cLockers here, possibly with a
           cli'ed pendingpreemption check up front using sti w/ instruction fusing
           for avoiding races. Hmm ... This is assuming the other party is actually
           executing code on another CPU ... which we could keep track of if we
           wanted. */
    }

#ifdef IN_RING3
    /*
     * Take the slow path.
     */
    return pdmR3R0CritSectEnterContended(pCritSect, hNativeSelf, pSrcPos);

#else
# ifdef IN_RING0
    /** @todo If preemption is disabled it means we're in VT-x/AMD-V context
     *        and would be better off switching out of that while waiting for
     *        the lock.  Several of the locks jump back to ring-3 just to
     *        get the lock; the ring-3 code will then call the kernel to do
     *        the lock wait, and when the call returns it will call ring-0
     *        again and resume in setjmp style.  Not very efficient. */
# if 0
    if (ASMIntAreEnabled()) /** @todo this can be handled as well by changing
                             * callers not prepared for longjmp/blocking to
                             * use PDMCritSectTryEnter. */
    {
        /*
         * Leave HWACCM context while waiting if necessary.
         */
        int rc;
        if (RTThreadPreemptIsEnabled(NIL_RTTHREAD))
        {
            STAM_REL_COUNTER_ADD(&pCritSect->s.StatContentionRZLock, 1000000);
            rc = pdmR3R0CritSectEnterContended(pCritSect, hNativeSelf, pSrcPos);
        }
        else
        {
            STAM_REL_COUNTER_ADD(&pCritSect->s.StatContentionRZLock, 1000000000);
            PVM     pVM   = pCritSect->s.CTX_SUFF(pVM);
            PVMCPU  pVCpu = VMMGetCpu(pVM);
            HWACCMR0Leave(pVM, pVCpu);
            RTThreadPreemptRestore(NIL_RTTHREAD, ????);

            rc = pdmR3R0CritSectEnterContended(pCritSect, hNativeSelf, pSrcPos);

            RTThreadPreemptDisable(NIL_RTTHREAD, ????);
            HWACCMR0Enter(pVM, pVCpu);
        }
        return rc;
    }
# else
    /*
     * If preemption hasn't been disabled, we can block here in ring-0.
     */
    if (   RTThreadPreemptIsEnabled(NIL_RTTHREAD)
        && ASMIntAreEnabled())
        return pdmR3R0CritSectEnterContended(pCritSect, hNativeSelf, pSrcPos);
# endif
#endif /* IN_RING0 */

    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);

    /*
     * Call ring-3 to acquire the critical section?
     */
    if (rcBusy == VINF_SUCCESS)
    {
        PVM     pVM   = pCritSect->s.CTX_SUFF(pVM); AssertPtr(pVM);
        PVMCPU  pVCpu = VMMGetCpu(pVM);             AssertPtr(pVCpu);
        return VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_PDM_CRIT_SECT_ENTER, MMHyperCCToR3(pVM, pCritSect));
    }

    /*
     * Return busy.
     */
    LogFlow(("PDMCritSectEnter: locked => R3 (%Rrc)\n", rcBusy));
    return rcBusy;
#endif /* !IN_RING3 */
}


/**
 * Enters a PDM critical section.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @returns rcBusy when encountering a busy critical section in GC/R0.
 * @returns VERR_SEM_DESTROYED if the critical section is dead.
 *
 * @param   pCritSect   The PDM critical section to enter.
 * @param   rcBusy      The status code to return when we're in GC or R0
 *                      and the section is busy.  Pass VINF_SUCCESS to
 *                      acquire the critical section through a ring-3
 *                      call if necessary.
 */
VMMDECL(int) PDMCritSectEnter(PPDMCRITSECT pCritSect, int rcBusy)
{
#ifndef PDMCRITSECT_STRICT
    return pdmCritSectEnter(pCritSect, rcBusy, NULL);
#else
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
    return pdmCritSectEnter(pCritSect, rcBusy, &SrcPos);
#endif
}
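
/* A minimal usage sketch (illustration only: MYDEVSTATE, pThis and its
 * CritSect member are hypothetical; the entry/exit contract comes from this
 * file).  Passing VINF_SUCCESS as rcBusy makes the R0/RC paths acquire the
 * section through a ring-3 call instead of returning a busy status:
 *
 *     static int myDevDoWork(PMYDEVSTATE pThis)
 *     {
 *         int rc = PDMCritSectEnter(&pThis->CritSect, VINF_SUCCESS);
 *         if (rc != VINF_SUCCESS)
 *             return rc;                 (VERR_SEM_DESTROYED is possible)
 *         pThis->cGuardedOps++;          (guarded device state access)
 *         PDMCritSectLeave(&pThis->CritSect);
 *         return VINF_SUCCESS;
 *     }
 */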


/**
 * Enters a PDM critical section, with location information for debugging.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @returns rcBusy when encountering a busy critical section in GC/R0.
 * @returns VERR_SEM_DESTROYED if the critical section is dead.
 *
 * @param   pCritSect   The PDM critical section to enter.
 * @param   rcBusy      The status code to return when we're in GC or R0
 *                      and the section is busy.  Pass VINF_SUCCESS to
 *                      acquire the critical section through a ring-3
 *                      call if necessary.
 * @param   uId         Some kind of locking location ID.  Typically a
 *                      return address up the stack.  Optional (0).
 * @param   pszFile     The file where the lock is being acquired from.
 *                      Optional.
 * @param   iLine       The line number in that file.  Optional (0).
 * @param   pszFunction The function where the lock is being acquired
 *                      from.  Optional.
 */
VMMDECL(int) PDMCritSectEnterDebug(PPDMCRITSECT pCritSect, int rcBusy, RTHCUINTPTR uId, RT_SRC_POS_DECL)
{
#ifdef PDMCRITSECT_STRICT
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
    return pdmCritSectEnter(pCritSect, rcBusy, &SrcPos);
#else
    return pdmCritSectEnter(pCritSect, rcBusy, NULL);
#endif
}
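
/* A sketch of calling the debug variant directly (hypothetical caller).  The
 * IPRT RT_SRC_POS macro supplies the pszFile/iLine/pszFunction triple that
 * matches RT_SRC_POS_DECL, and a return address makes a useful uId:
 *
 *     rc = PDMCritSectEnterDebug(&pThis->CritSect, VINF_SUCCESS,
 *                                (RTHCUINTPTR)ASMReturnAddress(), RT_SRC_POS);
 */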


/**
 * Common worker for the debug and normal APIs.
 *
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_BUSY if the critsect was owned.
 * @retval  VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
 * @retval  VERR_SEM_DESTROYED if RTCritSectDelete was called while waiting.
 *
 * @param   pCritSect   The critical section.
 */
static int pdmCritSectTryEnter(PPDMCRITSECT pCritSect, PCRTLOCKVALSRCPOS pSrcPos)
{
    /*
     * If the critical section has already been destroyed, then inform the caller.
     */
    AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC,
                    ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic),
                    VERR_SEM_DESTROYED);

    /*
     * See if we're lucky.
     */
    /* NOP ... */
    if (pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NOP)
        return VINF_SUCCESS;

    RTNATIVETHREAD hNativeSelf = pdmCritSectGetNativeSelf(pCritSect);
    /* ... not owned ... */
    if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
        return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);

    /* ... or nested. */
    if (pCritSect->s.Core.NativeThreadOwner == hNativeSelf)
    {
        ASMAtomicIncS32(&pCritSect->s.Core.cLockers);
        ASMAtomicIncS32(&pCritSect->s.Core.cNestings);
        Assert(pCritSect->s.Core.cNestings > 1);
        return VINF_SUCCESS;
    }

    /* no spinning */

    /*
     * Return busy.
     */
#ifdef IN_RING3
    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionR3);
#else
    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
#endif
    LogFlow(("PDMCritSectTryEnter: locked\n"));
    return VERR_SEM_BUSY;
}


/**
 * Try enter a critical section.
 *
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_BUSY if the critsect was owned.
 * @retval  VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
 * @retval  VERR_SEM_DESTROYED if RTCritSectDelete was called while waiting.
 *
 * @param   pCritSect   The critical section.
 */
VMMDECL(int) PDMCritSectTryEnter(PPDMCRITSECT pCritSect)
{
#ifndef PDMCRITSECT_STRICT
    return pdmCritSectTryEnter(pCritSect, NULL);
#else
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
    return pdmCritSectTryEnter(pCritSect, &SrcPos);
#endif
}
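
/* A sketch for callers that must not block, e.g. a hypothetical timer
 * callback: VERR_SEM_BUSY is the documented busy indication and should be
 * handled by deferring the work, not treated as a fatal error:
 *
 *     int rc = PDMCritSectTryEnter(&pThis->CritSect);
 *     if (rc == VINF_SUCCESS)
 *     {
 *         ... access the shared state ...
 *         PDMCritSectLeave(&pThis->CritSect);
 *     }
 *     else if (rc == VERR_SEM_BUSY)
 *         ... defer, e.g. set a flag and retry later ...
 */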


/**
 * Try enter a critical section, with location information for debugging.
 *
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_BUSY if the critsect was owned.
 * @retval  VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
 * @retval  VERR_SEM_DESTROYED if RTCritSectDelete was called while waiting.
 *
 * @param   pCritSect   The critical section.
 * @param   uId         Some kind of locking location ID.  Typically a
 *                      return address up the stack.  Optional (0).
 * @param   pszFile     The file where the lock is being acquired from.
 *                      Optional.
 * @param   iLine       The line number in that file.  Optional (0).
 * @param   pszFunction The function where the lock is being acquired
 *                      from.  Optional.
 */
VMMDECL(int) PDMCritSectTryEnterDebug(PPDMCRITSECT pCritSect, RTHCUINTPTR uId, RT_SRC_POS_DECL)
{
#ifdef PDMCRITSECT_STRICT
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
    return pdmCritSectTryEnter(pCritSect, &SrcPos);
#else
    return pdmCritSectTryEnter(pCritSect, NULL);
#endif
}


#ifdef IN_RING3
/**
 * Enters a PDM critical section.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @returns rcBusy when encountering a busy critical section in GC/R0.
 * @returns VERR_SEM_DESTROYED if the critical section is dead.
 *
 * @param   pCritSect   The PDM critical section to enter.
 * @param   fCallRing3  Whether this is a VMMRZCallRing3() request.
 */
VMMR3DECL(int) PDMR3CritSectEnterEx(PPDMCRITSECT pCritSect, bool fCallRing3)
{
    int rc = PDMCritSectEnter(pCritSect, VERR_INTERNAL_ERROR);
    if (    rc == VINF_SUCCESS
        &&  fCallRing3
        &&  pCritSect->s.Core.pValidatorRec
        &&  pCritSect->s.Core.pValidatorRec->hThread != NIL_RTTHREAD)
        RTLockValidatorRecExclReleaseOwnerUnchecked(pCritSect->s.Core.pValidatorRec);
    return rc;
}
#endif /* IN_RING3 */


/**
 * Leaves a critical section entered with PDMCritSectEnter().
 *
 * @param   pCritSect   The PDM critical section to leave.
 */
VMMDECL(void) PDMCritSectLeave(PPDMCRITSECT pCritSect)
{
    AssertMsg(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic));
    Assert(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC);

    /* Check for NOP sections before asserting ownership. */
    if (pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NOP)
        return;

    /*
     * Always check that the caller is the owner (screw performance).
     */
    RTNATIVETHREAD const hNativeSelf = pdmCritSectGetNativeSelf(pCritSect);
    AssertReleaseMsgReturnVoid(pCritSect->s.Core.NativeThreadOwner == hNativeSelf,
                               ("%p %s: %p != %p; cLockers=%d cNestings=%d\n", pCritSect, R3STRING(pCritSect->s.pszName),
                                pCritSect->s.Core.NativeThreadOwner, hNativeSelf,
                                pCritSect->s.Core.cLockers, pCritSect->s.Core.cNestings));
    Assert(pCritSect->s.Core.cNestings >= 1);

    /*
     * Nested leave.
     */
    if (pCritSect->s.Core.cNestings > 1)
    {
        ASMAtomicDecS32(&pCritSect->s.Core.cNestings);
        Assert(pCritSect->s.Core.cNestings >= 1);
        ASMAtomicDecS32(&pCritSect->s.Core.cLockers);
        Assert(pCritSect->s.Core.cLockers >= 0);
        return;
    }

#ifdef IN_RING0
# if 0 /** @todo Make SUPSemEventSignal interrupt safe (handle table++) and enable this for: defined(RT_OS_LINUX) || defined(RT_OS_OS2) */
    if (1) /* SUPSemEventSignal is safe */
# else
    if (ASMIntAreEnabled())
# endif
#endif
#if defined(IN_RING3) || defined(IN_RING0)
    {
        /*
         * Leave for real.
         */
        /* update members. */
# ifdef IN_RING3
        RTSEMEVENT hEventToSignal = pCritSect->s.EventToSignal;
        pCritSect->s.EventToSignal = NIL_RTSEMEVENT;
# if defined(PDMCRITSECT_STRICT)
        if (pCritSect->s.Core.pValidatorRec->hThread != NIL_RTTHREAD)
            RTLockValidatorRecExclReleaseOwnerUnchecked(pCritSect->s.Core.pValidatorRec);
# endif
        Assert(!pCritSect->s.Core.pValidatorRec || pCritSect->s.Core.pValidatorRec->hThread == NIL_RTTHREAD);
# endif
        ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
        ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD);
        ASMAtomicDecS32(&pCritSect->s.Core.cNestings);
        Assert(pCritSect->s.Core.cNestings == 0);

        /* stop and decrement lockers. */
        STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);
        ASMCompilerBarrier();
        if (ASMAtomicDecS32(&pCritSect->s.Core.cLockers) >= 0)
        {
            /* Someone is waiting, wake up one of them. */
            SUPSEMEVENT     hEvent   = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
            PSUPDRVSESSION  pSession = pCritSect->s.CTX_SUFF(pVM)->pSession;
            int rc = SUPSemEventSignal(pSession, hEvent);
            AssertRC(rc);
        }

# ifdef IN_RING3
        /* Signal exit event. */
        if (hEventToSignal != NIL_RTSEMEVENT)
        {
            LogBird(("Signalling %#x\n", hEventToSignal));
            int rc = RTSemEventSignal(hEventToSignal);
            AssertRC(rc);
        }
# endif

# if defined(DEBUG_bird) && defined(IN_RING0)
        VMMTrashVolatileXMMRegs();
# endif
    }
#endif /* IN_RING3 || IN_RING0 */
#ifdef IN_RING0
    else
#endif
#if defined(IN_RING0) || defined(IN_RC)
    {
        /*
         * Try leave it.
         */
        if (pCritSect->s.Core.cLockers == 0)
        {
            ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 0);
            RTNATIVETHREAD hNativeThread = pCritSect->s.Core.NativeThreadOwner;
            ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
            STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);

            ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD);
            if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, -1, 0))
                return;

            /* darn, someone raced in on us. */
            ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, hNativeThread);
            STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
            Assert(pCritSect->s.Core.cNestings == 0);
            ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 1);
        }
        ASMAtomicOrU32(&pCritSect->s.Core.fFlags, PDMCRITSECT_FLAGS_PENDING_UNLOCK);

        /*
         * Queue the request.
         */
        PVM         pVM   = pCritSect->s.CTX_SUFF(pVM); AssertPtr(pVM);
        PVMCPU      pVCpu = VMMGetCpu(pVM);             AssertPtr(pVCpu);
        uint32_t    i     = pVCpu->pdm.s.cQueuedCritSectLeaves++;
        LogFlow(("PDMCritSectLeave: [%d]=%p => R3\n", i, pCritSect));
        AssertFatal(i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectsLeaves));
        pVCpu->pdm.s.apQueuedCritSectsLeaves[i] = MMHyperCCToR3(pVM, pCritSect);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_PDM_CRITSECT);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
        STAM_REL_COUNTER_INC(&pVM->pdm.s.StatQueuedCritSectLeaves);
        STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZUnlock);
    }
#endif /* IN_RING0 || IN_RC */
}
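
/* A nesting sketch (hypothetical caller): every successful enter must be
 * paired with exactly one leave.  cNestings tracks recursion by the owner
 * thread, and only the final leave clears the owner and wakes any waiters:
 *
 *     PDMCritSectEnter(&pThis->CritSect, VINF_SUCCESS);    cNestings == 1
 *     PDMCritSectEnter(&pThis->CritSect, VINF_SUCCESS);    cNestings == 2
 *     PDMCritSectLeave(&pThis->CritSect);                  cNestings == 1
 *     PDMCritSectLeave(&pThis->CritSect);                  released, waiters signalled
 */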


#if defined(IN_RING3) || defined(IN_RING0)
/**
 * Process the critical sections queued for ring-3 'leave'.
 *
 * @param   pVCpu   The VMCPU handle.
 */
VMMDECL(void) PDMCritSectFF(PVMCPU pVCpu)
{
    Assert(pVCpu->pdm.s.cQueuedCritSectLeaves > 0);

    const RTUINT c = pVCpu->pdm.s.cQueuedCritSectLeaves;
    for (RTUINT i = 0; i < c; i++)
    {
# ifdef IN_RING3
        PPDMCRITSECT pCritSect = pVCpu->pdm.s.apQueuedCritSectsLeaves[i];
# else
        PPDMCRITSECT pCritSect = (PPDMCRITSECT)MMHyperR3ToCC(pVCpu->CTX_SUFF(pVM), pVCpu->pdm.s.apQueuedCritSectsLeaves[i]);
# endif

        PDMCritSectLeave(pCritSect);
        LogFlow(("PDMR3CritSectFF: %p\n", pCritSect));
    }

    pVCpu->pdm.s.cQueuedCritSectLeaves = 0;
    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PDM_CRITSECT);
}
#endif /* IN_RING3 || IN_RING0 */
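
/* A sketch of how the queued-leave force-action is typically drained by the
 * caller (assumption: an EM-style loop testing the flag with VMCPU_FF_ISSET
 * from VBox/vmm/vm.h; the actual call site is outside this file):
 *
 *     if (VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PDM_CRITSECT))
 *         PDMCritSectFF(pVCpu);
 */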


/**
 * Checks the caller is the owner of the critical section.
 *
 * @returns true if owner.
 * @returns false if not owner.
 * @param   pCritSect   The critical section.
 */
VMMDECL(bool) PDMCritSectIsOwner(PCPDMCRITSECT pCritSect)
{
#ifdef IN_RING3
    return RTCritSectIsOwner(&pCritSect->s.Core);
#else
    PVM     pVM   = pCritSect->s.CTX_SUFF(pVM); AssertPtr(pVM);
    PVMCPU  pVCpu = VMMGetCpu(pVM);             AssertPtr(pVCpu);
    if (pCritSect->s.Core.NativeThreadOwner != pVCpu->hNativeThread)
        return false;
    return (pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK) == 0
        || pCritSect->s.Core.cNestings > 1;
#endif
}
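
/* A typical sanity-check sketch (pThis and uGuardedReg are hypothetical):
 * assert ownership before touching state that the section guards.
 *
 *     Assert(PDMCritSectIsOwner(&pThis->CritSect));
 *     pThis->uGuardedReg = uValue;
 */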


/**
 * Checks the specified VCPU is the owner of the critical section.
 *
 * @returns true if owner.
 * @returns false if not owner.
 * @param   pCritSect   The critical section.
 * @param   pVCpu       The virtual CPU handle.
 */
VMMDECL(bool) PDMCritSectIsOwnerEx(PCPDMCRITSECT pCritSect, PVMCPU pVCpu)
{
#ifdef IN_RING3
    NOREF(pVCpu);
    return RTCritSectIsOwner(&pCritSect->s.Core);
#else
    Assert(&pVCpu->CTX_SUFF(pVM)->aCpus[pVCpu->idCpu] == pVCpu);
    if (pCritSect->s.Core.NativeThreadOwner != pVCpu->hNativeThread)
        return false;
    return (pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK) == 0
        || pCritSect->s.Core.cNestings > 1;
#endif
}


/**
 * Checks if anyone is waiting on the critical section we own.
 *
 * @returns true if someone is waiting.
 * @returns false if no one is waiting.
 * @param   pCritSect   The critical section.
 */
VMMDECL(bool) PDMCritSectHasWaiters(PCPDMCRITSECT pCritSect)
{
    AssertReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, false);
    Assert(pCritSect->s.Core.NativeThreadOwner == pdmCritSectGetNativeSelf(pCritSect));
    return pCritSect->s.Core.cLockers >= pCritSect->s.Core.cNestings;
}


/**
 * Checks if a critical section is initialized or not.
 *
 * @returns true if initialized.
 * @returns false if not initialized.
 * @param   pCritSect   The critical section.
 */
VMMDECL(bool) PDMCritSectIsInitialized(PCPDMCRITSECT pCritSect)
{
    return RTCritSectIsInitialized(&pCritSect->s.Core);
}


/**
 * Gets the recursion depth.
 *
 * @returns The recursion depth.
 * @param   pCritSect   The critical section.
 */
VMMDECL(uint32_t) PDMCritSectGetRecursion(PCPDMCRITSECT pCritSect)
{
    return RTCritSectGetRecursion(&pCritSect->s.Core);
}