VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PDMAllCritSect.cpp@38035

Last change on this file since 38035 was 38035, checked in by vboxsync, 14 years ago

PDMCritSectLeave: Don't leave if we're not the owner, panic instead (later, just ignore).

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 26.2 KB
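
Editorial note: the short sketch below is not part of the checked-in file; it only illustrates the enter/leave pairing that the r38035 change enforces (PDMCritSectLeave now release-asserts when the caller is not the owner). The device type, state member and handler name are hypothetical; PDMCritSectEnter(), PDMCritSectLeave(), VERR_SEM_BUSY and VINF_SUCCESS are taken from the listing itself.

static int hypotheticalDevMmioWrite(PHYPOTHETICALDEV pThis)
{
    /* In R0/RC a busy section makes Enter return rcBusy (VERR_SEM_BUSY here) instead of blocking. */
    int rc = PDMCritSectEnter(&pThis->CritSect, VERR_SEM_BUSY);
    if (rc != VINF_SUCCESS)
        return rc;                          /* Busy or destroyed: bail out without leaving. */

    /* ... access device state guarded by the section ... */

    PDMCritSectLeave(&pThis->CritSect);     /* Caller must be the owner, see r38035. */
    return VINF_SUCCESS;
}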
1/* $Id: PDMAllCritSect.cpp 38035 2011-07-18 16:19:51Z vboxsync $ */
2/** @file
3 * PDM - Critical Sections, All Contexts.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_PDM//_CRITSECT
23#include "PDMInternal.h"
24#include <VBox/vmm/pdmcritsect.h>
25#include <VBox/vmm/mm.h>
26#include <VBox/vmm/vmm.h>
27#include <VBox/vmm/vm.h>
28#include <VBox/err.h>
29#include <VBox/vmm/hwaccm.h>
30
31#include <VBox/log.h>
32#include <iprt/asm.h>
33#include <iprt/asm-amd64-x86.h>
34#include <iprt/assert.h>
35#ifdef IN_RING3
36# include <iprt/lockvalidator.h>
37# include <iprt/semaphore.h>
38#endif
39#if defined(IN_RING3) || defined(IN_RING0)
40# include <iprt/thread.h>
41#endif
42
43
44/*******************************************************************************
45* Defined Constants And Macros *
46*******************************************************************************/
47/** The number of loops to spin for in ring-3. */
48#define PDMCRITSECT_SPIN_COUNT_R3 20
49/** The number of loops to spin for in ring-0. */
50#define PDMCRITSECT_SPIN_COUNT_R0 256
51/** The number of loops to spin for in the raw-mode context. */
52#define PDMCRITSECT_SPIN_COUNT_RC 256
53
54
55/* Undefine the automatic VBOX_STRICT API mappings. */
56#undef PDMCritSectEnter
57#undef PDMCritSectTryEnter
58
59
60/**
61 * Gets the ring-3 native thread handle of the calling thread.
62 *
63 * @returns native thread handle (ring-3).
64 * @param pCritSect The critical section. This is used in R0 and RC.
65 */
66DECL_FORCE_INLINE(RTNATIVETHREAD) pdmCritSectGetNativeSelf(PCPDMCRITSECT pCritSect)
67{
68#ifdef IN_RING3
69 NOREF(pCritSect);
70 RTNATIVETHREAD hNativeSelf = RTThreadNativeSelf();
71#else
72 AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, ("%RX32\n", pCritSect->s.Core.u32Magic),
73 NIL_RTNATIVETHREAD);
74 PVM pVM = pCritSect->s.CTX_SUFF(pVM); AssertPtr(pVM);
75 PVMCPU pVCpu = VMMGetCpu(pVM); AssertPtr(pVCpu);
76 RTNATIVETHREAD hNativeSelf = pVCpu->hNativeThread; Assert(hNativeSelf != NIL_RTNATIVETHREAD);
77#endif
78 return hNativeSelf;
79}
80
81
82/**
83 * Tail code called when we've won the battle for the lock.
84 *
85 * @returns VINF_SUCCESS.
86 *
87 * @param pCritSect The critical section.
88 * @param hNativeSelf The native handle of this thread.
89 */
90DECL_FORCE_INLINE(int) pdmCritSectEnterFirst(PPDMCRITSECT pCritSect, RTNATIVETHREAD hNativeSelf, PCRTLOCKVALSRCPOS pSrcPos)
91{
92 AssertMsg(pCritSect->s.Core.NativeThreadOwner == NIL_RTNATIVETHREAD, ("NativeThreadOwner=%p\n", pCritSect->s.Core.NativeThreadOwner));
93 Assert(!(pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK));
94
95 ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 1);
96 Assert(pCritSect->s.Core.cNestings == 1);
97 ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, hNativeSelf);
98
99# ifdef PDMCRITSECT_STRICT
100 RTLockValidatorRecExclSetOwner(pCritSect->s.Core.pValidatorRec, NIL_RTTHREAD, pSrcPos, true);
101# endif
102
103 STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
104 return VINF_SUCCESS;
105}
106
107
108#if defined(IN_RING3) || defined(IN_RING0)
109/**
110 * Deals with the contended case in ring-3 and ring-0.
111 *
112 * @returns VINF_SUCCESS or VERR_SEM_DESTROYED.
113 * @param pCritSect The critsect.
114 * @param hNativeSelf The native thread handle.
115 */
116static int pdmR3R0CritSectEnterContended(PPDMCRITSECT pCritSect, RTNATIVETHREAD hNativeSelf, PCRTLOCKVALSRCPOS pSrcPos)
117{
118 /*
119 * Start waiting.
120 */
121 if (ASMAtomicIncS32(&pCritSect->s.Core.cLockers) == 0)
122 return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
123# ifdef IN_RING3
124 STAM_COUNTER_INC(&pCritSect->s.StatContentionR3);
125# else
126 STAM_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
127# endif
128
129 /*
130 * The wait loop.
131 */
132 PSUPDRVSESSION pSession = pCritSect->s.CTX_SUFF(pVM)->pSession;
133 SUPSEMEVENT hEvent = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
134# ifdef IN_RING3
135# ifdef PDMCRITSECT_STRICT
136 RTTHREAD hThreadSelf = RTThreadSelfAutoAdopt();
137 int rc2 = RTLockValidatorRecExclCheckOrder(pCritSect->s.Core.pValidatorRec, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
138 if (RT_FAILURE(rc2))
139 return rc2;
140# else
141 RTTHREAD hThreadSelf = RTThreadSelf();
142# endif
143# endif
144 for (;;)
145 {
146# ifdef PDMCRITSECT_STRICT
147 int rc9 = RTLockValidatorRecExclCheckBlocking(pCritSect->s.Core.pValidatorRec, hThreadSelf, pSrcPos,
148 !(pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NO_NESTING),
149 RT_INDEFINITE_WAIT, RTTHREADSTATE_CRITSECT, true);
150 if (RT_FAILURE(rc9))
151 return rc9;
152# elif defined(IN_RING3)
153 RTThreadBlocking(hThreadSelf, RTTHREADSTATE_CRITSECT, true);
154# endif
155 int rc = SUPSemEventWaitNoResume(pSession, hEvent, RT_INDEFINITE_WAIT);
156# ifdef IN_RING3
157 RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_CRITSECT);
158# endif
159
160 if (RT_UNLIKELY(pCritSect->s.Core.u32Magic != RTCRITSECT_MAGIC))
161 return VERR_SEM_DESTROYED;
162 if (rc == VINF_SUCCESS)
163 return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
164 AssertMsg(rc == VERR_INTERRUPTED, ("rc=%Rrc\n", rc));
165 }
166 /* won't get here */
167}
168#endif /* IN_RING3 || IN_RING0 */
169
170
171/**
172 * Common worker for the debug and normal APIs.
173 *
174 * @returns VINF_SUCCESS if entered successfully.
175 * @returns rcBusy when encountering a busy critical section in GC/R0.
176 * @returns VERR_SEM_DESTROYED if the critical section is dead.
177 *
178 * @param pCritSect The PDM critical section to enter.
179 * @param rcBusy The status code to return when we're in GC or R0
180 * and the section is busy.
181 */
182DECL_FORCE_INLINE(int) pdmCritSectEnter(PPDMCRITSECT pCritSect, int rcBusy, PCRTLOCKVALSRCPOS pSrcPos)
183{
184 Assert(pCritSect->s.Core.cNestings < 8); /* useful to catch incorrect locking */
185 Assert(pCritSect->s.Core.cNestings >= 0);
186
187 /*
188 * If the critical section has already been destroyed, then inform the caller.
189 */
190 AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC,
191 ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic),
192 VERR_SEM_DESTROYED);
193
194 /*
195 * See if we're lucky.
196 */
197 /* NOP ... */
198 if (pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NOP)
199 return VINF_SUCCESS;
200
201 RTNATIVETHREAD hNativeSelf = pdmCritSectGetNativeSelf(pCritSect);
202 /* ... not owned ... */
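    /* Editorial note, not in the checked-in file: cLockers is -1 while the
       section is free.  The compare-and-exchange below claims it by moving
       -1 -> 0; the contended path increments it once per waiter, and
       PDMCritSectLeave() decrements it, signalling the event semaphore when
       the result is still >= 0 (i.e. somebody is queued). */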
203 if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
204 return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
205
206 /* ... or nested. */
207 if (pCritSect->s.Core.NativeThreadOwner == hNativeSelf)
208 {
209 ASMAtomicIncS32(&pCritSect->s.Core.cLockers);
210 ASMAtomicIncS32(&pCritSect->s.Core.cNestings);
211 Assert(pCritSect->s.Core.cNestings > 1);
212 return VINF_SUCCESS;
213 }
214
215 /*
216 * Spin for a bit without incrementing the counter.
217 */
218 /** @todo Move this to cfgm variables since it doesn't make sense to spin on
219 * uniprocessor systems. */
220 int32_t cSpinsLeft = CTX_SUFF(PDMCRITSECT_SPIN_COUNT_);
221 while (cSpinsLeft-- > 0)
222 {
223 if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
224 return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
225 ASMNopPause();
226 /** @todo Should use monitor/mwait on e.g. &cLockers here, possibly with a
227 cli'ed pending-preemption check up front using sti w/ instruction fusing
228 for avoiding races. Hmm ... This is assuming the other party is actually
229 executing code on another CPU ... which we could keep track of if we
230 wanted. */
231 }
232
233#ifdef IN_RING3
234 /*
235 * Take the slow path.
236 */
237 return pdmR3R0CritSectEnterContended(pCritSect, hNativeSelf, pSrcPos);
238
239#else
240# ifdef IN_RING0
241 /** @todo If preemption is disabled it means we're in VT-x/AMD-V context
242 * and would be better off switching out of that while waiting for
243 * the lock. Several of the locks jump back to ring-3 just to
244 * get the lock; the ring-3 code will then call the kernel to do
245 * the lock wait, and when the call returns it will call ring-0
246 * again and resume in setjmp style. Not very efficient. */
247# if 0
248 if (ASMIntAreEnabled()) /** @todo this can be handled as well by changing
249 * callers not prepared for longjmp/blocking to
250 * use PDMCritSectTryEnter. */
251 {
252 /*
253 * Leave HWACCM context while waiting if necessary.
254 */
255 int rc;
256 if (RTThreadPreemptIsEnabled(NIL_RTTHREAD))
257 {
258 STAM_REL_COUNTER_ADD(&pCritSect->s.StatContentionRZLock, 1000000);
259 rc = pdmR3R0CritSectEnterContended(pCritSect, hNativeSelf, pSrcPos);
260 }
261 else
262 {
263 STAM_REL_COUNTER_ADD(&pCritSect->s.StatContentionRZLock, 1000000000);
264 PVM pVM = pCritSect->s.CTX_SUFF(pVM);
265 PVMCPU pVCpu = VMMGetCpu(pVM);
266 HWACCMR0Leave(pVM, pVCpu);
267 RTThreadPreemptRestore(NIL_RTTHREAD, ????);
268
269 rc = pdmR3R0CritSectEnterContended(pCritSect, hNativeSelf, pSrcPos);
270
271 RTThreadPreemptDisable(NIL_RTTHREAD, ????);
272 HWACCMR0Enter(pVM, pVCpu);
273 }
274 return rc;
275 }
276# else
277 /*
278 * If preemption hasn't been disabled, we can block here in ring-0.
279 */
280 if ( RTThreadPreemptIsEnabled(NIL_RTTHREAD)
281 && ASMIntAreEnabled())
282 return pdmR3R0CritSectEnterContended(pCritSect, hNativeSelf, pSrcPos);
283# endif
284#endif /* IN_RING0 */
285
286 STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
287
288 /*
289 * Call ring-3 to acquire the critical section?
290 */
291 if (rcBusy == VINF_SUCCESS)
292 {
293 PVM pVM = pCritSect->s.CTX_SUFF(pVM); AssertPtr(pVM);
294 PVMCPU pVCpu = VMMGetCpu(pVM); AssertPtr(pVCpu);
295 return VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_PDM_CRIT_SECT_ENTER, MMHyperCCToR3(pVM, pCritSect));
296 }
297
298 /*
299 * Return busy.
300 */
301 LogFlow(("PDMCritSectEnter: locked => R3 (%Rrc)\n", rcBusy));
302 return rcBusy;
303#endif /* !IN_RING3 */
304}
305
306
307/**
308 * Enters a PDM critical section.
309 *
310 * @returns VINF_SUCCESS if entered successfully.
311 * @returns rcBusy when encountering a busy critical section in GC/R0.
312 * @returns VERR_SEM_DESTROYED if the critical section is dead.
313 *
314 * @param pCritSect The PDM critical section to enter.
315 * @param rcBusy The status code to return when we're in GC or R0
316 * and the section is busy. Pass VINF_SUCCESS to
317 * acquire the critical section through a ring-3
318 * call if necessary.
319 */
320VMMDECL(int) PDMCritSectEnter(PPDMCRITSECT pCritSect, int rcBusy)
321{
322 int rc;
323#ifndef IN_RING3
324 if (rcBusy == VINF_SUCCESS)
325 {
326# ifndef PDMCRITSECT_STRICT
327 rc = pdmCritSectEnter(pCritSect, VERR_SEM_BUSY, NULL);
328# else
329 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
330 rc = pdmCritSectEnter(pCritSect, VERR_SEM_BUSY, &SrcPos);
331# endif
332 }
333 else
334#endif /* !IN_RING3 */
335 {
336#ifndef PDMCRITSECT_STRICT
337 rc = pdmCritSectEnter(pCritSect, rcBusy, NULL);
338#else
339 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
340 rc = pdmCritSectEnter(pCritSect, rcBusy, &SrcPos);
341#endif
342 }
343 return rc;
344}
345
346
347/**
348 * Enters a PDM critical section, with location information for debugging.
349 *
350 * @returns VINF_SUCCESS if entered successfully.
351 * @returns rcBusy when encountering a busy critical section in GC/R0.
352 * @returns VERR_SEM_DESTROYED if the critical section is dead.
353 *
354 * @param pCritSect The PDM critical section to enter.
355 * @param rcBusy The status code to return when we're in GC or R0
356 * and the section is busy. Pass VINF_SUCCESS to
357 * acquire the critical section through a ring-3
358 * call if necessary.
359 * @param uId Some kind of locking location ID. Typically a
360 * return address up the stack. Optional (0).
361 * @param pszFile The file where the lock is being acquired from.
362 * Optional.
363 * @param iLine The line number in that file. Optional (0).
364 * @param pszFunction The function where the lock is being acquired
365 * from. Optional.
366 */
367VMMDECL(int) PDMCritSectEnterDebug(PPDMCRITSECT pCritSect, int rcBusy, RTHCUINTPTR uId, RT_SRC_POS_DECL)
368{
369#ifdef PDMCRITSECT_STRICT
370 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
371 return pdmCritSectEnter(pCritSect, rcBusy, &SrcPos);
372#else
373 return pdmCritSectEnter(pCritSect, rcBusy, NULL);
374#endif
375}
376
377
378/**
379 * Common worker for the debug and normal APIs.
380 *
381 * @retval VINF_SUCCESS on success.
382 * @retval VERR_SEM_BUSY if the critsect was owned.
383 * @retval VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
384 * @retval VERR_SEM_DESTROYED if RTCritSectDelete was called while waiting.
385 *
386 * @param pCritSect The critical section.
387 */
388static int pdmCritSectTryEnter(PPDMCRITSECT pCritSect, PCRTLOCKVALSRCPOS pSrcPos)
389{
390 /*
391 * If the critical section has already been destroyed, then inform the caller.
392 */
393 AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC,
394 ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic),
395 VERR_SEM_DESTROYED);
396
397 /*
398 * See if we're lucky.
399 */
400 /* NOP ... */
401 if (pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NOP)
402 return VINF_SUCCESS;
403
404 RTNATIVETHREAD hNativeSelf = pdmCritSectGetNativeSelf(pCritSect);
405 /* ... not owned ... */
406 if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
407 return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
408
409 /* ... or nested. */
410 if (pCritSect->s.Core.NativeThreadOwner == hNativeSelf)
411 {
412 ASMAtomicIncS32(&pCritSect->s.Core.cLockers);
413 ASMAtomicIncS32(&pCritSect->s.Core.cNestings);
414 Assert(pCritSect->s.Core.cNestings > 1);
415 return VINF_SUCCESS;
416 }
417
418 /* no spinning */
419
420 /*
421 * Return busy.
422 */
423#ifdef IN_RING3
424 STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionR3);
425#else
426 STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
427#endif
428 LogFlow(("PDMCritSectTryEnter: locked\n"));
429 return VERR_SEM_BUSY;
430}
431
432
433/**
434 * Tries to enter a critical section.
435 *
436 * @retval VINF_SUCCESS on success.
437 * @retval VERR_SEM_BUSY if the critsect was owned.
438 * @retval VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
439 * @retval VERR_SEM_DESTROYED if RTCritSectDelete was called while waiting.
440 *
441 * @param pCritSect The critical section.
442 */
443VMMDECL(int) PDMCritSectTryEnter(PPDMCRITSECT pCritSect)
444{
445#ifndef PDMCRITSECT_STRICT
446 return pdmCritSectTryEnter(pCritSect, NULL);
447#else
448 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
449 return pdmCritSectTryEnter(pCritSect, &SrcPos);
450#endif
451}
452
453
454/**
455 * Tries to enter a critical section, with location information for debugging.
456 *
457 * @retval VINF_SUCCESS on success.
458 * @retval VERR_SEM_BUSY if the critsect was owned.
459 * @retval VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
460 * @retval VERR_SEM_DESTROYED if RTCritSectDelete was called while waiting.
461 *
462 * @param pCritSect The critical section.
463 * @param uId Some kind of locking location ID. Typically a
464 * return address up the stack. Optional (0).
465 * @param pszFile The file where the lock is being acquired from.
466 * Optional.
467 * @param iLine The line number in that file. Optional (0).
468 * @param pszFunction The function where the lock is being acquired
469 * from. Optional.
470 */
471VMMDECL(int) PDMCritSectTryEnterDebug(PPDMCRITSECT pCritSect, RTHCUINTPTR uId, RT_SRC_POS_DECL)
472{
473#ifdef PDMCRITSECT_STRICT
474 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
475 return pdmCritSectTryEnter(pCritSect, &SrcPos);
476#else
477 return pdmCritSectTryEnter(pCritSect, NULL);
478#endif
479}
480
481
482#ifdef IN_RING3
483/**
484 * Enters a PDM critical section.
485 *
486 * @returns VINF_SUCCESS if entered successfully.
487 * @returns rcBusy when encountering a busy critical section in GC/R0.
488 * @returns VERR_SEM_DESTROYED if the critical section is dead.
489 *
490 * @param pCritSect The PDM critical section to enter.
491 * @param fCallRing3 Whether this is a VMMRZCallRing3() request.
492 */
493VMMR3DECL(int) PDMR3CritSectEnterEx(PPDMCRITSECT pCritSect, bool fCallRing3)
494{
495 int rc = PDMCritSectEnter(pCritSect, VERR_INTERNAL_ERROR);
496 if ( rc == VINF_SUCCESS
497 && fCallRing3
498 && pCritSect->s.Core.pValidatorRec
499 && pCritSect->s.Core.pValidatorRec->hThread != NIL_RTTHREAD)
500 RTLockValidatorRecExclReleaseOwnerUnchecked(pCritSect->s.Core.pValidatorRec);
501 return rc;
502}
503#endif /* IN_RING3 */
504
505
506/**
507 * Leaves a critical section entered with PDMCritSectEnter().
508 *
509 * @param pCritSect The PDM critical section to leave.
510 */
511VMMDECL(void) PDMCritSectLeave(PPDMCRITSECT pCritSect)
512{
513 AssertMsg(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic));
514 Assert(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC);
515
516 /* Check for NOP sections before asserting ownership. */
517 if (pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NOP)
518 return;
519
520 /*
521 * Always check that the caller is the owner (screw performance).
522 */
523 RTNATIVETHREAD const hNativeSelf = pdmCritSectGetNativeSelf(pCritSect);
524 if (RT_UNLIKELY(pCritSect->s.Core.NativeThreadOwner != hNativeSelf))
525 {
526#if 0
527 AssertMsgFailed(("%p %s: %p != %p; cLockers=%d cNestings=%d\n", pCritSect, R3STRING(pCritSect->s.pszName),
528 pCritSect->s.Core.NativeThreadOwner, hNativeSelf,
529 pCritSect->s.Core.cLockers, pCritSect->s.Core.cNestings));
530#else
531 AssertReleaseMsgFailed(("%p %s: %p != %p; cLockers=%d cNestings=%d\n", pCritSect, R3STRING(pCritSect->s.pszName),
532 pCritSect->s.Core.NativeThreadOwner, hNativeSelf,
533 pCritSect->s.Core.cLockers, pCritSect->s.Core.cNestings));
534#endif
535 return;
536 }
537 Assert(pCritSect->s.Core.cNestings >= 1);
538
539 /*
540 * Nested leave.
541 */
542 if (pCritSect->s.Core.cNestings > 1)
543 {
544 ASMAtomicDecS32(&pCritSect->s.Core.cNestings);
545 Assert(pCritSect->s.Core.cNestings >= 1);
546 ASMAtomicDecS32(&pCritSect->s.Core.cLockers);
547 Assert(pCritSect->s.Core.cLockers >= 0);
548 return;
549 }
550
551#ifdef IN_RING0
552# if 0 /** @todo Make SUPSemEventSignal interrupt safe (handle table++) and enable this for: defined(RT_OS_LINUX) || defined(RT_OS_OS2) */
553 if (1) /* SUPSemEventSignal is safe */
554# else
555 if (ASMIntAreEnabled())
556# endif
557#endif
558#if defined(IN_RING3) || defined(IN_RING0)
559 {
560 /*
561 * Leave for real.
562 */
563 /* update members. */
564# ifdef IN_RING3
565 RTSEMEVENT hEventToSignal = pCritSect->s.EventToSignal;
566 pCritSect->s.EventToSignal = NIL_RTSEMEVENT;
567# if defined(PDMCRITSECT_STRICT)
568 if (pCritSect->s.Core.pValidatorRec->hThread != NIL_RTTHREAD)
569 RTLockValidatorRecExclReleaseOwnerUnchecked(pCritSect->s.Core.pValidatorRec);
570# endif
571 Assert(!pCritSect->s.Core.pValidatorRec || pCritSect->s.Core.pValidatorRec->hThread == NIL_RTTHREAD);
572# endif
573 ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
574 ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD);
575 ASMAtomicDecS32(&pCritSect->s.Core.cNestings);
576 Assert(pCritSect->s.Core.cNestings == 0);
577
578 /* stop and decrement lockers. */
579 STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);
580 ASMCompilerBarrier();
581 if (ASMAtomicDecS32(&pCritSect->s.Core.cLockers) >= 0)
582 {
583 /* Someone is waiting, wake up one of them. */
584 SUPSEMEVENT hEvent = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
585 PSUPDRVSESSION pSession = pCritSect->s.CTX_SUFF(pVM)->pSession;
586 int rc = SUPSemEventSignal(pSession, hEvent);
587 AssertRC(rc);
588 }
589
590# ifdef IN_RING3
591 /* Signal exit event. */
592 if (hEventToSignal != NIL_RTSEMEVENT)
593 {
594 LogBird(("Signalling %#x\n", hEventToSignal));
595 int rc = RTSemEventSignal(hEventToSignal);
596 AssertRC(rc);
597 }
598# endif
599
600# if defined(DEBUG_bird) && defined(IN_RING0)
601 VMMTrashVolatileXMMRegs();
602# endif
603 }
604#endif /* IN_RING3 || IN_RING0 */
605#ifdef IN_RING0
606 else
607#endif
608#if defined(IN_RING0) || defined(IN_RC)
609 {
610 /*
611 * Try leave it.
612 */
613 if (pCritSect->s.Core.cLockers == 0)
614 {
615 ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 0);
616 RTNATIVETHREAD hNativeThread = pCritSect->s.Core.NativeThreadOwner;
617 ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
618 STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);
619
620 ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD);
621 if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, -1, 0))
622 return;
623
624 /* darn, someone raced in on us. */
625 ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, hNativeThread);
626 STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
627 Assert(pCritSect->s.Core.cNestings == 0);
628 ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 1);
629 }
630 ASMAtomicOrU32(&pCritSect->s.Core.fFlags, PDMCRITSECT_FLAGS_PENDING_UNLOCK);
631
632 /*
633 * Queue the request.
634 */
635 PVM pVM = pCritSect->s.CTX_SUFF(pVM); AssertPtr(pVM);
636 PVMCPU pVCpu = VMMGetCpu(pVM); AssertPtr(pVCpu);
637 uint32_t i = pVCpu->pdm.s.cQueuedCritSectLeaves++;
638 LogFlow(("PDMCritSectLeave: [%d]=%p => R3\n", i, pCritSect));
639 AssertFatal(i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectsLeaves));
640 pVCpu->pdm.s.apQueuedCritSectsLeaves[i] = MMHyperCCToR3(pVM, pCritSect);
641 VMCPU_FF_SET(pVCpu, VMCPU_FF_PDM_CRITSECT);
642 VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
643 STAM_REL_COUNTER_INC(&pVM->pdm.s.StatQueuedCritSectLeaves);
644 STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZUnlock);
645 }
646#endif /* IN_RING0 || IN_RC */
647}
648
649
650#if defined(IN_RING3) || defined(IN_RING0)
651/**
652 * Processes the critical sections queued for ring-3 'leave'.
653 *
654 * @param pVCpu The VMCPU handle.
655 */
656VMMDECL(void) PDMCritSectFF(PVMCPU pVCpu)
657{
658 Assert(pVCpu->pdm.s.cQueuedCritSectLeaves > 0);
659
660 const RTUINT c = pVCpu->pdm.s.cQueuedCritSectLeaves;
661 for (RTUINT i = 0; i < c; i++)
662 {
663# ifdef IN_RING3
664 PPDMCRITSECT pCritSect = pVCpu->pdm.s.apQueuedCritSectsLeaves[i];
665# else
666 PPDMCRITSECT pCritSect = (PPDMCRITSECT)MMHyperR3ToCC(pVCpu->CTX_SUFF(pVM), pVCpu->pdm.s.apQueuedCritSectsLeaves[i]);
667# endif
668
669 PDMCritSectLeave(pCritSect);
670 LogFlow(("PDMR3CritSectFF: %p\n", pCritSect));
671 }
672
673 pVCpu->pdm.s.cQueuedCritSectLeaves = 0;
674 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PDM_CRITSECT);
675}
676#endif /* IN_RING3 || IN_RING0 */
677
678
679/**
680 * Checks whether the caller is the owner of the critical section.
681 *
682 * @returns true if owner.
683 * @returns false if not owner.
684 * @param pCritSect The critical section.
685 */
686VMMDECL(bool) PDMCritSectIsOwner(PCPDMCRITSECT pCritSect)
687{
688#ifdef IN_RING3
689 return RTCritSectIsOwner(&pCritSect->s.Core);
690#else
691 PVM pVM = pCritSect->s.CTX_SUFF(pVM); AssertPtr(pVM);
692 PVMCPU pVCpu = VMMGetCpu(pVM); AssertPtr(pVCpu);
693 if (pCritSect->s.Core.NativeThreadOwner != pVCpu->hNativeThread)
694 return false;
695 return (pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK) == 0
696 || pCritSect->s.Core.cNestings > 1;
697#endif
698}
699
700
701/**
702 * Checks whether the specified VCPU is the owner of the critical section.
703 *
704 * @returns true if owner.
705 * @returns false if not owner.
706 * @param pCritSect The critical section.
707 * @param pVCpu The virtual CPU handle.
708 */
709VMMDECL(bool) PDMCritSectIsOwnerEx(PCPDMCRITSECT pCritSect, PVMCPU pVCpu)
710{
711#ifdef IN_RING3
712 NOREF(pVCpu);
713 return RTCritSectIsOwner(&pCritSect->s.Core);
714#else
715 Assert(&pVCpu->CTX_SUFF(pVM)->aCpus[pVCpu->idCpu] == pVCpu);
716 if (pCritSect->s.Core.NativeThreadOwner != pVCpu->hNativeThread)
717 return false;
718 return (pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK) == 0
719 || pCritSect->s.Core.cNestings > 1;
720#endif
721}
722
723
724/**
725 * Checks if anyone is waiting on the critical section we own.
726 *
727 * @returns true if someone is waiting.
728 * @returns false if no one is waiting.
729 * @param pCritSect The critical section.
730 */
731VMMDECL(bool) PDMCritSectHasWaiters(PCPDMCRITSECT pCritSect)
732{
733 AssertReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, false);
734 Assert(pCritSect->s.Core.NativeThreadOwner == pdmCritSectGetNativeSelf(pCritSect));
735 return pCritSect->s.Core.cLockers >= pCritSect->s.Core.cNestings;
736}
737
738
739/**
740 * Checks if a critical section is initialized or not.
741 *
742 * @returns true if initialized.
743 * @returns false if not initialized.
744 * @param pCritSect The critical section.
745 */
746VMMDECL(bool) PDMCritSectIsInitialized(PCPDMCRITSECT pCritSect)
747{
748 return RTCritSectIsInitialized(&pCritSect->s.Core);
749}
750
751
752/**
753 * Gets the recursion depth.
754 *
755 * @returns The recursion depth.
756 * @param pCritSect The critical section.
757 */
758VMMDECL(uint32_t) PDMCritSectGetRecursion(PCPDMCRITSECT pCritSect)
759{
760 return RTCritSectGetRecursion(&pCritSect->s.Core);
761}
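
Editorial sketch, appended after the listing rather than into it: the non-blocking pattern around PDMCritSectTryEnter() for a caller that must never block or defer to ring-3, e.g. a polling callback. The callback name and its caller are invented for illustration; the return codes follow the @retval documentation above.

static void hypotheticalPollCallback(PPDMCRITSECT pCritSect)
{
    int rc = PDMCritSectTryEnter(pCritSect);
    if (rc == VINF_SUCCESS)
    {
        /* ... brief work under the lock ... */
        PDMCritSectLeave(pCritSect);
    }
    else
    {
        /* VERR_SEM_BUSY (or VERR_SEM_DESTROYED): skip this round rather than block. */
    }
}

For lock-validator builds the listing also provides PDMCritSectTryEnterDebug(), which records the source position of the acquisition attempt.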