VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PDMAllCritSect.cpp@48569

Last change on this file since 48569 was 45152, checked in by vboxsync, 12 years ago

PDMCritSectRw: Early morphing stage - untested, ring-3 only.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 25.4 KB
/* $Id: PDMAllCritSect.cpp 45152 2013-03-23 20:36:23Z vboxsync $ */
/** @file
 * PDM - Write-Only Critical Section, All Contexts.
 */

/*
 * Copyright (C) 2006-2013 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_PDM//_CRITSECT
#include "PDMInternal.h"
#include <VBox/vmm/pdmcritsect.h>
#include <VBox/vmm/mm.h>
#include <VBox/vmm/vmm.h>
#include <VBox/vmm/vm.h>
#include <VBox/err.h>
#include <VBox/vmm/hm.h>

#include <VBox/log.h>
#include <iprt/asm.h>
#include <iprt/asm-amd64-x86.h>
#include <iprt/assert.h>
#ifdef IN_RING3
# include <iprt/lockvalidator.h>
# include <iprt/semaphore.h>
#endif
#if defined(IN_RING3) || defined(IN_RING0)
# include <iprt/thread.h>
#endif


/*******************************************************************************
*   Defined Constants And Macros                                               *
*******************************************************************************/
/** The number of loops to spin for in ring-3. */
#define PDMCRITSECT_SPIN_COUNT_R3       20
/** The number of loops to spin for in ring-0. */
#define PDMCRITSECT_SPIN_COUNT_R0       256
/** The number of loops to spin for in the raw-mode context. */
#define PDMCRITSECT_SPIN_COUNT_RC       256


/* Undefine the automatic VBOX_STRICT API mappings. */
#undef PDMCritSectEnter
#undef PDMCritSectTryEnter


/**
 * Gets the ring-3 native thread handle of the calling thread.
 *
 * @returns native thread handle (ring-3).
 * @param   pCritSect   The critical section. This is used in R0 and RC.
 */
DECL_FORCE_INLINE(RTNATIVETHREAD) pdmCritSectGetNativeSelf(PCPDMCRITSECT pCritSect)
{
#ifdef IN_RING3
    NOREF(pCritSect);
    RTNATIVETHREAD  hNativeSelf = RTThreadNativeSelf();
#else
    AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, ("%RX32\n", pCritSect->s.Core.u32Magic),
                    NIL_RTNATIVETHREAD);
    PVM             pVM         = pCritSect->s.CTX_SUFF(pVM); AssertPtr(pVM);
    PVMCPU          pVCpu       = VMMGetCpu(pVM);             AssertPtr(pVCpu);
    RTNATIVETHREAD  hNativeSelf = pVCpu->hNativeThread;       Assert(hNativeSelf != NIL_RTNATIVETHREAD);
#endif
    return hNativeSelf;
}


/**
 * Tail code called when we've won the battle for the lock.
 *
 * @returns VINF_SUCCESS.
 *
 * @param   pCritSect       The critical section.
 * @param   hNativeSelf     The native handle of this thread.
 */
DECL_FORCE_INLINE(int) pdmCritSectEnterFirst(PPDMCRITSECT pCritSect, RTNATIVETHREAD hNativeSelf, PCRTLOCKVALSRCPOS pSrcPos)
{
    AssertMsg(pCritSect->s.Core.NativeThreadOwner == NIL_RTNATIVETHREAD, ("NativeThreadOwner=%p\n", pCritSect->s.Core.NativeThreadOwner));
    Assert(!(pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK));

    ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 1);
    Assert(pCritSect->s.Core.cNestings == 1);
    ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, hNativeSelf);

# ifdef PDMCRITSECT_STRICT
    RTLockValidatorRecExclSetOwner(pCritSect->s.Core.pValidatorRec, NIL_RTTHREAD, pSrcPos, true);
# else
    NOREF(pSrcPos);
# endif

    STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
    return VINF_SUCCESS;
}


#if defined(IN_RING3) || defined(IN_RING0)
/**
 * Deals with the contended case in ring-3 and ring-0.
 *
 * @returns VINF_SUCCESS or VERR_SEM_DESTROYED.
 * @param   pCritSect       The critsect.
 * @param   hNativeSelf     The native thread handle.
 */
static int pdmR3R0CritSectEnterContended(PPDMCRITSECT pCritSect, RTNATIVETHREAD hNativeSelf, PCRTLOCKVALSRCPOS pSrcPos)
{
    /*
     * Start waiting.
     */
    if (ASMAtomicIncS32(&pCritSect->s.Core.cLockers) == 0)
        return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
# ifdef IN_RING3
    STAM_COUNTER_INC(&pCritSect->s.StatContentionR3);
# else
    STAM_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
# endif

    /*
     * The wait loop.
     */
    PSUPDRVSESSION  pSession = pCritSect->s.CTX_SUFF(pVM)->pSession;
    SUPSEMEVENT     hEvent   = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
# ifdef IN_RING3
#  ifdef PDMCRITSECT_STRICT
    RTTHREAD        hThreadSelf = RTThreadSelfAutoAdopt();
    int rc2 = RTLockValidatorRecExclCheckOrder(pCritSect->s.Core.pValidatorRec, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
    if (RT_FAILURE(rc2))
        return rc2;
#  else
    RTTHREAD        hThreadSelf = RTThreadSelf();
#  endif
# endif
    for (;;)
    {
# ifdef PDMCRITSECT_STRICT
        int rc9 = RTLockValidatorRecExclCheckBlocking(pCritSect->s.Core.pValidatorRec, hThreadSelf, pSrcPos,
                                                      !(pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NO_NESTING),
                                                      RT_INDEFINITE_WAIT, RTTHREADSTATE_CRITSECT, true);
        if (RT_FAILURE(rc9))
            return rc9;
# elif defined(IN_RING3)
        RTThreadBlocking(hThreadSelf, RTTHREADSTATE_CRITSECT, true);
# endif
        int rc = SUPSemEventWaitNoResume(pSession, hEvent, RT_INDEFINITE_WAIT);
# ifdef IN_RING3
        RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_CRITSECT);
# endif

        if (RT_UNLIKELY(pCritSect->s.Core.u32Magic != RTCRITSECT_MAGIC))
            return VERR_SEM_DESTROYED;
        if (rc == VINF_SUCCESS)
            return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
        AssertMsg(rc == VERR_INTERRUPTED, ("rc=%Rrc\n", rc));
    }
    /* won't get here */
}
#endif /* IN_RING3 || IN_RING0 */
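
/*
 * Editorial sketch (not part of the original revision): the enter/leave paths in
 * this file all pivot on Core.cLockers.  The snippet below restates how that
 * counter appears to be used, with the same iprt/asm.h atomics; treat it as an
 * illustration only, not as an additional API.
 *
 * @code
 *  int32_t volatile cLockers = -1;  // -1 = free, 0 = owned/no waiters, N > 0 = owned + N waiters
 *
 *  // Uncontended enter (fast path in pdmCritSectEnter / pdmCritSectTryEnter):
 *  bool fOwned = ASMAtomicCmpXchgS32(&cLockers, 0, -1);
 *
 *  // Contended enter (pdmR3R0CritSectEnterContended):
 *  if (ASMAtomicIncS32(&cLockers) == 0)
 *      fOwned = true;               // the owner left in the meantime; we won after all
 *  else
 *      fOwned = false;              // block on the SUPSEMEVENT until the owner signals it
 *
 *  // Leave (PDMCritSectLeave): a result >= 0 means at least one waiter to wake up.
 *  bool fWakeWaiter = ASMAtomicDecS32(&cLockers) >= 0;
 * @endcode
 */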


/**
 * Common worker for the debug and normal APIs.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @returns rcBusy when encountering a busy critical section in GC/R0.
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pCritSect   The PDM critical section to enter.
 * @param   rcBusy      The status code to return when we're in GC or R0
 *                      and the section is busy.
 */
DECL_FORCE_INLINE(int) pdmCritSectEnter(PPDMCRITSECT pCritSect, int rcBusy, PCRTLOCKVALSRCPOS pSrcPos)
{
    Assert(pCritSect->s.Core.cNestings < 8);  /* useful to catch incorrect locking */
    Assert(pCritSect->s.Core.cNestings >= 0);

    /*
     * If the critical section has already been destroyed, then inform the caller.
     */
    AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC,
                    ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic),
                    VERR_SEM_DESTROYED);

    /*
     * See if we're lucky.
     */
    /* NOP ... */
    if (pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NOP)
        return VINF_SUCCESS;

    RTNATIVETHREAD hNativeSelf = pdmCritSectGetNativeSelf(pCritSect);
    /* ... not owned ... */
    if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
        return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);

    /* ... or nested. */
    if (pCritSect->s.Core.NativeThreadOwner == hNativeSelf)
    {
        ASMAtomicIncS32(&pCritSect->s.Core.cLockers);
        ASMAtomicIncS32(&pCritSect->s.Core.cNestings);
        Assert(pCritSect->s.Core.cNestings > 1);
        return VINF_SUCCESS;
    }

    /*
     * Spin for a bit without incrementing the counter.
     */
    /** @todo Move this to cfgm variables since it doesn't make sense to spin on UNI
     *        cpu systems. */
    int32_t cSpinsLeft = CTX_SUFF(PDMCRITSECT_SPIN_COUNT_);
    while (cSpinsLeft-- > 0)
    {
        if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
            return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
        ASMNopPause();
        /** @todo Should use monitor/mwait on e.g. &cLockers here, possibly with a
           cli'ed pendingpreemption check up front using sti w/ instruction fusing
           for avoiding races. Hmm ... This is assuming the other party is actually
           executing code on another CPU ... which we could keep track of if we
           wanted. */
    }

#ifdef IN_RING3
    /*
     * Take the slow path.
     */
    NOREF(rcBusy);
    return pdmR3R0CritSectEnterContended(pCritSect, hNativeSelf, pSrcPos);

#else
# ifdef IN_RING0
    /** @todo If preemption is disabled it means we're in VT-x/AMD-V context
     *        and would be better off switching out of that while waiting for
     *        the lock.  Several of the locks jump back to ring-3 just to
     *        get the lock, the ring-3 code will then call the kernel to do
     *        the lock wait and when the call returns it will call ring-0
     *        again and resume in setjmp style. Not very efficient. */
# if 0
    if (ASMIntAreEnabled()) /** @todo this can be handled as well by changing
                             * callers not prepared for longjmp/blocking to
                             * use PDMCritSectTryEnter. */
    {
        /*
         * Leave HM context while waiting if necessary.
         */
        int rc;
        if (RTThreadPreemptIsEnabled(NIL_RTTHREAD))
        {
            STAM_REL_COUNTER_ADD(&pCritSect->s.StatContentionRZLock, 1000000);
            rc = pdmR3R0CritSectEnterContended(pCritSect, hNativeSelf, pSrcPos);
        }
        else
        {
            STAM_REL_COUNTER_ADD(&pCritSect->s.StatContentionRZLock, 1000000000);
            PVM     pVM   = pCritSect->s.CTX_SUFF(pVM);
            PVMCPU  pVCpu = VMMGetCpu(pVM);
            HMR0Leave(pVM, pVCpu);
            RTThreadPreemptRestore(NIL_RTTHREAD, ????);

            rc = pdmR3R0CritSectEnterContended(pCritSect, hNativeSelf, pSrcPos);

            RTThreadPreemptDisable(NIL_RTTHREAD, ????);
            HMR0Enter(pVM, pVCpu);
        }
        return rc;
    }
# else
    /*
     * Preemption hasn't been disabled, so we can block here in ring-0.
     */
    if (   RTThreadPreemptIsEnabled(NIL_RTTHREAD)
        && ASMIntAreEnabled())
        return pdmR3R0CritSectEnterContended(pCritSect, hNativeSelf, pSrcPos);
# endif
#endif /* IN_RING0 */

    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);

    /*
     * Call ring-3 to acquire the critical section?
     */
    if (rcBusy == VINF_SUCCESS)
    {
        PVM     pVM   = pCritSect->s.CTX_SUFF(pVM); AssertPtr(pVM);
        PVMCPU  pVCpu = VMMGetCpu(pVM);             AssertPtr(pVCpu);
        return VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_PDM_CRIT_SECT_ENTER, MMHyperCCToR3(pVM, pCritSect));
    }

    /*
     * Return busy.
     */
    LogFlow(("PDMCritSectEnter: locked => R3 (%Rrc)\n", rcBusy));
    return rcBusy;
#endif /* !IN_RING3 */
}


/**
 * Enters a PDM critical section.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @returns rcBusy when encountering a busy critical section in RC/R0.
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pCritSect   The PDM critical section to enter.
 * @param   rcBusy      The status code to return when we're in RC or R0
 *                      and the section is busy.  Pass VINF_SUCCESS to
 *                      acquire the critical section through a ring-3
 *                      call if necessary.
 */
VMMDECL(int) PDMCritSectEnter(PPDMCRITSECT pCritSect, int rcBusy)
{
#ifndef PDMCRITSECT_STRICT
    return pdmCritSectEnter(pCritSect, rcBusy, NULL);
#else
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
    return pdmCritSectEnter(pCritSect, rcBusy, &SrcPos);
#endif
}
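
/*
 * Editorial usage sketch (not part of the original source): how device code that
 * can run in R3, R0 and RC typically wraps a shared register access with
 * PDMCritSectEnter/PDMCritSectLeave.  MYDEVSTATE, pThis, CritSect and uReg are
 * hypothetical names, and VINF_IOM_R3_MMIO_READ merely stands in for whatever
 * rcBusy status the caller is able to propagate back to ring-3.
 *
 * @code
 *  typedef struct MYDEVSTATE
 *  {
 *      PDMCRITSECT CritSect;   // protects uReg
 *      uint32_t    uReg;
 *  } MYDEVSTATE;
 *
 *  static int myDevReadReg(MYDEVSTATE *pThis, uint32_t *puValue)
 *  {
 *      // In R0/RC this returns the busy status instead of blocking;
 *      // in R3 it simply blocks until the section is free.
 *      int rc = PDMCritSectEnter(&pThis->CritSect, VINF_IOM_R3_MMIO_READ);
 *      if (rc != VINF_SUCCESS)
 *          return rc;
 *      *puValue = pThis->uReg;
 *      PDMCritSectLeave(&pThis->CritSect);
 *      return VINF_SUCCESS;
 *  }
 * @endcode
 */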


/**
 * Enters a PDM critical section, with location information for debugging.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @returns rcBusy when encountering a busy critical section in RC/R0.
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pCritSect   The PDM critical section to enter.
 * @param   rcBusy      The status code to return when we're in RC or R0
 *                      and the section is busy.  Pass VINF_SUCCESS to
 *                      acquire the critical section through a ring-3
 *                      call if necessary.
 * @param   uId         Some kind of locking location ID.  Typically a
 *                      return address up the stack.  Optional (0).
 * @param   pszFile     The file where the lock is being acquired from.
 *                      Optional.
 * @param   iLine       The line number in that file.  Optional (0).
 * @param   pszFunction The function where the lock is being acquired
 *                      from.  Optional.
 */
VMMDECL(int) PDMCritSectEnterDebug(PPDMCRITSECT pCritSect, int rcBusy, RTHCUINTPTR uId, RT_SRC_POS_DECL)
{
#ifdef PDMCRITSECT_STRICT
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
    return pdmCritSectEnter(pCritSect, rcBusy, &SrcPos);
#else
    NOREF(uId); RT_SRC_POS_NOREF();
    return pdmCritSectEnter(pCritSect, rcBusy, NULL);
#endif
}
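
/*
 * Editorial sketch (not part of the original source): calling the debug variant
 * directly just adds an explicit source position and caller id for the lock
 * validator, which is presumably what the strict API mapping (see the #undef
 * block near the top of this file) resolves to.  pThis->CritSect is the
 * hypothetical member from the earlier sketch.
 *
 * @code
 *  int rc = PDMCritSectEnterDebug(&pThis->CritSect, VERR_SEM_BUSY,
 *                                 (RTHCUINTPTR)ASMReturnAddress(), RT_SRC_POS);
 * @endcode
 */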


/**
 * Common worker for the debug and normal APIs.
 *
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_BUSY if the critsect was owned.
 * @retval  VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pCritSect   The critical section.
 */
static int pdmCritSectTryEnter(PPDMCRITSECT pCritSect, PCRTLOCKVALSRCPOS pSrcPos)
{
    /*
     * If the critical section has already been destroyed, then inform the caller.
     */
    AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC,
                    ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic),
                    VERR_SEM_DESTROYED);

    /*
     * See if we're lucky.
     */
    /* NOP ... */
    if (pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NOP)
        return VINF_SUCCESS;

    RTNATIVETHREAD hNativeSelf = pdmCritSectGetNativeSelf(pCritSect);
    /* ... not owned ... */
    if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
        return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);

    /* ... or nested. */
    if (pCritSect->s.Core.NativeThreadOwner == hNativeSelf)
    {
        ASMAtomicIncS32(&pCritSect->s.Core.cLockers);
        ASMAtomicIncS32(&pCritSect->s.Core.cNestings);
        Assert(pCritSect->s.Core.cNestings > 1);
        return VINF_SUCCESS;
    }

    /* no spinning */

    /*
     * Return busy.
     */
#ifdef IN_RING3
    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionR3);
#else
    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
#endif
    LogFlow(("PDMCritSectTryEnter: locked\n"));
    return VERR_SEM_BUSY;
}


/**
 * Try to enter a critical section.
 *
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_BUSY if the critsect was owned.
 * @retval  VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pCritSect   The critical section.
 */
VMMDECL(int) PDMCritSectTryEnter(PPDMCRITSECT pCritSect)
{
#ifndef PDMCRITSECT_STRICT
    return pdmCritSectTryEnter(pCritSect, NULL);
#else
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
    return pdmCritSectTryEnter(pCritSect, &SrcPos);
#endif
}
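
/*
 * Editorial usage sketch (not part of the original source): the try variant never
 * spins or blocks, so it suits opportunistic work that can be skipped or deferred
 * when the section is busy.  pThis->CritSect and uReg are the hypothetical names
 * from the earlier sketch.
 *
 * @code
 *  if (PDMCritSectTryEnter(&pThis->CritSect) == VINF_SUCCESS)
 *  {
 *      pThis->uReg++;                          // do the optional work
 *      PDMCritSectLeave(&pThis->CritSect);
 *  }
 *  // else: VERR_SEM_BUSY - try again later, e.g. from a timer callback.
 * @endcode
 */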


/**
 * Try to enter a critical section, with location information for debugging.
 *
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_BUSY if the critsect was owned.
 * @retval  VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pCritSect   The critical section.
 * @param   uId         Some kind of locking location ID.  Typically a
 *                      return address up the stack.  Optional (0).
 * @param   pszFile     The file where the lock is being acquired from.
 *                      Optional.
 * @param   iLine       The line number in that file.  Optional (0).
 * @param   pszFunction The function where the lock is being acquired
 *                      from.  Optional.
 */
VMMDECL(int) PDMCritSectTryEnterDebug(PPDMCRITSECT pCritSect, RTHCUINTPTR uId, RT_SRC_POS_DECL)
{
#ifdef PDMCRITSECT_STRICT
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
    return pdmCritSectTryEnter(pCritSect, &SrcPos);
#else
    NOREF(uId); RT_SRC_POS_NOREF();
    return pdmCritSectTryEnter(pCritSect, NULL);
#endif
}


#ifdef IN_RING3
/**
 * Enters a PDM critical section.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @returns rcBusy when encountering a busy critical section in GC/R0.
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pCritSect   The PDM critical section to enter.
 * @param   fCallRing3  Whether this is a VMMRZCallRing3() request.
 */
VMMR3DECL(int) PDMR3CritSectEnterEx(PPDMCRITSECT pCritSect, bool fCallRing3)
{
    int rc = PDMCritSectEnter(pCritSect, VERR_IGNORED);
    if (    rc == VINF_SUCCESS
        &&  fCallRing3
        &&  pCritSect->s.Core.pValidatorRec
        &&  pCritSect->s.Core.pValidatorRec->hThread != NIL_RTTHREAD)
        RTLockValidatorRecExclReleaseOwnerUnchecked(pCritSect->s.Core.pValidatorRec);
    return rc;
}
#endif /* IN_RING3 */


/**
 * Leaves a critical section entered with PDMCritSectEnter().
 *
 * @returns Indication whether we really exited the critical section.
 * @retval  VINF_SUCCESS if we really exited.
 * @retval  VINF_SEM_NESTED if we only reduced the nesting count.
 * @retval  VERR_NOT_OWNER if you somehow ignore release assertions.
 *
 * @param   pCritSect   The PDM critical section to leave.
 */
VMMDECL(int) PDMCritSectLeave(PPDMCRITSECT pCritSect)
{
    AssertMsg(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic));
    Assert(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC);

    /* Check for NOP sections before asserting ownership. */
    if (pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NOP)
        return VINF_SUCCESS;

    /*
     * Always check that the caller is the owner (screw performance).
     */
    RTNATIVETHREAD const hNativeSelf = pdmCritSectGetNativeSelf(pCritSect);
    AssertReleaseMsgReturn(pCritSect->s.Core.NativeThreadOwner == hNativeSelf,
                           ("%p %s: %p != %p; cLockers=%d cNestings=%d\n", pCritSect, R3STRING(pCritSect->s.pszName),
                            pCritSect->s.Core.NativeThreadOwner, hNativeSelf,
                            pCritSect->s.Core.cLockers, pCritSect->s.Core.cNestings),
                           VERR_NOT_OWNER);
    Assert(pCritSect->s.Core.cNestings >= 1);

    /*
     * Nested leave.
     */
    if (pCritSect->s.Core.cNestings > 1)
    {
        ASMAtomicDecS32(&pCritSect->s.Core.cNestings);
        Assert(pCritSect->s.Core.cNestings >= 1);
        ASMAtomicDecS32(&pCritSect->s.Core.cLockers);
        Assert(pCritSect->s.Core.cLockers >= 0);
        return VINF_SEM_NESTED;
    }

#ifdef IN_RING0
# if 0 /** @todo Make SUPSemEventSignal interrupt safe (handle table++) and enable this for: defined(RT_OS_LINUX) || defined(RT_OS_OS2) */
    if (1) /* SUPSemEventSignal is safe */
# else
    if (ASMIntAreEnabled())
# endif
#endif
#if defined(IN_RING3) || defined(IN_RING0)
    {
        /*
         * Leave for real.
         */
        /* update members. */
# ifdef IN_RING3
        RTSEMEVENT hEventToSignal  = pCritSect->s.EventToSignal;
        pCritSect->s.EventToSignal = NIL_RTSEMEVENT;
#  if defined(PDMCRITSECT_STRICT)
        if (pCritSect->s.Core.pValidatorRec->hThread != NIL_RTTHREAD)
            RTLockValidatorRecExclReleaseOwnerUnchecked(pCritSect->s.Core.pValidatorRec);
#  endif
        Assert(!pCritSect->s.Core.pValidatorRec || pCritSect->s.Core.pValidatorRec->hThread == NIL_RTTHREAD);
# endif
        ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
        ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD);
        ASMAtomicDecS32(&pCritSect->s.Core.cNestings);
        Assert(pCritSect->s.Core.cNestings == 0);

        /* stop and decrement lockers. */
        STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);
        ASMCompilerBarrier();
        if (ASMAtomicDecS32(&pCritSect->s.Core.cLockers) >= 0)
        {
            /* Someone is waiting, wake up one of them. */
            SUPSEMEVENT     hEvent   = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
            PSUPDRVSESSION  pSession = pCritSect->s.CTX_SUFF(pVM)->pSession;
            int rc = SUPSemEventSignal(pSession, hEvent);
            AssertRC(rc);
        }

# ifdef IN_RING3
        /* Signal exit event. */
        if (hEventToSignal != NIL_RTSEMEVENT)
        {
            LogBird(("Signalling %#x\n", hEventToSignal));
            int rc = RTSemEventSignal(hEventToSignal);
            AssertRC(rc);
        }
# endif

# if defined(DEBUG_bird) && defined(IN_RING0)
        VMMTrashVolatileXMMRegs();
# endif
    }
#endif /* IN_RING3 || IN_RING0 */
#ifdef IN_RING0
    else
#endif
#if defined(IN_RING0) || defined(IN_RC)
    {
        /*
         * Try leave it.
         */
        if (pCritSect->s.Core.cLockers == 0)
        {
            ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 0);
            RTNATIVETHREAD hNativeThread = pCritSect->s.Core.NativeThreadOwner;
            ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
            STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);

            ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD);
            if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, -1, 0))
                return VINF_SUCCESS;

            /* darn, someone raced in on us. */
            ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, hNativeThread);
            STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
            Assert(pCritSect->s.Core.cNestings == 0);
            ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 1);
        }
        ASMAtomicOrU32(&pCritSect->s.Core.fFlags, PDMCRITSECT_FLAGS_PENDING_UNLOCK);

        /*
         * Queue the request.
         */
        PVM         pVM   = pCritSect->s.CTX_SUFF(pVM); AssertPtr(pVM);
        PVMCPU      pVCpu = VMMGetCpu(pVM);             AssertPtr(pVCpu);
        uint32_t    i     = pVCpu->pdm.s.cQueuedCritSectLeaves++;
        LogFlow(("PDMCritSectLeave: [%d]=%p => R3\n", i, pCritSect));
        AssertFatal(i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectLeaves));
        pVCpu->pdm.s.apQueuedCritSectLeaves[i] = MMHyperCCToR3(pVM, pCritSect);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_PDM_CRITSECT);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
        STAM_REL_COUNTER_INC(&pVM->pdm.s.StatQueuedCritSectLeaves);
        STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZUnlock);
    }
#endif /* IN_RING0 || IN_RC */

    return VINF_SUCCESS;
}
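
/*
 * Editorial sketch (not part of the original source): nesting as seen by a caller.
 * Only the outermost leave really releases the section; in R0/RC the final wake-up
 * may additionally be queued to ring-3 (see the code above), but the call still
 * returns VINF_SUCCESS.  pThis->CritSect is the hypothetical member from the
 * earlier sketch.
 *
 * @code
 *  PDMCritSectEnter(&pThis->CritSect, VERR_SEM_BUSY);
 *  PDMCritSectEnter(&pThis->CritSect, VERR_SEM_BUSY);  // recursion, never blocks
 *  int rc = PDMCritSectLeave(&pThis->CritSect);        // VINF_SEM_NESTED
 *  rc     = PDMCritSectLeave(&pThis->CritSect);        // VINF_SUCCESS
 * @endcode
 */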


/**
 * Checks whether the caller is the owner of the critical section.
 *
 * @returns true if owner.
 * @returns false if not owner.
 * @param   pCritSect   The critical section.
 */
VMMDECL(bool) PDMCritSectIsOwner(PCPDMCRITSECT pCritSect)
{
#ifdef IN_RING3
    return RTCritSectIsOwner(&pCritSect->s.Core);
#else
    PVM     pVM   = pCritSect->s.CTX_SUFF(pVM); AssertPtr(pVM);
    PVMCPU  pVCpu = VMMGetCpu(pVM);             AssertPtr(pVCpu);
    if (pCritSect->s.Core.NativeThreadOwner != pVCpu->hNativeThread)
        return false;
    return (pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK) == 0
        || pCritSect->s.Core.cNestings > 1;
#endif
}
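
/*
 * Editorial usage sketch (not part of the original source): the typical use is an
 * ownership assertion at the top of a helper that relies on its caller having
 * entered the section.  myDevUpdateReg and the MYDEVSTATE members are hypothetical.
 *
 * @code
 *  static void myDevUpdateReg(MYDEVSTATE *pThis, uint32_t uNew)
 *  {
 *      Assert(PDMCritSectIsOwner(&pThis->CritSect));
 *      pThis->uReg = uNew;
 *  }
 * @endcode
 */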


/**
 * Checks whether the specified VCPU is the owner of the critical section.
 *
 * @returns true if owner.
 * @returns false if not owner.
 * @param   pCritSect   The critical section.
 * @param   pVCpu       Pointer to the VMCPU.
 */
VMMDECL(bool) PDMCritSectIsOwnerEx(PCPDMCRITSECT pCritSect, PVMCPU pVCpu)
{
#ifdef IN_RING3
    NOREF(pVCpu);
    return RTCritSectIsOwner(&pCritSect->s.Core);
#else
    Assert(&pVCpu->CTX_SUFF(pVM)->aCpus[pVCpu->idCpu] == pVCpu);
    if (pCritSect->s.Core.NativeThreadOwner != pVCpu->hNativeThread)
        return false;
    return (pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK) == 0
        || pCritSect->s.Core.cNestings > 1;
#endif
}


/**
 * Checks if anyone is waiting on the critical section we own.
 *
 * @returns true if someone is waiting.
 * @returns false if no one is waiting.
 * @param   pCritSect   The critical section.
 */
VMMDECL(bool) PDMCritSectHasWaiters(PCPDMCRITSECT pCritSect)
{
    AssertReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, false);
    Assert(pCritSect->s.Core.NativeThreadOwner == pdmCritSectGetNativeSelf(pCritSect));
    return pCritSect->s.Core.cLockers >= pCritSect->s.Core.cNestings;
}
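
/*
 * Editorial usage sketch (not part of the original source): a long-running owner
 * can use this to back off cooperatively and let a waiter in.  pThis->CritSect is
 * the hypothetical member from the earlier sketch.
 *
 * @code
 *  if (PDMCritSectHasWaiters(&pThis->CritSect))        // we must be the owner here
 *  {
 *      PDMCritSectLeave(&pThis->CritSect);             // give a waiter a chance
 *      PDMCritSectEnter(&pThis->CritSect, VERR_SEM_BUSY);
 *  }
 * @endcode
 */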


/**
 * Checks if a critical section is initialized or not.
 *
 * @returns true if initialized.
 * @returns false if not initialized.
 * @param   pCritSect   The critical section.
 */
VMMDECL(bool) PDMCritSectIsInitialized(PCPDMCRITSECT pCritSect)
{
    return RTCritSectIsInitialized(&pCritSect->s.Core);
}


/**
 * Gets the recursion depth.
 *
 * @returns The recursion depth.
 * @param   pCritSect   The critical section.
 */
VMMDECL(uint32_t) PDMCritSectGetRecursion(PCPDMCRITSECT pCritSect)
{
    return RTCritSectGetRecursion(&pCritSect->s.Core);
}