VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PDMAllCritSect.cpp@ 92780

Last change on this file since 92780 was 92408, checked in by vboxsync, 3 years ago

VMM: Reworked most of the call-ring-3 stuff into setjmp-longjmp-on-assert and removed the stack switching/copying/resume code. bugref:10093 bugref:10124

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 45.9 KB
1/* $Id: PDMAllCritSect.cpp 92408 2021-11-12 21:49:06Z vboxsync $ */
2/** @file
3 * PDM - Write-Only Critical Section, All Contexts.
4 */
5
6/*
7 * Copyright (C) 2006-2020 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_PDM_CRITSECT
23#include "PDMInternal.h"
24#include <VBox/vmm/pdmcritsect.h>
25#include <VBox/vmm/mm.h>
26#include <VBox/vmm/vmm.h>
27#include <VBox/vmm/vmcc.h>
28#include <VBox/err.h>
29#include <VBox/vmm/hm.h>
30
31#include <VBox/log.h>
32#include <iprt/asm.h>
33#include <iprt/asm-amd64-x86.h>
34#include <iprt/assert.h>
35#ifdef IN_RING3
36# include <iprt/lockvalidator.h>
37#endif
38#if defined(IN_RING3) || defined(IN_RING0)
39# include <iprt/semaphore.h>
40#endif
41#ifdef IN_RING0
42# include <iprt/time.h>
43#endif
44#if defined(IN_RING3) || defined(IN_RING0)
45# include <iprt/thread.h>
46#endif
47
48
49/*********************************************************************************************************************************
50* Defined Constants And Macros *
51*********************************************************************************************************************************/
52/** The number of loops to spin for in ring-3. */
53#define PDMCRITSECT_SPIN_COUNT_R3 20
54/** The number of loops to spin for in ring-0. */
55#define PDMCRITSECT_SPIN_COUNT_R0 256
56/** The number of loops to spin for in the raw-mode context. */
57#define PDMCRITSECT_SPIN_COUNT_RC 256
58
59
60/** Skips some of the overly paranoid atomic updates.
61 * Makes some assumptions about cache coherence, though not brave enough not to
62 * always end with an atomic update. */
63#define PDMCRITSECT_WITH_LESS_ATOMIC_STUFF
64
65/* Undefine the automatic VBOX_STRICT API mappings. */
66#undef PDMCritSectEnter
67#undef PDMCritSectTryEnter
68
69
70/**
71 * Gets the ring-3 native thread handle of the calling thread.
72 *
73 * @returns native thread handle (ring-3).
74 * @param pVM The cross context VM structure.
75 * @param pCritSect The critical section. This is used in R0 and RC.
76 */
77DECL_FORCE_INLINE(RTNATIVETHREAD) pdmCritSectGetNativeSelf(PVMCC pVM, PCPDMCRITSECT pCritSect)
78{
79#ifdef IN_RING3
80 RT_NOREF(pVM, pCritSect);
81 RTNATIVETHREAD hNativeSelf = RTThreadNativeSelf();
82
83#elif defined(IN_RING0)
84 AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, ("%RX32\n", pCritSect->s.Core.u32Magic),
85 NIL_RTNATIVETHREAD);
86 RTNATIVETHREAD hNativeSelf = GVMMR0GetRing3ThreadForSelf(pVM);
87 Assert(hNativeSelf != NIL_RTNATIVETHREAD);
88
89#else
90# error "Invalid context"
91#endif
92 return hNativeSelf;
93}
94
95
96#ifdef IN_RING0
97/**
98 * Marks the critical section as corrupted.
99 */
100DECL_NO_INLINE(static, int) pdmCritSectCorrupted(PPDMCRITSECT pCritSect, const char *pszMsg)
101{
102 ASMAtomicWriteU32(&pCritSect->s.Core.u32Magic, PDMCRITSECT_MAGIC_CORRUPTED);
103 LogRel(("PDMCritSect: %s pCritSect=%p\n", pszMsg, pCritSect));
104 return VERR_PDM_CRITSECT_IPE;
105}
106#endif
107
108
109/**
110 * Tail code called when we've won the battle for the lock.
111 *
112 * @returns VINF_SUCCESS.
113 *
114 * @param pCritSect The critical section.
115 * @param hNativeSelf The native handle of this thread.
116 * @param pSrcPos The source position of the lock operation.
117 */
118DECL_FORCE_INLINE(int) pdmCritSectEnterFirst(PPDMCRITSECT pCritSect, RTNATIVETHREAD hNativeSelf, PCRTLOCKVALSRCPOS pSrcPos)
119{
120 Assert(hNativeSelf != NIL_RTNATIVETHREAD);
121 AssertMsg(pCritSect->s.Core.NativeThreadOwner == NIL_RTNATIVETHREAD, ("NativeThreadOwner=%p\n", pCritSect->s.Core.NativeThreadOwner));
122 Assert(!(pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK));
123
124# ifdef PDMCRITSECT_WITH_LESS_ATOMIC_STUFF
125 pCritSect->s.Core.cNestings = 1;
126# else
127 ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 1);
128# endif
129 Assert(pCritSect->s.Core.cNestings == 1);
130 ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, hNativeSelf);
131
132# ifdef PDMCRITSECT_STRICT
133 RTLockValidatorRecExclSetOwner(pCritSect->s.Core.pValidatorRec, NIL_RTTHREAD, pSrcPos, true);
134# else
135 NOREF(pSrcPos);
136# endif
137 if (pSrcPos)
138 Log12Func(("%p: uId=%p ln=%u fn=%s\n", pCritSect, pSrcPos->uId, pSrcPos->uLine, pSrcPos->pszFunction));
139 else
140 Log12Func(("%p\n", pCritSect));
141
142 STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
143 return VINF_SUCCESS;
144}
145
146
147#if defined(IN_RING3) || defined(IN_RING0)
148/**
149 * Deals with the contended case in ring-3 and ring-0.
150 *
151 * @retval VINF_SUCCESS on success.
152 * @retval VERR_SEM_DESTROYED if destroyed.
153 *
154 * @param pVM The cross context VM structure.
155 * @param pVCpu The cross context virtual CPU structure if ring-0 and on
156 * an EMT, otherwise NULL.
157 * @param pCritSect The critsect.
158 * @param hNativeSelf The native thread handle.
159 * @param pSrcPos The source position of the lock operation.
160 * @param rcBusy The status code to return when we're in RC or R0.
161 */
162static int pdmR3R0CritSectEnterContended(PVMCC pVM, PVMCPU pVCpu, PPDMCRITSECT pCritSect, RTNATIVETHREAD hNativeSelf,
163 PCRTLOCKVALSRCPOS pSrcPos, int rcBusy)
164{
165# ifdef IN_RING0
166 /*
167 * If we've got queued critical section leave operations and rcBusy isn't
168 * VINF_SUCCESS, return to ring-3 immediately to avoid deadlocks.
169 */
170 if ( !pVCpu
171 || !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PDM_CRITSECT)
172 || rcBusy == VINF_SUCCESS )
173 { /* likely */ }
174 else
175 {
176 /** @todo statistics. */
177 STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
178 return rcBusy;
179 }
180# endif
181
182 /*
183 * Start waiting.
184 */
185 if (ASMAtomicIncS32(&pCritSect->s.Core.cLockers) == 0)
186 return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
187# ifdef IN_RING3
188 STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionR3);
189# else
190 STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
191# endif
192
193 /*
194 * The wait loop.
195 *
196 * This handles VERR_TIMEOUT and VERR_INTERRUPTED.
197 */
198 STAM_REL_PROFILE_START(&pCritSect->s.CTX_MID_Z(StatContention,Wait), a);
199 PSUPDRVSESSION const pSession = pVM->pSession;
200 SUPSEMEVENT const hEvent = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
201# ifdef IN_RING3
202# ifdef PDMCRITSECT_STRICT
203 RTTHREAD const hThreadSelf = RTThreadSelfAutoAdopt();
204 int rc2 = RTLockValidatorRecExclCheckOrder(pCritSect->s.Core.pValidatorRec, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
205 if (RT_FAILURE(rc2))
206 return rc2;
207# else
208 RTTHREAD const hThreadSelf = RTThreadSelf();
209# endif
210# else /* IN_RING0 */
211 uint64_t const tsStart = RTTimeNanoTS();
212 uint64_t const cNsMaxTotalDef = RT_NS_5MIN;
213 uint64_t cNsMaxTotal = cNsMaxTotalDef;
214 uint64_t const cNsMaxRetry = RT_NS_15SEC;
215 uint32_t cMsMaxOne = RT_MS_5SEC;
216 bool fNonInterruptible = false;
217# endif
218 for (;;)
219 {
220 /*
221 * Do the wait.
222 *
223 * In ring-3 this gets cluttered by lock validation and thread state
224 * maintenance.
225 *
226 * In ring-0 we have to deal with the possibility that the thread has
227 * been signalled and that the interruptible wait function returns
228 * immediately. In that case we do normal R0/RC rcBusy handling.
229 *
230 * We always do a timed wait here, so the event handle is revalidated
231 * regularly and we won't end up stuck waiting for a destroyed critsect.
232 */
233 /** @todo Make SUPSemEventClose wake up all waiters. */
234# ifdef IN_RING3
235# ifdef PDMCRITSECT_STRICT
236 int rc9 = RTLockValidatorRecExclCheckBlocking(pCritSect->s.Core.pValidatorRec, hThreadSelf, pSrcPos,
237 !(pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NO_NESTING),
238 RT_INDEFINITE_WAIT, RTTHREADSTATE_CRITSECT, true);
239 if (RT_FAILURE(rc9))
240 return rc9;
241# else
242 RTThreadBlocking(hThreadSelf, RTTHREADSTATE_CRITSECT, true);
243# endif
244 int const rc = SUPSemEventWaitNoResume(pSession, hEvent, RT_MS_5SEC);
245 RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_CRITSECT);
246# else /* IN_RING0 */
247 int const rc = !fNonInterruptible
248 ? SUPSemEventWaitNoResume(pSession, hEvent, cMsMaxOne)
249 : SUPSemEventWait(pSession, hEvent, cMsMaxOne);
250 Log11Func(("%p: rc=%Rrc %'RU64 ns (cMsMaxOne=%RU64 hOwner=%p)\n",
251 pCritSect, rc, RTTimeNanoTS() - tsStart, cMsMaxOne, pCritSect->s.Core.NativeThreadOwner));
252# endif /* IN_RING0 */
253
254 /*
255 * Make sure the critical section hasn't been deleted before continuing.
256 */
257 if (RT_LIKELY(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC))
258 { /* likely */ }
259 else
260 {
261 LogRel(("PDMCritSectEnter: Destroyed while waiting; pCritSect=%p rc=%Rrc\n", pCritSect, rc));
262 return VERR_SEM_DESTROYED;
263 }
264
265 /*
266 * Most likely we're here because we got signalled.
267 */
268 if (rc == VINF_SUCCESS)
269 {
270 STAM_REL_PROFILE_STOP(&pCritSect->s.CTX_MID_Z(StatContention,Wait), a);
271 return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
272 }
273
274 /*
275 * Timeout and interrupted waits need careful handling in ring-0
276 * because we're cooperating with ring-3 on this critical section
277 * and thus need to make absolutely sure we won't get stuck here.
278 *
279 * The r0 interrupted case means something is pending (termination,
280 * signal, APC, debugger, whatever), so we must try our best to
281 * return to the caller and to ring-3 so it can be dealt with.
282 */
283 if (RT_LIKELY(rc == VERR_TIMEOUT || rc == VERR_INTERRUPTED))
284 {
285# ifdef IN_RING0
286 uint64_t const cNsElapsed = RTTimeNanoTS() - tsStart;
287 int const rcTerm = RTThreadQueryTerminationStatus(NIL_RTTHREAD);
288 AssertMsg(rcTerm == VINF_SUCCESS || rcTerm == VERR_NOT_SUPPORTED || rcTerm == VINF_THREAD_IS_TERMINATING,
289 ("rcTerm=%Rrc\n", rcTerm));
290 if (rcTerm == VERR_NOT_SUPPORTED && cNsMaxTotal == cNsMaxTotalDef)
291 cNsMaxTotal = RT_NS_1MIN;
292
293 if (rc == VERR_TIMEOUT)
294 {
295 /* Try to get out of here with a non-VINF_SUCCESS status if
296 the thread is terminating or if the timeout has been exceeded. */
297 STAM_REL_COUNTER_INC(&pVM->pdm.s.StatCritSectVerrTimeout);
298 if ( rcTerm != VINF_THREAD_IS_TERMINATING
299 && cNsElapsed <= cNsMaxTotal)
300 continue;
301 }
302 else
303 {
304 /* For interrupt cases, we must return if we can. If rcBusy is VINF_SUCCESS,
305 we will try non-interruptible sleep for a while to help resolve the issue
306 w/o guru'ing. */
307 STAM_REL_COUNTER_INC(&pVM->pdm.s.StatCritSectVerrInterrupted);
308 if ( rcTerm != VINF_THREAD_IS_TERMINATING
309 && rcBusy == VINF_SUCCESS
310 && pVCpu != NULL
311 && cNsElapsed <= cNsMaxTotal)
312 {
313 if (!fNonInterruptible)
314 {
315 STAM_REL_COUNTER_INC(&pVM->pdm.s.StatCritSectNonInterruptibleWaits);
316 fNonInterruptible = true;
317 cMsMaxOne = 32;
318 uint64_t cNsLeft = cNsMaxTotal - cNsElapsed;
319 if (cNsLeft > RT_NS_10SEC)
320 cNsMaxTotal = cNsElapsed + RT_NS_10SEC;
321 }
322 continue;
323 }
324 }
325
326 /*
327 * Let's try to get out of here. We must very carefully undo the
328 * cLockers increment we did using compare-and-exchange so that
329 * we don't race the semaphore signalling in PDMCritSectLeave
330 * and end up with spurious wakeups and two owners at once.
331 */
332 uint32_t cNoIntWaits = 0;
333 uint32_t cCmpXchgs = 0;
334 int32_t cLockers = ASMAtomicReadS32(&pCritSect->s.Core.cLockers);
335 for (;;)
336 {
337 if (pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC)
338 {
339 if (cLockers > 0 && cCmpXchgs < _64M)
340 {
341 bool fRc = ASMAtomicCmpXchgExS32(&pCritSect->s.Core.cLockers, cLockers - 1, cLockers, &cLockers);
342 if (fRc)
343 {
344 LogFunc(("Aborting wait on %p (rc=%Rrc rcTerm=%Rrc cNsElapsed=%'RU64) -> %Rrc\n", pCritSect,
345 rc, rcTerm, cNsElapsed, rcBusy != VINF_SUCCESS ? rcBusy : rc));
346 STAM_REL_COUNTER_INC(&pVM->pdm.s.StatAbortedCritSectEnters);
347 return rcBusy != VINF_SUCCESS ? rcBusy : rc;
348 }
349 cCmpXchgs++;
350 if ((cCmpXchgs & 0xffff) == 0)
351 Log11Func(("%p: cLockers=%d cCmpXchgs=%u (hOwner=%p)\n",
352 pCritSect, cLockers, cCmpXchgs, pCritSect->s.Core.NativeThreadOwner));
353 ASMNopPause();
354 continue;
355 }
356
357 if (cLockers == 0)
358 {
359 /*
360 * We are racing someone in PDMCritSectLeave.
361 *
362 * For the VERR_TIMEOUT case we'll just retry taking it the normal
363 * way for a while. For VERR_INTERRUPTED we're in for more fun as
364 * the previous owner might not have signalled the semaphore yet,
365 * so we'll do a short non-interruptible wait instead and then guru.
366 */
367 if ( rc == VERR_TIMEOUT
368 && RTTimeNanoTS() - tsStart <= cNsMaxTotal + cNsMaxRetry)
369 break;
370
371 if ( rc == VERR_INTERRUPTED
372 && ( cNoIntWaits == 0
373 || RTTimeNanoTS() - (tsStart + cNsElapsed) < RT_NS_100MS))
374 {
375 int const rc2 = SUPSemEventWait(pSession, hEvent, 1 /*ms*/);
376 if (rc2 == VINF_SUCCESS)
377 {
378 STAM_REL_COUNTER_INC(&pVM->pdm.s.StatCritSectEntersWhileAborting);
379 STAM_REL_PROFILE_STOP(&pCritSect->s.CTX_MID_Z(StatContention,Wait), a);
380 return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
381 }
382 cNoIntWaits++;
383 cLockers = ASMAtomicReadS32(&pCritSect->s.Core.cLockers);
384 continue;
385 }
386 }
387 else
388 LogFunc(("Critical section %p has a broken cLockers count. Aborting.\n", pCritSect));
389
390 /* Sabotage the critical section and return error to caller. */
391 ASMAtomicWriteU32(&pCritSect->s.Core.u32Magic, PDMCRITSECT_MAGIC_FAILED_ABORT);
392 LogRel(("PDMCritSectEnter: Failed to abort wait on pCritSect=%p (rc=%Rrc rcTerm=%Rrc)\n",
393 pCritSect, rc, rcTerm));
394 return VERR_PDM_CRITSECT_ABORT_FAILED;
395 }
396 LogRel(("PDMCritSectEnter: Destroyed while aborting wait; pCritSect=%p/%#x rc=%Rrc rcTerm=%Rrc\n",
397 pCritSect, pCritSect->s.Core.u32Magic, rc, rcTerm));
398 return VERR_SEM_DESTROYED;
399 }
400
401 /* We get here if we timed out. Just retry now that it
402 appears someone left already. */
403 Assert(rc == VERR_TIMEOUT);
404 cMsMaxOne = 10 /*ms*/;
405
406# else /* IN_RING3 */
407 RT_NOREF(pVM, pVCpu, rcBusy);
408# endif /* IN_RING3 */
409 }
410 /*
411 * Any other return code is fatal.
412 */
413 else
414 {
415 AssertMsgFailed(("rc=%Rrc\n", rc));
416 return RT_FAILURE_NP(rc) ? rc : -rc;
417 }
418 }
419 /* won't get here */
420}
421#endif /* IN_RING3 || IN_RING0 */
422
423
424/**
425 * Common worker for the debug and normal APIs.
426 *
427 * @returns VINF_SUCCESS if entered successfully.
428 * @returns rcBusy when encountering a busy critical section in RC/R0.
429 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
430 * during the operation.
431 *
432 * @param pVM The cross context VM structure.
433 * @param pCritSect The PDM critical section to enter.
434 * @param rcBusy The status code to return when we're in RC or R0.
435 * @param pSrcPos The source position of the lock operation.
436 */
437DECL_FORCE_INLINE(int) pdmCritSectEnter(PVMCC pVM, PPDMCRITSECT pCritSect, int rcBusy, PCRTLOCKVALSRCPOS pSrcPos)
438{
439 Assert(pCritSect->s.Core.cNestings < 8); /* useful to catch incorrect locking */
440 Assert(pCritSect->s.Core.cNestings >= 0);
441#if defined(VBOX_STRICT) && defined(IN_RING0)
442 /* Hope we're not messing with critical sections while in the no-block
443 zone, that would complicate things a lot. */
444 PVMCPUCC pVCpuAssert = VMMGetCpu(pVM);
445 Assert(pVCpuAssert && VMMRZCallRing3IsEnabled(pVCpuAssert));
446#endif
447
448 /*
449 * If the critical section has already been destroyed, then inform the caller.
450 */
451 AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC,
452 ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic),
453 VERR_SEM_DESTROYED);
454
455 /*
456 * See if we're lucky.
457 */
458 /* NOP ... */
459 if (!(pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NOP))
460 { /* We're more likely to end up here with real critsects than a NOP one. */ }
461 else
462 return VINF_SUCCESS;
463
464 RTNATIVETHREAD hNativeSelf = pdmCritSectGetNativeSelf(pVM, pCritSect);
465 AssertReturn(hNativeSelf != NIL_RTNATIVETHREAD, VERR_VM_THREAD_NOT_EMT);
466 /* ... not owned ... */
467 if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
468 return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
469
470 /* ... or nested. */
471 if (pCritSect->s.Core.NativeThreadOwner == hNativeSelf)
472 {
473 Assert(pCritSect->s.Core.cNestings >= 1);
474# ifdef PDMCRITSECT_WITH_LESS_ATOMIC_STUFF
475 pCritSect->s.Core.cNestings += 1;
476# else
477 ASMAtomicIncS32(&pCritSect->s.Core.cNestings);
478# endif
479 ASMAtomicIncS32(&pCritSect->s.Core.cLockers);
480 Log12Func(("%p: cNestings=%d cLockers=%d\n", pCritSect, pCritSect->s.Core.cNestings, pCritSect->s.Core.cLockers));
481 return VINF_SUCCESS;
482 }
483
484 /*
485 * Spin for a bit without incrementing the counter.
486 */
487 /** @todo Move this to cfgm variables since it doesn't make sense to spin on UNI
488 * cpu systems. */
489 int32_t cSpinsLeft = CTX_SUFF(PDMCRITSECT_SPIN_COUNT_);
490 while (cSpinsLeft-- > 0)
491 {
492 if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
493 return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
494 ASMNopPause();
495 /** @todo Should use monitor/mwait on e.g. &cLockers here, possibly with a
496 cli'ed pendingpreemption check up front using sti w/ instruction fusing
497 for avoiding races. Hmm ... This is assuming the other party is actually
498 executing code on another CPU ... which we could keep track of if we
499 wanted. */
500 }
501
502#ifdef IN_RING3
503 /*
504 * Take the slow path.
505 */
506 NOREF(rcBusy);
507 return pdmR3R0CritSectEnterContended(pVM, NULL, pCritSect, hNativeSelf, pSrcPos, rcBusy);
508
509#elif defined(IN_RING0)
510# if 1 /* new code */
511 /*
512 * In ring-0 context we have to take the special VT-x/AMD-V HM context into
513 * account when waiting on contended locks.
514 *
515 * While we usually (it can be VINF_SUCCESS) have the option of returning
516 * rcBusy and forcing the caller to go back to ring-3 to restart the work
517 * there, it's almost always more efficient to try to wait for the lock here.
518 * The rcBusy will be used if we encounter a VERR_INTERRUPTED situation
519 * though.
520 */
521 PVMCPUCC pVCpu = VMMGetCpu(pVM);
522 if (pVCpu)
523 {
524 VMMR0EMTBLOCKCTX Ctx;
525 int rc = VMMR0EmtPrepareToBlock(pVCpu, rcBusy, __FUNCTION__, pCritSect, &Ctx);
526 if (rc == VINF_SUCCESS)
527 {
528 Assert(RTThreadPreemptIsEnabled(NIL_RTTHREAD));
529
530 rc = pdmR3R0CritSectEnterContended(pVM, pVCpu, pCritSect, hNativeSelf, pSrcPos, rcBusy);
531
532 VMMR0EmtResumeAfterBlocking(pVCpu, &Ctx);
533 }
534 else
535 STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLockBusy);
536 return rc;
537 }
538
539 /* Non-EMT. */
540 Assert(RTThreadPreemptIsEnabled(NIL_RTTHREAD));
541 return pdmR3R0CritSectEnterContended(pVM, NULL, pCritSect, hNativeSelf, pSrcPos, rcBusy);
542
543# else /* old code: */
544 /*
545 * If preemption hasn't been disabled, we can block here in ring-0.
546 */
547 if ( RTThreadPreemptIsEnabled(NIL_RTTHREAD)
548 && ASMIntAreEnabled())
549 return pdmR3R0CritSectEnterContended(pVM, VMMGetCpu(pVM), pCritSect, hNativeSelf, pSrcPos, rcBusy);
550
551 STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
552
553 /*
554 * Call ring-3 to acquire the critical section?
555 */
556 if (rcBusy == VINF_SUCCESS)
557 {
558 PVMCPUCC pVCpu = VMMGetCpu(pVM);
559 AssertReturn(pVCpu, VERR_PDM_CRITSECT_IPE);
560 return VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_PDM_CRIT_SECT_ENTER, MMHyperCCToR3(pVM, pCritSect));
561 }
562
563 /*
564 * Return busy.
565 */
566 LogFlow(("PDMCritSectEnter: locked => R3 (%Rrc)\n", rcBusy));
567 return rcBusy;
568# endif /* old code */
569#else
570# error "Unsupported context"
571#endif
572}
573
574
575/**
576 * Enters a PDM critical section.
577 *
578 * @returns VINF_SUCCESS if entered successfully.
579 * @returns rcBusy when encountering a busy critical section in RC/R0.
580 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
581 * during the operation.
582 *
583 * @param pVM The cross context VM structure.
584 * @param pCritSect The PDM critical section to enter.
585 * @param rcBusy The status code to return when we're in RC or R0
586 * and the section is busy. Pass VINF_SUCCESS to
587 * acquire the critical section through a ring-3
588 * call if necessary.
589 *
590 * @note Even callers setting @a rcBusy to VINF_SUCCESS must either handle
591 * possible failures in ring-0 or apply
592 * PDM_CRITSECT_RELEASE_ASSERT_RC(),
593 * PDM_CRITSECT_RELEASE_ASSERT_RC_DEV(),
594 * PDM_CRITSECT_RELEASE_ASSERT_RC_DRV() or
595 * PDM_CRITSECT_RELEASE_ASSERT_RC_USB() to the return value of this
596 * function.
597 */
598VMMDECL(DECL_CHECK_RETURN_NOT_R3(int)) PDMCritSectEnter(PVMCC pVM, PPDMCRITSECT pCritSect, int rcBusy)
599{
600#ifndef PDMCRITSECT_STRICT
601 return pdmCritSectEnter(pVM, pCritSect, rcBusy, NULL);
602#else
603 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
604 return pdmCritSectEnter(pVM, pCritSect, rcBusy, &SrcPos);
605#endif
606}
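/* Example (editor's illustration, not part of the original source): a typical
 * caller pattern for PDMCritSectEnter/PDMCritSectLeave. pThis and its CritSect
 * member are hypothetical device state, and VERR_SEM_BUSY is just one possible
 * rcBusy choice; per the @note above, the return value must still be checked.
 *
 *    int rc = PDMCritSectEnter(pVM, &pThis->CritSect, VERR_SEM_BUSY);
 *    if (rc == VINF_SUCCESS)
 *    {
 *        // ... access state shared between ring-3, ring-0 and raw-mode ...
 *        PDMCritSectLeave(pVM, &pThis->CritSect);
 *    }
 *    else
 *        return rc; // rcBusy (VERR_SEM_BUSY) when contended in R0/RC, or a fatal status.
 */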
607
608
609/**
610 * Enters a PDM critical section, with location information for debugging.
611 *
612 * @returns VINF_SUCCESS if entered successfully.
613 * @returns rcBusy when encountering a busy critical section in RC/R0.
614 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
615 * during the operation.
616 *
617 * @param pVM The cross context VM structure.
618 * @param pCritSect The PDM critical section to enter.
619 * @param rcBusy The status code to return when we're in RC or R0
620 * and the section is busy. Pass VINF_SUCCESS to
621 * acquire the critical section through a ring-3
622 * call if necessary.
623 * @param uId Some kind of locking location ID. Typically a
624 * return address up the stack. Optional (0).
625 * @param SRC_POS The source position where the lock is being
626 * acquired from. Optional.
627 */
628VMMDECL(DECL_CHECK_RETURN_NOT_R3(int))
629PDMCritSectEnterDebug(PVMCC pVM, PPDMCRITSECT pCritSect, int rcBusy, RTHCUINTPTR uId, RT_SRC_POS_DECL)
630{
631#ifdef PDMCRITSECT_STRICT
632 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
633 return pdmCritSectEnter(pVM, pCritSect, rcBusy, &SrcPos);
634#else
635 NOREF(uId); RT_SRC_POS_NOREF();
636 return pdmCritSectEnter(pVM, pCritSect, rcBusy, NULL);
637#endif
638}
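/* Example (editor's illustration, not part of the original source): the debug
 * variant only adds lock-origin information for the lock validator; a caller
 * would typically pass its own return address as uId together with RT_SRC_POS.
 * ASMReturnAddress() from iprt/asm.h is assumed to be available here.
 *
 *    int rc = PDMCritSectEnterDebug(pVM, &pThis->CritSect, VERR_SEM_BUSY,
 *                                   (RTHCUINTPTR)ASMReturnAddress(), RT_SRC_POS);
 */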
639
640
641/**
642 * Common worker for the debug and normal APIs.
643 *
644 * @retval VINF_SUCCESS on success.
645 * @retval VERR_SEM_BUSY if the critsect was owned.
646 * @retval VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
647 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
648 * during the operation.
649 *
650 * @param pVM The cross context VM structure.
651 * @param pCritSect The critical section.
652 * @param pSrcPos The source position of the lock operation.
653 */
654static int pdmCritSectTryEnter(PVMCC pVM, PPDMCRITSECT pCritSect, PCRTLOCKVALSRCPOS pSrcPos)
655{
656 /*
657 * If the critical section has already been destroyed, then inform the caller.
658 */
659 AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC,
660 ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic),
661 VERR_SEM_DESTROYED);
662
663 /*
664 * See if we're lucky.
665 */
666 /* NOP ... */
667 if (!(pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NOP))
668 { /* We're more likely to end up here with real critsects than a NOP one. */ }
669 else
670 return VINF_SUCCESS;
671
672 RTNATIVETHREAD hNativeSelf = pdmCritSectGetNativeSelf(pVM, pCritSect);
673 AssertReturn(hNativeSelf != NIL_RTNATIVETHREAD, VERR_VM_THREAD_NOT_EMT);
674 /* ... not owned ... */
675 if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
676 return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
677
678 /* ... or nested. */
679 if (pCritSect->s.Core.NativeThreadOwner == hNativeSelf)
680 {
681 Assert(pCritSect->s.Core.cNestings >= 1);
682# ifdef PDMCRITSECT_WITH_LESS_ATOMIC_STUFF
683 pCritSect->s.Core.cNestings += 1;
684# else
685 ASMAtomicIncS32(&pCritSect->s.Core.cNestings);
686# endif
687 ASMAtomicIncS32(&pCritSect->s.Core.cLockers);
688 Log12Func(("%p: cNestings=%d cLockers=%d\n", pCritSect, pCritSect->s.Core.cNestings, pCritSect->s.Core.cLockers));
689 return VINF_SUCCESS;
690 }
691
692 /* no spinning */
693
694 /*
695 * Return busy.
696 */
697#ifdef IN_RING3
698 STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionR3);
699#else
700 STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLockBusy);
701#endif
702 LogFlow(("PDMCritSectTryEnter: locked\n"));
703 return VERR_SEM_BUSY;
704}
705
706
707/**
708 * Try enter a critical section.
709 *
710 * @retval VINF_SUCCESS on success.
711 * @retval VERR_SEM_BUSY if the critsect was owned.
712 * @retval VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
713 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
714 * during the operation.
715 *
716 * @param pVM The cross context VM structure.
717 * @param pCritSect The critical section.
718 */
719VMMDECL(DECL_CHECK_RETURN(int)) PDMCritSectTryEnter(PVMCC pVM, PPDMCRITSECT pCritSect)
720{
721#ifndef PDMCRITSECT_STRICT
722 return pdmCritSectTryEnter(pVM, pCritSect, NULL);
723#else
724 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
725 return pdmCritSectTryEnter(pVM, pCritSect, &SrcPos);
726#endif
727}
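/* Example (editor's illustration, not part of the original source): a
 * non-blocking acquisition attempt, deferring the work when the section is
 * owned by somebody else. pThis is hypothetical device state.
 *
 *    int rc = PDMCritSectTryEnter(pVM, &pThis->CritSect);
 *    if (rc == VINF_SUCCESS)
 *    {
 *        // ... do the optional work ...
 *        PDMCritSectLeave(pVM, &pThis->CritSect);
 *    }
 *    else
 *        Assert(rc == VERR_SEM_BUSY); // defer the work; the current owner holds the section
 */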
728
729
730/**
731 * Try enter a critical section, with location information for debugging.
732 *
733 * @retval VINF_SUCCESS on success.
734 * @retval VERR_SEM_BUSY if the critsect was owned.
735 * @retval VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
736 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
737 * during the operation.
738 *
739 * @param pVM The cross context VM structure.
740 * @param pCritSect The critical section.
741 * @param uId Some kind of locking location ID. Typically a
742 * return address up the stack. Optional (0).
743 * @param SRC_POS The source position where the lock is being
744 * acquired from. Optional.
745 */
746VMMDECL(DECL_CHECK_RETURN(int))
747PDMCritSectTryEnterDebug(PVMCC pVM, PPDMCRITSECT pCritSect, RTHCUINTPTR uId, RT_SRC_POS_DECL)
748{
749#ifdef PDMCRITSECT_STRICT
750 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
751 return pdmCritSectTryEnter(pVM, pCritSect, &SrcPos);
752#else
753 NOREF(uId); RT_SRC_POS_NOREF();
754 return pdmCritSectTryEnter(pVM, pCritSect, NULL);
755#endif
756}
757
758
759#ifdef IN_RING3
760/**
761 * Enters a PDM critical section.
762 *
763 * @returns VINF_SUCCESS if entered successfully.
764 * @returns rcBusy when encountering a busy critical section in GC/R0.
765 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
766 * during the operation.
767 *
768 * @param pVM The cross context VM structure.
769 * @param pCritSect The PDM critical section to enter.
770 * @param fCallRing3 Whether this is a VMMRZCallRing3() request.
771 */
772VMMR3DECL(int) PDMR3CritSectEnterEx(PVM pVM, PPDMCRITSECT pCritSect, bool fCallRing3)
773{
774 int rc = PDMCritSectEnter(pVM, pCritSect, VERR_IGNORED);
775 if ( rc == VINF_SUCCESS
776 && fCallRing3
777 && pCritSect->s.Core.pValidatorRec
778 && pCritSect->s.Core.pValidatorRec->hThread != NIL_RTTHREAD)
779 RTLockValidatorRecExclReleaseOwnerUnchecked(pCritSect->s.Core.pValidatorRec);
780 return rc;
781}
782#endif /* IN_RING3 */
783
784
785/**
786 * Leaves a critical section entered with PDMCritSectEnter().
787 *
788 * @returns Indication whether we really exited the critical section.
789 * @retval VINF_SUCCESS if we really exited.
790 * @retval VINF_SEM_NESTED if we only reduced the nesting count.
791 * @retval VERR_NOT_OWNER if you somehow ignore release assertions.
792 *
793 * @param pVM The cross context VM structure.
794 * @param pCritSect The PDM critical section to leave.
795 *
796 * @remarks Can be called from no-ring-3-call context in ring-0 (TM/VirtualSync)
797 * where we'll queue the leave operation for ring-3 processing.
798 */
799VMMDECL(int) PDMCritSectLeave(PVMCC pVM, PPDMCRITSECT pCritSect)
800{
801 AssertMsg(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic));
802 Assert(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC);
803
804 /*
805 * Check for NOP sections before asserting ownership.
806 */
807 if (!(pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NOP))
808 { /* We're more likely to end up here with real critsects than a NOP one. */ }
809 else
810 return VINF_SUCCESS;
811
812 /*
813 * Always check that the caller is the owner (screw performance).
814 */
815 RTNATIVETHREAD const hNativeSelf = pdmCritSectGetNativeSelf(pVM, pCritSect);
816 VMM_ASSERT_RELEASE_MSG_RETURN(pVM, pCritSect->s.Core.NativeThreadOwner == hNativeSelf && hNativeSelf != NIL_RTNATIVETHREAD,
817 ("%p %s: %p != %p; cLockers=%d cNestings=%d\n", pCritSect, R3STRING(pCritSect->s.pszName),
818 pCritSect->s.Core.NativeThreadOwner, hNativeSelf,
819 pCritSect->s.Core.cLockers, pCritSect->s.Core.cNestings),
820 VERR_NOT_OWNER);
821
822 /*
823 * Nested leave.
824 */
825 int32_t const cNestings = pCritSect->s.Core.cNestings;
826 Assert(cNestings >= 1);
827 if (cNestings > 1)
828 {
829#ifdef PDMCRITSECT_WITH_LESS_ATOMIC_STUFF
830 pCritSect->s.Core.cNestings = cNestings - 1;
831#else
832 ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, cNestings - 1);
833#endif
834 int32_t const cLockers = ASMAtomicDecS32(&pCritSect->s.Core.cLockers);
835 Assert(cLockers >= 0); RT_NOREF(cLockers);
836 Log12Func(("%p: cNestings=%d cLockers=%d\n", pCritSect, cNestings - 1, cLockers));
837 return VINF_SEM_NESTED;
838 }
839
840 Log12Func(("%p: cNestings=%d cLockers=%d hOwner=%p - leave for real\n",
841 pCritSect, cNestings, pCritSect->s.Core.cLockers, pCritSect->s.Core.NativeThreadOwner));
842
843#ifdef IN_RING3
844 /*
845 * Ring-3: Leave for real.
846 */
847 SUPSEMEVENT const hEventToSignal = pCritSect->s.hEventToSignal;
848 pCritSect->s.hEventToSignal = NIL_SUPSEMEVENT;
849
850# if defined(PDMCRITSECT_STRICT)
851 if (pCritSect->s.Core.pValidatorRec->hThread != NIL_RTTHREAD)
852 RTLockValidatorRecExclReleaseOwnerUnchecked(pCritSect->s.Core.pValidatorRec);
853# endif
854 Assert(!pCritSect->s.Core.pValidatorRec || pCritSect->s.Core.pValidatorRec->hThread == NIL_RTTHREAD);
855
856# ifdef PDMCRITSECT_WITH_LESS_ATOMIC_STUFF
857 //pCritSect->s.Core.cNestings = 0; /* not really needed */
858 pCritSect->s.Core.NativeThreadOwner = NIL_RTNATIVETHREAD;
859# else
860 ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 0);
861 ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD);
862# endif
863 ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
864
865 /* Stop profiling and decrement lockers. */
866 STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);
867 ASMCompilerBarrier();
868 int32_t const cLockers = ASMAtomicDecS32(&pCritSect->s.Core.cLockers);
869 if (cLockers < 0)
870 AssertMsg(cLockers == -1, ("cLockers=%d\n", cLockers));
871 else
872 {
873 /* Someone is waiting, wake up one of them. */
874 Assert(cLockers < _8K);
875 Log8(("PDMCritSectLeave: Waking up %p (cLockers=%u)\n", pCritSect, cLockers));
876 SUPSEMEVENT hEvent = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
877 int rc = SUPSemEventSignal(pVM->pSession, hEvent);
878 AssertRC(rc);
879 }
880
881 /* Signal exit event. */
882 if (RT_LIKELY(hEventToSignal == NIL_SUPSEMEVENT))
883 { /* likely */ }
884 else
885 {
886 Log8(("PDMCritSectLeave: Signalling %#p (%p)\n", hEventToSignal, pCritSect));
887 int rc = SUPSemEventSignal(pVM->pSession, hEventToSignal);
888 AssertRC(rc);
889 }
890
891 return VINF_SUCCESS;
892
893
894#elif defined(IN_RING0)
895 /*
896 * Ring-0: Try to leave for real; this depends on the host and context.
897 */
898 SUPSEMEVENT const hEventToSignal = pCritSect->s.hEventToSignal;
899 pCritSect->s.hEventToSignal = NIL_SUPSEMEVENT;
900 PVMCPUCC pVCpu = VMMGetCpu(pVM);
901 bool fQueueOnTrouble = false; /* Set this to true to test queueing. */
902 if ( pVCpu == NULL /* non-EMT access, if we implement it must be able to block */
903 || VMMRZCallRing3IsEnabled(pVCpu)
904 || RTSemEventIsSignalSafe()
905 || ( VMMR0ThreadCtxHookIsEnabled(pVCpu) /* Doesn't matter if Signal() blocks if we have hooks, ... */
906 && RTThreadPreemptIsEnabled(NIL_RTTHREAD) /* ... and preemption is still enabled, */
907 && ASMIntAreEnabled()) /* ... and interrupts haven't yet been disabled. Special pre-GC HM env. */
908 || (fQueueOnTrouble = ( hEventToSignal == NIL_SUPSEMEVENT
909 && ASMAtomicUoReadS32(&pCritSect->s.Core.cLockers) == 0)) )
910 {
911 pCritSect->s.hEventToSignal = NIL_SUPSEMEVENT;
912
913# ifdef PDMCRITSECT_WITH_LESS_ATOMIC_STUFF
914 //pCritSect->s.Core.cNestings = 0; /* not really needed */
915 pCritSect->s.Core.NativeThreadOwner = NIL_RTNATIVETHREAD;
916# else
917 ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 0);
918 ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD);
919# endif
920 ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
921
922 /*
923 * Stop profiling and decrement lockers.
924 */
925 STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);
926 ASMCompilerBarrier();
927
928 bool fQueueIt = false;
929 int32_t cLockers;
930 if (!fQueueOnTrouble)
931 cLockers = ASMAtomicDecS32(&pCritSect->s.Core.cLockers);
932 else
933 {
934 cLockers = -1;
935 if (!ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, -1, 0))
936 fQueueIt = true;
937 }
938 if (!fQueueIt)
939 {
940 VMMR0EMTBLOCKCTX Ctx;
941 bool fLeaveCtx = false;
942 if (cLockers < 0)
943 AssertMsg(cLockers == -1, ("cLockers=%d\n", cLockers));
944 else
945 {
946 /* Someone is waiting, wake up one of them. */
947 Assert(cLockers < _8K);
948 SUPSEMEVENT hEvent = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
949 if (!RTSemEventIsSignalSafe() && (pVCpu = VMMGetCpu(pVM)) != NULL)
950 {
951 int rc = VMMR0EmtPrepareToBlock(pVCpu, VINF_SUCCESS, __FUNCTION__, pCritSect, &Ctx);
952 VMM_ASSERT_RELEASE_MSG_RETURN(pVM, RT_SUCCESS(rc), ("rc=%Rrc\n", rc), rc);
953 fLeaveCtx = true;
954 }
955 int rc = SUPSemEventSignal(pVM->pSession, hEvent);
956 AssertRC(rc);
957 }
958
959 /*
960 * Signal exit event.
961 */
962 if (RT_LIKELY(hEventToSignal == NIL_SUPSEMEVENT))
963 { /* likely */ }
964 else
965 {
966 if (!fLeaveCtx && pVCpu != NULL && !RTSemEventIsSignalSafe() && (pVCpu = VMMGetCpu(pVM)) != NULL)
967 {
968 int rc = VMMR0EmtPrepareToBlock(pVCpu, VINF_SUCCESS, __FUNCTION__, pCritSect, &Ctx);
969 VMM_ASSERT_RELEASE_MSG_RETURN(pVM, RT_SUCCESS(rc), ("rc=%Rrc\n", rc), rc);
970 fLeaveCtx = true;
971 }
972 Log8(("Signalling %#p\n", hEventToSignal));
973 int rc = SUPSemEventSignal(pVM->pSession, hEventToSignal);
974 AssertRC(rc);
975 }
976
977 /*
978 * Restore HM context if needed.
979 */
980 if (!fLeaveCtx)
981 { /* contention should be unlikely */ }
982 else
983 VMMR0EmtResumeAfterBlocking(pVCpu, &Ctx);
984
985# ifdef DEBUG_bird
986 VMMTrashVolatileXMMRegs();
987# endif
988 return VINF_SUCCESS;
989 }
990
991 /*
992 * Darn, someone raced in on us. Restore the state (this works only
993 * because the semaphore is effectively controlling ownership).
994 */
995 bool fRc;
996 RTNATIVETHREAD hMessOwner = NIL_RTNATIVETHREAD;
997 ASMAtomicCmpXchgExHandle(&pCritSect->s.Core.NativeThreadOwner, hNativeSelf, NIL_RTNATIVETHREAD, fRc, &hMessOwner);
998 AssertLogRelMsgReturn(fRc, ("pCritSect=%p hMessOwner=%p\n", pCritSect, hMessOwner),
999 pdmCritSectCorrupted(pCritSect, "owner race"));
1000 STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
1001# ifdef PDMCRITSECT_WITH_LESS_ATOMIC_STUFF
1002 //pCritSect->s.Core.cNestings = 1;
1003 Assert(pCritSect->s.Core.cNestings == 1);
1004# else
1005 //Assert(pCritSect->s.Core.cNestings == 0);
1006 ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 1);
1007# endif
1008 Assert(hEventToSignal == NIL_SUPSEMEVENT);
1009 }
1010
1011
1012#else /* IN_RC */
1013 /*
1014 * Raw-mode: Try leave it.
1015 */
1016# error "This context is not use..."
1017 if (pCritSect->s.Core.cLockers == 0)
1018 {
1019# ifdef PDMCRITSECT_WITH_LESS_ATOMIC_STUFF
1020 //pCritSect->s.Core.cNestings = 0; /* not really needed */
1021# else
1022 ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 0);
1023# endif
1024 ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
1025 STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);
1026
1027 ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD);
1028 if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, -1, 0))
1029 return VINF_SUCCESS;
1030
1031 /*
1032 * Darn, someone raced in on us. Restore the state (this works only
1033 * because the semaphore is effectively controlling ownership).
1034 */
1035 bool fRc;
1036 RTNATIVETHREAD hMessOwner = NIL_RTNATIVETHREAD;
1037 ASMAtomicCmpXchgExHandle(&pCritSect->s.Core.NativeThreadOwner, hNativeSelf, NIL_RTNATIVETHREAD, fRc, &hMessOwner);
1038 AssertLogRelMsgReturn(fRc, ("pCritSect=%p hMessOwner=%p\n", pCritSect, hMessOwner),
1039 pdmCritSectCorrupted(pCritSect, "owner race"));
1040 STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
1041# ifdef PDMCRITSECT_WITH_LESS_ATOMIC_STUFF
1042 //pCritSect->s.Core.cNestings = 1;
1043 Assert(pCritSect->s.Core.cNestings == 1);
1044# else
1045 //Assert(pCritSect->s.Core.cNestings == 0);
1046 ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 1);
1047# endif
1048 }
1049#endif /* IN_RC */
1050
1051
1052#ifndef IN_RING3
1053 /*
1054 * Ring-0/raw-mode: Unable to leave. Queue the leave for ring-3.
1055 */
1056 ASMAtomicOrU32(&pCritSect->s.Core.fFlags, PDMCRITSECT_FLAGS_PENDING_UNLOCK);
1057# ifndef IN_RING0
1058 PVMCPUCC pVCpu = VMMGetCpu(pVM);
1059# endif
1060 uint32_t i = pVCpu->pdm.s.cQueuedCritSectLeaves++;
1061 LogFlow(("PDMCritSectLeave: [%d]=%p => R3\n", i, pCritSect));
1062 VMM_ASSERT_RELEASE_MSG_RETURN(pVM, i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectLeaves), ("%d\n", i), VERR_PDM_CRITSECT_IPE);
1063 pVCpu->pdm.s.apQueuedCritSectLeaves[i] = pCritSect->s.pSelfR3;
1064 VMM_ASSERT_RELEASE_MSG_RETURN(pVM,
1065 RT_VALID_PTR(pVCpu->pdm.s.apQueuedCritSectLeaves[i])
1066 && ((uintptr_t)pVCpu->pdm.s.apQueuedCritSectLeaves[i] & PAGE_OFFSET_MASK)
1067 == ((uintptr_t)pCritSect & PAGE_OFFSET_MASK),
1068 ("%p vs %p\n", pVCpu->pdm.s.apQueuedCritSectLeaves[i], pCritSect),
1069 pdmCritSectCorrupted(pCritSect, "Invalid pSelfR3 value"));
1070 VMCPU_FF_SET(pVCpu, VMCPU_FF_PDM_CRITSECT); /** @todo handle VMCPU_FF_PDM_CRITSECT in ring-0 outside the no-call-ring-3 part. */
1071 VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3); /* unnecessary paranoia */
1072 STAM_REL_COUNTER_INC(&pVM->pdm.s.StatQueuedCritSectLeaves);
1073 STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZUnlock);
1074
1075 return VINF_SUCCESS;
1076#endif /* IN_RING3 */
1077}
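/* Example (editor's illustration, not part of the original source): leave
 * mirrors enter and unwinds one nesting level at a time; only the final leave
 * really releases the section (uncontended ring-3 case shown, pThis is
 * hypothetical device state).
 *
 *    PDMCritSectEnter(pVM, &pThis->CritSect, VERR_SEM_BUSY);  // owner, cNestings = 1
 *    PDMCritSectEnter(pVM, &pThis->CritSect, VERR_SEM_BUSY);  // nested, cNestings = 2
 *    int rc1 = PDMCritSectLeave(pVM, &pThis->CritSect);       // VINF_SEM_NESTED
 *    int rc2 = PDMCritSectLeave(pVM, &pThis->CritSect);       // VINF_SUCCESS, really left
 */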
1078
1079
1080#if defined(IN_RING0) || defined(IN_RING3)
1081/**
1082 * Schedules an event semaphore for signalling upon critsect exit.
1083 *
1084 * @returns VINF_SUCCESS on success.
1085 * @returns VERR_TOO_MANY_SEMAPHORES if an event was already scheduled.
1086 * @returns VERR_NOT_OWNER if we're not the critsect owner (ring-3 only).
1087 * @returns VERR_SEM_DESTROYED if RTCritSectDelete was called while waiting.
1088 *
1089 * @param pCritSect The critical section.
1090 * @param hEventToSignal The support driver event semaphore that should be
1091 * signalled.
1092 */
1093VMMDECL(int) PDMHCCritSectScheduleExitEvent(PPDMCRITSECT pCritSect, SUPSEMEVENT hEventToSignal)
1094{
1095 AssertPtr(pCritSect);
1096 Assert(!(pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NOP));
1097 Assert(hEventToSignal != NIL_SUPSEMEVENT);
1098# ifdef IN_RING3
1099 if (RT_UNLIKELY(!RTCritSectIsOwner(&pCritSect->s.Core)))
1100 return VERR_NOT_OWNER;
1101# endif
1102 if (RT_LIKELY( pCritSect->s.hEventToSignal == NIL_RTSEMEVENT
1103 || pCritSect->s.hEventToSignal == hEventToSignal))
1104 {
1105 pCritSect->s.hEventToSignal = hEventToSignal;
1106 return VINF_SUCCESS;
1107 }
1108 return VERR_TOO_MANY_SEMAPHORES;
1109}
1110#endif /* IN_RING0 || IN_RING3 */
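/* Example (editor's illustration, not part of the original source): arranging
 * for a support driver event to be signalled as part of leaving the section.
 * hEvent is a hypothetical SUPSEMEVENT created by the caller, and the call is
 * made while owning the section.
 *
 *    int rc = PDMHCCritSectScheduleExitEvent(&pThis->CritSect, hEvent);
 *    if (RT_SUCCESS(rc))
 *        PDMCritSectLeave(pVM, &pThis->CritSect); // signals hEvent on the way out
 */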
1111
1112
1113/**
1114 * Checks that the caller is the owner of the critical section.
1115 *
1116 * @returns true if owner.
1117 * @returns false if not owner.
1118 * @param pVM The cross context VM structure.
1119 * @param pCritSect The critical section.
1120 */
1121VMMDECL(bool) PDMCritSectIsOwner(PVMCC pVM, PCPDMCRITSECT pCritSect)
1122{
1123#ifdef IN_RING3
1124 RT_NOREF(pVM);
1125 return RTCritSectIsOwner(&pCritSect->s.Core);
1126#else
1127 PVMCPUCC pVCpu = VMMGetCpu(pVM);
1128 if ( !pVCpu
1129 || pCritSect->s.Core.NativeThreadOwner != pVCpu->hNativeThread)
1130 return false;
1131 return (pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK) == 0
1132 || pCritSect->s.Core.cNestings > 1;
1133#endif
1134}
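/* Example (editor's illustration, not part of the original source): ownership
 * checks are typically used in assertions guarding code that requires the
 * caller to already hold the section (pThis is hypothetical device state).
 *
 *    Assert(PDMCritSectIsOwner(pVM, &pThis->CritSect));
 *    // ... code that must only run while owning pThis->CritSect ...
 */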
1135
1136
1137/**
1138 * Checks that the specified VCPU is the owner of the critical section.
1139 *
1140 * @returns true if owner.
1141 * @returns false if not owner.
1142 * @param pVCpu The cross context virtual CPU structure.
1143 * @param pCritSect The critical section.
1144 */
1145VMMDECL(bool) PDMCritSectIsOwnerEx(PVMCPUCC pVCpu, PCPDMCRITSECT pCritSect)
1146{
1147#ifdef IN_RING3
1148 NOREF(pVCpu);
1149 return RTCritSectIsOwner(&pCritSect->s.Core);
1150#else
1151 Assert(VMCC_GET_CPU(pVCpu->CTX_SUFF(pVM), pVCpu->idCpu) == pVCpu);
1152 if (pCritSect->s.Core.NativeThreadOwner != pVCpu->hNativeThread)
1153 return false;
1154 return (pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK) == 0
1155 || pCritSect->s.Core.cNestings > 1;
1156#endif
1157}
1158
1159
1160/**
1161 * Checks if anyone is waiting on the critical section we own.
1162 *
1163 * @returns true if someone is waiting.
1164 * @returns false if no one is waiting.
1165 * @param pVM The cross context VM structure.
1166 * @param pCritSect The critical section.
1167 */
1168VMMDECL(bool) PDMCritSectHasWaiters(PVMCC pVM, PCPDMCRITSECT pCritSect)
1169{
1170 AssertReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, false);
1171 Assert(pCritSect->s.Core.NativeThreadOwner == pdmCritSectGetNativeSelf(pVM, pCritSect)); RT_NOREF(pVM);
1172 return pCritSect->s.Core.cLockers >= pCritSect->s.Core.cNestings;
1173}
1174
1175
1176/**
1177 * Checks if a critical section is initialized or not.
1178 *
1179 * @returns true if initialized.
1180 * @returns false if not initialized.
1181 * @param pCritSect The critical section.
1182 */
1183VMMDECL(bool) PDMCritSectIsInitialized(PCPDMCRITSECT pCritSect)
1184{
1185 return RTCritSectIsInitialized(&pCritSect->s.Core);
1186}
1187
1188
1189/**
1190 * Gets the recursion depth.
1191 *
1192 * @returns The recursion depth.
1193 * @param pCritSect The critical section.
1194 */
1195VMMDECL(uint32_t) PDMCritSectGetRecursion(PCPDMCRITSECT pCritSect)
1196{
1197 return RTCritSectGetRecursion(&pCritSect->s.Core);
1198}
1199