VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PDMAllCritSect.cpp@ 99813

Last change on this file since 99813 was 98743, checked in by vboxsync, 22 months ago

VMM/PDMAllCritSect: comment typo

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 46.1 KB
1/* $Id: PDMAllCritSect.cpp 98743 2023-02-26 14:08:49Z vboxsync $ */
2/** @file
3 * PDM - Write-Only Critical Section, All Contexts.
4 */
5
6/*
7 * Copyright (C) 2006-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_PDM_CRITSECT
33#include "PDMInternal.h"
34#include <VBox/vmm/pdmcritsect.h>
35#include <VBox/vmm/mm.h>
36#include <VBox/vmm/vmm.h>
37#include <VBox/vmm/vmcc.h>
38#include <VBox/err.h>
39#include <VBox/vmm/hm.h>
40
41#include <VBox/log.h>
42#include <iprt/asm.h>
43#include <iprt/assert.h>
44#ifdef IN_RING3
45# include <iprt/lockvalidator.h>
46#endif
47#if defined(IN_RING3) || defined(IN_RING0)
48# include <iprt/semaphore.h>
49#endif
50#ifdef IN_RING0
51# include <iprt/time.h>
52#endif
53#if defined(IN_RING3) || defined(IN_RING0)
54# include <iprt/thread.h>
55#endif
56
57
58/*********************************************************************************************************************************
59* Defined Constants And Macros *
60*********************************************************************************************************************************/
61/** The number of loops to spin for in ring-3. */
62#define PDMCRITSECT_SPIN_COUNT_R3 20
63/** The number of loops to spin for in ring-0. */
64#define PDMCRITSECT_SPIN_COUNT_R0 256
65/** The number of loops to spin for in the raw-mode context. */
66#define PDMCRITSECT_SPIN_COUNT_RC 256
67
68
69/** Skips some of the overly paranoid atomic updates.
70 * Makes some assumptions about cache coherence, though not brave enough to
71 * skip the concluding atomic update. */
72#define PDMCRITSECT_WITH_LESS_ATOMIC_STUFF
73
74/* Undefine the automatic VBOX_STRICT API mappings. */
75#undef PDMCritSectEnter
76#undef PDMCritSectTryEnter
77
78
79/**
80 * Gets the ring-3 native thread handle of the calling thread.
81 *
82 * @returns native thread handle (ring-3).
83 * @param pVM The cross context VM structure.
84 * @param pCritSect The critical section. This is used in R0 and RC.
85 */
86DECL_FORCE_INLINE(RTNATIVETHREAD) pdmCritSectGetNativeSelf(PVMCC pVM, PCPDMCRITSECT pCritSect)
87{
88#ifdef IN_RING3
89 RT_NOREF(pVM, pCritSect);
90 RTNATIVETHREAD hNativeSelf = RTThreadNativeSelf();
91
92#elif defined(IN_RING0)
93 AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, ("%RX32\n", pCritSect->s.Core.u32Magic),
94 NIL_RTNATIVETHREAD);
95 RTNATIVETHREAD hNativeSelf = GVMMR0GetRing3ThreadForSelf(pVM);
96 Assert(hNativeSelf != NIL_RTNATIVETHREAD);
97
98#else
99# error "Invalid context"
100#endif
101 return hNativeSelf;
102}
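/*
 * Note on the helper above: ownership is always tracked by ring-3 native
 * thread handles.  In ring-3 it simply returns RTThreadNativeSelf(), while in
 * ring-0 it resolves the ring-3 handle of the calling EMT via
 * GVMMR0GetRing3ThreadForSelf(pVM), so the same NativeThreadOwner value is
 * meaningful in every context.
 */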
103
104
105#ifdef IN_RING0
106/**
107 * Marks the critical section as corrupted.
108 */
109DECL_NO_INLINE(static, int) pdmCritSectCorrupted(PPDMCRITSECT pCritSect, const char *pszMsg)
110{
111 ASMAtomicWriteU32(&pCritSect->s.Core.u32Magic, PDMCRITSECT_MAGIC_CORRUPTED);
112 LogRel(("PDMCritSect: %s pCritSect=%p\n", pszMsg, pCritSect));
113 return VERR_PDM_CRITSECT_IPE;
114}
115#endif
116
117
118/**
119 * Tail code called when we've won the battle for the lock.
120 *
121 * @returns VINF_SUCCESS.
122 *
123 * @param pCritSect The critical section.
124 * @param hNativeSelf The native handle of this thread.
125 * @param pSrcPos The source position of the lock operation.
126 */
127DECL_FORCE_INLINE(int) pdmCritSectEnterFirst(PPDMCRITSECT pCritSect, RTNATIVETHREAD hNativeSelf, PCRTLOCKVALSRCPOS pSrcPos)
128{
129 Assert(hNativeSelf != NIL_RTNATIVETHREAD);
130 AssertMsg(pCritSect->s.Core.NativeThreadOwner == NIL_RTNATIVETHREAD, ("NativeThreadOwner=%p\n", pCritSect->s.Core.NativeThreadOwner));
131 Assert(!(pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK));
132
133# ifdef PDMCRITSECT_WITH_LESS_ATOMIC_STUFF
134 pCritSect->s.Core.cNestings = 1;
135# else
136 ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 1);
137# endif
138 Assert(pCritSect->s.Core.cNestings == 1);
139 ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, hNativeSelf);
140
141# ifdef PDMCRITSECT_STRICT
142 RTLockValidatorRecExclSetOwner(pCritSect->s.Core.pValidatorRec, NIL_RTTHREAD, pSrcPos, true);
143# else
144 NOREF(pSrcPos);
145# endif
146 if (pSrcPos)
147 Log12Func(("%p: uId=%p ln=%u fn=%s\n", pCritSect, pSrcPos->uId, pSrcPos->uLine, pSrcPos->pszFunction));
148 else
149 Log12Func(("%p\n", pCritSect));
150
151 STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
152 return VINF_SUCCESS;
153}
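/*
 * Note on the lock state encoding used throughout this file: cLockers == -1
 * means the section is free, 0 means it is owned with no waiters, and each
 * additional nesting or waiter adds one.  pdmCritSectEnterFirst is only called
 * once the caller has won the lock, either by the -1 -> 0 compare-and-exchange,
 * by incrementing cLockers to 0, or by being woken through the event semaphore
 * by the previous owner, so it can simply set cNestings to 1 and publish itself
 * as NativeThreadOwner.
 */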
154
155
156#if defined(IN_RING3) || defined(IN_RING0)
157/**
158 * Deals with the contended case in ring-3 and ring-0.
159 *
160 * @retval VINF_SUCCESS on success.
161 * @retval VERR_SEM_DESTROYED if destroyed.
162 *
163 * @param pVM The cross context VM structure.
164 * @param pVCpu The cross context virtual CPU structure if ring-0 and on
165 * an EMT, otherwise NULL.
166 * @param pCritSect The critsect.
167 * @param hNativeSelf The native thread handle.
168 * @param pSrcPos The source position of the lock operation.
169 * @param rcBusy The status code to return when we're in RC or R0.
170 */
171static int pdmR3R0CritSectEnterContended(PVMCC pVM, PVMCPU pVCpu, PPDMCRITSECT pCritSect, RTNATIVETHREAD hNativeSelf,
172 PCRTLOCKVALSRCPOS pSrcPos, int rcBusy)
173{
174# ifdef IN_RING0
175 /*
176 * If we've got queued critical section leave operations and rcBusy isn't
177 * VINF_SUCCESS, return to ring-3 immediately to avoid deadlocks.
178 */
179 if ( !pVCpu
180 || !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PDM_CRITSECT)
181 || rcBusy == VINF_SUCCESS )
182 { /* likely */ }
183 else
184 {
185 /** @todo statistics. */
186 STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
187 return rcBusy;
188 }
189# endif
190
191 /*
192 * Start waiting.
193 */
194 if (ASMAtomicIncS32(&pCritSect->s.Core.cLockers) == 0)
195 return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
196# ifdef IN_RING3
197 STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionR3);
198# else
199 STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
200# endif
201
202 /*
203 * The wait loop.
204 *
205 * This handles VERR_TIMEOUT and VERR_INTERRUPTED.
206 */
207 STAM_REL_PROFILE_START(&pCritSect->s.CTX_MID_Z(StatContention,Wait), a);
208 PSUPDRVSESSION const pSession = pVM->pSession;
209 SUPSEMEVENT const hEvent = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
210# ifdef IN_RING3
211# ifdef PDMCRITSECT_STRICT
212 RTTHREAD const hThreadSelf = RTThreadSelfAutoAdopt();
213 int rc2 = RTLockValidatorRecExclCheckOrder(pCritSect->s.Core.pValidatorRec, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
214 if (RT_FAILURE(rc2))
215 return rc2;
216# else
217 RTTHREAD const hThreadSelf = RTThreadSelf();
218# endif
219# else /* IN_RING0 */
220 uint64_t const tsStart = RTTimeNanoTS();
221 uint64_t const cNsMaxTotalDef = RT_NS_5MIN;
222 uint64_t cNsMaxTotal = cNsMaxTotalDef;
223 uint64_t const cNsMaxRetry = RT_NS_15SEC;
224 uint32_t cMsMaxOne = RT_MS_5SEC;
225 bool fNonInterruptible = false;
226# endif
227 for (;;)
228 {
229 /*
230 * Do the wait.
231 *
232 * In ring-3 this gets cluttered by lock validation and thread state
233 * maintenance.
234 *
235 * In ring-0 we have to deal with the possibility that the thread has
236 * been signalled and the interruptible wait function returns
237 * immediately. In that case we do normal R0/RC rcBusy handling.
238 *
239 * We always do a timed wait here, so the event handle is revalidated
240 * regularly and we won't end up stuck waiting for a destroyed critsect.
241 */
242 /** @todo Make SUPSemEventClose wake up all waiters. */
243# ifdef IN_RING3
244# ifdef PDMCRITSECT_STRICT
245 int rc9 = RTLockValidatorRecExclCheckBlocking(pCritSect->s.Core.pValidatorRec, hThreadSelf, pSrcPos,
246 !(pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NO_NESTING),
247 RT_INDEFINITE_WAIT, RTTHREADSTATE_CRITSECT, true);
248 if (RT_FAILURE(rc9))
249 return rc9;
250# else
251 RTThreadBlocking(hThreadSelf, RTTHREADSTATE_CRITSECT, true);
252# endif
253 int const rc = SUPSemEventWaitNoResume(pSession, hEvent, RT_MS_5SEC);
254 RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_CRITSECT);
255# else /* IN_RING0 */
256 int const rc = !fNonInterruptible
257 ? SUPSemEventWaitNoResume(pSession, hEvent, cMsMaxOne)
258 : SUPSemEventWait(pSession, hEvent, cMsMaxOne);
259 Log11Func(("%p: rc=%Rrc %'RU64 ns (cMsMaxOne=%RU64 hOwner=%p)\n",
260 pCritSect, rc, RTTimeNanoTS() - tsStart, cMsMaxOne, pCritSect->s.Core.NativeThreadOwner));
261# endif /* IN_RING0 */
262
263 /*
264 * Make sure the critical section hasn't been deleted before continuing.
265 */
266 if (RT_LIKELY(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC))
267 { /* likely */ }
268 else
269 {
270 LogRel(("PDMCritSectEnter: Destroyed while waiting; pCritSect=%p rc=%Rrc\n", pCritSect, rc));
271 return VERR_SEM_DESTROYED;
272 }
273
274 /*
275 * Most likely we're here because we got signalled.
276 */
277 if (rc == VINF_SUCCESS)
278 {
279 STAM_REL_PROFILE_STOP(&pCritSect->s.CTX_MID_Z(StatContention,Wait), a);
280 return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
281 }
282
283 /*
284 * Timeout and interrupted waits needs careful handling in ring-0
285 * because we're cooperating with ring-3 on this critical section
286 * and thus need to make absolutely sure we won't get stuck here.
287 *
288 * The r0 interrupted case means something is pending (termination,
289 * signal, APC, debugger, whatever), so we must try our best to
290 * return to the caller and to ring-3 so it can be dealt with.
291 */
292 if (RT_LIKELY(rc == VERR_TIMEOUT || rc == VERR_INTERRUPTED))
293 {
294# ifdef IN_RING0
295 uint64_t const cNsElapsed = RTTimeNanoTS() - tsStart;
296 int const rcTerm = RTThreadQueryTerminationStatus(NIL_RTTHREAD);
297 AssertMsg(rcTerm == VINF_SUCCESS || rcTerm == VERR_NOT_SUPPORTED || rcTerm == VINF_THREAD_IS_TERMINATING,
298 ("rcTerm=%Rrc\n", rcTerm));
299 if (rcTerm == VERR_NOT_SUPPORTED && cNsMaxTotal == cNsMaxTotalDef)
300 cNsMaxTotal = RT_NS_1MIN;
301
302 if (rc == VERR_TIMEOUT)
303 {
304 /* Try to get out of here with a non-VINF_SUCCESS status if
305 the thread is terminating or if the timeout has been exceeded. */
306 STAM_REL_COUNTER_INC(&pVM->pdm.s.StatCritSectVerrTimeout);
307 if ( rcTerm != VINF_THREAD_IS_TERMINATING
308 && cNsElapsed <= cNsMaxTotal)
309 continue;
310 }
311 else
312 {
313 /* For interrupt cases, we must return if we can. If rcBusy is VINF_SUCCESS,
314 we will try non-interruptible sleep for a while to help resolve the issue
315 w/o guru'ing. */
316 STAM_REL_COUNTER_INC(&pVM->pdm.s.StatCritSectVerrInterrupted);
317 if ( rcTerm != VINF_THREAD_IS_TERMINATING
318 && rcBusy == VINF_SUCCESS
319 && pVCpu != NULL
320 && cNsElapsed <= cNsMaxTotal)
321 {
322 if (!fNonInterruptible)
323 {
324 STAM_REL_COUNTER_INC(&pVM->pdm.s.StatCritSectNonInterruptibleWaits);
325 fNonInterruptible = true;
326 cMsMaxOne = 32;
327 uint64_t cNsLeft = cNsMaxTotal - cNsElapsed;
328 if (cNsLeft > RT_NS_10SEC)
329 cNsMaxTotal = cNsElapsed + RT_NS_10SEC;
330 }
331 continue;
332 }
333 }
334
335 /*
336 * Let's try to get out of here. We must very carefully undo the
337 * cLockers increment we did using compare-and-exchange so that
338 * we don't race the semaphore signalling in PDMCritSectLeave
339 * and end up with spurious wakeups and two owners at once.
340 */
341 uint32_t cNoIntWaits = 0;
342 uint32_t cCmpXchgs = 0;
343 int32_t cLockers = ASMAtomicReadS32(&pCritSect->s.Core.cLockers);
344 for (;;)
345 {
346 if (pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC)
347 {
348 if (cLockers > 0 && cCmpXchgs < _64M)
349 {
350 bool fRc = ASMAtomicCmpXchgExS32(&pCritSect->s.Core.cLockers, cLockers - 1, cLockers, &cLockers);
351 if (fRc)
352 {
353 LogFunc(("Aborting wait on %p (rc=%Rrc rcTerm=%Rrc cNsElapsed=%'RU64) -> %Rrc\n", pCritSect,
354 rc, rcTerm, cNsElapsed, rcBusy != VINF_SUCCESS ? rcBusy : rc));
355 STAM_REL_COUNTER_INC(&pVM->pdm.s.StatAbortedCritSectEnters);
356 return rcBusy != VINF_SUCCESS ? rcBusy : rc;
357 }
358 cCmpXchgs++;
359 if ((cCmpXchgs & 0xffff) == 0)
360 Log11Func(("%p: cLockers=%d cCmpXchgs=%u (hOwner=%p)\n",
361 pCritSect, cLockers, cCmpXchgs, pCritSect->s.Core.NativeThreadOwner));
362 ASMNopPause();
363 continue;
364 }
365
366 if (cLockers == 0)
367 {
368 /*
369 * We are racing someone in PDMCritSectLeave.
370 *
371 * For the VERR_TIMEOUT case we'll just retry taking it the normal
372 * way for a while. For VERR_INTERRUPTED we're in for more fun as
373 * the previous owner might not have signalled the semaphore yet,
374 * so we'll do a short non-interruptible wait instead and then guru.
375 */
376 if ( rc == VERR_TIMEOUT
377 && RTTimeNanoTS() - tsStart <= cNsMaxTotal + cNsMaxRetry)
378 break;
379
380 if ( rc == VERR_INTERRUPTED
381 && ( cNoIntWaits == 0
382 || RTTimeNanoTS() - (tsStart + cNsElapsed) < RT_NS_100MS))
383 {
384 int const rc2 = SUPSemEventWait(pSession, hEvent, 1 /*ms*/);
385 if (rc2 == VINF_SUCCESS)
386 {
387 STAM_REL_COUNTER_INC(&pVM->pdm.s.StatCritSectEntersWhileAborting);
388 STAM_REL_PROFILE_STOP(&pCritSect->s.CTX_MID_Z(StatContention,Wait), a);
389 return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
390 }
391 cNoIntWaits++;
392 cLockers = ASMAtomicReadS32(&pCritSect->s.Core.cLockers);
393 continue;
394 }
395 }
396 else
397 LogFunc(("Critical section %p has a broken cLockers count. Aborting.\n", pCritSect));
398
399 /* Sabotage the critical section and return error to caller. */
400 ASMAtomicWriteU32(&pCritSect->s.Core.u32Magic, PDMCRITSECT_MAGIC_FAILED_ABORT);
401 LogRel(("PDMCritSectEnter: Failed to abort wait on pCritSect=%p (rc=%Rrc rcTerm=%Rrc)\n",
402 pCritSect, rc, rcTerm));
403 return VERR_PDM_CRITSECT_ABORT_FAILED;
404 }
405 LogRel(("PDMCritSectEnter: Destroyed while aborting wait; pCritSect=%p/%#x rc=%Rrc rcTerm=%Rrc\n",
406 pCritSect, pCritSect->s.Core.u32Magic, rc, rcTerm));
407 return VERR_SEM_DESTROYED;
408 }
409
410 /* We get here if we timed out. Just retry now that it
411 appears someone left already. */
412 Assert(rc == VERR_TIMEOUT);
413 cMsMaxOne = 10 /*ms*/;
414
415# else /* IN_RING3 */
416 RT_NOREF(pVM, pVCpu, rcBusy);
417# endif /* IN_RING3 */
418 }
419 /*
420 * Any other return code is fatal.
421 */
422 else
423 {
424 AssertMsgFailed(("rc=%Rrc\n", rc));
425 return RT_FAILURE_NP(rc) ? rc : -rc;
426 }
427 }
428 /* won't get here */
429}
430#endif /* IN_RING3 || IN_RING0 */
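/*
 * Summary of the ring-0 wait budget implemented above: individual waits are
 * capped at RT_MS_5SEC (shrunk to 32 ms once the non-interruptible fallback
 * kicks in), the total wait defaults to RT_NS_5MIN (reduced to RT_NS_1MIN on
 * hosts where RTThreadQueryTerminationStatus is not supported), and the
 * non-interruptible fallback additionally caps the remaining time at roughly
 * RT_NS_10SEC.  Once the budget is exhausted, the cLockers increment is rolled
 * back with a compare-and-exchange and either rcBusy or the wait status is
 * returned to the caller.
 */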
431
432
433/**
434 * Common worker for the debug and normal APIs.
435 *
436 * @returns VINF_SUCCESS if entered successfully.
437 * @returns rcBusy when encountering a busy critical section in RC/R0.
438 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
439 * during the operation.
440 *
441 * @param pVM The cross context VM structure.
442 * @param pCritSect The PDM critical section to enter.
443 * @param rcBusy The status code to return when we're in RC or R0.
444 * @param pSrcPos The source position of the lock operation.
445 */
446DECL_FORCE_INLINE(int) pdmCritSectEnter(PVMCC pVM, PPDMCRITSECT pCritSect, int rcBusy, PCRTLOCKVALSRCPOS pSrcPos)
447{
448 Assert(pCritSect->s.Core.cNestings < 8); /* useful to catch incorrect locking */
449 Assert(pCritSect->s.Core.cNestings >= 0);
450#if defined(VBOX_STRICT) && defined(IN_RING0)
451 /* Hope we're not messing with critical sections while in the no-block
452 zone; that would complicate things a lot. */
453 PVMCPUCC pVCpuAssert = VMMGetCpu(pVM);
454 Assert(pVCpuAssert && VMMRZCallRing3IsEnabled(pVCpuAssert));
455#endif
456
457 /*
458 * If the critical section has already been destroyed, then inform the caller.
459 */
460 AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC,
461 ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic),
462 VERR_SEM_DESTROYED);
463
464 /*
465 * See if we're lucky.
466 */
467 /* NOP ... */
468 if (!(pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NOP))
469 { /* We're more likely to end up here with real critsects than a NOP one. */ }
470 else
471 return VINF_SUCCESS;
472
473 RTNATIVETHREAD hNativeSelf = pdmCritSectGetNativeSelf(pVM, pCritSect);
474 AssertReturn(hNativeSelf != NIL_RTNATIVETHREAD, VERR_VM_THREAD_NOT_EMT);
475 /* ... not owned ... */
476 if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
477 return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
478
479 /* ... or nested. */
480 if (pCritSect->s.Core.NativeThreadOwner == hNativeSelf)
481 {
482 Assert(pCritSect->s.Core.cNestings >= 1);
483# ifdef PDMCRITSECT_WITH_LESS_ATOMIC_STUFF
484 pCritSect->s.Core.cNestings += 1;
485# else
486 ASMAtomicIncS32(&pCritSect->s.Core.cNestings);
487# endif
488 ASMAtomicIncS32(&pCritSect->s.Core.cLockers);
489 Log12Func(("%p: cNestings=%d cLockers=%d\n", pCritSect, pCritSect->s.Core.cNestings, pCritSect->s.Core.cLockers));
490 return VINF_SUCCESS;
491 }
492
493 /*
494 * Spin for a bit without incrementing the counter.
495 */
496 /** @todo Move this to cfgm variables since it doesn't make sense to spin on UNI
497 * cpu systems. */
498 int32_t cSpinsLeft = CTX_SUFF(PDMCRITSECT_SPIN_COUNT_);
499 while (cSpinsLeft-- > 0)
500 {
501 if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
502 return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
503 ASMNopPause();
504 /** @todo Should use monitor/mwait on e.g. &cLockers here, possibly with a
505 cli'ed pendingpreemption check up front using sti w/ instruction fusing
506 for avoiding races. Hmm ... This is assuming the other party is actually
507 executing code on another CPU ... which we could keep track of if we
508 wanted. */
509 }
510
511#ifdef IN_RING3
512 /*
513 * Take the slow path.
514 */
515 NOREF(rcBusy);
516 return pdmR3R0CritSectEnterContended(pVM, NULL, pCritSect, hNativeSelf, pSrcPos, rcBusy);
517
518#elif defined(IN_RING0)
519# if 1 /* new code */
520 /*
521 * In ring-0 context we have to take the special VT-x/AMD-V HM context into
522 * account when waiting on contended locks.
523 *
524 * While we usually have the option of returning rcBusy (which may be
525 * VINF_SUCCESS) and forcing the caller to go back to ring-3 to re-start the
526 * work there, it's almost always more efficient to try to wait for the lock
527 * here. The rcBusy status will be used if we encounter a VERR_INTERRUPTED
528 * situation though.
529 */
530 PVMCPUCC pVCpu = VMMGetCpu(pVM);
531 if (pVCpu)
532 {
533 VMMR0EMTBLOCKCTX Ctx;
534 int rc = VMMR0EmtPrepareToBlock(pVCpu, rcBusy, __FUNCTION__, pCritSect, &Ctx);
535 if (rc == VINF_SUCCESS)
536 {
537 Assert(RTThreadPreemptIsEnabled(NIL_RTTHREAD));
538
539 rc = pdmR3R0CritSectEnterContended(pVM, pVCpu, pCritSect, hNativeSelf, pSrcPos, rcBusy);
540
541 VMMR0EmtResumeAfterBlocking(pVCpu, &Ctx);
542 }
543 else
544 STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLockBusy);
545 return rc;
546 }
547
548 /* Non-EMT. */
549 Assert(RTThreadPreemptIsEnabled(NIL_RTTHREAD));
550 return pdmR3R0CritSectEnterContended(pVM, NULL, pCritSect, hNativeSelf, pSrcPos, rcBusy);
551
552# else /* old code: */
553 /*
554 * If preemption hasn't been disabled, we can block here in ring-0.
555 */
556 if ( RTThreadPreemptIsEnabled(NIL_RTTHREAD)
557 && ASMIntAreEnabled())
558 return pdmR3R0CritSectEnterContended(pVM, VMMGetCpu(pVM), pCritSect, hNativeSelf, pSrcPos, rcBusy);
559
560 STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
561
562 /*
563 * Call ring-3 to acquire the critical section?
564 */
565 if (rcBusy == VINF_SUCCESS)
566 {
567 PVMCPUCC pVCpu = VMMGetCpu(pVM);
568 AssertReturn(pVCpu, VERR_PDM_CRITSECT_IPE);
569 return VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_PDM_CRIT_SECT_ENTER, MMHyperCCToR3(pVM, pCritSect));
570 }
571
572 /*
573 * Return busy.
574 */
575 LogFlow(("PDMCritSectEnter: locked => R3 (%Rrc)\n", rcBusy));
576 return rcBusy;
577# endif /* old code */
578#else
579# error "Unsupported context"
580#endif
581}
582
583
584/**
585 * Enters a PDM critical section.
586 *
587 * @returns VINF_SUCCESS if entered successfully.
588 * @returns rcBusy when encountering a busy critical section in RC/R0.
589 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
590 * during the operation.
591 *
592 * @param pVM The cross context VM structure.
593 * @param pCritSect The PDM critical section to enter.
594 * @param rcBusy The status code to return when we're in RC or R0
595 * and the section is busy. Pass VINF_SUCCESS to
596 * acquire the critical section through a ring-3
597 * call if necessary.
598 *
599 * @note Even callers setting @a rcBusy to VINF_SUCCESS must either handle
600 * possible failures in ring-0 or apply
601 * PDM_CRITSECT_RELEASE_ASSERT_RC(),
602 * PDM_CRITSECT_RELEASE_ASSERT_RC_DEV(),
603 * PDM_CRITSECT_RELEASE_ASSERT_RC_DRV() or
604 * PDM_CRITSECT_RELEASE_ASSERT_RC_USB() to the return value of this
605 * function.
606 */
607VMMDECL(DECL_CHECK_RETURN_NOT_R3(int)) PDMCritSectEnter(PVMCC pVM, PPDMCRITSECT pCritSect, int rcBusy)
608{
609#ifndef PDMCRITSECT_STRICT
610 return pdmCritSectEnter(pVM, pCritSect, rcBusy, NULL);
611#else
612 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
613 return pdmCritSectEnter(pVM, pCritSect, rcBusy, &SrcPos);
614#endif
615}
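/*
 * A minimal usage sketch for PDMCritSectEnter/PDMCritSectLeave.  The
 * pThis->CritSect member and the simplified error handling are assumptions
 * made for illustration only:
 *
 *      int rc = PDMCritSectEnter(pVM, &pThis->CritSect, VERR_SEM_BUSY);
 *      if (rc == VINF_SUCCESS)
 *      {
 *          ... touch the state protected by the section ...
 *          PDMCritSectLeave(pVM, &pThis->CritSect);
 *      }
 *      else
 *      {
 *          ... in RC/R0 rc is the rcBusy value (VERR_SEM_BUSY here); in any
 *          context it can be VERR_SEM_DESTROYED if the section was deleted ...
 *      }
 */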
616
617
618/**
619 * Enters a PDM critical section, with location information for debugging.
620 *
621 * @returns VINF_SUCCESS if entered successfully.
622 * @returns rcBusy when encountering a busy critical section in RC/R0.
623 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
624 * during the operation.
625 *
626 * @param pVM The cross context VM structure.
627 * @param pCritSect The PDM critical section to enter.
628 * @param rcBusy The status code to return when we're in RC or R0
629 * and the section is busy. Pass VINF_SUCCESS to
630 * acquire the critical section through a ring-3
631 * call if necessary.
632 * @param uId Some kind of locking location ID. Typically a
633 * return address up the stack. Optional (0).
634 * @param SRC_POS The source position where the lock is being
635 * acquired from. Optional.
636 */
637VMMDECL(DECL_CHECK_RETURN_NOT_R3(int))
638PDMCritSectEnterDebug(PVMCC pVM, PPDMCRITSECT pCritSect, int rcBusy, RTHCUINTPTR uId, RT_SRC_POS_DECL)
639{
640#ifdef PDMCRITSECT_STRICT
641 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
642 return pdmCritSectEnter(pVM, pCritSect, rcBusy, &SrcPos);
643#else
644 NOREF(uId); RT_SRC_POS_NOREF();
645 return pdmCritSectEnter(pVM, pCritSect, rcBusy, NULL);
646#endif
647}
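/*
 * In strict builds the automatic VBOX_STRICT mappings (undefined near the top
 * of this file) typically route PDMCritSectEnter calls to this debug variant.
 * Called directly it looks like the sketch below, where 0 is the optional uId
 * and pThis->CritSect is an illustrative assumption:
 *
 *      int rc = PDMCritSectEnterDebug(pVM, &pThis->CritSect, VERR_SEM_BUSY,
 *                                     0, RT_SRC_POS);
 */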
648
649
650/**
651 * Common worker for the debug and normal APIs.
652 *
653 * @retval VINF_SUCCESS on success.
654 * @retval VERR_SEM_BUSY if the critsect was owned.
655 * @retval VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
656 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
657 * during the operation.
658 *
659 * @param pVM The cross context VM structure.
660 * @param pCritSect The critical section.
661 * @param pSrcPos The source position of the lock operation.
662 */
663static int pdmCritSectTryEnter(PVMCC pVM, PPDMCRITSECT pCritSect, PCRTLOCKVALSRCPOS pSrcPos)
664{
665 /*
666 * If the critical section has already been destroyed, then inform the caller.
667 */
668 AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC,
669 ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic),
670 VERR_SEM_DESTROYED);
671
672 /*
673 * See if we're lucky.
674 */
675 /* NOP ... */
676 if (!(pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NOP))
677 { /* We're more likely to end up here with real critsects than a NOP one. */ }
678 else
679 return VINF_SUCCESS;
680
681 RTNATIVETHREAD hNativeSelf = pdmCritSectGetNativeSelf(pVM, pCritSect);
682 AssertReturn(hNativeSelf != NIL_RTNATIVETHREAD, VERR_VM_THREAD_NOT_EMT);
683 /* ... not owned ... */
684 if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
685 return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
686
687 /* ... or nested. */
688 if (pCritSect->s.Core.NativeThreadOwner == hNativeSelf)
689 {
690 Assert(pCritSect->s.Core.cNestings >= 1);
691# ifdef PDMCRITSECT_WITH_LESS_ATOMIC_STUFF
692 pCritSect->s.Core.cNestings += 1;
693# else
694 ASMAtomicIncS32(&pCritSect->s.Core.cNestings);
695# endif
696 ASMAtomicIncS32(&pCritSect->s.Core.cLockers);
697 Log12Func(("%p: cNestings=%d cLockers=%d\n", pCritSect, pCritSect->s.Core.cNestings, pCritSect->s.Core.cLockers));
698 return VINF_SUCCESS;
699 }
700
701 /* no spinning */
702
703 /*
704 * Return busy.
705 */
706#ifdef IN_RING3
707 STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionR3);
708#else
709 STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLockBusy);
710#endif
711 LogFlow(("PDMCritSectTryEnter: locked\n"));
712 return VERR_SEM_BUSY;
713}
714
715
716/**
717 * Tries to enter a critical section.
718 *
719 * @retval VINF_SUCCESS on success.
720 * @retval VERR_SEM_BUSY if the critsect was owned.
721 * @retval VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
722 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
723 * during the operation.
724 *
725 * @param pVM The cross context VM structure.
726 * @param pCritSect The critical section.
727 */
728VMMDECL(DECL_CHECK_RETURN(int)) PDMCritSectTryEnter(PVMCC pVM, PPDMCRITSECT pCritSect)
729{
730#ifndef PDMCRITSECT_STRICT
731 return pdmCritSectTryEnter(pVM, pCritSect, NULL);
732#else
733 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
734 return pdmCritSectTryEnter(pVM, pCritSect, &SrcPos);
735#endif
736}
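/*
 * Usage sketch for the try-enter path.  Unlike PDMCritSectEnter this never
 * spins or blocks, so VERR_SEM_BUSY must be handled by the caller
 * (pThis->CritSect is an illustrative assumption):
 *
 *      int rc = PDMCritSectTryEnter(pVM, &pThis->CritSect);
 *      if (rc == VINF_SUCCESS)
 *      {
 *          ... do the work ...
 *          PDMCritSectLeave(pVM, &pThis->CritSect);
 *      }
 *      else if (rc == VERR_SEM_BUSY)
 *      {
 *          ... defer or skip the work, someone else owns the section ...
 *      }
 */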
737
738
739/**
740 * Tries to enter a critical section, with location information for debugging.
741 *
742 * @retval VINF_SUCCESS on success.
743 * @retval VERR_SEM_BUSY if the critsect was owned.
744 * @retval VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
745 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
746 * during the operation.
747 *
748 * @param pVM The cross context VM structure.
749 * @param pCritSect The critical section.
750 * @param uId Some kind of locking location ID. Typically a
751 * return address up the stack. Optional (0).
752 * @param SRC_POS The source position where the lock is being
753 * acquired from. Optional.
754 */
755VMMDECL(DECL_CHECK_RETURN(int))
756PDMCritSectTryEnterDebug(PVMCC pVM, PPDMCRITSECT pCritSect, RTHCUINTPTR uId, RT_SRC_POS_DECL)
757{
758#ifdef PDMCRITSECT_STRICT
759 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
760 return pdmCritSectTryEnter(pVM, pCritSect, &SrcPos);
761#else
762 NOREF(uId); RT_SRC_POS_NOREF();
763 return pdmCritSectTryEnter(pVM, pCritSect, NULL);
764#endif
765}
766
767
768#ifdef IN_RING3
769/**
770 * Enters a PDM critical section.
771 *
772 * @returns VINF_SUCCESS if entered successfully.
773 * @returns rcBusy when encountering a busy critical section in GC/R0.
774 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
775 * during the operation.
776 *
777 * @param pVM The cross context VM structure.
778 * @param pCritSect The PDM critical section to enter.
779 * @param fCallRing3 Whether this is a VMMRZCallRing3() request.
780 */
781VMMR3DECL(int) PDMR3CritSectEnterEx(PVM pVM, PPDMCRITSECT pCritSect, bool fCallRing3)
782{
783 int rc = PDMCritSectEnter(pVM, pCritSect, VERR_IGNORED);
784 if ( rc == VINF_SUCCESS
785 && fCallRing3
786 && pCritSect->s.Core.pValidatorRec
787 && pCritSect->s.Core.pValidatorRec->hThread != NIL_RTTHREAD)
788 RTLockValidatorRecExclReleaseOwnerUnchecked(pCritSect->s.Core.pValidatorRec);
789 return rc;
790}
791#endif /* IN_RING3 */
792
793
794/**
795 * Leaves a critical section entered with PDMCritSectEnter().
796 *
797 * @returns Indication whether we really exited the critical section.
798 * @retval VINF_SUCCESS if we really exited.
799 * @retval VINF_SEM_NESTED if we only reduced the nesting count.
800 * @retval VERR_NOT_OWNER if you somehow ignore release assertions.
801 *
802 * @param pVM The cross context VM structure.
803 * @param pCritSect The PDM critical section to leave.
804 *
805 * @remarks Can be called from no-ring-3-call context in ring-0 (TM/VirtualSync)
806 * where we'll queue the leave operation for ring-3 processing.
807 */
808VMMDECL(int) PDMCritSectLeave(PVMCC pVM, PPDMCRITSECT pCritSect)
809{
810 AssertMsg(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic));
811 Assert(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC);
812
813 /*
814 * Check for NOP sections before asserting ownership.
815 */
816 if (!(pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NOP))
817 { /* We're more likely to end up here with real critsects than a NOP one. */ }
818 else
819 return VINF_SUCCESS;
820
821 /*
822 * Always check that the caller is the owner (screw performance).
823 */
824 RTNATIVETHREAD const hNativeSelf = pdmCritSectGetNativeSelf(pVM, pCritSect);
825 VMM_ASSERT_RELEASE_MSG_RETURN(pVM, pCritSect->s.Core.NativeThreadOwner == hNativeSelf && hNativeSelf != NIL_RTNATIVETHREAD,
826 ("%p %s: %p != %p; cLockers=%d cNestings=%d\n", pCritSect, R3STRING(pCritSect->s.pszName),
827 pCritSect->s.Core.NativeThreadOwner, hNativeSelf,
828 pCritSect->s.Core.cLockers, pCritSect->s.Core.cNestings),
829 VERR_NOT_OWNER);
830
831 /*
832 * Nested leave.
833 */
834 int32_t const cNestings = pCritSect->s.Core.cNestings;
835 Assert(cNestings >= 1);
836 if (cNestings > 1)
837 {
838#ifdef PDMCRITSECT_WITH_LESS_ATOMIC_STUFF
839 pCritSect->s.Core.cNestings = cNestings - 1;
840#else
841 ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, cNestings - 1);
842#endif
843 int32_t const cLockers = ASMAtomicDecS32(&pCritSect->s.Core.cLockers);
844 Assert(cLockers >= 0); RT_NOREF(cLockers);
845 Log12Func(("%p: cNestings=%d cLockers=%d\n", pCritSect, cNestings - 1, cLockers));
846 return VINF_SEM_NESTED;
847 }
848
849 Log12Func(("%p: cNestings=%d cLockers=%d hOwner=%p - leave for real\n",
850 pCritSect, cNestings, pCritSect->s.Core.cLockers, pCritSect->s.Core.NativeThreadOwner));
851
852#ifdef IN_RING3
853 /*
854 * Ring-3: Leave for real.
855 */
856 SUPSEMEVENT const hEventToSignal = pCritSect->s.hEventToSignal;
857 pCritSect->s.hEventToSignal = NIL_SUPSEMEVENT;
858
859# if defined(PDMCRITSECT_STRICT)
860 if (pCritSect->s.Core.pValidatorRec->hThread != NIL_RTTHREAD)
861 RTLockValidatorRecExclReleaseOwnerUnchecked(pCritSect->s.Core.pValidatorRec);
862# endif
863 Assert(!pCritSect->s.Core.pValidatorRec || pCritSect->s.Core.pValidatorRec->hThread == NIL_RTTHREAD);
864
865# ifdef PDMCRITSECT_WITH_LESS_ATOMIC_STUFF
866 //pCritSect->s.Core.cNestings = 0; /* not really needed */
867 pCritSect->s.Core.NativeThreadOwner = NIL_RTNATIVETHREAD;
868# else
869 ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 0);
870 ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD);
871# endif
872 ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
873
874 /* Stop profiling and decrement lockers. */
875 STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);
876 ASMCompilerBarrier();
877 int32_t const cLockers = ASMAtomicDecS32(&pCritSect->s.Core.cLockers);
878 if (cLockers < 0)
879 AssertMsg(cLockers == -1, ("cLockers=%d\n", cLockers));
880 else
881 {
882 /* Someone is waiting, wake up one of them. */
883 Assert(cLockers < _8K);
884 Log8(("PDMCritSectLeave: Waking up %p (cLockers=%u)\n", pCritSect, cLockers));
885 SUPSEMEVENT hEvent = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
886 int rc = SUPSemEventSignal(pVM->pSession, hEvent);
887 AssertRC(rc);
888 }
889
890 /* Signal exit event. */
891 if (RT_LIKELY(hEventToSignal == NIL_SUPSEMEVENT))
892 { /* likely */ }
893 else
894 {
895 Log8(("PDMCritSectLeave: Signalling %#p (%p)\n", hEventToSignal, pCritSect));
896 int rc = SUPSemEventSignal(pVM->pSession, hEventToSignal);
897 AssertRC(rc);
898 }
899
900 return VINF_SUCCESS;
901
902
903#elif defined(IN_RING0)
904 /*
905 * Ring-0: Try to leave for real; depends on host and context.
906 */
907 SUPSEMEVENT const hEventToSignal = pCritSect->s.hEventToSignal;
908 pCritSect->s.hEventToSignal = NIL_SUPSEMEVENT;
909 PVMCPUCC pVCpu = VMMGetCpu(pVM);
910 bool fQueueOnTrouble = false; /* Set this to true to test queueing. */
911 if ( pVCpu == NULL /* non-EMT access; if we implement it, it must be able to block */
912 || VMMRZCallRing3IsEnabled(pVCpu)
913 || RTSemEventIsSignalSafe()
914 || ( VMMR0ThreadCtxHookIsEnabled(pVCpu) /* Doesn't matter if Signal() blocks if we have hooks, ... */
915 && RTThreadPreemptIsEnabled(NIL_RTTHREAD) /* ... and preemption is still enabled, */
916 && ASMIntAreEnabled()) /* ... and interrupts haven't yet been disabled. Special pre-GC HM env. */
917 || (fQueueOnTrouble = ( hEventToSignal == NIL_SUPSEMEVENT
918 && ASMAtomicUoReadS32(&pCritSect->s.Core.cLockers) == 0)) )
919 {
920 pCritSect->s.hEventToSignal = NIL_SUPSEMEVENT;
921
922# ifdef PDMCRITSECT_WITH_LESS_ATOMIC_STUFF
923 //pCritSect->s.Core.cNestings = 0; /* not really needed */
924 pCritSect->s.Core.NativeThreadOwner = NIL_RTNATIVETHREAD;
925# else
926 ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 0);
927 ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD);
928# endif
929 ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
930
931 /*
932 * Stop profiling and decrement lockers.
933 */
934 STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);
935 ASMCompilerBarrier();
936
937 bool fQueueIt = false;
938 int32_t cLockers;
939 if (!fQueueOnTrouble)
940 cLockers = ASMAtomicDecS32(&pCritSect->s.Core.cLockers);
941 else
942 {
943 cLockers = -1;
944 if (!ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, -1, 0))
945 fQueueIt = true;
946 }
947 if (!fQueueIt)
948 {
949 VMMR0EMTBLOCKCTX Ctx;
950 bool fLeaveCtx = false;
951 if (cLockers < 0)
952 AssertMsg(cLockers == -1, ("cLockers=%d\n", cLockers));
953 else
954 {
955 /* Someone is waiting, wake up one of them. */
956 Assert(cLockers < _8K);
957 SUPSEMEVENT hEvent = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
958 if (!RTSemEventIsSignalSafe() && (pVCpu = VMMGetCpu(pVM)) != NULL)
959 {
960 int rc = VMMR0EmtPrepareToBlock(pVCpu, VINF_SUCCESS, __FUNCTION__, pCritSect, &Ctx);
961 VMM_ASSERT_RELEASE_MSG_RETURN(pVM, RT_SUCCESS(rc), ("rc=%Rrc\n", rc), rc);
962 fLeaveCtx = true;
963 }
964 int rc = SUPSemEventSignal(pVM->pSession, hEvent);
965 AssertRC(rc);
966 }
967
968 /*
969 * Signal exit event.
970 */
971 if (RT_LIKELY(hEventToSignal == NIL_SUPSEMEVENT))
972 { /* likely */ }
973 else
974 {
975 if (!fLeaveCtx && pVCpu != NULL && !RTSemEventIsSignalSafe() && (pVCpu = VMMGetCpu(pVM)) != NULL)
976 {
977 int rc = VMMR0EmtPrepareToBlock(pVCpu, VINF_SUCCESS, __FUNCTION__, pCritSect, &Ctx);
978 VMM_ASSERT_RELEASE_MSG_RETURN(pVM, RT_SUCCESS(rc), ("rc=%Rrc\n", rc), rc);
979 fLeaveCtx = true;
980 }
981 Log8(("Signalling %#p\n", hEventToSignal));
982 int rc = SUPSemEventSignal(pVM->pSession, hEventToSignal);
983 AssertRC(rc);
984 }
985
986 /*
987 * Restore HM context if needed.
988 */
989 if (!fLeaveCtx)
990 { /* contention should be unlikely */ }
991 else
992 VMMR0EmtResumeAfterBlocking(pVCpu, &Ctx);
993
994# ifdef DEBUG_bird
995 VMMTrashVolatileXMMRegs();
996# endif
997 return VINF_SUCCESS;
998 }
999
1000 /*
1001 * Darn, someone raced in on us. Restore the state (this works only
1002 * because the semaphore is effectively controlling ownership).
1003 */
1004 bool fRc;
1005 RTNATIVETHREAD hMessOwner = NIL_RTNATIVETHREAD;
1006 ASMAtomicCmpXchgExHandle(&pCritSect->s.Core.NativeThreadOwner, hNativeSelf, NIL_RTNATIVETHREAD, fRc, &hMessOwner);
1007 AssertLogRelMsgReturn(fRc, ("pCritSect=%p hMessOwner=%p\n", pCritSect, hMessOwner),
1008 pdmCritSectCorrupted(pCritSect, "owner race"));
1009 STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
1010# ifdef PDMCRITSECT_WITH_LESS_ATOMIC_STUFF
1011 //pCritSect->s.Core.cNestings = 1;
1012 Assert(pCritSect->s.Core.cNestings == 1);
1013# else
1014 //Assert(pCritSect->s.Core.cNestings == 0);
1015 ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 1);
1016# endif
1017 Assert(hEventToSignal == NIL_SUPSEMEVENT);
1018 }
1019
1020
1021#else /* IN_RC */
1022 /*
1023 * Raw-mode: Try leave it.
1024 */
1025# error "This context is not use..."
1026 if (pCritSect->s.Core.cLockers == 0)
1027 {
1028# ifdef PDMCRITSECT_WITH_LESS_ATOMIC_STUFF
1029 //pCritSect->s.Core.cNestings = 0; /* not really needed */
1030# else
1031 ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 0);
1032# endif
1033 ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
1034 STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);
1035
1036 ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD);
1037 if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, -1, 0))
1038 return VINF_SUCCESS;
1039
1040 /*
1041 * Darn, someone raced in on us. Restore the state (this works only
1042 * because the semaphore is effectively controlling ownership).
1043 */
1044 bool fRc;
1045 RTNATIVETHREAD hMessOwner = NIL_RTNATIVETHREAD;
1046 ASMAtomicCmpXchgExHandle(&pCritSect->s.Core.NativeThreadOwner, hNativeSelf, NIL_RTNATIVETHREAD, fRc, &hMessOwner);
1047 AssertLogRelMsgReturn(fRc, ("pCritSect=%p hMessOwner=%p\n", pCritSect, hMessOwner),
1048 pdmCritSectCorrupted(pCritSect, "owner race"));
1049 STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
1050# ifdef PDMCRITSECT_WITH_LESS_ATOMIC_STUFF
1051 //pCritSect->s.Core.cNestings = 1;
1052 Assert(pCritSect->s.Core.cNestings == 1);
1053# else
1054 //Assert(pCritSect->s.Core.cNestings == 0);
1055 ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 1);
1056# endif
1057 }
1058#endif /* IN_RC */
1059
1060
1061#ifndef IN_RING3
1062 /*
1063 * Ring-0/raw-mode: Unable to leave. Queue the leave for ring-3.
1064 */
1065 ASMAtomicOrU32(&pCritSect->s.Core.fFlags, PDMCRITSECT_FLAGS_PENDING_UNLOCK);
1066# ifndef IN_RING0
1067 PVMCPUCC pVCpu = VMMGetCpu(pVM);
1068# endif
1069 uint32_t i = pVCpu->pdm.s.cQueuedCritSectLeaves++;
1070 LogFlow(("PDMCritSectLeave: [%d]=%p => R3\n", i, pCritSect));
1071 VMM_ASSERT_RELEASE_MSG_RETURN(pVM, i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectLeaves), ("%d\n", i), VERR_PDM_CRITSECT_IPE);
1072 pVCpu->pdm.s.apQueuedCritSectLeaves[i] = pCritSect->s.pSelfR3;
1073 VMM_ASSERT_RELEASE_MSG_RETURN(pVM,
1074 RT_VALID_PTR(pVCpu->pdm.s.apQueuedCritSectLeaves[i])
1075 && ((uintptr_t)pVCpu->pdm.s.apQueuedCritSectLeaves[i] & HOST_PAGE_OFFSET_MASK)
1076 == ((uintptr_t)pCritSect & HOST_PAGE_OFFSET_MASK),
1077 ("%p vs %p\n", pVCpu->pdm.s.apQueuedCritSectLeaves[i], pCritSect),
1078 pdmCritSectCorrupted(pCritSect, "Invalid pSelfR3 value"));
1079 VMCPU_FF_SET(pVCpu, VMCPU_FF_PDM_CRITSECT); /** @todo handle VMCPU_FF_PDM_CRITSECT in ring-0 outside the no-call-ring-3 part. */
1080 VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3); /* unnecessary paranoia */
1081 STAM_REL_COUNTER_INC(&pVM->pdm.s.StatQueuedCritSectLeaves);
1082 STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZUnlock);
1083
1084 return VINF_SUCCESS;
1085#endif /* IN_RING3 */
1086}
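/*
 * Worked nesting example for the enter/leave return values documented above
 * (pThis->CritSect is an illustrative assumption):
 *
 *      PDMCritSectEnter(pVM, &pThis->CritSect, VERR_SEM_BUSY);  -> VINF_SUCCESS,    cNestings = 1
 *      PDMCritSectEnter(pVM, &pThis->CritSect, VERR_SEM_BUSY);  -> VINF_SUCCESS,    cNestings = 2
 *      PDMCritSectLeave(pVM, &pThis->CritSect);                 -> VINF_SEM_NESTED, cNestings = 1
 *      PDMCritSectLeave(pVM, &pThis->CritSect);                 -> VINF_SUCCESS,    section released
 */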
1087
1088
1089#if defined(IN_RING0) || defined(IN_RING3)
1090/**
1091 * Schedules an event semaphore for signalling upon critsect exit.
1092 *
1093 * @returns VINF_SUCCESS on success.
1094 * @returns VERR_TOO_MANY_SEMAPHORES if an event was already scheduled.
1095 * @returns VERR_NOT_OWNER if we're not the critsect owner (ring-3 only).
1096 * @returns VERR_SEM_DESTROYED if RTCritSectDelete was called while waiting.
1097 *
1098 * @param pCritSect The critical section.
1099 * @param hEventToSignal The support driver event semaphore that should be
1100 * signalled.
1101 */
1102VMMDECL(int) PDMHCCritSectScheduleExitEvent(PPDMCRITSECT pCritSect, SUPSEMEVENT hEventToSignal)
1103{
1104 AssertPtr(pCritSect);
1105 Assert(!(pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NOP));
1106 Assert(hEventToSignal != NIL_SUPSEMEVENT);
1107# ifdef IN_RING3
1108 if (RT_UNLIKELY(!RTCritSectIsOwner(&pCritSect->s.Core)))
1109 return VERR_NOT_OWNER;
1110# endif
1111 if (RT_LIKELY( pCritSect->s.hEventToSignal == NIL_RTSEMEVENT
1112 || pCritSect->s.hEventToSignal == hEventToSignal))
1113 {
1114 pCritSect->s.hEventToSignal = hEventToSignal;
1115 return VINF_SUCCESS;
1116 }
1117 return VERR_TOO_MANY_SEMAPHORES;
1118}
1119#endif /* IN_RING0 || IN_RING3 */
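/*
 * Usage sketch: while owning the section, schedule a support driver event
 * semaphore to be signalled by the eventual PDMCritSectLeave.  The hEvent
 * handle is assumed to have been created elsewhere (e.g. via SUPSemEventCreate)
 * and pThis->CritSect is an illustrative assumption:
 *
 *      Assert(PDMCritSectIsOwner(pVM, &pThis->CritSect));
 *      int rc = PDMHCCritSectScheduleExitEvent(&pThis->CritSect, hEvent);
 *      if (RT_SUCCESS(rc))
 *          PDMCritSectLeave(pVM, &pThis->CritSect);   ... hEvent is signalled here ...
 */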
1120
1121
1122/**
1123 * Checks whether the caller is the owner of the critical section.
1124 *
1125 * @returns true if owner.
1126 * @returns false if not owner.
1127 * @param pVM The cross context VM structure.
1128 * @param pCritSect The critical section.
1129 */
1130VMMDECL(bool) PDMCritSectIsOwner(PVMCC pVM, PCPDMCRITSECT pCritSect)
1131{
1132#ifdef IN_RING3
1133 RT_NOREF(pVM);
1134 return RTCritSectIsOwner(&pCritSect->s.Core);
1135#else
1136 PVMCPUCC pVCpu = VMMGetCpu(pVM);
1137 if ( !pVCpu
1138 || pCritSect->s.Core.NativeThreadOwner != pVCpu->hNativeThread)
1139 return false;
1140 return (pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK) == 0
1141 || pCritSect->s.Core.cNestings > 1;
1142#endif
1143}
1144
1145
1146/**
1147 * Checks whether the specified VCPU is the owner of the critical section.
1148 *
1149 * @returns true if owner.
1150 * @returns false if not owner.
1151 * @param pVCpu The cross context virtual CPU structure.
1152 * @param pCritSect The critical section.
1153 */
1154VMMDECL(bool) PDMCritSectIsOwnerEx(PVMCPUCC pVCpu, PCPDMCRITSECT pCritSect)
1155{
1156#ifdef IN_RING3
1157 NOREF(pVCpu);
1158 return RTCritSectIsOwner(&pCritSect->s.Core);
1159#else
1160 Assert(VMCC_GET_CPU(pVCpu->CTX_SUFF(pVM), pVCpu->idCpu) == pVCpu);
1161 if (pCritSect->s.Core.NativeThreadOwner != pVCpu->hNativeThread)
1162 return false;
1163 return (pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK) == 0
1164 || pCritSect->s.Core.cNestings > 1;
1165#endif
1166}
1167
1168
1169/**
1170 * Checks if anyone is waiting on the critical section we own.
1171 *
1172 * @returns true if someone is waiting.
1173 * @returns false if no one is waiting.
1174 * @param pVM The cross context VM structure.
1175 * @param pCritSect The critical section.
1176 */
1177VMMDECL(bool) PDMCritSectHasWaiters(PVMCC pVM, PCPDMCRITSECT pCritSect)
1178{
1179 AssertReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, false);
1180 Assert(pCritSect->s.Core.NativeThreadOwner == pdmCritSectGetNativeSelf(pVM, pCritSect)); RT_NOREF(pVM);
1181 return pCritSect->s.Core.cLockers >= pCritSect->s.Core.cNestings;
1182}
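/*
 * Usage sketch: HasWaiters is only meaningful while owning the section (see
 * the assertion above), e.g. to decide whether to wrap up and drop the lock
 * early so a waiter can make progress (pThis->CritSect is an illustrative
 * assumption):
 *
 *      if (   PDMCritSectIsOwner(pVM, &pThis->CritSect)
 *          && PDMCritSectHasWaiters(pVM, &pThis->CritSect))
 *      {
 *          ... finish quickly and leave the section ...
 *      }
 */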
1183
1184
1185/**
1186 * Checks if a critical section is initialized or not.
1187 *
1188 * @returns true if initialized.
1189 * @returns false if not initialized.
1190 * @param pCritSect The critical section.
1191 */
1192VMMDECL(bool) PDMCritSectIsInitialized(PCPDMCRITSECT pCritSect)
1193{
1194 return RTCritSectIsInitialized(&pCritSect->s.Core);
1195}
1196
1197
1198/**
1199 * Gets the recursion depth.
1200 *
1201 * @returns The recursion depth.
1202 * @param pCritSect The critical section.
1203 */
1204VMMDECL(uint32_t) PDMCritSectGetRecursion(PCPDMCRITSECT pCritSect)
1205{
1206 return RTCritSectGetRecursion(&pCritSect->s.Core);
1207}
1208