source: vbox/trunk/src/VBox/VMM/VMMAll/PDMAllCritSect.cpp@93762

Last change on this file since 93762 was 93725, checked in by vboxsync, 3 years ago

VMM: More arm64 adjustments. bugref:9898

1/* $Id: PDMAllCritSect.cpp 93725 2022-02-14 13:46:16Z vboxsync $ */
2/** @file
3 * PDM - Write-Only Critical Section, All Contexts.
4 */
5
6/*
7 * Copyright (C) 2006-2022 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_PDM_CRITSECT
23#include "PDMInternal.h"
24#include <VBox/vmm/pdmcritsect.h>
25#include <VBox/vmm/mm.h>
26#include <VBox/vmm/vmm.h>
27#include <VBox/vmm/vmcc.h>
28#include <VBox/err.h>
29#include <VBox/vmm/hm.h>
30
31#include <VBox/log.h>
32#include <iprt/asm.h>
33#include <iprt/assert.h>
34#ifdef IN_RING3
35# include <iprt/lockvalidator.h>
36#endif
37#if defined(IN_RING3) || defined(IN_RING0)
38# include <iprt/semaphore.h>
39#endif
40#ifdef IN_RING0
41# include <iprt/time.h>
42#endif
43#if defined(IN_RING3) || defined(IN_RING0)
44# include <iprt/thread.h>
45#endif
46
47
48/*********************************************************************************************************************************
49* Defined Constants And Macros *
50*********************************************************************************************************************************/
51/** The number of loops to spin for in ring-3. */
52#define PDMCRITSECT_SPIN_COUNT_R3 20
53/** The number of loops to spin for in ring-0. */
54#define PDMCRITSECT_SPIN_COUNT_R0 256
55/** The number of loops to spin for in the raw-mode context. */
56#define PDMCRITSECT_SPIN_COUNT_RC 256
57
58
59/** Skips some of the overly paranoid atomic updates.
60 * Makes some assumptions about cache coherence, though not brave enough to
61 * skip the concluding atomic update. */
62#define PDMCRITSECT_WITH_LESS_ATOMIC_STUFF
63
64/* Undefine the automatic VBOX_STRICT API mappings. */
65#undef PDMCritSectEnter
66#undef PDMCritSectTryEnter
67
68
69/**
70 * Gets the ring-3 native thread handle of the calling thread.
71 *
72 * @returns native thread handle (ring-3).
73 * @param pVM The cross context VM structure.
74 * @param pCritSect The critical section. This is used in R0 and RC.
75 */
76DECL_FORCE_INLINE(RTNATIVETHREAD) pdmCritSectGetNativeSelf(PVMCC pVM, PCPDMCRITSECT pCritSect)
77{
78#ifdef IN_RING3
79 RT_NOREF(pVM, pCritSect);
80 RTNATIVETHREAD hNativeSelf = RTThreadNativeSelf();
81
82#elif defined(IN_RING0)
83 AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, ("%RX32\n", pCritSect->s.Core.u32Magic),
84 NIL_RTNATIVETHREAD);
85 RTNATIVETHREAD hNativeSelf = GVMMR0GetRing3ThreadForSelf(pVM);
86 Assert(hNativeSelf != NIL_RTNATIVETHREAD);
87
88#else
89# error "Invalid context"
90#endif
91 return hNativeSelf;
92}
93
94
95#ifdef IN_RING0
96/**
97 * Marks the critical section as corrupted.
98 */
99DECL_NO_INLINE(static, int) pdmCritSectCorrupted(PPDMCRITSECT pCritSect, const char *pszMsg)
100{
101 ASMAtomicWriteU32(&pCritSect->s.Core.u32Magic, PDMCRITSECT_MAGIC_CORRUPTED);
102 LogRel(("PDMCritSect: %s pCritSect=%p\n", pszMsg, pCritSect));
103 return VERR_PDM_CRITSECT_IPE;
104}
105#endif
106
107
108/**
109 * Tail code called when we've won the battle for the lock.
110 *
111 * @returns VINF_SUCCESS.
112 *
113 * @param pCritSect The critical section.
114 * @param hNativeSelf The native handle of this thread.
115 * @param pSrcPos The source position of the lock operation.
116 */
117DECL_FORCE_INLINE(int) pdmCritSectEnterFirst(PPDMCRITSECT pCritSect, RTNATIVETHREAD hNativeSelf, PCRTLOCKVALSRCPOS pSrcPos)
118{
119 Assert(hNativeSelf != NIL_RTNATIVETHREAD);
120 AssertMsg(pCritSect->s.Core.NativeThreadOwner == NIL_RTNATIVETHREAD, ("NativeThreadOwner=%p\n", pCritSect->s.Core.NativeThreadOwner));
121 Assert(!(pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK));
122
123# ifdef PDMCRITSECT_WITH_LESS_ATOMIC_STUFF
124 pCritSect->s.Core.cNestings = 1;
125# else
126 ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 1);
127# endif
128 Assert(pCritSect->s.Core.cNestings == 1);
129 ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, hNativeSelf);
130
131# ifdef PDMCRITSECT_STRICT
132 RTLockValidatorRecExclSetOwner(pCritSect->s.Core.pValidatorRec, NIL_RTTHREAD, pSrcPos, true);
133# else
134 NOREF(pSrcPos);
135# endif
136 if (pSrcPos)
137 Log12Func(("%p: uId=%p ln=%u fn=%s\n", pCritSect, pSrcPos->uId, pSrcPos->uLine, pSrcPos->pszFunction));
138 else
139 Log12Func(("%p\n", pCritSect));
140
141 STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
142 return VINF_SUCCESS;
143}
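
/*
 * Informal sketch of the cLockers protocol used throughout this file (a
 * reading of the code above and below, not a separate specification):
 * -1 means the section is free; every owner nesting beyond the first and
 * every waiter adds one, i.e. cLockers == (cNestings - 1) + waiters.
 *
 *      // Uncontended enter: -1 -> 0 claims ownership.
 *      if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
 *          return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
 *
 *      // Contended enter: increment and wait; a result of 0 means we won after all.
 *      if (ASMAtomicIncS32(&pCritSect->s.Core.cLockers) == 0)
 *          return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
 *
 *      // Leave: decrement; a result >= 0 means somebody is waiting and gets woken.
 *      if (ASMAtomicDecS32(&pCritSect->s.Core.cLockers) >= 0)
 *          SUPSemEventSignal(pVM->pSession, (SUPSEMEVENT)pCritSect->s.Core.EventSem);
 */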
144
145
146#if defined(IN_RING3) || defined(IN_RING0)
147/**
148 * Deals with the contended case in ring-3 and ring-0.
149 *
150 * @retval VINF_SUCCESS on success.
151 * @retval VERR_SEM_DESTROYED if destroyed.
152 *
153 * @param pVM The cross context VM structure.
154 * @param pVCpu The cross context virtual CPU structure if ring-0 and on
155 * an EMT, otherwise NULL.
156 * @param pCritSect The critsect.
157 * @param hNativeSelf The native thread handle.
158 * @param pSrcPos The source position of the lock operation.
159 * @param rcBusy The status code to return when we're in RC or R0 and the section is busy.
160 */
161static int pdmR3R0CritSectEnterContended(PVMCC pVM, PVMCPU pVCpu, PPDMCRITSECT pCritSect, RTNATIVETHREAD hNativeSelf,
162 PCRTLOCKVALSRCPOS pSrcPos, int rcBusy)
163{
164# ifdef IN_RING0
165 /*
166 * If we've got queued critical section leave operations and rcBusy isn't
167 * VINF_SUCCESS, return to ring-3 immediately to avoid deadlocks.
168 */
169 if ( !pVCpu
170 || !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PDM_CRITSECT)
171 || rcBusy == VINF_SUCCESS )
172 { /* likely */ }
173 else
174 {
175 /** @todo statistics. */
176 STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
177 return rcBusy;
178 }
179# endif
180
181 /*
182 * Start waiting.
183 */
184 if (ASMAtomicIncS32(&pCritSect->s.Core.cLockers) == 0)
185 return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
186# ifdef IN_RING3
187 STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionR3);
188# else
189 STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
190# endif
191
192 /*
193 * The wait loop.
194 *
195 * This handles VERR_TIMEOUT and VERR_INTERRUPTED.
196 */
197 STAM_REL_PROFILE_START(&pCritSect->s.CTX_MID_Z(StatContention,Wait), a);
198 PSUPDRVSESSION const pSession = pVM->pSession;
199 SUPSEMEVENT const hEvent = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
200# ifdef IN_RING3
201# ifdef PDMCRITSECT_STRICT
202 RTTHREAD const hThreadSelf = RTThreadSelfAutoAdopt();
203 int rc2 = RTLockValidatorRecExclCheckOrder(pCritSect->s.Core.pValidatorRec, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
204 if (RT_FAILURE(rc2))
205 return rc2;
206# else
207 RTTHREAD const hThreadSelf = RTThreadSelf();
208# endif
209# else /* IN_RING0 */
210 uint64_t const tsStart = RTTimeNanoTS();
211 uint64_t const cNsMaxTotalDef = RT_NS_5MIN;
212 uint64_t cNsMaxTotal = cNsMaxTotalDef;
213 uint64_t const cNsMaxRetry = RT_NS_15SEC;
214 uint32_t cMsMaxOne = RT_MS_5SEC;
215 bool fNonInterruptible = false;
216# endif
217 for (;;)
218 {
219 /*
220 * Do the wait.
221 *
222 * In ring-3 this gets cluttered by lock validation and thread state
223 * maintenance.
224 *
225 * In ring-0 we have to deal with the possibility that the thread has
226 * been signalled and the interruptible wait function returning
227 * immediately. In that case we do normal R0/RC rcBusy handling.
228 *
229 * We always do a timed wait here, so the event handle is revalidated
230 * regularly and we won't end up stuck waiting for a destroyed critsect.
231 */
232 /** @todo Make SUPSemEventClose wake up all waiters. */
233# ifdef IN_RING3
234# ifdef PDMCRITSECT_STRICT
235 int rc9 = RTLockValidatorRecExclCheckBlocking(pCritSect->s.Core.pValidatorRec, hThreadSelf, pSrcPos,
236 !(pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NO_NESTING),
237 RT_INDEFINITE_WAIT, RTTHREADSTATE_CRITSECT, true);
238 if (RT_FAILURE(rc9))
239 return rc9;
240# else
241 RTThreadBlocking(hThreadSelf, RTTHREADSTATE_CRITSECT, true);
242# endif
243 int const rc = SUPSemEventWaitNoResume(pSession, hEvent, RT_MS_5SEC);
244 RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_CRITSECT);
245# else /* IN_RING0 */
246 int const rc = !fNonInterruptible
247 ? SUPSemEventWaitNoResume(pSession, hEvent, cMsMaxOne)
248 : SUPSemEventWait(pSession, hEvent, cMsMaxOne);
249 Log11Func(("%p: rc=%Rrc %'RU64 ns (cMsMaxOne=%RU64 hOwner=%p)\n",
250 pCritSect, rc, RTTimeNanoTS() - tsStart, cMsMaxOne, pCritSect->s.Core.NativeThreadOwner));
251# endif /* IN_RING0 */
252
253 /*
254 * Make sure the critical section hasn't been deleted before continuing.
255 */
256 if (RT_LIKELY(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC))
257 { /* likely */ }
258 else
259 {
260 LogRel(("PDMCritSectEnter: Destroyed while waiting; pCritSect=%p rc=%Rrc\n", pCritSect, rc));
261 return VERR_SEM_DESTROYED;
262 }
263
264 /*
265 * Most likely we're here because we got signalled.
266 */
267 if (rc == VINF_SUCCESS)
268 {
269 STAM_REL_PROFILE_STOP(&pCritSect->s.CTX_MID_Z(StatContention,Wait), a);
270 return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
271 }
272
273 /*
274 * Timeout and interrupted waits need careful handling in ring-0
275 * because we're cooperating with ring-3 on this critical section
276 * and thus need to make absolutely sure we won't get stuck here.
277 *
278 * The r0 interrupted case means something is pending (termination,
279 * signal, APC, debugger, whatever), so we must try our best to
280 * return to the caller and to ring-3 so it can be dealt with.
281 */
282 if (RT_LIKELY(rc == VERR_TIMEOUT || rc == VERR_INTERRUPTED))
283 {
284# ifdef IN_RING0
285 uint64_t const cNsElapsed = RTTimeNanoTS() - tsStart;
286 int const rcTerm = RTThreadQueryTerminationStatus(NIL_RTTHREAD);
287 AssertMsg(rcTerm == VINF_SUCCESS || rcTerm == VERR_NOT_SUPPORTED || rcTerm == VINF_THREAD_IS_TERMINATING,
288 ("rcTerm=%Rrc\n", rcTerm));
289 if (rcTerm == VERR_NOT_SUPPORTED && cNsMaxTotal == cNsMaxTotalDef)
290 cNsMaxTotal = RT_NS_1MIN;
291
292 if (rc == VERR_TIMEOUT)
293 {
294 /* Try to get out of here with a non-VINF_SUCCESS status if
295 the thread is terminating or if the timeout has been exceeded. */
296 STAM_REL_COUNTER_INC(&pVM->pdm.s.StatCritSectVerrTimeout);
297 if ( rcTerm != VINF_THREAD_IS_TERMINATING
298 && cNsElapsed <= cNsMaxTotal)
299 continue;
300 }
301 else
302 {
303 /* For interrupt cases, we must return if we can. If rcBusy is VINF_SUCCESS,
304 we will try non-interruptible sleep for a while to help resolve the issue
305 w/o guru'ing. */
306 STAM_REL_COUNTER_INC(&pVM->pdm.s.StatCritSectVerrInterrupted);
307 if ( rcTerm != VINF_THREAD_IS_TERMINATING
308 && rcBusy == VINF_SUCCESS
309 && pVCpu != NULL
310 && cNsElapsed <= cNsMaxTotal)
311 {
312 if (!fNonInterruptible)
313 {
314 STAM_REL_COUNTER_INC(&pVM->pdm.s.StatCritSectNonInterruptibleWaits);
315 fNonInterruptible = true;
316 cMsMaxOne = 32;
317 uint64_t cNsLeft = cNsMaxTotal - cNsElapsed;
318 if (cNsLeft > RT_NS_10SEC)
319 cNsMaxTotal = cNsElapsed + RT_NS_10SEC;
320 }
321 continue;
322 }
323 }
324
325 /*
326 * Let's try to get out of here. We must very carefully undo the
327 * cLockers increment we did using compare-and-exchange so that
328 * we don't race the semaphore signalling in PDMCritSectLeave
329 * and end up with spurious wakeups and two owners at once.
330 */
331 uint32_t cNoIntWaits = 0;
332 uint32_t cCmpXchgs = 0;
333 int32_t cLockers = ASMAtomicReadS32(&pCritSect->s.Core.cLockers);
334 for (;;)
335 {
336 if (pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC)
337 {
338 if (cLockers > 0 && cCmpXchgs < _64M)
339 {
340 bool fRc = ASMAtomicCmpXchgExS32(&pCritSect->s.Core.cLockers, cLockers - 1, cLockers, &cLockers);
341 if (fRc)
342 {
343 LogFunc(("Aborting wait on %p (rc=%Rrc rcTerm=%Rrc cNsElapsed=%'RU64) -> %Rrc\n", pCritSect,
344 rc, rcTerm, cNsElapsed, rcBusy != VINF_SUCCESS ? rcBusy : rc));
345 STAM_REL_COUNTER_INC(&pVM->pdm.s.StatAbortedCritSectEnters);
346 return rcBusy != VINF_SUCCESS ? rcBusy : rc;
347 }
348 cCmpXchgs++;
349 if ((cCmpXchgs & 0xffff) == 0)
350 Log11Func(("%p: cLockers=%d cCmpXchgs=%u (hOwner=%p)\n",
351 pCritSect, cLockers, cCmpXchgs, pCritSect->s.Core.NativeThreadOwner));
352 ASMNopPause();
353 continue;
354 }
355
356 if (cLockers == 0)
357 {
358 /*
359 * We are racing someone in PDMCritSectLeave.
360 *
361 * For the VERR_TIMEOUT case we'll just retry taking it the normal
362 * way for a while. For VERR_INTERRUPTED we're in for more fun as
363 * the previous owner might not have signalled the semaphore yet,
364 * so we'll do a short non-interruptible wait instead and then guru.
365 */
366 if ( rc == VERR_TIMEOUT
367 && RTTimeNanoTS() - tsStart <= cNsMaxTotal + cNsMaxRetry)
368 break;
369
370 if ( rc == VERR_INTERRUPTED
371 && ( cNoIntWaits == 0
372 || RTTimeNanoTS() - (tsStart + cNsElapsed) < RT_NS_100MS))
373 {
374 int const rc2 = SUPSemEventWait(pSession, hEvent, 1 /*ms*/);
375 if (rc2 == VINF_SUCCESS)
376 {
377 STAM_REL_COUNTER_INC(&pVM->pdm.s.StatCritSectEntersWhileAborting);
378 STAM_REL_PROFILE_STOP(&pCritSect->s.CTX_MID_Z(StatContention,Wait), a);
379 return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
380 }
381 cNoIntWaits++;
382 cLockers = ASMAtomicReadS32(&pCritSect->s.Core.cLockers);
383 continue;
384 }
385 }
386 else
387 LogFunc(("Critical section %p has a broken cLockers count. Aborting.\n", pCritSect));
388
389 /* Sabotage the critical section and return error to caller. */
390 ASMAtomicWriteU32(&pCritSect->s.Core.u32Magic, PDMCRITSECT_MAGIC_FAILED_ABORT);
391 LogRel(("PDMCritSectEnter: Failed to abort wait on pCritSect=%p (rc=%Rrc rcTerm=%Rrc)\n",
392 pCritSect, rc, rcTerm));
393 return VERR_PDM_CRITSECT_ABORT_FAILED;
394 }
395 LogRel(("PDMCritSectEnter: Destroyed while aborting wait; pCritSect=%p/%#x rc=%Rrc rcTerm=%Rrc\n",
396 pCritSect, pCritSect->s.Core.u32Magic, rc, rcTerm));
397 return VERR_SEM_DESTROYED;
398 }
399
400 /* We get here if we timed out. Just retry now that it
401 appears someone left already. */
402 Assert(rc == VERR_TIMEOUT);
403 cMsMaxOne = 10 /*ms*/;
404
405# else /* IN_RING3 */
406 RT_NOREF(pVM, pVCpu, rcBusy);
407# endif /* IN_RING3 */
408 }
409 /*
410 * Any other return code is fatal.
411 */
412 else
413 {
414 AssertMsgFailed(("rc=%Rrc\n", rc));
415 return RT_FAILURE_NP(rc) ? rc : -rc;
416 }
417 }
418 /* won't get here */
419}
420#endif /* IN_RING3 || IN_RING0 */
421
422
423/**
424 * Common worker for the debug and normal APIs.
425 *
426 * @returns VINF_SUCCESS if entered successfully.
427 * @returns rcBusy when encountering a busy critical section in RC/R0.
428 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
429 * during the operation.
430 *
431 * @param pVM The cross context VM structure.
432 * @param pCritSect The PDM critical section to enter.
433 * @param rcBusy The status code to return when we're in RC or R0 and the section is busy.
434 * @param pSrcPos The source position of the lock operation.
435 */
436DECL_FORCE_INLINE(int) pdmCritSectEnter(PVMCC pVM, PPDMCRITSECT pCritSect, int rcBusy, PCRTLOCKVALSRCPOS pSrcPos)
437{
438 Assert(pCritSect->s.Core.cNestings < 8); /* useful to catch incorrect locking */
439 Assert(pCritSect->s.Core.cNestings >= 0);
440#if defined(VBOX_STRICT) && defined(IN_RING0)
441 /* Hope we're not messing with critical sections while in the no-block
442 zone; that would complicate things a lot. */
443 PVMCPUCC pVCpuAssert = VMMGetCpu(pVM);
444 Assert(pVCpuAssert && VMMRZCallRing3IsEnabled(pVCpuAssert));
445#endif
446
447 /*
448 * If the critical section has already been destroyed, then inform the caller.
449 */
450 AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC,
451 ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic),
452 VERR_SEM_DESTROYED);
453
454 /*
455 * See if we're lucky.
456 */
457 /* NOP ... */
458 if (!(pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NOP))
459 { /* We're more likely to end up here with real critsects than a NOP one. */ }
460 else
461 return VINF_SUCCESS;
462
463 RTNATIVETHREAD hNativeSelf = pdmCritSectGetNativeSelf(pVM, pCritSect);
464 AssertReturn(hNativeSelf != NIL_RTNATIVETHREAD, VERR_VM_THREAD_NOT_EMT);
465 /* ... not owned ... */
466 if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
467 return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
468
469 /* ... or nested. */
470 if (pCritSect->s.Core.NativeThreadOwner == hNativeSelf)
471 {
472 Assert(pCritSect->s.Core.cNestings >= 1);
473# ifdef PDMCRITSECT_WITH_LESS_ATOMIC_STUFF
474 pCritSect->s.Core.cNestings += 1;
475# else
476 ASMAtomicIncS32(&pCritSect->s.Core.cNestings);
477# endif
478 ASMAtomicIncS32(&pCritSect->s.Core.cLockers);
479 Log12Func(("%p: cNestings=%d cLockers=%d\n", pCritSect, pCritSect->s.Core.cNestings, pCritSect->s.Core.cLockers));
480 return VINF_SUCCESS;
481 }
482
483 /*
484 * Spin for a bit without incrementing the counter.
485 */
486 /** @todo Move this to cfgm variables since it doesn't make sense to spin on UNI
487 * cpu systems. */
488 int32_t cSpinsLeft = CTX_SUFF(PDMCRITSECT_SPIN_COUNT_);
489 while (cSpinsLeft-- > 0)
490 {
491 if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
492 return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
493 ASMNopPause();
494 /** @todo Should use monitor/mwait on e.g. &cLockers here, possibly with a
495 cli'ed pendingpreemption check up front using sti w/ instruction fusing
496 for avoiding races. Hmm ... This is assuming the other party is actually
497 executing code on another CPU ... which we could keep track of if we
498 wanted. */
499 }
500
501#ifdef IN_RING3
502 /*
503 * Take the slow path.
504 */
505 NOREF(rcBusy);
506 return pdmR3R0CritSectEnterContended(pVM, NULL, pCritSect, hNativeSelf, pSrcPos, rcBusy);
507
508#elif defined(IN_RING0)
509# if 1 /* new code */
510 /*
511 * In ring-0 context we have to take the special VT-x/AMD-V HM context into
512 * account when waiting on contended locks.
513 *
514 * While we usually (rcBusy can be VINF_SUCCESS) have the option of returning
515 * rcBusy and forcing the caller to go back to ring-3 and restart the work
516 * there, it's almost always more efficient to try to wait for the lock here.
517 * The rcBusy status will be used if we encounter a VERR_INTERRUPTED situation,
518 * though.
519 */
520 PVMCPUCC pVCpu = VMMGetCpu(pVM);
521 if (pVCpu)
522 {
523 VMMR0EMTBLOCKCTX Ctx;
524 int rc = VMMR0EmtPrepareToBlock(pVCpu, rcBusy, __FUNCTION__, pCritSect, &Ctx);
525 if (rc == VINF_SUCCESS)
526 {
527 Assert(RTThreadPreemptIsEnabled(NIL_RTTHREAD));
528
529 rc = pdmR3R0CritSectEnterContended(pVM, pVCpu, pCritSect, hNativeSelf, pSrcPos, rcBusy);
530
531 VMMR0EmtResumeAfterBlocking(pVCpu, &Ctx);
532 }
533 else
534 STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLockBusy);
535 return rc;
536 }
537
538 /* Non-EMT. */
539 Assert(RTThreadPreemptIsEnabled(NIL_RTTHREAD));
540 return pdmR3R0CritSectEnterContended(pVM, NULL, pCritSect, hNativeSelf, pSrcPos, rcBusy);
541
542# else /* old code: */
543 /*
544 * If preemption hasn't been disabled, we can block here in ring-0.
545 */
546 if ( RTThreadPreemptIsEnabled(NIL_RTTHREAD)
547 && ASMIntAreEnabled())
548 return pdmR3R0CritSectEnterContended(pVM, VMMGetCpu(pVM), pCritSect, hNativeSelf, pSrcPos, rcBusy);
549
550 STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
551
552 /*
553 * Call ring-3 to acquire the critical section?
554 */
555 if (rcBusy == VINF_SUCCESS)
556 {
557 PVMCPUCC pVCpu = VMMGetCpu(pVM);
558 AssertReturn(pVCpu, VERR_PDM_CRITSECT_IPE);
559 return VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_PDM_CRIT_SECT_ENTER, MMHyperCCToR3(pVM, pCritSect));
560 }
561
562 /*
563 * Return busy.
564 */
565 LogFlow(("PDMCritSectEnter: locked => R3 (%Rrc)\n", rcBusy));
566 return rcBusy;
567# endif /* old code */
568#else
569# error "Unsupported context"
570#endif
571}
572
573
574/**
575 * Enters a PDM critical section.
576 *
577 * @returns VINF_SUCCESS if entered successfully.
578 * @returns rcBusy when encountering a busy critical section in RC/R0.
579 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
580 * during the operation.
581 *
582 * @param pVM The cross context VM structure.
583 * @param pCritSect The PDM critical section to enter.
584 * @param rcBusy The status code to return when we're in RC or R0
585 * and the section is busy. Pass VINF_SUCCESS to
586 * acquire the critical section through a ring-3
587 * call if necessary.
588 *
589 * @note Even callers setting @a rcBusy to VINF_SUCCESS must either handle
590 * possible failures in ring-0 or apply
591 * PDM_CRITSECT_RELEASE_ASSERT_RC(),
592 * PDM_CRITSECT_RELEASE_ASSERT_RC_DEV(),
593 * PDM_CRITSECT_RELEASE_ASSERT_RC_DRV() or
594 * PDM_CRITSECT_RELEASE_ASSERT_RC_USB() to the return value of this
595 * function.
596 */
597VMMDECL(DECL_CHECK_RETURN_NOT_R3(int)) PDMCritSectEnter(PVMCC pVM, PPDMCRITSECT pCritSect, int rcBusy)
598{
599#ifndef PDMCRITSECT_STRICT
600 return pdmCritSectEnter(pVM, pCritSect, rcBusy, NULL);
601#else
602 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
603 return pdmCritSectEnter(pVM, pCritSect, rcBusy, &SrcPos);
604#endif
605}
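
/*
 * Minimal usage sketch (hypothetical device code: pThis and the choice of
 * VINF_IOM_R3_MMIO_WRITE as rcBusy are illustrative only):
 *
 *      int rc = PDMCritSectEnter(pVM, &pThis->CritSect, VINF_IOM_R3_MMIO_WRITE);
 *      if (rc == VINF_IOM_R3_MMIO_WRITE)
 *          return rc;                      // ring-0: busy, defer the access to ring-3
 *      AssertRCReturn(rc, rc);             // VERR_SEM_DESTROYED and other failures
 *
 *      pThis->cAccesses++;                 // ... touch state owned by the section ...
 *
 *      PDMCritSectLeave(pVM, &pThis->CritSect);
 */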
606
607
608/**
609 * Enters a PDM critical section, with location information for debugging.
610 *
611 * @returns VINF_SUCCESS if entered successfully.
612 * @returns rcBusy when encountering a busy critical section in RC/R0.
613 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
614 * during the operation.
615 *
616 * @param pVM The cross context VM structure.
617 * @param pCritSect The PDM critical section to enter.
618 * @param rcBusy The status code to return when we're in RC or R0
619 * and the section is busy. Pass VINF_SUCCESS to
620 * acquire the critical section through a ring-3
621 * call if necessary.
622 * @param uId Some kind of locking location ID. Typically a
623 * return address up the stack. Optional (0).
624 * @param SRC_POS The source position where the lock is being
625 * acquired from. Optional.
626 */
627VMMDECL(DECL_CHECK_RETURN_NOT_R3(int))
628PDMCritSectEnterDebug(PVMCC pVM, PPDMCRITSECT pCritSect, int rcBusy, RTHCUINTPTR uId, RT_SRC_POS_DECL)
629{
630#ifdef PDMCRITSECT_STRICT
631 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
632 return pdmCritSectEnter(pVM, pCritSect, rcBusy, &SrcPos);
633#else
634 NOREF(uId); RT_SRC_POS_NOREF();
635 return pdmCritSectEnter(pVM, pCritSect, rcBusy, NULL);
636#endif
637}
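
/*
 * Sketch of the debug variant with caller location (hypothetical call site;
 * pThis is illustrative). RT_SRC_POS supplies the file/line/function triplet,
 * and the return address is a common choice for uId:
 *
 *      rc = PDMCritSectEnterDebug(pVM, &pThis->CritSect, VERR_IGNORED,
 *                                 (uintptr_t)ASMReturnAddress(), RT_SRC_POS);
 */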
638
639
640/**
641 * Common worker for the debug and normal APIs.
642 *
643 * @retval VINF_SUCCESS on success.
644 * @retval VERR_SEM_BUSY if the critsect was owned.
645 * @retval VERR_SEM_NESTED if nested enter on a no-nesting section. (Asserted.)
646 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
647 * during the operation.
648 *
649 * @param pVM The cross context VM structure.
650 * @param pCritSect The critical section.
651 * @param pSrcPos The source position of the lock operation.
652 */
653static int pdmCritSectTryEnter(PVMCC pVM, PPDMCRITSECT pCritSect, PCRTLOCKVALSRCPOS pSrcPos)
654{
655 /*
656 * If the critical section has already been destroyed, then inform the caller.
657 */
658 AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC,
659 ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic),
660 VERR_SEM_DESTROYED);
661
662 /*
663 * See if we're lucky.
664 */
665 /* NOP ... */
666 if (!(pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NOP))
667 { /* We're more likely to end up here with real critsects than a NOP one. */ }
668 else
669 return VINF_SUCCESS;
670
671 RTNATIVETHREAD hNativeSelf = pdmCritSectGetNativeSelf(pVM, pCritSect);
672 AssertReturn(hNativeSelf != NIL_RTNATIVETHREAD, VERR_VM_THREAD_NOT_EMT);
673 /* ... not owned ... */
674 if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
675 return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
676
677 /* ... or nested. */
678 if (pCritSect->s.Core.NativeThreadOwner == hNativeSelf)
679 {
680 Assert(pCritSect->s.Core.cNestings >= 1);
681# ifdef PDMCRITSECT_WITH_LESS_ATOMIC_STUFF
682 pCritSect->s.Core.cNestings += 1;
683# else
684 ASMAtomicIncS32(&pCritSect->s.Core.cNestings);
685# endif
686 ASMAtomicIncS32(&pCritSect->s.Core.cLockers);
687 Log12Func(("%p: cNestings=%d cLockers=%d\n", pCritSect, pCritSect->s.Core.cNestings, pCritSect->s.Core.cLockers));
688 return VINF_SUCCESS;
689 }
690
691 /* no spinning */
692
693 /*
694 * Return busy.
695 */
696#ifdef IN_RING3
697 STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionR3);
698#else
699 STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLockBusy);
700#endif
701 LogFlow(("PDMCritSectTryEnter: locked\n"));
702 return VERR_SEM_BUSY;
703}
704
705
706/**
707 * Try to enter a critical section.
708 *
709 * @retval VINF_SUCCESS on success.
710 * @retval VERR_SEM_BUSY if the critsect was owned.
711 * @retval VERR_SEM_NESTED if nested enter on a no-nesting section. (Asserted.)
712 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
713 * during the operation.
714 *
715 * @param pVM The cross context VM structure.
716 * @param pCritSect The critical section.
717 */
718VMMDECL(DECL_CHECK_RETURN(int)) PDMCritSectTryEnter(PVMCC pVM, PPDMCRITSECT pCritSect)
719{
720#ifndef PDMCRITSECT_STRICT
721 return pdmCritSectTryEnter(pVM, pCritSect, NULL);
722#else
723 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
724 return pdmCritSectTryEnter(pVM, pCritSect, &SrcPos);
725#endif
726}
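
/*
 * Typical try-enter pattern (hypothetical caller; pThis is illustrative):
 * do optional work only if the section can be taken without blocking.
 *
 *      if (PDMCritSectTryEnter(pVM, &pThis->CritSect) == VINF_SUCCESS)
 *      {
 *          // ... opportunistic work under the lock ...
 *          PDMCritSectLeave(pVM, &pThis->CritSect);
 *      }
 *      // else: VERR_SEM_BUSY - skip the work or retry later.
 */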
727
728
729/**
730 * Try to enter a critical section, with location information for debugging.
731 *
732 * @retval VINF_SUCCESS on success.
733 * @retval VERR_SEM_BUSY if the critsect was owned.
734 * @retval VERR_SEM_NESTED if nested enter on a no-nesting section. (Asserted.)
735 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
736 * during the operation.
737 *
738 * @param pVM The cross context VM structure.
739 * @param pCritSect The critical section.
740 * @param uId Some kind of locking location ID. Typically a
741 * return address up the stack. Optional (0).
742 * @param SRC_POS The source position where the lock is being
743 * acquired from. Optional.
744 */
745VMMDECL(DECL_CHECK_RETURN(int))
746PDMCritSectTryEnterDebug(PVMCC pVM, PPDMCRITSECT pCritSect, RTHCUINTPTR uId, RT_SRC_POS_DECL)
747{
748#ifdef PDMCRITSECT_STRICT
749 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
750 return pdmCritSectTryEnter(pVM, pCritSect, &SrcPos);
751#else
752 NOREF(uId); RT_SRC_POS_NOREF();
753 return pdmCritSectTryEnter(pVM, pCritSect, NULL);
754#endif
755}
756
757
758#ifdef IN_RING3
759/**
760 * Enters a PDM critical section.
761 *
762 * @returns VINF_SUCCESS if entered successfully.
763 * @returns rcBusy when encountering a busy critical section in GC/R0.
764 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
765 * during the operation.
766 *
767 * @param pVM The cross context VM structure.
768 * @param pCritSect The PDM critical section to enter.
769 * @param fCallRing3 Whether this is a VMMRZCallRing3() request.
770 */
771VMMR3DECL(int) PDMR3CritSectEnterEx(PVM pVM, PPDMCRITSECT pCritSect, bool fCallRing3)
772{
773 int rc = PDMCritSectEnter(pVM, pCritSect, VERR_IGNORED);
774 if ( rc == VINF_SUCCESS
775 && fCallRing3
776 && pCritSect->s.Core.pValidatorRec
777 && pCritSect->s.Core.pValidatorRec->hThread != NIL_RTTHREAD)
778 RTLockValidatorRecExclReleaseOwnerUnchecked(pCritSect->s.Core.pValidatorRec);
779 return rc;
780}
781#endif /* IN_RING3 */
782
783
784/**
785 * Leaves a critical section entered with PDMCritSectEnter().
786 *
787 * @returns Indication whether we really exited the critical section.
788 * @retval VINF_SUCCESS if we really exited.
789 * @retval VINF_SEM_NESTED if we only reduced the nesting count.
790 * @retval VERR_NOT_OWNER if you somehow ignore release assertions.
791 *
792 * @param pVM The cross context VM structure.
793 * @param pCritSect The PDM critical section to leave.
794 *
795 * @remarks Can be called from no-ring-3-call context in ring-0 (TM/VirtualSync)
796 * where we'll queue the leave operation for ring-3 processing.
797 */
798VMMDECL(int) PDMCritSectLeave(PVMCC pVM, PPDMCRITSECT pCritSect)
799{
800 AssertMsg(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic));
801 Assert(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC);
802
803 /*
804 * Check for NOP sections before asserting ownership.
805 */
806 if (!(pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NOP))
807 { /* We're more likely to end up here with real critsects than a NOP one. */ }
808 else
809 return VINF_SUCCESS;
810
811 /*
812 * Always check that the caller is the owner (screw performance).
813 */
814 RTNATIVETHREAD const hNativeSelf = pdmCritSectGetNativeSelf(pVM, pCritSect);
815 VMM_ASSERT_RELEASE_MSG_RETURN(pVM, pCritSect->s.Core.NativeThreadOwner == hNativeSelf && hNativeSelf != NIL_RTNATIVETHREAD,
816 ("%p %s: %p != %p; cLockers=%d cNestings=%d\n", pCritSect, R3STRING(pCritSect->s.pszName),
817 pCritSect->s.Core.NativeThreadOwner, hNativeSelf,
818 pCritSect->s.Core.cLockers, pCritSect->s.Core.cNestings),
819 VERR_NOT_OWNER);
820
821 /*
822 * Nested leave.
823 */
824 int32_t const cNestings = pCritSect->s.Core.cNestings;
825 Assert(cNestings >= 1);
826 if (cNestings > 1)
827 {
828#ifdef PDMCRITSECT_WITH_LESS_ATOMIC_STUFF
829 pCritSect->s.Core.cNestings = cNestings - 1;
830#else
831 ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, cNestings - 1);
832#endif
833 int32_t const cLockers = ASMAtomicDecS32(&pCritSect->s.Core.cLockers);
834 Assert(cLockers >= 0); RT_NOREF(cLockers);
835 Log12Func(("%p: cNestings=%d cLockers=%d\n", pCritSect, cNestings - 1, cLockers));
836 return VINF_SEM_NESTED;
837 }
838
839 Log12Func(("%p: cNestings=%d cLockers=%d hOwner=%p - leave for real\n",
840 pCritSect, cNestings, pCritSect->s.Core.cLockers, pCritSect->s.Core.NativeThreadOwner));
841
842#ifdef IN_RING3
843 /*
844 * Ring-3: Leave for real.
845 */
846 SUPSEMEVENT const hEventToSignal = pCritSect->s.hEventToSignal;
847 pCritSect->s.hEventToSignal = NIL_SUPSEMEVENT;
848
849# if defined(PDMCRITSECT_STRICT)
850 if (pCritSect->s.Core.pValidatorRec->hThread != NIL_RTTHREAD)
851 RTLockValidatorRecExclReleaseOwnerUnchecked(pCritSect->s.Core.pValidatorRec);
852# endif
853 Assert(!pCritSect->s.Core.pValidatorRec || pCritSect->s.Core.pValidatorRec->hThread == NIL_RTTHREAD);
854
855# ifdef PDMCRITSECT_WITH_LESS_ATOMIC_STUFF
856 //pCritSect->s.Core.cNestings = 0; /* not really needed */
857 pCritSect->s.Core.NativeThreadOwner = NIL_RTNATIVETHREAD;
858# else
859 ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 0);
860 ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD);
861# endif
862 ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
863
864 /* Stop profiling and decrement lockers. */
865 STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);
866 ASMCompilerBarrier();
867 int32_t const cLockers = ASMAtomicDecS32(&pCritSect->s.Core.cLockers);
868 if (cLockers < 0)
869 AssertMsg(cLockers == -1, ("cLockers=%d\n", cLockers));
870 else
871 {
872 /* Someone is waiting, wake up one of them. */
873 Assert(cLockers < _8K);
874 Log8(("PDMCritSectLeave: Waking up %p (cLockers=%u)\n", pCritSect, cLockers));
875 SUPSEMEVENT hEvent = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
876 int rc = SUPSemEventSignal(pVM->pSession, hEvent);
877 AssertRC(rc);
878 }
879
880 /* Signal exit event. */
881 if (RT_LIKELY(hEventToSignal == NIL_SUPSEMEVENT))
882 { /* likely */ }
883 else
884 {
885 Log8(("PDMCritSectLeave: Signalling %#p (%p)\n", hEventToSignal, pCritSect));
886 int rc = SUPSemEventSignal(pVM->pSession, hEventToSignal);
887 AssertRC(rc);
888 }
889
890 return VINF_SUCCESS;
891
892
893#elif defined(IN_RING0)
894 /*
895 * Ring-0: Try to leave for real; this depends on host and context.
896 */
897 SUPSEMEVENT const hEventToSignal = pCritSect->s.hEventToSignal;
898 pCritSect->s.hEventToSignal = NIL_SUPSEMEVENT;
899 PVMCPUCC pVCpu = VMMGetCpu(pVM);
900 bool fQueueOnTrouble = false; /* Set this to true to test queueing. */
901 if ( pVCpu == NULL /* non-EMT access, if we implement it must be able to block */
902 || VMMRZCallRing3IsEnabled(pVCpu)
903 || RTSemEventIsSignalSafe()
904 || ( VMMR0ThreadCtxHookIsEnabled(pVCpu) /* Doesn't matter if Signal() blocks if we have hooks, ... */
905 && RTThreadPreemptIsEnabled(NIL_RTTHREAD) /* ... and preemption is still enabled, */
906 && ASMIntAreEnabled()) /* ... and interrupts haven't yet been disabled. Special pre-GC HM env. */
907 || (fQueueOnTrouble = ( hEventToSignal == NIL_SUPSEMEVENT
908 && ASMAtomicUoReadS32(&pCritSect->s.Core.cLockers) == 0)) )
909 {
910 pCritSect->s.hEventToSignal = NIL_SUPSEMEVENT;
911
912# ifdef PDMCRITSECT_WITH_LESS_ATOMIC_STUFF
913 //pCritSect->s.Core.cNestings = 0; /* not really needed */
914 pCritSect->s.Core.NativeThreadOwner = NIL_RTNATIVETHREAD;
915# else
916 ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 0);
917 ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD);
918# endif
919 ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
920
921 /*
922 * Stop profiling and decrement lockers.
923 */
924 STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);
925 ASMCompilerBarrier();
926
927 bool fQueueIt = false;
928 int32_t cLockers;
929 if (!fQueueOnTrouble)
930 cLockers = ASMAtomicDecS32(&pCritSect->s.Core.cLockers);
931 else
932 {
933 cLockers = -1;
934 if (!ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, -1, 0))
935 fQueueIt = true;
936 }
937 if (!fQueueIt)
938 {
939 VMMR0EMTBLOCKCTX Ctx;
940 bool fLeaveCtx = false;
941 if (cLockers < 0)
942 AssertMsg(cLockers == -1, ("cLockers=%d\n", cLockers));
943 else
944 {
945 /* Someone is waiting, wake up one of them. */
946 Assert(cLockers < _8K);
947 SUPSEMEVENT hEvent = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
948 if (!RTSemEventIsSignalSafe() && (pVCpu = VMMGetCpu(pVM)) != NULL)
949 {
950 int rc = VMMR0EmtPrepareToBlock(pVCpu, VINF_SUCCESS, __FUNCTION__, pCritSect, &Ctx);
951 VMM_ASSERT_RELEASE_MSG_RETURN(pVM, RT_SUCCESS(rc), ("rc=%Rrc\n", rc), rc);
952 fLeaveCtx = true;
953 }
954 int rc = SUPSemEventSignal(pVM->pSession, hEvent);
955 AssertRC(rc);
956 }
957
958 /*
959 * Signal exit event.
960 */
961 if (RT_LIKELY(hEventToSignal == NIL_SUPSEMEVENT))
962 { /* likely */ }
963 else
964 {
965 if (!fLeaveCtx && pVCpu != NULL && !RTSemEventIsSignalSafe() && (pVCpu = VMMGetCpu(pVM)) != NULL)
966 {
967 int rc = VMMR0EmtPrepareToBlock(pVCpu, VINF_SUCCESS, __FUNCTION__, pCritSect, &Ctx);
968 VMM_ASSERT_RELEASE_MSG_RETURN(pVM, RT_SUCCESS(rc), ("rc=%Rrc\n", rc), rc);
969 fLeaveCtx = true;
970 }
971 Log8(("Signalling %#p\n", hEventToSignal));
972 int rc = SUPSemEventSignal(pVM->pSession, hEventToSignal);
973 AssertRC(rc);
974 }
975
976 /*
977 * Restore HM context if needed.
978 */
979 if (!fLeaveCtx)
980 { /* contention should be unlikely */ }
981 else
982 VMMR0EmtResumeAfterBlocking(pVCpu, &Ctx);
983
984# ifdef DEBUG_bird
985 VMMTrashVolatileXMMRegs();
986# endif
987 return VINF_SUCCESS;
988 }
989
990 /*
991 * Darn, someone raced in on us. Restore the state (this works only
992 * because the semaphore is effectively controlling ownership).
993 */
994 bool fRc;
995 RTNATIVETHREAD hMessOwner = NIL_RTNATIVETHREAD;
996 ASMAtomicCmpXchgExHandle(&pCritSect->s.Core.NativeThreadOwner, hNativeSelf, NIL_RTNATIVETHREAD, fRc, &hMessOwner);
997 AssertLogRelMsgReturn(fRc, ("pCritSect=%p hMessOwner=%p\n", pCritSect, hMessOwner),
998 pdmCritSectCorrupted(pCritSect, "owner race"));
999 STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
1000# ifdef PDMCRITSECT_WITH_LESS_ATOMIC_STUFF
1001 //pCritSect->s.Core.cNestings = 1;
1002 Assert(pCritSect->s.Core.cNestings == 1);
1003# else
1004 //Assert(pCritSect->s.Core.cNestings == 0);
1005 ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 1);
1006# endif
1007 Assert(hEventToSignal == NIL_SUPSEMEVENT);
1008 }
1009
1010
1011#else /* IN_RC */
1012 /*
1013 * Raw-mode: Try leave it.
1014 */
1015# error "This context is not used..."
1016 if (pCritSect->s.Core.cLockers == 0)
1017 {
1018# ifdef PDMCRITSECT_WITH_LESS_ATOMIC_STUFF
1019 //pCritSect->s.Core.cNestings = 0; /* not really needed */
1020# else
1021 ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 0);
1022# endif
1023 ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
1024 STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);
1025
1026 ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD);
1027 if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, -1, 0))
1028 return VINF_SUCCESS;
1029
1030 /*
1031 * Darn, someone raced in on us. Restore the state (this works only
1032 * because the semaphore is effectively controlling ownership).
1033 */
1034 bool fRc;
1035 RTNATIVETHREAD hMessOwner = NIL_RTNATIVETHREAD;
1036 ASMAtomicCmpXchgExHandle(&pCritSect->s.Core.NativeThreadOwner, hNativeSelf, NIL_RTNATIVETHREAD, fRc, &hMessOwner);
1037 AssertLogRelMsgReturn(fRc, ("pCritSect=%p hMessOwner=%p\n", pCritSect, hMessOwner),
1038 pdmCritSectCorrupted(pCritSect, "owner race"));
1039 STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
1040# ifdef PDMCRITSECT_WITH_LESS_ATOMIC_STUFF
1041 //pCritSect->s.Core.cNestings = 1;
1042 Assert(pCritSect->s.Core.cNestings == 1);
1043# else
1044 //Assert(pCritSect->s.Core.cNestings == 0);
1045 ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 1);
1046# endif
1047 }
1048#endif /* IN_RC */
1049
1050
1051#ifndef IN_RING3
1052 /*
1053 * Ring-0/raw-mode: Unable to leave. Queue the leave for ring-3.
1054 */
1055 ASMAtomicOrU32(&pCritSect->s.Core.fFlags, PDMCRITSECT_FLAGS_PENDING_UNLOCK);
1056# ifndef IN_RING0
1057 PVMCPUCC pVCpu = VMMGetCpu(pVM);
1058# endif
1059 uint32_t i = pVCpu->pdm.s.cQueuedCritSectLeaves++;
1060 LogFlow(("PDMCritSectLeave: [%d]=%p => R3\n", i, pCritSect));
1061 VMM_ASSERT_RELEASE_MSG_RETURN(pVM, i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectLeaves), ("%d\n", i), VERR_PDM_CRITSECT_IPE);
1062 pVCpu->pdm.s.apQueuedCritSectLeaves[i] = pCritSect->s.pSelfR3;
1063 VMM_ASSERT_RELEASE_MSG_RETURN(pVM,
1064 RT_VALID_PTR(pVCpu->pdm.s.apQueuedCritSectLeaves[i])
1065 && ((uintptr_t)pVCpu->pdm.s.apQueuedCritSectLeaves[i] & HOST_PAGE_OFFSET_MASK)
1066 == ((uintptr_t)pCritSect & HOST_PAGE_OFFSET_MASK),
1067 ("%p vs %p\n", pVCpu->pdm.s.apQueuedCritSectLeaves[i], pCritSect),
1068 pdmCritSectCorrupted(pCritSect, "Invalid pSelfR3 value"));
1069 VMCPU_FF_SET(pVCpu, VMCPU_FF_PDM_CRITSECT); /** @todo handle VMCPU_FF_PDM_CRITSECT in ring-0 outside the no-call-ring-3 part. */
1070 VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3); /* unnecessary paranoia */
1071 STAM_REL_COUNTER_INC(&pVM->pdm.s.StatQueuedCritSectLeaves);
1072 STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZUnlock);
1073
1074 return VINF_SUCCESS;
1075#endif /* IN_RING3 */
1076}
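
/*
 * Nesting sketch (hypothetical caller; pThis is illustrative): recursive
 * enters must be paired with leaves, and only the final leave (VINF_SUCCESS,
 * as opposed to VINF_SEM_NESTED) actually releases the section or queues the
 * release for ring-3:
 *
 *      PDMCritSectEnter(pVM, &pThis->CritSect, VERR_IGNORED);
 *      PDMCritSectEnter(pVM, &pThis->CritSect, VERR_IGNORED);    // nests, cNestings == 2
 *      int rc = PDMCritSectLeave(pVM, &pThis->CritSect);         // VINF_SEM_NESTED
 *      rc     = PDMCritSectLeave(pVM, &pThis->CritSect);         // VINF_SUCCESS (or queued)
 */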
1077
1078
1079#if defined(IN_RING0) || defined(IN_RING3)
1080/**
1081 * Schedule an event semaphore for signalling upon critsect exit.
1082 *
1083 * @returns VINF_SUCCESS on success.
1084 * @returns VERR_TOO_MANY_SEMAPHORES if an event was already scheduled.
1085 * @returns VERR_NOT_OWNER if we're not the critsect owner (ring-3 only).
1086 * @returns VERR_SEM_DESTROYED if RTCritSectDelete was called while waiting.
1087 *
1088 * @param pCritSect The critical section.
1089 * @param hEventToSignal The support driver event semaphore that should be
1090 * signalled.
1091 */
1092VMMDECL(int) PDMHCCritSectScheduleExitEvent(PPDMCRITSECT pCritSect, SUPSEMEVENT hEventToSignal)
1093{
1094 AssertPtr(pCritSect);
1095 Assert(!(pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NOP));
1096 Assert(hEventToSignal != NIL_SUPSEMEVENT);
1097# ifdef IN_RING3
1098 if (RT_UNLIKELY(!RTCritSectIsOwner(&pCritSect->s.Core)))
1099 return VERR_NOT_OWNER;
1100# endif
1101 if (RT_LIKELY( pCritSect->s.hEventToSignal == NIL_RTSEMEVENT
1102 || pCritSect->s.hEventToSignal == hEventToSignal))
1103 {
1104 pCritSect->s.hEventToSignal = hEventToSignal;
1105 return VINF_SUCCESS;
1106 }
1107 return VERR_TOO_MANY_SEMAPHORES;
1108}
1109#endif /* IN_RING0 || IN_RING3 */
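
/*
 * Usage sketch for the exit-event helper (hypothetical caller; pThis and
 * hMyEvent are illustrative): ask for the semaphore to be signalled once the
 * section is finally left. One possible fallback, fine in ring-3, is to
 * signal directly when another event is already scheduled:
 *
 *      int rc = PDMHCCritSectScheduleExitEvent(&pThis->CritSect, hMyEvent);
 *      if (rc == VERR_TOO_MANY_SEMAPHORES)
 *          rc = SUPSemEventSignal(pVM->pSession, hMyEvent);
 */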
1110
1111
1112/**
1113 * Checks whether the caller is the owner of the critical section.
1114 *
1115 * @returns true if owner.
1116 * @returns false if not owner.
1117 * @param pVM The cross context VM structure.
1118 * @param pCritSect The critical section.
1119 */
1120VMMDECL(bool) PDMCritSectIsOwner(PVMCC pVM, PCPDMCRITSECT pCritSect)
1121{
1122#ifdef IN_RING3
1123 RT_NOREF(pVM);
1124 return RTCritSectIsOwner(&pCritSect->s.Core);
1125#else
1126 PVMCPUCC pVCpu = VMMGetCpu(pVM);
1127 if ( !pVCpu
1128 || pCritSect->s.Core.NativeThreadOwner != pVCpu->hNativeThread)
1129 return false;
1130 return (pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK) == 0
1131 || pCritSect->s.Core.cNestings > 1;
1132#endif
1133}
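
/*
 * Common assertion pattern (hypothetical caller; pThis is illustrative):
 * guard code paths that rely on the section already being held:
 *
 *      Assert(PDMCritSectIsOwner(pVM, &pThis->CritSect));
 */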
1134
1135
1136/**
1137 * Checks whether the specified VCPU is the owner of the critical section.
1138 *
1139 * @returns true if owner.
1140 * @returns false if not owner.
1141 * @param pVCpu The cross context virtual CPU structure.
1142 * @param pCritSect The critical section.
1143 */
1144VMMDECL(bool) PDMCritSectIsOwnerEx(PVMCPUCC pVCpu, PCPDMCRITSECT pCritSect)
1145{
1146#ifdef IN_RING3
1147 NOREF(pVCpu);
1148 return RTCritSectIsOwner(&pCritSect->s.Core);
1149#else
1150 Assert(VMCC_GET_CPU(pVCpu->CTX_SUFF(pVM), pVCpu->idCpu) == pVCpu);
1151 if (pCritSect->s.Core.NativeThreadOwner != pVCpu->hNativeThread)
1152 return false;
1153 return (pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK) == 0
1154 || pCritSect->s.Core.cNestings > 1;
1155#endif
1156}
1157
1158
1159/**
1160 * Checks if anyone is waiting on the critical section we own.
1161 *
1162 * @returns true if someone is waiting.
1163 * @returns false if no one is waiting.
1164 * @param pVM The cross context VM structure.
1165 * @param pCritSect The critical section.
1166 */
1167VMMDECL(bool) PDMCritSectHasWaiters(PVMCC pVM, PCPDMCRITSECT pCritSect)
1168{
1169 AssertReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, false);
1170 Assert(pCritSect->s.Core.NativeThreadOwner == pdmCritSectGetNativeSelf(pVM, pCritSect)); RT_NOREF(pVM);
1171 return pCritSect->s.Core.cLockers >= pCritSect->s.Core.cNestings;
1172}
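
/*
 * Possible "lock courtesy" pattern built on the waiter check (hypothetical
 * caller; pThis is illustrative): briefly drop and re-take a long-held
 * section when somebody is queued behind us:
 *
 *      if (PDMCritSectHasWaiters(pVM, &pThis->CritSect))
 *      {
 *          PDMCritSectLeave(pVM, &pThis->CritSect);
 *          PDMCritSectEnter(pVM, &pThis->CritSect, VERR_IGNORED);
 *      }
 */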
1173
1174
1175/**
1176 * Checks if a critical section is initialized or not.
1177 *
1178 * @returns true if initialized.
1179 * @returns false if not initialized.
1180 * @param pCritSect The critical section.
1181 */
1182VMMDECL(bool) PDMCritSectIsInitialized(PCPDMCRITSECT pCritSect)
1183{
1184 return RTCritSectIsInitialized(&pCritSect->s.Core);
1185}
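
/*
 * Teardown guard sketch (hypothetical destructor; pThis is illustrative):
 * only take the section if it was actually initialized:
 *
 *      if (PDMCritSectIsInitialized(&pThis->CritSect))
 *      {
 *          PDMCritSectEnter(pVM, &pThis->CritSect, VERR_IGNORED);
 *          // ... cleanup ...
 *          PDMCritSectLeave(pVM, &pThis->CritSect);
 *      }
 */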
1186
1187
1188/**
1189 * Gets the recursion depth.
1190 *
1191 * @returns The recursion depth.
1192 * @param pCritSect The critical section.
1193 */
1194VMMDECL(uint32_t) PDMCritSectGetRecursion(PCPDMCRITSECT pCritSect)
1195{
1196 return RTCritSectGetRecursion(&pCritSect->s.Core);
1197}
1198